author     Oleksandr Byelkin <sanja@mariadb.com>  2021-02-02 17:55:53 +0100
committer  Oleksandr Byelkin <sanja@mariadb.com>  2021-02-02 17:55:53 +0100
commit     0f810b58faf487c7cd59f2ee520f62deb0f712eb (patch)
tree       dfb52d41ffd83d0dbffb969c091fe190b9457a24
parent     542d769ea1a22a7a6a87c9fe76ff911a162ade44 (diff)
parent     251b52190070095e4c65ffb0ae545d49330a02b2 (diff)
download   mariadb-git-10.4-merge-attempt.tar.gz
Merge branch 'bb-10.3-release' into bb-10.4-release (10.4-merge-attempt)
-rw-r--r--client/mysqldump.c103
-rwxr-xr-xdebian/autobake-deb.sh6
-rw-r--r--include/mysql_com.h2
m---------libmariadb0
-rw-r--r--libmysqld/lib_sql.cc3
-rw-r--r--man/mysqldump.19
-rw-r--r--mysql-test/main/create.result32
-rw-r--r--mysql-test/main/create.test150
-rw-r--r--mysql-test/main/ctype_utf32.result2
-rw-r--r--mysql-test/main/ctype_utf8mb4.result6
-rw-r--r--mysql-test/main/ctype_utf8mb4_myisam.result4
-rw-r--r--mysql-test/main/gis-json.result3
-rw-r--r--mysql-test/main/gis-json.test2
-rw-r--r--mysql-test/main/group_by.result48
-rw-r--r--mysql-test/main/group_by.test37
-rw-r--r--mysql-test/main/information_schema.result12
-rw-r--r--mysql-test/main/information_schema.test17
-rw-r--r--mysql-test/main/kill.result5
-rw-r--r--mysql-test/main/kill.test6
-rw-r--r--mysql-test/main/mix2_myisam.result2
-rw-r--r--mysql-test/main/myisam.result15
-rw-r--r--mysql-test/main/myisam.test1
-rw-r--r--mysql-test/main/mysql_upgrade.result240
-rw-r--r--mysql-test/main/mysql_upgrade.test30
-rw-r--r--mysql-test/main/mysqldump-system.result24
-rw-r--r--mysql-test/main/mysqldump.result315
-rw-r--r--mysql-test/main/mysqldump.test37
-rw-r--r--mysql-test/main/ps_show_log.result65
-rw-r--r--mysql-test/main/ps_show_log.test73
-rw-r--r--mysql-test/main/range.result17
-rw-r--r--mysql-test/main/range.test14
-rw-r--r--mysql-test/main/range_mrr_icp.result17
-rw-r--r--mysql-test/main/skip_grants.opt (renamed from mysql-test/main/skip_grants-master.opt)0
-rw-r--r--mysql-test/main/skip_grants.result31
-rw-r--r--mysql-test/main/skip_grants.test46
-rw-r--r--mysql-test/main/sp-ucs2.result6
-rw-r--r--mysql-test/main/sp-ucs2.test6
-rw-r--r--mysql-test/main/stat_tables.result14
-rw-r--r--mysql-test/main/stat_tables.test12
-rw-r--r--mysql-test/main/stat_tables_innodb.result14
-rw-r--r--mysql-test/main/table_elim.result2
-rw-r--r--mysql-test/main/table_value_constr.result55
-rw-r--r--mysql-test/main/table_value_constr.test51
-rw-r--r--mysql-test/main/user_limits.result27
-rw-r--r--mysql-test/main/user_limits.test26
-rw-r--r--mysql-test/main/view.result16
-rw-r--r--mysql-test/main/view.test22
-rwxr-xr-xmysql-test/mysql-test-run.pl10
-rw-r--r--mysql-test/suite/binlog/include/binlog_xa_recover.inc3
-rw-r--r--mysql-test/suite/binlog/r/binlog_xa_recover.result2
-rw-r--r--mysql-test/suite/binlog_encryption/binlog_xa_recover.result2
-rw-r--r--mysql-test/suite/galera/disabled.def3
-rw-r--r--mysql-test/suite/galera/r/galera_UK_conflict.result89
-rw-r--r--mysql-test/suite/galera/r/galera_fk_cascade_delete.result26
-rw-r--r--mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result20
-rw-r--r--mysql-test/suite/galera/r/galera_var_sst_auth.result16
-rw-r--r--mysql-test/suite/galera/r/lp1376747-4.result10
-rw-r--r--mysql-test/suite/galera/t/galera_UK_conflict.test148
-rw-r--r--mysql-test/suite/galera/t/galera_fk_cascade_delete.test19
-rw-r--r--mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test27
-rw-r--r--mysql-test/suite/galera/t/galera_truncate.test3
-rw-r--r--mysql-test/suite/galera/t/galera_var_sst_auth.test29
-rw-r--r--mysql-test/suite/galera/t/lp1376747-4.test36
-rw-r--r--mysql-test/suite/galera_3nodes/disabled.def10
-rw-r--r--mysql-test/suite/gcol/inc/gcol_column_def_options.inc5
-rw-r--r--mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result14
-rw-r--r--mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result14
-rw-r--r--mysql-test/suite/gcol/r/innodb_virtual_fk.result5
-rw-r--r--mysql-test/suite/gcol/t/innodb_virtual_fk.test6
-rw-r--r--mysql-test/suite/innodb/r/alter_mdl_timeout.result23
-rw-r--r--mysql-test/suite/innodb/r/file_format_defaults.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb.result2
-rw-r--r--mysql-test/suite/innodb/t/alter_mdl_timeout.opt1
-rw-r--r--mysql-test/suite/innodb/t/alter_mdl_timeout.test32
-rw-r--r--mysql-test/suite/innodb_fts/r/create.result10
-rw-r--r--mysql-test/suite/innodb_fts/t/create.test11
-rw-r--r--mysql-test/suite/innodb_zip/r/index_large_prefix.result10
-rw-r--r--mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result2
-rw-r--r--mysql-test/suite/maria/maria-ucs2.result4
-rw-r--r--mysql-test/suite/maria/maria.result14
-rw-r--r--mysql-test/suite/maria/maria3.result2
-rw-r--r--mysql-test/suite/maria/mrr.result4
-rw-r--r--mysql-test/suite/perfschema/r/schema.result22
-rw-r--r--mysql-test/suite/perfschema/r/table_schema.result22
-rw-r--r--mysql-test/suite/perfschema/r/threads_mysql.result22
-rw-r--r--mysql-test/suite/rpl/disabled.def3
-rw-r--r--mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test4
-rw-r--r--mysql-test/suite/rpl/r/rpl_relay_max_extension.result37
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_utf32.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_spec_variables.result60
-rw-r--r--mysql-test/suite/rpl/r/rpl_table_options.result9
-rw-r--r--mysql-test/suite/rpl/t/rpl_relay_max_extension.test109
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_create_table.test5
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test6
-rw-r--r--mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt1
-rw-r--r--mysql-test/suite/rpl/t/rpl_spec_variables.test8
-rw-r--r--mysql-test/suite/rpl/t/rpl_table_options.test1
-rw-r--r--mysql-test/suite/sql_sequence/mysqldump.result42
-rw-r--r--mysql-test/suite/sql_sequence/mysqldump.test11
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result12
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff2
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_innodb.result2
-rw-r--r--mysql-test/suite/wsrep/r/variables.result2
-rw-r--r--plugin/feedback/sender_thread.cc2
-rw-r--r--scripts/mysql_system_tables_fix.sql24
-rw-r--r--sql/create_options.cc5
-rw-r--r--sql/field.cc9
-rw-r--r--sql/ha_partition.cc11
-rw-r--r--sql/handler.cc3
-rw-r--r--sql/handler.h2
-rw-r--r--sql/item.cc24
-rw-r--r--sql/item.h19
-rw-r--r--sql/item_cmpfunc.cc14
-rw-r--r--sql/key.cc9
-rw-r--r--sql/log_event.cc6
-rw-r--r--sql/mdl.cc6
-rw-r--r--sql/mysqld.cc42
-rw-r--r--sql/mysqld.h4
-rw-r--r--sql/opt_range.cc34
-rw-r--r--sql/partition_info.cc4
-rw-r--r--sql/protocol.cc6
-rw-r--r--sql/rpl_parallel.cc5
-rw-r--r--sql/rpl_rli.cc4
-rw-r--r--sql/slave.cc242
-rw-r--r--sql/spatial.cc1
-rw-r--r--sql/sql_acl.cc6
-rw-r--r--sql/sql_class.cc48
-rw-r--r--sql/sql_class.h18
-rw-r--r--sql/sql_connect.cc2
-rw-r--r--sql/sql_handler.cc5
-rw-r--r--sql/sql_lex.cc3
-rw-r--r--sql/sql_manager.cc87
-rw-r--r--sql/sql_manager.h2
-rw-r--r--sql/sql_parse.cc1
-rw-r--r--sql/sql_plugin.h10
-rw-r--r--sql/sql_prepare.cc11
-rw-r--r--sql/sql_repl.cc17
-rw-r--r--sql/sql_repl.h1
-rw-r--r--sql/sql_select.cc28
-rw-r--r--sql/sql_select.h18
-rw-r--r--sql/sql_sequence.cc13
-rw-r--r--sql/sql_show.cc36
-rw-r--r--sql/sql_statistics.cc9
-rw-r--r--sql/sql_table.cc13
-rw-r--r--sql/sql_test.cc1
-rw-r--r--sql/sql_tvc.cc4
-rw-r--r--sql/sql_update.cc3
-rw-r--r--sql/sql_yacc.yy68
-rw-r--r--sql/sql_yacc_ora.yy82
-rw-r--r--sql/table.h38
-rw-r--r--sql/unireg.cc3
-rw-r--r--sql/upgrade_conf_file.cc1
-rw-r--r--sql/wsrep_sst.cc6
-rw-r--r--sql/wsrep_thd.cc539
-rw-r--r--storage/archive/ha_archive.cc4
-rw-r--r--storage/cassandra/ha_cassandra.cc42
-rw-r--r--storage/connect/CMakeLists.txt55
-rw-r--r--storage/connect/block.h42
-rw-r--r--storage/connect/bson.cpp1789
-rw-r--r--storage/connect/bson.h208
-rw-r--r--storage/connect/bsonudf.cpp6080
-rw-r--r--storage/connect/bsonudf.h403
-rw-r--r--storage/connect/cmgfam.cpp49
-rw-r--r--storage/connect/cmgfam.h7
-rw-r--r--storage/connect/colblk.cpp4
-rw-r--r--storage/connect/connect.cc3
-rw-r--r--storage/connect/filamap.cpp15
-rw-r--r--storage/connect/filamtxt.cpp464
-rw-r--r--storage/connect/filamtxt.h45
-rw-r--r--storage/connect/filamvct.cpp24
-rw-r--r--storage/connect/filamzip.cpp3
-rw-r--r--storage/connect/global.h27
-rw-r--r--storage/connect/ha_connect.cc159
-rw-r--r--storage/connect/jdbconn.cpp1
-rw-r--r--storage/connect/jmgfam.cpp41
-rw-r--r--storage/connect/jmgfam.h7
-rw-r--r--storage/connect/jmgoconn.cpp4
-rw-r--r--storage/connect/json.cpp1903
-rw-r--r--storage/connect/json.h474
-rw-r--r--storage/connect/jsonudf.cpp507
-rw-r--r--storage/connect/jsonudf.h98
-rw-r--r--storage/connect/libdoc.cpp2
-rw-r--r--storage/connect/mycat.cc52
-rw-r--r--storage/connect/mysql-test/connect/disabled.def5
-rw-r--r--storage/connect/mysql-test/connect/r/alter_xml.result4
-rw-r--r--storage/connect/mysql-test/connect/r/alter_xml2.result4
-rw-r--r--storage/connect/mysql-test/connect/r/bson.result517
-rw-r--r--storage/connect/mysql-test/connect/r/bson_java_2.result385
-rw-r--r--storage/connect/mysql-test/connect/r/bson_java_3.result385
-rw-r--r--storage/connect/mysql-test/connect/r/bson_mongo_c.result385
-rw-r--r--storage/connect/mysql-test/connect/r/bson_udf.result708
-rw-r--r--storage/connect/mysql-test/connect/r/jdbc_oracle.result8
-rw-r--r--storage/connect/mysql-test/connect/r/json.result148
-rw-r--r--storage/connect/mysql-test/connect/r/json_java_2.result71
-rw-r--r--storage/connect/mysql-test/connect/r/json_java_3.result71
-rw-r--r--storage/connect/mysql-test/connect/r/json_mongo_c.result71
-rw-r--r--storage/connect/mysql-test/connect/r/json_udf.result6
-rw-r--r--storage/connect/mysql-test/connect/r/json_udf_bin.result2
-rw-r--r--storage/connect/mysql-test/connect/r/mongo_c.result35
-rw-r--r--storage/connect/mysql-test/connect/r/mongo_java_2.result35
-rw-r--r--storage/connect/mysql-test/connect/r/mongo_java_3.result35
-rw-r--r--storage/connect/mysql-test/connect/r/odbc_oracle.result38
-rw-r--r--storage/connect/mysql-test/connect/r/rest.result19
-rw-r--r--storage/connect/mysql-test/connect/r/xml.result3
-rw-r--r--storage/connect/mysql-test/connect/r/xml2.result38
-rw-r--r--storage/connect/mysql-test/connect/r/xml2_html.result6
-rw-r--r--storage/connect/mysql-test/connect/r/xml2_mult.result4
-rw-r--r--storage/connect/mysql-test/connect/r/xml2_zip.result24
-rw-r--r--storage/connect/mysql-test/connect/r/xml_html.result6
-rw-r--r--storage/connect/mysql-test/connect/r/xml_mult.result4
-rw-r--r--storage/connect/mysql-test/connect/r/xml_zip.result24
-rw-r--r--storage/connect/mysql-test/connect/r/zip.result42
-rw-r--r--storage/connect/mysql-test/connect/t/alter_xml.test2
-rw-r--r--storage/connect/mysql-test/connect/t/alter_xml2.test2
-rw-r--r--storage/connect/mysql-test/connect/t/bson.test294
-rw-r--r--storage/connect/mysql-test/connect/t/bson_java_2.test14
-rw-r--r--storage/connect/mysql-test/connect/t/bson_java_3.test14
-rw-r--r--storage/connect/mysql-test/connect/t/bson_mongo_c.test10
-rw-r--r--storage/connect/mysql-test/connect/t/bson_udf.inc70
-rw-r--r--storage/connect/mysql-test/connect/t/bson_udf.test281
-rw-r--r--storage/connect/mysql-test/connect/t/bson_udf2.inc61
-rw-r--r--storage/connect/mysql-test/connect/t/ini_grant.result89
-rw-r--r--storage/connect/mysql-test/connect/t/jdbc_oracle.test8
-rw-r--r--storage/connect/mysql-test/connect/t/json.test146
-rw-r--r--storage/connect/mysql-test/connect/t/json_java_2.test1
-rw-r--r--storage/connect/mysql-test/connect/t/json_java_3.test1
-rw-r--r--storage/connect/mysql-test/connect/t/mongo_test.inc37
-rw-r--r--storage/connect/mysql-test/connect/t/odbc_oracle.test30
-rw-r--r--storage/connect/mysql-test/connect/t/rest.inc17
-rw-r--r--storage/connect/mysql-test/connect/t/rest.test17
-rw-r--r--storage/connect/mysql-test/connect/t/xml.test1
-rw-r--r--storage/connect/mysql-test/connect/t/xml2.test46
-rw-r--r--storage/connect/mysql-test/connect/t/xml2_html.test6
-rw-r--r--storage/connect/mysql-test/connect/t/xml2_mult.test4
-rw-r--r--storage/connect/mysql-test/connect/t/xml2_zip.test24
-rw-r--r--storage/connect/mysql-test/connect/t/xml_html.test6
-rw-r--r--storage/connect/mysql-test/connect/t/xml_mult.test4
-rw-r--r--storage/connect/mysql-test/connect/t/xml_zip.test24
-rw-r--r--storage/connect/mysql-test/connect/t/zip.test30
-rw-r--r--storage/connect/myutil.cpp3
-rw-r--r--storage/connect/plgdbsem.h5
-rw-r--r--storage/connect/plugutil.cpp74
-rw-r--r--storage/connect/tabbson.cpp2542
-rw-r--r--storage/connect/tabbson.h336
-rw-r--r--storage/connect/tabdos.cpp7
-rw-r--r--storage/connect/tabfmt.cpp29
-rw-r--r--storage/connect/tabjson.cpp645
-rw-r--r--storage/connect/tabjson.h30
-rw-r--r--storage/connect/tabrest.cpp117
-rw-r--r--storage/connect/tabrest.h5
-rw-r--r--storage/connect/user_connect.cc3
-rw-r--r--storage/connect/value.cpp6
-rw-r--r--storage/connect/value.h12
-rw-r--r--storage/connect/xobject.h1
-rw-r--r--storage/csv/ha_tina.cc9
-rw-r--r--storage/federated/ha_federated.cc19
-rw-r--r--storage/federatedx/ha_federatedx.cc19
-rw-r--r--storage/innobase/btr/btr0cur.cc29
-rw-r--r--storage/innobase/fil/fil0fil.cc1
-rw-r--r--storage/innobase/fts/fts0fts.cc2
-rw-r--r--storage/innobase/handler/ha_innodb.cc528
-rw-r--r--storage/innobase/handler/handler0alter.cc4
-rw-r--r--storage/innobase/include/ha_prototypes.h2
-rw-r--r--storage/innobase/include/srv0srv.h4
-rw-r--r--storage/innobase/include/trx0trx.h5
-rw-r--r--storage/innobase/include/univ.i2
-rw-r--r--storage/innobase/lock/lock0lock.cc66
-rw-r--r--storage/innobase/log/log0recv.cc1
-rw-r--r--storage/innobase/row/row0upd.cc33
-rw-r--r--storage/innobase/srv/srv0srv.cc5
-rw-r--r--storage/innobase/srv/srv0start.cc6
-rw-r--r--storage/innobase/trx/trx0trx.cc8
-rw-r--r--storage/maria/ma_recovery_util.c1
-rw-r--r--storage/mroonga/ha_mroonga.cpp28
-rw-r--r--storage/mroonga/lib/mrn_debug_column_access.cpp2
-rw-r--r--storage/mroonga/lib/mrn_debug_column_access.hpp6
-rw-r--r--storage/oqgraph/ha_oqgraph.cc10
-rw-r--r--storage/perfschema/pfs_engine_table.cc23
-rw-r--r--storage/perfschema/table_accounts.cc4
-rw-r--r--storage/perfschema/table_esgs_by_account_by_event_name.cc4
-rw-r--r--storage/perfschema/table_esgs_by_host_by_event_name.cc2
-rw-r--r--storage/perfschema/table_esgs_by_user_by_event_name.cc2
-rw-r--r--storage/perfschema/table_esms_by_account_by_event_name.cc4
-rw-r--r--storage/perfschema/table_esms_by_host_by_event_name.cc2
-rw-r--r--storage/perfschema/table_esms_by_user_by_event_name.cc2
-rw-r--r--storage/perfschema/table_ews_by_account_by_event_name.cc4
-rw-r--r--storage/perfschema/table_ews_by_host_by_event_name.cc2
-rw-r--r--storage/perfschema/table_ews_by_user_by_event_name.cc2
-rw-r--r--storage/perfschema/table_hosts.cc2
-rw-r--r--storage/perfschema/table_setup_actors.cc6
-rw-r--r--storage/perfschema/table_threads.cc4
-rw-r--r--storage/perfschema/table_users.cc2
-rw-r--r--storage/rocksdb/ha_rocksdb.cc11
-rw-r--r--storage/rocksdb/rdb_datadic.cc6
-rw-r--r--storage/sequence/sequence.cc4
-rw-r--r--storage/sphinx/ha_sphinx.cc4
-rw-r--r--storage/spider/ha_spider.cc12
-rw-r--r--storage/spider/spd_db_conn.cc60
-rw-r--r--storage/spider/spd_db_mysql.cc17
-rw-r--r--storage/tokudb/ha_tokudb.cc16
-rw-r--r--strings/ctype-ucs2.c10
-rw-r--r--unittest/mysys/stacktrace-t.c2
302 files changed, 22701 insertions, 3117 deletions
diff --git a/client/mysqldump.c b/client/mysqldump.c
index d6ff2dd275f..fc5b3bfabaf 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -42,6 +42,11 @@
/* on merge conflict, bump to a higher version again */
#define DUMP_VERSION "10.19"
+/**
+ First mysql version supporting sequences.
+*/
+#define FIRST_SEQUENCE_VERSION 100300
+
#include <my_global.h>
#include <my_sys.h>
#include <my_user.h>
@@ -92,6 +97,11 @@
/* Max length GTID position that we will output. */
#define MAX_GTID_LENGTH 1024
+/* Dump sequence/tables control */
+#define DUMP_TABLE_ALL -1
+#define DUMP_TABLE_TABLE 0
+#define DUMP_TABLE_SEQUENCE 1
+
static my_bool ignore_table_data(const uchar *hash_key, size_t len);
static void add_load_option(DYNAMIC_STRING *str, const char *option,
const char *option_value);
@@ -1063,6 +1073,20 @@ static int get_options(int *argc, char ***argv)
if ((ho_error= handle_options(argc, argv, my_long_options, get_one_option)))
return(ho_error);
+ /*
+ Dumping under --system=stats with --replace or --insert-ignore is safe and will not
+ result in a race condition. Otherwise dump only structure and ignore data by default
+ while dumping.
+ */
+ if (!(opt_system & OPT_SYSTEM_STATS) && !(opt_ignore || opt_replace_into))
+ {
+ if (my_hash_insert(&ignore_data,
+ (uchar*) my_strdup("mysql.innodb_index_stats", MYF(MY_WME))) ||
+ my_hash_insert(&ignore_data,
+ (uchar*) my_strdup("mysql.innodb_table_stats", MYF(MY_WME))))
+ return(EX_EOM);
+ }
+
if (opt_system & OPT_SYSTEM_ALL)
opt_system|= ~0;
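
To make the intent of this get_options() hunk concrete, here is a small SQL sketch of my own (not part of the patch): it names the two statistics tables whose data is now skipped by default, and shows the statement form that makes reloading them safe. Table and value details below are illustrative assumptions.

    -- The two tables whose data mysqldump now ignores unless --system=stats,
    -- --replace or --insert-ignore is given (their structure is still handled elsewhere):
    SELECT table_name FROM information_schema.tables
    WHERE table_schema = 'mysql'
      AND table_name IN ('innodb_table_stats', 'innodb_index_stats');

    -- Under --system=stats the rows come out as REPLACE INTO, so loading the dump
    -- cannot race with rows the target server has already regenerated:
    REPLACE INTO mysql.innodb_table_stats
      (database_name, table_name, last_update, n_rows,
       clustered_index_size, sum_of_other_index_sizes)
    VALUES ('db1', 't1', NOW(), 0, 1, 0);  -- illustrative values only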
@@ -3867,14 +3891,6 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key,
DBUG_ENTER("dump_table");
/*
- Check does table has a sequence structure and if has apply different sql queries
- */
- if (check_if_ignore_table(table, table_type) & IGNORE_SEQUENCE_TABLE)
- {
- get_sequence_structure(table, db);
- DBUG_VOID_RETURN;
- }
- /*
Make sure you get the create table info before the following check for
--no-data flag below. Otherwise, the create table info won't be printed.
*/
@@ -4358,18 +4374,36 @@ err:
} /* dump_table */
-static char *getTableName(int reset)
+static char *getTableName(int reset, int want_sequences)
{
MYSQL_ROW row;
if (!get_table_name_result)
{
- if (!(get_table_name_result= mysql_list_tables(mysql,NullS)))
- return(NULL);
+ if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION)
+ {
+ const char *query= "SHOW FULL TABLES";
+ if (mysql_query_with_error_report(mysql, 0, query))
+ return (NULL);
+
+ if (!(get_table_name_result= mysql_store_result(mysql)))
+ return (NULL);
+ }
+ else
+ {
+ if (!(get_table_name_result= mysql_list_tables(mysql,NullS)))
+ return(NULL);
+ }
}
if ((row= mysql_fetch_row(get_table_name_result)))
- return((char*) row[0]);
+ {
+ if (want_sequences != DUMP_TABLE_ALL)
+ while (row && MY_TEST(strcmp(row[1], "SEQUENCE")) == want_sequences)
+ row= mysql_fetch_row(get_table_name_result);
+ if (row)
+ return((char*) row[0]);
+ }
if (reset)
mysql_data_seek(get_table_name_result,0); /* We want to read again */
else
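
The reworked getTableName() above relies on the second column of SHOW FULL TABLES (Table_type) to tell sequences from base tables, which the old mysql_list_tables() call could not do. A minimal illustration of my own, assuming a 10.3+ server and a database named test with throwaway objects s1 and t1:

    CREATE SEQUENCE s1;
    CREATE TABLE t1 (a INT);
    SHOW FULL TABLES;
    -- Tables_in_test  Table_type
    -- s1              SEQUENCE
    -- t1              BASE TABLE
    DROP TABLE t1;
    DROP SEQUENCE s1;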
@@ -4762,7 +4796,7 @@ static int dump_all_servers()
static int dump_all_stats()
{
- my_bool prev_no_create_info;
+ my_bool prev_no_create_info, prev_opt_replace_into;
if (mysql_select_db(mysql, "mysql"))
{
@@ -4770,6 +4804,8 @@ static int dump_all_stats()
return 1; /* If --force */
}
fprintf(md_result_file,"\nUSE mysql;\n");
+ prev_opt_replace_into= opt_replace_into;
+ opt_replace_into|= !opt_ignore;
prev_no_create_info= opt_no_create_info;
opt_no_create_info= 1; /* don't overwrite recreate tables */
/* EITS added in 10.0.1 */
@@ -4788,6 +4824,7 @@ static int dump_all_stats()
dump_table("innodb_table_stats", "mysql", NULL, 0);
}
opt_no_create_info= prev_no_create_info;
+ opt_replace_into= prev_opt_replace_into;
return 0;
}
@@ -4798,12 +4835,14 @@ static int dump_all_stats()
static int dump_all_timezones()
{
- my_bool opt_prev_no_create_info;
+ my_bool opt_prev_no_create_info, opt_prev_replace_into;
if (mysql_select_db(mysql, "mysql"))
{
DB_error(mysql, "when selecting the database");
return 1; /* If --force */
}
+ opt_prev_replace_into= opt_replace_into;
+ opt_replace_into|= !opt_ignore;
opt_prev_no_create_info= opt_no_create_info;
opt_no_create_info= 1;
fprintf(md_result_file,"\nUSE mysql;\n");
@@ -4813,6 +4852,7 @@ static int dump_all_timezones()
dump_table("time_zone_transition", "mysql", NULL, 0);
dump_table("time_zone_transition_type", "mysql", NULL, 0);
opt_no_create_info= opt_prev_no_create_info;
+ opt_replace_into= opt_prev_replace_into;
return 0;
}
@@ -5302,7 +5342,7 @@ static int dump_all_tables_in_db(char *database)
{
DYNAMIC_STRING query;
init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024);
- for (numrows= 0 ; (table= getTableName(1)) ; )
+ for (numrows= 0 ; (table= getTableName(1, DUMP_TABLE_ALL)) ; )
{
char *end= strmov(afterdot, table);
if (include_table((uchar*) hash_key,end - hash_key))
@@ -5336,7 +5376,19 @@ static int dump_all_tables_in_db(char *database)
DBUG_RETURN(1);
}
}
- while ((table= getTableName(0)))
+
+ if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION &&
+ !opt_no_create_info)
+ {
+ // First process sequences
+ while ((table= getTableName(1, DUMP_TABLE_SEQUENCE)))
+ {
+ char *end= strmov(afterdot, table);
+ if (include_table((uchar*) hash_key, end - hash_key))
+ get_sequence_structure(table, database);
+ }
+ }
+ while ((table= getTableName(0, DUMP_TABLE_TABLE)))
{
char *end= strmov(afterdot, table);
if (include_table((uchar*) hash_key, end - hash_key))
@@ -5485,7 +5537,7 @@ static my_bool dump_all_views_in_db(char *database)
{
DYNAMIC_STRING query;
init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024);
- for (numrows= 0 ; (table= getTableName(1)); )
+ for (numrows= 0 ; (table= getTableName(1, DUMP_TABLE_TABLE)); )
{
char *end= strmov(afterdot, table);
if (include_table((uchar*) hash_key,end - hash_key))
@@ -5508,7 +5560,7 @@ static my_bool dump_all_views_in_db(char *database)
else
verbose_msg("-- dump_all_views_in_db : logs flushed successfully!\n");
}
- while ((table= getTableName(0)))
+ while ((table= getTableName(0, DUMP_TABLE_TABLE)))
{
char *end= strmov(afterdot, table);
if (include_table((uchar*) hash_key, end - hash_key))
@@ -5638,7 +5690,7 @@ static int get_sys_var_lower_case_table_names()
static int dump_selected_tables(char *db, char **table_names, int tables)
{
- char table_buff[NAME_LEN*2+3];
+ char table_buff[NAME_LEN*2+3], table_type[NAME_LEN];
DYNAMIC_STRING lock_tables_query;
char **dump_tables, **pos, **end;
int lower_case_table_names;
@@ -5735,9 +5787,22 @@ static int dump_selected_tables(char *db, char **table_names, int tables)
DBUG_RETURN(1);
}
}
+
+ if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION)
+ {
+ /* Dump Sequence first */
+ for (pos= dump_tables; pos < end; pos++)
+ {
+ DBUG_PRINT("info",("Dumping sequence(?) %s", *pos));
+ if (check_if_ignore_table(*pos, table_type) & IGNORE_SEQUENCE_TABLE)
+ get_sequence_structure(*pos, db);
+ }
+ }
/* Dump each selected table */
for (pos= dump_tables; pos < end; pos++)
{
+ if (check_if_ignore_table(*pos, table_type) & IGNORE_SEQUENCE_TABLE)
+ continue;
DBUG_PRINT("info",("Dumping table %s", *pos));
dump_table(*pos, db, NULL, 0);
if (opt_dump_triggers &&
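
Taken together, the dump_all_tables_in_db() and dump_selected_tables() changes are meant to emit sequence definitions before any table definitions or data, since a table may reference a sequence in a column DEFAULT. A rough sketch of the intended ordering, written by me rather than copied from a real dump, for a schema holding a sequence s1 and a table t1 that defaults to it:

    -- Sequences first ...
    CREATE SEQUENCE `s1` start with 1 minvalue 1 maxvalue 9223372036854775806
      increment by 1 cache 1000 nocycle ENGINE=InnoDB;
    -- ... then the tables that may depend on them, followed by their data:
    CREATE TABLE `t1` (
      `a` bigint(20) NOT NULL DEFAULT nextval(`s1`)
    );
    INSERT INTO `t1` VALUES (1),(2);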
diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh
index d1c0d779269..5ace5ea0a05 100755
--- a/debian/autobake-deb.sh
+++ b/debian/autobake-deb.sh
@@ -96,6 +96,12 @@ then
sed '/Package: mariadb-plugin-rocksdb/,/^$/d' -i debian/control
fi
+## Skip TokuDB if arch is not amd64
+if [[ ! $(dpkg-architecture -q DEB_BUILD_ARCH) =~ amd64 ]]
+then
+ sed '/Package: mariadb-plugin-tokudb/,/^$/d' -i debian/control
+fi
+
# Always remove aws plugin, see -DNOT_FOR_DISTRIBUTION in CMakeLists.txt
sed '/Package: mariadb-plugin-aws-key-management-10.2/,/^$/d' -i debian/control
diff --git a/include/mysql_com.h b/include/mysql_com.h
index 9e5215f29b3..65f686f3063 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -26,7 +26,7 @@
#define HOSTNAME_LENGTH 60
#define SYSTEM_CHARSET_MBMAXLEN 3
#define NAME_CHAR_LEN 64U /* Field/table name length */
-#define USERNAME_CHAR_LENGTH 128U
+#define USERNAME_CHAR_LENGTH 128
#define NAME_LEN (NAME_CHAR_LEN*SYSTEM_CHARSET_MBMAXLEN)
#define USERNAME_LENGTH (USERNAME_CHAR_LENGTH*SYSTEM_CHARSET_MBMAXLEN)
#define DEFINER_CHAR_LENGTH (USERNAME_CHAR_LENGTH + HOSTNAME_LENGTH + 1)
diff --git a/libmariadb b/libmariadb
-Subproject 018663324bf9cbe11b0b2191c6fb6c10564bb4e
+Subproject e62ff462c58ce154596a0f1da9e79cd4395396e
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 9fd6eb7805a..42a93f9a38b 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -625,7 +625,8 @@ int init_embedded_server(int argc, char **argv, char **groups)
(void) thr_setconcurrency(concurrency); // 10 by default
- start_handle_manager();
+ if (flush_time && flush_time != ~(ulong) 0L)
+ start_handle_manager();
// FIXME initialize binlog_filter and rpl_filter if not already done
// corresponding delete is in clean_up()
diff --git a/man/mysqldump.1 b/man/mysqldump.1
index 705a3cdd7ad..192fd6f8597 100644
--- a/man/mysqldump.1
+++ b/man/mysqldump.1
@@ -2261,7 +2261,7 @@ servers \- remote (federated) servers as \fBCREATE SERVER\fR\&.
.sp -1
.IP \(bu 2.3
.\}
-stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS), are dumped as \fBINSERT\fR/\fBREPLACE INFO\fR statements without (re)creating tables\&.
+stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS), are dumped as \fBREPLACE INTO\fR (or \fBINSERT IGNORE\fR if \fB\-\-insert\-ignore\fR is specified) statements without (re)creating tables\&.
.RE
.RS 4
.ie n \{\
@@ -2271,7 +2271,7 @@ stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS
.sp -1
.IP \(bu 2.3
.\}
-timezones \- timezone related system tables dumped as \fBINSERT\fR/\fBREPLACE INTO\fR statements without (re)creating tables\&.
+timezones \- timezone related system tables dumped as \fBREPLACE INTO\fR (or \fBINSERT IGNORE\fR if \fB\-\-insert\-ignore\fR is specified) statements without (re)creating tables\&.
.RE
.sp
The format of the output is affected by \fB\-\-replace\fR and \fB\-\-insert\-ignore\fR\&. The \fB\-\-replace\fR option will output \fBCREATE OR REPLACE\fR
@@ -2281,12 +2281,11 @@ With \fB\-\-system=user\fR (or \fBall\fR), and \fB\-\-replace\fR, SQL is generat
.sp
The \fB\-\-insert\-ignore\fR option will cause \fBCREATE IF NOT EXIST\fR forms of SQL to be generated if available.
.sp
-For stats, and timezones, \fB\-\-replace\fR and \fB\-\-insert\-info\fR have the usual effects.
+For stats, and timezones, \fB\-\-replace\fR and \fB\-\-insert\-ignore\fR have the usual effects.
.sp
Enabling specific options here will cause the relevant tables in the mysql database to be ignored when dumping the mysql database or \fB\-\-all\-databases\fR\&.
.sp
-Experimentally this option is designed to be able to dump system information from MySQL-5\&.7 and 8\&.0 servers\&. SQL generated is also
-experimentally compatible with MySQL-5\&.7/8\&.0\&. Mappings of implemenation specific grants/plugins isn't always one-to-one however\&.
+To help in migrating from MySQL to MariaDB, this option is designed to be able to dump system information from MySQL-5\&.7 and 8\&.0 servers\&. SQL generated is also experimentally compatible with MySQL-5\&.7/8\&.0\&. Mappings of implementation-specific grants/plugins aren't always one-to-one between MariaDB and MySQL, however, and will require manual changes\&.
.sp
.RE
.RS 4
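
As a usage illustration of the corrected wording (mine, not part of the man page): with --system=timezones the timezone tables are emitted as idempotent statements of roughly this shape, with REPLACE INTO swapped for INSERT IGNORE when --insert-ignore is given. Values are placeholders and not meant to be applied to a live server.

    USE mysql;
    -- default / --replace form:
    REPLACE INTO time_zone_name (Name, Time_zone_id) VALUES ('UTC', 1);
    -- --insert-ignore form:
    INSERT IGNORE INTO time_zone_name (Name, Time_zone_id) VALUES ('UTC', 1);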
diff --git a/mysql-test/main/create.result b/mysql-test/main/create.result
index 2905103b707..73666cb89a3 100644
--- a/mysql-test/main/create.result
+++ b/mysql-test/main/create.result
@@ -1,7 +1,4 @@
call mtr.add_suppression("table or database name 't-1'");
-drop table if exists t1,t2,t3,t4,t5;
-drop database if exists mysqltest;
-drop view if exists v1;
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
@@ -1225,7 +1222,7 @@ drop table if exists t1,t2,t3;
# Fix modified for MariaDB: we support this syntax
create table t1 (a int) transactional=0;
Warnings:
-Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=1'
+Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=0'
create table t2 (a int) page_checksum=1;
create table t3 (a int) row_format=page;
drop table t1,t2,t3;
@@ -2001,18 +1998,43 @@ alter table t1 add
key xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0064 (f64) comment 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy';
ERROR HY000: Cannot create table `t1`: index information is too long. Decrease number of indexes or use shorter index names or shorter comments.
drop table t1;
-End of 5.5 tests
+#
+# End of 5.5 tests
+#
+#
+# MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS
+#
create table t1;
ERROR 42000: A table must have at least 1 column
+#
+# MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT
+#
create table t1 (i int, j int, key(i), key(i)) as select 1 as i, 2 as j;
Warnings:
Note 1831 Duplicate index `i_2`. This is deprecated and will be disallowed in a future release
drop table t1;
+#
+# End of 10.0 tests
+#
+#
+# MDEV-18428 Memory: If transactional=0 is specified in CREATE TABLE, it is not possible to ALTER TABLE
+#
+create table t1 (c int(10) unsigned) engine=memory transactional=0;
+ERROR HY000: Table storage engine 'MEMORY' does not support the create option 'TRANSACTIONAL=0'
+#
+# End of 10.2 tests
+#
+#
+# MDEV-17544 No warning when trying to name a primary key constraint.
+#
CREATE TABLE t1 ( id1 INT, id2 INT, CONSTRAINT `foo` PRIMARY KEY (id1), CONSTRAINT `bar` UNIQUE KEY(id2));
Warnings:
Warning 1280 Name 'foo' ignored for PRIMARY key.
DROP TABLE t1;
#
+# End of 10.3 tests
+#
+#
# 10.4 Test
#
# MDEV-21017: Assertion `!is_set() || (m_status == DA_OK_BULK &&
diff --git a/mysql-test/main/create.test b/mysql-test/main/create.test
index a6a6ba0c782..7996c4a13ef 100644
--- a/mysql-test/main/create.test
+++ b/mysql-test/main/create.test
@@ -5,12 +5,6 @@ call mtr.add_suppression("table or database name 't-1'");
# Check some special create statements.
#
---disable_warnings
-drop table if exists t1,t2,t3,t4,t5;
-drop database if exists mysqltest;
-drop view if exists v1;
---enable_warnings
-
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
@@ -29,30 +23,30 @@ drop table t1;
# Test of some CREATE TABLE'S that should fail
#
---error 1146
+--error ER_NO_SUCH_TABLE
create table t2 engine=heap select * from t1;
---error 1146
+--error ER_NO_SUCH_TABLE
create table t2 select auto+1 from t1;
drop table if exists t1,t2;
---error 1167
+--error ER_WRONG_KEY_COLUMN
create table t1 (b char(0) not null, index(b));
---error 1163
+--error ER_TABLE_CANT_HANDLE_BLOB
create table t1 (a int not null,b text) engine=heap;
drop table if exists t1;
---error 1075
+--error ER_WRONG_AUTO_KEY
create table t1 (ordid int(8) not null auto_increment, ord varchar(50) not null, primary key (ord,ordid)) engine=heap;
--- error 1049
+--error ER_BAD_DB_ERROR
create table not_existing_database.test (a int);
create table `a/a` (a int);
show create table `a/a`;
create table t1 like `a/a`;
drop table `a/a`;
drop table `t1`;
---error 1103
+--error ER_WRONG_TABLE_NAME
create table `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa int);
---error 1059
+--error ER_TOO_LONG_IDENT
create table a (`aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` int);
#
@@ -62,17 +56,17 @@ create table t1 (a datetime default now());
drop table t1;
create table t1 (a datetime on update now());
drop table t1;
---error 1067
+--error ER_INVALID_DEFAULT
create table t1 (a int default 100 auto_increment);
---error 1067
+--error ER_INVALID_DEFAULT
create table t1 (a tinyint default 1000);
---error 1067
+--error ER_INVALID_DEFAULT
create table t1 (a varchar(5) default 'abcdef');
create table t1 (a varchar(5) default 'abcde');
insert into t1 values();
select * from t1;
---error 1067
+--error ER_INVALID_DEFAULT
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
alter table t1 alter column a set default 'abcdef';
drop table t1;
@@ -97,13 +91,13 @@ create table mysqltest.test2$ (a int);
drop table mysqltest.test2$;
drop database mysqltest;
---error 1103
+--error ER_WRONG_TABLE_NAME
create table `` (a int);
---error 1103
+--error ER_WRONG_TABLE_NAME
drop table if exists ``;
---error 1166
+--error ER_WRONG_COLUMN_NAME
create table t1 (`` int);
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
create table t1 (i int, index `` (i));
#
@@ -158,13 +152,13 @@ create table t2 (a int) select * from t1;
describe t1;
describe t2;
drop table if exists t2;
---error 1060
+--error ER_DUP_FIELDNAME
create table t2 (a int, a float) select * from t1;
drop table if exists t2;
---error 1060
+--error ER_DUP_FIELDNAME
create table t2 (a int) select a as b, a+1 as b from t1;
drop table if exists t2;
---error 1060
+--error ER_DUP_FIELDNAME
create table t2 (b int) select a as b, a+1 as b from t1;
drop table if exists t1,t2;
@@ -176,7 +170,7 @@ CREATE TABLE t1 (a int not null);
INSERT INTO t1 values (1),(2),(1);
--error ER_DUP_ENTRY
CREATE TABLE t2 (primary key(a)) SELECT * FROM t1;
---error 1146
+--error ER_NO_SUCH_TABLE
SELECT * from t2;
DROP TABLE t1;
DROP TABLE IF EXISTS t2;
@@ -202,7 +196,7 @@ SELECT @@storage_engine;
CREATE TABLE t1 (a int not null);
show create table t1;
drop table t1;
---error 1286
+--error ER_UNKNOWN_STORAGE_ENGINE
SET SESSION storage_engine="gemini";
SELECT @@storage_engine;
CREATE TABLE t1 (a int not null);
@@ -216,11 +210,11 @@ drop table t1;
#
create table t1 ( k1 varchar(2), k2 int, primary key(k1,k2));
insert into t1 values ("a", 1), ("b", 2);
---error 1048
+--error ER_BAD_NULL_ERROR
insert into t1 values ("c", NULL);
---error 1048
+--error ER_BAD_NULL_ERROR
insert into t1 values (NULL, 3);
---error 1048
+--error ER_BAD_NULL_ERROR
insert into t1 values (NULL, NULL);
drop table t1;
@@ -262,11 +256,11 @@ drop table t1;
# "Table truncated when creating another table name with Spaces"
#
---error 1103
+--error ER_WRONG_TABLE_NAME
create table `t1 `(a int);
---error 1102
+--error ER_WRONG_DB_NAME
create database `db1 `;
---error 1166
+--error ER_WRONG_COLUMN_NAME
create table t1(`a ` int);
#
@@ -274,11 +268,11 @@ create table t1(`a ` int);
# "Parser permits multiple commas without syntax error"
#
---error 1064
+--error ER_PARSE_ERROR
create table t1 (a int,);
---error 1064
+--error ER_PARSE_ERROR
create table t1 (a int,,b int);
---error 1064
+--error ER_PARSE_ERROR
create table t1 (,b int);
#
@@ -320,13 +314,13 @@ create table t2 like t3;
show create table t2;
select * from t2;
create table t3 like t1;
---error 1050
+--error ER_TABLE_EXISTS_ERROR
create table t3 like mysqltest.t3;
---error 1049
+--error ER_BAD_DB_ERROR
create table non_existing_database.t1 like t1;
--error ER_NO_SUCH_TABLE
create table t4 like non_existing_table;
---error 1050
+--error ER_TABLE_EXISTS_ERROR
create temporary table t3 like t1;
drop table t1, t2, t3;
drop table t3;
@@ -360,7 +354,7 @@ SELECT @@storage_engine;
CREATE TABLE t1 (a int not null);
show create table t1;
drop table t1;
---error 1286
+--error ER_UNKNOWN_STORAGE_ENGINE
SET SESSION storage_engine="gemini";
SELECT @@storage_engine;
CREATE TABLE t1 (a int not null);
@@ -480,9 +474,9 @@ use test;
# Test for Bug 856 'Naming a key "Primary" causes trouble'
#
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
create table t1 (a int, index `primary` (a));
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
create table t1 (a int, index `PRIMARY` (a));
create table t1 (`primary` int, index(`primary`));
@@ -491,9 +485,9 @@ create table t2 (`PRIMARY` int, index(`PRIMARY`));
show create table t2;
create table t3 (a int);
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
alter table t3 add index `primary` (a);
---error 1280
+--error ER_WRONG_NAME_FOR_INDEX
alter table t3 add index `PRIMARY` (a);
create table t4 (`primary` int);
@@ -548,11 +542,11 @@ drop table t1;
#
# Bug#10413: Invalid column name is not rejected
#
---error 1103
+--error ER_WRONG_TABLE_NAME
create table t1(column.name int);
---error 1103
+--error ER_WRONG_TABLE_NAME
create table t1(test.column.name int);
---error 1102
+--error ER_WRONG_DB_NAME
create table t1(xyz.t1.name int);
create table t1(t1.name int);
create table t2(test.t2.name int);
@@ -591,7 +585,7 @@ drop table if exists test.t1;
create database mysqltest;
use mysqltest;
create view v1 as select 'foo' from dual;
---error 1347
+--error ER_WRONG_OBJECT
create table t1 like v1;
drop view v1;
drop database mysqltest;
@@ -712,7 +706,7 @@ drop table t1, t2;
#
# Bug #15316 SET value having comma not correctly handled
#
---error 1367
+--error ER_ILLEGAL_VALUE_FOR_TYPE
create table t1(a set("a,b","c,d") not null);
# End of 4.1 tests
@@ -914,9 +908,9 @@ INSERT IGNORE INTO t1 (b) VALUES (5);
CREATE TABLE IF NOT EXISTS t2 (a INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY)
SELECT a FROM t1;
---error 1062
+--error ER_DUP_ENTRY
INSERT INTO t2 SELECT a FROM t1;
---error 1062
+--error ER_DUP_ENTRY
INSERT INTO t2 SELECT a FROM t1;
DROP TABLE t1, t2;
@@ -976,24 +970,24 @@ drop table t1,t2;
# Test incorrect database names
#
---error 1102
+--error ER_WRONG_DB_NAME
CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
---error 1102
+--error ER_WRONG_DB_NAME
DROP DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
# TODO: enable these tests when RENAME DATABASE is implemented.
-# --error 1049
+# --error ER_BAD_DB_ERROR
# RENAME DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TO a;
-# --error 1102
+# --error ER_WRONG_DB_NAME
# RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
# create database mysqltest;
-# --error 1102
+# --error ER_WRONG_DB_NAME
# RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
# drop database mysqltest;
---error 1102
+--error ER_WRONG_DB_NAME
USE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
---error 1102
+--error ER_WRONG_DB_NAME
SHOW CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
#
@@ -1853,27 +1847,47 @@ alter table t1 add
key xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0064 (f64) comment 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy';
drop table t1;
---echo End of 5.5 tests
+--echo #
+--echo # End of 5.5 tests
+--echo #
-#
-# MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS
-#
+--echo #
+--echo # MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS
+--echo #
--error ER_TABLE_MUST_HAVE_COLUMNS
create table t1;
-#
-# MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT
-#
+--echo #
+--echo # MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT
+--echo #
create table t1 (i int, j int, key(i), key(i)) as select 1 as i, 2 as j;
drop table t1;
-#
-# MDEV-17544 No warning when trying to name a primary key constraint.
-#
+--echo #
+--echo # End of 10.0 tests
+--echo #
+
+--echo #
+--echo # MDEV-18428 Memory: If transactional=0 is specified in CREATE TABLE, it is not possible to ALTER TABLE
+--echo #
+--error ER_ILLEGAL_HA_CREATE_OPTION
+create table t1 (c int(10) unsigned) engine=memory transactional=0;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
+--echo #
+--echo # MDEV-17544 No warning when trying to name a primary key constraint.
+--echo #
CREATE TABLE t1 ( id1 INT, id2 INT, CONSTRAINT `foo` PRIMARY KEY (id1), CONSTRAINT `bar` UNIQUE KEY(id2));
DROP TABLE t1;
--echo #
+--echo # End of 10.3 tests
+--echo #
+
+--echo #
--echo # 10.4 Test
--echo #
--echo # MDEV-21017: Assertion `!is_set() || (m_status == DA_OK_BULK &&
diff --git a/mysql-test/main/ctype_utf32.result b/mysql-test/main/ctype_utf32.result
index de1f4c3810f..da71f6eb59c 100644
--- a/mysql-test/main/ctype_utf32.result
+++ b/mysql-test/main/ctype_utf32.result
@@ -1306,7 +1306,7 @@ create table t1 (a varchar(334) character set utf32 primary key);
ERROR 42000: Specified key was too long; max key length is 1000 bytes
create table t1 (a varchar(333) character set utf32, key(a));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
insert into t1 values (repeat('a',333)), (repeat('b',333));
flush tables;
check table t1;
diff --git a/mysql-test/main/ctype_utf8mb4.result b/mysql-test/main/ctype_utf8mb4.result
index bdcc07d590e..2762873b9c7 100644
--- a/mysql-test/main/ctype_utf8mb4.result
+++ b/mysql-test/main/ctype_utf8mb4.result
@@ -1478,7 +1478,7 @@ a varchar(255) NOT NULL default '',
KEY a (a)
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_general_ci;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
insert into t1 values (_utf8mb4 0xe880bd);
insert into t1 values (_utf8mb4 0x5b);
select hex(a) from t1;
@@ -1526,7 +1526,7 @@ Warnings:
Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
INSERT INTO t1 VALUES('uu');
check table t1;
@@ -2726,7 +2726,7 @@ DEFAULT CHARACTER SET utf8,
MODIFY subject varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci,
MODIFY p varchar(255) CHARACTER SET utf8;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/main/ctype_utf8mb4_myisam.result b/mysql-test/main/ctype_utf8mb4_myisam.result
index 8940d10fc78..ee2bd4431fc 100644
--- a/mysql-test/main/ctype_utf8mb4_myisam.result
+++ b/mysql-test/main/ctype_utf8mb4_myisam.result
@@ -1443,7 +1443,7 @@ a varchar(255) NOT NULL default '',
KEY a (a)
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_general_ci;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
insert into t1 values (_utf8mb4 0xe880bd);
insert into t1 values (_utf8mb4 0x5b);
select hex(a) from t1;
@@ -1491,7 +1491,7 @@ Warnings:
Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
INSERT INTO t1 VALUES('uu');
check table t1;
diff --git a/mysql-test/main/gis-json.result b/mysql-test/main/gis-json.result
index 1d6e2193fc9..d507a9994ff 100644
--- a/mysql-test/main/gis-json.result
+++ b/mysql-test/main/gis-json.result
@@ -104,6 +104,9 @@ a
NULL
Warnings:
Warning 4076 Incorrect GeoJSON format - empty 'coordinates' array.
+SELECT ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }");
+ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }")
+NULL
#
# End of 10.2 tests
#
diff --git a/mysql-test/main/gis-json.test b/mysql-test/main/gis-json.test
index b91ef235fd0..a97e9411e5c 100644
--- a/mysql-test/main/gis-json.test
+++ b/mysql-test/main/gis-json.test
@@ -44,6 +44,8 @@ SELECT st_astext(st_geomfromgeojson('{"type": "MultiLineString","coordinates": [
SELECT st_astext(st_geomfromgeojson('{"type": "Polygon","coordinates": []}')) as a;
SELECT st_astext(st_geomfromgeojson('{"type": "MultiPolygon","coordinates": []}')) as a;
+SELECT ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }");
+
--echo #
--echo # End of 10.2 tests
--echo #
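
For contrast with the malformed document the new test feeds in (a Feature whose geometry is a bare array, which must return NULL instead of crashing), here is a well-formed GeoJSON value of my own that does parse:

    SELECT ST_AsText(ST_GeomFromGeoJSON('{"type": "Point", "coordinates": [10, 20]}'));
    -- POINT(10 20)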
diff --git a/mysql-test/main/group_by.result b/mysql-test/main/group_by.result
index ecd115c2dd0..47f399ecd3d 100644
--- a/mysql-test/main/group_by.result
+++ b/mysql-test/main/group_by.result
@@ -2884,6 +2884,52 @@ GROUP BY t.table_name;
ERROR HY001: Out of sort memory, consider increasing server sort buffer size
SET max_sort_length= @save_max_sort_length;
#
+# MDEV-23826: ORDER BY in view definition leads to wrong result with GROUP BY on query using view
+#
+CREATE TABLE t1
+(
+id INT PRIMARY KEY AUTO_INCREMENT,
+dt datetime,
+INDEX(dt),
+foo int
+);
+INSERT INTO t1 VALUES (1,'2020-09-26 12:00:00',1);
+INSERT INTO t1 VALUES (2,'2020-09-26 13:00:00',1);
+INSERT INTO t1 VALUES (3,'2020-09-27 13:00:00',1);
+INSERT INTO t1 VALUES (4,'2020-09-27 12:00:00',1);
+INSERT INTO t1 VALUES (5,'2020-09-28 12:00:00',1);
+INSERT INTO t1 VALUES (6,'2020-09-28 13:00:00',1);
+INSERT INTO t1 VALUES (7,'2020-09-25 12:00:00',1);
+INSERT INTO t1 VALUES (8,'2020-09-25 13:00:00',1);
+INSERT INTO t1 VALUES (9,'2020-09-26 13:00:00',1);
+CREATE VIEW v1 AS SELECT * FROM t1;
+CREATE VIEW v2 AS SELECT * FROM t1 ORDER BY dt;
+SELECT dt, sum(foo) AS foo FROM v1 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt;
+dt foo
+2020-09-25 12:00:00 1
+2020-09-25 13:00:00 1
+2020-09-26 12:00:00 1
+2020-09-26 13:00:00 2
+2020-09-27 12:00:00 1
+2020-09-27 13:00:00 1
+2020-09-28 12:00:00 1
+2020-09-28 13:00:00 1
+SELECT dt, sum(foo) AS foo FROM v2 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt;
+dt foo
+2020-09-25 12:00:00 1
+2020-09-25 13:00:00 1
+2020-09-26 12:00:00 1
+2020-09-26 13:00:00 2
+2020-09-27 12:00:00 1
+2020-09-27 13:00:00 1
+2020-09-28 12:00:00 1
+2020-09-28 13:00:00 1
+DROP TABLE t1;
+DROP VIEW v1,v2;
+#
+# End of 10.2 tests
+#
+#
# MDEV-16170
# Server crashes in Item_null_result::type_handler on SELECT with ROLLUP
#
@@ -2894,4 +2940,6 @@ f COUNT(*)
1 1
NULL 1
DROP TABLE t1;
+#
# End of 10.3 tests
+#
diff --git a/mysql-test/main/group_by.test b/mysql-test/main/group_by.test
index 3dc0ee9f00b..16cb7cfb9fd 100644
--- a/mysql-test/main/group_by.test
+++ b/mysql-test/main/group_by.test
@@ -1987,7 +1987,6 @@ drop table t1;
--echo # GROUP BY leads to crash
--echo #
-
CALL mtr.add_suppression("Out of sort memory");
CALL mtr.add_suppression("Sort aborted");
SET @save_max_sort_length= @@max_sort_length;
@@ -2000,6 +1999,40 @@ GROUP BY t.table_name;
SET max_sort_length= @save_max_sort_length;
--echo #
+--echo # MDEV-23826: ORDER BY in view definition leads to wrong result with GROUP BY on query using view
+--echo #
+
+CREATE TABLE t1
+(
+ id INT PRIMARY KEY AUTO_INCREMENT,
+ dt datetime,
+ INDEX(dt),
+ foo int
+);
+
+INSERT INTO t1 VALUES (1,'2020-09-26 12:00:00',1);
+INSERT INTO t1 VALUES (2,'2020-09-26 13:00:00',1);
+INSERT INTO t1 VALUES (3,'2020-09-27 13:00:00',1);
+INSERT INTO t1 VALUES (4,'2020-09-27 12:00:00',1);
+INSERT INTO t1 VALUES (5,'2020-09-28 12:00:00',1);
+INSERT INTO t1 VALUES (6,'2020-09-28 13:00:00',1);
+INSERT INTO t1 VALUES (7,'2020-09-25 12:00:00',1);
+INSERT INTO t1 VALUES (8,'2020-09-25 13:00:00',1);
+INSERT INTO t1 VALUES (9,'2020-09-26 13:00:00',1);
+
+CREATE VIEW v1 AS SELECT * FROM t1;
+CREATE VIEW v2 AS SELECT * FROM t1 ORDER BY dt;
+SELECT dt, sum(foo) AS foo FROM v1 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt;
+SELECT dt, sum(foo) AS foo FROM v2 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt;
+
+DROP TABLE t1;
+DROP VIEW v1,v2;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
+--echo #
--echo # MDEV-16170
--echo # Server crashes in Item_null_result::type_handler on SELECT with ROLLUP
--echo #
@@ -2009,4 +2042,6 @@ INSERT INTO t1 VALUES ('2032-10-08');
SELECT d != '2023-03-04' AS f, COUNT(*) FROM t1 GROUP BY d WITH ROLLUP;
DROP TABLE t1;
+--echo #
--echo # End of 10.3 tests
+--echo #
diff --git a/mysql-test/main/information_schema.result b/mysql-test/main/information_schema.result
index 55c997eee16..910a657e034 100644
--- a/mysql-test/main/information_schema.result
+++ b/mysql-test/main/information_schema.result
@@ -2228,6 +2228,18 @@ TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAUL
Warnings:
Warning 1931 Query execution was interrupted. The query examined at least ### rows, which exceeds LIMIT ROWS EXAMINED (10). The query result may be incomplete
#
+# MDEV-24179: Assertion `m_status == DA_ERROR || m_status == DA_OK ||
+# m_status == DA_OK_BULK' failed in Diagnostics_area::message()
+#
+call mtr.add_suppression("Sort aborted.*");
+DROP DATABASE test;
+CREATE DATABASE test;
+USE test;
+CREATE VIEW v AS SELECT table_schema AS object_schema, table_name AS object_name, table_type AS object_type FROM information_schema.tables ORDER BY object_schema;
+SELECT * FROM v LIMIT ROWS EXAMINED 9;
+ERROR HY000: Sort aborted:
+DROP VIEW v;
+#
# End of 10.2 Test
#
#
diff --git a/mysql-test/main/information_schema.test b/mysql-test/main/information_schema.test
index fb9eaa02bff..4e05ac4e5db 100644
--- a/mysql-test/main/information_schema.test
+++ b/mysql-test/main/information_schema.test
@@ -1935,6 +1935,23 @@ replace_regex /at least \d+ rows/at least ### rows/;
SELECT * FROM INFORMATION_SCHEMA.`COLUMNS` LIMIT ROWS EXAMINED 10;
--echo #
+--echo # MDEV-24179: Assertion `m_status == DA_ERROR || m_status == DA_OK ||
+--echo # m_status == DA_OK_BULK' failed in Diagnostics_area::message()
+--echo #
+
+call mtr.add_suppression("Sort aborted.*");
+
+DROP DATABASE test;
+CREATE DATABASE test;
+USE test;
+CREATE VIEW v AS SELECT table_schema AS object_schema, table_name AS object_name, table_type AS object_type FROM information_schema.tables ORDER BY object_schema;
+
+--error ER_FILSORT_ABORT
+SELECT * FROM v LIMIT ROWS EXAMINED 9;
+
+DROP VIEW v;
+
+--echo #
--echo # End of 10.2 Test
--echo #
diff --git a/mysql-test/main/kill.result b/mysql-test/main/kill.result
index 1ea06aee096..7ed229036c7 100644
--- a/mysql-test/main/kill.result
+++ b/mysql-test/main/kill.result
@@ -413,3 +413,8 @@ ALTER TABLE t2 DROP c;
UNLOCK TABLES;
DROP VIEW v1;
DROP TABLE t1, t2;
+#
+# KILL QUERY ID USER
+#
+kill query id user 'foo';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ''foo'' at line 1
diff --git a/mysql-test/main/kill.test b/mysql-test/main/kill.test
index c5bbd349574..bc4e114ff2e 100644
--- a/mysql-test/main/kill.test
+++ b/mysql-test/main/kill.test
@@ -664,3 +664,9 @@ ALTER TABLE t2 DROP c;
UNLOCK TABLES;
DROP VIEW v1;
DROP TABLE t1, t2;
+
+--echo #
+--echo # KILL QUERY ID USER
+--echo #
+--error ER_PARSE_ERROR
+kill query id user 'foo';
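
For reference, an illustration of my own rather than part of the test: KILL QUERY ID expects a numeric query id and KILL ... USER expects a user, but the two cannot be combined, which is why the statement above fails at parse time.

    KILL USER 'foo';    -- valid: terminates connections of user 'foo' (a no-op if there are none)
    KILL QUERY ID 123;  -- valid syntax; 123 is a hypothetical id, so this fails at runtime if no such query exists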
diff --git a/mysql-test/main/mix2_myisam.result b/mysql-test/main/mix2_myisam.result
index 5acec2616fa..220d14f3b0e 100644
--- a/mysql-test/main/mix2_myisam.result
+++ b/mysql-test/main/mix2_myisam.result
@@ -1990,7 +1990,7 @@ a b
drop table t1;
create table t1 (v varchar(65530), key(v));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
drop table t1;
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t1 (v varchar(65536));
diff --git a/mysql-test/main/myisam.result b/mysql-test/main/myisam.result
index 7556e64936a..32737bd8399 100644
--- a/mysql-test/main/myisam.result
+++ b/mysql-test/main/myisam.result
@@ -1706,7 +1706,7 @@ a b
drop table t1;
create table t1 (v varchar(65530), key(v));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
drop table if exists t1;
set statement sql_mode = 'NO_ENGINE_SUBSTITUTION' for
create table t1 (v varchar(65536));
@@ -1980,7 +1980,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1990,7 +1990,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1024);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2000,7 +2000,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=1024;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2046,7 +2046,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=8192;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2136,7 +2136,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a) key_block_size=1024, key(b)) key_block_size=8192;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2166,7 +2166,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2412,6 +2412,7 @@ Key Start Len Index Type
1 2 30 multip. varchar
2 33 30 multip. char NULL
DROP TABLE t1;
+set statement sql_mode='' for
create table t1 (n int not null, c char(1)) transactional=1;
Warnings:
Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=1'
diff --git a/mysql-test/main/myisam.test b/mysql-test/main/myisam.test
index 0b153514ecb..9b81ab197ae 100644
--- a/mysql-test/main/myisam.test
+++ b/mysql-test/main/myisam.test
@@ -1557,6 +1557,7 @@ DROP TABLE t1;
# MariaDB: Note that the table will still have 'TRANSACTIONAL=1' attribute.
# That's the intended behavior atm.
#
+set statement sql_mode='' for
create table t1 (n int not null, c char(1)) transactional=1;
show create table t1;
drop table t1;
diff --git a/mysql-test/main/mysql_upgrade.result b/mysql-test/main/mysql_upgrade.result
index 6e17d6cc333..c69b2af5ba7 100644
--- a/mysql-test/main/mysql_upgrade.result
+++ b/mysql-test/main/mysql_upgrade.result
@@ -674,8 +674,120 @@ test
Phase 7/7: Running 'FLUSH PRIVILEGES'
OK
SHOW CREATE TABLE mysql.user;
+<<<<<<< HEAD
View Create View character_set_client collation_connection
user CREATE ALGORITHM=UNDEFINED DEFINER=`mariadb.sys`@`localhost` SQL SECURITY DEFINER VIEW `mysql`.`user` AS select `mysql`.`global_priv`.`Host` AS `Host`,`mysql`.`global_priv`.`User` AS `User`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_subject'),'') AS 
`x509_subject`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `mysql`.`global_priv` latin1 latin1_swedish_ci
+||||||| 75538f94ca0
+Table Create Table
+user CREATE TABLE `user` (
+ `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
+ `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Reload_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Shutdown_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Process_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `File_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Show_db_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Repl_slave_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Repl_client_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_user_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Event_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_tablespace_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `ssl_type` enum('','ANY','X509','SPECIFIED') CHARACTER SET utf8 NOT NULL DEFAULT '',
+ `ssl_cipher` blob NOT NULL,
+ `x509_issuer` blob NOT NULL,
+ `x509_subject` blob NOT NULL,
+ `max_questions` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_updates` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_connections` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_user_connections` int(11) NOT NULL DEFAULT 0,
+ `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
+ `authentication_string` text COLLATE utf8_bin NOT NULL,
+ `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000,
+ `password_last_changed` timestamp NULL DEFAULT NULL,
+ `password_lifetime` smallint(5) unsigned DEFAULT NULL,
+ `account_locked` enum('N','Y') COLLATE utf8_bin NOT NULL DEFAULT 'N',
+ PRIMARY KEY (`Host`,`User`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
+=======
+Table Create Table
+user CREATE TABLE `user` (
+ `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
+ `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Reload_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Shutdown_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Process_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `File_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Show_db_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Repl_slave_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Repl_client_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_user_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Event_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_tablespace_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `ssl_type` enum('','ANY','X509','SPECIFIED') CHARACTER SET utf8 NOT NULL DEFAULT '',
+ `ssl_cipher` blob NOT NULL,
+ `x509_issuer` blob NOT NULL,
+ `x509_subject` blob NOT NULL,
+ `max_questions` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_updates` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_connections` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_user_connections` int(11) NOT NULL DEFAULT 0,
+ `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
+ `authentication_string` text COLLATE utf8_bin NOT NULL,
+ `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000,
+ `password_last_changed` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `password_lifetime` smallint(5) unsigned DEFAULT NULL,
+ `account_locked` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ PRIMARY KEY (`Host`,`User`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
+>>>>>>> bb-10.3-release
CREATE ROLE `aRole`;
SET ROLE `aRole`;
FLUSH PRIVILEGES;
@@ -688,6 +800,134 @@ root N
root N
root N
aRole Y
+DROP ROLE aRole;
+#
+# MDEV-24122: Fix previously-MySQL-5.7 data directories that were upgraded prior to MDEV-23201
+#
+#
+DROP TABLE IF EXISTS mysql.user;
+FLUSH TABLES mysql.user;
+ALTER TABLE mysql.user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+ALTER TABLE mysql.user ADD default_role char(80) binary DEFAULT '' NOT NULL;
+ALTER TABLE mysql.user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL;
+FLUSH PRIVILEGES;
+Phase 1/7: Checking and upgrading mysql database
+Processing databases
+mysql
+mysql.column_stats OK
+mysql.columns_priv OK
+mysql.db OK
+mysql.event OK
+mysql.func OK
+mysql.gtid_slave_pos OK
+mysql.help_category OK
+mysql.help_keyword OK
+mysql.help_relation OK
+mysql.help_topic OK
+mysql.host OK
+mysql.index_stats OK
+mysql.innodb_index_stats OK
+mysql.innodb_table_stats OK
+mysql.plugin OK
+mysql.proc OK
+mysql.procs_priv OK
+mysql.proxies_priv OK
+mysql.roles_mapping OK
+mysql.servers OK
+mysql.table_stats OK
+mysql.tables_priv OK
+mysql.time_zone OK
+mysql.time_zone_leap_second OK
+mysql.time_zone_name OK
+mysql.time_zone_transition OK
+mysql.time_zone_transition_type OK
+mysql.transaction_registry OK
+mysql.user OK
+Phase 2/7: Installing used storage engines... Skipped
+Phase 3/7: Fixing views
+Phase 4/7: Running 'mysql_fix_privilege_tables'
+Phase 5/7: Fixing table and database names
+Phase 6/7: Checking and upgrading tables
+Processing databases
+information_schema
+mtr
+mtr.global_suppressions OK
+mtr.test_suppressions OK
+performance_schema
+test
+Phase 7/7: Running 'FLUSH PRIVILEGES'
+OK
+SHOW CREATE TABLE mysql.user;
+Table Create Table
+user CREATE TABLE `user` (
+ `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
+ `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Reload_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Shutdown_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Process_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `File_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Show_db_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Repl_slave_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Repl_client_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_user_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Event_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Create_tablespace_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `ssl_type` enum('','ANY','X509','SPECIFIED') CHARACTER SET utf8 NOT NULL DEFAULT '',
+ `ssl_cipher` blob NOT NULL,
+ `x509_issuer` blob NOT NULL,
+ `x509_subject` blob NOT NULL,
+ `max_questions` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_updates` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_connections` int(11) unsigned NOT NULL DEFAULT 0,
+ `max_user_connections` int(11) NOT NULL DEFAULT 0,
+ `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
+ `authentication_string` text COLLATE utf8_bin NOT NULL,
+ `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000,
+ `password_last_changed` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `password_lifetime` smallint(5) unsigned DEFAULT NULL,
+ `account_locked` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
+ PRIMARY KEY (`Host`,`User`)
+) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
+CREATE ROLE `aRole`;
+SET DEFAULT ROLE aRole;
+SHOW GRANTS;
+Grants for root@localhost
+GRANT `aRole` TO `root`@`localhost` WITH ADMIN OPTION
+GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION
+GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+GRANT USAGE ON *.* TO `aRole`
+SET DEFAULT ROLE aRole FOR 'root'@'localhost'
+SET DEFAULT ROLE NONE;
+SHOW GRANTS;
+Grants for root@localhost
+GRANT `aRole` TO `root`@`localhost` WITH ADMIN OPTION
+GRANT ALL PRIVILEGES ON *.* TO `root`@`localhost` WITH GRANT OPTION
+GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+GRANT USAGE ON *.* TO `aRole`
DROP ROLE `aRole`;
FLUSH PRIVILEGES;
# End of 10.2 tests
diff --git a/mysql-test/main/mysql_upgrade.test b/mysql-test/main/mysql_upgrade.test
index 3395dd7edad..02e0335cd13 100644
--- a/mysql-test/main/mysql_upgrade.test
+++ b/mysql-test/main/mysql_upgrade.test
@@ -243,6 +243,36 @@ FLUSH PRIVILEGES;
SET ROLE `aRole`;
SELECT `User`, `is_role` FROM `mysql`.`user`;
+DROP ROLE aRole;
+
+--echo #
+--echo # MDEV-24122: Fix previously-MySQL-5.7 data directories that were upgraded prior to MDEV-23201
+--echo #
+--echo #
+
+# For 10.4 merge - dropping the view.
+# DROP VIEW IF EXISTS mysql.user;
+DROP TABLE IF EXISTS mysql.user;
+--copy_file std_data/mysql57user.frm $MYSQLD_DATADIR/mysql/user.frm
+--copy_file std_data/mysql57user.MYI $MYSQLD_DATADIR/mysql/user.MYI
+--copy_file std_data/mysql57user.MYD $MYSQLD_DATADIR/mysql/user.MYD
+FLUSH TABLES mysql.user;
+
+# What the upgrade prior to MDEV-23201 would have done:
+ALTER TABLE mysql.user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+ALTER TABLE mysql.user ADD default_role char(80) binary DEFAULT '' NOT NULL;
+ALTER TABLE mysql.user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL;
+FLUSH PRIVILEGES;
+
+--exec $MYSQL_UPGRADE --force 2>&1
+SHOW CREATE TABLE mysql.user;
+
+CREATE ROLE `aRole`;
+SET DEFAULT ROLE aRole;
+SHOW GRANTS;
+SET DEFAULT ROLE NONE;
+SHOW GRANTS;
+
DROP ROLE `aRole`;
--exec $MYSQL mysql < $MYSQLTEST_VARDIR/tmp/user.sql
FLUSH PRIVILEGES;
diff --git a/mysql-test/main/mysqldump-system.result b/mysql-test/main/mysqldump-system.result
index d887df81d2b..10c8582e9b8 100644
--- a/mysql-test/main/mysqldump-system.result
+++ b/mysql-test/main/mysqldump-system.result
@@ -90,31 +90,37 @@ USE mysql;
LOCK TABLES `column_stats` WRITE;
/*!40000 ALTER TABLE `column_stats` DISABLE KEYS */;
+<<<<<<< HEAD
INSERT INTO `column_stats` VALUES ('mysql','tz','Time_zone_id','1','5',0.0000,4.0000,98.2500,254,'DOUBLE_PREC_HB','\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0ÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿ÿ¿');
+||||||| 75538f94ca0
+INSERT INTO `column_stats` VALUES ('mysql','tz','Time_zone_id','1','5',0.0000,4.0000,98.2500,0,NULL,NULL);
+=======
+REPLACE INTO `column_stats` VALUES ('mysql','tz','Time_zone_id','1','5',0.0000,4.0000,98.2500,0,NULL,NULL);
+>>>>>>> bb-10.3-release
/*!40000 ALTER TABLE `column_stats` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `index_stats` WRITE;
/*!40000 ALTER TABLE `index_stats` DISABLE KEYS */;
-INSERT INTO `index_stats` VALUES ('mysql','tz','PRIMARY',1,98.2500);
+REPLACE INTO `index_stats` VALUES ('mysql','tz','PRIMARY',1,98.2500);
/*!40000 ALTER TABLE `index_stats` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `table_stats` WRITE;
/*!40000 ALTER TABLE `table_stats` DISABLE KEYS */;
-INSERT INTO `table_stats` VALUES ('mysql','tz',393);
+REPLACE INTO `table_stats` VALUES ('mysql','tz',393);
/*!40000 ALTER TABLE `table_stats` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `innodb_index_stats` WRITE;
/*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */;
-INSERT INTO `innodb_index_stats` VALUES ('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx01',4,1,'Time_zone_id'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx02',393,1,'Time_zone_id,Transition_time'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_leaf_pages',1,NULL,'Number of leaf pages in the index'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','size',1,NULL,'Number of pages in the index');
+REPLACE INTO `innodb_index_stats` VALUES ('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx01',4,1,'Time_zone_id'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx02',393,1,'Time_zone_id,Transition_time'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_leaf_pages',1,NULL,'Number of leaf pages in the index'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','size',1,NULL,'Number of pages in the index');
/*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `innodb_table_stats` WRITE;
/*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */;
-INSERT INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0);
+REPLACE INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0);
/*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */;
UNLOCK TABLES;
@@ -122,31 +128,31 @@ USE mysql;
LOCK TABLES `time_zone` WRITE;
/*!40000 ALTER TABLE `time_zone` DISABLE KEYS */;
-INSERT INTO `time_zone` VALUES (1,'N'),(2,'N'),(3,'N'),(4,'Y'),(5,'N');
+REPLACE INTO `time_zone` VALUES (1,'N'),(2,'N'),(3,'N'),(4,'Y'),(5,'N');
/*!40000 ALTER TABLE `time_zone` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `time_zone_name` WRITE;
/*!40000 ALTER TABLE `time_zone_name` DISABLE KEYS */;
-INSERT INTO `time_zone_name` VALUES ('Europe/Moscow',3),('Japan',5),('leap/Europe/Moscow',4),('MET',1),('Universal',2),('UTC',2);
+REPLACE INTO `time_zone_name` VALUES ('Europe/Moscow',3),('Japan',5),('leap/Europe/Moscow',4),('MET',1),('Universal',2),('UTC',2);
/*!40000 ALTER TABLE `time_zone_name` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `time_zone_leap_second` WRITE;
/*!40000 ALTER TABLE `time_zone_leap_second` DISABLE KEYS */;
-INSERT INTO `time_zone_leap_second` VALUES (78796800,1),(94694401,2),(126230402,3),(157766403,4),(189302404,5),(220924805,6),(252460806,7),(283996807,8),(315532808,9),(362793609,10),(394329610,11),(425865611,12),(489024012,13),(567993613,14),(631152014,15),(662688015,16),(709948816,17),(741484817,18),(773020818,19),(820454419,20),(867715220,21),(915148821,22);
+REPLACE INTO `time_zone_leap_second` VALUES (78796800,1),(94694401,2),(126230402,3),(157766403,4),(189302404,5),(220924805,6),(252460806,7),(283996807,8),(315532808,9),(362793609,10),(394329610,11),(425865611,12),(489024012,13),(567993613,14),(631152014,15),(662688015,16),(709948816,17),(741484817,18),(773020818,19),(820454419,20),(867715220,21),(915148821,22);
/*!40000 ALTER TABLE `time_zone_leap_second` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `time_zone_transition` WRITE;
/*!40000 ALTER TABLE `time_zone_transition` DISABLE KEYS */;
-INSERT INTO `time_zone_transition` VALUES (1,-1693706400,0),(1,-1680483600,1),(1,-1663455600,2),(1,-1650150000,3),(1,-1632006000,2),(1,-1618700400,3),(1,-938905200,2),(1,-857257200,3),(1,-844556400,2),(1,-828226800,3),(1,-812502000,2),(1,-796777200,3),(1,228877200,2),(1,243997200,3),(1,260326800,2),(1,276051600,3),(1,291776400,2),(1,307501200,3),(1,323830800,2),(1,338950800,3),(1,354675600,2),(1,370400400,3),(1,386125200,2),(1,401850000,3),(1,417574800,2),(1,433299600,3),(1,449024400,2),(1,465354000,3),(1,481078800,2),(1,496803600,3),(1,512528400,2),(1,528253200,3),(1,543978000,2),(1,559702800,3),(1,575427600,2),(1,591152400,3),(1,606877200,2),(1,622602000,3),(1,638326800,2),(1,654656400,3),(1,670381200,2),(1,686106000,3),(1,701830800,2),(1,717555600,3),(1,733280400,2),(1,749005200,3),(1,764730000,2),(1,780454800,3),(1,796179600,2),(1,811904400,3),(1,828234000,2),(1,846378000,3),(1,859683600,2),(1,877827600,3),(1,891133200,2),(1,909277200,3),(1,922582800,2),(1,941331600,3),(1,954032400,2),(1,972781200,3),(1,985482000,2),(1,1004230800,3),(1,1017536400,2),(1,1035680400,3),(1,1048986000,2),(1,1067130000,3),(1,1080435600,2),(1,1099184400,3),(1,1111885200,2),(1,1130634000,3),(1,1143334800,2),(1,1162083600,3),(1,1174784400,2),(1,1193533200,3),(1,1206838800,2),(1,1224982800,3),(1,1238288400,2),(1,1256432400,3),(1,1269738000,2),(1,1288486800,3),(1,1301187600,2),(1,1319936400,3),(1,1332637200,2),(1,1351386000,3),(1,1364691600,2),(1,1382835600,3),(1,1396141200,2),(1,1414285200,3),(1,1427590800,2),(1,1445734800,3),(1,1459040400,2),(1,1477789200,3),(1,1490490000,2),(1,1509238800,3),(1,1521939600,2),(1,1540688400,3),(1,1553994000,2),(1,1572138000,3),(1,1585443600,2),(1,1603587600,3),(1,1616893200,2),(1,1635642000,3),(1,1648342800,2),(1,1667091600,3),(1,1679792400,2),(1,1698541200,3),(1,1711846800,2),(1,1729990800,3),(1,1743296400,2),(1,1761440400,3),(1,1774746000,2),(1,1792890000,3),(1,1806195600,2),(1,1824944400,3),(1,1837645200,2),(1,1856394000,3),(1,1869094800,2),(1,1887843600,3),(1,1901149200,2),(1,1919293200,3),(1,1932598800,2),(1,1950742800,3),(1,1964048400,2),(1,1982797200,3),(1,1995498000,2),(1,2014246800,3),(1,2026947600,2),(1,2045696400,3),(1,2058397200,2),(1,2077146000,3),(1,2090451600,2),(1,2108595600,3),(1,2121901200,2),(1,2140045200,3),(3,-1688265000,2),(3,-1656819048,1),(3,-1641353448,2),(3,-1627965048,3),(3,-1618716648,1),(3,-1596429048,3),(3,-1593829848,5),(3,-1589860800,4),(3,-1542427200,5),(3,-1539493200,6),(3,-1525323600,5),(3,-1522728000,4),(3,-1491188400,7),(3,-1247536800,4),(3,354920400,5),(3,370728000,4),(3,386456400,5),(3,402264000,4),(3,417992400,5),(3,433800000,4),(3,449614800,5),(3,465346800,8),(3,481071600,9),(3,496796400,8),(3,512521200,9),(3,528246000,8),(3,543970800,9),(3,559695600,8),(3,575420400,9),(3,591145200,8),(3,606870000,9),(3,622594800,8),(3,638319600,9),(3,654649200,8),(3,670374000,10),(3,686102400,11),(3,695779200,8),(3,701812800,5),(3,717534000,4),(3,733273200,9),(3,748998000,8),(3,764722800,9),(3,780447600,8),(3,796172400,9),(3,811897200,8),(3,828226800,9),(3,846370800,8),(3,859676400,9),(3,877820400,8),(3,891126000,9),(3,909270000,8),(3,922575600,9),(3,941324400,8),(3,954025200,9),(3,972774000,8),(3,985474800,9),(3,1004223600,8),(3,1017529200,9),(3,1035673200,8),(3,1048978800,9),(3,1067122800,8),(3,1080428400,9),(3,1099177200,8),(3,1111878000,9),(3,1130626800,8),(3,1143327600,9),(3,1162076400,8),(3,1174777200,9),(3,1193526000,8),(3,1206831600,9),(3,1224975600,8),(3,1238281200,9),(3,1256425200,8),(3,1269730800,9),(3,1288479600,8),(3,1301180400,9),(3,13199292
00,8),(3,1332630000,9),(3,1351378800,8),(3,1364684400,9),(3,1382828400,8),(3,1396134000,9),(3,1414278000,8),(3,1427583600,9),(3,1445727600,8),(3,1459033200,9),(3,1477782000,8),(3,1490482800,9),(3,1509231600,8),(3,1521932400,9),(3,1540681200,8),(3,1553986800,9),(3,1572130800,8),(3,1585436400,9),(3,1603580400,8),(3,1616886000,9),(3,1635634800,8),(3,1648335600,9),(3,1667084400,8),(3,1679785200,9),(3,1698534000,8),(3,1711839600,9),(3,1729983600,8),(3,1743289200,9),(3,1761433200,8),(3,1774738800,9),(3,1792882800,8),(3,1806188400,9),(3,1824937200,8),(3,1837638000,9),(3,1856386800,8),(3,1869087600,9),(3,1887836400,8),(3,1901142000,9),(3,1919286000,8),(3,1932591600,9),(3,1950735600,8),(3,1964041200,9),(3,1982790000,8),(3,1995490800,9),(3,2014239600,8),(3,2026940400,9),(3,2045689200,8),(3,2058390000,9),(3,2077138800,8),(3,2090444400,9),(3,2108588400,8),(3,2121894000,9),(3,2140038000,8),(4,-1688265000,2),(4,-1656819048,1),(4,-1641353448,2),(4,-1627965048,3),(4,-1618716648,1),(4,-1596429048,3),(4,-1593829848,5),(4,-1589860800,4),(4,-1542427200,5),(4,-1539493200,6),(4,-1525323600,5),(4,-1522728000,4),(4,-1491188400,7),(4,-1247536800,4),(4,354920409,5),(4,370728010,4),(4,386456410,5),(4,402264011,4),(4,417992411,5),(4,433800012,4),(4,449614812,5),(4,465346812,8),(4,481071612,9),(4,496796413,8),(4,512521213,9),(4,528246013,8),(4,543970813,9),(4,559695613,8),(4,575420414,9),(4,591145214,8),(4,606870014,9),(4,622594814,8),(4,638319615,9),(4,654649215,8),(4,670374016,10),(4,686102416,11),(4,695779216,8),(4,701812816,5),(4,717534017,4),(4,733273217,9),(4,748998018,8),(4,764722818,9),(4,780447619,8),(4,796172419,9),(4,811897219,8),(4,828226820,9),(4,846370820,8),(4,859676420,9),(4,877820421,8),(4,891126021,9),(4,909270021,8),(4,922575622,9),(4,941324422,8),(4,954025222,9),(4,972774022,8),(4,985474822,9),(4,1004223622,8),(4,1017529222,9),(4,1035673222,8),(4,1048978822,9),(4,1067122822,8),(4,1080428422,9),(4,1099177222,8),(4,1111878022,9),(4,1130626822,8),(4,1143327622,9),(4,1162076422,8),(4,1174777222,9),(4,1193526022,8),(4,1206831622,9),(4,1224975622,8),(4,1238281222,9),(4,1256425222,8),(4,1269730822,9),(4,1288479622,8),(4,1301180422,9),(4,1319929222,8),(4,1332630022,9),(4,1351378822,8),(4,1364684422,9),(4,1382828422,8),(4,1396134022,9),(4,1414278022,8),(4,1427583622,9),(4,1445727622,8),(4,1459033222,9),(4,1477782022,8),(4,1490482822,9),(4,1509231622,8),(4,1521932422,9),(4,1540681222,8),(4,1553986822,9),(4,1572130822,8),(4,1585436422,9),(4,1603580422,8),(4,1616886022,9),(4,1635634822,8),(4,1648335622,9),(4,1667084422,8),(4,1679785222,9),(4,1698534022,8),(4,1711839622,9),(4,1729983622,8),(4,1743289222,9),(4,1761433222,8),(4,1774738822,9),(4,1792882822,8),(4,1806188422,9),(4,1824937222,8),(4,1837638022,9),(4,1856386822,8),(4,1869087622,9),(4,1887836422,8),(4,1901142022,9),(4,1919286022,8),(4,1932591622,9),(4,1950735622,8),(4,1964041222,9),(4,1982790022,8),(4,1995490822,9),(4,2014239622,8),(4,2026940422,9),(4,2045689222,8),(4,2058390022,9),(4,2077138822,8),(4,2090444422,9),(4,2108588422,8),(4,2121894022,9),(4,2140038022,8),(5,-1009875600,1);
+REPLACE INTO `time_zone_transition` VALUES (1,-1693706400,0),(1,-1680483600,1),(1,-1663455600,2),(1,-1650150000,3),(1,-1632006000,2),(1,-1618700400,3),(1,-938905200,2),(1,-857257200,3),(1,-844556400,2),(1,-828226800,3),(1,-812502000,2),(1,-796777200,3),(1,228877200,2),(1,243997200,3),(1,260326800,2),(1,276051600,3),(1,291776400,2),(1,307501200,3),(1,323830800,2),(1,338950800,3),(1,354675600,2),(1,370400400,3),(1,386125200,2),(1,401850000,3),(1,417574800,2),(1,433299600,3),(1,449024400,2),(1,465354000,3),(1,481078800,2),(1,496803600,3),(1,512528400,2),(1,528253200,3),(1,543978000,2),(1,559702800,3),(1,575427600,2),(1,591152400,3),(1,606877200,2),(1,622602000,3),(1,638326800,2),(1,654656400,3),(1,670381200,2),(1,686106000,3),(1,701830800,2),(1,717555600,3),(1,733280400,2),(1,749005200,3),(1,764730000,2),(1,780454800,3),(1,796179600,2),(1,811904400,3),(1,828234000,2),(1,846378000,3),(1,859683600,2),(1,877827600,3),(1,891133200,2),(1,909277200,3),(1,922582800,2),(1,941331600,3),(1,954032400,2),(1,972781200,3),(1,985482000,2),(1,1004230800,3),(1,1017536400,2),(1,1035680400,3),(1,1048986000,2),(1,1067130000,3),(1,1080435600,2),(1,1099184400,3),(1,1111885200,2),(1,1130634000,3),(1,1143334800,2),(1,1162083600,3),(1,1174784400,2),(1,1193533200,3),(1,1206838800,2),(1,1224982800,3),(1,1238288400,2),(1,1256432400,3),(1,1269738000,2),(1,1288486800,3),(1,1301187600,2),(1,1319936400,3),(1,1332637200,2),(1,1351386000,3),(1,1364691600,2),(1,1382835600,3),(1,1396141200,2),(1,1414285200,3),(1,1427590800,2),(1,1445734800,3),(1,1459040400,2),(1,1477789200,3),(1,1490490000,2),(1,1509238800,3),(1,1521939600,2),(1,1540688400,3),(1,1553994000,2),(1,1572138000,3),(1,1585443600,2),(1,1603587600,3),(1,1616893200,2),(1,1635642000,3),(1,1648342800,2),(1,1667091600,3),(1,1679792400,2),(1,1698541200,3),(1,1711846800,2),(1,1729990800,3),(1,1743296400,2),(1,1761440400,3),(1,1774746000,2),(1,1792890000,3),(1,1806195600,2),(1,1824944400,3),(1,1837645200,2),(1,1856394000,3),(1,1869094800,2),(1,1887843600,3),(1,1901149200,2),(1,1919293200,3),(1,1932598800,2),(1,1950742800,3),(1,1964048400,2),(1,1982797200,3),(1,1995498000,2),(1,2014246800,3),(1,2026947600,2),(1,2045696400,3),(1,2058397200,2),(1,2077146000,3),(1,2090451600,2),(1,2108595600,3),(1,2121901200,2),(1,2140045200,3),(3,-1688265000,2),(3,-1656819048,1),(3,-1641353448,2),(3,-1627965048,3),(3,-1618716648,1),(3,-1596429048,3),(3,-1593829848,5),(3,-1589860800,4),(3,-1542427200,5),(3,-1539493200,6),(3,-1525323600,5),(3,-1522728000,4),(3,-1491188400,7),(3,-1247536800,4),(3,354920400,5),(3,370728000,4),(3,386456400,5),(3,402264000,4),(3,417992400,5),(3,433800000,4),(3,449614800,5),(3,465346800,8),(3,481071600,9),(3,496796400,8),(3,512521200,9),(3,528246000,8),(3,543970800,9),(3,559695600,8),(3,575420400,9),(3,591145200,8),(3,606870000,9),(3,622594800,8),(3,638319600,9),(3,654649200,8),(3,670374000,10),(3,686102400,11),(3,695779200,8),(3,701812800,5),(3,717534000,4),(3,733273200,9),(3,748998000,8),(3,764722800,9),(3,780447600,8),(3,796172400,9),(3,811897200,8),(3,828226800,9),(3,846370800,8),(3,859676400,9),(3,877820400,8),(3,891126000,9),(3,909270000,8),(3,922575600,9),(3,941324400,8),(3,954025200,9),(3,972774000,8),(3,985474800,9),(3,1004223600,8),(3,1017529200,9),(3,1035673200,8),(3,1048978800,9),(3,1067122800,8),(3,1080428400,9),(3,1099177200,8),(3,1111878000,9),(3,1130626800,8),(3,1143327600,9),(3,1162076400,8),(3,1174777200,9),(3,1193526000,8),(3,1206831600,9),(3,1224975600,8),(3,1238281200,9),(3,1256425200,8),(3,1269730800,9),(3,1288479600,8),(3,1301180400,9),(3,1319929
200,8),(3,1332630000,9),(3,1351378800,8),(3,1364684400,9),(3,1382828400,8),(3,1396134000,9),(3,1414278000,8),(3,1427583600,9),(3,1445727600,8),(3,1459033200,9),(3,1477782000,8),(3,1490482800,9),(3,1509231600,8),(3,1521932400,9),(3,1540681200,8),(3,1553986800,9),(3,1572130800,8),(3,1585436400,9),(3,1603580400,8),(3,1616886000,9),(3,1635634800,8),(3,1648335600,9),(3,1667084400,8),(3,1679785200,9),(3,1698534000,8),(3,1711839600,9),(3,1729983600,8),(3,1743289200,9),(3,1761433200,8),(3,1774738800,9),(3,1792882800,8),(3,1806188400,9),(3,1824937200,8),(3,1837638000,9),(3,1856386800,8),(3,1869087600,9),(3,1887836400,8),(3,1901142000,9),(3,1919286000,8),(3,1932591600,9),(3,1950735600,8),(3,1964041200,9),(3,1982790000,8),(3,1995490800,9),(3,2014239600,8),(3,2026940400,9),(3,2045689200,8),(3,2058390000,9),(3,2077138800,8),(3,2090444400,9),(3,2108588400,8),(3,2121894000,9),(3,2140038000,8),(4,-1688265000,2),(4,-1656819048,1),(4,-1641353448,2),(4,-1627965048,3),(4,-1618716648,1),(4,-1596429048,3),(4,-1593829848,5),(4,-1589860800,4),(4,-1542427200,5),(4,-1539493200,6),(4,-1525323600,5),(4,-1522728000,4),(4,-1491188400,7),(4,-1247536800,4),(4,354920409,5),(4,370728010,4),(4,386456410,5),(4,402264011,4),(4,417992411,5),(4,433800012,4),(4,449614812,5),(4,465346812,8),(4,481071612,9),(4,496796413,8),(4,512521213,9),(4,528246013,8),(4,543970813,9),(4,559695613,8),(4,575420414,9),(4,591145214,8),(4,606870014,9),(4,622594814,8),(4,638319615,9),(4,654649215,8),(4,670374016,10),(4,686102416,11),(4,695779216,8),(4,701812816,5),(4,717534017,4),(4,733273217,9),(4,748998018,8),(4,764722818,9),(4,780447619,8),(4,796172419,9),(4,811897219,8),(4,828226820,9),(4,846370820,8),(4,859676420,9),(4,877820421,8),(4,891126021,9),(4,909270021,8),(4,922575622,9),(4,941324422,8),(4,954025222,9),(4,972774022,8),(4,985474822,9),(4,1004223622,8),(4,1017529222,9),(4,1035673222,8),(4,1048978822,9),(4,1067122822,8),(4,1080428422,9),(4,1099177222,8),(4,1111878022,9),(4,1130626822,8),(4,1143327622,9),(4,1162076422,8),(4,1174777222,9),(4,1193526022,8),(4,1206831622,9),(4,1224975622,8),(4,1238281222,9),(4,1256425222,8),(4,1269730822,9),(4,1288479622,8),(4,1301180422,9),(4,1319929222,8),(4,1332630022,9),(4,1351378822,8),(4,1364684422,9),(4,1382828422,8),(4,1396134022,9),(4,1414278022,8),(4,1427583622,9),(4,1445727622,8),(4,1459033222,9),(4,1477782022,8),(4,1490482822,9),(4,1509231622,8),(4,1521932422,9),(4,1540681222,8),(4,1553986822,9),(4,1572130822,8),(4,1585436422,9),(4,1603580422,8),(4,1616886022,9),(4,1635634822,8),(4,1648335622,9),(4,1667084422,8),(4,1679785222,9),(4,1698534022,8),(4,1711839622,9),(4,1729983622,8),(4,1743289222,9),(4,1761433222,8),(4,1774738822,9),(4,1792882822,8),(4,1806188422,9),(4,1824937222,8),(4,1837638022,9),(4,1856386822,8),(4,1869087622,9),(4,1887836422,8),(4,1901142022,9),(4,1919286022,8),(4,1932591622,9),(4,1950735622,8),(4,1964041222,9),(4,1982790022,8),(4,1995490822,9),(4,2014239622,8),(4,2026940422,9),(4,2045689222,8),(4,2058390022,9),(4,2077138822,8),(4,2090444422,9),(4,2108588422,8),(4,2121894022,9),(4,2140038022,8),(5,-1009875600,1);
/*!40000 ALTER TABLE `time_zone_transition` ENABLE KEYS */;
UNLOCK TABLES;
LOCK TABLES `time_zone_transition_type` WRITE;
/*!40000 ALTER TABLE `time_zone_transition_type` DISABLE KEYS */;
-INSERT INTO `time_zone_transition_type` VALUES (1,0,7200,1,'MEST'),(1,1,3600,0,'MET'),(1,2,7200,1,'MEST'),(1,3,3600,0,'MET'),(2,0,0,0,'UTC'),(3,0,9000,0,'MMT'),(3,1,12648,1,'MST'),(3,2,9048,0,'MMT'),(3,3,16248,1,'MDST'),(3,4,10800,0,'MSK'),(3,5,14400,1,'MSD'),(3,6,18000,1,'MSD'),(3,7,7200,0,'EET'),(3,8,10800,0,'MSK'),(3,9,14400,1,'MSD'),(3,10,10800,1,'EEST'),(3,11,7200,0,'EET'),(4,0,9000,0,'MMT'),(4,1,12648,1,'MST'),(4,2,9048,0,'MMT'),(4,3,16248,1,'MDST'),(4,4,10800,0,'MSK'),(4,5,14400,1,'MSD'),(4,6,18000,1,'MSD'),(4,7,7200,0,'EET'),(4,8,10800,0,'MSK'),(4,9,14400,1,'MSD'),(4,10,10800,1,'EEST'),(4,11,7200,0,'EET'),(5,0,32400,0,'CJT'),(5,1,32400,0,'JST');
+REPLACE INTO `time_zone_transition_type` VALUES (1,0,7200,1,'MEST'),(1,1,3600,0,'MET'),(1,2,7200,1,'MEST'),(1,3,3600,0,'MET'),(2,0,0,0,'UTC'),(3,0,9000,0,'MMT'),(3,1,12648,1,'MST'),(3,2,9048,0,'MMT'),(3,3,16248,1,'MDST'),(3,4,10800,0,'MSK'),(3,5,14400,1,'MSD'),(3,6,18000,1,'MSD'),(3,7,7200,0,'EET'),(3,8,10800,0,'MSK'),(3,9,14400,1,'MSD'),(3,10,10800,1,'EEST'),(3,11,7200,0,'EET'),(4,0,9000,0,'MMT'),(4,1,12648,1,'MST'),(4,2,9048,0,'MMT'),(4,3,16248,1,'MDST'),(4,4,10800,0,'MSK'),(4,5,14400,1,'MSD'),(4,6,18000,1,'MSD'),(4,7,7200,0,'EET'),(4,8,10800,0,'MSK'),(4,9,14400,1,'MSD'),(4,10,10800,1,'EEST'),(4,11,7200,0,'EET'),(5,0,32400,0,'CJT'),(5,1,32400,0,'JST');
/*!40000 ALTER TABLE `time_zone_transition_type` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
diff --git a/mysql-test/main/mysqldump.result b/mysql-test/main/mysqldump.result
index 19ffca17b1e..d0d054b5473 100644
--- a/mysql-test/main/mysqldump.result
+++ b/mysql-test/main/mysqldump.result
@@ -1,14 +1,10 @@
call mtr.add_suppression("@003f.frm' \\(errno: 22\\)");
+call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
# Bug#37938 Test "mysqldump" lacks various insert statements
# Turn off concurrent inserts to avoid random errors
# NOTE: We reset the variable back to saved value at the end of test
SET @OLD_CONCURRENT_INSERT = @@GLOBAL.CONCURRENT_INSERT;
SET @@GLOBAL.CONCURRENT_INSERT = 0;
-DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3;
-drop database if exists mysqldump_test_db;
-drop database if exists db1;
-drop database if exists db2;
-drop view if exists v1, v2, v3;
CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024;
INSERT INTO t1 VALUES (1), (2);
<?xml version="1.0"?>
@@ -5726,6 +5722,315 @@ DELIMITER ;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
DROP TABLE t1;
+#
+# MDEV-20939: Race condition between mysqldump import and InnoDB
+# persistent statistics calculation
+#
+#
+# Without --replace and --insert-ignore
+#
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8mb4 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+DROP TABLE IF EXISTS `innodb_index_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_index_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `stat_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `stat_value` bigint(20) unsigned NOT NULL,
+ `sample_size` bigint(20) unsigned DEFAULT NULL,
+ `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+DROP TABLE IF EXISTS `innodb_table_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_table_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `n_rows` bigint(20) unsigned NOT NULL,
+ `clustered_index_size` bigint(20) unsigned NOT NULL,
+ `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `general_log` (
+ `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `command_type` varchar(64) NOT NULL,
+ `argument` mediumtext NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `slow_log` (
+ `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `query_time` time(6) NOT NULL,
+ `lock_time` time(6) NOT NULL,
+ `rows_sent` int(11) NOT NULL,
+ `rows_examined` int(11) NOT NULL,
+ `db` varchar(512) NOT NULL,
+ `last_insert_id` int(11) NOT NULL,
+ `insert_id` int(11) NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `rows_affected` int(11) NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `transaction_registry` (
+ `transaction_id` bigint(20) unsigned NOT NULL,
+ `commit_id` bigint(20) unsigned NOT NULL,
+ `begin_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `commit_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `isolation_level` enum('READ-UNCOMMITTED','READ-COMMITTED','REPEATABLE-READ','SERIALIZABLE') COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`transaction_id`),
+ UNIQUE KEY `commit_id` (`commit_id`),
+ KEY `begin_timestamp` (`begin_timestamp`),
+ KEY `commit_timestamp` (`commit_timestamp`,`transaction_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+#
+# With --replace
+#
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8mb4 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+DROP TABLE IF EXISTS `innodb_index_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_index_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `stat_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `stat_value` bigint(20) unsigned NOT NULL,
+ `sample_size` bigint(20) unsigned DEFAULT NULL,
+ `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+LOCK TABLES `innodb_index_stats` WRITE;
+/*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */;
+/*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */;
+UNLOCK TABLES;
+DROP TABLE IF EXISTS `innodb_table_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_table_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `n_rows` bigint(20) unsigned NOT NULL,
+ `clustered_index_size` bigint(20) unsigned NOT NULL,
+ `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+LOCK TABLES `innodb_table_stats` WRITE;
+/*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */;
+/*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */;
+UNLOCK TABLES;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `general_log` (
+ `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `command_type` varchar(64) NOT NULL,
+ `argument` mediumtext NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `slow_log` (
+ `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `query_time` time(6) NOT NULL,
+ `lock_time` time(6) NOT NULL,
+ `rows_sent` int(11) NOT NULL,
+ `rows_examined` int(11) NOT NULL,
+ `db` varchar(512) NOT NULL,
+ `last_insert_id` int(11) NOT NULL,
+ `insert_id` int(11) NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `rows_affected` int(11) NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `transaction_registry` (
+ `transaction_id` bigint(20) unsigned NOT NULL,
+ `commit_id` bigint(20) unsigned NOT NULL,
+ `begin_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `commit_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `isolation_level` enum('READ-UNCOMMITTED','READ-COMMITTED','REPEATABLE-READ','SERIALIZABLE') COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`transaction_id`),
+ UNIQUE KEY `commit_id` (`commit_id`),
+ KEY `begin_timestamp` (`begin_timestamp`),
+ KEY `commit_timestamp` (`commit_timestamp`,`transaction_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+#
+# With --insert-ignore
+#
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8mb4 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+DROP TABLE IF EXISTS `innodb_index_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_index_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `index_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `stat_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `stat_value` bigint(20) unsigned NOT NULL,
+ `sample_size` bigint(20) unsigned DEFAULT NULL,
+ `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+LOCK TABLES `innodb_index_stats` WRITE;
+/*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */;
+/*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */;
+UNLOCK TABLES;
+DROP TABLE IF EXISTS `innodb_table_stats`;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `innodb_table_stats` (
+ `database_name` varchar(64) COLLATE utf8_bin NOT NULL,
+ `table_name` varchar(199) COLLATE utf8_bin NOT NULL,
+ `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `n_rows` bigint(20) unsigned NOT NULL,
+ `clustered_index_size` bigint(20) unsigned NOT NULL,
+ `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL,
+ PRIMARY KEY (`database_name`,`table_name`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+LOCK TABLES `innodb_table_stats` WRITE;
+/*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */;
+/*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */;
+UNLOCK TABLES;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `general_log` (
+ `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `command_type` varchar(64) NOT NULL,
+ `argument` mediumtext NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `slow_log` (
+ `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6),
+ `user_host` mediumtext NOT NULL,
+ `query_time` time(6) NOT NULL,
+ `lock_time` time(6) NOT NULL,
+ `rows_sent` int(11) NOT NULL,
+ `rows_examined` int(11) NOT NULL,
+ `db` varchar(512) NOT NULL,
+ `last_insert_id` int(11) NOT NULL,
+ `insert_id` int(11) NOT NULL,
+ `server_id` int(10) unsigned NOT NULL,
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
+ `rows_affected` int(11) NOT NULL
+) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log';
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `transaction_registry` (
+ `transaction_id` bigint(20) unsigned NOT NULL,
+ `commit_id` bigint(20) unsigned NOT NULL,
+ `begin_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `commit_timestamp` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `isolation_level` enum('READ-UNCOMMITTED','READ-COMMITTED','REPEATABLE-READ','SERIALIZABLE') COLLATE utf8_bin NOT NULL,
+ PRIMARY KEY (`transaction_id`),
+ UNIQUE KEY `commit_id` (`commit_id`),
+ KEY `begin_timestamp` (`begin_timestamp`),
+ KEY `commit_timestamp` (`commit_timestamp`,`transaction_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
# End of 10.2 tests
#
# Test for Invisible columns
diff --git a/mysql-test/main/mysqldump.test b/mysql-test/main/mysqldump.test
index f328bf32361..e2cf293a8a2 100644
--- a/mysql-test/main/mysqldump.test
+++ b/mysql-test/main/mysqldump.test
@@ -19,12 +19,7 @@ let collation=utf8_unicode_ci;
# There are tables in 'mysql' database of type innodb
--source include/have_innodb.inc
-# This test is slow on buildbot.
---source include/big_test.inc
-
-disable_query_log;
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
-enable_query_log;
--echo # Bug#37938 Test "mysqldump" lacks various insert statements
--echo # Turn off concurrent inserts to avoid random errors
@@ -32,15 +27,6 @@ enable_query_log;
SET @OLD_CONCURRENT_INSERT = @@GLOBAL.CONCURRENT_INSERT;
SET @@GLOBAL.CONCURRENT_INSERT = 0;
-
---disable_warnings
-DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3;
-drop database if exists mysqldump_test_db;
-drop database if exists db1;
-drop database if exists db2;
-drop view if exists v1, v2, v3;
---enable_warnings
-
# XML output
CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024;
@@ -2747,6 +2733,29 @@ INSERT INTO t1 (a) VALUES (1),(2),(3);
--exec $MYSQL_DUMP --default-character-set=utf8mb4 --triggers --no-data --no-create-info --add-drop-trigger --skip-comments --databases test
DROP TABLE t1;
+--echo #
+--echo # MDEV-20939: Race condition between mysqldump import and InnoDB
+--echo # persistent statistics calculation
+--echo #
+
+--let $ignore= --ignore-table=mysql.proxies_priv --ignore-table=mysql.user --ignore-table=mysql.column_stats --ignore-table=mysql.columns_priv --ignore-table=mysql.db --ignore-table=mysql.event --ignore-table=mysql.func --ignore-table=mysql.gtid_slave_pos --ignore-table=mysql.help_category --ignore-table=mysql.help_keyword --ignore-table=mysql.help_relation --ignore-table=mysql.help_topic --ignore-table=mysql.host --ignore-table=mysql.index_stats --ignore-table=mysql.plugin --ignore-table=mysql.proc --ignore-table=mysql.procs_priv --ignore-table=mysql.roles_mapping --ignore-table=mysql.servers --ignore-table=mysql.table_stats --ignore-table=mysql.tables_priv --ignore-table=mysql.time_zone --ignore-table=mysql.time_zone_leap_second --ignore-table=mysql.time_zone_name --ignore-table=mysql.time_zone_transition --ignore-table=mysql.time_zone_transition_type --ignore-table=mysql.general_log --ignore-table=mysql.slow_log
+--let $skip_opts= --skip-dump-date --skip-comments
+
+--echo #
+--echo # Without --replace and --insert-ignore
+--echo #
+--exec $MYSQL_DUMP $ignore $skip_opts mysql
+
+--echo #
+--echo # With --replace
+--echo #
+--exec $MYSQL_DUMP $ignore $skip_opts --replace mysql
+
+--echo #
+--echo # With --insert-ignore
+--echo #
+--exec $MYSQL_DUMP $ignore $skip_opts --insert-ignore mysql
+
--echo # End of 10.2 tests
--echo #
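A minimal sketch of what the three invocations above change in the emitted dump, using a row taken from the mysqldump-system.result hunk earlier in this patch; these statements are illustrative only and are not part of the recorded output of this test:

-- default: plain INSERT, which can fail with a duplicate-key error if InnoDB
-- has already recreated the statistics row while the dump is being imported
INSERT INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0);
-- --replace: overwrite any row the server recreated in the meantime
REPLACE INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0);
-- --insert-ignore: keep the server's row and silently skip the dumped one
INSERT IGNORE INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0);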
diff --git a/mysql-test/main/ps_show_log.result b/mysql-test/main/ps_show_log.result
new file mode 100644
index 00000000000..54eabaeded6
--- /dev/null
+++ b/mysql-test/main/ps_show_log.result
@@ -0,0 +1,65 @@
+#
+# MDEV-24208 SHOW RELAYLOG EVENTS command is not supported in the prepared
+# statement protocol yet
+#
+CREATE USER u1;
+include/master-slave.inc
+[connection master]
+connection master;
+CREATE TABLE t1(n INT);
+DROP TABLE t1;
+connection slave;
+PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS';
+EXECUTE stmt_1;
+Log_name Pos Event_type Server_id End_log_pos Info
+# # Format_desc # # #
+# # Gtid_list # # []
+# # Binlog_checkpoint # # #
+# # Gtid # # GTID 0-1-1
+# # Query # # use `test`; CREATE TABLE t1(n INT)
+# # Gtid # # GTID 0-1-2
+# # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
+# Execute the same prepared statement a second time to check that
+# no internal structures used for handling the statement
+# 'SHOW BINLOG EVENTS' were damaged.
+EXECUTE stmt_1;
+Log_name Pos Event_type Server_id End_log_pos Info
+# # Format_desc # # #
+# # Gtid_list # # []
+# # Binlog_checkpoint # # #
+# # Gtid # # GTID 0-1-1
+# # Query # # use `test`; CREATE TABLE t1(n INT)
+# # Gtid # # GTID 0-1-2
+# # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
+DEALLOCATE PREPARE stmt_1;
+connection slave;
+PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS';
+EXECUTE stmt_1;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000001 # Format_desc # # #
+slave-relay-bin.000001 # Rotate # # #
+# Execute the same prepared statement a second time to check that
+# no internal structures used for handling the statement
+# 'SHOW RELAYLOG EVENTS' were damaged.
+EXECUTE stmt_1;
+Log_name Pos Event_type Server_id End_log_pos Info
+slave-relay-bin.000001 # Format_desc # # #
+slave-relay-bin.000001 # Rotate # # #
+DEALLOCATE PREPARE stmt_1;
+# Create the user u1 without the REPLICATION SLAVE privilege required
+# for running the statements SHOW BINLOG EVENTS/SHOW RELAYLOG EVENTS
+# and check that an attempt to execute the statements SHOW BINLOG EVENTS/
+# SHOW RELAYLOG EVENTS as prepared statements by a user without the required
+# privileges results in an error.
+connect con2,localhost,u1,,test;
+PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS';
+EXECUTE stmt_1;
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE privilege(s) for this operation
+PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS';
+EXECUTE stmt_1;
+ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE privilege(s) for this operation
+DEALLOCATE PREPARE stmt_1;
+include/rpl_end.inc
+connection default;
+DROP USER u1;
+# End of 10.2 tests
diff --git a/mysql-test/main/ps_show_log.test b/mysql-test/main/ps_show_log.test
new file mode 100644
index 00000000000..95000d2d7e0
--- /dev/null
+++ b/mysql-test/main/ps_show_log.test
@@ -0,0 +1,73 @@
+--echo #
+--echo # MDEV-24208 SHOW RELAYLOG EVENTS command is not supported in the prepared
+--echo # statement protocol yet
+--echo #
+
+CREATE USER u1;
+
+--source include/have_binlog_format_statement.inc
+--source include/master-slave.inc
+--connection master
+CREATE TABLE t1(n INT);
+
+DROP TABLE t1;
+
+--sync_slave_with_master
+PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS';
+
+--replace_column 2 # 4 # 5 #
+--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-bin.*/#/
+EXECUTE stmt_1;
+
+--echo # Execute the same prepared statement a second time to check that
+--echo # no internal structures used for handling the statement
+--echo # 'SHOW BINLOG EVENTS' were damaged.
+
+--replace_column 2 # 4 # 5 #
+--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-bin.*/#/
+EXECUTE stmt_1;
+
+DEALLOCATE PREPARE stmt_1;
+
+--connection slave
+PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS';
+--replace_column 2 # 4 # 5 #
+--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-relay-bin.*;pos=.*/#/
+EXECUTE stmt_1;
+
+--echo # Execute the same prepared statement a second time to check that
+--echo # no internal structures used for handling the statement
+--echo # 'SHOW RELAYLOG EVENTS' were damaged.
+
+--replace_column 2 # 4 # 5 #
+--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-relay-bin.*;pos=.*/#/
+EXECUTE stmt_1;
+
+DEALLOCATE PREPARE stmt_1;
+
+--echo # Create the user u1 without the REPLICATION SLAVE privilege required
+--echo # for running the statements SHOW BINLOG EVENTS/SHOW RELAYLOG EVENTS
+--echo # and check that an attempt to execute the statements SHOW BINLOG EVENTS/
+--echo # SHOW RELAYLOG EVENTS as prepared statements by a user without the required
+--echo # privileges results in an error.
+
+--connect (con2,localhost,u1,,test)
+PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS';
+
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+EXECUTE stmt_1;
+
+PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS';
+
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+EXECUTE stmt_1;
+
+DEALLOCATE PREPARE stmt_1;
+
+--source include/rpl_end.inc
+
+--connection default
+# Clean up
+DROP USER u1;
+
+--echo # End of 10.2 tests
diff --git a/mysql-test/main/range.result b/mysql-test/main/range.result
index fca3e84f35d..60c79c02c44 100644
--- a/mysql-test/main/range.result
+++ b/mysql-test/main/range.result
@@ -3244,6 +3244,23 @@ SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR
id a b code num
DROP TABLE t1, t2;
#
+# MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value
+#
+create table t1 (pk int, i int, v int, primary key (pk), key(v));
+insert into t1 (pk,i,v) values (1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16);
+create table t2 (a int, b int);
+insert into t2 values (1,2),(2,4);
+EXPLAIN
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2
+1 SIMPLE t1 ALL PRIMARY,v NULL NULL NULL 8 Range checked for each record (index map: 0x3)
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+pk i v a b
+1 1 2 1 2
+2 2 4 2 4
+drop table t1, t2;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/range.test b/mysql-test/main/range.test
index a78b263f717..d06053d07e3 100644
--- a/mysql-test/main/range.test
+++ b/mysql-test/main/range.test
@@ -2214,6 +2214,20 @@ SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR
DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value
+--echo #
+
+create table t1 (pk int, i int, v int, primary key (pk), key(v));
+insert into t1 (pk,i,v) values (1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16);
+create table t2 (a int, b int);
+insert into t2 values (1,2),(2,4);
+EXPLAIN
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+drop table t1, t2;
+
--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/main/range_mrr_icp.result b/mysql-test/main/range_mrr_icp.result
index 736eaa741c0..257314539c5 100644
--- a/mysql-test/main/range_mrr_icp.result
+++ b/mysql-test/main/range_mrr_icp.result
@@ -3241,6 +3241,23 @@ SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR
id a b code num
DROP TABLE t1, t2;
#
+# MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value
+#
+create table t1 (pk int, i int, v int, primary key (pk), key(v));
+insert into t1 (pk,i,v) values (1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16);
+create table t2 (a int, b int);
+insert into t2 values (1,2),(2,4);
+EXPLAIN
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2
+1 SIMPLE t1 ALL PRIMARY,v NULL NULL NULL 8 Range checked for each record (index map: 0x3)
+select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk);
+pk i v a b
+1 1 2 1 2
+2 2 4 2 4
+drop table t1, t2;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/skip_grants-master.opt b/mysql-test/main/skip_grants.opt
index 5699a3387b8..5699a3387b8 100644
--- a/mysql-test/main/skip_grants-master.opt
+++ b/mysql-test/main/skip_grants.opt
diff --git a/mysql-test/main/skip_grants.result b/mysql-test/main/skip_grants.result
index 154f77fff76..9c985c2e7a8 100644
--- a/mysql-test/main/skip_grants.result
+++ b/mysql-test/main/skip_grants.result
@@ -48,10 +48,16 @@ DROP PROCEDURE p3;
DROP FUNCTION f1;
DROP FUNCTION f2;
DROP FUNCTION f3;
+#
+# Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server
+#
set global event_scheduler=1;
Warnings:
Note 1408 Event Scheduler: Loaded 0 events
set global event_scheduler=0;
+#
+# Bug#26285 Selecting information_schema crahes server
+#
select count(*) from information_schema.COLUMN_PRIVILEGES;
count(*)
0
@@ -64,14 +70,21 @@ count(*)
select count(*) from information_schema.USER_PRIVILEGES;
count(*)
0
-End of 5.0 tests
+#
+# End of 5.0 tests
+#
#
# Bug#29817 Queries with UDF fail with non-descriptive error
# if mysql.proc is missing
#
select no_such_function(1);
ERROR 42000: FUNCTION test.no_such_function does not exist
-End of 5.1 tests
+#
+# End of 5.1 tests
+#
+#
+# MDEV-8280 crash in 'show global status' with --skip-grant-tables
+#
show global status like 'Acl%';
Variable_name Value
Acl_column_grants 0
@@ -85,6 +98,17 @@ Acl_role_grants 0
Acl_roles 0
Acl_table_grants 0
Acl_users 0
+#
+# End of 10.1 tests
+#
+#
+# MDEV-22966 Server crashes or hangs with SET ROLE when started with skip-grant-tables
+#
+set role x;
+ERROR HY000: The MariaDB server is running with the --skip-grant-tables option so it cannot execute this statement
+#
+# End of 10.2 tests
+#
show create user root@localhost;
ERROR HY000: The MariaDB server is running with the --skip-grant-tables option so it cannot execute this statement
insert mysql.global_priv values ('foo', 'bar', '{}');
@@ -112,3 +136,6 @@ CREATE USER `baz`@`baz` IDENTIFIED BY PASSWORD '*E52096EF8EB0240275A7FE9E069101C
drop user bar@foo;
drop user baz@baz;
# restart
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/skip_grants.test b/mysql-test/main/skip_grants.test
index ccad3c2d13f..3a458b76bcf 100644
--- a/mysql-test/main/skip_grants.test
+++ b/mysql-test/main/skip_grants.test
@@ -89,20 +89,23 @@ DROP FUNCTION f1;
DROP FUNCTION f2;
DROP FUNCTION f3;
-#
-# Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server
-#
+--echo #
+--echo # Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server
+--echo #
set global event_scheduler=1;
set global event_scheduler=0;
-#
-# Bug#26285 Selecting information_schema crahes server
-#
+--echo #
+--echo # Bug#26285 Selecting information_schema crahes server
+--echo #
select count(*) from information_schema.COLUMN_PRIVILEGES;
select count(*) from information_schema.SCHEMA_PRIVILEGES;
select count(*) from information_schema.TABLE_PRIVILEGES;
select count(*) from information_schema.USER_PRIVILEGES;
---echo End of 5.0 tests
+
+--echo #
+--echo # End of 5.0 tests
+--echo #
--echo #
--echo # Bug#29817 Queries with UDF fail with non-descriptive error
@@ -111,13 +114,30 @@ select count(*) from information_schema.USER_PRIVILEGES;
--error ER_SP_DOES_NOT_EXIST
select no_such_function(1);
---echo End of 5.1 tests
+--echo #
+--echo # End of 5.1 tests
+--echo #
-#
-# MDEV-8280 crash in 'show global status' with --skip-grant-tables
-#
+--echo #
+--echo # MDEV-8280 crash in 'show global status' with --skip-grant-tables
+--echo #
show global status like 'Acl%';
+--echo #
+--echo # End of 10.1 tests
+--echo #
+
+--echo #
+--echo # MDEV-22966 Server crashes or hangs with SET ROLE when started with skip-grant-tables
+--echo #
+
+--error ER_OPTION_PREVENTS_STATEMENT
+set role x;
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
+
#
# MDEV-18297
# How to reset a forgotten root password
@@ -140,3 +160,7 @@ drop user bar@foo;
drop user baz@baz;
# need to restart the server to restore the --skip-grant state
--source include/restart_mysqld.inc
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/sp-ucs2.result b/mysql-test/main/sp-ucs2.result
index 389fa946ad5..047a64713af 100644
--- a/mysql-test/main/sp-ucs2.result
+++ b/mysql-test/main/sp-ucs2.result
@@ -100,20 +100,20 @@ RETURNS VARCHAR(64) CHARACTER SET ucs2
BEGIN
RETURN 'str';
END|
-ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1'
+DROP FUNCTION f|
CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2)
RETURNS VARCHAR(64) COLLATE ucs2_unicode_ci
BEGIN
RETURN 'str';
END|
-ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1'
+DROP FUNCTION f|
CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2)
RETURNS VARCHAR(64) CHARACTER SET ucs2
BEGIN
DECLARE f2 VARCHAR(64) COLLATE ucs2_unicode_ci;
RETURN 'str';
END|
-ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1'
+DROP FUNCTION f|
SET NAMES utf8;
CREATE FUNCTION bug48766 ()
RETURNS ENUM( 'w' ) CHARACTER SET ucs2
diff --git a/mysql-test/main/sp-ucs2.test b/mysql-test/main/sp-ucs2.test
index 3276da3e257..c6dbdaacb5e 100644
--- a/mysql-test/main/sp-ucs2.test
+++ b/mysql-test/main/sp-ucs2.test
@@ -114,35 +114,35 @@ DROP FUNCTION f1|
#
# COLLATE with no CHARACTER SET in IN param
#
---error ER_COLLATION_CHARSET_MISMATCH
CREATE FUNCTION f(f1 VARCHAR(64) COLLATE ucs2_unicode_ci)
RETURNS VARCHAR(64) CHARACTER SET ucs2
BEGIN
RETURN 'str';
END|
+DROP FUNCTION f|
#
# COLLATE with no CHARACTER SET in RETURNS
#
---error ER_COLLATION_CHARSET_MISMATCH
CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2)
RETURNS VARCHAR(64) COLLATE ucs2_unicode_ci
BEGIN
RETURN 'str';
END|
+DROP FUNCTION f|
#
# COLLATE with no CHARACTER SET in DECLARE
#
---error ER_COLLATION_CHARSET_MISMATCH
CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2)
RETURNS VARCHAR(64) CHARACTER SET ucs2
BEGIN
DECLARE f2 VARCHAR(64) COLLATE ucs2_unicode_ci;
RETURN 'str';
END|
+DROP FUNCTION f|
delimiter ;|
diff --git a/mysql-test/main/stat_tables.result b/mysql-test/main/stat_tables.result
index 55d8bbf51c9..53557b39b05 100644
--- a/mysql-test/main/stat_tables.result
+++ b/mysql-test/main/stat_tables.result
@@ -829,6 +829,20 @@ length(a)
set names latin1;
set @@use_stat_tables=@save_use_stat_tables;
drop table t1;
+#
+# MDEV-23753: SIGSEGV in Column_stat::store_stat_fields
+#
+CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2;
+LOCK TABLES t1 WRITE;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting);
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze error Invalid argument
+DROP TABLE t1;
# please keep this at the last
set @@global.histogram_size=@save_histogram_size;
# Start of 10.4 tests
diff --git a/mysql-test/main/stat_tables.test b/mysql-test/main/stat_tables.test
index ca341a93b81..9955908bd60 100644
--- a/mysql-test/main/stat_tables.test
+++ b/mysql-test/main/stat_tables.test
@@ -1,4 +1,5 @@
--source include/have_stat_tables.inc
+--source include/have_partition.inc
select @@global.use_stat_tables;
select @@session.use_stat_tables;
@@ -573,6 +574,17 @@ set names latin1;
set @@use_stat_tables=@save_use_stat_tables;
drop table t1;
+--echo #
+--echo # MDEV-23753: SIGSEGV in Column_stat::store_stat_fields
+--echo #
+
+CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2;
+LOCK TABLES t1 WRITE;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES ();
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting);
+DROP TABLE t1;
+
+
--echo # please keep this at the last
set @@global.histogram_size=@save_histogram_size;
diff --git a/mysql-test/main/stat_tables_innodb.result b/mysql-test/main/stat_tables_innodb.result
index 26d4bff6be9..41fd303ed4c 100644
--- a/mysql-test/main/stat_tables_innodb.result
+++ b/mysql-test/main/stat_tables_innodb.result
@@ -861,6 +861,20 @@ length(a)
set names latin1;
set @@use_stat_tables=@save_use_stat_tables;
drop table t1;
+#
+# MDEV-23753: SIGSEGV in Column_stat::store_stat_fields
+#
+CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2;
+LOCK TABLES t1 WRITE;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting);
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze error Invalid argument
+DROP TABLE t1;
# please keep this at the last
set @@global.histogram_size=@save_histogram_size;
# Start of 10.4 tests
diff --git a/mysql-test/main/table_elim.result b/mysql-test/main/table_elim.result
index 41928f28963..b49e7b11ed6 100644
--- a/mysql-test/main/table_elim.result
+++ b/mysql-test/main/table_elim.result
@@ -544,7 +544,7 @@ drop table t0,t1,t2,t3,t4,t5,t6;
CREATE TABLE t1 (f1 int(11), PRIMARY KEY (f1)) ;
CREATE TABLE t2 (f4 varchar(1024), KEY (f4)) ;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT IGNORE INTO t2 VALUES ('xcddwntkbxyorzdv'),
('cnxxcddwntkbxyor'),('r'),('r'), ('did'),('I'),('when'),
('hczkfqjeggivdvac'),('e'),('okay'),('up');
diff --git a/mysql-test/main/table_value_constr.result b/mysql-test/main/table_value_constr.result
index 86e8b9cf76d..4a1d829ef1b 100644
--- a/mysql-test/main/table_value_constr.result
+++ b/mysql-test/main/table_value_constr.result
@@ -2622,6 +2622,60 @@ ERROR HY000: 'ignore' is not allowed in this context
EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT;
ERROR HY000: 'default' is not allowed in this context
#
+# MDEV-24675: TVC using subqueries
+#
+values((select 1));
+(select 1)
+1
+values (2), ((select 1));
+2
+2
+1
+values ((select 1)), (2), ((select 3));
+(select 1)
+1
+2
+3
+values ((select 1), 2), (3,4), (5, (select 6));
+(select 1) 2
+1 2
+3 4
+5 6
+create table t1 (a int, b int);
+insert into t1 values (1,3), (2,3), (3,2), (1,2);
+values((select max(a) from t1));
+(select max(a) from t1)
+3
+values((select min(b) from t1));
+(select min(b) from t1)
+2
+values ((select max(a) from t1), (select min(b) from t1));
+(select max(a) from t1) (select min(b) from t1)
+3 2
+values((select * from (select max(b) from t1) as t));
+(select * from (select max(b) from t1) as t)
+3
+drop table t1;
+#
+# MDEV-24618: TVC contains extra parenthesis for row expressions
+# in value list
+#
+create table t1 (a int, b int);
+insert into t1 values (1,3), (2,3);
+insert into t1 values ((5,4));
+ERROR 21000: Operand should contain 1 column(s)
+values ((1,2));
+ERROR 21000: Operand should contain 1 column(s)
+select * from (values ((1,2))) dt;
+ERROR 21000: Operand should contain 1 column(s)
+values (1,2);
+1 2
+1 2
+values ((select min(a), max(b) from t1));
+ERROR 21000: Operand should contain 1 column(s)
+drop table t1;
+End of 10.3 tests
+#
# MDEV-22610 Crash in INSERT INTO t1 (VALUES (DEFAULT) UNION VALUES (DEFAULT))
#
VALUES (DEFAULT) UNION VALUES (DEFAULT);
@@ -2634,3 +2688,4 @@ ERROR HY000: 'default' is not allowed in this context
INSERT INTO t1 (VALUES (IGNORE) UNION VALUES (IGNORE));
ERROR HY000: 'ignore' is not allowed in this context
DROP TABLE t1;
+End of 10.4 tests
diff --git a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test
index bd5e4d75904..094561476e8 100644
--- a/mysql-test/main/table_value_constr.test
+++ b/mysql-test/main/table_value_constr.test
@@ -1354,6 +1354,54 @@ EXECUTE IMMEDIATE 'VALUES (?)' USING IGNORE;
--error ER_UNKNOWN_ERROR
EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT;
+--echo #
+--echo # MDEV-24675: TVC using subqueries
+--echo #
+
+values((select 1));
+
+values (2), ((select 1));
+
+values ((select 1)), (2), ((select 3));
+
+values ((select 1), 2), (3,4), (5, (select 6));
+
+create table t1 (a int, b int);
+insert into t1 values (1,3), (2,3), (3,2), (1,2);
+
+values((select max(a) from t1));
+
+values((select min(b) from t1));
+
+values ((select max(a) from t1), (select min(b) from t1));
+
+values((select * from (select max(b) from t1) as t));
+
+drop table t1;
+
+--echo #
+--echo # MDEV-24618: TVC contains extra parenthesis for row expressions
+--echo # in value list
+--echo #
+
+create table t1 (a int, b int);
+insert into t1 values (1,3), (2,3);
+--error ER_OPERAND_COLUMNS
+insert into t1 values ((5,4));
+
+--error ER_OPERAND_COLUMNS
+values ((1,2));
+
+--error ER_OPERAND_COLUMNS
+select * from (values ((1,2))) dt;
+
+values (1,2);
+--error ER_OPERAND_COLUMNS
+values ((select min(a), max(b) from t1));
+
+drop table t1;
+
+--echo End of 10.3 tests
--echo #
--echo # MDEV-22610 Crash in INSERT INTO t1 (VALUES (DEFAULT) UNION VALUES (DEFAULT))
@@ -1369,3 +1417,6 @@ INSERT INTO t1 (VALUES (DEFAULT) UNION VALUES (DEFAULT));
--error ER_UNKNOWN_ERROR
INSERT INTO t1 (VALUES (IGNORE) UNION VALUES (IGNORE));
DROP TABLE t1;
+
+
+--echo End of 10.4 tests
diff --git a/mysql-test/main/user_limits.result b/mysql-test/main/user_limits.result
index ffb8bb204a6..acb34754caa 100644
--- a/mysql-test/main/user_limits.result
+++ b/mysql-test/main/user_limits.result
@@ -186,3 +186,30 @@ connection default;
drop user mysqltest_1@localhost;
drop table t1;
set global max_user_connections= @my_max_user_connections;
+#
+# End of 10.1 tests
+#
+#
+# MDEV-17852 Altered connection limits for user have no effect
+#
+create user foo@'%' with max_user_connections 1;
+connect con1,localhost,foo;
+select current_user();
+current_user()
+foo@%
+connect(localhost,foo,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con2,localhost,foo;
+ERROR 42000: User 'foo' has exceeded the 'max_user_connections' resource (current value: 1)
+connection default;
+alter user foo with max_user_connections 2;
+connect con3,localhost,foo;
+select current_user();
+current_user()
+foo@%
+disconnect con3;
+disconnect con1;
+connection default;
+drop user foo@'%';
+#
+# End of 10.2 tests
+#
diff --git a/mysql-test/main/user_limits.test b/mysql-test/main/user_limits.test
index ebb4fd4fb88..36524febd8d 100644
--- a/mysql-test/main/user_limits.test
+++ b/mysql-test/main/user_limits.test
@@ -216,3 +216,29 @@ drop table t1;
--source include/wait_until_count_sessions.inc
set global max_user_connections= @my_max_user_connections;
+
+--echo #
+--echo # End of 10.1 tests
+--echo #
+
+--echo #
+--echo # MDEV-17852 Altered connection limits for user have no effect
+--echo #
+create user foo@'%' with max_user_connections 1;
+--connect con1,localhost,foo
+select current_user();
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_USER_LIMIT_REACHED
+--connect con2,localhost,foo
+--connection default
+alter user foo with max_user_connections 2;
+--connect con3,localhost,foo
+select current_user();
+--disconnect con3
+--disconnect con1
+--connection default
+drop user foo@'%';
+
+--echo #
+--echo # End of 10.2 tests
+--echo #
diff --git a/mysql-test/main/view.result b/mysql-test/main/view.result
index 7e9dd1837f6..49e57aca219 100644
--- a/mysql-test/main/view.result
+++ b/mysql-test/main/view.result
@@ -6758,6 +6758,22 @@ drop database db1;
create database test;
use test;
#
+# MDEV-16940: update of multi-table view returning error used in SP
+#
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1), (2);
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (2), (3);
+CREATE VIEW v1 AS SELECT a, b FROM t1,t2;
+CREATE PROCEDURE sp1() UPDATE v1 SET a = 8, b = 9;
+CALL sp1;
+ERROR HY000: Can not modify more than one base table through a join view 'test.v1'
+CALL sp1;
+ERROR HY000: Can not modify more than one base table through a join view 'test.v1'
+DROP PROCEDURE sp1;
+DROP VIEW v1;
+DROP TABLE t1, t2;
+#
# End of 10.2 tests
#
#
diff --git a/mysql-test/main/view.test b/mysql-test/main/view.test
index 9e84f6dc677..47ede54e1c7 100644
--- a/mysql-test/main/view.test
+++ b/mysql-test/main/view.test
@@ -6463,6 +6463,28 @@ create database test;
use test;
--echo #
+--echo # MDEV-16940: update of multi-table view returning error used in SP
+--echo #
+
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1), (2);
+CREATE TABLE t2 (b INT) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (2), (3);
+
+CREATE VIEW v1 AS SELECT a, b FROM t1,t2;
+
+CREATE PROCEDURE sp1() UPDATE v1 SET a = 8, b = 9;
+
+--error ER_VIEW_MULTIUPDATE
+CALL sp1;
+--error ER_VIEW_MULTIUPDATE
+CALL sp1;
+
+DROP PROCEDURE sp1;
+DROP VIEW v1;
+DROP TABLE t1, t2;
+
+--echo #
--echo # End of 10.2 tests
--echo #
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index b2aa4eef5fa..8c2e45ecde2 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -1163,7 +1163,7 @@ sub command_line_setup {
'rr' => \$opt_rr,
'rr-arg=s' => \@rr_record_args,
'rr-dir=s' => \$opt_rr_dir,
- 'client-gdb' => \$opt_client_gdb,
+ 'client-gdb=s' => \$opt_client_gdb,
'manual-gdb' => \$opt_manual_gdb,
'manual-lldb' => \$opt_manual_lldb,
'boot-gdb' => \$opt_boot_gdb,
@@ -1261,7 +1261,7 @@ sub command_line_setup {
);
# fix options (that take an optional argument and *only* after = sign
- my %fixopt = ( '--gdb' => '--gdb=#' );
+ my %fixopt = ( '--gdb' => '--gdb=#', '--client-gdb' => '--client-gdb=#' );
@ARGV = map { $fixopt{$_} or $_ } @ARGV;
GetOptions(%options) or usage("Can't read options");
usage("") if $opt_usage;
@@ -5947,7 +5947,11 @@ sub gdb_arguments {
$input = $input ? "< $input" : "";
if ($type eq 'client') {
- mtr_tofile($gdb_init_file, "set args @$$args $input");
+ mtr_tofile($gdb_init_file,
+ join("\n",
+ "set args @$$args $input",
+ split /;/, $opt_client_gdb || ""
+ ));
} elsif ($opt_valgrind_mysqld) {
my $v = $$exe;
my $vargs = [];
diff --git a/mysql-test/suite/binlog/include/binlog_xa_recover.inc b/mysql-test/suite/binlog/include/binlog_xa_recover.inc
index de2703377cc..9e0906c90f0 100644
--- a/mysql-test/suite/binlog/include/binlog_xa_recover.inc
+++ b/mysql-test/suite/binlog/include/binlog_xa_recover.inc
@@ -180,12 +180,11 @@ connection default;
# commit checkpoint, otherwise we get nondeterministic results.
SET @old_dbug= @@global.DEBUG_DBUG;
SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
-
+SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
SET DEBUG_SYNC= "now SIGNAL con12_cont";
connection con12;
reap;
connection default;
-SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
SET GLOBAL debug_dbug= @old_dbug;
SET DEBUG_SYNC= "now SIGNAL con11_cont";
diff --git a/mysql-test/suite/binlog/r/binlog_xa_recover.result b/mysql-test/suite/binlog/r/binlog_xa_recover.result
index 25aa1389b71..cb2cad957f0 100644
--- a/mysql-test/suite/binlog/r/binlog_xa_recover.result
+++ b/mysql-test/suite/binlog/r/binlog_xa_recover.result
@@ -148,10 +148,10 @@ connection con10;
connection default;
SET @old_dbug= @@global.DEBUG_DBUG;
SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
+SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
SET DEBUG_SYNC= "now SIGNAL con12_cont";
connection con12;
connection default;
-SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
SET GLOBAL debug_dbug= @old_dbug;
SET DEBUG_SYNC= "now SIGNAL con11_cont";
connection con11;
diff --git a/mysql-test/suite/binlog_encryption/binlog_xa_recover.result b/mysql-test/suite/binlog_encryption/binlog_xa_recover.result
index af36fe277a1..6e33595eb9e 100644
--- a/mysql-test/suite/binlog_encryption/binlog_xa_recover.result
+++ b/mysql-test/suite/binlog_encryption/binlog_xa_recover.result
@@ -153,10 +153,10 @@ connection con10;
connection default;
SET @old_dbug= @@global.DEBUG_DBUG;
SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed";
+SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
SET DEBUG_SYNC= "now SIGNAL con12_cont";
connection con12;
connection default;
-SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed";
SET GLOBAL debug_dbug= @old_dbug;
SET DEBUG_SYNC= "now SIGNAL con11_cont";
connection con11;
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index 31c9c17daf1..e910be0a406 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -21,6 +21,7 @@ galera_FK_duplicate_client_insert : MDEV-24473: galera.galera_FK_duplicate_clien
galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid_log_event::do_apply_event()
galera_bf_abort_at_after_statement : MDEV-21557: galera_bf_abort_at_after_statement MTR failed: query 'reap' succeeded - should have failed with errno 1213
galera_bf_abort_group_commit : MDEV-18282 Galera test failure on galera.galera_bf_abort_group_commit
+galera_bf_lock_wait : MDEV-24649 galera.galera_bf_lock_wait MTR failed with sigabrt: Assertion `!is_owned()' failed in sync0policy.ic on MutexDebug with Mutex = TTASEventMutex<GenericPolicy>
galera_binlog_stmt_autoinc : MDEV-19959 Galera test failure on galera_binlog_stmt_autoinc
galera_encrypt_tmp_files : Get error failed to enable encryption of temporary files
galera_ftwrl : MDEV-21525 galera.galera_ftwrl
@@ -28,8 +29,10 @@ galera_gcache_recover_manytrx : MDEV-18834 Galera test failure
galera_kill_largechanges : MDEV-18179 Galera test failure on galera.galera_kill_largechanges
galera_many_tables_nopk : MDEV-18182 Galera test failure on galera.galera_many_tables_nopk
galera_mdl_race : MDEV-21524 galera.galera_mdl_race
+galera_mdl_race : MDEV-21524: galera.galera_mdl_race MTR failed: query 'reap' succeeded - should have failed with errno 1213
galera_parallel_simple : MDEV-20318 galera.galera_parallel_simple fails
galera_pc_ignore_sb : MDEV-20888 galera.galera_pc_ignore_sb
+galera_partition : MDEV-21806: galera.galera_partition MTR failed: failed to recover from DONOR state
galera_shutdown_nonprim : MDEV-21493 galera.galera_shutdown_nonprim
galera_ssl_upgrade : MDEV-19950 Galera test failure on galera_ssl_upgrade
galera_sst_mariabackup_encrypt_with_key : MDEV-21484 galera_sst_mariabackup_encrypt_with_key
diff --git a/mysql-test/suite/galera/r/galera_UK_conflict.result b/mysql-test/suite/galera/r/galera_UK_conflict.result
new file mode 100644
index 00000000000..76649f1b268
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_UK_conflict.result
@@ -0,0 +1,89 @@
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 int, f3 int, unique key keyj (f2));
+INSERT INTO t1 VALUES (1, 1, 0);
+INSERT INTO t1 VALUES (3, 3, 0);
+INSERT INTO t1 VALUES (10, 10, 0);
+SET GLOBAL wsrep_slave_threads = 3;
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+connection node_1;
+SET SESSION wsrep_sync_wait=0;
+START TRANSACTION;
+DELETE FROM t1 WHERE f2 = 3;
+INSERT INTO t1 VALUES (3, 3, 1);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET SESSION wsrep_sync_wait=0;
+connection node_2;
+INSERT INTO t1 VALUES (5, 5, 2);
+connection node_1a;
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (4, 4, 2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+SELECT * FROM t1;
+f1 f2 f3
+1 1 0
+3 3 1
+4 4 2
+5 5 2
+10 10 0
+wsrep_local_replays
+1
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+connection node_2;
+SELECT * FROM t1;
+f1 f2 f3
+1 1 0
+3 3 1
+4 4 2
+5 5 2
+10 10 0
+INSERT INTO t1 VALUES (7,7,7);
+INSERT INTO t1 VALUES (8,8,8);
+SELECT * FROM t1;
+f1 f2 f3
+1 1 0
+3 3 1
+4 4 2
+5 5 2
+7 7 7
+8 8 8
+10 10 0
+connection node_1;
+SELECT * FROM t1;
+f1 f2 f3
+1 1 0
+3 3 1
+4 4 2
+5 5 2
+7 7 7
+10 10 0
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
index 16db26c3c6b..808e32b8cb2 100644
--- a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
+++ b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
@@ -4,7 +4,6 @@ connection node_1;
# test phase with cascading foreign key through 3 tables
#
connection node_1;
-set wsrep_sync_wait=0;
CREATE TABLE grandparent (
id INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
@@ -26,15 +25,26 @@ INSERT INTO grandparent VALUES (1),(2);
INSERT INTO parent VALUES (1,1), (2,2);
INSERT INTO child VALUES (1,1), (2,2);
connection node_2;
-set wsrep_sync_wait=0;
DELETE FROM grandparent WHERE id = 1;
+SELECT * FROM grandparent;
+id
+2
+SELECT * FROM parent;
+id grandparent_id
+2 2
+SELECT * FROM child;
+id parent_id
+2 2
connection node_1;
-SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1;
-COUNT(*) COUNT(*) = 0
-0 1
-SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1;
-COUNT(*) COUNT(*) = 0
-0 1
+SELECT * FROM grandparent;
+id
+2
+SELECT * FROM parent;
+id grandparent_id
+2 2
+SELECT * FROM child;
+id parent_id
+2 2
DROP TABLE child;
DROP TABLE parent;
DROP TABLE grandparent;
diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result b/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result
index bd76692b27c..e545da53855 100644
--- a/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result
+++ b/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result
@@ -20,12 +20,10 @@ INSERT INTO child VALUES (1,'row one'), (2,'row two');
connection node_2;
DELETE FROM parent;
connection node_1;
-SELECT COUNT(*), COUNT(*) = 0 FROM parent;
-COUNT(*) COUNT(*) = 0
-0 1
-SELECT COUNT(*), COUNT(*) = 0 FROM child;
-COUNT(*) COUNT(*) = 0
-0 1
+SELECT * FROM parent;
+id
+SELECT * FROM child;
+id parent_id
DROP TABLE child;
DROP TABLE parent;
#
@@ -60,11 +58,9 @@ SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
SET GLOBAL debug_dbug = "";
SET DEBUG_SYNC = "RESET";
connection node_1;
-SELECT COUNT(*), COUNT(*) = 0 FROM parent;
-COUNT(*) COUNT(*) = 0
-0 1
-SELECT COUNT(*), COUNT(*) = 0 FROM child;
-COUNT(*) COUNT(*) = 0
-0 1
+SELECT * FROM parent;
+id
+SELECT * FROM child;
+id j parent_id
DROP TABLE child;
DROP TABLE parent;
diff --git a/mysql-test/suite/galera/r/galera_var_sst_auth.result b/mysql-test/suite/galera/r/galera_var_sst_auth.result
index 6a5683e2633..53b8443705a 100644
--- a/mysql-test/suite/galera/r/galera_var_sst_auth.result
+++ b/mysql-test/suite/galera/r/galera_var_sst_auth.result
@@ -1,8 +1,5 @@
connection node_2;
connection node_1;
-#
-# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
-#
SELECT @@global.wsrep_sst_auth;
@@global.wsrep_sst_auth
********
@@ -10,5 +7,14 @@ SET @@global.wsrep_sst_auth='foo:bar';
SELECT @@global.wsrep_sst_auth;
@@global.wsrep_sst_auth
********
-disconnect node_2;
-disconnect node_1;
+connection node_2;
+SET @@global.wsrep_sst_auth= 'abcdefghijklmnopqrstuvwxyz';
+SELECT @@global.wsrep_sst_auth;
+@@global.wsrep_sst_auth
+********
+Shutdown node_2
+connection node_1;
+connection node_2;
+SELECT @@global.wsrep_sst_auth;
+@@global.wsrep_sst_auth
+********
diff --git a/mysql-test/suite/galera/r/lp1376747-4.result b/mysql-test/suite/galera/r/lp1376747-4.result
index 6bbc24309ad..888961b592d 100644
--- a/mysql-test/suite/galera/r/lp1376747-4.result
+++ b/mysql-test/suite/galera/r/lp1376747-4.result
@@ -5,32 +5,34 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
SET session wsrep_sync_wait=0;
-FLUSH TABLE WITH READ LOCK;
+FLUSH TABLES WITH READ LOCK;
connection node_1;
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
INSERT INTO t1 VALUES (2,3);
connection node_2a;
SET session wsrep_sync_wait=0;
-# node_1 DDL should not yet be applied
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`id` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SET debug_sync='flush_tables_with_read_lock_after_acquire_locks SIGNAL parked2 WAIT_FOR go2';
FLUSH TABLES t1 WITH READ LOCK;;
connection node_2;
+SET debug_sync='now WAIT_FOR parked2';
+SET debug_sync='now SIGNAL go2';
UNLOCK TABLES;
-# node_1 DDL should not yet be applied 2
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`id` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+set debug_sync= 'RESET';
connection node_2a;
UNLOCK TABLES;
-# node_1 DDL should be applied 2
+SET SESSION wsrep_sync_wait = DEFAULT;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/galera/t/galera_UK_conflict.test b/mysql-test/suite/galera/t/galera_UK_conflict.test
new file mode 100644
index 00000000000..57bafbf8ae0
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_UK_conflict.test
@@ -0,0 +1,148 @@
+#
+# This test exercises transaction replay in a scenario where two
+# subsequent write sets being applied conflict with a local transaction
+# in its commit phase. The conflict is a "false positive" conflict on a
+# GAP lock in a secondary unique index.
+# The first applier causes a BF abort of the local committer, which
+# starts replaying because of positive certification.
+# In the buggy version the scenario continues: while the local transaction
+# is replaying, the latter applier hits a similar UK GAP lock conflict
+# and forces the replayer to abort a second time.
+# In the fixed version this latter BF abort should not happen.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
+
+--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 int, f3 int, unique key keyj (f2));
+INSERT INTO t1 VALUES (1, 1, 0);
+INSERT INTO t1 VALUES (3, 3, 0);
+INSERT INTO t1 VALUES (10, 10, 0);
+
+# we will need 2 applier threads for applying two write sets in parallel in node 1
+# and 1 applier thread for handling replaying
+SET GLOBAL wsrep_slave_threads = 3;
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+
+--connection node_1
+# start a transaction which deletes and re-inserts the middle row of the test table;
+# it will be the victim of a false positive conflict with the appliers
+SET SESSION wsrep_sync_wait=0;
+START TRANSACTION;
+
+DELETE FROM t1 WHERE f2 = 3;
+INSERT INTO t1 VALUES (3, 3, 1);
+
+# Control connection to manage sync points for appliers
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET SESSION wsrep_sync_wait=0;
+
+# send the first INSERT transaction from node 2; it will conflict on a GAP lock in node 1
+--connection node_2
+INSERT INTO t1 VALUES (5, 5, 2);
+
+--connection node_1a
+# wait until the INSERT is seen in the apply_cb sync point
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+
+# the first applier is now in its wait point; set a sync point for the second INSERT
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_2
+# send the second INSERT into the same GAP in the test table
+INSERT INTO t1 VALUES (4, 4, 2);
+
+--connection node_1a
+# wait for the second INSERT to arrive in its sync point
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# both appliers are now waiting in separate sync points
+
+# Block the local commit, send the COMMIT and wait until it gets blocked
+--let $galera_sync_point = commit_monitor_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+--send COMMIT
+
+--connection node_1a
+# wait for the local commit to enter the commit monitor wait state
+--let $galera_sync_point = apply_monitor_slave_enter_sync commit_monitor_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# release the local transaction to continue with commit
+--let $galera_sync_point = commit_monitor_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# and now release the first applier; it should force the local trx to abort
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+
+# set another sync point for second applier
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+
+# let the second applier move forward
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+
+# wait until the second applier is in its wait point
+SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+
+# stop the second applier before commit
+--let $galera_sync_point = commit_monitor_enter_sync
+--source include/galera_set_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# release the second INSERT; with the buggy version it would conflict with
+# the replayer
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = NULL;
+SET debug_sync='RESET';
+
+# with the fixed version the second applier has reached the commit monitor,
+# and we can release it to complete
+--let $galera_sync_point = commit_monitor_enter_sync
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# local commit should succeed
+--connection node_1
+--reap
+
+SELECT * FROM t1;
+
+# check that wsrep_local_replays has increased by 1
+--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+--disable_query_log
+--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays;
+--enable_query_log
+
+# restore the original slave thread count
+SET GLOBAL wsrep_slave_threads = DEFAULT;
+
+--connection node_2
+SELECT * FROM t1;
+
+# replicate some transactions so that the wsrep slave thread count can return
+# to its original value in node 1
+INSERT INTO t1 VALUES (7,7,7);
+INSERT INTO t1 VALUES (8,8,8);
+SELECT * FROM t1;
+
+--connection node_1
+SELECT * FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_fk_cascade_delete.test b/mysql-test/suite/galera/t/galera_fk_cascade_delete.test
index a3e0dbcf36f..49b54f0f7f0 100644
--- a/mysql-test/suite/galera/t/galera_fk_cascade_delete.test
+++ b/mysql-test/suite/galera/t/galera_fk_cascade_delete.test
@@ -9,7 +9,6 @@
--echo #
--connection node_1
-set wsrep_sync_wait=0;
CREATE TABLE grandparent (
id INT NOT NULL PRIMARY KEY
@@ -36,8 +35,12 @@ INSERT INTO parent VALUES (1,1), (2,2);
INSERT INTO child VALUES (1,1), (2,2);
--connection node_2
-set wsrep_sync_wait=0;
-
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'grandparent'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child'
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 2 FROM child;
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 2 FROM parent;
@@ -46,6 +49,10 @@ set wsrep_sync_wait=0;
--source include/wait_condition.inc
DELETE FROM grandparent WHERE id = 1;
+SELECT * FROM grandparent;
+SELECT * FROM parent;
+SELECT * FROM child;
+
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM child;
--source include/wait_condition.inc
@@ -53,8 +60,10 @@ DELETE FROM grandparent WHERE id = 1;
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM grandparent;
--source include/wait_condition.inc
-SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1;
-SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1;
+
+SELECT * FROM grandparent;
+SELECT * FROM parent;
+SELECT * FROM child;
DROP TABLE child;
DROP TABLE parent;
diff --git a/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test b/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test
index 96e633f83d7..d902783ed64 100644
--- a/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test
+++ b/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test
@@ -25,16 +25,25 @@ INSERT INTO parent VALUES ('row one'), ('row two');
INSERT INTO child VALUES (1,'row one'), (2,'row two');
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 2 FROM parent;
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 2 FROM child;
--source include/wait_condition.inc
+
DELETE FROM parent;
--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 0 FROM parent;
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 0 FROM child;
--source include/wait_condition.inc
-SELECT COUNT(*), COUNT(*) = 0 FROM parent;
-SELECT COUNT(*), COUNT(*) = 0 FROM child;
+SELECT * FROM parent;
+SELECT * FROM child;
DROP TABLE child;
DROP TABLE parent;
@@ -62,6 +71,12 @@ INSERT INTO parent VALUES (1);
INSERT INTO child VALUES (1,0,1);
--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child'
+--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 1 FROM parent;
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM child;
--source include/wait_condition.inc
@@ -89,11 +104,13 @@ SET DEBUG_SYNC = "RESET";
--connection node_1
--reap
-
+--let $wait_condition = SELECT COUNT(*) = 0 FROM parent;
+--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 0 FROM child;
--source include/wait_condition.inc
-SELECT COUNT(*), COUNT(*) = 0 FROM parent;
-SELECT COUNT(*), COUNT(*) = 0 FROM child;
+
+SELECT * FROM parent;
+SELECT * FROM child;
DROP TABLE child;
DROP TABLE parent;
diff --git a/mysql-test/suite/galera/t/galera_truncate.test b/mysql-test/suite/galera/t/galera_truncate.test
index 3c3ee56a23f..f490943db7c 100644
--- a/mysql-test/suite/galera/t/galera_truncate.test
+++ b/mysql-test/suite/galera/t/galera_truncate.test
@@ -32,6 +32,9 @@ CREATE TABLE t2 (f1 VARCHAR(255)) Engine=InnoDB;
INSERT INTO t2 VALUES ('abc');
--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t2';
+--source include/wait_condition.inc
+
TRUNCATE TABLE t2;
--connection node_2
diff --git a/mysql-test/suite/galera/t/galera_var_sst_auth.test b/mysql-test/suite/galera/t/galera_var_sst_auth.test
index 5c9b3f5a61e..ad7f46620ad 100644
--- a/mysql-test/suite/galera/t/galera_var_sst_auth.test
+++ b/mysql-test/suite/galera/t/galera_var_sst_auth.test
@@ -1,12 +1,33 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
---echo #
---echo # MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
---echo #
+#
+# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
+#
SELECT @@global.wsrep_sst_auth;
SET @@global.wsrep_sst_auth='foo:bar';
SELECT @@global.wsrep_sst_auth;
---source include/galera_end.inc
+#
+# MDEV-24509 Warning: Memory not freed: 56 on SET @@global.wsrep_sst_auth
+#
+--connection node_2
+SET @@global.wsrep_sst_auth= 'abcdefghijklmnopqrstuvwxyz';
+SELECT @@global.wsrep_sst_auth;
+--echo Shutdown node_2
+--source include/shutdown_mysqld.inc
+
+# On node_1, verify that node_2 has left the cluster.
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+# Restart node_2
+--connection node_2
+--source include/start_mysqld.inc
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+SELECT @@global.wsrep_sst_auth;
+
+
diff --git a/mysql-test/suite/galera/t/lp1376747-4.test b/mysql-test/suite/galera/t/lp1376747-4.test
index 7cf922b97e5..d19ff422ab0 100644
--- a/mysql-test/suite/galera/t/lp1376747-4.test
+++ b/mysql-test/suite/galera/t/lp1376747-4.test
@@ -5,7 +5,8 @@
# after provider is unpaused
#
--source include/galera_cluster.inc
---source include/have_innodb.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
--let $galera_connection_name = node_2a
--let $galera_server_number = 2
@@ -17,7 +18,7 @@ INSERT INTO t1 VALUES (1);
--connection node_2
SET session wsrep_sync_wait=0;
-FLUSH TABLE WITH READ LOCK;
+FLUSH TABLES WITH READ LOCK;
--connection node_1
ALTER TABLE t1 ADD COLUMN f2 INTEGER;
@@ -25,32 +26,33 @@ INSERT INTO t1 VALUES (2,3);
--connection node_2a
SET session wsrep_sync_wait=0;
-
---echo # node_1 DDL should not yet be applied
SHOW CREATE TABLE t1;
-
+SET debug_sync='flush_tables_with_read_lock_after_acquire_locks SIGNAL parked2 WAIT_FOR go2';
--send FLUSH TABLES t1 WITH READ LOCK;
--connection node_2
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%committed%';
---source include/wait_condition.inc
-
+SET debug_sync='now WAIT_FOR parked2';
+
+# Let the FLUSH TABLES reach its pause state before we unlock the
+# table; otherwise there is a window in which FLUSH TABLES has not yet
+# started waiting in pause and UNLOCK TABLES lets the ALTER TABLE proceed.
+# This can happen because send is asynchronous.
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE Info LIKE 'FLUSH TABLES t1 WITH READ LOCK';
+--let $wait_condition_on_error_output = SHOW PROCESSLIST
+--source include/wait_condition_with_debug.inc
+
+SET debug_sync='now SIGNAL go2';
+# this releases the existing lock but does not resume
+# the cluster, as the new FTWRL is still pausing it.
UNLOCK TABLES;
-
---echo # node_1 DDL should not yet be applied 2
SHOW CREATE TABLE t1;
+set debug_sync= 'RESET';
--connection node_2a
--reap
UNLOCK TABLES;
---let $wait_condition = SELECT COUNT(*) = 2 FROM t1;
---source include/wait_condition.inc
-
---let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 't1';
---source include/wait_condition.inc
-
---echo # node_1 DDL should be applied 2
+SET SESSION wsrep_sync_wait = DEFAULT;
SHOW CREATE TABLE t1;
SELECT * from t1;
diff --git a/mysql-test/suite/galera_3nodes/disabled.def b/mysql-test/suite/galera_3nodes/disabled.def
index 0e2706f2dc3..884c2a8c82a 100644
--- a/mysql-test/suite/galera_3nodes/disabled.def
+++ b/mysql-test/suite/galera_3nodes/disabled.def
@@ -10,7 +10,15 @@
#
##############################################################################
+GAL-501 : MDEV-24645 galera_3nodes.GAL-501 MTR failed: failed to open gcomm backend connection: 110
galera_gtid_2_cluster : MDEV-23775 Galera test failure on galera_3nodes.galera_gtid_2_cluster
+galera_ipv6_mariabackup : MDEV-24440: galera_3nodes.galera_ipv6_mariabackup MTR fails sporadically: Failed to read from: wsrep_sst_mariabackup --role 'donor' --address '[::1]:16028/xtrabackup_sst//1'
+galera_ipv6_mariabackup_section : MDEV-22195: galera_3nodes.galera_ipv6_mariabackup_section MTR failed: assert_grep.inc failed
+galera_ipv6_mysqldump : MDEV-24036: galera_3nodes.galera_ipv6_mysqldump: rare random crashes during shutdown
+galera_ipv6_rsync_section : MDEV-23580: galera_3nodes.galera_ipv6_rsync_section MTR failed: WSREP_SST: [ERROR] rsync daemon port '16008' has been taken
galera_ist_gcache_rollover : MDEV-23578 WSREP: exception caused by message: {v=0,t=1,ut=255,o=4,s=0,sr=0,as=1,f=6,src=50524cfe,srcvid=view_id(REG,50524cfe,4),insvid=view_id(UNKNOWN,00000000,0),ru=00000000,r=[-1,-1],fs=75,nl=(}
-galera_slave_options_do :MDEV-8798
+galera_load_data_ist : MDEV-24639 galera_3nodes.galera_load_data_ist MTR failed with SIGABRT: query 'reap' failed: 2013: Lost connection to MySQL server during query
+galera_load_data_ist : MDEV-24639 galera_3nodes.galera_load_data_ist MTR failed with SIGABRT: query 'reap' failed: 2013: Lost connection to MySQL server during query
+galera_pc_bootstrap : MDEV-24650 galera_pc_bootstrap MTR failed: Could not execute 'check-testcase' before testcase
+galera_slave_options_do : MDEV-8798
galera_slave_options_ignore : MDEV-8798
diff --git a/mysql-test/suite/gcol/inc/gcol_column_def_options.inc b/mysql-test/suite/gcol/inc/gcol_column_def_options.inc
index 28c854c44f4..f4350d25ae9 100644
--- a/mysql-test/suite/gcol/inc/gcol_column_def_options.inc
+++ b/mysql-test/suite/gcol/inc/gcol_column_def_options.inc
@@ -343,11 +343,12 @@ DELETE FROM t1 WHERE c=1;
DROP TABLE t1;
}
---error ER_PARSE_ERROR
CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar"));
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
CREATE TABLE t1 (i INT);
---error ER_PARSE_ERROR
ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar");
+SHOW CREATE TABLE t1;
DROP TABLE t1;
--error ER_PARSE_ERROR
CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10));
diff --git a/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result b/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result
index a98652248f8..6da3f3c14d3 100644
--- a/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result
@@ -426,10 +426,20 @@ INSERT INTO t1(a) VALUES(0);
DELETE FROM t1 WHERE c=1;
DROP TABLE t1;
CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar"));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar"))' at line 1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
CREATE TABLE t1 (i INT);
ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar");
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar")' at line 1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) DEFAULT NULL,
+ `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS (10))' at line 1
diff --git a/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result b/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result
index 82a879be3f7..0d7aaeab1fa 100644
--- a/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result
+++ b/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result
@@ -426,10 +426,20 @@ INSERT INTO t1(a) VALUES(0);
DELETE FROM t1 WHERE c=1;
DROP TABLE t1;
CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar"));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar"))' at line 1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
CREATE TABLE t1 (i INT);
ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar");
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar")' at line 1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) DEFAULT NULL,
+ `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10));
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS (10))' at line 1
diff --git a/mysql-test/suite/gcol/r/innodb_virtual_fk.result b/mysql-test/suite/gcol/r/innodb_virtual_fk.result
index 68601823e31..252274f3e0a 100644
--- a/mysql-test/suite/gcol/r/innodb_virtual_fk.result
+++ b/mysql-test/suite/gcol/r/innodb_virtual_fk.result
@@ -793,6 +793,9 @@ DROP TABLE t1;
#
# MDEV-24041 Generated column DELETE with FOREIGN KEY crash InnoDB
#
+SET FOREIGN_KEY_CHECKS=1;
+CREATE DATABASE `a-b`;
+USE `a-b`;
CREATE TABLE emails (
id int,
PRIMARY KEY (id)
@@ -802,6 +805,7 @@ id int,
email_id int,
date_sent char(4),
generated_email_id int as (email_id),
+#generated_sent_date DATE GENERATED ALWAYS AS (date_sent),
PRIMARY KEY (id),
KEY mautic_generated_sent_date_email_id (generated_email_id),
FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE SET NULL
@@ -818,3 +822,4 @@ DELETE FROM emails;
DROP TABLE email_stats;
DROP TABLE emails_metadata;
DROP TABLE emails;
+DROP DATABASE `a-b`;
diff --git a/mysql-test/suite/gcol/t/innodb_virtual_fk.test b/mysql-test/suite/gcol/t/innodb_virtual_fk.test
index da20612f0a1..24b6a4631e6 100644
--- a/mysql-test/suite/gcol/t/innodb_virtual_fk.test
+++ b/mysql-test/suite/gcol/t/innodb_virtual_fk.test
@@ -653,7 +653,9 @@ DROP TABLE t1;
--echo #
--echo # MDEV-24041 Generated column DELETE with FOREIGN KEY crash InnoDB
--echo #
-
+SET FOREIGN_KEY_CHECKS=1;
+CREATE DATABASE `a-b`;
+USE `a-b`;
CREATE TABLE emails (
id int,
PRIMARY KEY (id)
@@ -664,6 +666,7 @@ CREATE TABLE email_stats (
email_id int,
date_sent char(4),
generated_email_id int as (email_id),
+ #generated_sent_date DATE GENERATED ALWAYS AS (date_sent),
PRIMARY KEY (id),
KEY mautic_generated_sent_date_email_id (generated_email_id),
FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE SET NULL
@@ -686,3 +689,4 @@ DELETE FROM emails;
DROP TABLE email_stats;
DROP TABLE emails_metadata;
DROP TABLE emails;
+DROP DATABASE `a-b`;
diff --git a/mysql-test/suite/innodb/r/alter_mdl_timeout.result b/mysql-test/suite/innodb/r/alter_mdl_timeout.result
new file mode 100644
index 00000000000..7af1362c69e
--- /dev/null
+++ b/mysql-test/suite/innodb/r/alter_mdl_timeout.result
@@ -0,0 +1,23 @@
+create table t1(f1 char(10), f2 char(10) not null, f3 int not null,
+f4 int not null, primary key(f3))engine=innodb;
+insert into t1 values('a','a', 1, 1), ('b','b', 2, 2), ('c', 'c', 3, 3), ('d', 'd', 4, 4);
+SET DEBUG_SYNC="row_merge_after_scan SIGNAL con1_start WAIT_FOR con1_insert";
+SET DEBUG_SYNC="innodb_commit_inplace_alter_table_wait SIGNAL con1_wait WAIT_FOR con1_update";
+ALTER TABLE t1 ADD UNIQUE INDEX(f1(3), f4), ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE;
+connect con1,localhost,root,,,;
+SET DEBUG_SYNC="now WAIT_FOR con1_start";
+begin;
+INSERT INTO t1 VALUES('e','e',5, 5);
+SET DEBUG_SYNC="now SIGNAL con1_insert";
+SET DEBUG_SYNC="now WAIT_FOR con1_wait";
+SET DEBUG_SYNC="before_row_upd_sec_new_index_entry SIGNAL con1_update WAIT_FOR alter_rollback";
+UPDATE t1 set f4 = 10 order by f1 desc limit 2;
+connection default;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC="now SIGNAL alter_rollback";
+connection con1;
+commit;
+connection default;
+disconnect con1;
+DROP TABLE t1;
+SET DEBUG_SYNC="RESET";
diff --git a/mysql-test/suite/innodb/r/file_format_defaults.result b/mysql-test/suite/innodb/r/file_format_defaults.result
index 4fd280450aa..ab4d72258a5 100644
--- a/mysql-test/suite/innodb/r/file_format_defaults.result
+++ b/mysql-test/suite/innodb/r/file_format_defaults.result
@@ -8,7 +8,7 @@ SELECT @@innodb_file_per_table;
SET SQL_MODE=strict_all_tables;
CREATE TABLE tab0 (c1 VARCHAR(65530), KEY(c1(3073))) ENGINE=InnoDB ROW_FORMAT=COMPRESSED;
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
SHOW CREATE TABLE tab0;
Table Create Table
tab0 CREATE TABLE `tab0` (
diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result
index b48a85a2443..94c73a84679 100644
--- a/mysql-test/suite/innodb/r/innodb.result
+++ b/mysql-test/suite/innodb/r/innodb.result
@@ -2300,7 +2300,7 @@ drop table t1;
SET sql_mode = 'NO_ENGINE_SUBSTITUTION';
create table t1 (v varchar(65530), key(v));
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
drop table t1;
create table t1 (v varchar(65536));
Warnings:
diff --git a/mysql-test/suite/innodb/t/alter_mdl_timeout.opt b/mysql-test/suite/innodb/t/alter_mdl_timeout.opt
new file mode 100644
index 00000000000..9e0e38bd64a
--- /dev/null
+++ b/mysql-test/suite/innodb/t/alter_mdl_timeout.opt
@@ -0,0 +1 @@
+--lock_wait_timeout=2
diff --git a/mysql-test/suite/innodb/t/alter_mdl_timeout.test b/mysql-test/suite/innodb/t/alter_mdl_timeout.test
new file mode 100644
index 00000000000..15e7f524fd0
--- /dev/null
+++ b/mysql-test/suite/innodb/t/alter_mdl_timeout.test
@@ -0,0 +1,32 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+
+create table t1(f1 char(10), f2 char(10) not null, f3 int not null,
+ f4 int not null, primary key(f3))engine=innodb;
+insert into t1 values('a','a', 1, 1), ('b','b', 2, 2), ('c', 'c', 3, 3), ('d', 'd', 4, 4);
+SET DEBUG_SYNC="row_merge_after_scan SIGNAL con1_start WAIT_FOR con1_insert";
+SET DEBUG_SYNC="innodb_commit_inplace_alter_table_wait SIGNAL con1_wait WAIT_FOR con1_update";
+send ALTER TABLE t1 ADD UNIQUE INDEX(f1(3), f4), ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE;
+
+connect(con1,localhost,root,,,);
+SET DEBUG_SYNC="now WAIT_FOR con1_start";
+begin;
+INSERT INTO t1 VALUES('e','e',5, 5);
+SET DEBUG_SYNC="now SIGNAL con1_insert";
+SET DEBUG_SYNC="now WAIT_FOR con1_wait";
+SET DEBUG_SYNC="before_row_upd_sec_new_index_entry SIGNAL con1_update WAIT_FOR alter_rollback";
+SEND UPDATE t1 set f4 = 10 order by f1 desc limit 2;
+
+connection default;
+--error ER_LOCK_WAIT_TIMEOUT
+reap;
+SET DEBUG_SYNC="now SIGNAL alter_rollback";
+
+connection con1;
+reap;
+commit;
+
+connection default;
+disconnect con1;
+DROP TABLE t1;
+SET DEBUG_SYNC="RESET";
diff --git a/mysql-test/suite/innodb_fts/r/create.result b/mysql-test/suite/innodb_fts/r/create.result
index 55c5c45f643..3ca24f5253d 100644
--- a/mysql-test/suite/innodb_fts/r/create.result
+++ b/mysql-test/suite/innodb_fts/r/create.result
@@ -178,3 +178,13 @@ Table Op Msg_type Msg_text
test.t1 optimize status OK
DROP TABLE t1;
SET GLOBAL innodb_optimize_fulltext_only= @optimize_fulltext.save;
+#
+# MDEV-24403 Segfault on CREATE TABLE with explicit FTS_DOC_ID_INDEX by multiple fields
+#
+create table t1 (
+f1 int, f2 text,
+FTS_DOC_ID bigint unsigned not null,
+unique key FTS_DOC_ID_INDEX(FTS_DOC_ID, f1),
+fulltext (f2))
+engine=innodb;
+ERROR 42000: Incorrect index name 'FTS_DOC_ID_INDEX'
diff --git a/mysql-test/suite/innodb_fts/t/create.test b/mysql-test/suite/innodb_fts/t/create.test
index 4e522994fcc..38c93de4982 100644
--- a/mysql-test/suite/innodb_fts/t/create.test
+++ b/mysql-test/suite/innodb_fts/t/create.test
@@ -106,3 +106,14 @@ SET GLOBAL innodb_optimize_fulltext_only= 1;
OPTIMIZE TABLE t1;
DROP TABLE t1;
SET GLOBAL innodb_optimize_fulltext_only= @optimize_fulltext.save;
+
+--echo #
+--echo # MDEV-24403 Segfault on CREATE TABLE with explicit FTS_DOC_ID_INDEX by multiple fields
+--echo #
+--error ER_WRONG_NAME_FOR_INDEX
+create table t1 (
+ f1 int, f2 text,
+ FTS_DOC_ID bigint unsigned not null,
+ unique key FTS_DOC_ID_INDEX(FTS_DOC_ID, f1),
+ fulltext (f2))
+engine=innodb;
diff --git a/mysql-test/suite/innodb_zip/r/index_large_prefix.result b/mysql-test/suite/innodb_zip/r/index_large_prefix.result
index 4866c152640..97268124fd8 100644
--- a/mysql-test/suite/innodb_zip/r/index_large_prefix.result
+++ b/mysql-test/suite/innodb_zip/r/index_large_prefix.result
@@ -325,10 +325,10 @@ ROW_FORMAT=DYNAMIC;
SET sql_mode='';
create index idx1 on worklog5743(a2);
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
create index idx2 on worklog5743(a3);
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
create index idx3 on worklog5743(a4);
show warnings;
Level Code Message
@@ -337,7 +337,7 @@ create index idx4 on worklog5743(a1, a2);
ERROR 42000: Specified key was too long; max key length is 3072 bytes
show warnings;
Level Code Message
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
Error 1071 Specified key was too long; max key length is 3072 bytes
create index idx5 on worklog5743(a1, a5);
ERROR 42000: Specified key was too long; max key length is 3072 bytes
diff --git a/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result b/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
index 2c66133404c..e88b72ef1d4 100644
--- a/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
+++ b/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
@@ -1224,7 +1224,7 @@ DROP INDEX prefix_idx ON worklog5743;
SET sql_mode = 'NO_ENGINE_SUBSTITUTION';
CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (4000));
Warnings:
-Warning 1071 Specified key was too long; max key length is 3072 bytes
+Note 1071 Specified key was too long; max key length is 3072 bytes
SET sql_mode = default;
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
SELECT col_1_varbinary = REPEAT("a", 4000) FROM worklog5743;
diff --git a/mysql-test/suite/maria/maria-ucs2.result b/mysql-test/suite/maria/maria-ucs2.result
index 1a54ab78081..321a374f4c2 100644
--- a/mysql-test/suite/maria/maria-ucs2.result
+++ b/mysql-test/suite/maria/maria-ucs2.result
@@ -17,7 +17,7 @@ test.t1 check status OK
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
ALTER TABLE t1 MODIFY a VARCHAR(800) CHARSET `ucs2`;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -30,7 +30,7 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
CREATE TABLE t1 (a VARCHAR(800),KEY(a)) ENGINE=Aria CHARACTER SET ucs2;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT INTO t1 VALUES (REPEAT('abc ',200));
CHECK TABLE t1;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result
index 913bf4efbdf..03d37270eed 100644
--- a/mysql-test/suite/maria/maria.result
+++ b/mysql-test/suite/maria/maria.result
@@ -1594,7 +1594,7 @@ a b
drop table t1;
create table t1 (v varchar(65530), key(v));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
drop table if exists t1;
set statement sql_mode = 'NO_ENGINE_SUBSTITUTION' for
create table t1 (v varchar(65536));
@@ -1866,7 +1866,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a));
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1876,7 +1876,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1024);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1886,7 +1886,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=1024;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1953,7 +1953,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=8192;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1965,7 +1965,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a int not null, b varchar(2048), key (a) key_block_size=1024, key(b)) key_block_size=8192;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1995,7 +1995,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/maria/maria3.result b/mysql-test/suite/maria/maria3.result
index 64ae268997a..09fc696c272 100644
--- a/mysql-test/suite/maria/maria3.result
+++ b/mysql-test/suite/maria/maria3.result
@@ -17,7 +17,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000);
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/maria/mrr.result b/mysql-test/suite/maria/mrr.result
index 5c709fb34e5..17b7b751dfb 100644
--- a/mysql-test/suite/maria/mrr.result
+++ b/mysql-test/suite/maria/mrr.result
@@ -393,7 +393,7 @@ PRIMARY KEY (pk),
KEY col_varchar_1024_latin1_key (col_varchar_1024_latin1_key)
) ENGINE=Aria;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
INSERT INTO t1 VALUES
(1,'z'), (2,'abcdefjhjkl'), (3,'in'), (4,'abcdefjhjkl'), (6,'abcdefjhjkl'),
(11,'zx'), (12,'abcdefjhjm'), (13,'jn'), (14,'abcdefjhjp'), (16,'abcdefjhjr');
@@ -430,7 +430,7 @@ f5 varchar(1024) COLLATE latin1_bin,
KEY (f5)
) ENGINE=Aria TRANSACTIONAL=0 ;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
# Fill the table with some data
SELECT alias2.* , alias1.f2
FROM
diff --git a/mysql-test/suite/perfschema/r/schema.result b/mysql-test/suite/perfschema/r/schema.result
index 1f331394df6..8ce4cad4f4b 100644
--- a/mysql-test/suite/perfschema/r/schema.result
+++ b/mysql-test/suite/perfschema/r/schema.result
@@ -62,7 +62,7 @@ users
show create table accounts;
Table Create Table
accounts CREATE TABLE `accounts` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`CURRENT_CONNECTIONS` bigint(20) NOT NULL,
`TOTAL_CONNECTIONS` bigint(20) NOT NULL
@@ -140,7 +140,7 @@ events_stages_summary_by_thread_by_event_name CREATE TABLE `events_stages_summar
show create table events_stages_summary_by_user_by_event_name;
Table Create Table
events_stages_summary_by_user_by_event_name CREATE TABLE `events_stages_summary_by_user_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -151,7 +151,7 @@ events_stages_summary_by_user_by_event_name CREATE TABLE `events_stages_summary_
show create table events_stages_summary_by_account_by_event_name;
Table Create Table
events_stages_summary_by_account_by_event_name CREATE TABLE `events_stages_summary_by_account_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
@@ -398,7 +398,7 @@ events_statements_summary_by_thread_by_event_name CREATE TABLE `events_statement
show create table events_statements_summary_by_user_by_event_name;
Table Create Table
events_statements_summary_by_user_by_event_name CREATE TABLE `events_statements_summary_by_user_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -428,7 +428,7 @@ events_statements_summary_by_user_by_event_name CREATE TABLE `events_statements_
show create table events_statements_summary_by_account_by_event_name;
Table Create Table
events_statements_summary_by_account_by_event_name CREATE TABLE `events_statements_summary_by_account_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
@@ -590,7 +590,7 @@ events_waits_summary_by_thread_by_event_name CREATE TABLE `events_waits_summary_
show create table events_waits_summary_by_user_by_event_name;
Table Create Table
events_waits_summary_by_user_by_event_name CREATE TABLE `events_waits_summary_by_user_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -601,7 +601,7 @@ events_waits_summary_by_user_by_event_name CREATE TABLE `events_waits_summary_by
show create table events_waits_summary_by_account_by_event_name;
Table Create Table
events_waits_summary_by_account_by_event_name CREATE TABLE `events_waits_summary_by_account_by_event_name` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
@@ -762,8 +762,8 @@ show create table setup_actors;
Table Create Table
setup_actors CREATE TABLE `setup_actors` (
`HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%',
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%',
- `ROLE` char(16) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%'
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%',
+ `ROLE` char(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%'
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
show create table setup_consumers;
Table Create Table
@@ -1028,7 +1028,7 @@ threads CREATE TABLE `threads` (
`NAME` varchar(128) NOT NULL,
`TYPE` varchar(10) NOT NULL,
`PROCESSLIST_ID` bigint(20) unsigned DEFAULT NULL,
- `PROCESSLIST_USER` varchar(16) DEFAULT NULL,
+ `PROCESSLIST_USER` varchar(128) DEFAULT NULL,
`PROCESSLIST_HOST` varchar(60) DEFAULT NULL,
`PROCESSLIST_DB` varchar(64) DEFAULT NULL,
`PROCESSLIST_COMMAND` varchar(16) DEFAULT NULL,
@@ -1042,7 +1042,7 @@ threads CREATE TABLE `threads` (
show create table users;
Table Create Table
users CREATE TABLE `users` (
- `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`CURRENT_CONNECTIONS` bigint(20) NOT NULL,
`TOTAL_CONNECTIONS` bigint(20) NOT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
diff --git a/mysql-test/suite/perfschema/r/table_schema.result b/mysql-test/suite/perfschema/r/table_schema.result
index 8caf2017fd2..b7c138c54c7 100644
--- a/mysql-test/suite/perfschema/r/table_schema.result
+++ b/mysql-test/suite/perfschema/r/table_schema.result
@@ -1,7 +1,7 @@
select * from information_schema.columns where table_schema="performance_schema"
order by table_name, ordinal_position;
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
-def performance_schema accounts USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema accounts USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema accounts HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
def performance_schema accounts CURRENT_CONNECTIONS 3 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
def performance_schema accounts TOTAL_CONNECTIONS 4 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
@@ -37,7 +37,7 @@ def performance_schema events_stages_history_long TIMER_END 7 NULL YES bigint NU
def performance_schema events_stages_history_long TIMER_WAIT 8 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_history_long NESTING_EVENT_ID 9 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_history_long NESTING_EVENT_TYPE 10 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references NEVER NULL
-def performance_schema events_stages_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_stages_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -59,7 +59,7 @@ def performance_schema events_stages_summary_by_thread_by_event_name SUM_TIMER_W
def performance_schema events_stages_summary_by_thread_by_event_name MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_thread_by_event_name AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_thread_by_event_name MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
-def performance_schema events_stages_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_stages_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_stages_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -192,7 +192,7 @@ def performance_schema events_statements_history_long NO_INDEX_USED 37 NULL NO b
def performance_schema events_statements_history_long NO_GOOD_INDEX_USED 38 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_history_long NESTING_EVENT_ID 39 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_history_long NESTING_EVENT_TYPE 40 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references NEVER NULL
-def performance_schema events_statements_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_statements_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -300,7 +300,7 @@ def performance_schema events_statements_summary_by_thread_by_event_name SUM_SOR
def performance_schema events_statements_summary_by_thread_by_event_name SUM_SORT_SCAN 24 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_thread_by_event_name SUM_NO_INDEX_USED 25 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_thread_by_event_name SUM_NO_GOOD_INDEX_USED 26 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
-def performance_schema events_statements_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_statements_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_statements_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -408,7 +408,7 @@ def performance_schema events_waits_history_long NESTING_EVENT_TYPE 16 NULL YES
def performance_schema events_waits_history_long OPERATION 17 NULL NO varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) select,insert,update,references NEVER NULL
def performance_schema events_waits_history_long NUMBER_OF_BYTES 18 NULL YES bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
def performance_schema events_waits_history_long FLAGS 19 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL
-def performance_schema events_waits_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_waits_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -437,7 +437,7 @@ def performance_schema events_waits_summary_by_thread_by_event_name SUM_TIMER_WA
def performance_schema events_waits_summary_by_thread_by_event_name MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_thread_by_event_name AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_thread_by_event_name MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
-def performance_schema events_waits_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema events_waits_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema events_waits_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
@@ -561,8 +561,8 @@ def performance_schema session_connect_attrs ATTR_NAME 2 NULL NO varchar 32 96 N
def performance_schema session_connect_attrs ATTR_VALUE 3 NULL YES varchar 1024 3072 NULL NULL NULL utf8 utf8_bin varchar(1024) select,insert,update,references NEVER NULL
def performance_schema session_connect_attrs ORDINAL_POSITION 4 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL
def performance_schema setup_actors HOST 1 '%' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
-def performance_schema setup_actors USER 2 '%' NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
-def performance_schema setup_actors ROLE 3 '%' NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema setup_actors USER 2 '%' NO char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
+def performance_schema setup_actors ROLE 3 '%' NO char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema setup_consumers NAME 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL
def performance_schema setup_consumers ENABLED 2 NULL NO enum 3 9 NULL NULL NULL utf8 utf8_general_ci enum('YES','NO') select,insert,update,references NEVER NULL
def performance_schema setup_instruments NAME 1 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
@@ -783,7 +783,7 @@ def performance_schema threads THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NU
def performance_schema threads NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema threads TYPE 3 NULL NO varchar 10 30 NULL NULL NULL utf8 utf8_general_ci varchar(10) select,insert,update,references NEVER NULL
def performance_schema threads PROCESSLIST_ID 4 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
-def performance_schema threads PROCESSLIST_USER 5 NULL YES varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16) select,insert,update,references NEVER NULL
+def performance_schema threads PROCESSLIST_USER 5 NULL YES varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL
def performance_schema threads PROCESSLIST_HOST 6 NULL YES varchar 60 180 NULL NULL NULL utf8 utf8_general_ci varchar(60) select,insert,update,references NEVER NULL
def performance_schema threads PROCESSLIST_DB 7 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL
def performance_schema threads PROCESSLIST_COMMAND 8 NULL YES varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16) select,insert,update,references NEVER NULL
@@ -793,7 +793,7 @@ def performance_schema threads PROCESSLIST_INFO 11 NULL YES longtext 4294967295
def performance_schema threads PARENT_THREAD_ID 12 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def performance_schema threads ROLE 13 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL
def performance_schema threads INSTRUMENTED 14 NULL NO enum 3 9 NULL NULL NULL utf8 utf8_general_ci enum('YES','NO') select,insert,update,references NEVER NULL
-def performance_schema users USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL
+def performance_schema users USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL
def performance_schema users CURRENT_CONNECTIONS 2 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
def performance_schema users TOTAL_CONNECTIONS 3 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL
select count(*) from information_schema.columns
diff --git a/mysql-test/suite/perfschema/r/threads_mysql.result b/mysql-test/suite/perfschema/r/threads_mysql.result
index 31f91fc6464..40e6360fec7 100644
--- a/mysql-test/suite/perfschema/r/threads_mysql.result
+++ b/mysql-test/suite/perfschema/r/threads_mysql.result
@@ -17,6 +17,16 @@ processlist_info NULL
unified_parent_thread_id NULL
role NULL
instrumented YES
+name thread/sql/manager
+type BACKGROUND
+processlist_user NULL
+processlist_host NULL
+processlist_db NULL
+processlist_command NULL
+processlist_info NULL
+unified_parent_thread_id unified parent_thread_id
+role NULL
+instrumented YES
name thread/sql/one_connection
type FOREGROUND
processlist_user root
@@ -44,16 +54,6 @@ processlist_info NULL
unified_parent_thread_id unified parent_thread_id
role NULL
instrumented YES
-name thread/sql/slave_background
-type BACKGROUND
-processlist_user NULL
-processlist_host NULL
-processlist_db NULL
-processlist_command NULL
-processlist_info NULL
-unified_parent_thread_id unified parent_thread_id
-role NULL
-instrumented YES
CREATE TEMPORARY TABLE t1 AS
SELECT thread_id FROM performance_schema.threads
WHERE name LIKE 'thread/sql%';
@@ -113,7 +113,7 @@ WHERE t1.name LIKE 'thread/sql%'
ORDER BY parent_thread_name, child_thread_name;
parent_thread_name child_thread_name
thread/sql/event_scheduler thread/sql/event_worker
+thread/sql/main thread/sql/manager
thread/sql/main thread/sql/one_connection
thread/sql/main thread/sql/signal_handler
-thread/sql/main thread/sql/slave_background
thread/sql/one_connection thread/sql/event_scheduler
diff --git a/mysql-test/suite/rpl/disabled.def b/mysql-test/suite/rpl/disabled.def
index 89140a19ac8..1ed8651da17 100644
--- a/mysql-test/suite/rpl/disabled.def
+++ b/mysql-test/suite/rpl/disabled.def
@@ -10,8 +10,7 @@
#
##############################################################################
-rpl_spec_variables : BUG#11755836 2009-10-27 jasonh rpl_spec_variables fails on PB2 hpux
-#rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock
+#rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings
rpl_partition_archive : MDEV-5077 2013-09-27 svoj Cannot exchange partition with archive table
rpl_row_binlog_max_cache_size : MDEV-11092
rpl_row_index_choice : MDEV-11666
diff --git a/mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test b/mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test
index 0f46b00f683..4c93ad86209 100644
--- a/mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test
+++ b/mysql-test/suite/rpl/include/rpl_binlog_max_cache_size.test
@@ -49,14 +49,14 @@ connection master;
--echo *** Single statement on transactional table ***
--disable_query_log
---error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE
+--error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE, 1534
eval INSERT INTO t1 (a, data) VALUES (1,
CONCAT($data, $data, $data, $data, $data));
--enable_query_log
--echo *** Single statement on non-transactional table ***
--disable_query_log
---error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE
+--error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE, 1534
eval INSERT INTO t2 (a, data) VALUES (2,
CONCAT($data, $data, $data, $data, $data, $data));
--enable_query_log
diff --git a/mysql-test/suite/rpl/r/rpl_relay_max_extension.result b/mysql-test/suite/rpl/r/rpl_relay_max_extension.result
new file mode 100644
index 00000000000..4444398203e
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_relay_max_extension.result
@@ -0,0 +1,37 @@
+include/rpl_init.inc [topology=1->2]
+connection server_2;
+include/stop_slave.inc
+RESET SLAVE;
+include/start_slave.inc
+include/stop_slave.inc
+#
+# Stop slave server
+#
+#
+# Simulate file number get close to 999997
+# by renaming relay logs and modifying index/info files
+#
+# Restart slave server
+#
+SET @save_slave_parallel_threads= @@GLOBAL.slave_parallel_threads;
+SET @save_max_relay_log_size= @@GLOBAL.max_relay_log_size;
+SET GLOBAL slave_parallel_threads=1;
+SET GLOBAL max_relay_log_size=100 * 1024;
+include/start_slave.inc
+connection server_1;
+create table t1 (i int, c varchar(1024));
+#
+# Insert some data to generate enough amount of binary logs
+#
+connection server_2;
+#
+# Assert that 'slave-relay-bin.999999' is purged.
+#
+NOT FOUND /slave-relay-bin.999999/ in slave-relay-bin.index
+include/stop_slave.inc
+SET GLOBAL slave_parallel_threads= @save_slave_parallel_threads;
+SET GLOBAL max_relay_log_size= @save_max_relay_log_size;
+include/start_slave.inc
+connection server_1;
+DROP TABLE t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_row_utf32.result b/mysql-test/suite/rpl/r/rpl_row_utf32.result
index af6e709860e..6d177b7cda0 100644
--- a/mysql-test/suite/rpl/r/rpl_row_utf32.result
+++ b/mysql-test/suite/rpl/r/rpl_row_utf32.result
@@ -3,7 +3,7 @@ include/master-slave.inc
SET SQL_LOG_BIN=0;
CREATE TABLE t1 (c1 char(255) DEFAULT NULL, KEY c1 (c1)) DEFAULT CHARSET=utf32;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
SET SQL_LOG_BIN=1;
connection slave;
SET @saved_slave_type_conversions= @@global.slave_type_conversions;
@@ -13,7 +13,7 @@ include/start_slave.inc
SET SQL_LOG_BIN=0;
CREATE TABLE t1 ( c1 varchar(255) DEFAULT NULL, KEY c1 (c1)) DEFAULT CHARSET=utf32;
Warnings:
-Warning 1071 Specified key was too long; max key length is 1000 bytes
+Note 1071 Specified key was too long; max key length is 1000 bytes
SET SQL_LOG_BIN=1;
connection master;
INSERT INTO t1(c1) VALUES ('insert into t1');
diff --git a/mysql-test/suite/rpl/r/rpl_spec_variables.result b/mysql-test/suite/rpl/r/rpl_spec_variables.result
index 96f63a50ea9..8b4c398f308 100644
--- a/mysql-test/suite/rpl/r/rpl_spec_variables.result
+++ b/mysql-test/suite/rpl/r/rpl_spec_variables.result
@@ -2,14 +2,17 @@ include/master-slave.inc
[connection master]
* auto_increment_increment, auto_increment_offset *
+connection master;
SET @@global.auto_increment_increment=2;
SET @@session.auto_increment_increment=2;
SET @@global.auto_increment_offset=10;
SET @@session.auto_increment_offset=10;
+connection slave;
SET @@global.auto_increment_increment=3;
SET @@session.auto_increment_increment=3;
SET @@global.auto_increment_offset=20;
SET @@session.auto_increment_offset=20;
+connection master;
CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
INSERT INTO t1 (b) VALUES ('master');
INSERT INTO t1 (b) VALUES ('master');
@@ -17,6 +20,7 @@ SELECT * FROM t1 ORDER BY a;
a b
2 master
4 master
+connection slave;
CREATE TABLE t2 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
INSERT INTO t1 (b) VALUES ('slave');
INSERT INTO t1 (b) VALUES ('slave');
@@ -32,102 +36,123 @@ SELECT * FROM t2 ORDER BY a;
a b
1 slave
4 slave
+connection master;
DROP TABLE IF EXISTS t1,t2;
SET @@global.auto_increment_increment=1;
SET @@session.auto_increment_increment=1;
SET @@global.auto_increment_offset=1;
SET @@session.auto_increment_offset=1;
+connection slave;
SET @@global.auto_increment_increment=1;
SET @@session.auto_increment_increment=1;
SET @@global.auto_increment_offset=1;
SET @@session.auto_increment_offset=1;
+connection slave;
SET auto_increment_increment=1;
SET auto_increment_offset=1;
* character_set_database, collation_server *
+connection master;
SET @restore_master_character_set_database=@@global.character_set_database;
SET @restore_master_collation_server=@@global.collation_server;
SET @@global.character_set_database=latin1;
SET @@session.character_set_database=latin1;
SET @@global.collation_server=latin1_german1_ci;
SET @@session.collation_server=latin1_german1_ci;
+connection slave;
SET @restore_slave_character_set_database=@@global.character_set_database;
SET @restore_slave_collation_server=@@global.collation_server;
SET @@global.character_set_database=utf8;
SET @@session.character_set_database=utf8;
SET @@global.collation_server=utf8_bin;
SET @@session.collation_server=utf8_bin;
+connection master;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
- `b` varchar(10) COLLATE latin1_german1_ci DEFAULT NULL,
+ `b` varchar(10) DEFAULT NULL,
PRIMARY KEY (`a`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_german1_ci
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+connection slave;
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
- `b` varchar(10) COLLATE latin1_german1_ci DEFAULT NULL,
+ `b` varchar(10) DEFAULT NULL,
PRIMARY KEY (`a`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_german1_ci
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
`a` int(11) NOT NULL,
- `b` varchar(10) COLLATE utf8_bin DEFAULT NULL,
+ `b` varchar(10) DEFAULT NULL,
PRIMARY KEY (`a`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
SET @@global.collation_server=latin1_swedish_ci;
SET @@session.collation_server=latin1_swedish_ci;
+connection master;
SET @@global.collation_server=latin1_swedish_ci;
SET @@session.collation_server=latin1_swedish_ci;
DROP TABLE IF EXISTS t1,t2;
* default_week_format *
+connection master;
SET @@global.default_week_format=0;
SET @@session.default_week_format=0;
+connection slave;
SET @@global.default_week_format=1;
SET @@session.default_week_format=1;
+connection master;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c INT) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1, 'master ', WEEK('2008-01-07'));
SELECT * FROM t1 ORDER BY a;
a b c
1 master 1
+connection slave;
INSERT INTO t1 VALUES (2, 'slave ', WEEK('2008-01-07'));
SELECT * FROM t1 ORDER BY a;
a b c
1 master 1
2 slave 2
+connection master;
DROP TABLE t1;
+connection slave;
SET @@global.default_week_format=0;
SET @@session.default_week_format=0;
* local_infile *
+connection slave;
SET @@global.local_infile=0;
+connection master;
CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(20), c CHAR(254)) ENGINE=MyISAM;
LOAD DATA LOCAL INFILE 'FILE' INTO TABLE t1 (b);
SELECT COUNT(*) FROM t1;
COUNT(*)
70
+connection slave;
LOAD DATA LOCAL INFILE 'FILE2' INTO TABLE t1 (b);
-ERROR 42000: The used command is not allowed with this MySQL version
+ERROR 42000: The used command is not allowed with this MariaDB version
SELECT COUNT(*) FROM t1;
COUNT(*)
70
SET @@global.local_infile=1;
+connection master;
DROP TABLE t1;
* max_heap_table_size *
+connection slave;
SET @restore_slave_max_heap_table_size=@@global.max_heap_table_size;
SET @@global.max_heap_table_size=16384;
SET @@session.max_heap_table_size=16384;
+connection master;
CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10), c CHAR(254)) ENGINE=MEMORY;
SELECT COUNT(*)=2000 FROM t1;
COUNT(*)=2000
1
+connection slave;
SELECT COUNT(*)=2000 FROM t1 WHERE b='master' GROUP BY b ORDER BY b;
COUNT(*)=2000
1
@@ -137,18 +162,24 @@ COUNT(*)<2000 AND COUNT(*)>0
SELECT COUNT(*)<2000 AND COUNT(*)>0 FROM t2 WHERE b='slave' GROUP BY b ORDER BY b;
COUNT(*)<2000 AND COUNT(*)>0
1
+connection master;
DROP TABLE IF EXISTS t1,t2;
* storage_engine *
+connection master;
SET @restore_master_storage_engine=@@global.storage_engine;
SET @@global.storage_engine=InnoDB;
SET @@session.storage_engine=InnoDB;
+connection slave;
SET @restore_slave_storage_engine=@@global.storage_engine;
SET @@global.storage_engine=Memory;
SET @@session.storage_engine=Memory;
+connection master;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10));
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB;
+connection slave;
CREATE TABLE t3 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10));
+connection master;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -163,6 +194,7 @@ t2 CREATE TABLE `t2` (
`b` varchar(10) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+connection slave;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -186,37 +218,49 @@ t3 CREATE TABLE `t3` (
) ENGINE=MEMORY DEFAULT CHARSET=latin1
SET @@global.storage_engine=InnoDB;
SET @@session.storage_engine=InnoDB;
+connection master;
DROP TABLE IF EXISTS t1,t2,t3;
* sql_mode *
+connection master;
+SET @old_sql_mode_master= @@global.sql_mode;
SET @@global.sql_mode=ANSI;
SET @@session.sql_mode=ANSI;
+connection slave;
+SET @old_sql_mode_slave= @@global.sql_mode;
SET @@global.sql_mode=TRADITIONAL;
SET @@session.sql_mode=TRADITIONAL;
+connection master;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c DATE);
INSERT INTO t1 VALUES (1, 'master', '0000-00-00');
SELECT * FROM t1 ORDER BY a;
a b c
1 master 0000-00-00
+connection slave;
INSERT INTO t1 VALUES (1, 'slave', '0000-00-00');
-ERROR 22007: Incorrect date value: '0000-00-00' for column 'c' at row 1
+ERROR 22007: Incorrect date value: '0000-00-00' for column `test`.`t1`.`c` at row 1
SELECT * FROM t1 ORDER BY a;
a b c
1 master 0000-00-00
SET @@global.sql_mode='';
SET @@session.sql_mode='';
+connection master;
SET @@global.sql_mode='';
SET @@session.sql_mode='';
DROP TABLE t1;
*** clean up ***
+connection master;
SET @@global.character_set_database=@restore_master_character_set_database;
SET @@global.collation_server=@restore_master_collation_server;
SET @@global.storage_engine=@restore_master_storage_engine;
+SET @@global.sql_mode=@old_sql_mode_master;
+connection slave;
SET @@global.character_set_database=@restore_slave_character_set_database;
SET @@global.collation_server=@restore_slave_collation_server;
SET @@global.max_heap_table_size=@restore_slave_max_heap_table_size;
SET @@global.storage_engine=@restore_slave_storage_engine;
+SET @@global.sql_mode=@old_sql_mode_slave;
call mtr.add_suppression("The table 't[12]' is full");
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_table_options.result b/mysql-test/suite/rpl/r/rpl_table_options.result
index 87fd8c2b2fb..14af4e390c2 100644
--- a/mysql-test/suite/rpl/r/rpl_table_options.result
+++ b/mysql-test/suite/rpl/r/rpl_table_options.result
@@ -5,24 +5,27 @@ set storage_engine=example;
connection slave;
connection master;
create table t1 (a int not null) ull=12340;
+alter table t1 ull=12350;
+Warnings:
+Note 1105 EXAMPLE DEBUG: ULL 12340 -> 12350
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
-) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=12340
+) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=12350
connection slave;
connection slave;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 /* `ull`=12340 */
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 /* `ull`=12350 */
set sql_mode=ignore_bad_table_options;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=latin1 `ull`=12340
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 `ull`=12350
connection master;
drop table t1;
set storage_engine=default;
diff --git a/mysql-test/suite/rpl/t/rpl_relay_max_extension.test b/mysql-test/suite/rpl/t/rpl_relay_max_extension.test
new file mode 100644
index 00000000000..e1e087f2e0e
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_relay_max_extension.test
@@ -0,0 +1,109 @@
+# ==== Purpose ====
+#
+# This test verifies that the auto-purging mechanism for relay logs works
+# correctly when the file extension grows beyond 999999.
+#
+# ==== Implementation ====
+#
+# Steps:
+# 0 - In a master-slave setup, clear all the relay logs on the slave server.
+# 1 - Start the slave so that new relay logs starting from
+# 'slave-relay-bin.000001' are created.
+# 2 - Get the active relay-log file name by using SHOW SLAVE STATUS.
+# Shut down the slave server.
+# 3 - Rename active relay log to '999997' in both 'relay-log.info' and
+# 'slave-relay-bin.index' files.
+# 4 - Restart the slave server by configuring 'slave_parallel_threads=1'
+# and 'max_relay_log_size=100K'.
+# 5 - Generate load on the master so that a few relay logs are generated on
+# the slave. The relay log sequence number will change to 7 digits.
+# 6 - Sync slave with master to ensure that relay logs are applied on
+# slave. They should have been automatically purged.
+# 7 - Assert that there is no reference to 'slave-relay-bin.999999' in
+# 'slave-relay-bin.index'.
+#
+# ==== References ====
+#
+# MDEV-8134: The relay-log is not flushed after the slave-relay-log.999999
+# showed
+#
+
+--source include/have_innodb.inc
+--source include/have_binlog_format_row.inc
+--let $rpl_topology=1->2
+--source include/rpl_init.inc
+
+--connection server_2
+--source include/stop_slave.inc
+RESET SLAVE;
+--source include/start_slave.inc
+--source include/stop_slave.inc
+--let $relay_log=query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1)
+
+--echo #
+--echo # Stop slave server
+--echo #
+
+--let $datadir = `select @@datadir`
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+--shutdown_server 10
+--source include/wait_until_disconnected.inc
+
+--exec sed -i "s/$relay_log/slave-relay-bin.999997/g" $datadir/relay-log.info
+--exec sed -i "s/$relay_log/slave-relay-bin.999997/g" $datadir/slave-relay-bin.index
+
+--echo #
+--echo # Simulate file number get close to 999997
+--echo # by renaming relay logs and modifying index/info files
+
+--move_file $datadir/$relay_log $datadir/slave-relay-bin.999997
+
+--echo #
+--echo # Restart slave server
+--echo #
+
+--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+SET @save_slave_parallel_threads= @@GLOBAL.slave_parallel_threads;
+SET @save_max_relay_log_size= @@GLOBAL.max_relay_log_size;
+
+SET GLOBAL slave_parallel_threads=1;
+SET GLOBAL max_relay_log_size=100 * 1024;
+--source include/start_slave.inc
+
+--connection server_1
+create table t1 (i int, c varchar(1024));
+--echo #
+--echo # Insert some data to generate enough amount of binary logs
+--echo #
+--let $count = 1000
+--disable_query_log
+while ($count)
+{
+ eval insert into t1 values (1001 - $count, repeat('a',1000));
+ dec $count;
+}
+--enable_query_log
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+
+--let $relay_log=query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1)
+
+--echo #
+--echo # Assert that 'slave-relay-bin.999999' is purged.
+--echo #
+let SEARCH_FILE=$datadir/slave-relay-bin.index;
+let SEARCH_PATTERN=slave-relay-bin.999999;
+source include/search_pattern_in_file.inc;
+
+--source include/stop_slave.inc
+SET GLOBAL slave_parallel_threads= @save_slave_parallel_threads;
+SET GLOBAL max_relay_log_size= @save_max_relay_log_size;
+--source include/start_slave.inc
+
+--connection server_1
+DROP TABLE t1;
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_row_create_table.test b/mysql-test/suite/rpl/t/rpl_row_create_table.test
index 65f14295c19..cb76d6c4dcb 100644
--- a/mysql-test/suite/rpl/t/rpl_row_create_table.test
+++ b/mysql-test/suite/rpl/t/rpl_row_create_table.test
@@ -7,11 +7,6 @@ connection slave;
--source include/have_innodb.inc
connection master;
-# Bug#18326: Do not lock table for writing during prepare of statement
-# The use of the ps protocol causes extra table maps in the binlog, so
-# we disable the ps-protocol for this statement.
---disable_ps_protocol
-
# Set the default storage engine to different values on master and
# slave. We need to stop the slave for the server variable to take
# effect, since the variable is only read on start-up.
diff --git a/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test b/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test
index 2d5cde82dcc..c10b3570ed6 100644
--- a/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test
+++ b/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test
@@ -4,10 +4,4 @@
let $rename_event_pos= `select @binlog_start_pos + 819`;
-# Bug#18326: Do not lock table for writing during prepare of statement
-# The use of the ps protocol causes extra table maps in the binlog, so
-# we disable the ps-protocol for this statement.
-
---disable_ps_protocol
-- source include/rpl_flsh_tbls.test
---enable_ps_protocol
diff --git a/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt b/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt
deleted file mode 100644
index 627becdbfb5..00000000000
--- a/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb
diff --git a/mysql-test/suite/rpl/t/rpl_spec_variables.test b/mysql-test/suite/rpl/t/rpl_spec_variables.test
index 2cb580fce83..e2d5175036c 100644
--- a/mysql-test/suite/rpl/t/rpl_spec_variables.test
+++ b/mysql-test/suite/rpl/t/rpl_spec_variables.test
@@ -260,10 +260,12 @@ DROP TABLE IF EXISTS t1,t2,t3;
--echo * sql_mode *
--connection master
+SET @old_sql_mode_master= @@global.sql_mode;
SET @@global.sql_mode=ANSI;
SET @@session.sql_mode=ANSI;
--connection slave
+SET @old_sql_mode_slave= @@global.sql_mode;
SET @@global.sql_mode=TRADITIONAL;
SET @@session.sql_mode=TRADITIONAL;
@@ -292,14 +294,16 @@ DROP TABLE t1;
SET @@global.character_set_database=@restore_master_character_set_database;
SET @@global.collation_server=@restore_master_collation_server;
SET @@global.storage_engine=@restore_master_storage_engine;
+SET @@global.sql_mode=@old_sql_mode_master;
--sync_slave_with_master
SET @@global.character_set_database=@restore_slave_character_set_database;
SET @@global.collation_server=@restore_slave_collation_server;
SET @@global.max_heap_table_size=@restore_slave_max_heap_table_size;
SET @@global.storage_engine=@restore_slave_storage_engine;
-
+SET @@global.sql_mode=@old_sql_mode_slave;
# Put at the end since the test otherwise emptied the table.
-
+remove_file $MYSQLTEST_VARDIR/tmp/words.dat;
+remove_file $MYSQLTEST_VARDIR/tmp/words2.dat;
--echo
call mtr.add_suppression("The table 't[12]' is full");
diff --git a/mysql-test/suite/rpl/t/rpl_table_options.test b/mysql-test/suite/rpl/t/rpl_table_options.test
index 3f52444a3c7..6dd1c9bd20d 100644
--- a/mysql-test/suite/rpl/t/rpl_table_options.test
+++ b/mysql-test/suite/rpl/t/rpl_table_options.test
@@ -18,6 +18,7 @@ connection master;
# the option is unknown.
#
create table t1 (a int not null) ull=12340;
+alter table t1 ull=12350;
show create table t1;
sync_slave_with_master;
diff --git a/mysql-test/suite/sql_sequence/mysqldump.result b/mysql-test/suite/sql_sequence/mysqldump.result
index e6aedb57ea6..fb023cc5e36 100644
--- a/mysql-test/suite/sql_sequence/mysqldump.result
+++ b/mysql-test/suite/sql_sequence/mysqldump.result
@@ -2,8 +2,46 @@ CREATE SEQUENCE a1 engine=aria;
CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024;
insert into t1 values (1),(2);
CREATE SEQUENCE x1 engine=innodb;
+# dump whole database
CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria;
SELECT SETVAL(`a1`, 1, 0);
+CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB;
+SELECT SETVAL(`x1`, 1, 0);
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024;
+/*!40101 SET character_set_client = @saved_cs_client */;
+INSERT INTO `t1` VALUES (1),(2);
+# dump by tables order 1
+CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria;
+SELECT SETVAL(`a1`, 1, 0);
+CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB;
+SELECT SETVAL(`x1`, 1, 0);
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024;
+/*!40101 SET character_set_client = @saved_cs_client */;
+INSERT INTO `t1` VALUES (1),(2);
+# dump by tables order 2
+CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria;
+SELECT SETVAL(`a1`, 1, 0);
+CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB;
+SELECT SETVAL(`x1`, 1, 0);
+/*!40101 SET @saved_cs_client = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024;
+/*!40101 SET character_set_client = @saved_cs_client */;
+INSERT INTO `t1` VALUES (1),(2);
+# dump by tables only tables
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `t1` (
@@ -12,8 +50,12 @@ CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024;
/*!40101 SET character_set_client = @saved_cs_client */;
INSERT INTO `t1` VALUES (1),(2);
+# dump by tables only sequences
+CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria;
+SELECT SETVAL(`a1`, 1, 0);
CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB;
SELECT SETVAL(`x1`, 1, 0);
+# end of dumps
DROP TABLE a1,t1,x1;
set default_storage_engine=InnoDB;
create sequence t1;
diff --git a/mysql-test/suite/sql_sequence/mysqldump.test b/mysql-test/suite/sql_sequence/mysqldump.test
index 308f06d5e8d..d2afb2fd675 100644
--- a/mysql-test/suite/sql_sequence/mysqldump.test
+++ b/mysql-test/suite/sql_sequence/mysqldump.test
@@ -11,7 +11,18 @@ CREATE SEQUENCE a1 engine=aria;
CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024;
insert into t1 values (1),(2);
CREATE SEQUENCE x1 engine=innodb;
+--echo # dump whole database
--exec $MYSQL_DUMP --compact test
+--echo # dump by tables order 1
+--exec $MYSQL_DUMP --compact --tables test t1 a1 x1
+--echo # dump by tables order 2
+--exec $MYSQL_DUMP --compact --tables test a1 t1 x1
+--echo # dump by tables only tables
+--exec $MYSQL_DUMP --compact --tables test t1
+--echo # dump by tables only sequences
+--exec $MYSQL_DUMP --compact --tables test a1 x1
+--echo # end of dumps
+
DROP TABLE a1,t1,x1;
#
diff --git a/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result b/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result
index a2c328f38fd..915343fcff2 100644
--- a/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result
@@ -24,6 +24,8 @@ select * from information_schema.session_variables where variable_name='innodb_i
VARIABLE_NAME VARIABLE_VALUE
INNODB_IDLE_FLUSH_PCT 100
set global innodb_idle_flush_pct=10;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
10
@@ -44,6 +46,7 @@ ERROR 42000: Incorrect argument type to variable 'innodb_idle_flush_pct'
set global innodb_idle_flush_pct=-7;
Warnings:
Warning 1292 Truncated incorrect innodb_idle_flush_pct value: '-7'
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
0
@@ -53,6 +56,7 @@ INNODB_IDLE_FLUSH_PCT 0
set global innodb_idle_flush_pct=106;
Warnings:
Warning 1292 Truncated incorrect innodb_idle_flush_pct value: '106'
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
100
@@ -60,18 +64,26 @@ select * from information_schema.global_variables where variable_name='innodb_id
VARIABLE_NAME VARIABLE_VALUE
INNODB_IDLE_FLUSH_PCT 100
set global innodb_idle_flush_pct=0;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
0
set global innodb_idle_flush_pct=100;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
100
set global innodb_idle_flush_pct=DEFAULT;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
select @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
100
SET @@global.innodb_idle_flush_pct = @start_global_value;
+Warnings:
+Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect.
SELECT @@global.innodb_idle_flush_pct;
@@global.innodb_idle_flush_pct
100
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff
index 50a1d1f197a..bfbbfb43c74 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff
@@ -250,7 +250,7 @@
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
- VARIABLE_COMMENT Up to what percentage of dirty pages should be flushed when innodb finds it has spare resources to do so.
+ VARIABLE_COMMENT DEPRECATED. This setting has no effect.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 100
@@ -1141,22 +1141,22 @@
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
index 849937ce3a4..00db4fc57bf 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
@@ -1106,7 +1106,7 @@ SESSION_VALUE NULL
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
-VARIABLE_COMMENT Up to what percentage of dirty pages should be flushed when innodb finds it has spare resources to do so.
+VARIABLE_COMMENT DEPRECATED. This setting has no effect.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 100
NUMERIC_BLOCK_SIZE 0
diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result
index 1c427b34d2b..9ef1b3290af 100644
--- a/mysql-test/suite/wsrep/r/variables.result
+++ b/mysql-test/suite/wsrep/r/variables.result
@@ -142,7 +142,7 @@ SELECT @@global.wsrep_sst_auth;
SET @@global.wsrep_sst_auth= '';
SELECT @@global.wsrep_sst_auth;
@@global.wsrep_sst_auth
-
+NULL
SET @@global.wsrep_sst_auth= NULL;
SELECT @@global.wsrep_sst_auth;
@@global.wsrep_sst_auth
diff --git a/plugin/feedback/sender_thread.cc b/plugin/feedback/sender_thread.cc
index 490fa92d312..3976c950541 100644
--- a/plugin/feedback/sender_thread.cc
+++ b/plugin/feedback/sender_thread.cc
@@ -47,7 +47,7 @@ static int table_to_string(TABLE *table, String *result)
res= table->file->ha_rnd_init(1);
- dbug_tmp_use_all_columns(table, table->read_set);
+ dbug_tmp_use_all_columns(table, &table->read_set);
while(!res && !table->file->ha_rnd_next(table->record[0]))
{
diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql
index 83a4f5b5eda..733ed0ed4c1 100644
--- a/scripts/mysql_system_tables_fix.sql
+++ b/scripts/mysql_system_tables_fix.sql
@@ -650,8 +650,7 @@ UPDATE user SET Delete_history_priv = Super_priv WHERE @had_user_delete_history_
ALTER TABLE user ADD plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL AFTER max_user_connections,
ADD authentication_string TEXT NOT NULL AFTER plugin;
ALTER TABLE user CHANGE auth_string authentication_string TEXT NOT NULL;
-ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL,
- MODIFY authentication_string TEXT NOT NULL;
+
ALTER TABLE user ADD password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER authentication_string;
ALTER TABLE user ADD password_last_changed timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL after password_expired;
ALTER TABLE user ADD password_lifetime smallint unsigned DEFAULT NULL after password_last_changed;
@@ -659,10 +658,25 @@ ALTER TABLE user ADD account_locked enum('N', 'Y') COLLATE utf8_general_ci DEFAU
ALTER TABLE user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER account_locked;
ALTER TABLE user ADD default_role char(80) binary DEFAULT '' NOT NULL AFTER is_role;
ALTER TABLE user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL AFTER default_role;
+
-- Somewhere above, we ran ALTER TABLE user .... CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin.
--- we want password_expired column to have collation utf8_general_ci.
-ALTER TABLE user MODIFY password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
-ALTER TABLE user MODIFY is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+-- we want password_expired column to have collation utf8_general_ci.
+-- Order columns correctly that were not ordered until MDEV-23201 (ff8ffef3e1915d7a9caa07d9461cd8d47c4baf98)
+
+ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL AFTER max_user_connections,
+ MODIFY authentication_string TEXT NOT NULL AFTER plugin,
+ MODIFY password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER authentication_string,
+ MODIFY is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER password_expired,
+ MODIFY default_role char(80) binary DEFAULT '' NOT NULL AFTER is_role,
+ MODIFY max_statement_time decimal(12,6) DEFAULT 0 NOT NULL AFTER default_role,
+-- MDEV-24122 formerly mysql5.7 users may have the following columns password_last_changed,
+-- password_lifetime and account_locked. Ensure they are beyond the end of the user columns
+-- used by MariaDB. MariaDB-10.4 will use these in the creation of mysql.global_priv.
+-- password_last_changed has a DEFAULT/ON UPDATE of CURRENT_TIMESTAMP to keep track of
+-- time until 10.4 added.
+ MODIFY IF EXISTS password_last_changed timestamp DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP AFTER max_statement_time,
+ MODIFY IF EXISTS password_lifetime smallint unsigned DEFAULT NULL AFTER password_last_changed,
+ MODIFY IF EXISTS account_locked enum('N', 'Y') CHARACTER SET utf8 DEFAULT 'N' NOT NULL after password_lifetime;
-- Checking for any duplicate hostname and username combination are exists.
-- If exits we will throw error.
diff --git a/sql/create_options.cc b/sql/create_options.cc
index a8d997efaf4..066adcd92e3 100644
--- a/sql/create_options.cc
+++ b/sql/create_options.cc
@@ -98,14 +98,13 @@ static bool report_unknown_option(THD *thd, engine_option_value *val,
{
DBUG_ENTER("report_unknown_option");
- if (val->parsed || suppress_warning)
+ if (val->parsed || suppress_warning || thd->slave_thread)
{
DBUG_PRINT("info", ("parsed => exiting"));
DBUG_RETURN(FALSE);
}
- if (!(thd->variables.sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) &&
- !thd->slave_thread)
+ if (!(thd->variables.sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS))
{
my_error(ER_UNKNOWN_OPTION, MYF(0), val->name.str);
DBUG_RETURN(TRUE);
diff --git a/sql/field.cc b/sql/field.cc
index 70598619acd..7aea222ca07 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -8098,11 +8098,10 @@ uint Field_varstring::get_key_image(uchar *buff, uint length,
{
String val;
uint local_char_length;
- my_bitmap_map *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
val_str(&val, &val);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
local_char_length= val.charpos(length / field_charset->mbmaxlen);
if (local_char_length < val.length())
@@ -11396,7 +11395,7 @@ key_map Field::get_possible_keys()
bool Field::validate_value_in_record_with_warn(THD *thd, const uchar *record)
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
bool rc;
if ((rc= validate_value_in_record(thd, record)))
{
@@ -11408,7 +11407,7 @@ bool Field::validate_value_in_record_with_warn(THD *thd, const uchar *record)
ER_THD(thd, ER_INVALID_DEFAULT_VALUE_FOR_FIELD),
ErrConvString(&tmp).ptr(), field_name.str);
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
return rc;
}
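
The field.cc hunk above, and the analogous read_set/write_set hunks in the files that follow, all apply the same mechanical change: the debug helpers now take the address of the column set and hand back the previous MY_BITMAP pointer, so saving and restoring the map becomes a plain pointer swap (the two-map array variants in item_cmpfunc.cc and opt_range.cc follow the same idea). Below is a self-contained sketch of that idiom; MY_BITMAP, MockTable and the two helpers here are illustrative stand-ins, not the server's definitions.

    #include <cstdio>

    struct MY_BITMAP { const char *name; };            // stand-in for the real bitmap

    struct MockTable
    {
      MY_BITMAP *read_set;                              // column map currently in effect
      MY_BITMAP  all_columns{"all columns"};            // map with every column readable
    };

    // mirrors the new signature: pass &table->read_set, get the old map back
    static MY_BITMAP *tmp_use_all_columns(MockTable *t, MY_BITMAP **set)
    {
      MY_BITMAP *old= *set;
      *set= &t->all_columns;
      return old;
    }

    static void tmp_restore_column_map(MY_BITMAP **set, MY_BITMAP *old)
    {
      *set= old;                                        // put the saved map back
    }

    int main()
    {
      MY_BITMAP partial{"key columns only"};
      MockTable t{&partial};

      MY_BITMAP *old_map= tmp_use_all_columns(&t, &t.read_set);
      std::printf("during: %s\n", t.read_set->name);    // all columns
      tmp_restore_column_map(&t.read_set, old_map);
      std::printf("after:  %s\n", t.read_set->name);    // key columns only
      return 0;
    }
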
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 609e2135a99..9313282630f 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -4282,7 +4282,7 @@ int ha_partition::write_row(const uchar * buf)
int error;
longlong func_value;
bool have_auto_increment= table->next_number_field && buf == table->record[0];
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
THD *thd= ha_thd();
sql_mode_t saved_sql_mode= thd->variables.sql_mode;
bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
@@ -4324,9 +4324,9 @@ int ha_partition::write_row(const uchar * buf)
}
}
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
if (unlikely(error))
{
m_part_info->err_value= func_value;
@@ -11326,13 +11326,12 @@ int ha_partition::bulk_update_row(const uchar *old_data, const uchar *new_data,
int error= 0;
uint32 part_id;
longlong func_value;
- my_bitmap_map *old_map;
DBUG_ENTER("ha_partition::bulk_update_row");
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
error= m_part_info->get_partition_id(m_part_info, &part_id,
&func_value);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
if (unlikely(error))
{
m_part_info->err_value= func_value;
diff --git a/sql/handler.cc b/sql/handler.cc
index 34fc2ad7e67..98f651971c3 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -859,9 +859,11 @@ static my_bool kill_handlerton(THD *thd, plugin_ref plugin,
{
handlerton *hton= plugin_hton(plugin);
+ mysql_mutex_lock(&thd->LOCK_thd_data);
if (hton->state == SHOW_OPTION_YES && hton->kill_query &&
thd_get_ha_data(thd, hton))
hton->kill_query(hton, thd, *(enum thd_kill_levels *) level);
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
return FALSE;
}
@@ -5211,6 +5213,7 @@ int ha_create_table(THD *thd, const char *path,
char name_buff[FN_REFLEN];
const char *name;
TABLE_SHARE share;
+ Abort_on_warning_instant_set old_abort_on_warning(thd, 0);
bool temp_table __attribute__((unused)) =
create_info->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER);
DBUG_ENTER("ha_create_table");
diff --git a/sql/handler.h b/sql/handler.h
index 5dd3f6f5c5e..85da8bc4c22 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1507,7 +1507,7 @@ struct handlerton
enum handler_create_iterator_result
(*create_iterator)(handlerton *hton, enum handler_iterator_type type,
struct handler_iterator *fill_this_in);
- int (*abort_transaction)(handlerton *hton, THD *bf_thd,
+ void (*abort_transaction)(handlerton *hton, THD *bf_thd,
THD *victim_thd, my_bool signal);
int (*set_checkpoint)(handlerton *hton, const XID* xid);
int (*get_checkpoint)(handlerton *hton, XID* xid);
diff --git a/sql/item.cc b/sql/item.cc
index f700bcfe680..1228b37f144 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1423,9 +1423,9 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions)
Sql_mode_save sql_mode(thd);
thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE);
thd->variables.sql_mode|= MODE_INVALID_DATES;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
res= save_in_field(field, no_conversions);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return res;
}
@@ -5753,7 +5753,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
Field *from_field= (Field *)not_found_field;
bool outer_fixed= false;
SELECT_LEX *select= thd->lex->current_select;
-
+
if (select && select->in_tvc)
{
my_error(ER_FIELD_REFERENCE_IN_TVC, MYF(0), full_name());
@@ -6652,7 +6652,7 @@ Item *Item_string::make_odbc_literal(THD *thd, const LEX_CSTRING *typestr)
}
-static int save_int_value_in_field (Field *field, longlong nr,
+static int save_int_value_in_field (Field *field, longlong nr,
bool null_value, bool unsigned_flag)
{
if (null_value)
@@ -8427,6 +8427,22 @@ bool Item_direct_ref::val_native(THD *thd, Native *to)
}
+longlong Item_direct_ref::val_time_packed()
+{
+ longlong tmp = (*ref)->val_time_packed();
+ null_value= (*ref)->null_value;
+ return tmp;
+}
+
+
+longlong Item_direct_ref::val_datetime_packed()
+{
+ longlong tmp = (*ref)->val_datetime_packed();
+ null_value= (*ref)->null_value;
+ return tmp;
+}
+
+
Item_cache_wrapper::~Item_cache_wrapper()
{
DBUG_ASSERT(expr_cache == 0);
diff --git a/sql/item.h b/sql/item.h
index e2e18ce9b86..b960dfe6871 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -5569,14 +5569,17 @@ public:
return Item_ref::fix_fields(thd, it);
}
void save_val(Field *to);
+ /* Below we should have all val() methods as in Item_ref */
double val_real();
longlong val_int();
- String *val_str(String* tmp);
bool val_native(THD *thd, Native *to);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
+ String *val_str(String* tmp);
bool is_null();
bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ longlong val_datetime_packed();
+ longlong val_time_packed();
virtual Ref_Type ref_type() { return DIRECT_REF; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_direct_ref>(thd, this); }
@@ -5905,6 +5908,20 @@ public:
}
return Item_direct_ref::get_date(thd, ltime, fuzzydate);
}
+ longlong val_time_packed()
+ {
+ if (check_null_ref())
+ return 0;
+ else
+ return Item_direct_ref::val_time_packed();
+ }
+ longlong val_datetime_packed()
+ {
+ if (check_null_ref())
+ return 0;
+ else
+ return Item_direct_ref::val_datetime_packed();
+ }
bool send(Protocol *protocol, st_value *buffer);
void save_org_in_field(Field *field,
fast_field_copier data __attribute__ ((__unused__)))
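
The item.cc and item.h hunks add packed temporal getters to the direct-ref wrappers: delegate to the referenced item, mirror its NULL flag, and in the null-guarded subclass short-circuit to 0 before touching the reference. A self-contained sketch of that delegation shape follows; the class and member names below are illustrative stand-ins, not the server's Item hierarchy.

    #include <cstdio>

    struct Wrapped
    {
      bool null_value= false;
      long long val_time_packed() const { return 4660; }       // pretend packed TIME value
    };

    struct DirectRef
    {
      Wrapped **ref;
      bool null_value= false;

      long long val_time_packed()
      {
        long long tmp= (*ref)->val_time_packed();   // delegate to the referenced item
        null_value= (*ref)->null_value;             // mirror its NULL flag
        return tmp;
      }
    };

    struct DirectRefOrNull : DirectRef
    {
      bool check_null_ref() const { return *ref == nullptr; }   // illustrative guard

      long long val_time_packed()
      {
        if (check_null_ref())
          return 0;                                 // NULL reference: packed value is 0
        return DirectRef::val_time_packed();
      }
    };

    int main()
    {
      Wrapped item;
      Wrapped *slot= &item;
      DirectRefOrNull r;
      r.ref= &slot;
      std::printf("%lld (null=%d)\n", r.val_time_packed(), (int) r.null_value);
      slot= nullptr;
      std::printf("%lld\n", r.val_time_packed());   // guarded path returns 0
      return 0;
    }
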
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 4f878d8437c..bbd5fbf073a 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -321,13 +321,13 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item,
TABLE *table= field->table;
Sql_mode_save sql_mode(thd);
Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE);
- my_bitmap_map *old_maps[2] = { NULL, NULL };
+ MY_BITMAP *old_maps[2] = { NULL, NULL };
ulonglong UNINIT_VAR(orig_field_val); /* original field value if valid */
/* table->read_set may not be set if we come here from a CREATE TABLE */
if (table && table->read_set)
dbug_tmp_use_all_columns(table, old_maps,
- table->read_set, table->write_set);
+ &table->read_set, &table->write_set);
/* For comparison purposes allow invalid dates like 2000-01-32 */
thd->variables.sql_mode= (thd->variables.sql_mode & ~MODE_NO_ZERO_DATE) |
MODE_INVALID_DATES;
@@ -368,7 +368,7 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item,
DBUG_ASSERT(!result);
}
if (table && table->read_set)
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_maps);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_maps);
}
return result;
}
@@ -1198,9 +1198,9 @@ longlong Item_func_truth::val_int()
bool Item_in_optimizer::is_top_level_item()
{
- if (invisible_mode())
- return FALSE;
- return ((Item_in_subselect *)args[1])->is_top_level_item();
+ if (!invisible_mode())
+ return ((Item_in_subselect *)args[1])->is_top_level_item();
+ return false;
}
@@ -3196,7 +3196,7 @@ bool Item_func_decode_oracle::fix_length_and_dec()
/*
Aggregate all THEN and ELSE expression types
and collations when string result
-
+
@param THD - current thd
@param start - an element in args to start aggregating from
*/
diff --git a/sql/key.cc b/sql/key.cc
index adff6975631..6f0a1112497 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -244,14 +244,13 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info,
else if (key_part->key_part_flag & HA_VAR_LENGTH_PART)
{
Field *field= key_part->field;
- my_bitmap_map *old_map;
my_ptrdiff_t ptrdiff= to_record - field->table->record[0];
field->move_field_offset(ptrdiff);
key_length-= HA_KEY_BLOB_LENGTH;
length= MY_MIN(key_length, key_part->length);
- old_map= dbug_tmp_use_all_columns(field->table, field->table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table, &field->table->write_set);
field->set_key_image(from_key, length);
- dbug_tmp_restore_column_map(field->table->write_set, old_map);
+ dbug_tmp_restore_column_map(&field->table->write_set, old_map);
from_key+= HA_KEY_BLOB_LENGTH;
field->move_field_offset(-ptrdiff);
}
@@ -419,7 +418,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
void key_unpack(String *to, TABLE *table, KEY *key)
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
DBUG_ENTER("key_unpack");
to->length(0);
@@ -443,7 +442,7 @@ void key_unpack(String *to, TABLE *table, KEY *key)
field_unpack(to, key_part->field, table->record[0], key_part->length,
MY_TEST(key_part->key_part_flag & HA_PART_KEY_SEG));
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_VOID_RETURN;
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 20a41dc8aee..52995cc7e66 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -13741,11 +13741,11 @@ int Rows_log_event::update_sequence()
/* This event come from a setval function executed on the master.
Update the sequence next_number and round, like we do with setval()
*/
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
- table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
+ &table->read_set);
longlong nextval= table->field[NEXT_FIELD_NO]->val_int();
longlong round= table->field[ROUND_FIELD_NO]->val_int();
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
return table->s->sequence->set_value(table, nextval, round, 0) > 0;
}
diff --git a/sql/mdl.cc b/sql/mdl.cc
index 93b7982c4a5..bea02f2bf6a 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -1138,7 +1138,7 @@ MDL_wait::timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout,
DBUG_ASSERT(!debug_sync_set_action((owner->get_thd()),
STRING_WITH_LEN(act)));
};);
- if (wsrep_thd_is_BF(owner->get_thd(), false))
+ if (WSREP_ON && wsrep_thd_is_BF(owner->get_thd(), false))
{
wait_result= mysql_cond_wait(&m_COND_wait_status, &m_LOCK_wait_status);
}
@@ -1211,7 +1211,7 @@ void MDL_lock::Ticket_list::add_ticket(MDL_ticket *ticket)
*/
DBUG_ASSERT(ticket->get_lock());
#ifdef WITH_WSREP
- if ((this == &(ticket->get_lock()->m_waiting)) &&
+ if (WSREP_ON && (this == &(ticket->get_lock()->m_waiting)) &&
wsrep_thd_is_BF(ticket->get_ctx()->get_thd(), false))
{
Ticket_iterator itw(ticket->get_lock()->m_waiting);
@@ -1762,7 +1762,7 @@ MDL_lock::can_grant_lock(enum_mdl_type type_arg,
non WSREP threads must report conflict immediately
note: RSU processing wsrep threads, have wsrep_on==OFF
*/
- if (WSREP(requestor_ctx->get_thd()) ||
+ if (WSREP_ON && WSREP(requestor_ctx->get_thd()) ||
requestor_ctx->get_thd()->wsrep_cs().mode() ==
wsrep::client_state::m_rsu)
{
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index a74fb4326e4..ffdbadbe129 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -363,7 +363,6 @@ static bool binlog_format_used= false;
LEX_STRING opt_init_connect, opt_init_slave;
mysql_cond_t COND_thread_cache;
static mysql_cond_t COND_flush_thread_cache;
-mysql_cond_t COND_slave_background;
static DYNAMIC_ARRAY all_options;
static longlong start_memory_used;
@@ -701,7 +700,7 @@ mysql_mutex_t
LOCK_crypt,
LOCK_global_system_variables,
LOCK_user_conn,
- LOCK_connection_count, LOCK_error_messages, LOCK_slave_background;
+ LOCK_connection_count, LOCK_error_messages;
mysql_mutex_t LOCK_stats, LOCK_global_user_client_stats,
LOCK_global_table_stats, LOCK_global_index_stats;
@@ -893,8 +892,7 @@ PSI_mutex_key key_LOCK_stats,
PSI_mutex_key key_LOCK_gtid_waiting;
PSI_mutex_key key_LOCK_after_binlog_sync;
-PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered,
- key_LOCK_slave_background;
+PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered;
PSI_mutex_key key_TABLE_SHARE_LOCK_share;
PSI_mutex_key key_LOCK_ack_receiver;
@@ -968,7 +966,6 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL},
{ &key_LOCK_after_binlog_sync, "LOCK_after_binlog_sync", PSI_FLAG_GLOBAL},
{ &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL},
- { &key_LOCK_slave_background, "LOCK_slave_background", PSI_FLAG_GLOBAL},
{ &key_LOCK_thread_cache, "LOCK_thread_cache", PSI_FLAG_GLOBAL},
{ &key_PARTITION_LOCK_auto_inc, "HA_DATA_PARTITION::LOCK_auto_inc", 0},
{ &key_LOCK_slave_state, "LOCK_slave_state", 0},
@@ -1039,7 +1036,7 @@ PSI_cond_key key_TC_LOG_MMAP_COND_queue_busy;
PSI_cond_key key_COND_rpl_thread_queue, key_COND_rpl_thread,
key_COND_rpl_thread_stop, key_COND_rpl_thread_pool,
key_COND_parallel_entry, key_COND_group_commit_orderer,
- key_COND_prepare_ordered, key_COND_slave_background;
+ key_COND_prepare_ordered;
PSI_cond_key key_COND_wait_gtid, key_COND_gtid_ignore_duplicates;
PSI_cond_key key_COND_ack_receiver;
@@ -1087,7 +1084,6 @@ static PSI_cond_info all_server_conds[]=
{ &key_COND_parallel_entry, "COND_parallel_entry", 0},
{ &key_COND_group_commit_orderer, "COND_group_commit_orderer", 0},
{ &key_COND_prepare_ordered, "COND_prepare_ordered", 0},
- { &key_COND_slave_background, "COND_slave_background", 0},
{ &key_COND_start_thread, "COND_start_thread", PSI_FLAG_GLOBAL},
{ &key_COND_wait_gtid, "COND_wait_gtid", 0},
{ &key_COND_gtid_ignore_duplicates, "COND_gtid_ignore_duplicates", 0},
@@ -1792,6 +1788,7 @@ static void close_connections(void)
{
wsrep_deinit(true);
}
+ wsrep_sst_auth_free();
#endif
/* All threads has now been aborted */
DBUG_PRINT("quit", ("Waiting for threads to die (count=%u)",
@@ -1905,6 +1902,7 @@ extern "C" void unireg_abort(int exit_code)
wsrep_deinit(true);
wsrep_deinit_server();
}
+ wsrep_sst_auth_free();
#endif // WITH_WSREP
clean_up(!opt_abort && (exit_code || !opt_bootstrap)); /* purecov: inspected */
@@ -2140,8 +2138,6 @@ static void clean_up_mutexes()
mysql_cond_destroy(&COND_prepare_ordered);
mysql_mutex_destroy(&LOCK_after_binlog_sync);
mysql_mutex_destroy(&LOCK_commit_ordered);
- mysql_mutex_destroy(&LOCK_slave_background);
- mysql_cond_destroy(&COND_slave_background);
#ifndef EMBEDDED_LIBRARY
mysql_mutex_destroy(&LOCK_error_log);
#endif
@@ -4555,9 +4551,6 @@ static int init_thread_environment()
MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered,
MY_MUTEX_INIT_SLOW);
- mysql_mutex_init(key_LOCK_slave_background, &LOCK_slave_background,
- MY_MUTEX_INIT_SLOW);
- mysql_cond_init(key_COND_slave_background, &COND_slave_background, NULL);
#ifdef HAVE_OPENSSL
mysql_mutex_init(key_LOCK_des_key_file,
@@ -5255,6 +5248,10 @@ static int init_server_components()
that there are unprocessed options.
*/
my_getopt_skip_unknown= 0;
+#ifdef WITH_WSREP
+ if (wsrep_recovery)
+ my_getopt_skip_unknown= TRUE;
+#endif
if ((ho_error= handle_options(&remaining_argc, &remaining_argv, no_opts,
mysqld_get_one_option)))
@@ -5264,20 +5261,27 @@ static int init_server_components()
remaining_argv--;
my_getopt_skip_unknown= TRUE;
- if (remaining_argc > 1)
+#ifdef WITH_WSREP
+ if (!wsrep_recovery)
{
- fprintf(stderr, "%s: Too many arguments (first extra is '%s').\n",
- my_progname, remaining_argv[1]);
- unireg_abort(1);
+#endif
+ if (remaining_argc > 1)
+ {
+ fprintf(stderr, "%s: Too many arguments (first extra is '%s').\n",
+ my_progname, remaining_argv[1]);
+ unireg_abort(1);
+ }
+#ifdef WITH_WSREP
}
+#endif
}
- if (init_io_cache_encryption())
- unireg_abort(1);
-
if (opt_abort)
unireg_abort(0);
+ if (init_io_cache_encryption())
+ unireg_abort(1);
+
/* if the errmsg.sys is not loaded, terminate to maintain behaviour */
if (!DEFAULT_ERRMSGS[0][0])
unireg_abort(1);
diff --git a/sql/mysqld.h b/sql/mysqld.h
index bd45ff7b798..c1469caced5 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -615,8 +615,7 @@ extern mysql_mutex_t
LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
LOCK_active_mi, LOCK_manager, LOCK_user_conn,
- LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count,
- LOCK_slave_background;
+ LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count;
extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_global_system_variables;
extern mysql_rwlock_t LOCK_all_status_vars;
extern mysql_mutex_t LOCK_start_thread;
@@ -631,7 +630,6 @@ extern mysql_rwlock_t LOCK_ssl_refresh;
extern mysql_prlock_t LOCK_system_variables_hash;
extern mysql_cond_t COND_start_thread;
extern mysql_cond_t COND_manager;
-extern mysql_cond_t COND_slave_background;
extern Atomic_counter<uint32_t> thread_count;
extern char *opt_ssl_ca, *opt_ssl_capath, *opt_ssl_cert, *opt_ssl_cipher,
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 97ebbdbe8a7..8451fb87994 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -3636,8 +3636,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
void store_key_image_to_rec(Field *field, uchar *ptr, uint len)
{
- /* Do the same as print_key() does */
- my_bitmap_map *old_map;
+ /* Do the same as print_key() does */
if (field->real_maybe_null())
{
@@ -3649,10 +3648,10 @@ void store_key_image_to_rec(Field *field, uchar *ptr, uint len)
field->set_notnull();
ptr++;
}
- old_map= dbug_tmp_use_all_columns(field->table,
- field->table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table,
+ &field->table->write_set);
field->set_key_image(ptr, len);
- dbug_tmp_restore_column_map(field->table->write_set, old_map);
+ dbug_tmp_restore_column_map(&field->table->write_set, old_map);
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -3867,7 +3866,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
PART_PRUNE_PARAM prune_param;
MEM_ROOT alloc;
RANGE_OPT_PARAM *range_par= &prune_param.range_param;
- my_bitmap_map *old_sets[2];
+ MY_BITMAP *old_sets[2];
prune_param.part_info= part_info;
init_sql_alloc(&alloc, "prune_partitions",
@@ -3884,7 +3883,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
}
dbug_tmp_use_all_columns(table, old_sets,
- table->read_set, table->write_set);
+ &table->read_set, &table->write_set);
range_par->thd= thd;
range_par->table= table;
/* range_par->cond doesn't need initialization */
@@ -3981,7 +3980,7 @@ all_used:
retval= FALSE; // some partitions are used
mark_all_partitions_as_used(prune_param.part_info);
end:
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
thd->no_errors=0;
thd->mem_root= range_par->old_root;
free_root(&alloc,MYF(0)); // Return memory & allocator
@@ -11122,6 +11121,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
uint keynr= param->real_keynr[idx];
DBUG_ENTER("check_quick_select");
+ param->is_ror_scan= FALSE;
/* Handle cases when we don't have a valid non-empty list of range */
if (!tree)
DBUG_RETURN(HA_POS_ERROR);
@@ -15658,8 +15658,8 @@ static void
print_sel_arg_key(Field *field, const uchar *key, String *out)
{
TABLE *table= field->table;
- my_bitmap_map *old_sets[2];
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ MY_BITMAP *old_sets[2];
+ dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set);
if (field->real_maybe_null())
{
@@ -15679,7 +15679,7 @@ print_sel_arg_key(Field *field, const uchar *key, String *out)
field->val_str(out);
end:
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
}
@@ -15774,9 +15774,9 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length)
const uchar *key_end= key+used_length;
uint store_length;
TABLE *table= key_part->field->table;
- my_bitmap_map *old_sets[2];
+ MY_BITMAP *old_sets[2];
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set);
for (; key < key_end; key+=store_length, key_part++)
{
@@ -15803,7 +15803,7 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length)
if (key+store_length < key_end)
fputc('/',DBUG_FILE);
}
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
}
@@ -15811,16 +15811,16 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
{
char buf[MAX_KEY/8+1];
TABLE *table;
- my_bitmap_map *old_sets[2];
+ MY_BITMAP *old_sets[2];
DBUG_ENTER("print_quick");
if (!quick)
DBUG_VOID_RETURN;
DBUG_LOCK_FILE;
table= quick->head;
- dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+ dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set);
quick->dbug_dump(0, TRUE);
- dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+ dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets);
fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf));
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 9f08964e62c..a8459438be7 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -1449,13 +1449,13 @@ void partition_info::print_no_partition_found(TABLE *table_arg, myf errflag)
buf_ptr= (char*)"from column_list";
else
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table_arg, table_arg->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table_arg, &table_arg->read_set);
if (part_expr->null_value)
buf_ptr= (char*)"NULL";
else
longlong10_to_str(err_value, buf,
part_expr->unsigned_flag ? 10 : -10);
- dbug_tmp_restore_column_map(table_arg->read_set, old_map);
+ dbug_tmp_restore_column_map(&table_arg->read_set, old_map);
}
my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, errflag, buf_ptr);
}
diff --git a/sql/protocol.cc b/sql/protocol.cc
index aa9651e974c..eb7f19d2bd0 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -1269,15 +1269,15 @@ bool Protocol_text::store(Field *field)
CHARSET_INFO *tocs= this->thd->variables.character_set_results;
#ifdef DBUG_ASSERT_EXISTS
TABLE *table= field->table;
- my_bitmap_map *old_map= 0;
+ MY_BITMAP *old_map= 0;
if (table->file)
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
#endif
field->val_str(&str);
#ifdef DBUG_ASSERT_EXISTS
if (old_map)
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
#endif
return store_string_aux(str.ptr(), str.length(), str.charset(), tocs);
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 154636480ca..0ac1533519f 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -8,6 +8,7 @@
#ifdef WITH_WSREP
#include "wsrep_trans_observer.h"
#endif
+#include "sql_repl.h"
/*
Code for optional parallel execution of replicated events on the slave.
@@ -100,7 +101,7 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev)
return;
mysql_mutex_lock(&rli->data_lock);
- cmp= strcmp(rli->group_relay_log_name, qev->event_relay_log_name);
+ cmp= compare_log_name(rli->group_relay_log_name, qev->event_relay_log_name);
if (cmp < 0)
{
rli->group_relay_log_pos= qev->future_event_relay_log_pos;
@@ -109,7 +110,7 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev)
rli->group_relay_log_pos < qev->future_event_relay_log_pos)
rli->group_relay_log_pos= qev->future_event_relay_log_pos;
- cmp= strcmp(rli->group_master_log_name, qev->future_event_master_log_name);
+ cmp= compare_log_name(rli->group_master_log_name, qev->future_event_master_log_name);
if (cmp < 0)
{
strcpy(rli->group_master_log_name, qev->future_event_master_log_name);
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index bcdff1e33a8..c8f77acf523 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -991,7 +991,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
if (rgi->is_parallel_exec)
{
/* In case of parallel replication, do not update the position backwards. */
- int cmp= strcmp(group_relay_log_name, rgi->event_relay_log_name);
+ int cmp= compare_log_name(group_relay_log_name, rgi->event_relay_log_name);
if (cmp < 0)
{
group_relay_log_pos= rgi->future_event_relay_log_pos;
@@ -1003,7 +1003,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos,
In the parallel case we need to update the master_log_name here, rather
than in Rotate_log_event::do_update_pos().
*/
- cmp= strcmp(group_master_log_name, rgi->future_event_master_log_name);
+ cmp= compare_log_name(group_master_log_name, rgi->future_event_master_log_name);
if (cmp <= 0)
{
if (cmp < 0)
diff --git a/sql/slave.cc b/sql/slave.cc
index 9d4049c6452..7e261dc0233 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -64,6 +64,7 @@
#include "rpl_parallel.h"
#include "sql_show.h"
#include "semisync_slave.h"
+#include "sql_manager.h"
#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
@@ -363,9 +364,9 @@ end:
}
-static void
-handle_gtid_pos_auto_create_request(THD *thd, void *hton)
+static void bg_gtid_pos_auto_create(void *hton)
{
+ THD *thd= NULL;
int UNINIT_VAR(err);
plugin_ref engine= NULL, *auto_engines;
rpl_slave_state::gtid_pos_table *entry;
@@ -377,7 +378,6 @@ handle_gtid_pos_auto_create_request(THD *thd, void *hton)
it.
*/
mysql_mutex_lock(&LOCK_global_system_variables);
- engine= NULL;
for (auto_engines= opt_gtid_pos_auto_plugins;
auto_engines && *auto_engines;
++auto_engines)
@@ -422,6 +422,13 @@ handle_gtid_pos_auto_create_request(THD *thd, void *hton)
table_name.str= loc_table_name.c_ptr_safe();
table_name.length= loc_table_name.length();
+ thd= new THD(next_thread_id());
+ thd->thread_stack= (char*) &thd; /* Set approximate stack start */
+ thd->system_thread = SYSTEM_THREAD_SLAVE_BACKGROUND;
+ thd->store_globals();
+ thd->security_ctx->skip_grants();
+ thd->set_command(COM_DAEMON);
+ thd->variables.wsrep_on= 0;
err= gtid_pos_table_creation(thd, engine, &table_name);
if (err)
{
@@ -449,15 +456,15 @@ handle_gtid_pos_auto_create_request(THD *thd, void *hton)
mysql_mutex_unlock(&rpl_global_gtid_slave_state->LOCK_slave_state);
end:
+ delete thd;
if (engine)
plugin_unlock(NULL, engine);
}
-static bool slave_background_thread_running;
-static bool slave_background_thread_stop;
static bool slave_background_thread_gtid_loaded;
+<<<<<<< HEAD
static struct slave_background_kill_t {
slave_background_kill_t *next;
THD *to_kill;
@@ -473,21 +480,31 @@ static volatile bool slave_background_gtid_pending_delete_flag;
pthread_handler_t
handle_slave_background(void *arg __attribute__((unused)))
-{
- THD *thd;
- PSI_stage_info old_stage;
- bool stop;
+||||||| 75538f94ca0
+static struct slave_background_kill_t {
+ slave_background_kill_t *next;
+ THD *to_kill;
+} *slave_background_kill_list;
- my_thread_init();
- thd= new THD(next_thread_id());
+static struct slave_background_gtid_pos_create_t {
+ slave_background_gtid_pos_create_t *next;
+ void *hton;
+} *slave_background_gtid_pos_create_list;
+
+
+pthread_handler_t
+handle_slave_background(void *arg __attribute__((unused)))
+=======
+static void bg_rpl_load_gtid_slave_state(void *)
+>>>>>>> bb-10.3-release
+{
+ THD *thd= new THD(next_thread_id());
thd->thread_stack= (char*) &thd; /* Set approximate stack start */
thd->system_thread = SYSTEM_THREAD_SLAVE_BACKGROUND;
thd->store_globals();
thd->security_ctx->skip_grants();
thd->set_command(COM_DAEMON);
-#ifdef WITH_WSREP
thd->variables.wsrep_on= 0;
-#endif
thd_proc_info(thd, "Loading slave GTID position from table");
if (rpl_load_gtid_slave_state(thd))
@@ -497,8 +514,10 @@ handle_slave_background(void *arg __attribute__((unused)))
thd->get_stmt_da()->sql_errno(),
thd->get_stmt_da()->message());
- mysql_mutex_lock(&LOCK_slave_background);
+ // hijacking global_rpl_thread_pool cond here - it's only once on startup
+ mysql_mutex_lock(&global_rpl_thread_pool.LOCK_rpl_thread_pool);
slave_background_thread_gtid_loaded= true;
+<<<<<<< HEAD
mysql_cond_broadcast(&COND_slave_background);
THD_STAGE_INFO(thd, stage_slave_background_process_request);
@@ -569,35 +588,100 @@ handle_slave_background(void *arg __attribute__((unused)))
mysql_cond_broadcast(&COND_slave_background);
mysql_mutex_unlock(&LOCK_slave_background);
+||||||| 75538f94ca0
+ mysql_cond_broadcast(&COND_slave_background);
+
+ THD_STAGE_INFO(thd, stage_slave_background_process_request);
+ do
+ {
+ slave_background_kill_t *kill_list;
+ slave_background_gtid_pos_create_t *create_list;
+
+ thd->ENTER_COND(&COND_slave_background, &LOCK_slave_background,
+ &stage_slave_background_wait_request,
+ &old_stage);
+ for (;;)
+ {
+ stop= abort_loop || thd->killed || slave_background_thread_stop;
+ kill_list= slave_background_kill_list;
+ create_list= slave_background_gtid_pos_create_list;
+ if (stop || kill_list || create_list)
+ break;
+ mysql_cond_wait(&COND_slave_background, &LOCK_slave_background);
+ }
+
+ slave_background_kill_list= NULL;
+ slave_background_gtid_pos_create_list= NULL;
+ thd->EXIT_COND(&old_stage);
+
+ while (kill_list)
+ {
+ slave_background_kill_t *p = kill_list;
+ THD *to_kill= p->to_kill;
+ kill_list= p->next;
+
+ to_kill->awake(KILL_CONNECTION);
+ mysql_mutex_lock(&to_kill->LOCK_wakeup_ready);
+ to_kill->rgi_slave->killed_for_retry=
+ rpl_group_info::RETRY_KILL_KILLED;
+ mysql_cond_broadcast(&to_kill->COND_wakeup_ready);
+ mysql_mutex_unlock(&to_kill->LOCK_wakeup_ready);
+ my_free(p);
+ }
+
+ while (create_list)
+ {
+ slave_background_gtid_pos_create_t *next= create_list->next;
+ void *hton= create_list->hton;
+ handle_gtid_pos_auto_create_request(thd, hton);
+ my_free(create_list);
+ create_list= next;
+ }
+
+ mysql_mutex_lock(&LOCK_slave_background);
+ } while (!stop);
+
+ slave_background_thread_running= false;
+ mysql_cond_broadcast(&COND_slave_background);
+ mysql_mutex_unlock(&LOCK_slave_background);
+
+=======
+ mysql_cond_signal(&global_rpl_thread_pool.COND_rpl_thread_pool);
+ mysql_mutex_unlock(&global_rpl_thread_pool.LOCK_rpl_thread_pool);
+>>>>>>> bb-10.3-release
delete thd;
+<<<<<<< HEAD
my_thread_end();
return 0;
-}
+||||||| 75538f94ca0
+ thread_safe_decrement32(&service_thread_count);
+ signal_thd_deleted();
+ my_thread_end();
+ return 0;
+=======
+>>>>>>> bb-10.3-release
+}
+static void bg_slave_kill(void *victim)
+{
+ THD *to_kill= (THD *)victim;
+ to_kill->awake(KILL_CONNECTION);
+ mysql_mutex_lock(&to_kill->LOCK_wakeup_ready);
+ to_kill->rgi_slave->killed_for_retry= rpl_group_info::RETRY_KILL_KILLED;
+ mysql_cond_broadcast(&to_kill->COND_wakeup_ready);
+ mysql_mutex_unlock(&to_kill->LOCK_wakeup_ready);
+}
-void
-slave_background_kill_request(THD *to_kill)
+void slave_background_kill_request(THD *to_kill)
{
if (to_kill->rgi_slave->killed_for_retry)
return; // Already deadlock killed.
- slave_background_kill_t *p=
- (slave_background_kill_t *)my_malloc(sizeof(*p), MYF(MY_WME));
- if (p)
- {
- p->to_kill= to_kill;
- to_kill->rgi_slave->killed_for_retry=
- rpl_group_info::RETRY_KILL_PENDING;
- mysql_mutex_lock(&LOCK_slave_background);
- p->next= slave_background_kill_list;
- slave_background_kill_list= p;
- mysql_cond_signal(&COND_slave_background);
- mysql_mutex_unlock(&LOCK_slave_background);
- }
+ to_kill->rgi_slave->killed_for_retry= rpl_group_info::RETRY_KILL_PENDING;
+ mysql_manager_submit(bg_slave_kill, to_kill);
}
-
/*
This function must only be called from a slave SQL thread (or worker thread),
to ensure that the table_entry will not go away before we can lock the
@@ -607,23 +691,18 @@ void
slave_background_gtid_pos_create_request(
rpl_slave_state::gtid_pos_table *table_entry)
{
- slave_background_gtid_pos_create_t *p;
-
if (table_entry->state != rpl_slave_state::GTID_POS_AUTO_CREATE)
return;
- p= (slave_background_gtid_pos_create_t *)my_malloc(sizeof(*p), MYF(MY_WME));
- if (!p)
- return;
mysql_mutex_lock(&rpl_global_gtid_slave_state->LOCK_slave_state);
if (table_entry->state != rpl_slave_state::GTID_POS_AUTO_CREATE)
{
- my_free(p);
mysql_mutex_unlock(&rpl_global_gtid_slave_state->LOCK_slave_state);
return;
}
table_entry->state= rpl_slave_state::GTID_POS_CREATE_REQUESTED;
mysql_mutex_unlock(&rpl_global_gtid_slave_state->LOCK_slave_state);
+<<<<<<< HEAD
p->hton= table_entry->table_hton;
mysql_mutex_lock(&LOCK_slave_background);
p->next= slave_background_gtid_pos_create_list;
@@ -698,6 +777,67 @@ stop_slave_background_thread()
while (slave_background_thread_running)
mysql_cond_wait(&COND_slave_background, &LOCK_slave_background);
mysql_mutex_unlock(&LOCK_slave_background);
+||||||| 75538f94ca0
+ p->hton= table_entry->table_hton;
+ mysql_mutex_lock(&LOCK_slave_background);
+ p->next= slave_background_gtid_pos_create_list;
+ slave_background_gtid_pos_create_list= p;
+ mysql_cond_signal(&COND_slave_background);
+ mysql_mutex_unlock(&LOCK_slave_background);
+}
+
+
+/*
+ Start the slave background thread.
+
+ This thread is currently used for two purposes:
+
+ 1. To load the GTID state from mysql.gtid_slave_pos at server start; reading
+ from table requires valid THD, which is otherwise not available during
+ server init.
+
+ 2. To kill worker thread transactions during parallel replication, when a
+ storage engine attempts to take an errorneous conflicting lock that would
+ cause a deadlock. Killing is done asynchroneously, as the kill may not
+ be safe within the context of a callback from inside storage engine
+ locking code.
+*/
+static int
+start_slave_background_thread()
+{
+ pthread_t th;
+
+ slave_background_thread_running= true;
+ slave_background_thread_stop= false;
+ slave_background_thread_gtid_loaded= false;
+ if (mysql_thread_create(key_thread_slave_background,
+ &th, &connection_attrib, handle_slave_background,
+ NULL))
+ {
+ sql_print_error("Failed to create thread while initialising slave");
+ return 1;
+ }
+ mysql_mutex_lock(&LOCK_slave_background);
+ while (!slave_background_thread_gtid_loaded)
+ mysql_cond_wait(&COND_slave_background, &LOCK_slave_background);
+ mysql_mutex_unlock(&LOCK_slave_background);
+
+ return 0;
+}
+
+
+static void
+stop_slave_background_thread()
+{
+ mysql_mutex_lock(&LOCK_slave_background);
+ slave_background_thread_stop= true;
+ mysql_cond_broadcast(&COND_slave_background);
+ while (slave_background_thread_running)
+ mysql_cond_wait(&COND_slave_background, &LOCK_slave_background);
+ mysql_mutex_unlock(&LOCK_slave_background);
+=======
+ mysql_manager_submit(bg_gtid_pos_auto_create, table_entry->table_hton);
+>>>>>>> bb-10.3-release
}
@@ -712,12 +852,19 @@ int init_slave()
init_slave_psi_keys();
#endif
- if (start_slave_background_thread())
- return 1;
-
if (global_rpl_thread_pool.init(opt_slave_parallel_threads))
return 1;
+ slave_background_thread_gtid_loaded= false;
+ mysql_manager_submit(bg_rpl_load_gtid_slave_state, NULL);
+
+ // hijacking global_rpl_thread_pool cond here - it's only once on startup
+ mysql_mutex_lock(&global_rpl_thread_pool.LOCK_rpl_thread_pool);
+ while (!slave_background_thread_gtid_loaded)
+ mysql_cond_wait(&global_rpl_thread_pool.COND_rpl_thread_pool,
+ &global_rpl_thread_pool.LOCK_rpl_thread_pool);
+ mysql_mutex_unlock(&global_rpl_thread_pool.LOCK_rpl_thread_pool);
+
/*
This is called when mysqld starts. Before client connections are
accepted. However bootstrap may conflict with us if it does START SLAVE.
@@ -1443,10 +1590,15 @@ void slave_prepare_for_shutdown()
mysql_mutex_lock(&LOCK_active_mi);
master_info_index->free_connections();
mysql_mutex_unlock(&LOCK_active_mi);
+<<<<<<< HEAD
// It's safe to destruct worker pool now when
// all driver threads are gone.
global_rpl_thread_pool.deactivate();
stop_slave_background_thread();
+||||||| 75538f94ca0
+ stop_slave_background_thread();
+=======
+>>>>>>> bb-10.3-release
}
/*
@@ -1477,8 +1629,6 @@ void end_slave()
active_mi= 0;
mysql_mutex_unlock(&LOCK_active_mi);
- stop_slave_background_thread();
-
global_rpl_thread_pool.destroy();
free_all_rpl_filters();
DBUG_VOID_RETURN;
@@ -4737,10 +4887,7 @@ pthread_handler_t handle_slave_io(void *arg)
goto err;
}
-
-#ifdef WITH_WSREP
thd->variables.wsrep_on= 0;
-#endif
if (DBUG_EVALUATE_IF("failed_slave_start", 1, 0)
|| repl_semisync_slave.slave_start(mi))
{
@@ -5060,8 +5207,11 @@ log space");
err:
// print the current replication position
if (mi->using_gtid == Master_info::USE_GTID_NO)
+ {
sql_print_information("Slave I/O thread exiting, read up to log '%s', "
"position %llu", IO_RPL_LOG_NAME, mi->master_log_pos);
+ sql_print_information("master was %s:%d", mi->host, mi->port);
+ }
else
{
StringBuffer<100> tmp;
@@ -5070,6 +5220,7 @@ err:
"position %llu; GTID position %s",
IO_RPL_LOG_NAME, mi->master_log_pos,
tmp.c_ptr_safe());
+ sql_print_information("master was %s:%d", mi->host, mi->port);
}
repl_semisync_slave.slave_stop(mi);
thd->reset_query();
@@ -5672,6 +5823,7 @@ pthread_handler_t handle_slave_sql(void *arg)
sql_print_information("Slave SQL thread exiting, replication stopped in "
"log '%s' at position %llu%s", RPL_LOG_NAME,
rli->group_master_log_pos, tmp.c_ptr_safe());
+ sql_print_information("master was %s:%d", mi->host, mi->port);
}
#ifdef WITH_WSREP
wsrep_after_command_before_result(thd);
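
Taken together with the sql_manager.cc hunk further down, the slave.cc changes on the bb-10.3 side drop the dedicated slave background thread: one-off jobs such as deadlock kills of worker transactions and loading or auto-creating the GTID position table are posted to the manager thread as mysql_manager_submit(action, data) callbacks and drained under LOCK_manager. A self-contained sketch of that submit-and-drain shape using standard threads follows; the queue, names and types below are stand-ins, not the server's.

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct WorkItem { void (*action)(void *); void *data; };

    static std::mutex              lock_manager;       // stands in for LOCK_manager
    static std::condition_variable cond_manager;       // stands in for COND_manager
    static std::vector<WorkItem>   cb_list;            // protected by lock_manager
    static bool                    abort_manager= false;

    // post one callback to the background thread (submit and signal)
    static void manager_submit(void (*action)(void *), void *data)
    {
      std::lock_guard<std::mutex> g(lock_manager);
      cb_list.push_back({action, data});
      cond_manager.notify_one();
    }

    // background loop: wait for work or shutdown, detach the list, run it unlocked
    static void handle_manager()
    {
      std::unique_lock<std::mutex> g(lock_manager);
      while (!abort_manager)
      {
        cond_manager.wait(g, []{ return abort_manager || !cb_list.empty(); });
        std::vector<WorkItem> todo;
        todo.swap(cb_list);
        g.unlock();
        for (WorkItem &w : todo)
          w.action(w.data);
        g.lock();
      }
    }

    static void bg_job(void *arg)
    {
      std::printf("background job: %s\n", static_cast<const char *>(arg));
    }

    int main()
    {
      std::thread manager(handle_manager);
      static char job1[]= "kill stalled worker";
      static char job2[]= "load gtid slave state";
      manager_submit(bg_job, job1);
      manager_submit(bg_job, job2);
      std::this_thread::sleep_for(std::chrono::milliseconds(100));
      { std::lock_guard<std::mutex> g(lock_manager); abort_manager= true; }
      cond_manager.notify_one();
      manager.join();
      return 0;
    }
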
diff --git a/sql/spatial.cc b/sql/spatial.cc
index 2b36468e158..84a05532532 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -605,6 +605,7 @@ Geometry *Geometry::create_from_json(Geometry_buffer *buffer,
if (feature_type_found)
goto handle_geometry_key;
}
+ goto err_return;
}
else
{
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index cb2757ba48f..e56361bc424 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -3159,6 +3159,12 @@ end:
int acl_check_setrole(THD *thd, const char *rolename, ulonglong *access)
{
+ if (!initialized)
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables");
+ return 1;
+ }
+
return check_user_can_set_role(thd, thd->security_ctx->priv_user,
thd->security_ctx->host, thd->security_ctx->ip, rolename, access);
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 9d1588a7ce1..747db4424f2 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -450,6 +450,7 @@ void thd_set_ha_data(THD *thd, const struct handlerton *hton,
const void *ha_data)
{
plugin_ref *lock= &thd->ha_data[hton->slot].lock;
+ DBUG_ASSERT(thd == current_thd);
if (ha_data && !*lock)
*lock= ha_lock_engine(NULL, (handlerton*) hton);
else if (!ha_data && *lock)
@@ -457,7 +458,9 @@ void thd_set_ha_data(THD *thd, const struct handlerton *hton,
plugin_unlock(NULL, *lock);
*lock= NULL;
}
+ mysql_mutex_lock(&thd->LOCK_thd_data);
*thd_ha_data(thd, hton)= (void*) ha_data;
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
}
@@ -1512,15 +1515,12 @@ void THD::reset_db(const LEX_CSTRING *new_db)
/* Do operations that may take a long time */
-void THD::cleanup(bool have_mutex)
+void THD::cleanup(void)
{
DBUG_ENTER("THD::cleanup");
DBUG_ASSERT(cleanup_done == 0);
- if (have_mutex)
- set_killed_no_mutex(KILL_CONNECTION,0,0);
- else
- set_killed(KILL_CONNECTION);
+ set_killed(KILL_CONNECTION);
#ifdef WITH_WSREP
if (wsrep_cs().state() != wsrep::client_state::s_none)
{
@@ -1611,28 +1611,6 @@ void THD::cleanup(bool have_mutex)
void THD::free_connection()
{
DBUG_ASSERT(free_connection_done == 0);
- /* Make sure threads are not available via server_threads. */
- assert_not_linked();
-
- /*
- Other threads may have a lock on THD::LOCK_thd_data or
- THD::LOCK_thd_kill to ensure that this THD is not deleted
- while they access it. The following mutex_lock ensures
- that no one else is using this THD and it's now safe to
- continue.
-
- For example consider KILL-statement execution on
- sql_parse.cc kill_one_thread() that will use
- THD::LOCK_thd_data to protect victim thread during
- THD::awake().
- */
- mysql_mutex_lock(&LOCK_thd_data);
- mysql_mutex_lock(&LOCK_thd_kill);
-
-#ifdef WITH_WSREP
- delete wsrep_rgi;
- wsrep_rgi= nullptr;
-#endif /* WITH_WSREP */
my_free(const_cast<char*>(db.str));
db= null_clex_str;
#ifndef EMBEDDED_LIBRARY
@@ -1641,8 +1619,8 @@ void THD::free_connection()
net.vio= nullptr;
net_end(&net);
#endif
- if (!cleanup_done)
- cleanup(true); // We have locked THD::LOCK_thd_kill
+ if (!cleanup_done)
+ cleanup();
ha_close_connection(this);
plugin_thdvar_cleanup(this);
mysql_audit_free_thd(this);
@@ -1653,8 +1631,6 @@ void THD::free_connection()
#if defined(ENABLED_PROFILING)
profiling.restart(); // Reset profiling
#endif
- mysql_mutex_unlock(&LOCK_thd_kill);
- mysql_mutex_unlock(&LOCK_thd_data);
}
/*
@@ -5085,6 +5061,16 @@ thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd)
DBUG_EXECUTE_IF("disable_thd_need_ordering_with", return 1;);
if (!thd || !other_thd)
return 1;
+#ifdef WITH_WSREP
+ /* wsrep applier, replayer and TOI processing threads are ordered
+ by replication provider, relaxed GAP locking protocol can be used
+ between high priority wsrep threads
+ */
+ if (WSREP_ON &&
+ wsrep_thd_is_BF(const_cast<THD *>(thd), false) &&
+ wsrep_thd_is_BF(const_cast<THD *>(other_thd), true))
+ return 0;
+#endif /* WITH_WSREP */
rgi= thd->rgi_slave;
other_rgi= other_thd->rgi_slave;
if (!rgi || !other_rgi)
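
The thd_set_ha_data change above pairs with the kill_handlerton hunk in handler.cc: the owning connection thread now stores ha_data under LOCK_thd_data (and asserts it really is the current thread), while a concurrent KILL reads the slot under the same mutex. A self-contained sketch of that writer/reader pairing follows; MockTHD and the helper names are stand-ins, not the server types.

    #include <cassert>
    #include <mutex>
    #include <thread>

    struct MockTHD
    {
      std::mutex      lock_thd_data;                    // stands in for LOCK_thd_data
      std::thread::id owner= std::this_thread::get_id();
      void           *ha_data= nullptr;                 // per-engine slot (one slot here)
    };

    // writer side: only the owning connection thread stores, and under the mutex
    static void set_ha_data(MockTHD *thd, void *data)
    {
      assert(std::this_thread::get_id() == thd->owner); // like DBUG_ASSERT(thd == current_thd)
      std::lock_guard<std::mutex> g(thd->lock_thd_data);
      thd->ha_data= data;
    }

    // reader side, e.g. a KILL issued from another connection: same mutex
    static bool has_engine_data(MockTHD *thd)
    {
      std::lock_guard<std::mutex> g(thd->lock_thd_data);
      return thd->ha_data != nullptr;
    }

    int main()
    {
      MockTHD thd;
      int dummy= 42;
      set_ha_data(&thd, &dummy);
      std::thread killer([&]{ (void) has_engine_data(&thd); });  // concurrent reader
      killer.join();
      return 0;
    }
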
diff --git a/sql/sql_class.h b/sql/sql_class.h
index a5857a6feb0..6257588c122 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -3281,7 +3281,7 @@ public:
void update_all_stats();
void update_stats(void);
void change_user(void);
- void cleanup(bool have_mutex=false);
+ void cleanup(void);
void cleanup_after_query();
void free_connection();
void reset_for_reuse();
@@ -6740,6 +6740,22 @@ class Sql_mode_save
sql_mode_t old_mode; // SQL mode saved at construction time.
};
+class Abort_on_warning_instant_set
+{
+ THD *m_thd;
+ bool m_save_abort_on_warning;
+public:
+ Abort_on_warning_instant_set(THD *thd, bool temporary_value)
+ :m_thd(thd), m_save_abort_on_warning(thd->abort_on_warning)
+ {
+ thd->abort_on_warning= temporary_value;
+ }
+ ~Abort_on_warning_instant_set()
+ {
+ m_thd->abort_on_warning= m_save_abort_on_warning;
+ }
+};
+
class Switch_to_definer_security_ctx
{
public:
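
Abort_on_warning_instant_set, added above and used by the ha_create_table hunk in handler.cc, is a plain scope guard: remember thd->abort_on_warning, force a temporary value, and restore the saved value on every exit path. A minimal self-contained rendering of that usage follows; MockTHD and the local class name are illustrative.

    #include <cassert>

    struct MockTHD { bool abort_on_warning= true; };

    // same shape as Abort_on_warning_instant_set above: remember the old value,
    // force a temporary one, restore the saved value when the scope ends
    class Scoped_abort_on_warning
    {
      MockTHD *m_thd;
      bool     m_saved;
    public:
      Scoped_abort_on_warning(MockTHD *thd, bool temporary_value)
        : m_thd(thd), m_saved(thd->abort_on_warning)
      { thd->abort_on_warning= temporary_value; }
      ~Scoped_abort_on_warning() { m_thd->abort_on_warning= m_saved; }
    };

    static void create_table_like_path(MockTHD *thd)
    {
      Scoped_abort_on_warning guard(thd, false);        // as ha_create_table now does
      assert(!thd->abort_on_warning);                   // warnings do not abort in here
      // ... any early return from this scope still restores the flag ...
    }

    int main()
    {
      MockTHD thd;
      create_table_like_path(&thd);
      assert(thd.abort_on_warning);                     // restored after the scope
      return 0;
    }
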
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 643b7ee898a..4d09dab392b 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -92,7 +92,6 @@ int get_or_create_user_conn(THD *thd, const char *user,
uc->host= uc->user + user_len + 1;
uc->len= (uint)temp_len;
uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0;
- uc->user_resources= *mqh;
uc->reset_utime= thd->thr_create_utime;
if (my_hash_insert(&hash_user_connections, (uchar*) uc))
{
@@ -102,6 +101,7 @@ int get_or_create_user_conn(THD *thd, const char *user,
goto end;
}
}
+ uc->user_resources= *mqh;
thd->user_connect=uc;
uc->connections++;
end:
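
The sql_connect.cc hunk moves the uc->user_resources assignment out of the creation branch, so per-user limits are refreshed on every call rather than only when the user_conn entry is first created, which matches the user_limits test changes in this merge. A self-contained sketch of the reordered lookup follows; UserConn and the map are stand-ins for the real hash.

    #include <cstdio>
    #include <map>
    #include <string>

    struct UserConn { int connections= 0; int max_queries= 0; };

    static std::map<std::string, UserConn> hash_user_connections;   // stand-in for the hash

    // mirrors the reordered logic: the limits are (re)applied on every call,
    // not only in the branch that creates the entry
    static UserConn *get_or_create_user_conn(const std::string &key, int max_queries)
    {
      UserConn &uc= hash_user_connections[key];   // creates the entry on first use
      uc.max_queries= max_queries;                // refresh limits each time
      uc.connections++;
      return &uc;
    }

    int main()
    {
      get_or_create_user_conn("app@host", 10);
      UserConn *uc= get_or_create_user_conn("app@host", 50);   // limit raised later
      std::printf("connections=%d max_queries=%d\n", uc->connections, uc->max_queries);
      return 0;
    }
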
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 12119997430..8447d5bea7d 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -687,7 +687,6 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++)
{
- my_bitmap_map *old_map;
/* note that 'item' can be changed by fix_fields() call */
if (item->fix_fields_if_needed_for_scalar(thd, it_ke.ref()))
return 1;
@@ -699,9 +698,9 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
}
if (!in_prepare)
{
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
(void) item->save_in_field(key_part->field, 1);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
}
key_len+= key_part->store_length;
keypart_map= (keypart_map << 1) | 1;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index cac225e01e7..6aec301bedc 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -4983,6 +4983,9 @@ bool LEX::save_prep_leaf_tables()
bool st_select_lex::save_prep_leaf_tables(THD *thd)
{
+ if (prep_leaf_list_state == SAVED)
+ return FALSE;
+
List_iterator_fast<TABLE_LIST> li(leaf_tables);
TABLE_LIST *table;
diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc
index 2ad8d8a914a..b08e43e8af2 100644
--- a/sql/sql_manager.cc
+++ b/sql/sql_manager.cc
@@ -26,8 +26,8 @@
#include "sql_manager.h"
#include "sql_base.h" // flush_tables
-static bool volatile manager_thread_in_use;
-static bool abort_manager;
+static bool volatile manager_thread_in_use = 0;
+static bool abort_manager = false;
pthread_t manager_thread;
mysql_mutex_t LOCK_manager;
@@ -35,31 +35,31 @@ mysql_cond_t COND_manager;
struct handler_cb {
struct handler_cb *next;
- void (*action)(void);
+ void (*action)(void *);
+ void *data;
};
-static struct handler_cb * volatile cb_list;
+static struct handler_cb *cb_list; // protected by LOCK_manager
-bool mysql_manager_submit(void (*action)())
+bool mysql_manager_submit(void (*action)(void *), void *data)
{
bool result= FALSE;
DBUG_ASSERT(manager_thread_in_use);
- struct handler_cb * volatile *cb;
+ struct handler_cb **cb;
mysql_mutex_lock(&LOCK_manager);
cb= &cb_list;
- while (*cb && (*cb)->action != action)
+ while (*cb)
cb= &(*cb)->next;
+ *cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME));
if (!*cb)
+ result= TRUE;
+ else
{
- *cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME));
- if (!*cb)
- result= TRUE;
- else
- {
- (*cb)->next= NULL;
- (*cb)->action= action;
- }
+ (*cb)->next= NULL;
+ (*cb)->action= action;
+ (*cb)->data= data;
}
+ mysql_cond_signal(&COND_manager);
mysql_mutex_unlock(&LOCK_manager);
return result;
}
@@ -69,18 +69,14 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
int error = 0;
struct timespec abstime;
bool reset_flush_time = TRUE;
- struct handler_cb *cb= NULL;
my_thread_init();
DBUG_ENTER("handle_manager");
pthread_detach_this_thread();
manager_thread = pthread_self();
- mysql_cond_init(key_COND_manager, &COND_manager,NULL);
- mysql_mutex_init(key_LOCK_manager, &LOCK_manager, NULL);
- manager_thread_in_use = 1;
- for (;;)
+ mysql_mutex_lock(&LOCK_manager);
+ while (!abort_manager)
{
- mysql_mutex_lock(&LOCK_manager);
/* XXX: This will need to be made more general to handle different
* polling needs. */
if (flush_time)
@@ -90,40 +86,37 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused)))
set_timespec(abstime, flush_time);
reset_flush_time = FALSE;
}
- while ((!error || error == EINTR) && !abort_manager)
+ while ((!error || error == EINTR) && !abort_manager && !cb_list)
error= mysql_cond_timedwait(&COND_manager, &LOCK_manager, &abstime);
+
+ if (error == ETIMEDOUT || error == ETIME)
+ {
+ tc_purge();
+ error = 0;
+ reset_flush_time = TRUE;
+ }
}
else
{
- while ((!error || error == EINTR) && !abort_manager)
+ while ((!error || error == EINTR) && !abort_manager && !cb_list)
error= mysql_cond_wait(&COND_manager, &LOCK_manager);
}
- if (cb == NULL)
- {
- cb= cb_list;
- cb_list= NULL;
- }
- mysql_mutex_unlock(&LOCK_manager);
- if (abort_manager)
- break;
-
- if (error == ETIMEDOUT || error == ETIME)
- {
- tc_purge();
- error = 0;
- reset_flush_time = TRUE;
- }
+ struct handler_cb *cb= cb_list;
+ cb_list= NULL;
+ mysql_mutex_unlock(&LOCK_manager);
while (cb)
{
struct handler_cb *next= cb->next;
- cb->action();
+ cb->action(cb->data);
my_free(cb);
cb= next;
}
+ mysql_mutex_lock(&LOCK_manager);
}
manager_thread_in_use = 0;
+ mysql_mutex_unlock(&LOCK_manager);
mysql_mutex_destroy(&LOCK_manager);
mysql_cond_destroy(&COND_manager);
DBUG_LEAVE; // Can't use DBUG_RETURN after my_thread_end
@@ -137,15 +130,15 @@ void start_handle_manager()
{
DBUG_ENTER("start_handle_manager");
abort_manager = false;
- if (flush_time && flush_time != ~(ulong) 0L)
{
pthread_t hThread;
- int error;
- if ((error= mysql_thread_create(key_thread_handle_manager,
- &hThread, &connection_attrib,
- handle_manager, 0)))
- sql_print_warning("Can't create handle_manager thread (errno= %d)",
- error);
+ int err;
+ manager_thread_in_use = 1;
+ mysql_cond_init(key_COND_manager, &COND_manager,NULL);
+ mysql_mutex_init(key_LOCK_manager, &LOCK_manager, NULL);
+ if ((err= mysql_thread_create(key_thread_handle_manager, &hThread,
+ &connection_attrib, handle_manager, 0)))
+ sql_print_warning("Can't create handle_manager thread (errno: %M)", err);
}
DBUG_VOID_RETURN;
}
@@ -155,10 +148,10 @@ void start_handle_manager()
void stop_handle_manager()
{
DBUG_ENTER("stop_handle_manager");
- abort_manager = true;
if (manager_thread_in_use)
{
mysql_mutex_lock(&LOCK_manager);
+ abort_manager = true;
DBUG_PRINT("quit", ("initiate shutdown of handle manager thread: %lu",
(ulong)manager_thread));
mysql_cond_signal(&COND_manager);
diff --git a/sql/sql_manager.h b/sql/sql_manager.h
index 9c6c84450ed..f97d4a2cfc5 100644
--- a/sql/sql_manager.h
+++ b/sql/sql_manager.h
@@ -18,6 +18,6 @@
void start_handle_manager();
void stop_handle_manager();
-bool mysql_manager_submit(void (*action)());
+bool mysql_manager_submit(void (*action)(void *), void *data);
#endif /* SQL_MANAGER_INCLUDED */
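
With the signature change above, a manager callback now receives an opaque context pointer, every request is queued in FIFO order under LOCK_manager instead of being collapsed with an already-pending duplicate, and the submitter wakes the thread through COND_manager. A self-contained sketch of the new calling convention; purge_request and purge_stale_tables are hypothetical names, and mysql_manager_submit is stubbed here so the snippet runs outside the server:

    #include <cstdio>

    // Stub with the same signature as the real mysql_manager_submit(), which
    // queues the request under LOCK_manager and signals COND_manager.
    static bool mysql_manager_submit(void (*action)(void *), void *data)
    {
      action(data);                     // run inline instead of queueing
      return false;                     // FALSE means the request was accepted
    }

    struct purge_request { const char *db; };   // hypothetical per-request context

    static void purge_stale_tables(void *arg)   // callbacks now take void *
    {
      purge_request *req= static_cast<purge_request *>(arg);
      std::printf("purging stale tables in %s\n", req->db);
      delete req;                               // the callback owns its context
    }

    int main()
    {
      // In the server each submit is queued and executed later by
      // handle_manager(); duplicate actions are no longer merged.
      mysql_manager_submit(purge_stale_tables, new purge_request{"test"});
      mysql_manager_submit(purge_stale_tables, new purge_request{"mysql"});
      return 0;
    }
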
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 209caa9460e..de96f0c8924 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2220,6 +2220,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
break;
general_log_print(thd, command, NullS);
status_var_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS]);
+ *current_global_status_var= global_status_var;
calc_sum_of_all_status(current_global_status_var);
if (!(uptime= (ulong) (thd->start_time - server_start_time)))
queries_per_second1000= 0;
diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h
index c9c75d07a6e..6bd60fdd6c5 100644
--- a/sql/sql_plugin.h
+++ b/sql/sql_plugin.h
@@ -22,11 +22,13 @@
that is defined in plugin.h
*/
#define SHOW_always_last SHOW_KEY_CACHE_LONG, \
- SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, \
SHOW_HAVE, SHOW_MY_BOOL, SHOW_HA_ROWS, SHOW_SYS, \
- SHOW_LONG_NOFLUSH, SHOW_LONGLONG_STATUS, SHOW_UINT32_STATUS, \
- SHOW_LEX_STRING, SHOW_ATOMIC_COUNTER_UINT32_T
-#include "mariadb.h"
+ SHOW_LONG_NOFLUSH, SHOW_LEX_STRING, \
+ SHOW_ATOMIC_COUNTER_UINT32_T, \
+ /* SHOW_*_STATUS must be at the end, SHOW_LONG_STATUS being first */ \
+ SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, SHOW_LONGLONG_STATUS, \
+ SHOW_UINT32_STATUS
+#include <my_global.h>
#undef SHOW_always_last
#include "m_string.h" /* LEX_STRING */
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 0312bc9a758..ea6bfe03be5 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -122,6 +122,7 @@ When one supplies long data for a placeholder:
static const uint PARAMETER_FLAG_UNSIGNED= 128U << 8;
#endif
#include "lock.h" // MYSQL_OPEN_FORCE_SHARED_MDL
+#include "log_event.h" // class Log_event
#include "sql_handler.h"
#include "transaction.h" // trans_rollback_implicit
#ifdef WITH_WSREP
@@ -2487,6 +2488,16 @@ static bool check_prepared_statement(Prepared_statement *stmt)
DBUG_RETURN(FALSE);
}
break;
+ case SQLCOM_SHOW_BINLOG_EVENTS:
+ case SQLCOM_SHOW_RELAYLOG_EVENTS:
+ {
+ List<Item> field_list;
+ Log_event::init_show_field_list(thd, &field_list);
+
+ if ((res= send_stmt_metadata(thd, stmt, &field_list)) == 2)
+ DBUG_RETURN(FALSE);
+ }
+ break;
#endif /* EMBEDDED_LIBRARY */
case SQLCOM_SHOW_CREATE_PROC:
if ((res= mysql_test_show_create_routine(stmt, &sp_handler_procedure)) == 2)
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 5203e0f52a5..622eff2faae 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -4685,5 +4685,22 @@ rpl_gtid_pos_update(THD *thd, char *str, size_t len)
return false;
}
+int compare_log_name(const char *log_1, const char *log_2) {
+ int res= 1;
+ const char *ext1_str= strrchr(log_1, '.');
+ const char *ext2_str= strrchr(log_2, '.');
+ char file_name_1[255], file_name_2[255];
+ strmake(file_name_1, log_1, (ext1_str - log_1));
+ strmake(file_name_2, log_2, (ext2_str - log_2));
+ char *endptr = NULL;
+ res= strcmp(file_name_1, file_name_2);
+ if (!res)
+ {
+ ulong ext1= strtoul(++ext1_str, &endptr, 10);
+ ulong ext2= strtoul(++ext2_str, &endptr, 10);
+ res= (ext1 > ext2 ? 1 : ((ext1 == ext2) ? 0 : -1));
+ }
+ return res;
+}
#endif /* HAVE_REPLICATION */
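
compare_log_name() added above orders two binlog file names by comparing the base names first and, only when they match, the numeric extensions as integers; this keeps the ordering correct even when the sequence number grows an extra digit (lexically ".999999" sorts after ".1000000", numerically it is smaller). A self-contained restatement of the same comparison with the standard library, assuming both names carry a numeric extension as binlog names do:

    #include <cassert>
    #include <cstdlib>
    #include <cstring>
    #include <string>

    static int compare_log_name_demo(const char *log_1, const char *log_2)
    {
      const char *ext1= std::strrchr(log_1, '.');
      const char *ext2= std::strrchr(log_2, '.');
      std::string base1(log_1, ext1 - log_1);        // name without the extension
      std::string base2(log_2, ext2 - log_2);
      if (int res= base1.compare(base2))
        return res < 0 ? -1 : 1;
      unsigned long n1= std::strtoul(ext1 + 1, nullptr, 10);
      unsigned long n2= std::strtoul(ext2 + 1, nullptr, 10);
      return n1 > n2 ? 1 : (n1 == n2 ? 0 : -1);
    }

    int main()
    {
      assert(compare_log_name_demo("mysql-bin.000009", "mysql-bin.000010") < 0);
      assert(compare_log_name_demo("mysql-bin.999999", "mysql-bin.1000000") < 0); // plain strcmp would invert this
      assert(compare_log_name_demo("mysql-bin.000002", "mysql-bin.000002") == 0);
      return 0;
    }
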
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 18aa7ea3fce..95916e31abf 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -45,6 +45,7 @@ bool show_binlogs(THD* thd);
extern int init_master_info(Master_info* mi);
void kill_zombie_dump_threads(uint32 slave_server_id);
int check_binlog_magic(IO_CACHE* log, const char** errmsg);
+int compare_log_name(const char *log_1, const char *log_2);
struct LOAD_FILE_IO_CACHE : public IO_CACHE
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 4bbe5ce9141..e89bb1fd442 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -2097,7 +2097,7 @@ JOIN::optimize_inner()
join->optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE &&
join->with_two_phase_optimization)
continue;
- /*
+ /*
Do not push conditions from where into materialized inner tables
of outer joins: this is not valid.
*/
@@ -2298,7 +2298,7 @@ setup_subq_exit:
if (with_two_phase_optimization)
optimization_state= JOIN::OPTIMIZATION_PHASE_1_DONE;
else
- {
+ {
if (optimize_stage2())
DBUG_RETURN(1);
}
@@ -2318,11 +2318,11 @@ int JOIN::optimize_stage2()
if (unlikely(thd->check_killed()))
DBUG_RETURN(1);
-
+
/* Generate an execution plan from the found optimal join order. */
if (get_best_combination())
DBUG_RETURN(1);
-
+
if (make_range_rowid_filters())
DBUG_RETURN(1);
@@ -3965,7 +3965,7 @@ bool JOIN::setup_subquery_caches()
if (tmp_having)
{
DBUG_ASSERT(having == NULL);
- if (!(tmp_having=
+ if (!(tmp_having=
tmp_having->transform(thd,
&Item::expr_cache_insert_transformer,
NULL)))
@@ -6893,7 +6893,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
Special treatment for ft-keys.
*/
-bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
+bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
bool skip_unprefixed_keyparts)
{
KEYUSE key_end, *prev, *save_pos, *use;
@@ -8064,7 +8064,7 @@ best_access_path(JOIN *join,
pos->use_join_buffer= best_uses_jbuf;
pos->spl_plan= spl_plan;
pos->range_rowid_filter_info= best_filter;
-
+
loose_scan_opt.save_to_position(s, loose_scan_pos);
if (!best_key &&
@@ -10208,7 +10208,7 @@ bool JOIN::check_two_phase_optimization(THD *thd)
return true;
return false;
}
-
+
bool JOIN::inject_cond_into_where(Item *injected_cond)
{
@@ -10239,7 +10239,7 @@ bool JOIN::inject_cond_into_where(Item *injected_cond)
and_args->push_back(elem, thd->mem_root);
}
}
-
+
return false;
}
@@ -24036,7 +24036,7 @@ bool
cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
{
Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE);
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
bool result= 0;
for (store_key **copy=ref->key_copy ; *copy ; copy++)
@@ -24047,7 +24047,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
break;
}
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return result;
}
@@ -25718,7 +25718,7 @@ bool JOIN::rollup_init()
{
if (!(rollup.null_items[i]= new (thd->mem_root) Item_null_result(thd)))
return true;
-
+
List<Item> *rollup_fields= &rollup.fields[i];
rollup_fields->empty();
rollup.ref_pointer_arrays[i]= Ref_ptr_array(ref_array, all_fields.elements);
@@ -26228,7 +26228,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
{
JOIN_TAB *ctab= bush_children->start;
/* table */
- size_t len= my_snprintf(table_name_buffer,
+ size_t len= my_snprintf(table_name_buffer,
sizeof(table_name_buffer)-1,
"<subquery%d>",
ctab->emb_sj_nest->sj_subq_pred->get_identifier());
@@ -27403,7 +27403,7 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
DBUG_ASSERT(thd);
-
+
if (tvc)
{
tvc->print(thd, str, query_type);
diff --git a/sql/sql_select.h b/sql/sql_select.h
index e14907d73bc..1d928334bd8 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1948,8 +1948,8 @@ class store_key_field: public store_key
enum store_key_result copy_inner()
{
TABLE *table= copy_field.to_field->table;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
- table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
+ &table->write_set);
/*
It looks like the next statement is needed only for a simplified
@@ -1960,7 +1960,7 @@ class store_key_field: public store_key
bzero(copy_field.to_ptr,copy_field.to_length);
copy_field.do_copy(&copy_field);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
null_key= to_field->is_null();
return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK;
}
@@ -1995,8 +1995,8 @@ public:
enum store_key_result copy_inner()
{
TABLE *table= to_field->table;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
- table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
+ &table->write_set);
int res= FALSE;
/*
@@ -2017,7 +2017,7 @@ public:
*/
if (!res && table->in_use->is_error())
res= 1; /* STORE_KEY_FATAL */
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
null_key= to_field->is_null() || item->null_value;
return ((err != 0 || res < 0 || res > 2) ? STORE_KEY_FATAL :
(store_key_result) res);
@@ -2053,8 +2053,8 @@ protected:
{
inited=1;
TABLE *table= to_field->table;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
- table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table,
+ &table->write_set);
if ((res= item->save_in_field(to_field, 1)))
{
if (!err)
@@ -2066,7 +2066,7 @@ protected:
*/
if (!err && to_field->table->in_use->is_error())
err= 1; /* STORE_KEY_FATAL */
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
}
null_key= to_field->is_null() || item->null_value;
return (err > 2 ? STORE_KEY_FATAL : (store_key_result) err);
diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc
index a2bcfd5a4ff..b88933eac0c 100644
--- a/sql/sql_sequence.cc
+++ b/sql/sql_sequence.cc
@@ -136,7 +136,7 @@ bool sequence_definition::check_and_adjust(bool set_reserved_until)
void sequence_definition::read_fields(TABLE *table)
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
reserved_until= table->field[0]->val_int();
min_value= table->field[1]->val_int();
max_value= table->field[2]->val_int();
@@ -145,7 +145,7 @@ void sequence_definition::read_fields(TABLE *table)
cache= table->field[5]->val_int();
cycle= table->field[6]->val_int();
round= table->field[7]->val_int();
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
used_fields= ~(uint) 0;
print_dbug();
}
@@ -157,7 +157,7 @@ void sequence_definition::read_fields(TABLE *table)
void sequence_definition::store_fields(TABLE *table)
{
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
/* zero possible delete markers & null bits */
memcpy(table->record[0], table->s->default_values, table->s->null_bytes);
@@ -170,7 +170,7 @@ void sequence_definition::store_fields(TABLE *table)
table->field[6]->store((longlong) cycle != 0, 0);
table->field[7]->store((longlong) round, 1);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
print_dbug();
}
@@ -527,12 +527,11 @@ int SEQUENCE::read_initial_values(TABLE *table)
int SEQUENCE::read_stored_values(TABLE *table)
{
int error;
- my_bitmap_map *save_read_set;
DBUG_ENTER("SEQUENCE::read_stored_values");
- save_read_set= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *save_read_set= tmp_use_all_columns(table, &table->read_set);
error= table->file->ha_read_first_row(table->record[0], MAX_KEY);
- tmp_restore_column_map(table->read_set, save_read_set);
+ tmp_restore_column_map(&table->read_set, save_read_set);
if (unlikely(error))
{
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 12da17e7f10..ad2a489c39d 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -2147,7 +2147,6 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
!foreign_db_mode;
bool check_options= !(sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) &&
!create_info_arg;
- my_bitmap_map *old_map;
handlerton *hton;
int error= 0;
DBUG_ENTER("show_create_table");
@@ -2214,7 +2213,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
We have to restore the read_set if we are called from insert in case
of row based replication.
*/
- old_map= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set);
bool not_the_first_field= false;
for (ptr=table->field ; (field= *ptr); ptr++)
@@ -2259,8 +2258,13 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
/*
For string types dump collation name only if
collation is not primary for the given charset
+
+ For generated fields don't print the COLLATE clause if
+ the collation matches the expression's collation.
*/
- if (!(field->charset()->state & MY_CS_PRIMARY) && !field->vcol_info)
+ if (!(field->charset()->state & MY_CS_PRIMARY) &&
+ (!field->vcol_info ||
+ field->charset() != field->vcol_info->expr->collation.collation))
{
packet->append(STRING_WITH_LEN(" COLLATE "));
packet->append(field->charset()->name);
@@ -2516,7 +2520,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
}
}
#endif
- tmp_restore_column_map(table->read_set, old_map);
+ tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(error);
}
@@ -3832,6 +3836,16 @@ static bool show_status_array(THD *thd, const char *wild,
if (show_type == SHOW_SYS)
mysql_mutex_lock(&LOCK_global_system_variables);
+ else if (show_type >= SHOW_LONG_STATUS && scope == OPT_GLOBAL &&
+ !status_var->local_memory_used)
+ {
+ mysql_mutex_lock(&LOCK_status);
+ *status_var= global_status_var;
+ mysql_mutex_unlock(&LOCK_status);
+ calc_sum_of_all_status(status_var);
+ DBUG_ASSERT(status_var->local_memory_used);
+ }
+
pos= get_one_variable(thd, var, scope, show_type, status_var,
&charset, buff, &length);
@@ -3889,7 +3903,6 @@ uint calc_sum_of_all_status(STATUS_VAR *to)
calc_sum_callback_arg arg(to);
DBUG_ENTER("calc_sum_of_all_status");
- *to= global_status_var;
to->local_memory_used= 0;
/* Add to this status from existing threads */
server_threads.iterate(calc_sum_callback, &arg);
@@ -5298,6 +5311,12 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond)
continue;
}
+ if (thd->killed == ABORT_QUERY)
+ {
+ error= 0;
+ goto err;
+ }
+
DEBUG_SYNC(thd, "before_open_in_get_all_tables");
if (fill_schema_table_by_open(thd, &tmp_mem_root, FALSE,
table, schema_table,
@@ -5866,7 +5885,7 @@ static bool print_anchor_data_type(const Spvar_definition *def,
Let's print it according to the current sql_mode.
It will make output in line with the value in mysql.proc.param_list,
so both I_S.XXX.DTD_IDENTIFIER and mysql.proc.param_list use the same notation:
- default or Oracle, according to the sql_mode at the SP creation time.
+ default or Oracle, according to the sql_mode at the SP creation time.
The caller must make sure to set thd->variables.sql_mode to the routine sql_mode.
*/
static bool print_anchor_dtd_identifier(THD *thd, const Spvar_definition *def,
@@ -7970,10 +7989,7 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond)
if (partial_cond)
partial_cond->val_int();
- if (scope == OPT_GLOBAL)
- {
- calc_sum_of_all_status(&tmp);
- }
+ tmp.local_memory_used= 0; // meaning tmp was not populated yet
mysql_rwlock_rdlock(&LOCK_all_status_vars);
res= show_status_array(thd, wild,
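
The removal of the *to= global_status_var assignment from calc_sum_of_all_status() above changes its contract: the function now only adds the per-thread contributions, and every caller is expected to seed the accumulator with a copy of global_status_var first, which is what the new lines in dispatch_command(), show_status_array() and mysql_print_status() in this patch do. A minimal sketch of the new calling convention, with a simplified struct standing in for STATUS_VAR:

    #include <cassert>
    #include <vector>

    struct status_var_demo { long questions; };                   // stand-in for STATUS_VAR

    static status_var_demo global_status_demo= { 100 };           // stand-in for global_status_var
    static std::vector<status_var_demo> thread_status_demo= { {3}, {7} };

    // Like the new calc_sum_of_all_status(): adds per-thread values only,
    // without touching the global snapshot.
    static void calc_sum_of_all_status_demo(status_var_demo *to)
    {
      for (const status_var_demo &t : thread_status_demo)
        to->questions+= t.questions;
    }

    int main()
    {
      status_var_demo tmp= global_status_demo;   // caller seeds the accumulator first
      calc_sum_of_all_status_demo(&tmp);
      assert(tmp.questions == 110);
      return 0;
    }
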
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 717deeabe18..e7d380ebb2d 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -1024,9 +1024,8 @@ public:
{
char buff[MAX_FIELD_WIDTH];
String val(buff, sizeof(buff), &my_charset_bin);
- my_bitmap_map *old_map;
- old_map= dbug_tmp_use_all_columns(stat_table, stat_table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(stat_table, &stat_table->read_set);
for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HISTOGRAM; i++)
{
Field *stat_field= stat_table->field[i];
@@ -1084,7 +1083,7 @@ public:
}
}
}
- dbug_tmp_restore_column_map(stat_table->read_set, old_map);
+ dbug_tmp_restore_column_map(&stat_table->read_set, old_map);
}
@@ -2121,6 +2120,10 @@ int alloc_statistics_for_table(THD* thd, TABLE *table)
ulonglong *idx_avg_frequency= (ulonglong*) alloc_root(&table->mem_root,
sizeof(ulonglong) * key_parts);
+ if (table->file->ha_rnd_init(TRUE))
+ DBUG_RETURN(1);
+ table->file->ha_rnd_end();
+
uint columns= 0;
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 4ddfcabf6c8..18b34e4a212 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -4072,8 +4072,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
/* not a critical problem */
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_TOO_LONG_KEY,
- ER_THD(thd, ER_TOO_LONG_KEY),
+ ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
@@ -4117,7 +4116,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
{
key_part_length= file->max_key_part_length();
/* not a critical problem */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
@@ -4398,7 +4397,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
ER_ILLEGAL_HA_CREATE_OPTION,
ER_THD(thd, ER_ILLEGAL_HA_CREATE_OPTION),
file->engine_name()->str,
- "TRANSACTIONAL=1");
+ create_info->transactional == HA_CHOICE_YES
+ ? "TRANSACTIONAL=1" : "TRANSACTIONAL=0");
if (parse_option_list(thd, file->partition_ht(), &create_info->option_struct,
&create_info->option_list,
@@ -5299,6 +5299,9 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
if (!opt_explicit_defaults_for_timestamp)
promote_first_timestamp_column(&alter_info->create_list);
+ /* We can abort create table for any table type */
+ thd->abort_on_warning= thd->is_strict_mode();
+
if (mysql_create_table_no_lock(thd, &create_table->db,
&create_table->table_name, create_info,
alter_info,
@@ -5336,6 +5339,8 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
}
err:
+ thd->abort_on_warning= 0;
+
/* In RBR or readonly server we don't need to log CREATE TEMPORARY TABLE */
if (!result && create_info->tmp_table() &&
(thd->is_current_stmt_binlog_format_row() || (opt_readonly && !thd->slave_thread)))
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 08dc137bebe..54eed6e4c90 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -565,6 +565,7 @@ void mysql_print_status()
STATUS_VAR tmp;
uint count;
+ tmp= global_status_var;
count= calc_sum_of_all_status(&tmp);
printf("\nStatus information:\n\n");
(void) my_getwd(current_dir, sizeof(current_dir),MYF(0));
diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc
index 916047c4b0f..33ee2dd381c 100644
--- a/sql/sql_tvc.cc
+++ b/sql/sql_tvc.cc
@@ -47,7 +47,7 @@ bool fix_fields_for_tvc(THD *thd, List_iterator_fast<List_item> &li)
while ((lst= li++))
{
- List_iterator_fast<Item> it(*lst);
+ List_iterator<Item> it(*lst);
Item *item;
while ((item= it++))
@@ -59,7 +59,7 @@ bool fix_fields_for_tvc(THD *thd, List_iterator_fast<List_item> &li)
while replacing their values to NAME_CONST()s.
So fix only those that have not been.
*/
- if (item->fix_fields_if_needed(thd, 0) ||
+ if (item->fix_fields_if_needed_for_scalar(thd, it.ref()) ||
item->check_is_evaluable_expression_or_error())
DBUG_RETURN(true);
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index d64a96f5070..2a82b1b2332 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1718,6 +1718,9 @@ bool Multiupdate_prelocking_strategy::handle_end(THD *thd)
if (select_lex->handle_derived(thd->lex, DT_MERGE))
DBUG_RETURN(1);
+ if (thd->lex->save_prep_leaf_tables())
+ DBUG_RETURN(1);
+
List<Item> *fields= &lex->first_select_lex()->item_list;
if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
*fields, MARK_COLUMNS_WRITE, 0, 0))
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 2548dc28731..9b2355e53c5 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -829,7 +829,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
/*
We should not introduce any further shift/reduce conflicts.
*/
-%expect 47
+%expect 68
/*
Comments for TOKENS.
@@ -1808,7 +1808,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <type_handler> int_type real_type
-%type <Lex_field_type> type_with_opt_collate field_type
+%type <Lex_field_type> field_type field_type_all
qualified_field_type
field_type_numeric
field_type_string
@@ -3392,7 +3392,7 @@ sp_param_name:
;
sp_param_name_and_type:
- sp_param_name type_with_opt_collate
+ sp_param_name field_type
{
if (unlikely(Lex->sp_param_fill_definition($$= $1)))
MYSQL_YYABORT;
@@ -3522,7 +3522,7 @@ row_field_name:
;
row_field_definition:
- row_field_name type_with_opt_collate
+ row_field_name field_type
;
row_field_definition_list:
@@ -3551,7 +3551,7 @@ sp_decl_idents_init_vars:
sp_decl_variable_list:
sp_decl_idents_init_vars
- type_with_opt_collate
+ field_type
sp_opt_default
{
if (unlikely(Lex->sp_variable_declarations_finalize(thd, $1,
@@ -6888,19 +6888,26 @@ column_default_expr:
}
;
+field_type: field_type_all
+ {
+ Lex->map_data_type(Lex_ident_sys(), &($$= $1));
+ Lex->last_field->set_attributes($$, Lex->charset);
+ }
+ ;
+
qualified_field_type:
- field_type
+ field_type_all
{
Lex->map_data_type(Lex_ident_sys(), &($$= $1));
}
- | sp_decl_ident '.' field_type
+ | sp_decl_ident '.' field_type_all
{
if (Lex->map_data_type($1, &($$= $3)))
MYSQL_YYABORT;
}
;
-field_type:
+field_type_all:
field_type_numeric
| field_type_temporal
| field_type_string
@@ -7363,20 +7370,6 @@ with_or_without_system:
;
-type_with_opt_collate:
- field_type opt_collate
- {
- Lex->map_data_type(Lex_ident_sys(), &($$= $1));
-
- if ($2)
- {
- if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))))
- MYSQL_YYABORT;
- }
- Lex->last_field->set_attributes($$, Lex->charset);
- }
- ;
-
charset:
CHAR_SYM SET {}
| CHARSET {}
@@ -7450,6 +7443,12 @@ charset_or_alias:
}
;
+collate: COLLATE_SYM collation_name_or_default
+ {
+ Lex->charset= $2;
+ }
+ ;
+
opt_binary:
/* empty */ { bincmp_collation(NULL, false); }
| binary {}
@@ -7460,6 +7459,13 @@ binary:
| charset_or_alias opt_bin_mod { bincmp_collation($1, $2); }
| BINARY { bincmp_collation(NULL, true); }
| BINARY charset_or_alias { bincmp_collation($2, true); }
+ | charset_or_alias collate
+ {
+ if (!my_charset_same(Lex->charset, $1))
+ my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0),
+ Lex->charset->name, $1->csname));
+ }
+ | collate { }
;
opt_bin_mod:
@@ -14989,7 +14995,7 @@ kill:
lex->sql_command= SQLCOM_KILL;
lex->kill_type= KILL_TYPE_ID;
}
- kill_type kill_option kill_expr
+ kill_type kill_option
{
Lex->kill_signal= (killed_state) ($3 | $4);
}
@@ -15002,16 +15008,21 @@ kill_type:
;
kill_option:
- /* empty */ { $$= (int) KILL_CONNECTION; }
- | CONNECTION_SYM { $$= (int) KILL_CONNECTION; }
- | QUERY_SYM { $$= (int) KILL_QUERY; }
- | QUERY_SYM ID_SYM
+ opt_connection kill_expr { $$= (int) KILL_CONNECTION; }
+ | QUERY_SYM kill_expr { $$= (int) KILL_QUERY; }
+ | QUERY_SYM ID_SYM expr
{
$$= (int) KILL_QUERY;
Lex->kill_type= KILL_TYPE_QUERY;
+ Lex->value_list.push_front($3, thd->mem_root);
}
;
+opt_connection:
+ /* empty */ { }
+ | CONNECTION_SYM { }
+ ;
+
kill_expr:
expr
{
@@ -15024,7 +15035,6 @@ kill_expr:
}
;
-
shutdown:
SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; }
shutdown_option {}
@@ -18024,7 +18034,7 @@ sf_return_type:
&empty_clex_str,
thd->variables.collation_database);
}
- type_with_opt_collate
+ field_type
{
if (unlikely(Lex->sphead->fill_field_definition(thd,
Lex->last_field)))
diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy
index 30727a8ffc4..cb58c4aff43 100644
--- a/sql/sql_yacc_ora.yy
+++ b/sql/sql_yacc_ora.yy
@@ -305,7 +305,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
/*
We should not introduce any further shift/reduce conflicts.
*/
-%expect 50
+%expect 70
/*
Comments for TOKENS.
@@ -1288,9 +1288,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <type_handler> int_type real_type
-%type <Lex_field_type> type_with_opt_collate field_type
+%type <Lex_field_type> field_type field_type_all
qualified_field_type
- sp_param_type_with_opt_collate
+ sp_param_type
sp_param_field_type
sp_param_field_type_string
field_type_numeric
@@ -3194,7 +3194,7 @@ sp_param_name:
;
sp_param_name_and_type:
- sp_param_name sp_param_type_with_opt_collate
+ sp_param_name sp_param_type
{
if (unlikely(Lex->sp_param_fill_definition($$= $1)))
MYSQL_YYABORT;
@@ -3238,7 +3238,7 @@ sp_pdparams:
;
sp_pdparam:
- sp_param_name sp_opt_inout sp_param_type_with_opt_collate
+ sp_param_name sp_opt_inout sp_param_type
{
$1->mode= $2;
if (unlikely(Lex->sp_param_fill_definition($1)))
@@ -3407,7 +3407,7 @@ row_field_name:
;
row_field_definition:
- row_field_name type_with_opt_collate
+ row_field_name field_type
;
row_field_definition_list:
@@ -3436,7 +3436,7 @@ sp_decl_idents_init_vars:
sp_decl_vars:
sp_decl_idents_init_vars
- type_with_opt_collate
+ field_type
sp_opt_default
{
if (unlikely(Lex->sp_variable_declarations_finalize(thd, $1,
@@ -6891,19 +6891,26 @@ column_default_expr:
}
;
+field_type: field_type_all
+ {
+ Lex->map_data_type(Lex_ident_sys(), &($$= $1));
+ Lex->last_field->set_attributes($$, Lex->charset);
+ }
+ ;
+
qualified_field_type:
- field_type
+ field_type_all
{
Lex->map_data_type(Lex_ident_sys(), &($$= $1));
}
- | sp_decl_ident '.' field_type
+ | sp_decl_ident '.' field_type_all
{
if (Lex->map_data_type($1, &($$= $3)))
MYSQL_YYABORT;
}
;
-field_type:
+field_type_all:
field_type_numeric
| field_type_temporal
| field_type_string
@@ -7445,30 +7452,10 @@ with_or_without_system:
;
-type_with_opt_collate:
- field_type opt_collate
- {
- Lex->map_data_type(Lex_ident_sys(), &($$= $1));
-
- if ($2)
- {
- if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))))
- MYSQL_YYABORT;
- }
- Lex->last_field->set_attributes($$, Lex->charset);
- }
- ;
-
-sp_param_type_with_opt_collate:
- sp_param_field_type opt_collate
+sp_param_type:
+ sp_param_field_type
{
Lex->map_data_type(Lex_ident_sys(), &($$= $1));
-
- if ($2)
- {
- if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))))
- MYSQL_YYABORT;
- }
Lex->last_field->set_attributes($$, Lex->charset);
}
;
@@ -7546,6 +7533,12 @@ charset_or_alias:
}
;
+collate: COLLATE_SYM collation_name_or_default
+ {
+ Lex->charset= $2;
+ }
+ ;
+
opt_binary:
/* empty */ { bincmp_collation(NULL, false); }
| binary {}
@@ -7556,6 +7549,13 @@ binary:
| charset_or_alias opt_bin_mod { bincmp_collation($1, $2); }
| BINARY { bincmp_collation(NULL, true); }
| BINARY charset_or_alias { bincmp_collation($2, true); }
+ | charset_or_alias collate
+ {
+ if (!my_charset_same(Lex->charset, $1))
+ my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0),
+ Lex->charset->name, $1->csname));
+ }
+ | collate { }
;
opt_bin_mod:
@@ -15113,7 +15113,7 @@ kill:
lex->sql_command= SQLCOM_KILL;
lex->kill_type= KILL_TYPE_ID;
}
- kill_type kill_option kill_expr
+ kill_type kill_option
{
Lex->kill_signal= (killed_state) ($3 | $4);
}
@@ -15126,16 +15126,21 @@ kill_type:
;
kill_option:
- /* empty */ { $$= (int) KILL_CONNECTION; }
- | CONNECTION_SYM { $$= (int) KILL_CONNECTION; }
- | QUERY_SYM { $$= (int) KILL_QUERY; }
- | QUERY_SYM ID_SYM
+ opt_connection kill_expr { $$= (int) KILL_CONNECTION; }
+ | QUERY_SYM kill_expr { $$= (int) KILL_QUERY; }
+ | QUERY_SYM ID_SYM expr
{
$$= (int) KILL_QUERY;
Lex->kill_type= KILL_TYPE_QUERY;
+ Lex->value_list.push_front($3, thd->mem_root);
}
;
+opt_connection:
+ /* empty */ { }
+ | CONNECTION_SYM { }
+ ;
+
kill_expr:
expr
{
@@ -15148,7 +15153,6 @@ kill_expr:
}
;
-
shutdown:
SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; }
shutdown_option {}
@@ -18236,7 +18240,7 @@ sf_return_type:
&empty_clex_str,
thd->variables.collation_database);
}
- sp_param_type_with_opt_collate
+ sp_param_type
{
if (unlikely(Lex->sphead->fill_field_definition(thd,
Lex->last_field)))
diff --git a/sql/table.h b/sql/table.h
index 4c9efdf3529..bb0e087bacb 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -3087,25 +3087,25 @@ typedef struct st_open_table_list{
} OPEN_TABLE_LIST;
-static inline my_bitmap_map *tmp_use_all_columns(TABLE *table,
- MY_BITMAP *bitmap)
+static inline MY_BITMAP *tmp_use_all_columns(TABLE *table,
+ MY_BITMAP **bitmap)
{
- my_bitmap_map *old= bitmap->bitmap;
- bitmap->bitmap= table->s->all_set.bitmap;
+ MY_BITMAP *old= *bitmap;
+ *bitmap= &table->s->all_set;
return old;
}
-static inline void tmp_restore_column_map(MY_BITMAP *bitmap,
- my_bitmap_map *old)
+static inline void tmp_restore_column_map(MY_BITMAP **bitmap,
+ MY_BITMAP *old)
{
- bitmap->bitmap= old;
+ *bitmap= old;
}
/* The following is only needed for debugging */
-static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table,
- MY_BITMAP *bitmap)
+static inline MY_BITMAP *dbug_tmp_use_all_columns(TABLE *table,
+ MY_BITMAP **bitmap)
{
#ifdef DBUG_ASSERT_EXISTS
return tmp_use_all_columns(table, bitmap);
@@ -3114,8 +3114,8 @@ static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table,
#endif
}
-static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap,
- my_bitmap_map *old)
+static inline void dbug_tmp_restore_column_map(MY_BITMAP **bitmap,
+ MY_BITMAP *old)
{
#ifdef DBUG_ASSERT_EXISTS
tmp_restore_column_map(bitmap, old);
@@ -3128,22 +3128,22 @@ static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap,
Provide for the possiblity of the read set being the same as the write set
*/
static inline void dbug_tmp_use_all_columns(TABLE *table,
- my_bitmap_map **save,
- MY_BITMAP *read_set,
- MY_BITMAP *write_set)
+ MY_BITMAP **save,
+ MY_BITMAP **read_set,
+ MY_BITMAP **write_set)
{
#ifdef DBUG_ASSERT_EXISTS
- save[0]= read_set->bitmap;
- save[1]= write_set->bitmap;
+ save[0]= *read_set;
+ save[1]= *write_set;
(void) tmp_use_all_columns(table, read_set);
(void) tmp_use_all_columns(table, write_set);
#endif
}
-static inline void dbug_tmp_restore_column_maps(MY_BITMAP *read_set,
- MY_BITMAP *write_set,
- my_bitmap_map **old)
+static inline void dbug_tmp_restore_column_maps(MY_BITMAP **read_set,
+ MY_BITMAP **write_set,
+ MY_BITMAP **old)
{
#ifdef DBUG_ASSERT_EXISTS
tmp_restore_column_map(read_set, old[0]);
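
The table.h helpers above now save and restore the MY_BITMAP pointer itself (table->read_set or table->write_set) rather than the raw my_bitmap_map buffer stored inside it, which is why every call site in this patch switches from passing table->read_set to &table->read_set and keeps the old value as a MY_BITMAP pointer. A self-contained sketch of the pointer-swap idiom, with a toy bitmap type standing in for MY_BITMAP:

    #include <cassert>

    struct bitmap_demo { int id; };                   // stand-in for MY_BITMAP

    // Same shape as the new tmp_use_all_columns() / tmp_restore_column_map().
    static bitmap_demo *use_all_columns_demo(bitmap_demo **current, bitmap_demo *all_set)
    {
      bitmap_demo *old= *current;
      *current= all_set;                              // make every column readable/writable
      return old;
    }

    static void restore_column_map_demo(bitmap_demo **current, bitmap_demo *old)
    {
      *current= old;
    }

    int main()
    {
      bitmap_demo def_map= {1}, all_set= {2};
      bitmap_demo *read_set= &def_map;                // plays the role of table->read_set
      bitmap_demo *saved= use_all_columns_demo(&read_set, &all_set);
      assert(read_set == &all_set);
      restore_column_map_demo(&read_set, saved);
      assert(read_set == &def_map);                   // original map restored
      return 0;
    }
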
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 17222efe791..b09178de7ed 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -1028,6 +1028,8 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
TABLE table;
TABLE_SHARE share;
Create_field *field;
+ Check_level_instant_set old_count_cuted_fields(thd, CHECK_FIELD_WARN);
+ Abort_on_warning_instant_set old_abort_on_warning(thd, 0);
DBUG_ENTER("make_empty_rec");
/* We need a table to generate columns for default values */
@@ -1046,7 +1048,6 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
null_pos= buff;
List_iterator<Create_field> it(create_fields);
- Check_level_instant_set check_level_save(thd, CHECK_FIELD_WARN);
while ((field=it++))
{
Record_addr addr(buff + field->offset + data_offset,
diff --git a/sql/upgrade_conf_file.cc b/sql/upgrade_conf_file.cc
index 4e167f0263f..b40cce0bbd7 100644
--- a/sql/upgrade_conf_file.cc
+++ b/sql/upgrade_conf_file.cc
@@ -75,6 +75,7 @@ static const char *removed_variables[] =
"innodb_ibuf_accel_rate",
"innodb_ibuf_active_contract",
"innodb_ibuf_max_size",
+"innodb_idle_flush_pct",
"innodb_import_table_from_xtrabackup",
"innodb_instrument_semaphores",
"innodb_kill_idle_transaction",
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index b7a7dadf0dc..23de4d7f7c1 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -265,6 +265,12 @@ static bool sst_auth_real_set (const char* value)
if (wsrep_sst_auth) { my_free((void*) wsrep_sst_auth); }
wsrep_sst_auth= my_strdup(WSREP_SST_AUTH_MASK, MYF(0));
}
+ else
+ {
+ if (wsrep_sst_auth) { my_free((void*) wsrep_sst_auth); }
+ wsrep_sst_auth= NULL;
+ }
+
return 0;
}
return 1;
diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc
index 0f72c132d84..323439025c5 100644
--- a/sql/wsrep_thd.cc
+++ b/sql/wsrep_thd.cc
@@ -68,7 +68,7 @@ static void wsrep_replication_process(THD *thd,
thd->wsrep_rgi->cleanup_after_session();
delete thd->wsrep_rgi;
thd->wsrep_rgi= NULL;
if(thd->has_thd_temporary_tables())
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index fa49b081ad1..689c61a0ff4 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -1547,7 +1547,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
share->rows_recorded= 0;
stats.auto_increment_value= 1;
share->archive_write.auto_increment= 0;
- my_bitmap_map *org_bitmap= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= tmp_use_all_columns(table, &table->read_set);
while (!(rc= get_row(&archive, table->record[0])))
{
@@ -1568,7 +1568,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
}
}
- tmp_restore_column_map(table->read_set, org_bitmap);
+ tmp_restore_column_map(&table->read_set, org_bitmap);
share->rows_recorded= (ha_rows)writer.rows;
}
diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc
index f081dca71c3..1d2331c1a5e 100644
--- a/storage/cassandra/ha_cassandra.cc
+++ b/storage/cassandra/ha_cassandra.cc
@@ -1641,18 +1641,18 @@ int ha_cassandra::index_read_map(uchar *buf, const uchar *key,
char *cass_key;
int cass_key_len;
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len))
{
/* We get here when making lookups like uuid_column='not-an-uuid' */
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_KEY_NOT_FOUND);
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
bool found;
if (se->get_slice(cass_key, cass_key_len, &found))
@@ -1726,8 +1726,8 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
cassandra_to_mariadb() calls will use field->store(...) methods, which
require that the column is in the table->write_set
*/
- my_bitmap_map *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map;
+ old_map= dbug_tmp_use_all_columns(table, &table->write_set);
/* Start with all fields being NULL */
for (field= table->field + 1; *field; field++)
@@ -1848,7 +1848,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk)
}
err:
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return res;
}
@@ -1933,7 +1933,7 @@ void ha_cassandra::free_dynamic_row(DYNAMIC_COLUMN_VALUE **vals,
int ha_cassandra::write_row(const uchar *buf)
{
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
int ires;
DBUG_ENTER("ha_cassandra::write_row");
@@ -1943,7 +1943,7 @@ int ha_cassandra::write_row(const uchar *buf)
if (!doing_insert_batch)
se->clear_insert_buffer();
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
insert_lineno++;
@@ -1954,7 +1954,7 @@ int ha_cassandra::write_row(const uchar *buf)
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
rowkey_converter->field->field_name.str, insert_lineno);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
se->start_row_insert(cass_key, cass_key_len);
@@ -1977,7 +1977,7 @@ int ha_cassandra::write_row(const uchar *buf)
free_dynamic_row(&vals, &names);
if (rc)
{
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(rc);
}
}
@@ -1988,7 +1988,7 @@ int ha_cassandra::write_row(const uchar *buf)
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
field_converters[i]->field->field_name.str, insert_lineno);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
se->add_insert_column(field_converters[i]->field->field_name.str, 0,
@@ -1996,7 +1996,7 @@ int ha_cassandra::write_row(const uchar *buf)
}
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
bool res;
@@ -2263,8 +2263,8 @@ bool ha_cassandra::mrr_start_read()
{
uint key_len;
- my_bitmap_map *old_map;
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map;
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
se->new_lookup_keys();
@@ -2288,7 +2288,7 @@ bool ha_cassandra::mrr_start_read()
break;
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
return se->multiget_slice();
}
@@ -2366,7 +2366,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
LEX_STRING *oldnames, *names;
uint oldcount, count;
String oldvalcol, valcol;
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
int res;
DBUG_ENTER("ha_cassandra::update_row");
/* Currently, it is guaranteed that new_data == table->record[0] */
@@ -2374,7 +2374,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
/* For now, just rewrite the full record */
se->clear_insert_buffer();
- old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->read_set);
char *old_key;
int old_key_len;
@@ -2387,7 +2387,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
rowkey_converter->field->field_name.str, insert_lineno);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
@@ -2450,7 +2450,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
{
my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0),
field_converters[i]->field->field_name.str, insert_lineno);
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
se->add_insert_column(field_converters[i]->field->field_name.str, 0,
@@ -2477,7 +2477,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data)
}
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
res= se->do_insert();
diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt
index 0af3a02a09d..b69d1a04f54 100644
--- a/storage/connect/CMakeLists.txt
+++ b/storage/connect/CMakeLists.txt
@@ -82,6 +82,19 @@ ENDIF(UNIX)
#
+# BSON: the new handling of JSON data included temporarily for testing
+#
+
+OPTION(CONNECT_WITH_BSON "Compile CONNECT storage engine with BSON support" ON)
+
+IF(CONNECT_WITH_BSON)
+ SET(CONNECT_SOURCES ${CONNECT_SOURCES}
+ bson.cpp bsonudf.cpp tabbson.cpp bson.h bsonudf.h tabbson.h)
+ add_definitions(-DBSON_SUPPORT)
+ENDIF(CONNECT_WITH_BSON)
+
+
+#
# VCT: the VEC format might be not supported in future versions
#
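Note: like the other engine options in this file, CONNECT_WITH_BSON above defaults to ON; builders who want to leave the experimental BSON code out can pass -DCONNECT_WITH_BSON=OFF at configure time (standard CMake option handling, nothing added by this patch).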
@@ -318,29 +331,29 @@ ENDIF(CONNECT_WITH_MONGO)
OPTION(CONNECT_WITH_REST "Compile CONNECT storage engine with REST support" ON)
IF(CONNECT_WITH_REST)
- MESSAGE_ONCE(CONNECT_WITH_REST "REST support is ON")
+# MESSAGE(STATUS "=====> REST support is ON")
SET(CONNECT_SOURCES ${CONNECT_SOURCES} tabrest.cpp tabrest.h)
add_definitions(-DREST_SUPPORT)
- FIND_PACKAGE(cpprestsdk QUIET)
- IF (cpprestsdk_FOUND)
- IF(UNIX)
-# INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR})
-# If needed edit next line to set the path to libcpprest.so
- SET(REST_LIBRARY -lcpprest)
- MESSAGE (STATUS ${REST_LIBRARY})
- ELSE(NOT UNIX)
-# Next line sets debug compile mode matching cpprest_2_10d.dll
-# when it was binary installed (can be change later in Visual Studio)
-# Comment it out if not needed depending on your cpprestsdk installation.
- SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd")
- ENDIF(UNIX)
-# IF(REST_LIBRARY) why this? how about Windows
- SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp)
- add_definitions(-DREST_SOURCE)
-# ENDIF()
- ELSE(NOT cpprestsdk_FOUND)
-# MESSAGE(STATUS "=====> cpprestsdk package not found")
- ENDIF (cpprestsdk_FOUND)
+# FIND_PACKAGE(cpprestsdk QUIET)
+# IF (cpprestsdk_FOUND)
+# IF(UNIX)
+## INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR})
+## If needed edit next line to set the path to libcpprest.so
+# SET(REST_LIBRARY -lcpprest)
+# MESSAGE (STATUS ${REST_LIBRARY})
+# ELSE(NOT UNIX)
+## Next line sets debug compile mode matching cpprest_2_10d.dll
+## when it was binary installed (can be change later in Visual Studio)
+## Comment it out if not needed depending on your cpprestsdk installation.
+# SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd")
+# ENDIF(UNIX)
+## IF(REST_LIBRARY) why this? how about Windows
+# SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp)
+# add_definitions(-DREST_SOURCE)
+## ENDIF()
+##ELSE(NOT cpprestsdk_FOUND)
+## MESSAGE(STATUS "=====> cpprestsdk package not found")
+# ENDIF (cpprestsdk_FOUND)
ENDIF(CONNECT_WITH_REST)
#
diff --git a/storage/connect/block.h b/storage/connect/block.h
index 2ca9586ee3f..c10fc4761ac 100644
--- a/storage/connect/block.h
+++ b/storage/connect/block.h
@@ -1,25 +1,25 @@
/**************** Block H Declares Source Code File (.H) ***************/
-/* Name: BLOCK.H Version 2.0 */
+/* Name: BLOCK.H Version 2.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 1998 */
+/* (C) Copyright to the author Olivier BERTRAND 1998 - 2020 */
/* */
/* This file contains the BLOCK pure virtual class definition. */
/*---------------------------------------------------------------------*/
/* Note: one of the main purpose of this base class is to take care */
-/* of the very specific way Plug handles memory allocation. */
+/* of the very specific way Connect handles memory allocation. */
/* Instead of allocating small chunks of storage via new or malloc */
-/* Plug works in its private memory pool in which it does the sub- */
+/* Connect works in its private memory pool in which it does the sub- */
/* allocation using the function PlugSubAlloc. These are never freed */
/* separately but when a transaction is terminated, the entire pool */
/* is set to empty, resulting in a very fast and efficient allocate */
/* process, no garbage collection problem, and an automatic recovery */
-/* procedure (via LongJump) when the memory is exhausted. */
+/* procedure (via throw) when the memory is exhausted. */
/* For this to work new must be given two parameters, first the */
/* global pointer of the Plug application, and an optional pointer to */
/* the memory pool to use, defaulting to NULL meaning using the Plug */
/* standard default memory pool, example: */
-/* tabp = new(g) XTAB("EMPLOYEE"); */
-/* allocates a XTAB class object in the standard Plug memory pool. */
+/* tabp = new(g) XTAB("EMPLOYEE"); */
+/* allocates a XTAB class object in the standard Plug memory pool. */
/***********************************************************************/
#if !defined(BLOCK_DEFINED)
#define BLOCK_DEFINED
@@ -37,21 +37,25 @@ typedef class BLOCK *PBLOCK;
class DllExport BLOCK {
public:
- void * operator new(size_t size, PGLOBAL g, void *p = NULL) {
- xtrc(256, "New BLOCK: size=%d g=%p p=%p\n", size, g, p);
- return (PlugSubAlloc(g, p, size));
- } // end of new
+ void *operator new(size_t size, PGLOBAL g, void *mp = NULL) {
+ xtrc(256, "New BLOCK: size=%d g=%p p=%p\n", size, g, mp);
+ return PlugSubAlloc(g, mp, size);
+ } // end of new
- virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc
+ void* operator new(size_t size, long long mp) {
+ xtrc(256, "Realloc at: mp=%lld\n", mp);
+ return (void*)mp;
+ } // end of new
+
+ virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc
virtual void Prints(PGLOBAL, char *, uint) {} // Produce string desc
-#if !defined(__BORLANDC__)
- // Avoid warning C4291 by defining a matching dummy delete operator
- void operator delete(void *, PGLOBAL, void *) {}
- void operator delete(void *, size_t) {}
-#endif
- virtual ~BLOCK() {}
+ // Avoid gcc errors by defining matching dummy delete operators
+ void operator delete(void*, PGLOBAL, void *) {}
+ void operator delete(void*, long long) {}
+ void operator delete(void*) {}
- }; // end of class BLOCK
+ virtual ~BLOCK() {}
+}; // end of class BLOCK
#endif // !BLOCK_DEFINED
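To make the memory-pool convention documented above concrete, here is a small standalone sketch; it is not part of the patch, and Arena, Node and the helper names are illustrative stand-ins for PGLOBAL/Sarea, BLOCK and PlugSubAlloc, kept only to show the placement-new pattern and the reset-the-whole-pool style of release.

    #include <cstddef>
    #include <cstdio>

    struct Arena {                      // stand-in for the PGLOBAL Sarea pool
      char   buf[4096];
      size_t used = 0;
      void  *alloc(size_t size) {       // bump allocation, as PlugSubAlloc does
        void *p = buf + used;
        used += (size + 3) / 4 * 4;     // round the size up to a multiple of 4
        return p;                       // (no overflow check in this sketch)
      }
      void reset() { used = 0; }        // the pool is recycled wholesale
    };

    struct Node {                       // stand-in for a BLOCK-derived class
      void *operator new(size_t size, Arena &a) { return a.alloc(size); }
      void  operator delete(void *, Arena &) {}  // matching dummy delete
      void  operator delete(void *) {}
      int   value = 7;
    };

    int main() {
      Arena a;
      Node *n = new (a) Node;           // like tabp = new(g) XTAB("EMPLOYEE")
      std::printf("value=%d used=%zu\n", n->value, a.used);
      a.reset();                        // end of transaction: everything released at once
      return 0;
    }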
diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp
new file mode 100644
index 00000000000..f6a8db67d42
--- /dev/null
+++ b/storage/connect/bson.cpp
@@ -0,0 +1,1789 @@
+/*************** bson CPP Declares Source Code File (.CPP) *************/
+/* Name: bson.cpp Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* */
+/* This file contains the BJSON classes' functions. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/***********************************************************************/
+#include <my_global.h>
+
+/***********************************************************************/
+/* Include application header files: */
+/* global.h is header containing all global declarations. */
+/* plgdbsem.h is header containing the DB application declarations. */
+/* bson.h is header containing the BSON classes declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "bson.h"
+
+/***********************************************************************/
+/* Check macro. */
+/***********************************************************************/
+#if defined(_DEBUG)
+#define CheckType(X,Y) if (!X || X ->Type != Y) throw MSG(VALTYPE_NOMATCH);
+#else
+#define CheckType(X,Y)
+#endif
+
+#if defined(__WIN__)
+#define EL "\r\n"
+#else
+#define EL "\n"
+#undef SE_CATCH // Does not work for Linux
+#endif
+
+#if defined(SE_CATCH)
+/**************************************************************************/
+/* This is the support of catching C interrupts to prevent crashes. */
+/**************************************************************************/
+#include <eh.h>
+
+class SE_Exception {
+public:
+ SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
+ ~SE_Exception() {}
+
+ unsigned int nSE;
+ PEXCEPTION_RECORD eRec;
+}; // end of class SE_Exception
+
+void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp) {
+ throw SE_Exception(u, pExp->ExceptionRecord);
+} // end of trans_func
+
+char* GetExceptionDesc(PGLOBAL g, unsigned int e);
+#endif // SE_CATCH
+
+/* --------------------------- Class BDOC ---------------------------- */
+
+/***********************************************************************/
+/* BDOC constructor. */
+/***********************************************************************/
+BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL)
+{
+ jp = NULL;
+ s = NULL;
+ len = 0;
+ pretty = 3;
+ pty[0] = pty[1] = pty[2] = true;
+ comma = false;
+} // end of BDOC constructor
+
+/***********************************************************************/
+/* Parse a json string. */
+/* Note: when pretty is not known, the caller sets pretty to 3. */
+/***********************************************************************/
+PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng)
+{
+ int i;
+ bool b = false, ptyp = (bool *)pty;
+ PBVAL bvp = NULL;
+
+ s = js;
+ len = lng;
+ xtrc(1, "BDOC::ParseJson: s=%.10s len=%zd\n", s, len);
+
+ if (!s || !len) {
+ strcpy(g->Message, "Void JSON object");
+ return NULL;
+ } // endif s
+
+ // Trying to guess the pretty format
+ if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
+ pty[0] = false;
+
+ try {
+ bvp = NewVal();
+ bvp->Type = TYPE_UNKNOWN;
+
+ for (i = 0; i < len; i++)
+ switch (s[i]) {
+ case '[':
+ if (bvp->Type != TYPE_UNKNOWN)
+ bvp->To_Val = ParseAsArray(i);
+ else
+ bvp->To_Val = ParseArray(++i);
+
+ bvp->Type = TYPE_JAR;
+ break;
+ case '{':
+ if (bvp->Type != TYPE_UNKNOWN) {
+ bvp->To_Val = ParseAsArray(i);
+ bvp->Type = TYPE_JAR;
+ } else {
+ bvp->To_Val = ParseObject(++i);
+ bvp->Type = TYPE_JOB;
+ } // endif Type
+
+ break;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ case ',':
+ if (bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) {
+ comma = true;
+ pty[0] = pty[2] = false;
+ break;
+ } // endif pretty
+
+ sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
+ throw 3;
+ case '(':
+ b = true;
+ break;
+ case ')':
+ if (b) {
+ b = false;
+ break;
+ } // endif b
+
+ default:
+ if (bvp->Type != TYPE_UNKNOWN) {
+ bvp->To_Val = ParseAsArray(i);
+ bvp->Type = TYPE_JAR;
+ } else if ((bvp->To_Val = MOF(ParseValue(i, NewVal()))))
+ bvp->Type = TYPE_JVAL;
+ else
+ throw 4;
+
+ break;
+ }; // endswitch s[i]
+
+ if (bvp->Type == TYPE_UNKNOWN)
+ sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s);
+ else if (pretty == 3) {
+ for (i = 0; i < 3; i++)
+ if (pty[i]) {
+ pretty = i;
+ break;
+ } // endif pty
+
+ } // endif ptyp
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, G->Message);
+ GetMsg(g);
+ bvp = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ bvp = NULL;
+ } // end catch
+
+ return bvp;
+} // end of ParseJson
+
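As a simplified illustration of the dispatch ParseJson performs on the first significant character (array, object, or bare value), here is a standalone sketch; classify() is an invented name, and the sketch deliberately omits the pretty-format guessing and the folding of several top-level items into an array that the real function also handles.

    #include <cctype>
    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    enum class JsonKind { Array, Object, Value, Empty };

    static JsonKind classify(const char *s, size_t len) {
      for (size_t i = 0; i < len; i++) {
        if (isspace((unsigned char)s[i]))
          continue;                              // skip blanks, tabs and line ends
        if (s[i] == '[') return JsonKind::Array; // TYPE_JAR in bson.cpp
        if (s[i] == '{') return JsonKind::Object;// TYPE_JOB
        return JsonKind::Value;                  // TYPE_JVAL (string, number, ...)
      }
      return JsonKind::Empty;                    // the "Void JSON object" case
    }

    int main() {
      const char *doc = "  {\"name\": \"EMPLOYEE\"}";
      JsonKind k = classify(doc, strlen(doc));
      std::printf("kind=%d\n", (int)k);          // prints kind=1 (Object)
      return 0;
    }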
+/***********************************************************************/
+/* Parse several items as being in an array. */
+/***********************************************************************/
+OFFSET BDOC::ParseAsArray(int& i) {
+ if (pty[0] && (!pretty || pretty > 2)) {
+ OFFSET jsp;
+
+ if ((jsp = ParseArray((i = 0))) && pretty == 3)
+ pretty = (pty[0]) ? 0 : 3;
+
+ return jsp;
+ } else
+ strcpy(G->Message, "More than one item in file");
+
+ return 0;
+} // end of ParseAsArray
+
+/***********************************************************************/
+/* Parse a JSON Array. */
+/***********************************************************************/
+OFFSET BDOC::ParseArray(int& i)
+{
+ int level = 0;
+ bool b = (!i);
+ PBVAL vlp, firstvlp, lastvlp;
+
+ vlp = firstvlp = lastvlp = NULL;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case ',':
+ if (level < 2) {
+ sprintf(G->Message, "Unexpected ',' near %.*s", ARGS);
+ throw 1;
+ } else
+ level = 1;
+
+ break;
+ case ']':
+ if (level == 1) {
+ sprintf(G->Message, "Unexpected ',]' near %.*s", ARGS);
+ throw 1;
+ } // endif level
+
+ return MOF(firstvlp);
+ case '\n':
+ if (!b)
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ if (level == 2) {
+ sprintf(G->Message, "Unexpected value near %.*s", ARGS);
+ throw 1;
+ } else if (lastvlp) {
+ vlp = ParseValue(i, NewVal());
+ lastvlp->Next = MOF(vlp);
+ lastvlp = vlp;
+ } else
+ firstvlp = lastvlp = ParseValue(i, NewVal());
+
+ level = (b) ? 1 : 2;
+ break;
+ }; // endswitch s[i]
+
+ if (b) {
+ // Case of Pretty == 0
+ return MOF(firstvlp);
+ } // endif b
+
+ throw ("Unexpected EOF in array");
+} // end of ParseArray
+
+/***********************************************************************/
+/* Parse a JSON Object. */
+/***********************************************************************/
+OFFSET BDOC::ParseObject(int& i)
+{
+ OFFSET key;
+ int level = 0;
+ PBPR bpp, firstbpp, lastbpp;
+
+ bpp = firstbpp = lastbpp = NULL;
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '"':
+ if (level < 2) {
+ key = ParseString(++i);
+ bpp = NewPair(key);
+
+ if (lastbpp) {
+ lastbpp->Vlp.Next = MOF(bpp);
+ lastbpp = bpp;
+ } else
+ firstbpp = lastbpp = bpp;
+
+ level = 2;
+ } else {
+ sprintf(G->Message, "misplaced string near %.*s", ARGS);
+ throw 2;
+ } // endif level
+
+ break;
+ case ':':
+ if (level == 2) {
+ ParseValue(++i, GetVlp(lastbpp));
+ level = 3;
+ } else {
+ sprintf(G->Message, "Unexpected ':' near %.*s", ARGS);
+ throw 2;
+ } // endif level
+
+ break;
+ case ',':
+ if (level < 3) {
+ sprintf(G->Message, "Unexpected ',' near %.*s", ARGS);
+ throw 2;
+ } else
+ level = 1;
+
+ break;
+ case '}':
+ if (!(level == 0 || level == 3)) {
+ sprintf(G->Message, "Unexpected '}' near %.*s", ARGS);
+ throw 2;
+ } // endif level
+
+ return MOF(firstbpp);
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ sprintf(G->Message, "Unexpected character '%c' near %.*s",
+ s[i], ARGS);
+ throw 2;
+ }; // endswitch s[i]
+
+ strcpy(G->Message, "Unexpected EOF in Object");
+ throw 2;
+} // end of ParseObject
+
+/***********************************************************************/
+/* Parse a JSON Value. */
+/***********************************************************************/
+PBVAL BDOC::ParseValue(int& i, PBVAL bvp)
+{
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ goto suite;
+ } // endswitch
+
+suite:
+ switch (s[i]) {
+ case '[':
+ bvp->To_Val = ParseArray(++i);
+ bvp->Type = TYPE_JAR;
+ break;
+ case '{':
+ bvp->To_Val = ParseObject(++i);
+ bvp->Type = TYPE_JOB;
+ break;
+ case '"':
+ bvp->To_Val = ParseString(++i);
+ bvp->Type = TYPE_STRG;
+ break;
+ case 't':
+ if (!strncmp(s + i, "true", 4)) {
+ bvp->B = true;
+ bvp->Type = TYPE_BOOL;
+ i += 3;
+ } else
+ goto err;
+
+ break;
+ case 'f':
+ if (!strncmp(s + i, "false", 5)) {
+ bvp->B = false;
+ bvp->Type = TYPE_BOOL;
+ i += 4;
+ } else
+ goto err;
+
+ break;
+ case 'n':
+ if (!strncmp(s + i, "null", 4)) {
+ bvp->Type = TYPE_NULL;
+ i += 3;
+ } else
+ goto err;
+
+ break;
+ case '-':
+ default:
+ if (s[i] == '-' || isdigit(s[i]))
+ ParseNumeric(i, bvp);
+ else
+ goto err;
+
+ }; // endswitch s[i]
+
+ return bvp;
+
+err:
+ sprintf(G->Message, "Unexpected character '%c' near %.*s", s[i], ARGS);
+ throw 3;
+} // end of ParseValue
+
+/***********************************************************************/
+/* Unescape and parse a JSON string. */
+/***********************************************************************/
+OFFSET BDOC::ParseString(int& i)
+{
+ uchar* p;
+ int n = 0;
+
+ // Be sure of memory availability
+ if (((size_t)len + 1 - i) > ((PPOOLHEADER)G->Sarea)->FreeBlk)
+ throw("ParseString: Out of memory");
+
+ // The size to allocate is not known yet
+ p = (uchar*)BsonSubAlloc(0);
+
+ for (; i < len; i++)
+ switch (s[i]) {
+ case '"':
+ p[n++] = 0;
+ BsonSubAlloc(n);
+ return MOF(p);
+ case '\\':
+ if (++i < len) {
+ if (s[i] == 'u') {
+ if (len - i > 5) {
+ // if (charset == utf8) {
+ char xs[5];
+ uint hex;
+
+ xs[0] = s[++i];
+ xs[1] = s[++i];
+ xs[2] = s[++i];
+ xs[3] = s[++i];
+ xs[4] = 0;
+ hex = strtoul(xs, NULL, 16);
+
+ if (hex < 0x80) {
+ p[n] = (uchar)hex;
+ } else if (hex < 0x800) {
+ p[n++] = (uchar)(0xC0 | (hex >> 6));
+ p[n] = (uchar)(0x80 | (hex & 0x3F));
+ } else if (hex < 0x10000) {
+ p[n++] = (uchar)(0xE0 | (hex >> 12));
+ p[n++] = (uchar)(0x80 | ((hex >> 6) & 0x3f));
+ p[n] = (uchar)(0x80 | (hex & 0x3f));
+ } else
+ p[n] = '?';
+
+#if 0
+ } else {
+ char xs[3];
+ UINT hex;
+
+ i += 2;
+ xs[0] = s[++i];
+ xs[1] = s[++i];
+ xs[2] = 0;
+ hex = strtoul(xs, NULL, 16);
+ p[n] = (char)hex;
+ } // endif charset
+#endif // 0
+ } else
+ goto err;
+
+ } else switch (s[i]) {
+ case 't': p[n] = '\t'; break;
+ case 'n': p[n] = '\n'; break;
+ case 'r': p[n] = '\r'; break;
+ case 'b': p[n] = '\b'; break;
+ case 'f': p[n] = '\f'; break;
+ default: p[n] = s[i]; break;
+ } // endswitch
+
+ n++;
+ } else
+ goto err;
+
+ break;
+ default:
+ p[n++] = s[i];
+ break;
+}; // endswitch s[i]
+
+err:
+throw("Unexpected EOF in String");
+} // end of ParseString
+
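The \uXXXX branch above decodes the escape and re-emits it as one to three UTF-8 bytes. The standalone sketch below shows the same three-range encoding for a basic-multilingual-plane code point; encode_utf8 is an illustrative helper, not code from the patch.

    #include <cstdio>
    #include <cstdlib>

    static int encode_utf8(unsigned hex, unsigned char out[3]) {
      if (hex < 0x80) {                 // plain ASCII
        out[0] = (unsigned char)hex;
        return 1;
      } else if (hex < 0x800) {         // two-byte sequence
        out[0] = (unsigned char)(0xC0 | (hex >> 6));
        out[1] = (unsigned char)(0x80 | (hex & 0x3F));
        return 2;
      } else {                          // three-byte sequence (rest of the BMP)
        out[0] = (unsigned char)(0xE0 | (hex >> 12));
        out[1] = (unsigned char)(0x80 | ((hex >> 6) & 0x3F));
        out[2] = (unsigned char)(0x80 | (hex & 0x3F));
        return 3;
      }
    }

    int main() {
      unsigned hex = strtoul("00e9", nullptr, 16);   // "\u00e9" = 'e' with acute accent
      unsigned char out[3];
      int n = encode_utf8(hex, out);
      for (int i = 0; i < n; i++)
        std::printf("%02X ", out[i]);                // prints C3 A9
      std::printf("\n");
      return 0;
    }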
+/***********************************************************************/
+/* Parse a JSON numeric value. */
+/***********************************************************************/
+void BDOC::ParseNumeric(int& i, PBVAL vlp)
+{
+ char buf[50];
+ int n = 0;
+ short nd = 0;
+ bool has_dot = false;
+ bool has_e = false;
+ bool found_digit = false;
+
+ for (; i < len; i++) {
+ switch (s[i]) {
+ case '.':
+ if (!found_digit || has_dot || has_e)
+ goto err;
+
+ has_dot = true;
+ break;
+ case 'e':
+ case 'E':
+ if (!found_digit || has_e)
+ goto err;
+
+ has_e = true;
+ found_digit = false;
+ break;
+ case '+':
+ if (!has_e)
+ goto err;
+
+ // fall through
+ case '-':
+ if (found_digit)
+ goto err;
+
+ break;
+ default:
+ if (isdigit(s[i])) {
+ if (has_dot && !has_e)
+ nd++; // Number of decimals
+
+ found_digit = true;
+ } else
+ goto fin;
+
+ }; // endswitch s[i]
+
+ buf[n++] = s[i];
+ } // endfor i
+
+fin:
+ if (found_digit) {
+ buf[n] = 0;
+
+ if (has_dot || has_e) {
+ double dv = atof(buf);
+
+ if (nd >= 6 || dv > FLT_MAX || dv < FLT_MIN) {
+ double* dvp = (double*)PlugSubAlloc(G, NULL, sizeof(double));
+
+ *dvp = dv;
+ vlp->To_Val = MOF(dvp);
+ vlp->Type = TYPE_DBL;
+ } else {
+ vlp->F = (float)dv;
+ vlp->Type = TYPE_FLOAT;
+ } // endif nd
+
+ vlp->Nd = MY_MIN(nd, 16);
+ } else {
+ longlong iv = strtoll(buf, NULL, 10);
+
+ if (iv > INT_MAX32 || iv < INT_MIN32) {
+ longlong *llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong));
+
+ *llp = iv;
+ vlp->To_Val = MOF(llp);
+ vlp->Type = TYPE_BINT;
+ } else {
+ vlp->N = (int)iv;
+ vlp->Type = TYPE_INTG;
+ } // endif iv
+
+ } // endif has
+
+ i--; // Unstack following character
+ return;
+ } else
+ throw("No digit found");
+
+err:
+ throw("Unexpected EOF in number");
+} // end of ParseNumeric
+
+/***********************************************************************/
+/* Serialize a BJSON document tree: */
+/***********************************************************************/
+PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty)
+{
+ PSZ str = NULL;
+ bool b = false, err = true;
+ FILE* fs = NULL;
+
+ G->Message[0] = 0;
+
+ try {
+ if (!bvp) {
+ strcpy(g->Message, "Null json tree");
+ throw 1;
+ } else if (!fn) {
+ // Serialize to a string
+ jp = new(g) JOUTSTR(g);
+ b = pretty == 1;
+ } else {
+ if (!(fs = fopen(fn, "wb"))) {
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR),
+ "w", (int)errno, fn);
+ strcat(strcat(g->Message, ": "), strerror(errno));
+ throw 2;
+ } else if (pretty >= 2) {
+ // Serialize to a pretty file
+ jp = new(g)JOUTPRT(g, fs);
+ } else {
+ // Serialize to a flat file
+ b = true;
+ jp = new(g)JOUTFILE(g, fs, pretty);
+ } // endif's
+
+ } // endif's
+
+ switch (bvp->Type) {
+ case TYPE_JAR:
+ err = SerializeArray(bvp->To_Val, b);
+ break;
+ case TYPE_JOB:
+ err = ((b && jp->Prty()) && jp->WriteChr('\t'));
+ err |= SerializeObject(bvp->To_Val);
+ break;
+ case TYPE_JVAL:
+ err = SerializeValue(MVP(bvp->To_Val));
+ break;
+ default:
+ err = SerializeValue(bvp, true);
+ } // endswitch Type
+
+ if (fs) {
+ fputs(EL, fs);
+ fclose(fs);
+ str = (err) ? NULL : strcpy(g->Message, "Ok");
+ } else if (!err) {
+ str = ((JOUTSTR*)jp)->Strp;
+ jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
+ } else if (G->Message[0])
+ strcpy(g->Message, "Error in Serialize");
+ else
+ GetMsg(g);
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, G->Message);
+ GetMsg(g);
+ str = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ str = NULL;
+ } // end catch
+
+ return str;
+} // end of Serialize
+
+
+/***********************************************************************/
+/* Serialize a JSON Array. */
+/***********************************************************************/
+bool BDOC::SerializeArray(OFFSET arp, bool b)
+{
+ bool first = true;
+ PBVAL vp = MVP(arp);
+
+ if (b) {
+ if (jp->Prty()) {
+ if (jp->WriteChr('['))
+ return true;
+ else if (jp->Prty() == 1 && (jp->WriteStr(EL) || jp->WriteChr('\t')))
+ return true;
+
+ } // endif Prty
+
+ } else if (jp->WriteChr('['))
+ return true;
+
+ for (vp; vp; vp = MVP(vp->Next)) {
+ if (first)
+ first = false;
+ else if ((!b || jp->Prty()) && jp->WriteChr(','))
+ return true;
+ else if (b) {
+ if (jp->Prty() < 2 && jp->WriteStr(EL))
+ return true;
+ else if (jp->Prty() == 1 && jp->WriteChr('\t'))
+ return true;
+
+ } // endif b
+
+ if (SerializeValue(vp))
+ return true;
+
+ } // endfor vp
+
+ if (b && jp->Prty() == 1 && jp->WriteStr(EL))
+ return true;
+
+ return ((!b || jp->Prty()) && jp->WriteChr(']'));
+} // end of SerializeArray
+
+/***********************************************************************/
+/* Serialize a JSON Object. */
+/***********************************************************************/
+bool BDOC::SerializeObject(OFFSET obp)
+{
+ bool first = true;
+ PBPR prp = MPP(obp);
+
+ if (jp->WriteChr('{'))
+ return true;
+
+ for (prp; prp; prp = GetNext(prp)) {
+ if (first)
+ first = false;
+ else if (jp->WriteChr(','))
+ return true;
+
+ if (jp->WriteChr('"') ||
+ jp->WriteStr(MZP(prp->Key)) ||
+ jp->WriteChr('"') ||
+ jp->WriteChr(':') ||
+ SerializeValue(GetVlp(prp)))
+ return true;
+
+ } // endfor i
+
+ return jp->WriteChr('}');
+} // end of SerializeObject
+
+/***********************************************************************/
+/* Serialize a JSON Value. */
+/***********************************************************************/
+bool BDOC::SerializeValue(PBVAL jvp, bool b)
+{
+ char buf[64];
+
+ if (jvp) switch (jvp->Type) {
+ case TYPE_JAR:
+ return SerializeArray(jvp->To_Val, false);
+ case TYPE_JOB:
+ return SerializeObject(jvp->To_Val);
+ case TYPE_BOOL:
+ return jp->WriteStr(jvp->B ? "true" : "false");
+ case TYPE_STRG:
+ case TYPE_DTM:
+ if (b) {
+ return jp->WriteStr(MZP(jvp->To_Val));
+ } else
+ return jp->Escape(MZP(jvp->To_Val));
+
+ case TYPE_INTG:
+ sprintf(buf, "%d", jvp->N);
+ return jp->WriteStr(buf);
+ case TYPE_BINT:
+ sprintf(buf, "%lld", *(longlong*)MakePtr(Base, jvp->To_Val));
+ return jp->WriteStr(buf);
+ case TYPE_FLOAT:
+ sprintf(buf, "%.*f", jvp->Nd, jvp->F);
+ return jp->WriteStr(buf);
+ case TYPE_DBL:
+ sprintf(buf, "%.*lf", jvp->Nd, *(double*)MakePtr(Base, jvp->To_Val));
+ return jp->WriteStr(buf);
+ case (char)TYPE_NULL:
+ return jp->WriteStr("null");
+ case TYPE_JVAL:
+ return SerializeValue(MVP(jvp->To_Val));
+ default:
+ return jp->WriteStr("???"); // TODO
+ } // endswitch Type
+
+ return jp->WriteStr("null");
+} // end of SerializeValue
+
+/* --------------------------- Class BJSON --------------------------- */
+
+/***********************************************************************/
+/* Program for sub-allocating Bjson structures. */
+/***********************************************************************/
+void* BJSON::BsonSubAlloc(size_t size)
+{
+ PPOOLHEADER pph; /* Points on area header. */
+ void* memp = G->Sarea;
+
+ size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */
+ pph = (PPOOLHEADER)memp;
+
+ xtrc(16, "SubAlloc in %p size=%zd used=%zd free=%zd\n",
+ memp, size, pph->To_Free, pph->FreeBlk);
+
+ if (size > pph->FreeBlk) { /* Not enough memory left in pool */
+ sprintf(G->Message,
+ "Not enough memory for request of %zd (used=%zd free=%zd)",
+ size, pph->To_Free, pph->FreeBlk);
+ xtrc(1, "BsonSubAlloc: %s\n", G->Message);
+
+ if (Throw)
+ throw(1234);
+ else
+ return NULL;
+
+ } /* endif size OS32 code */
+
+ // Do the suballocation the simplest way
+ memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */
+ pph->To_Free += size; /* New offset of pool free block */
+ pph->FreeBlk -= size; /* New size of pool free block */
+ xtrc(16, "Done memp=%p used=%zd free=%zd\n",
+ memp, pph->To_Free, pph->FreeBlk);
+ return memp;
+} // end of BsonSubAlloc
+
+/*********************************************************************************/
+/* Sub-allocate a copy of the given string in the memory pool. */
+/*********************************************************************************/
+PSZ BJSON::NewStr(PSZ str)
+{
+ if (str) {
+ PSZ sm = (PSZ)BsonSubAlloc(strlen(str) + 1);
+
+ strcpy(sm, str);
+ return sm;
+ } else
+ return NULL;
+
+} // end of NewStr
+
+/*********************************************************************************/
+/* Program for SubSet re-initialization of the memory pool. */
+/*********************************************************************************/
+void BJSON::SubSet(bool b)
+{
+ PPOOLHEADER pph = (PPOOLHEADER)G->Sarea;
+
+ pph->To_Free = (G->Saved_Size) ? G->Saved_Size : sizeof(POOLHEADER);
+ pph->FreeBlk = G->Sarea_Size - pph->To_Free;
+
+ if (b)
+ G->Saved_Size = 0;
+
+} // end of SubSet
+
+/*********************************************************************************/
+/* Set the beginning of suballocations. */
+/*********************************************************************************/
+void BJSON::MemSet(size_t size)
+{
+ PPOOLHEADER pph = (PPOOLHEADER)G->Sarea;
+
+ pph->To_Free = size + sizeof(POOLHEADER);
+ pph->FreeBlk = G->Sarea_Size - pph->To_Free;
+} // end of MemSet
+
+ /* ------------------------ Bobject functions ------------------------ */
+
+/***********************************************************************/
+/* Set the pair's value from the given BVAL (or to null). */
+/***********************************************************************/
+void BJSON::SetPairValue(PBPR brp, PBVAL bvp)
+{
+ if (bvp) {
+ brp->Vlp.To_Val = bvp->To_Val;
+ brp->Vlp.Nd = bvp->Nd;
+ brp->Vlp.Type = bvp->Type;
+ } else {
+ brp->Vlp.To_Val = 0;
+ brp->Vlp.Nd = 0;
+ brp->Vlp.Type = TYPE_NULL;
+ } // endif bvp
+
+} // end of SetPairValue
+
+ /***********************************************************************/
+/* Sub-allocate and initialize a BPAIR. */
+/***********************************************************************/
+PBPR BJSON::NewPair(OFFSET key, int type)
+{
+ PBPR bpp = (PBPR)BsonSubAlloc(sizeof(BPAIR));
+
+ bpp->Key = key;
+ bpp->Vlp.Ktp = TYPE_STRG;
+ bpp->Vlp.Type = type;
+ bpp->Vlp.To_Val = 0;
+ bpp->Vlp.Nd = 0;
+ bpp->Vlp.Next = 0;
+ return bpp;
+} // end of NewPair
+
+/***********************************************************************/
+/* Return the number of pairs in this object. */
+/***********************************************************************/
+int BJSON::GetObjectSize(PBVAL bop, bool b)
+{
+ CheckType(bop, TYPE_JOB);
+ int n = 0;
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ // If b return only non null pairs
+ if (!b || (brp->Vlp.To_Val && brp->Vlp.Type != TYPE_NULL))
+ n++;
+
+ return n;
+} // end of GetObjectSize
+
+/***********************************************************************/
+/* Add a new pair to an Object and return it. */
+/***********************************************************************/
+PBVAL BJSON::AddPair(PBVAL bop, PSZ key, int type)
+{
+ CheckType(bop, TYPE_JOB);
+ PBPR brp;
+ OFFSET nrp = NewPair(key, type);
+
+ if (bop->To_Val) {
+ for (brp = GetObject(bop); brp->Vlp.Next; brp = GetNext(brp));
+
+ brp->Vlp.Next = nrp;
+ } else
+ bop->To_Val = nrp;
+
+ bop->Nd++;
+ return GetVlp(MPP(nrp));
+} // end of AddPair
+
+/***********************************************************************/
+/* Return all object keys as an array. */
+/***********************************************************************/
+PBVAL BJSON::GetKeyList(PBVAL bop)
+{
+ CheckType(bop, TYPE_JOB);
+ PBVAL arp = NewVal(TYPE_JAR);
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ AddArrayValue(arp, MOF(SubAllocVal(brp->Key, TYPE_STRG)));
+
+ return arp;
+} // end of GetKeyList
+
+/***********************************************************************/
+/* Return all object values as an array. */
+/***********************************************************************/
+PBVAL BJSON::GetObjectValList(PBVAL bop)
+{
+ CheckType(bop, TYPE_JOB);
+ PBVAL arp = NewVal(TYPE_JAR);
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ AddArrayValue(arp, DupVal(GetVlp(brp)));
+
+ return arp;
+} // end of GetObjectValList
+
+/***********************************************************************/
+/* Get the value corresponding to the given key. */
+/***********************************************************************/
+PBVAL BJSON::GetKeyValue(PBVAL bop, PSZ key)
+{
+ CheckType(bop, TYPE_JOB);
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ if (!strcmp(GetKey(brp), key))
+ return GetVlp(brp);
+
+ return NULL;
+} // end of GetKeyValue;
+
+/***********************************************************************/
+/* Return the text corresponding to all keys (XML like). */
+/***********************************************************************/
+PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text)
+{
+ CheckType(bop, TYPE_JOB);
+ PBPR brp = GetObject(bop);
+
+ if (brp) {
+ bool b;
+
+ if (!text) {
+ text = new(g) STRING(g, 256);
+ b = true;
+ } else {
+ if (text->GetLastChar() != ' ')
+ text->Append(' ');
+
+ b = false;
+ } // endif text
+
+ if (b && !brp->Vlp.Next && !strcmp(MZP(brp->Key), "$date")) {
+ int i;
+ PSZ s;
+
+ GetValueText(g, GetVlp(brp), text);
+ s = text->GetStr();
+ i = (s[1] == '-' ? 2 : 1);
+
+ if (IsNum(s + i)) {
+ // Date is in milliseconds
+ int j = text->GetLength();
+
+ if (j >= 4 + i) {
+ s[j - 3] = 0; // Change it to seconds
+ text->SetLength((uint)strlen(s));
+ } else
+ text->Set(" 0");
+
+ } // endif text
+
+ } else for (; brp; brp = GetNext(brp)) {
+ GetValueText(g, GetVlp(brp), text);
+
+ if (brp->Vlp.Next)
+ text->Append(' ');
+
+ } // endfor brp
+
+ if (b) {
+ text->Trim();
+ return text->GetStr();
+ } // endif b
+
+ } // endif bop
+
+ return NULL;
+} // end of GetObjectText;
+
+/***********************************************************************/
+/* Set or add a value corresponding to the given key. */
+/***********************************************************************/
+void BJSON::SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key)
+{
+ CheckType(bop, TYPE_JOB);
+ PBPR brp, prp = NULL;
+
+ if (bop->To_Val) {
+ for (brp = GetObject(bop); brp; brp = GetNext(brp))
+ if (!strcmp(GetKey(brp), key))
+ break;
+ else
+ prp = brp;
+
+ if (!brp)
+ brp = MPP(prp->Vlp.Next = NewPair(key));
+
+ } else
+ brp = MPP(bop->To_Val = NewPair(key));
+
+ SetPairValue(brp, MVP(bvp));
+ bop->Nd++;
+} // end of SetKeyValue
+
+/***********************************************************************/
+/* Merge two objects. */
+/***********************************************************************/
+PBVAL BJSON::MergeObject(PBVAL bop1, PBVAL bop2)
+{
+ CheckType(bop1, TYPE_JOB);
+ CheckType(bop2, TYPE_JOB);
+
+ if (bop1->To_Val)
+ for (PBPR brp = GetObject(bop2); brp; brp = GetNext(brp))
+ SetKeyValue(bop1, GetVlp(brp), GetKey(brp));
+
+ else {
+ bop1->To_Val = bop2->To_Val;
+ bop1->Nd = bop2->Nd;
+ } // endelse To_Val
+
+ return bop1;
+} // end of MergeObject;
+
+/***********************************************************************/
+/* Delete a value corresponding to the given key. */
+/***********************************************************************/
+bool BJSON::DeleteKey(PBVAL bop, PCSZ key)
+{
+ CheckType(bop, TYPE_JOB);
+ PBPR brp, pbrp = NULL;
+
+ for (brp = GetObject(bop); brp; brp = GetNext(brp))
+ if (!strcmp(MZP(brp->Key), key)) {
+ if (pbrp) {
+ pbrp->Vlp.Next = brp->Vlp.Next;
+ } else
+ bop->To_Val = brp->Vlp.Next;
+
+ bop->Nd--;
+ return true;
+ } else
+ pbrp = brp;
+
+ return false;
+} // end of DeleteKey
+
+/***********************************************************************/
+/* True if void or if all members are nulls. */
+/***********************************************************************/
+bool BJSON::IsObjectNull(PBVAL bop)
+{
+ CheckType(bop, TYPE_JOB);
+
+ for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp))
+ if (brp->Vlp.To_Val && brp->Vlp.Type != TYPE_NULL)
+ return false;
+
+ return true;
+} // end of IsObjectNull
+
+/* ------------------------- Barray functions ------------------------ */
+
+/***********************************************************************/
+/* Return the number of values in this array. */
+/***********************************************************************/
+int BJSON::GetArraySize(PBVAL bap, bool b)
+{
+ CheckType(bap, TYPE_JAR);
+ int n = 0;
+
+ for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp))
+ // If b, return only non null values
+ if (!b || bvp->Type != TYPE_NULL)
+ n++;
+
+ return n;
+} // end of GetArraySize
+
+/***********************************************************************/
+/* Get the Nth value of an Array. */
+/***********************************************************************/
+PBVAL BJSON::GetArrayValue(PBVAL bap, int n)
+{
+ CheckType(bap, TYPE_JAR);
+ int i = 0;
+
+ for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++)
+ if (i == n)
+ return bvp;
+
+ return NULL;
+} // end of GetArrayValue
+
+/***********************************************************************/
+/* Add a Value to the Array Value list. */
+/***********************************************************************/
+void BJSON::AddArrayValue(PBVAL bap, OFFSET nbv, int* x)
+{
+ CheckType(bap, TYPE_JAR);
+ int i = 0;
+ PBVAL bvp, lbp = NULL;
+
+ if (!nbv)
+ nbv = MOF(NewVal());
+
+ for (bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++)
+ if (x && i == *x)
+ break;
+ else
+ lbp = bvp;
+
+ if (lbp) {
+ MVP(nbv)->Next = lbp->Next;
+ lbp->Next = nbv;
+ } else {
+ MVP(nbv)->Next = bap->To_Val;
+ bap->To_Val = nbv;
+ } // endif lbp
+
+ bap->Nd++;
+} // end of AddArrayValue
+
+/***********************************************************************/
+/* Merge two arrays. */
+/***********************************************************************/
+void BJSON::MergeArray(PBVAL bap1, PBVAL bap2)
+{
+ CheckType(bap1, TYPE_JAR);
+ CheckType(bap2, TYPE_JAR);
+
+ if (bap1->To_Val) {
+ for (PBVAL bvp = GetArray(bap2); bvp; bvp = GetNext(bvp))
+ AddArrayValue(bap1, MOF(DupVal(bvp)));
+
+ } else {
+ bap1->To_Val = bap2->To_Val;
+ bap1->Nd = bap2->Nd;
+ } // endif To_Val
+
+} // end of MergeArray
+
+/***********************************************************************/
+/* Set the nth Value of the Array Value list or add it. */
+/***********************************************************************/
+void BJSON::SetArrayValue(PBVAL bap, PBVAL nvp, int n)
+{
+ CheckType(bap, TYPE_JAR);
+ int i = 0;
+ PBVAL bvp = NULL;
+
+ if (bap->To_Val)
+ for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp))
+ if (i == n) {
+ SetValueVal(bvp, nvp);
+ return;
+ }
+
+ if (!bvp)
+ AddArrayValue(bap, MOF(nvp));
+
+} // end of SetValue
+
+/***********************************************************************/
+/* Return the text corresponding to all values. */
+/***********************************************************************/
+PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text)
+{
+ CheckType(bap, TYPE_JAR);
+
+ if (bap->To_Val) {
+ bool b;
+
+ if (!text) {
+ text = new(g) STRING(g, 256);
+ b = true;
+ } else {
+ if (text->GetLastChar() != ' ')
+ text->Append(" (");
+ else
+ text->Append('(');
+
+ b = false;
+ } // endif text
+
+ for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp)) {
+ GetValueText(g, bvp, text);
+
+ if (bvp->Next)
+ text->Append(", ");
+ else if (!b)
+ text->Append(')');
+
+ } // endfor bvp
+
+ if (b) {
+ text->Trim();
+ return text->GetStr();
+ } // endif b
+
+ } // endif To_Val
+
+ return NULL;
+} // end of GetText;
+
+/***********************************************************************/
+/* Delete a Value from the Arrays Value list. */
+/***********************************************************************/
+bool BJSON::DeleteValue(PBVAL bap, int n)
+{
+ CheckType(bap, TYPE_JAR);
+ int i = 0;
+ PBVAL bvp, pvp = NULL;
+
+ for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp))
+ if (i == n) {
+ if (pvp)
+ pvp->Next = bvp->Next;
+ else
+ bap->To_Val = bvp->Next;
+
+ bap->Nd--;
+ return true;
+ } else
+ pvp = bvp;
+
+ return false;
+} // end of DeleteValue
+
+/***********************************************************************/
+/* True if void or if all members are nulls. */
+/***********************************************************************/
+bool BJSON::IsArrayNull(PBVAL bap)
+{
+ CheckType(bap, TYPE_JAR);
+
+ for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp))
+ if (bvp->Type != TYPE_NULL)
+ return false;
+
+ return true;
+} // end of IsNull
+
+/* ------------------------- Bvalue functions ------------------------ */
+
+/***********************************************************************/
+/* Sub-allocate and clear a BVAL. */
+/***********************************************************************/
+PBVAL BJSON::NewVal(int type)
+{
+ PBVAL bvp = (PBVAL)BsonSubAlloc(sizeof(BVAL));
+
+ bvp->To_Val = 0;
+ bvp->Nd = 0;
+ bvp->Type = type;
+ bvp->Next = 0;
+ return bvp;
+} // end of NewVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL as type. */
+/***********************************************************************/
+PBVAL BJSON::SubAllocVal(OFFSET toval, int type, short nd)
+{
+ PBVAL bvp = NewVal(type);
+
+ bvp->To_Val = toval;
+ bvp->Nd = nd;
+ return bvp;
+} // end of SubAllocVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL as string. */
+/***********************************************************************/
+PBVAL BJSON::SubAllocStr(OFFSET toval, short nd)
+{
+ PBVAL bvp = NewVal(TYPE_STRG);
+
+ bvp->To_Val = toval;
+ bvp->Nd = nd;
+ return bvp;
+} // end of SubAllocStr
+
+/***********************************************************************/
+/* Allocate a BVALUE with a given string or numeric value. */
+/***********************************************************************/
+PBVAL BJSON::NewVal(PVAL valp)
+{
+ PBVAL vlp = NewVal();
+
+ SetValue(vlp, valp);
+ return vlp;
+} // end of NewVal
+
+/***********************************************************************/
+/* Sub-allocate and initialize a BVAL from another BVAL. */
+/***********************************************************************/
+PBVAL BJSON::DupVal(PBVAL bvlp) {
+ PBVAL bvp = NewVal();
+
+ *bvp = *bvlp;
+ bvp->Next = 0;
+ return bvp;
+} // end of DupVal
+
+/***********************************************************************/
+/* Return the size of value's value. */
+/***********************************************************************/
+int BJSON::GetSize(PBVAL vlp, bool b)
+{
+ switch (vlp->Type) {
+ case TYPE_JAR:
+ return GetArraySize(vlp);
+ case TYPE_JOB:
+ return GetObjectSize(vlp);
+ default:
+ return 1;
+ } // enswitch Type
+
+} // end of GetSize
+
+PBVAL BJSON::GetBson(PBVAL bvp)
+{
+ PBVAL bp = NULL;
+
+ switch (bvp->Type) {
+ case TYPE_JAR:
+ bp = MVP(bvp->To_Val);
+ break;
+ case TYPE_JOB:
+ bp = GetVlp(MPP(bvp->To_Val));
+ break;
+ default:
+ bp = bvp;
+ break;
+ } // endswitch Type
+
+ return bp;
+} // end of GetBson
+
+/***********************************************************************/
+/* Return the Value's value as a VALUE struct. */
+/***********************************************************************/
+PVAL BJSON::GetValue(PGLOBAL g, PBVAL vp)
+{
+ double d;
+ PVAL valp;
+ PBVAL vlp = vp->Type == TYPE_JVAL ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_STRG:
+ case TYPE_DBL:
+ case TYPE_BINT:
+ valp = AllocateValue(g, MP(vlp->To_Val), vlp->Type, vlp->Nd);
+ break;
+ case TYPE_INTG:
+ case TYPE_BOOL:
+ valp = AllocateValue(g, vlp, vlp->Type);
+ break;
+ case TYPE_FLOAT:
+ d = (double)vlp->F;
+ valp = AllocateValue(g, &d, TYPE_DOUBLE, vlp->Nd);
+ break;
+ default:
+ valp = NULL;
+ break;
+ } // endswitch Type
+
+ return valp;
+} // end of GetValue
+
+/***********************************************************************/
+/* Return the Value's Integer value. */
+/***********************************************************************/
+int BJSON::GetInteger(PBVAL vp) {
+ int n;
+ PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_INTG:
+ n = vlp->N;
+ break;
+ case TYPE_FLOAT:
+ n = (int)vlp->F;
+ break;
+ case TYPE_DTM:
+ case TYPE_STRG:
+ n = atoi(MZP(vlp->To_Val));
+ break;
+ case TYPE_BOOL:
+ n = (vlp->B) ? 1 : 0;
+ break;
+ case TYPE_BINT:
+ n = (int)*(longlong*)MP(vlp->To_Val);
+ break;
+ case TYPE_DBL:
+ n = (int)*(double*)MP(vlp->To_Val);
+ break;
+ default:
+ n = 0;
+ } // endswitch Type
+
+ return n;
+} // end of GetInteger
+
+/***********************************************************************/
+/* Return the Value's Big integer value. */
+/***********************************************************************/
+longlong BJSON::GetBigint(PBVAL vp) {
+ longlong lln;
+ PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_BINT:
+ lln = *(longlong*)MP(vlp->To_Val);
+ break;
+ case TYPE_INTG:
+ lln = (longlong)vlp->N;
+ break;
+ case TYPE_FLOAT:
+ lln = (longlong)vlp->F;
+ break;
+ case TYPE_DBL:
+ lln = (longlong)*(double*)MP(vlp->To_Val);
+ break;
+ case TYPE_DTM:
+ case TYPE_STRG:
+ lln = atoll(MZP(vlp->To_Val));
+ break;
+ case TYPE_BOOL:
+ lln = (vlp->B) ? 1 : 0;
+ break;
+ default:
+ lln = 0;
+ } // endswitch Type
+
+ return lln;
+} // end of GetBigint
+
+/***********************************************************************/
+/* Return the Value's Double value. */
+/***********************************************************************/
+double BJSON::GetDouble(PBVAL vp)
+{
+ double d;
+ PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_DBL:
+ d = *(double*)MP(vlp->To_Val);
+ break;
+ case TYPE_BINT:
+ d = (double)*(longlong*)MP(vlp->To_Val);
+ break;
+ case TYPE_INTG:
+ d = (double)vlp->N;
+ break;
+ case TYPE_FLOAT:
+ { char buf[32];
+ int n = (vlp->Nd) ? vlp->Nd : 5;
+
+ sprintf(buf, "%.*f", n, vlp->F);
+ d = atof(buf);
+ } break;
+ case TYPE_DTM:
+ case TYPE_STRG:
+ d = atof(MZP(vlp->To_Val));
+ break;
+ case TYPE_BOOL:
+ d = (vlp->B) ? 1.0 : 0.0;
+ break;
+ default:
+ d = 0.0;
+ } // endswitch Type
+
+ return d;
+} // end of GetDouble
+
+/***********************************************************************/
+/* Return the Value's String value. */
+/***********************************************************************/
+PSZ BJSON::GetString(PBVAL vp, char* buff)
+{
+ char buf[32];
+ char* p = (buff) ? buff : buf;
+ PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp;
+
+ switch (vlp->Type) {
+ case TYPE_DTM:
+ case TYPE_STRG:
+ p = MZP(vlp->To_Val);
+ break;
+ case TYPE_INTG:
+ sprintf(p, "%d", vlp->N);
+ break;
+ case TYPE_FLOAT:
+ sprintf(p, "%.*f", vlp->Nd, vlp->F);
+ break;
+ case TYPE_BINT:
+ sprintf(p, "%lld", *(longlong*)MP(vlp->To_Val));
+ break;
+ case TYPE_DBL:
+ sprintf(p, "%.*lf", vlp->Nd, *(double*)MP(vlp->To_Val));
+ break;
+ case TYPE_BOOL:
+ p = (PSZ)((vlp->B) ? "true" : "false");
+ break;
+ case (char)TYPE_NULL:
+ p = (PSZ)"null";
+ break;
+ default:
+ p = NULL;
+ } // endswitch Type
+
+ return (p == buf) ? (PSZ)PlugDup(G, buf) : p;
+} // end of GetString
+
+/***********************************************************************/
+/* Return or append the Value's text value. */
+/***********************************************************************/
+PSZ BJSON::GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text)
+{
+ if (vlp->Type == TYPE_JOB)
+ return GetObjectText(g, vlp, text);
+ else if (vlp->Type == TYPE_JAR)
+ return GetArrayText(g, vlp, text);
+
+ char buff[32];
+ PSZ s = (vlp->Type == TYPE_NULL) ? NULL : GetString(vlp, buff);
+
+ if (s)
+ text->Append(s);
+ else if (GetJsonNull())
+ text->Append(GetJsonNull());
+
+ return NULL;
+} // end of GetText
+
+void BJSON::SetValueObj(PBVAL vlp, PBVAL bop)
+{
+ CheckType(bop, TYPE_JOB);
+ vlp->To_Val = bop->To_Val;
+ vlp->Nd = bop->Nd;
+ vlp->Type = TYPE_JOB;
+} // end of SetValueObj;
+
+void BJSON::SetValueArr(PBVAL vlp, PBVAL bap)
+{
+ CheckType(bap, TYPE_JAR);
+ vlp->To_Val = bap->To_Val;
+ vlp->Nd = bap->Nd;
+ vlp->Type = TYPE_JAR;
+} // end of SetValue;
+
+void BJSON::SetValueVal(PBVAL vlp, PBVAL vp)
+{
+ vlp->To_Val = vp->To_Val;
+ vlp->Nd = vp->Nd;
+ vlp->Type = vp->Type;
+} // end of SetValue;
+
+PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp)
+{
+ if (!vlp)
+ vlp = NewVal();
+
+ if (!valp || valp->IsNull()) {
+ vlp->Type = TYPE_NULL;
+ } else switch (valp->GetType()) {
+ case TYPE_DATE:
+ if (((DTVAL*)valp)->IsFormatted())
+ vlp->To_Val = DupStr(valp->GetCharValue());
+ else {
+ char buf[32];
+
+ vlp->To_Val = DupStr(valp->GetCharString(buf));
+ } // endif Formatted
+
+ vlp->Type = TYPE_DTM;
+ break;
+ case TYPE_STRING:
+ vlp->To_Val = DupStr(valp->GetCharValue());
+ vlp->Type = TYPE_STRG;
+ break;
+ case TYPE_DOUBLE:
+ case TYPE_DECIM:
+ { double d = valp->GetFloatValue();
+ int nd = (IsTypeNum(valp->GetType())) ? valp->GetValPrec() : 0;
+
+ if (nd > 0 && nd <= 6 && d >= FLT_MIN && d <= FLT_MAX) {
+ vlp->F = (float)valp->GetFloatValue();
+ vlp->Type = TYPE_FLOAT;
+ } else {
+ double* dp = (double*)BsonSubAlloc(sizeof(double));
+
+ *dp = d;
+ vlp->To_Val = MOF(dp);
+ vlp->Type = TYPE_DBL;
+ } // endif Nd
+
+ vlp->Nd = MY_MIN(nd, 16);
+ } break;
+ case TYPE_TINY:
+ vlp->B = valp->GetTinyValue() != 0;
+ vlp->Type = TYPE_BOOL;
+ break;
+ case TYPE_INT:
+ vlp->N = valp->GetIntValue();
+ vlp->Type = TYPE_INTG;
+ break;
+ case TYPE_BIGINT:
+ if (valp->GetBigintValue() >= INT_MIN32 &&
+ valp->GetBigintValue() <= INT_MAX32) {
+ vlp->N = valp->GetIntValue();
+ vlp->Type = TYPE_INTG;
+ } else {
+ longlong* llp = (longlong*)BsonSubAlloc(sizeof(longlong));
+
+ *llp = valp->GetBigintValue();
+ vlp->To_Val = MOF(llp);
+ vlp->Type = TYPE_BINT;
+ } // endif BigintValue
+
+ break;
+ default:
+ sprintf(G->Message, "Unsupported typ %d\n", valp->GetType());
+ throw(777);
+ } // endswitch Type
+
+ return vlp;
+} // end of SetValue
+
+/***********************************************************************/
+/* Set the Value's value as the given integer. */
+/***********************************************************************/
+void BJSON::SetInteger(PBVAL vlp, int n)
+{
+ vlp->N = n;
+ vlp->Type = TYPE_INTG;
+} // end of SetInteger
+
+/***********************************************************************/
+/* Set the Value's Boolean value as a tiny integer. */
+/***********************************************************************/
+void BJSON::SetBool(PBVAL vlp, bool b)
+{
+ vlp->B = b;
+ vlp->Type = TYPE_BOOL;
+} // end of SetTiny
+
+/***********************************************************************/
+/* Set the Value's value as the given big integer. */
+/***********************************************************************/
+void BJSON::SetBigint(PBVAL vlp, longlong ll)
+{
+ if (ll >= INT_MIN32 && ll <= INT_MAX32) {
+ vlp->N = (int)ll;
+ vlp->Type = TYPE_INTG;
+ } else {
+ longlong* llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong));
+
+ *llp = ll;
+ vlp->To_Val = MOF(llp);
+ vlp->Type = TYPE_BINT;
+ } // endif ll
+
+} // end of SetBigint
+
+/***********************************************************************/
+/* Set the Value's value as the given DOUBLE. */
+/***********************************************************************/
+void BJSON::SetFloat(PBVAL vlp, double d, int nd)
+{
+ double* dp = (double*)BsonSubAlloc(sizeof(double));
+
+ *dp = d;
+ vlp->To_Val = MOF(dp);
+ vlp->Nd = MY_MIN(nd, 16);
+ vlp->Type = TYPE_DBL;
+} // end of SetFloat
+
+/***********************************************************************/
+/* Set the Value's value as the given DOUBLE representation. */
+/***********************************************************************/
+void BJSON::SetFloat(PBVAL vlp, PSZ s)
+{
+ char *p = strchr(s, '.');
+ int nd = 0;
+ double d = atof(s);
+
+ if (p) {
+ for (++p; isdigit(*p); nd++, p++);
+ for (--p; *p == '0'; nd--, p--);
+ } // endif p
+
+ if (nd < 6 && d >= FLT_MIN && d <= FLT_MAX) {
+ vlp->F = (float)d;
+ vlp->Nd = nd;
+ vlp->Type = TYPE_FLOAT;
+ } else
+ SetFloat(vlp, d, nd);
+
+} // end of SetFloat
+
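A standalone sketch of the decimal-counting rule used by SetFloat(PBVAL, PSZ) just above: digits after the dot are counted, trailing zeros are dropped, and six or more remaining decimals push the value into a double rather than a float. count_decimals is an illustrative helper, not code from the patch.

    #include <cctype>
    #include <cstdio>
    #include <cstring>

    static int count_decimals(const char *s) {
      const char *p = strchr(s, '.');
      if (!p) return 0;
      int nd = 0;
      for (++p; isdigit((unsigned char)*p); nd++, p++) ;  // digits after the dot
      for (--p; *p == '0'; nd--, p--) ;                   // drop trailing zeros
      return nd;
    }

    int main() {
      std::printf("%d %d\n", count_decimals("3.1400"), count_decimals("2.718281828"));
      // prints 2 9 : the first fits a float, the second is stored as a double
      return 0;
    }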
+ /***********************************************************************/
+/* Set the Value's value as the given string. */
+/***********************************************************************/
+void BJSON::SetString(PBVAL vlp, PSZ s, int ci)
+{
+ vlp->To_Val = MOF(s);
+ vlp->Nd = ci;
+ vlp->Type = TYPE_STRG;
+} // end of SetString
+
+/***********************************************************************/
+/* True when its JSON or normal value is null. */
+/***********************************************************************/
+bool BJSON::IsValueNull(PBVAL vlp)
+{
+ bool b;
+
+ switch (vlp->Type) {
+ case (char)TYPE_NULL:
+ b = true;
+ break;
+ case TYPE_JOB:
+ b = IsObjectNull(vlp);
+ break;
+ case TYPE_JAR:
+ b = IsArrayNull(vlp);
+ break;
+ default:
+ b = false;
+ } // endswitch Type
+
+ return b;
+ } // end of IsNull
diff --git a/storage/connect/bson.h b/storage/connect/bson.h
new file mode 100644
index 00000000000..5420c6f2f36
--- /dev/null
+++ b/storage/connect/bson.h
@@ -0,0 +1,208 @@
+/**************** bson H Declares Source Code File (.H) ****************/
+/* Name: bson.h Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* */
+/* This file contains the BSON class declarations. */
+/***********************************************************************/
+#pragma once
+#include <mysql_com.h>
+#include "json.h"
+#include "xobject.h"
+
+#if defined(_DEBUG)
+#define X assert(false);
+#else
+#define X
+#endif
+
+#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0)
+
+class BDOC;
+class BOUT;
+class BJSON;
+
+typedef class BDOC* PBDOC;
+typedef class BJSON* PBJSON;
+typedef uint OFFSET;
+
+/***********************************************************************/
+/* Structure BVAL. Binary representation of a JVALUE. */
+/***********************************************************************/
+typedef struct _jvalue {
+ union {
+ OFFSET To_Val; // Offset to a value
+ int N; // An integer value
+ float F; // A float value
+ bool B; // A boolean value True or false (0)
+ };
+ short Nd; // Number of decimals
+ char Type; // The value type
+ char Ktp; // The key type
+ OFFSET Next; // Offset to the next value in array
+} BVAL, *PBVAL; // end of struct BVALUE
+
+/***********************************************************************/
+/* Structure BPAIR. The pairs of a json Object. */
+/***********************************************************************/
+typedef struct _jpair {
+ OFFSET Key; // Offset to this pair key name
+ BVAL Vlp; // The value of the pair
+} BPAIR, *PBPR; // end of struct BPAIR
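Because BVAL and BPAIR link through OFFSET fields rather than raw pointers, a whole document tree stays valid wherever the work area sits in memory. The standalone sketch below mimics that idea; Pool, Node, ptr() and off() are illustrative stand-ins for the Sarea pool and the MakePtr/MakeOff helpers, and offset 0 is kept free so it can mean "no next element", as the pool header does in the real layout.

    #include <cstdio>

    typedef unsigned OFFSET_T;                  // plays the role of OFFSET above

    struct Pool {                               // stand-in for the Sarea work area
      char     buf[1024];
      unsigned used = 4;                        // keep offset 0 free: 0 means "none"
      OFFSET_T alloc(unsigned size) {           // sub-allocate, returning an offset
        OFFSET_T o = used;
        used += (size + 3) / 4 * 4;
        return o;
      }
      void    *ptr(OFFSET_T o) { return buf + o; }                     // MakePtr-like
      OFFSET_T off(void *p)    { return (OFFSET_T)((char *)p - buf); } // MakeOff-like
    };

    struct Node {                               // minimal stand-in for BVAL
      OFFSET_T Next;                            // offset of the next value, 0 = none
      int      N;
    };

    int main() {
      Pool pool;
      Node *f = (Node *)pool.ptr(pool.alloc((unsigned)sizeof(Node)));
      Node *s = (Node *)pool.ptr(pool.alloc((unsigned)sizeof(Node)));
      f->N = 1; f->Next = pool.off(s);          // link by offset, not by pointer
      s->N = 2; s->Next = 0;

      for (OFFSET_T o = pool.off(f); o; o = ((Node *)pool.ptr(o))->Next)
        std::printf("%d ", ((Node *)pool.ptr(o))->N);  // prints: 1 2
      std::printf("\n");
      return 0;
    }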
+
+char* NextChr(PSZ s, char sep);
+char* GetJsonNull(void);
+const char* GetFmt(int type, bool un);
+
+DllExport bool IsNum(PSZ s);
+
+/***********************************************************************/
+/* Class BJSON. The class handling all BJSON operations. */
+/***********************************************************************/
+class BJSON : public BLOCK {
+public:
+ // Constructor
+ BJSON(PGLOBAL g, PBVAL vp = NULL)
+ { G = g, Base = G->Sarea; Bvp = vp; Throw = true; }
+
+ // Utility functions
+ inline OFFSET MOF(void *p) {return MakeOff(Base, p);}
+ inline void *MP(OFFSET o) {return MakePtr(Base, o);}
+ inline PBPR MPP(OFFSET o) {return (PBPR)MakePtr(Base, o);}
+ inline PBVAL MVP(OFFSET o) {return (PBVAL)MakePtr(Base, o);}
+ inline PSZ MZP(OFFSET o) {return (PSZ)MakePtr(Base, o);}
+ inline longlong LLN(OFFSET o) {return *(longlong*)MakePtr(Base, o);}
+ inline double DBL(OFFSET o) {return *(double*)MakePtr(Base, o);}
+
+ void Reset(void) {Base = G->Sarea;}
+ void* GetBase(void) { return Base; }
+ void SubSet(bool b = false);
+ void MemSave(void) {G->Saved_Size = ((PPOOLHEADER)G->Sarea)->To_Free;}
+ void MemSet(size_t size);
+ void GetMsg(PGLOBAL g) { if (g != G) strcpy(g->Message, G->Message); }
+
+ // SubAlloc functions
+ void* BsonSubAlloc(size_t size);
+ PBPR NewPair(OFFSET key, int type = TYPE_NULL);
+ OFFSET NewPair(PSZ key, int type = TYPE_NULL)
+ {return MOF(NewPair(DupStr(key), type));}
+ PBVAL NewVal(int type = TYPE_NULL);
+ PBVAL NewVal(PVAL valp);
+ PBVAL SubAllocVal(OFFSET toval, int type = TYPE_NULL, short nd = 0);
+ PBVAL SubAllocVal(PBVAL toval, int type = TYPE_NULL, short nd = 0)
+ {return SubAllocVal(MOF(toval), type, nd);}
+ PBVAL SubAllocStr(OFFSET str, short nd = 0);
+ PBVAL SubAllocStr(PSZ str, short nd = 0)
+ {return SubAllocStr(DupStr(str), nd);}
+ PBVAL DupVal(PBVAL bvp);
+ OFFSET DupStr(PSZ str) { return MOF(NewStr(str)); }
+ PSZ NewStr(PSZ str);
+
+ // Array functions
+ inline PBVAL GetArray(PBVAL vlp) {return MVP(vlp->To_Val);}
+ int GetArraySize(PBVAL bap, bool b = false);
+ PBVAL GetArrayValue(PBVAL bap, int i);
+ PSZ GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text);
+ void MergeArray(PBVAL bap1,PBVAL bap2);
+ bool DeleteValue(PBVAL bap, int n);
+ void AddArrayValue(PBVAL bap, OFFSET nvp = 0, int* x = NULL);
+ inline void AddArrayValue(PBVAL bap, PBVAL nvp = NULL, int* x = NULL)
+ {AddArrayValue(bap, MOF(nvp), x);}
+ void SetArrayValue(PBVAL bap, PBVAL nvp, int n);
+ bool IsArrayNull(PBVAL bap);
+
+ // Object functions
+ inline PBPR GetObject(PBVAL bop) {return MPP(bop->To_Val);}
+ inline PBPR GetNext(PBPR brp) { return MPP(brp->Vlp.Next); }
+ void SetPairValue(PBPR brp, PBVAL bvp);
+ int GetObjectSize(PBVAL bop, bool b = false);
+ PSZ GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text);
+ PBVAL MergeObject(PBVAL bop1, PBVAL bop2);
+ PBVAL AddPair(PBVAL bop, PSZ key, int type = TYPE_NULL);
+ PSZ GetKey(PBPR prp) {return prp ? MZP(prp->Key) : NULL;}
+ PBVAL GetTo_Val(PBPR prp) {return prp ? MVP(prp->Vlp.To_Val) : NULL;}
+ PBVAL GetVlp(PBPR prp) {return prp ? (PBVAL)&prp->Vlp : NULL;}
+ PBVAL GetKeyValue(PBVAL bop, PSZ key);
+ PBVAL GetKeyList(PBVAL bop);
+ PBVAL GetObjectValList(PBVAL bop);
+ void SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key);
+ inline void SetKeyValue(PBVAL bop, PBVAL vlp, PSZ key)
+ {SetKeyValue(bop, MOF(vlp), key);}
+ bool DeleteKey(PBVAL bop, PCSZ k);
+ bool IsObjectNull(PBVAL bop);
+
+ // Value functions
+ int GetSize(PBVAL vlp, bool b = false);
+ PBVAL GetNext(PBVAL vlp) {return MVP(vlp->Next);}
+ //PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); }
+ PSZ GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text);
+ PBVAL GetBson(PBVAL bvp);
+ PSZ GetString(PBVAL vp, char* buff = NULL);
+ int GetInteger(PBVAL vp);
+ long long GetBigint(PBVAL vp);
+ double GetDouble(PBVAL vp);
+ PVAL GetValue(PGLOBAL g, PBVAL vp);
+ void SetValueObj(PBVAL vlp, PBVAL bop);
+ void SetValueArr(PBVAL vlp, PBVAL bap);
+ void SetValueVal(PBVAL vlp, PBVAL vp);
+ PBVAL SetValue(PBVAL vlp, PVAL valp);
+ void SetString(PBVAL vlp, PSZ s, int ci = 0);
+ void SetInteger(PBVAL vlp, int n);
+ void SetBigint(PBVAL vlp, longlong ll);
+ void SetFloat(PBVAL vlp, double f, int nd = 16);
+ void SetFloat(PBVAL vlp, PSZ s);
+ void SetBool(PBVAL vlp, bool b);
+ void Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; }
+ bool IsValueNull(PBVAL vlp);
+ bool IsJson(PBVAL vlp) {return vlp ? vlp->Type == TYPE_JAR ||
+ vlp->Type == TYPE_JOB ||
+ vlp->Type == TYPE_JVAL : false;}
+
+ // Members
+ PGLOBAL G;
+ PBVAL Bvp;
+ void *Base;
+ bool Throw;
+
+protected:
+ // Default constructor not to be used
+ BJSON(void) {}
+}; // end of class BJSON
+
+/***********************************************************************/
+/* Class BDOC. The class for parsing and serializing json documents. */
+/***********************************************************************/
+class BDOC : public BJSON {
+public:
+ BDOC(PGLOBAL G);
+
+ bool GetComma(void) { return comma; }
+ int GetPretty(void) { return pretty; }
+ void SetPretty(int pty) { pretty = pty; }
+
+ // Methods
+ PBVAL ParseJson(PGLOBAL g, char* s, size_t n);
+ PSZ Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty);
+
+protected:
+ OFFSET ParseArray(int& i);
+ OFFSET ParseObject(int& i);
+ PBVAL ParseValue(int& i, PBVAL bvp);
+ OFFSET ParseString(int& i);
+ void ParseNumeric(int& i, PBVAL bvp);
+ OFFSET ParseAsArray(int& i);
+ bool SerializeArray(OFFSET arp, bool b);
+ bool SerializeObject(OFFSET obp);
+ bool SerializeValue(PBVAL vp, bool b = false);
+
+ // Members used when parsing and serializing
+ JOUT* jp; // Used with serialize
+ char* s; // The Json string to parse
+ int len; // The Json string length
+ int pretty; // The pretty style of the file to parse
+ bool pty[3]; // Used to guess what pretty is
+ bool comma; // True if Pretty = 1
+
+ // Default constructor not to be used
+ BDOC(void) {}
+}; // end of class BDOC
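+
+/***********************************************************************/
+/* Illustrative use of BDOC (a sketch only, not part of the API; it   */
+/* assumes an initialized PGLOBAL g and a json string str, mirroring  */
+/* how the derived BJNX class is used in bsonudf.cpp):                */
+/*                                                                    */
+/*   BDOC  doc(g);                                                    */
+/*   PBVAL top = doc.ParseJson(g, str, strlen(str));                  */
+/*   PSZ   txt = top ? doc.Serialize(g, top, NULL, 0) : NULL;         */
+/***********************************************************************/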
diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp
new file mode 100644
index 00000000000..4145e21deb5
--- /dev/null
+++ b/storage/connect/bsonudf.cpp
@@ -0,0 +1,6080 @@
+/****************** bsonudf C++ Program Source Code File (.CPP) ******************/
+/* PROGRAM NAME: bsonudf Version 1.0 */
+/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */
+/* This program implements the BSON User Defined Functions.                     */
+/*********************************************************************************/
+
+/*********************************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/*********************************************************************************/
+#include <my_global.h>
+#include <mysqld.h>
+#include <mysql.h>
+#include <sql_error.h>
+#include <stdio.h>
+
+#include "bsonudf.h"
+
+#if defined(UNIX) || defined(UNIV_LINUX)
+#define _O_RDONLY O_RDONLY
+#endif
+
+#define MEMFIX 4096
+#if defined(connect_EXPORTS)
+#define PUSH_WARNING(M) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, M)
+#else
+#define PUSH_WARNING(M) htrc(M)
+#endif
+#define M 6
+
+int IsArgJson(UDF_ARGS* args, uint i);
+void SetChanged(PBSON bsp);
+
+static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp);
+
+/* --------------------------------- JSON UDF ---------------------------------- */
+
+/*********************************************************************************/
+/* Program for saving the status of the memory pools. */
+/*********************************************************************************/
+inline void JsonMemSave(PGLOBAL g) {
+ g->Saved_Size = ((PPOOLHEADER)g->Sarea)->To_Free;
+} /* end of JsonMemSave */
+
+/*********************************************************************************/
+/* Program for freeing the memory pools. */
+/*********************************************************************************/
+inline void JsonFreeMem(PGLOBAL g) {
+ g->Activityp = NULL;
+ g = PlugExit(g);
+} /* end of JsonFreeMem */
+
+/*********************************************************************************/
+/* Allocate and initialize a BSON structure. */
+/*********************************************************************************/
+static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp)
+{
+ PBSON bsp = (PBSON)PlgDBSubAlloc(g, NULL, sizeof(BSON));
+
+ if (bsp) {
+ strcpy(bsp->Msg, "Binary Json");
+ bsp->Msg[BMX] = 0;
+ bsp->Filename = NULL;
+ bsp->G = g;
+ bsp->Pretty = 2;
+ bsp->Reslen = len;
+ bsp->Changed = false;
+ bsp->Top = bsp->Jsp = (PJSON)jsp;
+ bsp->Bsp = NULL;
+ } else
+ PUSH_WARNING(g->Message);
+
+ return bsp;
+} /* end of BbinAlloc */
+
+/* --------------------------- New Testing BJSON Stuff --------------------------*/
+
+/*********************************************************************************/
+/* SubAlloc a new BJNX class with protection against memory exhaustion. */
+/*********************************************************************************/
+static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len)
+{
+ PBJNX bjnx;
+
+ try {
+ bjnx = new(g) BJNX(g, vlp, type, len);
+ } catch (...) {
+ if (trace(1023))
+ htrc("%s\n", g->Message);
+
+ PUSH_WARNING(g->Message);
+ bjnx = NULL;
+ } // end try/catch
+
+ return bjnx;
+} /* end of BjnxNew */
+
+/* ----------------------------------- BJNX ------------------------------------ */
+
+/*********************************************************************************/
+/* BJNX public constructor.                                                      */
+/*********************************************************************************/
+BJNX::BJNX(PGLOBAL g) : BDOC(g)
+{
+ Row = NULL;
+ Bvalp = NULL;
+ Jpnp = NULL;
+ Jp = NULL;
+ Nodes = NULL;
+ Value = NULL;
+ MulVal = NULL;
+ Jpath = NULL;
+ Buf_Type = TYPE_NULL;
+ Long = len;
+ Prec = 0;
+ Nod = 0;
+ Xnod = -1;
+ K = 0;
+ I = -1;
+ Imax = 9;
+ B = 0;
+ Xpd = false;
+ Parsed = false;
+ Found = false;
+ Wr = false;
+ Jb = false;
+ Changed = false;
+ Throw = false;
+} // end of BJNX constructor
+
+/*********************************************************************************/
+/* BJNX public constructor.                                                      */
+/*********************************************************************************/
+BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) : BDOC(g)
+{
+ Row = row;
+ Bvalp = NULL;
+ Jpnp = NULL;
+ Jp = NULL;
+ Nodes = NULL;
+ Value = AllocateValue(g, type, len, prec);
+ MulVal = NULL;
+ Jpath = NULL;
+ Buf_Type = type;
+ Long = len;
+ Prec = prec;
+ Nod = 0;
+ Xnod = -1;
+ K = 0;
+ I = -1;
+ Imax = 9;
+ B = 0;
+ Xpd = false;
+ Parsed = false;
+ Found = false;
+ Wr = wr;
+ Jb = false;
+ Changed = false;
+ Throw = false;
+} // end of BJNX constructor
+
+/*********************************************************************************/
+/* SetJpath: set and parse the json path. */
+/*********************************************************************************/
+my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb)
+{
+ // Check Value was allocated
+ if (!Value)
+ return true;
+
+ Value->SetNullable(true);
+ Jpath = path;
+
+ // Parse the json path
+ Parsed = false;
+ Nod = 0;
+ Jb = jb;
+ return ParseJpath(g);
+} // end of SetJpath
+
+/*********************************************************************************/
+/* Analyse array processing options. */
+/*********************************************************************************/
+my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm)
+{
+ int n = (int)strlen(p);
+ my_bool dg = true, b = false;
+ PJNODE jnp = &Nodes[i];
+
+ if (*p) {
+ if (p[n - 1] == ']') {
+ p[--n] = 0;
+ } else if (!IsNum(p)) {
+ // Wrong array specification
+ sprintf(g->Message, "Invalid array specification %s", p);
+ return true;
+ } // endif p
+
+ } else
+ b = true;
+
+  // Check whether a numeric Rank was specified
+ dg = IsNum(p);
+
+ if (!n) {
+ // Default specifications
+ if (jnp->Op != OP_EXP) {
+ if (Wr) {
+ // Force append
+ jnp->Rank = INT_MAX32;
+ jnp->Op = OP_LE;
+ } else if (Jb) {
+ // Return a Json item
+ jnp->Op = OP_XX;
+ } else if (b) {
+ // Return 1st value (B is the index base)
+ jnp->Rank = B;
+ jnp->Op = OP_LE;
+ } else if (!Value->IsTypeNum()) {
+ jnp->CncVal = AllocateValue(g, PlugDup(g, ", "), TYPE_STRING);
+ jnp->Op = OP_CNC;
+ } else
+ jnp->Op = OP_ADD;
+
+ } // endif OP
+
+ } else if (dg) {
+ // Return nth value
+ jnp->Rank = atoi(p) - B;
+ jnp->Op = OP_EQ;
+ } else if (Wr) {
+ sprintf(g->Message, "Invalid specification %s in a write path", p);
+ return true;
+ } else if (n == 1) {
+ // Set the Op value;
+ switch (*p) {
+ case '+': jnp->Op = OP_ADD; break;
+ case 'x': jnp->Op = OP_MULT; break;
+ case '>': jnp->Op = OP_MAX; break;
+ case '<': jnp->Op = OP_MIN; break;
+ case '!': jnp->Op = OP_SEP; break; // Average
+ case '#': jnp->Op = OP_NUM; break;
+ case '*': jnp->Op = OP_EXP; break;
+ default:
+ sprintf(g->Message, "Invalid function specification %c", *p);
+ return true;
+ } // endswitch *p
+
+ } else if (*p == '"' && p[n - 1] == '"') {
+ // This is a concat specification
+ jnp->Op = OP_CNC;
+
+ if (n > 2) {
+ // Set concat intermediate string
+ p[n - 1] = 0;
+
+ if (trace(1))
+ htrc("Concat string=%s\n", p + 1);
+
+ jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING);
+ } // endif n
+
+ } else {
+ strcpy(g->Message, "Wrong array specification");
+ return true;
+ } // endif's
+
+ // For calculated arrays, a local Value must be used
+ switch (jnp->Op) {
+ case OP_NUM:
+ jnp->Valp = AllocateValue(g, TYPE_INT);
+ break;
+ case OP_ADD:
+ case OP_MULT:
+ case OP_SEP:
+ if (!IsTypeChar(Buf_Type))
+ jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision());
+ else
+ jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2);
+
+ break;
+ case OP_MIN:
+ case OP_MAX:
+ jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision());
+ break;
+ case OP_CNC:
+ if (IsTypeChar(Buf_Type))
+ jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision());
+ else
+ jnp->Valp = AllocateValue(g, TYPE_STRING, 512);
+
+ break;
+ default:
+ break;
+ } // endswitch Op
+
+ if (jnp->Valp)
+ MulVal = AllocateValue(g, jnp->Valp);
+
+ return false;
+} // end of SetArrayOptions
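+
+// Illustrative array specifications handled above (a sketch; B is the
+// index base, 0 by default):
+//   $.price[1]     the second value of the array
+//   $.price[+]     the sum of the array values
+//   $.price[!]     the average of the array values
+//   $.price[#]     the number of values in the array
+//   $.name[", "]   the values concatenated with ", "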
+
+/*********************************************************************************/
+/* Parse the Jpath information, if any was passed.                              */
+/* This information can be specified in the Fieldfmt column option when         */
+/* creating the table. It indicates the position of the node                    */
+/* corresponding to that column.                                                */
+/*********************************************************************************/
+my_bool BJNX::ParseJpath(PGLOBAL g)
+{
+ char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL;
+ int i;
+ my_bool a, mul = false;
+
+ if (Parsed)
+ return false; // Already done
+ else if (!Jpath)
+ // Jpath = Name;
+ return true;
+
+ if (trace(1))
+ htrc("ParseJpath %s\n", SVP(Jpath));
+
+ if (!(pbuf = PlgDBDup(g, Jpath)))
+ return true;
+
+ if (*pbuf == '$') pbuf++;
+ if (*pbuf == '.') pbuf++;
+ if (*pbuf == '[') p1 = pbuf++;
+
+ // Estimate the required number of nodes
+ for (i = 0, p = pbuf; (p = NextChr(p, '.')); i++, p++)
+ Nod++; // One path node found
+
+ if (!(Nodes = (PJNODE)PlgDBSubAlloc(g, NULL, (++Nod) * sizeof(JNODE))))
+ return true;
+
+ memset(Nodes, 0, (Nod) * sizeof(JNODE));
+
+ // Analyze the Jpath for this column
+ for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) {
+ a = (p1 != NULL);
+ p1 = strchr(p, '[');
+ p2 = strchr(p, '.');
+
+ if (!p2)
+ p2 = p1;
+ else if (p1) {
+ if (p1 < p2)
+ p2 = p1;
+ else if (p1 == p2 + 1)
+ *p2++ = 0; // Old syntax .[
+ else
+ p1 = NULL;
+
+ } // endif p1
+
+ if (p2)
+ *p2++ = 0;
+
+ // Jpath must be explicit
+ if (a || *p == 0 || *p == '[' || IsNum(p)) {
+ // Analyse intermediate array processing
+ if (SetArrayOptions(g, p, i, Nodes[i - 1].Key))
+ return true;
+
+ } else if (*p == '*') {
+ if (Wr) {
+ sprintf(g->Message, "Invalid specification %c in a write path", *p);
+ return true;
+ } else // Return JSON
+ Nodes[i].Op = OP_XX;
+
+ } else {
+ Nodes[i].Key = p;
+ Nodes[i].Op = OP_EXIST;
+ } // endif's
+
+ } // endfor i, p
+
+ Nod = i;
+ MulVal = AllocateValue(g, Value);
+
+ if (trace(1))
+ for (i = 0; i < Nod; i++)
+ htrc("Node(%d) Key=%s Op=%d Rank=%d\n",
+ i, SVP(Nodes[i].Key), Nodes[i].Op, Nodes[i].Rank);
+
+ Parsed = true;
+ return false;
+} // end of ParseJpath
+
+/*********************************************************************************/
+/* Make a valid key from the passed argument. */
+/*********************************************************************************/
+PSZ BJNX::MakeKey(UDF_ARGS *args, int i)
+{
+ if (args->arg_count > (unsigned)i) {
+ int j = 0, n = args->attribute_lengths[i];
+ my_bool b; // true if attribute is zero terminated
+ PSZ p;
+ PCSZ s = args->attributes[i];
+
+ if (s && *s && (n || *s == '\'')) {
+ if ((b = (!n || !s[n])))
+ n = strlen(s);
+
+ if (IsArgJson(args, i))
+ j = (int)(strchr(s, '_') - s + 1);
+
+ if (j && n > j) {
+ s += j;
+ n -= j;
+ } else if (*s == '\'' && s[n-1] == '\'') {
+ s++;
+ n -= 2;
+ b = false;
+ } // endif *s
+
+ if (n < 1)
+ return NewStr((PSZ)"Key");
+
+ if (!b) {
+ p = (PSZ)BsonSubAlloc(n + 1);
+ memcpy(p, s, n);
+ p[n] = 0;
+ return p;
+ } // endif b
+
+ } // endif s
+
+ return NewStr((PSZ)s);
+ } // endif count
+
+ return NewStr((PSZ)"Key");
+} // end of MakeKey
+
+/*********************************************************************************/
+/* MakeJson: Serialize the json item and set value to it. */
+/*********************************************************************************/
+PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp)
+{
+ if (Value->IsTypeNum()) {
+ strcpy(g->Message, "Cannot make Json for a numeric value");
+ Value->Reset();
+ } else if (bvp->Type != TYPE_JAR && bvp->Type != TYPE_JOB) {
+ strcpy(g->Message, "Target is not an array or object");
+ Value->Reset();
+ } else
+ Value->SetValue_psz(Serialize(g, bvp, NULL, 0));
+
+ return Value;
+} // end of MakeJson
+
+/*********************************************************************************/
+/* SetJsonValue: Set a PVAL value from a BVAL contents.                         */
+/*********************************************************************************/
+void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp)
+{
+ if (vlp) {
+ vp->SetNull(false);
+
+ if (Jb) {
+ vp->SetValue_psz(Serialize(g, vlp, NULL, 0));
+ } else switch (vlp->Type) {
+ case TYPE_DTM:
+ case TYPE_STRG:
+ vp->SetValue_psz(GetString(vlp));
+ break;
+ case TYPE_INTG:
+ case TYPE_BINT:
+ vp->SetValue(GetInteger(vlp));
+ break;
+ case TYPE_DBL:
+ case TYPE_FLOAT:
+ if (vp->IsTypeNum())
+ vp->SetValue(GetDouble(vlp));
+ else // Get the proper number of decimals
+ vp->SetValue_psz(GetString(vlp));
+
+ break;
+ case TYPE_BOOL:
+ if (vp->IsTypeNum())
+ vp->SetValue(GetInteger(vlp) ? 1 : 0);
+ else
+ vp->SetValue_psz(GetString(vlp));
+
+ break;
+ case TYPE_JAR:
+ vp->SetValue_psz(GetArrayText(g, vlp, NULL));
+ break;
+ case TYPE_JOB:
+ vp->SetValue_psz(GetObjectText(g, vlp, NULL));
+ break;
+ case (char)TYPE_NULL:
+ vp->SetNull(true);
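+        // fall through: a json null also resets the value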
+ default:
+ vp->Reset();
+ } // endswitch Type
+
+ } else {
+ vp->SetNull(true);
+ vp->Reset();
+ } // endif val
+
+} // end of SetJsonValue
+
+/*********************************************************************************/
+/* GetJson: Get the json item addressed by the whole path.                      */
+/*********************************************************************************/
+PBVAL BJNX::GetJson(PGLOBAL g)
+{
+ return GetRowValue(g, Row, 0);
+} // end of GetJson
+
+/*********************************************************************************/
+/* ReadValue: Read the value addressed by the path into Value.                  */
+/*********************************************************************************/
+void BJNX::ReadValue(PGLOBAL g)
+{
+ Value->SetValue_pval(GetColumnValue(g, Row, 0));
+} // end of ReadValue
+
+/*********************************************************************************/
+/* GetColumnValue: Get the column value addressed by the path.                  */
+/*********************************************************************************/
+PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i)
+{
+ PBVAL vlp = GetRowValue(g, row, i);
+
+ SetJsonValue(g, Value, vlp);
+ return Value;
+} // end of GetColumnValue
+
+/*********************************************************************************/
+/* GetRowValue: Get the json value addressed by the path in a row.              */
+/*********************************************************************************/
+PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b)
+{
+ my_bool expd = false;
+ PBVAL bap;
+ PBVAL vlp = NULL;
+
+ for (; i < Nod && row; i++) {
+ if (Nodes[i].Op == OP_NUM) {
+ Value->SetValue(row->Type == TYPE_JAR ? GetArraySize(row) : 1);
+ vlp = NewVal(Value);
+ return vlp;
+ } else if (Nodes[i].Op == OP_XX) {
+ Jb = b;
+ // return DupVal(g, row);
+ return row; // or last line ???
+ } else if (Nodes[i].Op == OP_EXP) {
+ PUSH_WARNING("Expand not supported by this function");
+ return NULL;
+ } else switch (row->Type) {
+ case TYPE_JOB:
+ if (!Nodes[i].Key) {
+ // Expected Array was not there
+ if (Nodes[i].Op == OP_LE) {
+ if (i < Nod - 1)
+ continue;
+ else
+ vlp = row; // DupVal(g, row) ???
+
+ } else {
+ strcpy(g->Message, "Unexpected object");
+ vlp = NULL;
+ } //endif Op
+
+ } else
+ vlp = GetKeyValue(row, Nodes[i].Key);
+
+ break;
+ case TYPE_JAR:
+ bap = row;
+
+ if (!Nodes[i].Key) {
+ if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE)
+ vlp = GetArrayValue(bap, Nodes[i].Rank);
+ else if (Nodes[i].Op == OP_EXP)
+ return (PBVAL)ExpandArray(g, bap, i);
+ else
+ return NewVal(CalculateArray(g, bap, i));
+
+ } else {
+ // Unexpected array, unwrap it as [0]
+ vlp = GetArrayValue(bap, 0);
+ i--;
+ } // endif's
+
+ break;
+ case TYPE_JVAL:
+ vlp = row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ vlp = NULL;
+ } // endswitch Type
+
+ row = vlp;
+ } // endfor i
+
+ return vlp;
+} // end of GetRowValue
+
+/*********************************************************************************/
+/* ExpandArray: expanding arrays is not supported by these functions.           */
+/*********************************************************************************/
+PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n)
+{
+ strcpy(g->Message, "Expand cannot be done by this function");
+ return NULL;
+} // end of ExpandArray
+
+/*********************************************************************************/
+/* CalculateArray: apply the operation specified for the path to the array.     */
+/*********************************************************************************/
+PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n)
+{
+ int i, ars = GetArraySize(bap), nv = 0;
+ bool err;
+ OPVAL op = Nodes[n].Op;
+ PVAL val[2], vp = Nodes[n].Valp;
+ PBVAL bvrp, bvp;
+ BVAL bval;
+
+ vp->Reset();
+ xtrc(1, "CalculateArray size=%d op=%d\n", ars, op);
+
+ for (i = 0; i < ars; i++) {
+ bvrp = GetArrayValue(bap, i);
+ xtrc(1, "i=%d nv=%d\n", i, nv);
+
+ if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) {
+ if (IsValueNull(bvrp)) {
+ SetString(bvrp, NewStr(GetJsonNull()), 0);
+ bvp = bvrp;
+ } else if (n < Nod - 1 && IsJson(bvrp)) {
+ SetValue(&bval, GetColumnValue(g, bvrp, n + 1));
+ bvp = &bval;
+ } else
+ bvp = bvrp;
+
+ if (trace(1))
+ htrc("bvp=%s null=%d\n",
+ GetString(bvp), IsValueNull(bvp) ? 1 : 0);
+
+ if (!nv++) {
+ SetJsonValue(g, vp, bvp);
+ continue;
+ } else
+ SetJsonValue(g, MulVal, bvp);
+
+ if (!MulVal->IsNull()) {
+ switch (op) {
+ case OP_CNC:
+ if (Nodes[n].CncVal) {
+ val[0] = Nodes[n].CncVal;
+ err = vp->Compute(g, val, 1, op);
+ } // endif CncVal
+
+ val[0] = MulVal;
+ err = vp->Compute(g, val, 1, op);
+ break;
+ // case OP_NUM:
+ case OP_SEP:
+ val[0] = Nodes[n].Valp;
+ val[1] = MulVal;
+ err = vp->Compute(g, val, 2, OP_ADD);
+ break;
+ default:
+ val[0] = Nodes[n].Valp;
+ val[1] = MulVal;
+ err = vp->Compute(g, val, 2, op);
+ } // endswitch Op
+
+ if (err)
+ vp->Reset();
+
+ if (trace(1)) {
+          char buf[32];
+
+ htrc("vp='%s' err=%d\n",
+            vp->GetCharString(buf), err ? 1 : 0);
+ } // endif trace
+
+ } // endif Zero
+
+ } // endif jvrp
+
+ } // endfor i
+
+ if (op == OP_SEP) {
+ // Calculate average
+ MulVal->SetValue(nv);
+ val[0] = vp;
+ val[1] = MulVal;
+
+ if (vp->Compute(g, val, 2, OP_DIV))
+ vp->Reset();
+
+ } // endif Op
+
+ return vp;
+} // end of CalculateArray
+
+/***********************************************************************/
+/* GetRow: Get the target row of the path, constructing missing nodes if any. */
+/***********************************************************************/
+PBVAL BJNX::GetRow(PGLOBAL g)
+{
+ PBVAL val = NULL;
+ PBVAL arp;
+ PBVAL nwr, row = Row;
+
+ for (int i = 0; i < Nod - 1 && row; i++) {
+ if (Nodes[i].Op == OP_XX)
+ break;
+ else if (Nodes[i].Op == OP_EXP) {
+ PUSH_WARNING("Expand not supported by this function");
+ return NULL;
+ } else switch (row->Type) {
+ case TYPE_JOB:
+ if (!Nodes[i].Key)
+ // Expected Array was not there, wrap the value
+ continue;
+
+ val = GetKeyValue(row, Nodes[i].Key);
+ break;
+ case TYPE_JAR:
+ arp = row;
+
+ if (!Nodes[i].Key) {
+ if (Nodes[i].Op == OP_EQ)
+ val = GetArrayValue(arp, Nodes[i].Rank);
+ else
+ val = GetArrayValue(arp, Nodes[i].Rx);
+
+ } else {
+ // Unexpected array, unwrap it as [0]
+ val = GetArrayValue(arp, 0);
+ i--;
+ } // endif Nodes
+
+ break;
+ case TYPE_JVAL:
+ val = MVP(row->To_Val);
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ val = NULL;
+ } // endswitch Type
+
+ if (val) {
+ row = val;
+ } else {
+ // Construct missing objects
+ for (i++; row && i < Nod; i++) {
+ if (Nodes[i].Op == OP_XX)
+ break;
+
+ // Construct new row
+ nwr = NewVal();
+
+ if (row->Type == TYPE_JOB) {
+ SetKeyValue(row, MOF(nwr), Nodes[i - 1].Key);
+ } else if (row->Type == TYPE_JAR) {
+ AddArrayValue(row, MOF(nwr));
+ } else {
+ strcpy(g->Message, "Wrong type when writing new row");
+ nwr = NULL;
+ } // endif's
+
+ row = nwr;
+ } // endfor i
+
+ break;
+ } // endelse
+
+ } // endfor i
+
+ return row;
+} // end of GetRow
+
+/***********************************************************************/
+/* WriteValue: Write a value at the location addressed by the path.   */
+/***********************************************************************/
+my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp)
+{
+ PBVAL objp = NULL;
+ PBVAL arp = NULL;
+ PBVAL jvp = NULL;
+ PBVAL row = GetRow(g);
+
+ if (!row)
+ return true;
+
+ switch (row->Type) {
+ case TYPE_JOB: objp = row; break;
+ case TYPE_JAR: arp = row; break;
+ case TYPE_JVAL: jvp = MVP(row->To_Val); break;
+ default:
+ strcpy(g->Message, "Invalid target type");
+ return true;
+ } // endswitch Type
+
+ if (arp) {
+ if (!Nodes[Nod - 1].Key) {
+ if (Nodes[Nod - 1].Op == OP_EQ)
+ SetArrayValue(arp, jvalp, Nodes[Nod - 1].Rank);
+ else
+ AddArrayValue(arp, MOF(jvalp));
+
+ } // endif Key
+
+ } else if (objp) {
+ if (Nodes[Nod - 1].Key)
+ SetKeyValue(objp, MOF(jvalp), Nodes[Nod - 1].Key);
+
+ } else if (jvp)
+ SetValueVal(jvp, jvalp);
+
+ return false;
+} // end of WriteValue
+
+/*********************************************************************************/
+/* DeleteItem: Delete the item addressed by the path.                           */
+/*********************************************************************************/
+my_bool BJNX::DeleteItem(PGLOBAL g, PBVAL row)
+{
+ int n = -1;
+ my_bool b = false;
+ bool loop;
+ PBVAL vlp, pvp, rwp;
+
+ do {
+ loop = false;
+ vlp = NULL;
+ pvp = rwp = row;
+
+ for (int i = 0; i < Nod && rwp; i++) {
+ if (Nodes[i].Op == OP_XX)
+ break;
+ else switch (rwp->Type) {
+ case TYPE_JOB:
+ if (!Nodes[i].Key) {
+ vlp = NULL;
+ } else
+ vlp = GetKeyValue(rwp, Nodes[i].Key);
+
+ break;
+ case TYPE_JAR:
+ if (!Nodes[i].Key) {
+ if (Nodes[i].Op == OP_EXP) {
+ if (loop) {
+ PUSH_WARNING("Only one expand can be handled");
+ return b;
+ } // endif loop
+
+ n++;
+ } else
+ n = Nodes[i].Rank;
+
+ vlp = GetArrayValue(rwp, n);
+
+ if (GetNext(vlp) && Nodes[i].Op == OP_EXP)
+ loop = true;
+
+ } else
+ vlp = NULL;
+
+ break;
+ case TYPE_JVAL:
+ vlp = rwp;
+ break;
+ default:
+ vlp = NULL;
+ } // endswitch Type
+
+ pvp = rwp;
+ rwp = vlp;
+ vlp = NULL;
+ } // endfor i
+
+ if (rwp) {
+ if (Nodes[Nod - 1].Op == OP_XX) {
+ if (!IsJson(rwp))
+ rwp->Type = TYPE_NULL;
+
+ rwp->To_Val = 0;
+ } else switch (pvp->Type) {
+ case TYPE_JOB:
+ b = DeleteKey(pvp, Nodes[Nod - 1].Key);
+ break;
+ case TYPE_JAR:
+ if (Nodes[Nod - 1].Op == OP_EXP) {
+ pvp->To_Val = 0;
+ loop = false;
+ } else
+ b = DeleteValue(pvp, n);
+
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ } // endif rwp
+
+ } while (loop);
+
+ return b;
+} // end of DeleteItem
+
+/*********************************************************************************/
+/* CheckPath: Checks whether the path exists in the document. */
+/*********************************************************************************/
+my_bool BJNX::CheckPath(PGLOBAL g)
+{
+ PBVAL val = NULL;
+ PBVAL row = Row;
+
+ for (int i = 0; i < Nod && row; i++) {
+ val = NULL;
+
+ if (Nodes[i].Op == OP_NUM || Nodes[i].Op == OP_XX) {
+ } else switch (row->Type) {
+ case TYPE_JOB:
+ if (Nodes[i].Key)
+ val = GetKeyValue(row, Nodes[i].Key);
+
+ break;
+ case TYPE_JAR:
+ if (!Nodes[i].Key)
+ if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE)
+ val = GetArrayValue(row, Nodes[i].Rank);
+
+ break;
+ case TYPE_JVAL:
+ val = row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ } // endswitch Type
+
+ if (i < Nod-1)
+ if (!(row = (IsJson(val)) ? val : NULL))
+ val = NULL;
+
+ } // endfor i
+
+ return (val != NULL);
+} // end of CheckPath
+
+/*********************************************************************************/
+/* Check if a path was specified and set jvp according to it. */
+/*********************************************************************************/
+my_bool BJNX::CheckPath(PGLOBAL g, UDF_ARGS *args, PBVAL jsp, PBVAL& jvp, int n)
+{
+ for (uint i = n; i < args->arg_count; i++)
+ if (args->arg_type[i] == STRING_RESULT && args->args[i]) {
+ // A path to a subset of the json tree is given
+ char *path = MakePSZ(g, args, i);
+
+ if (path) {
+ Row = jsp;
+
+ if (SetJpath(g, path))
+ return true;
+
+ if (!(jvp = GetJson(g))) {
+ sprintf(g->Message, "No sub-item at '%s'", path);
+ return true;
+ } else
+ return false;
+
+ } else {
+ strcpy(g->Message, "Path argument is null");
+ return true;
+ } // endif path
+
+ } // endif type
+
+ jvp = jsp;
+ return false;
+} // end of CheckPath
+
+/*********************************************************************************/
+/* Locate a value in a JSON tree: */
+/*********************************************************************************/
+PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k)
+{
+ PSZ str = NULL;
+ my_bool b = false, err = true;
+
+ g->Message[0] = 0;
+
+ if (!jsp) {
+ strcpy(g->Message, "Null json tree");
+ return NULL;
+ } // endif jsp
+
+ try {
+ // Write to the path string
+ Jp = new(g) JOUTSTR(g);
+ Jp->WriteChr('$');
+ Bvalp = jvp;
+ K = k;
+
+ switch (jsp->Type) {
+ case TYPE_JAR:
+ err = LocateArray(g, jsp);
+ break;
+ case TYPE_JOB:
+ err = LocateObject(g, jsp);
+ break;
+ case TYPE_JVAL:
+ err = LocateValue(g, MVP(jsp->To_Val));
+ break;
+ default:
+ err = true;
+ } // endswitch Type
+
+ if (err) {
+ if (!g->Message[0])
+ strcpy(g->Message, "Invalid json tree");
+
+ } else if (Found) {
+ Jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, Jp->N);
+ str = Jp->Strp;
+ } // endif's
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ } // end catch
+
+ return str;
+} // end of Locate
+
+/*********************************************************************************/
+/* Locate in a JSON Array. */
+/*********************************************************************************/
+my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp)
+{
+ char s[16];
+ int n = GetArraySize(jarp);
+ size_t m = Jp->N;
+
+ for (int i = 0; i < n && !Found; i++) {
+ Jp->N = m;
+ sprintf(s, "[%d]", i + B);
+
+ if (Jp->WriteStr(s))
+ return true;
+
+ if (LocateValue(g, GetArrayValue(jarp, i)))
+ return true;
+
+ } // endfor i
+
+ return false;
+} // end of LocateArray
+
+/*********************************************************************************/
+/* Locate in a JSON Object. */
+/*********************************************************************************/
+my_bool BJNX::LocateObject(PGLOBAL g, PBVAL jobp)
+{
+ size_t m;
+
+ if (Jp->WriteChr('.'))
+ return true;
+
+ m = Jp->N;
+
+ for (PBPR pair = GetObject(jobp); pair && !Found; pair = GetNext(pair)) {
+ Jp->N = m;
+
+ if (Jp->WriteStr(MZP(pair->Key)))
+ return true;
+
+ if (LocateValue(g, GetVlp(pair)))
+ return true;
+
+ } // endfor i
+
+ return false;
+} // end of LocateObject
+
+/*********************************************************************************/
+/* Locate a JSON Value. */
+/*********************************************************************************/
+my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp)
+{
+ if (CompareTree(g, Bvalp, jvp))
+ Found = (--K == 0);
+ else if (jvp->Type == TYPE_JAR)
+ return LocateArray(g, jvp);
+ else if (jvp->Type == TYPE_JOB)
+ return LocateObject(g, jvp);
+
+ return false;
+} // end of LocateValue
+
+/*********************************************************************************/
+/* Locate all occurrences of a value in a JSON tree: */
+/*********************************************************************************/
+PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx)
+{
+ PSZ str = NULL;
+ my_bool b = false, err = true;
+ PJPN jnp;
+
+ if (!jsp) {
+ strcpy(g->Message, "Null json tree");
+ return NULL;
+ } // endif jsp
+
+ try {
+ jnp = (PJPN)PlugSubAlloc(g, NULL, sizeof(JPN) * mx);
+ memset(jnp, 0, sizeof(JPN) * mx);
+ g->Message[0] = 0;
+
+ // Write to the path string
+ Jp = new(g)JOUTSTR(g);
+ Bvalp = bvp;
+ Imax = mx - 1;
+ Jpnp = jnp;
+ Jp->WriteChr('[');
+
+ switch (jsp->Type) {
+ case TYPE_JAR:
+ err = LocateArrayAll(g, jsp);
+ break;
+ case TYPE_JOB:
+ err = LocateObjectAll(g, jsp);
+ break;
+ case TYPE_JVAL:
+ err = LocateValueAll(g, MVP(jsp->To_Val));
+ break;
+ default:
+ err = LocateValueAll(g, jsp);
+ } // endswitch Type
+
+ if (!err) {
+ if (Jp->N > 1)
+ Jp->N--;
+
+ Jp->WriteChr(']');
+ Jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, Jp->N);
+ str = Jp->Strp;
+ } else if (!g->Message[0])
+ strcpy(g->Message, "Invalid json tree");
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ } // end catch
+
+ return str;
+} // end of LocateAll
+
+/*********************************************************************************/
+/* Locate in a JSON Array. */
+/*********************************************************************************/
+my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp)
+{
+ int i = 0;
+
+ if (I < Imax) {
+ Jpnp[++I].Type = TYPE_JAR;
+
+ for (PBVAL vp = GetArray(jarp); vp; vp = GetNext(vp)) {
+ Jpnp[I].N = i;
+
+ if (LocateValueAll(g, GetArrayValue(jarp, i)))
+ return true;
+
+ i++;
+ } // endfor i
+
+ I--;
+ } // endif I
+
+ return false;
+} // end of LocateArrayAll
+
+/*********************************************************************************/
+/* Locate in a JSON Object. */
+/*********************************************************************************/
+my_bool BJNX::LocateObjectAll(PGLOBAL g, PBVAL jobp)
+{
+ if (I < Imax) {
+ Jpnp[++I].Type = TYPE_JOB;
+
+ for (PBPR pair = GetObject(jobp); pair; pair = GetNext(pair)) {
+ Jpnp[I].Key = MZP(pair->Key);
+
+ if (LocateValueAll(g, GetVlp(pair)))
+ return true;
+
+ } // endfor i
+
+ I--;
+ } // endif I
+
+ return false;
+} // end of LocateObjectAll
+
+/*********************************************************************************/
+/* Locate a JSON Value. */
+/*********************************************************************************/
+my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp)
+{
+ if (CompareTree(g, Bvalp, jvp))
+ return AddPath();
+ else if (jvp->Type == TYPE_JAR)
+ return LocateArrayAll(g, jvp);
+ else if (jvp->Type == TYPE_JOB)
+ return LocateObjectAll(g, jvp);
+
+ return false;
+} // end of LocateValueAll
+
+/*********************************************************************************/
+/* Compare two JSON trees. */
+/*********************************************************************************/
+my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2)
+{
+ if (!jp1 || !jp2 || jp1->Type != jp2->Type || GetSize(jp1) != GetSize(jp2))
+ return false;
+
+ my_bool found = true;
+
+ if (jp1->Type == TYPE_JAR) {
+ for (int i = 0; found && i < GetArraySize(jp1); i++)
+ found = (CompareValues(g, GetArrayValue(jp1, i), GetArrayValue(jp2, i)));
+
+ } else if (jp1->Type == TYPE_JOB) {
+ PBPR p1 = GetObject(jp1), p2 = GetObject(jp2);
+
+ // Keys can be differently ordered
+ for (; found && p1 && p2; p1 = GetNext(p1))
+ found = CompareValues(g, GetVlp(p1), GetKeyValue(jp2, GetKey(p1)));
+
+ } else if (jp1->Type == TYPE_JVAL) {
+ found = CompareTree(g, MVP(jp1->To_Val), (MVP(jp2->To_Val)));
+ } else
+ found = CompareValues(g, jp1, jp2);
+
+ return found;
+} // end of CompareTree
+
+/*********************************************************************************/
+/* Compare two BVAL values and return true if they are equal.                   */
+/*********************************************************************************/
+my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2)
+{
+ my_bool b = false;
+
+ if (v1 && v2)
+ switch (v1->Type) {
+ case TYPE_JAR:
+ case TYPE_JOB:
+ if (v2->Type == v1->Type)
+ b = CompareTree(g, v1, v2);
+
+ break;
+ case TYPE_STRG:
+ if (v2->Type == TYPE_STRG) {
+ if (v1->Nd || v2->Nd) // Case insensitive
+ b = (!stricmp(MZP(v1->To_Val), MZP(v2->To_Val)));
+ else
+ b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val)));
+
+ } // endif Type
+
+ break;
+ case TYPE_DTM:
+ if (v2->Type == TYPE_DTM)
+ b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val)));
+
+ break;
+ case TYPE_INTG:
+ if (v2->Type == TYPE_INTG)
+ b = (v1->N == v2->N);
+ else if (v2->Type == TYPE_BINT)
+ b = ((longlong)v1->N == LLN(v2->To_Val));
+
+ break;
+ case TYPE_BINT:
+ if (v2->Type == TYPE_INTG)
+ b = (LLN(v1->To_Val) == (longlong)v2->N);
+ else if (v2->Type == TYPE_BINT)
+ b = (LLN(v1->To_Val) == LLN(v2->To_Val));
+
+ break;
+ case TYPE_FLOAT:
+ if (v2->Type == TYPE_FLOAT)
+ b = (v1->F == v2->F);
+ else if (v2->Type == TYPE_DBL)
+ b = ((double)v1->F == DBL(v2->To_Val));
+
+ break;
+ case TYPE_DBL:
+ if (v2->Type == TYPE_DBL)
+ b = (DBL(v1->To_Val) == DBL(v2->To_Val));
+ else if (v2->Type == TYPE_FLOAT)
+ b = (DBL(v1->To_Val) == (double)v2->F);
+
+ break;
+ case TYPE_BOOL:
+ if (v2->Type == TYPE_BOOL)
+ b = (v1->B == v2->B);
+
+ break;
+ case (char)TYPE_NULL:
+ b = (v2->Type == TYPE_NULL);
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ else
+ b = (!v1 && !v2);
+
+ return b;
+} // end of CompareValues
+
+/*********************************************************************************/
+/* Add the found path to the list. */
+/*********************************************************************************/
+my_bool BJNX::AddPath(void)
+{
+ char s[16];
+
+ if (Jp->WriteStr("\"$"))
+ return true;
+
+ for (int i = 0; i <= I; i++) {
+ if (Jpnp[i].Type == TYPE_JAR) {
+ sprintf(s, "[%d]", Jpnp[i].N + B);
+
+ if (Jp->WriteStr(s))
+ return true;
+
+ } else {
+ if (Jp->WriteChr('.'))
+ return true;
+
+ if (Jp->WriteStr(Jpnp[i].Key))
+ return true;
+
+ } // endif's
+
+ } // endfor i
+
+ if (Jp->WriteStr("\","))
+ return true;
+
+ return false;
+} // end of AddPath
+
+/*********************************************************************************/
+/* Make a JSON value from the passed argument. */
+/*********************************************************************************/
+PBVAL BJNX::MakeValue(UDF_ARGS *args, uint i, bool b, PBVAL *top)
+{
+ char *sap = (args->arg_count > i) ? args->args[i] : NULL;
+ int n, len;
+ int ci;
+ long long bigint;
+ PGLOBAL& g = G;
+ PBVAL jvp = NewVal();
+
+ if (top)
+ *top = NULL;
+
+ if (sap) switch (args->arg_type[i]) {
+ case STRING_RESULT:
+ if ((len = args->lengths[i])) {
+ if ((n = IsArgJson(args, i)) < 3)
+ sap = MakePSZ(g, args, i);
+
+ if (n) {
+ if (n == 3) {
+ PBSON bsp = (PBSON)sap;
+
+ if (i == 0) {
+ if (top)
+ *top = (PBVAL)bsp->Top;
+
+ jvp = (PBVAL)bsp->Jsp;
+ G = bsp->G;
+ Base = G->Sarea;
+ } else {
+ BJNX bnx(bsp->G);
+
+ jvp = MoveJson(&bnx, (PBVAL)bsp->Jsp);
+ } // endelse i
+
+ } else {
+ if (n == 2) {
+ if (!(sap = GetJsonFile(g, sap))) {
+ PUSH_WARNING(g->Message);
+ return jvp;
+ } // endif sap
+
+ len = strlen(sap);
+ } // endif n
+
+ if (!(jvp = ParseJson(g, sap, strlen(sap))))
+ PUSH_WARNING(g->Message);
+ else if (top)
+ *top = jvp;
+
+ } // endif's n
+
+ } else {
+ PBVAL bp = NULL;
+
+ if (b) {
+ if (strchr("[{ \t\r\n", *sap)) {
+ // Check whether this string is a valid json string
+ JsonMemSave(g);
+
+ if (!(bp = ParseJson(g, sap, strlen(sap))))
+ JsonSubSet(g); // Recover suballocated memory
+
+ g->Saved_Size = 0;
+ } else {
+ // Perhaps a file name
+ char* s = GetJsonFile(g, sap);
+
+ if (s)
+ bp = ParseJson(g, s, strlen(s));
+
+ } // endif's
+
+ } // endif b
+
+ if (!bp) {
+ ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1;
+ SetString(jvp, sap, ci);
+ } else {
+ if (top)
+ *top = bp;
+
+ jvp = bp;
+ } // endif bp
+
+ } // endif n
+
+ } // endif len
+
+ break;
+ case INT_RESULT:
+ bigint = *(long long*)sap;
+
+ if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) ||
+ (bigint == 1LL && !strcmp(args->attributes[i], "TRUE")))
+ SetBool(jvp, (char)bigint);
+ else
+ SetBigint(jvp, bigint);
+
+ break;
+ case REAL_RESULT:
+ SetFloat(jvp, *(double*)sap);
+ break;
+ case DECIMAL_RESULT:
+ SetFloat(jvp, MakePSZ(g, args, i));
+ break;
+ case TIME_RESULT:
+ case ROW_RESULT:
+ default:
+ break;
+ } // endswitch arg_type
+
+ return jvp;
+} // end of MakeValue
+
+/*********************************************************************************/
+/* Try making a JSON value of the passed type from the passed argument. */
+/*********************************************************************************/
+PBVAL BJNX::MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i, JTYP type, PBVAL *top)
+{
+ char *sap;
+ PBVAL jsp;
+ PBVAL jvp = MakeValue(args, i, false, top);
+
+ //if (type == TYPE_JSON) {
+ // if (jvp->GetValType() >= TYPE_JSON)
+ // return jvp;
+
+ //} else if (jvp->GetValType() == type)
+ // return jvp;
+
+ if (jvp->Type == TYPE_STRG) {
+ sap = GetString(jvp);
+
+ if ((jsp = ParseJson(g, sap, strlen(sap)))) {
+ if ((type == TYPE_JSON && jsp->Type != TYPE_JVAL) || jsp->Type == type) {
+ if (top)
+ *top = jvp;
+
+ SetValueVal(jvp, jsp);
+ } // endif Type
+
+ } // endif jsp
+
+ } // endif Type
+
+ return jvp;
+} // end of MakeTypedValue
+
+/*********************************************************************************/
+/* Parse a json file. */
+/*********************************************************************************/
+PBVAL BJNX::ParseJsonFile(PGLOBAL g, char *fn, int& pty, size_t& len)
+{
+ char *memory;
+ HANDLE hFile;
+ MEMMAP mm;
+ PBVAL jsp;
+
+ // Create the mapping file object
+ hFile = CreateFileMap(g, fn, &mm, MODE_READ, false);
+
+ if (hFile == INVALID_HANDLE_VALUE) {
+ DWORD rc = GetLastError();
+
+ if (!(*g->Message))
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR), "map", (int)rc, fn);
+
+ return NULL;
+ } // endif hFile
+
+ // Get the file size
+ len = (size_t)mm.lenL;
+
+  if (mm.lenH)
+    len += ((size_t)mm.lenH * 0x100000000LL);  // add the high 32 bits of the size
+
+ memory = (char *)mm.memory;
+
+ if (!len) { // Empty or deleted file
+ CloseFileHandle(hFile);
+ return NULL;
+ } // endif len
+
+ if (!memory) {
+ CloseFileHandle(hFile);
+ sprintf(g->Message, MSG(MAP_VIEW_ERROR), fn, GetLastError());
+ return NULL;
+ } // endif Memory
+
+ CloseFileHandle(hFile); // Not used anymore
+
+ // Parse the json file and allocate its tree structure
+ g->Message[0] = 0;
+ jsp = ParseJson(g, memory, len);
+ pty = pretty;
+ CloseMemMap(memory, len);
+ return jsp;
+} // end of ParseJsonFile
+
+/*********************************************************************************/
+/* Make the result according to the first argument type. */
+/*********************************************************************************/
+char *BJNX::MakeResult(UDF_ARGS *args, PBVAL top, uint n)
+{
+ char *str = NULL;
+ PGLOBAL& g = G;
+
+ if (IsArgJson(args, 0) == 2) {
+ // Make the change in the json file
+ PSZ fn = MakePSZ(g, args, 0);
+
+ if (Changed) {
+ int pretty = 2;
+
+ for (uint i = n; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT) {
+ pretty = (int)*(longlong*)args->args[i];
+ break;
+ } // endif type
+
+ if (!Serialize(g, top, fn, pretty))
+ PUSH_WARNING(g->Message);
+
+ Changed = false;
+ } // endif Changed
+
+ str = fn;
+ } else if (IsArgJson(args, 0) == 3) {
+ PBSON bsp = (PBSON)args->args[0];
+
+ if (bsp->Filename) {
+ if (Changed) {
+ // Make the change in the json file
+ if (!Serialize(g, (PBVAL)top, bsp->Filename, bsp->Pretty))
+ PUSH_WARNING(g->Message);
+
+ Changed = false;
+ } // endif Changed
+
+ str = bsp->Filename;
+ } else if (!(str = Serialize(g, (PBVAL)top, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ } else if (!(str = Serialize(g, top, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ return str;
+} // end of MakeResult
+
+/*********************************************************************************/
+/* Make the binary result according to the first argument type. */
+/*********************************************************************************/
+PBSON BJNX::MakeBinResult(UDF_ARGS *args, PBVAL top, ulong len, int n)
+{
+ char* filename = NULL;
+ int pretty = 2;
+ PBSON bnp = NULL;
+
+ if (IsArgJson(args, 0) == 3) {
+ bnp = (PBSON)args->args[0];
+
+ if (bnp->Top != (PJSON)top)
+ bnp->Top = bnp->Jsp = (PJSON)top;
+
+ return bnp;
+ } // endif 3
+
+ if (IsArgJson(args, 0) == 2) {
+ for (uint i = n; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT) {
+ pretty = (int)*(longlong*)args->args[i];
+ break;
+ } // endif type
+
+ filename = (char*)args->args[0];
+ } // endif 2
+
+ if ((bnp = BbinAlloc(G, len, top))) {
+ bnp->Filename = filename;
+ bnp->Pretty = pretty;
+ strcpy(bnp->Msg, "Json Binary item");
+ } //endif bnp
+
+ return bnp;
+} // end of MakeBinResult
+
+/***********************************************************************/
+/* Move a Json val block from one area to the current area. */
+/***********************************************************************/
+PBVAL BJNX::MoveVal(PBVAL vlp)
+{
+ PBVAL nvp = NewVal(vlp->Type);
+
+ nvp->Nd = vlp->Nd;
+ return nvp;
+} // end of MoveVal
+
+/***********************************************************************/
+/* Move a Json tree from one area to the current area.                */
+/***********************************************************************/
+PBVAL BJNX::MoveJson(PBJNX bxp, PBVAL jvp)
+{
+ PBVAL res = NULL;
+
+ if (jvp)
+ switch (jvp->Type) {
+ case TYPE_JAR:
+ res = MoveArray(bxp, jvp);
+ break;
+ case TYPE_JOB:
+ res = MoveObject(bxp, jvp);
+ break;
+ default:
+ res = MoveValue(bxp, jvp);
+ break;
+ } // endswitch Type
+
+ return res;
+} // end of MoveJson
+
+/***********************************************************************/
+/* Move an array. */
+/***********************************************************************/
+PBVAL BJNX::MoveArray(PBJNX bxp, PBVAL jap)
+{
+ PBVAL vlp, vmp, jvp = NULL, jarp = MoveVal(jap);
+
+ for (vlp = bxp->GetArray(jap); vlp; vlp = bxp->GetNext(vlp)) {
+ vmp = MoveJson(bxp, vlp);
+
+ if (jvp)
+ jvp->Next = MOF(vmp);
+ else
+ jarp->To_Val = MOF(vmp);
+
+ jvp = vmp;
+ } // endfor vlp
+
+ return jarp;
+} // end of MoveArray
+
+/***********************************************************************/
+/* Move an object from one area to the current area.                  */
+/***********************************************************************/
+PBVAL BJNX::MoveObject(PBJNX bxp, PBVAL jop)
+{
+ PBPR mpp, prp, ppp = NULL;
+ PBVAL vmp, jobp = MoveVal(jop);
+
+ for (prp = bxp->GetObject(jop); prp; prp = bxp->GetNext(prp)) {
+ vmp = MoveJson(bxp, GetVlp(prp));
+ mpp = NewPair(DupStr(bxp->MZP(prp->Key)));
+ SetPairValue(mpp, vmp);
+
+ if (ppp)
+ ppp->Vlp.Next = MOF(mpp);
+ else
+ jobp->To_Val = MOF(mpp);
+
+ ppp = mpp;
+ } // endfor vlp
+
+ return jobp;
+} // end of MoveObject
+
+/***********************************************************************/
+/* Move a non json value. */
+/***********************************************************************/
+PBVAL BJNX::MoveValue(PBJNX bxp, PBVAL jvp)
+{
+ double *dp;
+ PBVAL nvp = MoveVal(jvp);
+
+ switch (jvp->Type) {
+ case TYPE_STRG:
+ case TYPE_DTM:
+ nvp->To_Val = DupStr(bxp->MZP(jvp->To_Val));
+ break;
+ case TYPE_DBL:
+ dp = (double*)BsonSubAlloc(sizeof(double));
+ *dp = bxp->DBL(jvp->To_Val);
+ nvp->To_Val = MOF(dp);
+ break;
+ case TYPE_JVAL:
+ nvp->To_Val = MOF(MoveJson(bxp, bxp->MVP(jvp->To_Val)));
+ break;
+ default:
+ nvp->To_Val = jvp->To_Val;
+ break;
+  } // endswitch Type
+
+ return nvp;
+} // end of MoveValue
+
+/* -----------------------------Utility functions ------------------------------ */
+
+/*********************************************************************************/
+/* Returns a pointer to the first integer argument found from the nth argument. */
+/*********************************************************************************/
+static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n)
+{
+ int *x = NULL;
+
+ for (uint i = n; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT) {
+ if (args->args[i]) {
+ if ((x = (int*)PlgDBSubAlloc(g, NULL, sizeof(int))))
+ *x = (int)*(longlong*)args->args[i];
+ else
+ PUSH_WARNING(g->Message);
+
+ } // endif args
+
+ n = i + 1;
+ break;
+ } // endif arg_type
+
+ return x;
+} // end of GetIntArgPtr
+
+/*********************************************************************************/
+/* Returns a non-zero value if the argument is a JSON item or file name.        */
+/*********************************************************************************/
+int IsArgJson(UDF_ARGS *args, uint i)
+{
+ int n = 0;
+
+ if (i >= args->arg_count || args->arg_type[i] != STRING_RESULT) {
+ } else if (!strnicmp(args->attributes[i], "Bson_", 5) ||
+ !strnicmp(args->attributes[i], "Json_", 5)) {
+ if (!args->args[i] || strchr("[{ \t\r\n", *args->args[i]))
+      n = 1; // arg should be a json item
+// else
+// n = 2; // A file name may have been returned
+
+ } else if (!strnicmp(args->attributes[i], "Bbin_", 5)) {
+ if (args->lengths[i] == sizeof(BSON))
+ n = 3; // arg is a binary json item
+// else
+// n = 2; // A file name may have been returned
+
+ } else if (!strnicmp(args->attributes[i], "Bfile_", 6) ||
+ !strnicmp(args->attributes[i], "Jfile_", 6)) {
+ n = 2; // arg is a json file name
+#if 0
+ } else if (args->lengths[i]) {
+ PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024);
+ char *sap = MakePSZ(g, args, i);
+
+ if (ParseJson(g, sap, strlen(sap)))
+ n = 4;
+
+ JsonFreeMem(g);
+#endif // 0
+ } // endif's
+
+ return n;
+} // end of IsArgJson
+
+/*********************************************************************************/
+/* GetFileLength: returns file size in number of bytes. */
+/*********************************************************************************/
+static long GetFileLength(char *fn)
+{
+ int h;
+ long len;
+
+  h = open(fn, _O_RDONLY);
+
+ if (h != -1) {
+ if ((len = _filelength(h)) < 0)
+ len = 0;
+
+ close(h);
+ } else
+ len = 0;
+
+ return len;
+} // end of GetFileLength
+
+/* ------------------------- Now the new Bin UDF's ----------------------------- */
+
+/*********************************************************************************/
+/* Make a Json value containing the parameter. */
+/*********************************************************************************/
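+// Illustrative SQL use (a sketch; assumes the UDF has been created from the
+// CONNECT plugin library, e.g. CREATE FUNCTION bsonvalue RETURNS STRING
+// SONAME 'ha_connect'):
+//   SELECT bsonvalue(3.1416);    -- returns the argument serialized as json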
+my_bool bsonvalue_init(UDF_INIT* initid, UDF_ARGS* args, char* message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count > 1) {
+ strcpy(message, "Cannot accept more than 1 argument");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bsonvalue_init
+
+char* bsonvalue(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char*, char*)
+{
+ char *str;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, 1, false)) {
+ BJNX bnx(g);
+ PBVAL bvp = bnx.MakeValue(args, 0, true);
+
+ if (!(str = bnx.Serialize(g, bvp, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ } else
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bsonvalue
+
+void bsonvalue_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonvalue_deinit
+
+/*********************************************************************************/
+/* Make a Json array containing all the parameters. */
+/* Note: jvp must be set before arp because it can be a binary argument. */
+/*********************************************************************************/
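+// Illustrative SQL use (a sketch):
+//   SELECT bson_make_array(56, 3.1416, 'foo', NULL);
+//   -- expected to return a serialized array such as [56,3.1416,"foo",null]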
+my_bool bson_make_array_init(UDF_INIT* initid, UDF_ARGS* args, char* message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, false, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_make_array_init
+
+char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char*, char*)
+{
+ char* str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false)) {
+ BJNX bnx(g);
+ PBVAL jvp = bnx.MakeValue(args, 0);
+ PBVAL arp = bnx.NewVal(TYPE_JAR);
+
+ for (uint i = 0; i < args->arg_count;) {
+ bnx.AddArrayValue(arp, jvp);
+ jvp = bnx.MakeValue(args, ++i);
+ } // endfor i
+
+ if (!(str = bnx.Serialize(g, arp, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ } else
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_make_array
+
+void bson_make_array_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_make_array_deinit
+
+/*********************************************************************************/
+/* Add one or several values to a Bson array. */
+/*********************************************************************************/
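+// Illustrative SQL use (a sketch; the first argument must be, or parse to, an array):
+//   SELECT bson_array_add_values(bson_make_array(1, 2), 3, 4);
+//   -- expected to return [1,2,3,4]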
+my_bool bson_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ //} else if (!IsArgJson(args, 0, true)) {
+ // strcpy(message, "First argument must be a valid json string or item");
+ // return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_array_add_values_init
+
+char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char*) {
+ char* str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, true)) {
+ BJNX bnx(g);
+ PBVAL arp = bnx.MakeValue(args, 0, true);
+
+ if (arp->Type != TYPE_JAR) {
+ PUSH_WARNING("First argument is not an array");
+ goto fin;
+ } // endif arp
+
+ for (uint i = 1; i < args->arg_count; i++)
+ bnx.AddArrayValue(arp, bnx.MakeValue(args, i));
+
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, arp, INT_MAX);
+ } // endif CheckMemory
+
+ if (!str) {
+ PUSH_WARNING(g->Message);
+ str = args->args[0];
+ } // endif str
+
+ // Keep result of constant function
+ g->Xchk = (g->N) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ fin:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_array_add_values
+
+void bson_array_add_values_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_array_add_values_deinit
+
+/*********************************************************************************/
+/* Add one value to a Json array. */
+/*********************************************************************************/
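+// Illustrative SQL use (a sketch; an optional integer argument gives the
+// insertion position, otherwise the value is appended):
+//   SELECT bson_array_add(bson_make_array(5, 3, 8), 7);
+//   -- expected to return [5,3,8,7]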
+my_bool bson_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ //} else if (!IsArgJson(args, 0, true)) {
+ // strcpy(message, "First argument is not a valid Json item");
+ // return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_array_add_init
+
+char *bson_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 2, false, false, true)) {
+ int *x;
+ uint n = 2;
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL jsp, top;
+ PBVAL arp, jvp = bnx.MakeValue(args, 0, true, &top);
+
+ jsp = jvp;
+ x = GetIntArgPtr(g, args, n);
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jvp) {
+ if (jvp->Type != TYPE_JAR) {
+ if ((arp = bnx.NewVal(TYPE_JAR))) {
+ bnx.AddArrayValue(arp, jvp);
+
+ if (!top)
+ top = arp;
+
+ } // endif arp
+
+ } else
+ arp = jvp;
+
+ if (arp) {
+ bnx.AddArrayValue(arp, bnx.MakeValue(args, 1), x);
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, top, n);
+ } else
+ PUSH_WARNING(g->Message);
+
+ } else {
+ PUSH_WARNING("Target is not an array");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_array_add
+
+void bson_array_add_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_array_add_deinit
+
+/*********************************************************************************/
+/* Delete a value from a Json array. */
+/*********************************************************************************/
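+// Illustrative SQL use (a sketch; the integer argument is the index of the
+// value to delete, counted from the index base):
+//   SELECT bson_array_delete(bson_make_array(56, 3.1416, 'foo', NULL), 1);
+//   -- expected to return [56,"foo",null]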
+my_bool bson_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_array_delete_init
+
+char *bson_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 1, false, false, true)) {
+ int *x;
+ uint n = 1;
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL arp, top;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (!(x = GetIntArgPtr(g, args, n)))
+ PUSH_WARNING("Missing or null array index");
+ else if (bnx.CheckPath(g, args, jvp, arp, 1))
+ PUSH_WARNING(g->Message);
+ else if (arp && arp->Type == TYPE_JAR) {
+ bnx.DeleteValue(arp, *x);
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, top, n);
+ } else {
+ PUSH_WARNING("First argument target is not an array");
+ // if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_array_delete
+
+void bson_array_delete_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_array_delete_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all the parameters. */
+/*********************************************************************************/
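+/* Illustrative usage (assumed default UDF name): each argument becomes a       */
+/* member whose key is taken from the argument's alias or name, e.g.            */
+/*   SELECT bson_make_object(56 qty, 'foo' item);                               */
+/* should yield something like {"qty":56,"item":"foo"}.                         */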
+my_bool bson_make_object_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_make_object_init
+
+char *bson_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, false, true)) {
+ BJNX bnx(g);
+ PBVAL objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i++)
+ bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i));
+
+ str = bnx.Serialize(g, objp, NULL, 0);
+ } // endif objp
+
+ } // endif CheckMemory
+
+ if (!str)
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_make_object
+
+void bson_make_object_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_make_object_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all not null parameters. */
+/*********************************************************************************/
+my_bool bson_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args,
+ char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_object_nonull_init
+
+char *bson_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, true)) {
+ BJNX bnx(g);
+ PBVAL jvp, objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i++)
+ if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i)))
+ bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i));
+
+ str = bnx.Serialize(g, objp, NULL, 0);
+ } // endif objp
+
+ } // endif CheckMemory
+
+ if (!str)
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_object_nonull
+
+void bson_object_nonull_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_nonull_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all the key/value parameters. */
+/*********************************************************************************/
+my_bool bson_object_key_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count % 2) {
+ strcpy(message, "This function must have an even number of arguments");
+ return true;
+ } // endif arg_count
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_object_key_init
+
+char *bson_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, true)) {
+ BJNX bnx(g);
+ PBVAL objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i += 2)
+ bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i));
+
+ str = bnx.Serialize(g, objp, NULL, 0);
+ } // endif objp
+
+ } // endif CheckMemory
+
+ if (!str)
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_object_key
+
+void bson_object_key_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_key_deinit
+
+/*********************************************************************************/
+/* Add or replace a value in a Json Object. */
+/*********************************************************************************/
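+/* Illustrative usage (assumed default UDF name): the second argument's alias   */
+/* is used as the key, e.g. SELECT bson_object_add('{"a":1}', 2 b);             */
+/* should yield something like {"a":1,"b":2}. An optional path argument can     */
+/* select a sub-object inside the first document.                               */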
+my_bool bson_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_object_add_init
+
+char *bson_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PSZ key;
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 2, false, true, true)) {
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL jvp, objp;
+ PBVAL jsp, top;
+
+ jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jvp && jvp->Type == TYPE_JOB) {
+ objp = jvp;
+ jvp = bnx.MakeValue(args, 1);
+ key = bnx.MakeKey(args, 1);
+ bnx.SetKeyValue(objp, jvp, key);
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, top);
+ } else {
+ PUSH_WARNING("First argument target is not an object");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_object_add
+
+void bson_object_add_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_add_deinit
+
+/*********************************************************************************/
+/* Delete a value from a Json object. */
+/*********************************************************************************/
+my_bool bson_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have 2 or 3 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument must be a key string");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_object_delete_init
+
+char *bson_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 1, false, true, true)) {
+ bool chg;
+ BJNX bnx(g, NULL, TYPE_STRG);
+ PSZ key;
+ PBVAL jsp, objp, top;
+ PBVAL jvp = bnx.MakeValue(args, 0, false, &top);
+
+ jsp = jvp;
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jvp && jvp->Type == TYPE_JOB) {
+ key = bnx.MakeKey(args, 1);
+ objp = jvp;
+ chg = bnx.DeleteKey(objp, key);
+ bnx.SetChanged(chg);
+ str = bnx.MakeResult(args, top);
+ } else {
+ PUSH_WARNING("First argument target is not an object");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_object_delete
+
+void bson_object_delete_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_delete_deinit
+
+/*********************************************************************************/
+/* Returns an array of the Json object keys. */
+/*********************************************************************************/
+my_bool bson_object_list_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 1) {
+ strcpy(message, "This function must have 1 argument");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "Argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bson_object_list_init
+
+char *bson_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->N) {
+ if (!CheckMemory(g, initid, args, 1, true, true)) {
+ BJNX bnx(g);
+ PBVAL jarp;
+ PBVAL jsp = bnx.MakeValue(args, 0, true);
+
+ if (jsp->Type == TYPE_JOB) {
+ jarp = bnx.GetKeyList(jsp);
+
+ if (!(str = bnx.Serialize(g, jarp, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ } else {
+ PUSH_WARNING("First argument is not an object");
+ if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ g->Xchk = str;
+ g->N = 1; // str can be NULL
+ } // endif const_item
+
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_object_list
+
+void bson_object_list_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_list_deinit
+
+/*********************************************************************************/
+/* Returns an array of the Json object values. */
+/*********************************************************************************/
+my_bool bson_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 1) {
+ strcpy(message, "This function must have 1 argument");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "Argument must be a json object");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bson_object_values_init
+
+char *bson_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->N) {
+ if (!CheckMemory(g, initid, args, 1, true, true)) {
+ BJNX bnx(g);
+ char *p;
+ PBVAL jsp, jarp;
+ PBVAL jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ return NULL;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (jsp->Type == TYPE_JOB) {
+ jarp = bnx.GetObjectValList(jsp);
+
+ if (!(str = bnx.Serialize(g, jarp, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ } else {
+ PUSH_WARNING("First argument is not an object");
+ if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ } // endif CheckMemory
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ g->Xchk = str;
+ g->N = 1; // str can be NULL
+ } // endif const_item
+
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_object_values
+
+void bson_object_values_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_values_deinit
+
+/*********************************************************************************/
+/* Set the value of JsonGrpSize. */
+/*********************************************************************************/
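+/* JsonGrpSize is the maximum number of values the bson_xxx_grp aggregate       */
+/* functions keep per group; the new value is returned, e.g.                    */
+/*   SELECT bsonset_grp_size(500);    -- illustrative                           */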
+my_bool bsonset_grp_size_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ if (args->arg_count != 1 || args->arg_type[0] != INT_RESULT) {
+ strcpy(message, "This function must have 1 integer argument");
+ return true;
+ } else
+ return false;
+
+} // end of bsonset_grp_size_init
+
+long long bsonset_grp_size(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
+{
+ long long n = *(long long*)args->args[0];
+
+ JsonGrpSize = (uint)n;
+ return (long long)GetJsonGroupSize();
+} // end of bsonset_grp_size
+
+/*********************************************************************************/
+/* Get the value of JsonGrpSize. */
+/*********************************************************************************/
+my_bool bsonget_grp_size_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ if (args->arg_count != 0) {
+ strcpy(message, "This function must have no arguments");
+ return true;
+ } else
+ return false;
+
+} // end of bsonget_grp_size_init
+
+long long bsonget_grp_size(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
+{
+ return (long long)GetJsonGroupSize();
+} // end of bsonget_grp_size
+
+/*********************************************************************************/
+/* Make a Json array from values coming from rows. */
+/*********************************************************************************/
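+/* Illustrative usage (emp is a hypothetical table, assumed default UDF name):  */
+/*   SELECT dept, bson_array_grp(name) FROM emp GROUP BY dept;                  */
+/* Each group yields a JSON array of at most json_grp_size values.              */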
+my_bool bson_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, n = GetJsonGroupSize();
+
+ if (args->arg_count != 1) {
+ strcpy(message, "This function can only accept 1 argument");
+ return true;
+ } else if (IsArgJson(args, 0) == 3) {
+ strcpy(message, "This function does not support Jbin arguments");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
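+ // reslen and memlen were computed for a single value: scale them for up to
+ // n aggregated values (MEMFIX presumably covering the fixed, non-repeated part)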
+ reslen *= n;
+ memlen += ((memlen - MEMFIX) * (n - 1));
+
+ if (JsonInit(initid, args, message, false, reslen, memlen))
+ return true;
+
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = new(g) BJNX(g);
+
+ JsonMemSave(g);
+ return false;
+} // end of bson_array_grp_init
+
+void bson_array_grp_clear(UDF_INIT *initid, char*, char*)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+
+ JsonSubSet(g);
+ g->Activityp = (PACTIVITY)bxp->NewVal(TYPE_JAR);
+ g->N = GetJsonGroupSize();
+} // end of bson_array_grp_clear
+
+void bson_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+ PBVAL arp = (PBVAL)g->Activityp;
+
+ if (arp && g->N-- > 0)
+ bxp->AddArrayValue(arp, bxp->MakeValue(args, 0));
+
+} // end of bson_array_grp_add
+
+char *bson_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+ PBVAL arp = (PBVAL)g->Activityp;
+
+ if (g->N < 0)
+ PUSH_WARNING("Result truncated to json_grp_size values");
+
+ if (!arp || !(str = bxp->Serialize(g, arp, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_array_grp
+
+void bson_array_grp_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_array_grp_deinit
+
+/*********************************************************************************/
+/* Make a Json object from values coming from rows. */
+/*********************************************************************************/
+my_bool bson_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, n = GetJsonGroupSize();
+
+ if (args->arg_count != 2) {
+ strcpy(message, "This function requires 2 arguments (key, value)");
+ return true;
+ } else if (IsArgJson(args, 0) == 3) {
+ strcpy(message, "This function does not support Jbin arguments");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen);
+
+ reslen *= n;
+ memlen += ((memlen - MEMFIX) * (n - 1));
+
+ if (JsonInit(initid, args, message, false, reslen, memlen))
+ return true;
+
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = new(g) BJNX(g);
+
+ JsonMemSave(g);
+ return false;
+} // end of bson_object_grp_init
+
+void bson_object_grp_clear(UDF_INIT *initid, char*, char*)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+
+ JsonSubSet(g);
+ g->Activityp = (PACTIVITY)bxp->NewVal(TYPE_JOB);
+ g->N = GetJsonGroupSize();
+} // end of bson_object_grp_clear
+
+void bson_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+ PBVAL bop = (PBVAL)g->Activityp;
+
+ if (g->N-- > 0)
+ bxp->SetKeyValue(bop, bxp->MakeValue(args, 0), MakePSZ(g, args, 1));
+
+} // end of bson_object_grp_add
+
+char *bson_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
+ unsigned long *res_length, char *, char *)
+{
+ char *str;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER));
+ PBVAL bop = (PBVAL)g->Activityp;
+
+ if (g->N < 0)
+ PUSH_WARNING("Result truncated to json_grp_size values");
+
+ if (!bop || !(str = bxp->Serialize(g, bop, NULL, 0)))
+ str = strcpy(result, g->Message);
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_object_grp
+
+void bson_object_grp_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_object_grp_deinit
+
+/*********************************************************************************/
+/* Test BJSON parse and serialize. */
+/*********************************************************************************/
+my_bool bson_test_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen, more = 1000;
+
+ if (args->arg_count == 0) {
+ strcpy(message, "At least 1 argument required (json)");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bson_test_init
+
+char* bson_test(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error) {
+ char* str = NULL, * sap = NULL, * fn = NULL;
+ int pretty = 1;
+ PBVAL bvp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ str = (char*)g->Activityp;
+ goto err;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ BJNX bnx(g);
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, !g->Xchk)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else // Sarea may have been reallocated
+ bnx.Reset();
+
+ bvp = bnx.MakeValue(args, 0, true);
+
+ if (bvp->Type == TYPE_NULL) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif bvp
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = bvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } else
+ bvp = (PBVAL)g->Xchk;
+
+ for (uint i = 1; i < args->arg_count; i++)
+ if (args->arg_type[i] == STRING_RESULT)
+ fn = args->args[i];
+ else if (args->arg_type[i] == INT_RESULT)
+ pretty = (int)*(longlong*)args->args[i];
+
+ // Serialize the parse tree
+ str = bnx.Serialize(g, bvp, fn, pretty);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+ } catch (int n) {
+ xtrc(1, "json_test_bson: error %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ str = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ str = NULL;
+ } // end catch
+
+err:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_test
+
+void bson_test_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_test_deinit
+
+/*********************************************************************************/
+/* Locate a value in a Json tree. */
+/*********************************************************************************/
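+/* Illustrative usage (assumed default UDF name): returns the JSON path of the  */
+/* n-th occurrence of a value, e.g. SELECT bsonlocate('[45,28,36,45]', 28);     */
+/* should yield something like $[1], or NULL when the value is not found.       */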
+my_bool bsonlocate_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen, more = 1000;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third argument is not an integer (rank)");
+ return true;
+ } // endifs args
+
+ CalcLen(args, false, reslen, memlen);
+
+ // TODO: calculate this
+ if (IsArgJson(args, 0) == 3)
+ more = 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonlocate_init
+
+char* bsonlocate(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error) {
+ char *path = NULL;
+ int k;
+ PBVAL bvp, bvp2;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (g->Activityp) {
+ path = (char*)g->Activityp;
+ *res_length = strlen(path);
+ return path;
+ } else {
+ *res_length = 0;
+ *is_null = 1;
+ return NULL;
+ } // endif Activityp
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ BJNX bnx(g);
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, !g->Xchk)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else {
+ bnx.Reset(); // Sarea may have been re-allocated
+ bvp = bnx.MakeValue(args, 0, true);
+
+ if (!bvp) {
+ bnx.GetMsg(g);
+ PUSH_WARNING(g->Message);
+ goto err;
+ } else if (bvp->Type == TYPE_NULL) {
+ PUSH_WARNING("First argument is not a valid JSON item");
+ goto err;
+ } // endif bvp
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = bvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endif CheckMemory
+
+ } else
+ bvp = (PBVAL)g->Xchk;
+
+ // The item to locate
+ bvp2 = bnx.MakeValue(args, 1, true);
+
+ if (bvp2->Type == TYPE_NULL) {
+ PUSH_WARNING("Invalid second argument");
+ goto err;
+ } // endif bvp
+
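+ // Optional third argument: rank (occurrence number) of the value to locate, default 1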
+ k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1;
+
+// bnxp = new(g) BJNX(g, bvp, TYPE_STRING);
+ path = bnx.Locate(g, bvp, bvp2, k);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)path;
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } // end catch
+
+err:
+ if (!path) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(path);
+
+ return path;
+} // end of bsonlocate
+
+void bsonlocate_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonlocate_deinit
+
+/*********************************************************************************/
+/* Locate all occurrences of a value in a Json tree. */
+/*********************************************************************************/
+my_bool bson_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen, more = 1000;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third argument is not an integer (Depth)");
+ return true;
+ } // endifs
+
+ CalcLen(args, false, reslen, memlen);
+
+ // TODO: calculate this
+ if (IsArgJson(args, 0) == 3)
+ more = 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bson_locate_all_init
+
+char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error) {
+ char* path = NULL;
+ int mx = 10;
+ PBVAL bvp, bvp2;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (g->Activityp) {
+ path = (char*)g->Activityp;
+ *res_length = strlen(path);
+ return path;
+ } else {
+ *error = 1;
+ *res_length = 0;
+ *is_null = 1;
+ return NULL;
+ } // endif Activityp
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ BJNX bnx(g);
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else
+ bnx.Reset();
+
+ bvp = bnx.MakeValue(args, 0, true);
+
+ if (bvp->Type == TYPE_NULL) {
+ PUSH_WARNING("First argument is not a valid JSON item");
+ goto err;
+ } // endif bvp
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = bvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } else
+ bvp = (PBVAL)g->Xchk;
+
+ // The item to locate
+ bvp2 = bnx.MakeValue(args, 1, true);
+
+ if (bvp2->Type == TYPE_NULL) {
+ PUSH_WARNING("Invalid second argument");
+ goto err;
+ } // endif bvp
+
+ if (args->arg_count > 2)
+ mx = (int)*(long long*)args->args[2];
+
+// bnxp = new(g) BJNX(g, bvp, TYPE_STRING);
+ path = bnx.LocateAll(g, bvp, bvp2, mx);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)path;
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } // end catch
+
+err:
+ if (!path) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(path);
+
+ return path;
+} // end of bson_locate_all
+
+void bson_locate_all_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_locate_all_deinit
+
+/*********************************************************************************/
+/* Check whether the document contains a value or item. */
+/*********************************************************************************/
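+/* Illustrative usage (assumed default UDF name):                               */
+/*   SELECT bson_contains('[1,2,3]', 5);         -- should yield 0              */
+/* Implemented by calling bsonlocate and testing whether a path was found.      */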
+my_bool bson_contains_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more = 1024;
+ int n = IsArgJson(args, 0);
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third argument is not an integer (index)");
+ return true;
+ } else if (args->arg_count > 3) {
+ if (args->arg_type[3] == INT_RESULT && args->args[3])
+ more += (unsigned long)*(long long*)args->args[3];
+ else {
+ strcpy(message, "Fourth argument is not an integer (memory)");
+ return true;
+ } // endelse
+
+ } // endif's
+
+ CalcLen(args, false, reslen, memlen);
+ //memlen += more;
+
+ // TODO: calculate this
+ more += (IsArgJson(args, 0) != 3 ? 1000 : 0);
+
+ return JsonInit(initid, args, message, false, reslen, memlen, more);
+} // end of bson_contains_init
+
+long long bson_contains(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
+{
+ char isn, res[256];
+ unsigned long reslen;
+
+ isn = 0;
+ bsonlocate(initid, args, res, &reslen, &isn, error);
+ return (isn) ? 0LL : 1LL;
+} // end of bson_contains
+
+void bson_contains_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_contains_deinit
+
+/*********************************************************************************/
+/* Check whether the document contains a path. */
+/*********************************************************************************/
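+/* Illustrative usage (assumed default UDF name):                               */
+/*   SELECT bsoncontains_path('{"a":{"b":1}}', '$.a.b');  -- should yield 1     */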
+my_bool bsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more = 1024;
+ int n = IsArgJson(args, 0);
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a string (path)");
+ return true;
+ } else if (args->arg_count > 2) {
+ if (args->arg_type[2] == INT_RESULT && args->args[2])
+ more += (unsigned long)*(long long*)args->args[2];
+ else {
+ strcpy(message, "Third argument is not an integer (memory)");
+ return true;
+ } // endelse
+
+ } // endif's
+
+ CalcLen(args, false, reslen, memlen);
+ //memlen += more;
+
+ // TODO: calculate this
+ more += (IsArgJson(args, 0) != 3 ? 1000 : 0);
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsoncontains_path_init
+
+long long bsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
+{
+ char *p, *path;
+ long long n;
+ PBVAL jsp;
+ PBVAL jvp;
+ PBJNX bxp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (!g->Activityp) {
+ return 0LL;
+ } else
+ return *(long long*)g->Activityp;
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto err;
+ } else {
+ BJNX bnx(g);
+
+ jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endelse CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ bxp = new(g) BJNX(g, jsp, TYPE_BIGINT);
+ path = MakePSZ(g, args, 1);
+
+ if (bxp->SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif SetJpath
+
+ n = (bxp->CheckPath(g)) ? 1LL : 0LL;
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long));
+
+ if (np) {
+ *np = n;
+ g->Activityp = (PACTIVITY)np;
+ } else
+ PUSH_WARNING(g->Message);
+
+ } // endif const_item
+
+ return n;
+
+err:
+ if (g->Mrr) *error = 1;
+ return 0LL;
+} // end of bsoncontains_path
+
+void bsoncontains_path_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsoncontains_path_deinit
+
+/*********************************************************************************/
+/* Merge two arrays or objects. */
+/*********************************************************************************/
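+/* Illustrative usage (assumed default UDF name): both items must be arrays or  */
+/* both objects, e.g. SELECT bson_item_merge('[1,2]', '[3]');                   */
+/* should yield something like [1,2,3].                                         */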
+my_bool bson_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (!IsArgJson(args, i) && args->arg_type[i] != STRING_RESULT) {
+ sprintf(message, "Argument %d must be a json item", i);
+ return true;
+ } // endif type
+
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_item_merge_init
+
+char *bson_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 2, false, false, true)) {
+ JTYP type;
+ BJNX bnx(g);
+ PBVAL jvp, top = NULL;
+ PBVAL jsp[2] = {NULL, NULL};
+
+ for (int i = 0; i < 2; i++) {
+ jvp = bnx.MakeValue(args, i, true);
+
+ if (i) {
+ if (jvp->Type != type) {
+ PUSH_WARNING("Argument types mismatch");
+ goto fin;
+ } // endif type
+
+ } else {
+ type = (JTYP)jvp->Type;
+
+ if (type != TYPE_JAR && type != TYPE_JOB) {
+ PUSH_WARNING("First argument is not an array or object");
+ goto fin;
+ } else
+ top = jvp;
+
+ } // endif i
+
+ jsp[i] = jvp;
+ } // endfor i
+
+ if (type == TYPE_JAR)
+ bnx.MergeArray(jsp[0], jsp[1]);
+ else
+ bnx.MergeObject(jsp[0], jsp[1]);
+
+ bnx.SetChanged(true);
+ str = bnx.MakeResult(args, top);
+ } // endif CheckMemory
+
+ // In case of error or file, return unchanged first argument
+ if (!str)
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *res_length = 0;
+ *error = 1;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_item_merge
+
+void bson_item_merge_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_item_merge_deinit
+
+/*********************************************************************************/
+/* Get a Json item from a Json document. */
+/*********************************************************************************/
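+/* Illustrative usage (assumed default UDF name): returns the item at the given */
+/* json path, e.g. SELECT bson_get_item('{"a":{"b":1}}', '$.a');                */
+/* should yield something like {"b":1}.                                         */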
+my_bool bson_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more;
+ int n = IsArgJson(args, 0);
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a string (jpath)");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ if (n == 2 && args->args[0]) {
+ char fn[_MAX_PATH];
+ long fl;
+
+ memcpy(fn, args->args[0], args->lengths[0]);
+ fn[args->lengths[0]] = 0;
+ fl = GetFileLength(fn);
+ more = fl * 3;
+ } else if (n != 3) {
+ more = args->lengths[0] * 3;
+ } else
+ more = 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bson_get_item_init
+
+char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *)
+{
+ char *path, *str = NULL;
+ PBVAL jvp;
+ PBJNX bxp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ str = (char*)g->Activityp;
+ goto fin;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto fin;
+ } else {
+ BJNX bnx(g);
+
+ jvp = bnx.MakeValue(args, 0, true);
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endelse CheckMemory
+
+ } else
+ jvp = (PBVAL)g->Xchk;
+
+ path = MakePSZ(g, args, 1);
+ bxp = new(g) BJNX(g, jvp, TYPE_STRING, initid->max_length);
+
+ if (bxp->SetJpath(g, path, true)) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } else
+ bxp->ReadValue(g);
+
+ if (!bxp->GetValue()->IsNull())
+ str = bxp->GetValue()->GetCharValue();
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_get_item
+
+void bson_get_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_get_item_deinit
+
+/*********************************************************************************/
+/* Get a string value from a Json item. */
+/*********************************************************************************/
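+/* Illustrative usage (assumed default UDF name):                               */
+/*   SELECT bsonget_string('{"a":"hello"}', '$.a');    -- should yield hello    */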
+my_bool bsonget_string_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more = 1024;
+ int n = IsArgJson(args, 0);
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a string (jpath)");
+ return true;
+ } else if (args->arg_count > 2) {
+ if (args->arg_type[2] == INT_RESULT && args->args[2])
+ more += (unsigned long)*(long long*)args->args[2];
+ else {
+ strcpy(message, "Third argument is not an integer (memory)");
+ return true;
+ } // endelse
+
+ } // endif's
+
+ CalcLen(args, false, reslen, memlen);
+ //memlen += more;
+
+ if (n == 2 && args->args[0]) {
+ char fn[_MAX_PATH];
+ long fl;
+
+ memcpy(fn, args->args[0], args->lengths[0]);
+ fn[args->lengths[0]] = 0;
+ fl = GetFileLength(fn);
+ more += fl * 3;
+ } else if (n != 3)
+ more += args->lengths[0] * 3;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonget_string_init
+
+char *bsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *)
+{
+ char *p, *path, *str = NULL;
+ PBVAL jsp, jvp;
+ PBJNX bxp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ str = (char*)g->Activityp;
+ goto err;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto err;
+ } else {
+ BJNX bnx(g);
+
+ jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endelse CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ path = MakePSZ(g, args, 1);
+ bxp = new(g) BJNX(g, jsp, TYPE_STRING, initid->max_length);
+
+ if (bxp->SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ goto err;
+ } else
+ bxp->ReadValue(g);
+
+ if (!bxp->GetValue()->IsNull())
+ str = bxp->GetValue()->GetCharValue();
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } // end catch
+
+err:
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bsonget_string
+
+void bsonget_string_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonget_string_deinit
+
+/*********************************************************************************/
+/* Get an integer value from a Json item. */
+/*********************************************************************************/
+my_bool bsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more;
+
+ if (args->arg_count != 2) {
+ strcpy(message, "This function must have 2 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a (jpath) string");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ // TODO: calculate this
+ more = (IsArgJson(args, 0) != 3) ? 1000 : 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonget_int_init
+
+long long bsonget_int(UDF_INIT *initid, UDF_ARGS *args,
+ char *is_null, char *error)
+{
+ char *p, *path;
+ long long n;
+ PBVAL jsp, jvp;
+ PBJNX bxp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (!g->Activityp) {
+ *is_null = 1;
+ return 0LL;
+ } else
+ return *(long long*)g->Activityp;
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ if (g->Mrr) *error = 1;
+ *is_null = 1;
+ return 0LL;
+ } else {
+ BJNX bnx(g);
+
+ jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ if (g->Mrr) *error = 1;
+ *is_null = 1;
+ return 0;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endelse CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ path = MakePSZ(g, args, 1);
+ bxp = new(g) BJNX(g, jsp, TYPE_BIGINT);
+
+ if (bxp->SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ return 0;
+ } else
+ bxp->ReadValue(g);
+
+ if (bxp->GetValue()->IsNull()) {
+ *is_null = 1;
+ return 0;
+ } // endif IsNull
+
+ n = bxp->GetValue()->GetBigintValue();
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long));
+
+ if (np) {
+ *np = n;
+ g->Activityp = (PACTIVITY)np;
+ } else
+ PUSH_WARNING(g->Message);
+
+ } // endif const_item
+
+ return n;
+} // end of bsonget_int
+
+void bsonget_int_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonget_int_deinit
+
+/*********************************************************************************/
+/* Get a double value from a Json item. */
+/*********************************************************************************/
+my_bool bsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "At least 2 arguments required");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument is not a (jpath) string");
+ return true;
+ } else if (args->arg_count > 2) {
+ if (args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third argument is not an integer (decimals)");
+ return true;
+ } else
+ initid->decimals = (uint)*(longlong*)args->args[2];
+
+ } else
+ initid->decimals = 15;
+
+ CalcLen(args, false, reslen, memlen);
+
+ // TODO: calculate this
+ more = (IsArgJson(args, 0) != 3) ? 1000 : 0;
+
+ return JsonInit(initid, args, message, true, reslen, memlen, more);
+} // end of bsonget_real_init
+
+double bsonget_real(UDF_INIT *initid, UDF_ARGS *args,
+ char *is_null, char *error)
+{
+ char *p, *path;
+ double d;
+ PBVAL jsp, jvp;
+ PBJNX bxp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ if (!g->Activityp) {
+ *is_null = 1;
+ return 0.0;
+ } else
+ return *(double*)g->Activityp;
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ if (g->Mrr) *error = 1;
+ *is_null = 1;
+ return 0.0;
+ } else {
+ BJNX bnx(g);
+
+ jvp = bnx.MakeValue(args, 0);
+
+ if ((p = bnx.GetString(jvp))) {
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ return 0.0;
+ } // endif jsp
+
+ } else
+ jsp = jvp;
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+ } // endelse CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ path = MakePSZ(g, args, 1);
+ bxp = new(g) BJNX(g, jsp, TYPE_DOUBLE);
+
+ if (bxp->SetJpath(g, path)) {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ return 0.0;
+ } else
+ bxp->ReadValue(g);
+
+ if (bxp->GetValue()->IsNull()) {
+ *is_null = 1;
+ return 0.0;
+ } // endif IsNull
+
+ d = bxp->GetValue()->GetFloatValue();
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ double *dp;
+
+ if ((dp = (double*)PlgDBSubAlloc(g, NULL, sizeof(double)))) {
+ *dp = d;
+ g->Activityp = (PACTIVITY)dp;
+ } else {
+ PUSH_WARNING(g->Message);
+ *is_null = 1;
+ return 0.0;
+ } // endif dp
+
+ } // endif const_item
+
+ return d;
+} // end of bsonget_real
+
+void bsonget_real_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bsonget_real_deinit
+
+/*********************************************************************************/
+/* Delete items from a Json document. */
+/*********************************************************************************/
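+/* Illustrative usage (assumed default UDF name): deletes the items at the      */
+/* given paths, e.g. SELECT bson_delete_item('{"a":1,"b":2}', '$.b');           */
+/* should yield something like {"a":1}. When called with the single Jbin        */
+/* result of bbin_locate_all, all the located paths are deleted.                */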
+my_bool bson_delete_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ if (IsArgJson(args, 0) != 3) {
+ strcpy(message, "This function must have at least 2 arguments or one binary");
+ return true;
+ } // endif args
+
+ } // endif count
+
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // Is this a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bson_delete_item_init
+
+char *bson_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *path, *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ str = (char*)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 1, false, false, true)) {
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL top, jar = NULL;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (args->arg_count == 1) {
+ // This should be coming from bbin_locate_all
+ jar = jvp; // This is the array of paths
+ jvp = top; // And this is the document
+ } else if(!bnx.IsJson(jvp)) {
+ PUSH_WARNING("First argument is not a JSON document");
+ goto fin;
+ } else if (args->arg_count == 2) {
+ // Check whether this is an array of paths
+ jar = bnx.MakeValue(args, 1, true);
+
+ if (jar && jar->Type != TYPE_JAR)
+ jar = NULL;
+
+ } // endif arg_count
+
+ if (jar) {
+ // Do the deletion in reverse order
+ for(int i = bnx.GetArraySize(jar) - 1; i >= 0; i--) {
+ path = bnx.GetString(bnx.GetArrayValue(jar, i));
+
+ if (bnx.SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bnx.SetChanged(bnx.DeleteItem(g, jvp));
+ } // endfor i
+
+ } else for (uint i = 1; i < args->arg_count; i++) {
+ path = MakePSZ(g, args, i);
+
+ if (bnx.SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bnx.SetChanged(bnx.DeleteItem(g, jvp));
+ } // endfor i
+
+ str = bnx.MakeResult(args, top, INT_MAX);
+ } // endif CheckMemory
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_delete_item
+
+void bson_delete_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_delete_item_deinit
+
+/*********************************************************************************/
+/* This function is used by the bson_set/insert/update_item functions. */
+/*********************************************************************************/
+static char *bson_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *path, *str = NULL;
+ int w;
+ my_bool b = true;
+ PBJNX bxp;
+ PBVAL jsp, jvp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Alchecked) {
+ str = (char*)g->Activityp;
+ goto fin;
+ } else if (g->N)
+ g->Alchecked = 1;
+
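+ // The thin wrappers below pass the operation name ($set, $insert or $update)
+ // through the result buffer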
+ if (!strcmp(result, "$set"))
+ w = 0;
+ else if (!strcmp(result, "$insert"))
+ w = 1;
+ else if (!strcmp(result, "$update"))
+ w = 2;
+ else {
+ PUSH_WARNING("Logical error, please contact CONNECT developer");
+ goto fin;
+ } // endelse
+
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true, false, true)) {
+ PUSH_WARNING("CheckMemory error");
+ throw 1;
+ } else {
+ BJNX bnx(g);
+
+ jsp = bnx.MakeValue(args, 0, true);
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endif CheckMemory
+
+ } else
+ jsp = (PBVAL)g->Xchk;
+
+ bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true);
+
+ for (uint i = 1; i + 1 < args->arg_count; i += 2) {
+ jvp = bxp->MakeValue(args, i);
+ path = MakePSZ(g, args, i + 1);
+
+ if (bxp->SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
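+ // $insert writes only where the path does not yet exist, $update only where
+ // it does; $set (w == 0) writes unconditionally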
+ if (w) {
+ bxp->ReadValue(g);
+ b = bxp->GetValue()->IsNull();
+ b = (w == 1) ? b : !b;
+ } // endif w
+
+ if (b && bxp->WriteValue(g, jvp)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bxp->SetChanged(true);
+ } // endfor i
+
+ // In case of error or file, return unchanged argument
+ if (!(str = bxp->MakeResult(args, jsp, INT_MAX32)))
+ str = MakePSZ(g, args, 0);
+
+ if (g->N)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ str = NULL;
+ } // end catch
+
+fin:
+ if (!str) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_handle_item
+
+/*********************************************************************************/
+/* Set Json items of a Json document according to path. */
+/*********************************************************************************/
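+/* Illustrative usage (assumed default UDF name): value/path pairs follow the   */
+/* document, e.g. SELECT bson_set_item('{"a":1}', 2, '$.b');                    */
+/* should yield something like {"a":1,"b":2}. bson_insert_item and              */
+/* bson_update_item differ only in when an existing path is written.            */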
+my_bool bson_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, more = 0;
+ int n = IsArgJson(args, 0);
+
+ if (!(args->arg_count % 2)) {
+ strcpy(message, "This function must have an odd number of arguments");
+ return true;
+ } else if (!n && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ if (n == 2 && args->args[0]) {
+ char fn[_MAX_PATH];
+ long fl;
+
+ memcpy(fn, args->args[0], args->lengths[0]);
+ fn[args->lengths[0]] = 0;
+ fl = GetFileLength(fn);
+ more += fl * 3;
+ } else if (n != 3)
+ more += args->lengths[0] * 3;
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen, more)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ g->Alchecked = 0;
+ return false;
+ } else
+ return true;
+
+} // end of bson_set_item_init
+
+char *bson_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$set");
+ return bson_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bson_set_item
+
+void bson_set_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_set_item_deinit
+
+/*********************************************************************************/
+/* Insert Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bson_insert_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bson_insert_item_init
+
+char *bson_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$insert");
+ return bson_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bson_insert_item
+
+void bson_insert_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_insert_item_deinit
+
+/*********************************************************************************/
+/* Update Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bson_update_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bson_update_item_init
+
+char *bson_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$update");
+ return bson_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bson_update_item
+
+void bson_update_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_update_item_deinit
+
+/*********************************************************************************/
+/* Returns a json file as a json string. */
+/*********************************************************************************/
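+/* Illustrative usage (assumed default UDF name, hypothetical file name):       */
+/*   SELECT bson_file('/tmp/biblio.json');                                      */
+/* Optional arguments: a pretty value, a path restricting the returned part,    */
+/* and an extra memory amount.                                                  */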
+my_bool bson_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen, fl, more = 1024;
+
+ if (args->arg_count < 1 || args->arg_count > 4) {
+ strcpy(message, "This function only accepts 1 to 4 arguments");
+ return true;
+ } else if (args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a string (file name)");
+ return true;
+ } // endif's args[0]
+
+ for (unsigned int i = 1; i < args->arg_count; i++) {
+ if (!(args->arg_type[i] == INT_RESULT || args->arg_type[i] == STRING_RESULT)) {
+ sprintf(message, "Argument %d is not an integer or a string (pretty or path)", i);
+ return true;
+ } // endif arg_type
+
+ // Take care of eventual memory argument
+ if (args->arg_type[i] == INT_RESULT && args->args[i])
+ more += (ulong)*(longlong*)args->args[i];
+
+ } // endfor i
+
+ initid->maybe_null = 1;
+ CalcLen(args, false, reslen, memlen);
+
+ if (args->args[0])
+ fl = GetFileLength(args->args[0]);
+ else
+ fl = 100; // File name is not a constant: use an arbitrary default length
+
+ reslen += fl;
+
+ if (initid->const_item)
+ more += fl;
+
+ if (args->arg_count > 1)
+ more += fl * M;
+
+ memlen += more;
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bson_file_init
+
+char *bson_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *fn, *str = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->N) {
+ str = (char*)g->Xchk;
+ goto fin;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+ fn = MakePSZ(g, args, 0);
+
+ if (args->arg_count > 1) {
+ int pretty = 3, pty = 3;
+ size_t len;
+ PBVAL jsp, jvp = NULL;
+ BJNX bnx(g);
+
+ for (unsigned int i = 1; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) {
+ pretty = (int) * (longlong*)args->args[i];
+ break;
+ } // endif type
+
+ // Parse the json file and allocate its tree structure
+ if (!(jsp = bnx.ParseJsonFile(g, fn, pty, len))) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } // endif jsp
+
+ if (pty == 3)
+ PUSH_WARNING("File pretty format cannot be determined");
+ else if (pretty != 3 && pty != pretty)
+ PUSH_WARNING("File pretty format doesn't match the specified pretty value");
+ else if (pretty == 3)
+ pretty = pty;
+
+ // Check whether a path was specified
+ if (bnx.CheckPath(g, args, jsp, jvp, 1)) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } else if (jvp)
+ jsp = jvp;
+
+ if (!(str = bnx.Serialize(g, jsp, NULL, 0)))
+ PUSH_WARNING(g->Message);
+
+ } else
+ if (!(str = GetJsonFile(g, fn)))
+ PUSH_WARNING(g->Message);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = str;
+
+fin:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bson_file
+
+void bson_file_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_file_deinit
+
+/*********************************************************************************/
+/* Make a json file from a json item. */
+/*********************************************************************************/
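+// Usage sketch (illustrative item, file name and pretty value):
+//   SELECT bfile_make('{"a":1}', 'out.json', 0);
+// writes the json item to the named file with the requested pretty format.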
+my_bool bfile_make_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 1 || args->arg_count > 3) {
+ strcpy(message, "Wrong number of arguments");
+ return true;
+ } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } // endif
+
+ CalcLen(args, false, reslen, memlen);
+ memlen = memlen + 5000; // To take care of not pretty files
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bfile_make_init
+
+char *bfile_make(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *)
+{
+ char *p, *str = NULL, *fn = NULL;
+ int n, pretty = 2;
+ PBVAL jsp, jvp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ BJNX bnx(g);
+
+ if (g->N) {
+ str = (char*)g->Activityp;
+ goto fin;
+ } else if (initid->const_item)
+ g->N = 1;
+
+ if ((n = IsArgJson(args, 0)) == 3) {
+ // Get default file name and pretty
+ PBSON bsp = (PBSON)args->args[0];
+
+ fn = bsp->Filename;
+ pretty = bsp->Pretty;
+ } else if ((n = IsArgJson(args, 0)) == 2)
+ fn = args->args[0];
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ goto fin;
+ } else
+ bnx.Reset();
+
+ jvp = bnx.MakeValue(args, 0);
+
+ if (!n && (p = bnx.GetString(jvp))) {
+ if (!strchr("[{ \t\r\n", *p)) {
+ // Is this a file name?
+ if (!(p = GetJsonFile(g, p))) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } else
+ fn = bnx.GetString(jvp);
+
+ } // endif p
+
+ if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) {
+ PUSH_WARNING(g->Message);
+ goto fin;
+ } // endif jsp
+
+ bnx.SetValueVal(jvp, jsp);
+ } // endif p
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jvp;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } else
+ jvp = (PBVAL)g->Xchk;
+
+ for (uint i = 1; i < args->arg_count; i++)
+ switch (args->arg_type[i]) {
+ case STRING_RESULT:
+ fn = MakePSZ(g, args, i);
+ break;
+ case INT_RESULT:
+ pretty = (int)*(longlong*)args->args[i];
+ break;
+ default:
+ PUSH_WARNING("Unexpected argument type in bfile_make");
+ } // endswitch arg_type
+
+ if (fn) {
+ if (!bnx.Serialize(g, jvp, fn, pretty))
+ PUSH_WARNING(g->Message);
+ } else
+ PUSH_WARNING("Missing file name");
+
+ str = fn;
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)str;
+
+fin:
+ if (!str) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = strlen(str);
+
+ return str;
+} // end of bfile_make
+
+void bfile_make_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bfile_make_deinit
+
+/*********************************************************************************/
+/* Convert a pretty-formatted Json file to Pretty=0. */
+/*********************************************************************************/
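+// Usage sketch (illustrative file names; the third argument is the LRECL buffer size):
+//   SELECT bfile_convert('pretty.json', 'flat.json', 8192);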
+my_bool bfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 3) {
+ strcpy(message, "This function must have 3 arguments");
+ return true;
+ } else if (args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third Argument must be an integer (LRECL)");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (args->arg_type[i] != STRING_RESULT) {
+ sprintf(message, "Arguments %d must be a string (file name)", i+1);
+ return true;
+ } // endif args
+
+ CalcLen(args, false, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bfile_convert_init
+
+char *bfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long *res_length, char *is_null, char *error) {
+ char *str, *fn, *ofn;
+ int lrecl = (int)*(longlong*)args->args[2];
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+ fn = MakePSZ(g, args, 0);
+ ofn = MakePSZ(g, args, 1);
+
+ if (!g->Xchk) {
+ JUP* jup = new(g) JUP(g);
+
+ str = jup->UnprettyJsonFile(g, fn, ofn, lrecl);
+ g->Xchk = str;
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ PUSH_WARNING(g->Message ? g->Message : "Unexpected error");
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else {
+ strcpy(result, str);
+ *res_length = strlen(str);
+ } // endif str
+
+ return str;
+} // end of bfile_convert
+
+void bfile_convert_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bfile_convert_deinit
+
+/*********************************************************************************/
+/* Convert a pretty=0 Json file to binary BJSON. */
+/*********************************************************************************/
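+// Usage sketch (illustrative file names; the optional third argument is the LRECL):
+//   SELECT bfile_bjson('flat.json', 'docs.bjson', 4096);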
+my_bool bfile_bjson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 2 && args->arg_count != 3) {
+ strcpy(message, "This function must have 2 or 3 arguments");
+ return true;
+ } else if (args->arg_count == 3 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third Argument must be an integer (LRECL)");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (args->arg_type[i] != STRING_RESULT) {
+ sprintf(message, "Arguments %d must be a string (file name)", i + 1);
+ return true;
+ } // endif args
+
+ CalcLen(args, false, reslen, memlen);
+ memlen = memlen * M;
+ memlen += (args->arg_count == 3) ? (ulong)*(longlong*)args->args[2] : 1024;
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bfile_bjson_init
+
+char *bfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char*, char *error) {
+ char *buf, *str = NULL, fn[_MAX_PATH], ofn[_MAX_PATH];
+ bool loop;
+ ssize_t len, newloc;
+ size_t lrecl, binszp;
+ PBVAL jsp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ BDOC doc(g);
+
+ strcpy(fn, MakePSZ(g, args, 0));
+ strcpy(ofn, MakePSZ(g, args, 1));
+
+ if (args->arg_count == 3)
+ lrecl = (size_t)*(longlong*)args->args[2];
+ else
+ lrecl = 1024;
+
+ if (!g->Xchk) {
+ int msgid = MSGID_OPEN_MODE_STRERROR;
+ FILE *fout = NULL;
+ FILE *fin = NULL;
+
+ if (!(fin = global_fopen(g, msgid, fn, "rt")))
+ str = strcpy(result, g->Message);
+ else if (!(fout = global_fopen(g, msgid, ofn, "wb")))
+ str = strcpy(result, g->Message);
+ else if ((buf = (char*)malloc(lrecl))) {
+ try {
+ do {
+ loop = false;
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+
+ if (!fgets(buf, lrecl, fin)) {
+ if (!feof(fin)) {
+ sprintf(g->Message, "Error %d reading %zd bytes from %s",
+ errno, lrecl, fn);
+ str = strcpy(result, g->Message);
+ } else
+ str = strcpy(result, ofn);
+
+ } else if ((len = strlen(buf))) {
+ if ((jsp = doc.ParseJson(g, buf, len))) {
+ newloc = (size_t)PlugSubAlloc(g, NULL, 0);
+ binszp = newloc - (size_t)jsp;
+
+ if (fwrite(&binszp, sizeof(binszp), 1, fout) != 1) {
+ sprintf(g->Message, "Error %d writing %zd bytes to %s",
+ errno, sizeof(binszp), ofn);
+ str = strcpy(result, g->Message);
+ } else if (fwrite(jsp, binszp, 1, fout) != 1) {
+ sprintf(g->Message, "Error %d writing %zd bytes to %s",
+ errno, binszp, ofn);
+ str = strcpy(result, g->Message);
+ } else
+ loop = true;
+
+ } else {
+ str = strcpy(result, g->Message);
+ } // endif jsp
+
+ } else
+ loop = true;
+
+ } while (loop);
+
+ } catch (int) {
+ str = strcpy(result, g->Message);
+ } catch (const char* msg) {
+ str = strcpy(result, msg);
+ } // end catch
+
+ free(buf);
+ } else
+ str = strcpy(result, "Buffer malloc failed");
+
+ if (fin) fclose(fin);
+ if (fout) fclose(fout);
+ g->Xchk = str;
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ if (g->Message)
+ str = strcpy(result, g->Message);
+ else
+ str = strcpy(result, "Unexpected error");
+
+ } // endif str
+
+ *res_length = strlen(str);
+ return str;
+} // end of bfile_bjson
+
+void bfile_bjson_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bfile_bjson_deinit
+
+/*********************************************************************************/
+/* Serialize a Json document. */
+/*********************************************************************************/
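+// Usage sketch (illustrative): turn a binary (Jbin) result back into text, e.g.
+//   SELECT bson_serialize(bbin_make_array(1, 2, 3));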
+my_bool bson_serialize_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->args[0] && IsArgJson(args, 0) != 3) {
+ strcpy(message, "Argument must be a Jbin tree");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen);
+
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bson_serialize_init
+
+char *bson_serialize(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *, char *error)
+{
+ char *str;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (!g->Xchk) {
+ if (IsArgJson(args, 0) == 3) {
+ PBSON bsp = (PBSON)args->args[0];
+ BJNX bnx(bsp->G);
+ PBVAL bvp = (args->arg_count == 1) ? (PBVAL)bsp->Jsp : (PBVAL)bsp->Top;
+
+ if (!(str = bnx.Serialize(g, bvp, bsp->Filename, bsp->Pretty)))
+ str = strcpy(result, g->Message);
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? str : NULL;
+ } else {
+ // *error = 1;
+ str = strcpy(result, "Argument is not a Jbin tree");
+ } // endif
+
+ } else
+ str = (char*)g->Xchk;
+
+ *res_length = strlen(str);
+ return str;
+} // end of bson_serialize
+
+void bson_serialize_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bson_serialize_deinit
+
+/*********************************************************************************/
+/* Make and return a binary Json array containing all the parameters. */
+/* Note: jvp must be set before arp because it can be a binary argument. */
+/*********************************************************************************/
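+// Usage sketch (illustrative values): the binary result is meant to be nested in
+// other bson_/bbin_ calls or turned back into text with bson_serialize, e.g.
+//   SELECT bson_serialize(bbin_make_array(1, 'two', 3.5));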
+my_bool bbin_make_array_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, false, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_make_array_init
+
+char *bbin_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false)) {
+ BJNX bnx(g);
+ PBVAL jvp = bnx.MakeValue(args, 0);
+ PBVAL arp = bnx.NewVal(TYPE_JAR);
+
+ for (uint i = 0; i < args->arg_count;) {
+ bnx.AddArrayValue(arp, jvp);
+ jvp = bnx.MakeValue(args, ++i);
+ } // endfor i
+
+ if ((bsp = BbinAlloc(bnx.G, initid->max_length, arp))) {
+ strcat(bsp->Msg, " array");
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ } // endif CheckMemory
+
+ } else
+ bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_make_array
+
+void bbin_make_array_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_make_array_deinit
+
+/*********************************************************************************/
+/* Add one value to a Json array. */
+/*********************************************************************************/
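+// Usage sketch (illustrative; an optional integer argument gives the insertion index):
+//   SELECT bson_serialize(bbin_array_add(bbin_make_array(1, 2), 3));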
+my_bool bbin_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else
+ CalcLen(args, false, reslen, memlen, true);
+
+ if (!JsonInit(initid, args, message, true, reslen, memlen)) {
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ // This is a constant function
+ g->N = (initid->const_item) ? 1 : 0;
+
+ // This is to avoid double execution when using prepared statements
+ if (IsArgJson(args, 0) > 1)
+ initid->const_item = 0;
+
+ return false;
+ } else
+ return true;
+
+} // end of bbin_array_add_init
+
+char *bbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ *res_length = sizeof(BSON);
+ return (char*)bsp;
+ } else if (!CheckMemory(g, initid, args, 2, false, false, true)) {
+ uint n = 2;
+ int* x = GetIntArgPtr(g, args, n);
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL jarp = NULL, top, jvp = NULL;
+ PBVAL jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jvp && jvp->Type != TYPE_JAR) {
+ if ((jarp = bnx.NewVal(TYPE_JAR))) {
+ bnx.AddArrayValue(jarp, jvp);
+
+ if (!top)
+ top = jarp;
+
+ } // endif jarp
+
+ } else
+ jarp = jvp;
+
+ if (jarp) {
+ bnx.AddArrayValue(jarp, bnx.MakeValue(args, 1), x);
+ bnx.SetChanged(true);
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } else
+ PUSH_WARNING(g->Message);
+
+ } // endif CheckMemory
+
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_array_add
+
+void bbin_array_add_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_add_deinit
+
+/*********************************************************************************/
+/* Add one or several values to a Bson array. */
+/*********************************************************************************/
+my_bool bbin_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* message)
+{
+ return bson_array_add_values_init(initid, args, message);
+} // end of bbin_array_add_values_init
+
+char* bbin_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (!g->Xchk) {
+ if (!CheckMemory(g, initid, args, args->arg_count, true)) {
+ uint i = 0;
+ BJNX bnx(g);
+ PBVAL arp, top, jvp = NULL;
+ PBVAL bvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bvp->Type == TYPE_JAR) {
+ arp = bvp;
+ i = 1;
+ } else // First argument is not an array
+ arp = bnx.NewVal(TYPE_JAR);
+
+ for (; i < args->arg_count; i++)
+ bnx.AddArrayValue(arp, bnx.MakeValue(args, i));
+
+ bnx.SetChanged(true);
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ } // endif CheckMemory
+
+ // Keep result of constant function
+ g->Xchk = (g->N) ? bsp : NULL;
+ } else
+ bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_array_add_values
+
+void bbin_array_add_values_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_add_values_deinit
+
+/*********************************************************************************/
+/* Make a Json array from values coming from rows. */
+/*********************************************************************************/
+my_bool bbin_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_array_grp_init(initid, args, message);
+} // end of bbin_array_grp_init
+
+void bbin_array_grp_clear(UDF_INIT *initid, char *a, char *b)
+{
+ bson_array_grp_clear(initid, a, b);
+} // end of bbin_array_grp_clear
+
+void bbin_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char *a, char *b)
+{
+ bson_array_grp_add(initid, args, a, b);
+} // end of bbin_array_grp_add
+
+char *bbin_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBVAL arp = (PBVAL)g->Activityp;
+
+ if (g->N < 0)
+ PUSH_WARNING("Result truncated to json_grp_size values");
+
+ if (arp)
+ if ((bsp = BbinAlloc(g, initid->max_length, arp)))
+ strcat(bsp->Msg, " array");
+
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_array_grp
+
+void bbin_array_grp_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_grp_deinit
+
+/*********************************************************************************/
+/* Make a Json object from values coming from rows. */
+/*********************************************************************************/
+my_bool bbin_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_object_grp_init(initid, args, message);
+} // end of bbin_object_grp_init
+
+void bbin_object_grp_clear(UDF_INIT *initid, char *a, char *b)
+{
+ bson_object_grp_clear(initid, a, b);
+} // end of bbin_object_grp_clear
+
+void bbin_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char *a, char *b)
+{
+ bson_object_grp_add(initid, args, a, b);
+} // end of bbin_object_grp_add
+
+char *bbin_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBVAL bop = (PBVAL)g->Activityp;
+
+ if (g->N < 0)
+ PUSH_WARNING("Result truncated to json_grp_size values");
+
+ if (bop)
+ if ((bsp = BbinAlloc(g, initid->max_length, bop)))
+ strcat(bsp->Msg, " object");
+
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ *error = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_grp
+
+void bbin_object_grp_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_grp_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all the parameters. */
+/*********************************************************************************/
+my_bool bbin_make_object_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of bbin_make_object_init
+
+char *bbin_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, args->arg_count, true)) {
+ BJNX bnx(g);
+ PBVAL objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i++)
+ bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i));
+
+ if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
+ strcat(bsp->Msg, " object");
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ } // endif objp
+
+ } // endif CheckMemory
+
+ } // endif Xchk
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_make_object
+
+void bbin_make_object_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_make_object_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all not null parameters. */
+/*********************************************************************************/
+my_bool bbin_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_object_nonull_init
+
+char *bbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, true)) {
+ BJNX bnx(g);
+ PBVAL jvp, objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i++)
+ if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i)))
+ bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i));
+
+ if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
+ strcat(bsp->Msg, " object");
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ } // endif objp
+
+ } // endif CheckMemory
+
+ } // endif Xchk
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_nonull
+
+void bbin_object_nonull_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_nonull_deinit
+
+/*********************************************************************************/
+/* Make a Json Object containing all the key/value parameters. */
+/*********************************************************************************/
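+// Usage sketch (illustrative key/value pairs):
+//   SELECT bson_serialize(bbin_object_key('qty', 7, 'price', 3.45));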
+my_bool bbin_object_key_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count % 2) {
+ strcpy(message, "This function must have an even number of arguments");
+ return true;
+ } // endif arg_count
+
+ CalcLen(args, true, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_object_key_init
+
+char *bbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, args->arg_count, false, true)) {
+ BJNX bnx(g);
+ PBVAL objp;
+
+ if ((objp = bnx.NewVal(TYPE_JOB))) {
+ for (uint i = 0; i < args->arg_count; i += 2)
+ bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i));
+
+ if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) {
+ strcat(bsp->Msg, " object");
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ } // endif objp
+
+ } // endif CheckMemory
+
+ } // endif Xchk
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_key
+
+void bbin_object_key_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_key_deinit
+
+/*********************************************************************************/
+/* Add or replace a value in a Json Object. */
+/*********************************************************************************/
+my_bool bbin_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have at least 2 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_object_add_init
+
+char *bbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ *res_length = sizeof(BSON);
+ return (char*)bsp;
+ } else if (!CheckMemory(g, initid, args, 2, false, true, true)) {
+ PSZ key;
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL top;
+ PBVAL jobp = bnx.MakeValue(args, 0, true, &top);
+ PBVAL jvp = jobp;
+
+ if (bnx.CheckPath(g, args, jvp, jobp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jobp && jobp->Type == TYPE_JOB) {
+ jvp = bnx.MakeValue(args, 1);
+ key = bnx.MakeKey(args, 1);
+ bnx.SetKeyValue(jobp, jvp, key);
+ bnx.SetChanged(true);
+ } else {
+ PUSH_WARNING("First argument target is not an object");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jobp
+
+ // In case of error the unchanged argument will be returned
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } // endif CheckMemory
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_add
+
+void bbin_object_add_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_add_deinit
+
+/*********************************************************************************/
+/* Delete a value from a Json array. */
+/*********************************************************************************/
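+// Usage sketch (illustrative; the integer argument is the array index to delete):
+//   SELECT bson_serialize(bbin_array_delete(bbin_make_array('a', 'b', 'c'), 1));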
+my_bool bbin_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_array_delete_init(initid, args, message);
+} // end of bbin_array_delete_init
+
+char *bbin_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ } else if (!CheckMemory(g, initid, args, 1, false, false, true)) {
+ int* x;
+ uint n = 1;
+ BJNX bnx(g);
+ PBVAL arp, top;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (!(x = GetIntArgPtr(g, args, n)))
+ PUSH_WARNING("Missing or null array index");
+ else if (bnx.CheckPath(g, args, jvp, arp, 1))
+ PUSH_WARNING(g->Message);
+ else if (arp && arp->Type == TYPE_JAR) {
+ bnx.SetChanged(bnx.DeleteValue(arp, *x));
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ } else {
+ PUSH_WARNING("First argument target is not an array");
+ // if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } // endif CheckMemory
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_array_delete
+
+void bbin_array_delete_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_array_delete_deinit
+
+/*********************************************************************************/
+/* Delete a value from a Json object. */
+/*********************************************************************************/
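+// Usage sketch (illustrative; the second argument is the key to remove):
+//   SELECT bson_serialize(bbin_object_delete(bbin_object_key('a', 1, 'b', 2), 'a'));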
+my_bool bbin_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ unsigned long reslen, memlen;
+
+ if (args->arg_count < 2) {
+ strcpy(message, "This function must have 2 or 3 arguments");
+ return true;
+ } else if (!IsArgJson(args, 0)) {
+ strcpy(message, "First argument must be a json item");
+ return true;
+ } else if (args->arg_type[1] != STRING_RESULT) {
+ strcpy(message, "Second argument must be a key string");
+ return true;
+ } else
+ CalcLen(args, true, reslen, memlen, true);
+
+ return JsonInit(initid, args, message, true, reslen, memlen);
+} // end of bbin_object_delete_init
+
+char *bbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ *res_length = sizeof(BSON);
+ return (char*)bsp;
+ } else if (!CheckMemory(g, initid, args, 1, false, true, true)) {
+ PCSZ key;
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL top;
+ PBVAL jobp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bnx.CheckPath(g, args, top, jobp, 2))
+ PUSH_WARNING(g->Message);
+ else if (jobp && jobp->Type == TYPE_JOB) {
+ key = bnx.MakeKey(args, 1);
+ bnx.SetChanged(bnx.DeleteKey(jobp, key));
+ } else {
+ PUSH_WARNING("First argument target is not an object");
+ // if (g->Mrr) *error = 1; (only if no path)
+ } // endif jvp
+
+ // In case of error the unchanged argument will be returned
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } // endif CheckMemory
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_delete
+
+void bbin_object_delete_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_delete_deinit
+
+/*********************************************************************************/
+/* Returns an array of the Json object keys. */
+/*********************************************************************************/
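+// Usage sketch (illustrative): returns the object key names as a json array, e.g.
+//   SELECT bson_serialize(bbin_object_list(bbin_object_key('a', 1, 'b', 2)));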
+my_bool bbin_object_list_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_object_list_init(initid, args, message);
+} // end of bbin_object_list_init
+
+char *bbin_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, 1, true, true)) {
+ BJNX bnx(g);
+ PBVAL top, jarp = NULL;
+ PBVAL jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (jsp->Type == TYPE_JOB) {
+ jarp = bnx.GetKeyList(jsp);
+ } else {
+ PUSH_WARNING("First argument is not an object");
+ if (g->Mrr) *error = 1;
+ } // endif jsp type
+
+ // In case of error the unchanged argument will be returned
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ bsp->Jsp = (PJSON)jarp;
+
+ } // endif CheckMemory
+
+ // Keep result of constant function
+ g->Xchk = (initid->const_item) ? bsp : NULL;
+ } // endif bsp
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_list
+
+void bbin_object_list_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_list_deinit
+
+/*********************************************************************************/
+/* Returns an array of the Json object values. */
+/*********************************************************************************/
+my_bool bbin_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_object_values_init(initid, args, message);
+} // end of bbin_object_values_init
+
+char *bbin_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (!bsp) {
+ if (!CheckMemory(g, initid, args, 1, true, true)) {
+ BJNX bnx(g);
+ PBVAL top, jarp = NULL;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (jvp->Type == TYPE_JOB) {
+ jarp = bnx.GetObjectValList(jvp);
+ } else {
+ PUSH_WARNING("First argument is not an object");
+ if (g->Mrr) *error = 1;
+ } // endif jvp
+
+ // In case of error the unchanged argument will be returned
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ bsp->Jsp = (PJSON)jarp;
+
+ } // endif CheckMemory
+
+ if (initid->const_item) {
+ // Keep result of constant function
+ g->Xchk = bsp;
+ } // endif const_item
+
+ } // endif bsp
+
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_object_values
+
+void bbin_object_values_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_object_values_deinit
+
+/*********************************************************************************/
+/* Get a Json item from a Json document. */
+/*********************************************************************************/
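+// Usage sketch (illustrative document and path; path syntax is assumed):
+//   SELECT bson_serialize(bbin_get_item('{"a":{"b":1}}', '$.a.b'));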
+my_bool bbin_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_get_item_init(initid, args, message);
+} // end of bbin_get_item_init
+
+char *bbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ bsp = (PBSON)g->Xchk;
+ } else if (!CheckMemory(g, initid, args, 1, true, true)) {
+ char *path = MakePSZ(g, args, 1);
+ BJNX bnx(g, NULL, TYPE_STRING, initid->max_length);
+ PBVAL top, jvp = NULL;
+ PBVAL jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bnx.CheckPath(g, args, jsp, jvp, 1))
+ PUSH_WARNING(g->Message);
+ else if (jvp) {
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ bsp->Jsp = (PJSON)jvp;
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+ } // endif jvp
+
+ } else
+ PUSH_WARNING("CheckMemory error");
+
+ if (!bsp) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_get_item
+
+void bbin_get_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_get_item_deinit
+
+/*********************************************************************************/
+/* Merge two arrays or objects. */
+/*********************************************************************************/
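+// Usage sketch (illustrative; both arguments must be arrays, or both objects):
+//   SELECT bson_serialize(bbin_item_merge(bbin_make_array(1, 2), bbin_make_array(3)));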
+my_bool bbin_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_item_merge_init(initid, args, message);
+} // end of bbin_item_merge_init
+
+char *bbin_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 2, false, false, true)) {
+ JTYP type;
+ BJNX bnx(g);
+ PBVAL jvp, top = NULL;
+ PBVAL jsp[2] = {NULL, NULL};
+
+ for (int i = 0; i < 2; i++) {
+ if (i) {
+ jvp = bnx.MakeValue(args, i, true);
+
+ if (jvp->Type != type) {
+ PUSH_WARNING("Argument types mismatch");
+ goto fin;
+ } // endif type
+
+ } else {
+ jvp = bnx.MakeValue(args, i, true, &top);
+ type = (JTYP)jvp->Type;
+
+ if (type != TYPE_JAR && type != TYPE_JOB) {
+ PUSH_WARNING("First argument is not an array or object");
+ goto fin;
+ } // endif type
+
+ } // endif i
+
+ jsp[i] = jvp;
+ } // endfor i
+
+ if (type == TYPE_JAR)
+ bnx.MergeArray(jsp[0], jsp[1]);
+ else
+ bnx.MergeObject(jsp[0], jsp[1]);
+
+ bnx.SetChanged(true);
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ } // endif CheckMemory
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+fin:
+ if (!bsp) {
+ *res_length = 0;
+ *error = 1;
+ *is_null = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_item_merge
+
+void bbin_item_merge_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_item_merge_deinit
+
+/*********************************************************************************/
+/* This function is used by the bbin_set/insert/update_item functions. */
+/*********************************************************************************/
+static char *bbin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *path;
+ int w;
+ my_bool b = true;
+ PBJNX bxp;
+ PBVAL jsp, jvp, top;
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Alchecked) {
+ bsp = (PBSON)g->Activityp;
+ goto fin;
+ } else if (g->N)
+ g->Alchecked = 1;
+
+ if (!strcmp(result, "$set"))
+ w = 0;
+ else if (!strcmp(result, "$insert"))
+ w = 1;
+ else if (!strcmp(result, "$update"))
+ w = 2;
+ else {
+ PUSH_WARNING("Logical error, please contact CONNECT developer");
+ goto fin;
+ } // endelse
+
+ try {
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true, false, true)) {
+ throw 1;
+ } else {
+ BJNX bnx(g);
+
+ jsp = bnx.MakeValue(args, 0, true, &top);
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = jsp;
+ g->More = (size_t)top;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } // endif CheckMemory
+
+ } else {
+ jsp = (PBVAL)g->Xchk;
+ top = (PBVAL)g->More;
+ } // endif Xchk
+
+ bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true);
+
+ for (uint i = 1; i + 1 < args->arg_count; i += 2) {
+ jvp = bxp->MakeValue(args, i);
+ path = MakePSZ(g, args, i + 1);
+
+ if (bxp->SetJpath(g, path, false))
+ throw 2;
+
+ if (w) {
+ bxp->ReadValue(g);
+ b = bxp->GetValue()->IsNull();
+ b = (w == 1) ? b : !b;
+ } // endif w
+
+ if (b && bxp->WriteValue(g, jvp))
+ throw 3;
+
+ bxp->SetChanged(true);
+ } // endfor i
+
+ if (!(bsp = bxp->MakeBinResult(args, top, initid->max_length)))
+ throw 4;
+
+ if (g->N)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)bsp;
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+
+ PUSH_WARNING(g->Message);
+ } catch (const char *msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ } // end catch
+
+fin:
+ if (!bsp) {
+ *is_null = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_handle_item
+
+/*********************************************************************************/
+/* Set Json items of a Json document according to path. */
+/*********************************************************************************/
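+// Usage sketch (illustrative document, value and path): set always writes, while the
+// insert/update variants below only add missing items or change existing ones.
+//   SELECT bson_serialize(bbin_set_item('{"a":1}', 2, '$.b'));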
+my_bool bbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bbin_set_item_init
+
+char *bbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$set");
+ return bbin_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bbin_set_item
+
+void bbin_set_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_set_item_deinit
+
+/*********************************************************************************/
+/* Insert Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bbin_insert_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bbin_insert_item_init
+
+char *bbin_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$insert");
+ return bbin_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bbin_insert_item
+
+void bbin_insert_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_insert_item_deinit
+
+/*********************************************************************************/
+/* Update Json items of a Json document according to path. */
+/*********************************************************************************/
+my_bool bbin_update_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_set_item_init(initid, args, message);
+} // end of bbin_update_item_init
+
+char *bbin_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *p)
+{
+ strcpy(result, "$update");
+ return bbin_handle_item(initid, args, result, res_length, is_null, p);
+} // end of bbin_update_item
+
+void bbin_update_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_update_item_deinit
+
+/*********************************************************************************/
+/* Delete items from a Json document. */
+/*********************************************************************************/
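+// Usage sketch (illustrative document and path; a single Jbin argument coming from
+// bbin_locate_all is also accepted as the list of paths to delete):
+//   SELECT bson_serialize(bbin_delete_item('{"a":1,"b":2}', '$.b'));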
+my_bool bbin_delete_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_delete_item_init(initid, args, message);
+} // end of bbin_delete_item_init
+
+char *bbin_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *path;
+ PBSON bsp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ if (g->Xchk) {
+ // This constant function was recalled
+ bsp = (PBSON)g->Xchk;
+ goto fin;
+ } // endif Xchk
+
+ if (!CheckMemory(g, initid, args, 1, false, false, true)) {
+ BJNX bnx(g, NULL, TYPE_STRING);
+ PBVAL top, jar = NULL;
+ PBVAL jvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (args->arg_count == 1) {
+ // This should be coming from bbin_locate_all
+ jar = jvp; // This is the array of paths
+ jvp = top; // And this is the document
+ } else if(!bnx.IsJson(jvp)) {
+ PUSH_WARNING("First argument is not a JSON document");
+ goto fin;
+ } else if (args->arg_count == 2) {
+ // Check whether this is an array of paths
+ jar = bnx.MakeValue(args, 1, true);
+
+ if (jar && jar->Type != TYPE_JAR)
+ jar = NULL;
+
+ } // endif arg_count
+
+ if (jar) {
+ // Do the deletion in reverse order
+ for(int i = bnx.GetArraySize(jar) - 1; i >= 0; i--) {
+ path = bnx.GetString(bnx.GetArrayValue(jar, i));
+
+ if (bnx.SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bnx.SetChanged(bnx.DeleteItem(g, jvp));
+ } // endfor i
+
+ } else for (uint i = 1; i < args->arg_count; i++) {
+ path = MakePSZ(g, args, i);
+
+ if (bnx.SetJpath(g, path, false)) {
+ PUSH_WARNING(g->Message);
+ continue;
+ } // endif SetJpath
+
+ bnx.SetChanged(bnx.DeleteItem(g, jvp));
+ } // endfor i
+
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+
+ if (args->arg_count == 1)
+ // Here Jsp was not a sub-item of top
+ bsp->Jsp = (PJSON)top;
+
+ } // endif CheckMemory
+
+ if (g->N)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+fin:
+ if (!bsp) {
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_delete_item
+
+void bbin_delete_item_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_delete_item_deinit
+
+/*********************************************************************************/
+/* Returns a json file as a json binary tree. */
+/*********************************************************************************/
+my_bool bbin_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
+{
+ return bson_file_init(initid, args, message);
+} // end of bbin_file_init
+
+char *bbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char *is_null, char *error)
+{
+ char *fn;
+ int pretty = 3;
+ size_t len = 0;
+ PBVAL jsp, jvp = NULL;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ BJNX bnx(g);
+ PBSON bsp = (PBSON)g->Xchk;
+
+ if (bsp)
+ goto fin;
+
+ fn = MakePSZ(g, args, 0);
+
+ for (unsigned int i = 1; i < args->arg_count; i++)
+ if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) {
+ pretty = (int) * (longlong*)args->args[i];
+ break;
+ } // endif type
+
+ // Parse the json file and allocate its tree structure
+ if (!(jsp = bnx.ParseJsonFile(g, fn, pretty, len))) {
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ goto fin;
+ } // endif jsp
+
+// if (pretty == 3)
+// PUSH_WARNING("File pretty format cannot be determined");
+// else if (pretty == 3)
+// pretty = pty;
+
+ if ((bsp = BbinAlloc(bnx.G, len, jsp))) {
+ strcat(bsp->Msg, " file");
+ bsp->Filename = fn;
+ bsp->Pretty = pretty;
+ } else {
+ *error = 1;
+ goto fin;
+ } // endif bsp
+
+ // Check whether a path was specified
+ if (bnx.CheckPath(g, args, jsp, jvp, 1)) {
+ PUSH_WARNING(g->Message);
+ bsp = NULL;
+ goto fin;
+ } else if (jvp)
+ bsp->Jsp = (PJSON)jvp;
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Xchk = bsp;
+
+fin:
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_file
+
+void bbin_file_deinit(UDF_INIT* initid)
+{
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_file_deinit
+
+/*********************************************************************************/
+/* Locate all occurrences of a value in a Json tree. */
+/*********************************************************************************/
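+// Usage sketch (illustrative; an optional third argument limits the number of paths):
+//   SELECT bson_serialize(bbin_locate_all('[1,2,1]', 1));
+// returns a json array holding the paths of all occurrences of the value.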
+my_bool bbin_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ return bson_locate_all_init(initid, args, message);
+} // end of bbin_locate_all_init
+
+char* bbin_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result,
+ unsigned long* res_length, char* is_null, char* error) {
+ char *path = NULL;
+ int mx = 10;
+ PBVAL bvp, bvp2;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+ PBSON bsp = NULL;
+
+ if (g->N) {
+ if (g->Activityp) {
+ bsp = (PBSON)g->Activityp;
+ *res_length = sizeof(BSON);
+ return (char*)bsp;
+ } else {
+ *error = 1;
+ *res_length = 0;
+ *is_null = 1;
+ return NULL;
+ } // endif Activityp
+
+ } else if (initid->const_item)
+ g->N = 1;
+
+ try {
+ PBVAL top = NULL;
+ BJNX bnx(g);
+
+ if (!g->Xchk) {
+ if (CheckMemory(g, initid, args, 1, true)) {
+ PUSH_WARNING("CheckMemory error");
+ *error = 1;
+ goto err;
+ } else
+ bnx.Reset();
+
+ bvp = bnx.MakeValue(args, 0, true, &top);
+
+ if (bvp->Type == TYPE_NULL) {
+ PUSH_WARNING("First argument is not a valid JSON item");
+ goto err;
+ } // endif bvp
+
+ if (g->Mrr) { // First argument is a constant
+ g->Xchk = bvp;
+ g->More = (size_t)top;
+ JsonMemSave(g);
+ } // endif Mrr
+
+ } else {
+ bvp = (PBVAL)g->Xchk;
+ top = (PBVAL)g->More;
+ } // endif Xchk
+
+ // The item to locate
+ bvp2 = bnx.MakeValue(args, 1, true);
+
+ if (bvp2->Type == TYPE_NULL) {
+ PUSH_WARNING("Invalid second argument");
+ goto err;
+ } // endif bvp2
+
+ if (args->arg_count > 2)
+ mx = (int)*(long long*)args->args[2];
+
+ if ((path = bnx.LocateAll(g, bvp, bvp2, mx))) {
+ bsp = bnx.MakeBinResult(args, top, initid->max_length);
+ bsp->Jsp = (PJSON)bnx.ParseJson(g, path, strlen(path));
+ } // endif path
+
+ if (initid->const_item)
+ // Keep result of constant function
+ g->Activityp = (PACTIVITY)bsp;
+
+ } catch (int n) {
+ xtrc(1, "Exception %d: %s\n", n, g->Message);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ PUSH_WARNING(g->Message);
+ *error = 1;
+ path = NULL;
+ } // end catch
+
+err:
+ if (!bsp) {
+ *res_length = 0;
+ *is_null = 1;
+ } else
+ *res_length = sizeof(BSON);
+
+ return (char*)bsp;
+} // end of bbin_locate_all
+
+void bbin_locate_all_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of bbin_locate_all_deinit
+
+
diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h
new file mode 100644
index 00000000000..01b9b9d55d5
--- /dev/null
+++ b/storage/connect/bsonudf.h
@@ -0,0 +1,403 @@
+/******************** bsonudf H Declares Source Code File (.H) ******************/
+/* Name: bsonudf.h Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */
+/* */
+/* This file contains the BSON UDF function and class declares. */
+/*********************************************************************************/
+#pragma once
+#include "jsonudf.h"
+#include "bson.h"
+
+#if 0
+#define UDF_EXEC_ARGS \
+ UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char*
+
+// BSON size should be equal on Linux and Windows
+#define BMX 255
+typedef struct BSON* PBSON;
+
+/***********************************************************************/
+/* Structure used to return binary json to Json UDF functions. */
+/***********************************************************************/
+struct BSON {
+ char Msg[BMX + 1];
+ char *Filename;
+ PGLOBAL G;
+ int Pretty;
+ ulong Reslen;
+ my_bool Changed;
+ PJSON Top;
+ PJSON Jsp;
+ PBSON Bsp;
+}; // end of struct BSON
+
+PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp);
+
+/*********************************************************************************/
+/* The JSON tree node. Can be an Object or an Array. */
+/*********************************************************************************/
+typedef struct _jnode {
+ PSZ Key; // The key used for object
+ OPVAL Op; // Operator used for this node
+ PVAL CncVal; // Concatenation value used for OP_CNC
+ PVAL Valp; // The internal array VALUE
+ int Rank; // The rank in array
+ int Rx; // Read row number
+ int Nx; // Next to read row number
+} JNODE, *PJNODE;
+
+/*********************************************************************************/
+/* The JSON utility functions. */
+/*********************************************************************************/
+bool IsNum(PSZ s);
+char *NextChr(PSZ s, char sep);
+char *GetJsonNull(void);
+uint GetJsonGrpSize(void);
+my_bool JsonSubSet(PGLOBAL g, my_bool b = false);
+my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen,
+ unsigned long& memlen, my_bool mod = false);
+my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn,
+ unsigned long reslen, unsigned long memlen,
+ unsigned long more = 0);
+my_bool CheckMemory(PGLOBAL g, UDF_INIT* initid, UDF_ARGS* args, uint n,
+ my_bool m, my_bool obj = false, my_bool mod = false);
+PSZ MakePSZ(PGLOBAL g, UDF_ARGS* args, int i);
+int IsArgJson(UDF_ARGS* args, uint i);
+char *GetJsonFile(PGLOBAL g, char* fn);
+
+/*********************************************************************************/
+/* Structure JPN. Used to make the locate path. */
+/*********************************************************************************/
+typedef struct _jpn {
+ int Type;
+ PCSZ Key;
+ int N;
+} JPN, *PJPN;
+
+#endif // 0
+
+/* --------------------------- New Testing BJSON Stuff --------------------------*/
+extern uint JsonGrpSize;
+uint GetJsonGroupSize(void);
+
+typedef class BJNX* PBJNX;
+
+/*********************************************************************************/
+/* Class BJNX: BJSON access methods. */
+/*********************************************************************************/
+class BJNX : public BDOC {
+public:
+ // Constructors
+ BJNX(PGLOBAL g);
+ BJNX(PGLOBAL g, PBVAL row, int type, int len = 64, int prec = 0, my_bool wr = false);
+
+ // Implementation
+ int GetPrecision(void) { return Prec; }
+ PVAL GetValue(void) { return Value; }
+ void SetRow(PBVAL vp) { Row = vp; }
+ void SetChanged(my_bool b) { Changed = b; }
+
+ // Methods
+ my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false);
+ my_bool ParseJpath(PGLOBAL g);
+ void ReadValue(PGLOBAL g);
+ PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b = true);
+ PBVAL GetJson(PGLOBAL g);
+ my_bool CheckPath(PGLOBAL g);
+ my_bool CheckPath(PGLOBAL g, UDF_ARGS* args, PBVAL jsp, PBVAL& jvp, int n);
+ my_bool WriteValue(PGLOBAL g, PBVAL jvalp);
+ my_bool DeleteItem(PGLOBAL g, PBVAL vlp);
+ char *Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1);
+ char *LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10);
+ PSZ MakeKey(UDF_ARGS* args, int i);
+ PBVAL MakeValue(UDF_ARGS* args, uint i, bool b = false, PBVAL* top = NULL);
+ PBVAL MakeTypedValue(PGLOBAL g, UDF_ARGS* args, uint i,
+ JTYP type, PBVAL* top = NULL);
+ PBVAL ParseJsonFile(PGLOBAL g, char* fn, int& pty, size_t& len);
+ char *MakeResult(UDF_ARGS* args, PBVAL top, uint n = 2);
+ PBSON MakeBinResult(UDF_ARGS* args, PBVAL top, ulong len, int n = 2);
+
+protected:
+ my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm);
+ PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i);
+ PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n);
+ PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n);
+ PVAL MakeJson(PGLOBAL g, PBVAL bvp);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp);
+ PBVAL GetRow(PGLOBAL g);
+ PBVAL MoveVal(PBVAL vlp);
+ PBVAL MoveJson(PBJNX bxp, PBVAL jvp);
+ PBVAL MoveArray(PBJNX bxp, PBVAL jvp);
+ PBVAL MoveObject(PBJNX bxp, PBVAL jvp);
+ PBVAL MoveValue(PBJNX bxp, PBVAL jvp);
+ my_bool CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2);
+ my_bool LocateArray(PGLOBAL g, PBVAL jarp);
+ my_bool LocateObject(PGLOBAL g, PBVAL jobp);
+ my_bool LocateValue(PGLOBAL g, PBVAL jvp);
+ my_bool LocateArrayAll(PGLOBAL g, PBVAL jarp);
+ my_bool LocateObjectAll(PGLOBAL g, PBVAL jobp);
+ my_bool LocateValueAll(PGLOBAL g, PBVAL jvp);
+ my_bool CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2);
+ my_bool AddPath(void);
+
+ // Default constructor not to be used
+ BJNX(void) {}
+
+ // Members
+ PBVAL Row;
+ PBVAL Bvalp;
+ PJPN Jpnp;
+ JOUTSTR *Jp;
+ JNODE *Nodes; // The intermediate objects
+ PVAL Value;
+ PVAL MulVal; // Value used by multiple (expanded) columns
+ char *Jpath; // The json path
+ int Buf_Type;
+ int Long;
+ int Prec;
+ int Nod; // The number of intermediate objects
+ int Xnod; // Index of multiple values
+ int K; // Kth item to locate
+ int I; // Index of JPN
+ int Imax; // Max number of JPN's
+ int B; // Index base
+ my_bool Xpd; // True for expandable column
+ my_bool Parsed; // True when parsed
+ my_bool Found; // Item found by locate
+ my_bool Wr; // Write mode
+ my_bool Jb; // Must return json item
+ my_bool Changed; // True when the contents were modified
+}; // end of class BJNX
+
+extern "C" {
+ DllExport my_bool bson_test_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_test(UDF_EXEC_ARGS);
+ DllExport void bson_test_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonvalue_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bsonvalue(UDF_EXEC_ARGS);
+ DllExport void bsonvalue_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_make_array_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_make_array(UDF_EXEC_ARGS);
+ DllExport void bson_make_array_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_array_add_values_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_array_add_values(UDF_EXEC_ARGS);
+ DllExport void bson_array_add_values_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_array_add_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_array_add(UDF_EXEC_ARGS);
+ DllExport void bson_array_add_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_array_delete_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_array_delete(UDF_EXEC_ARGS);
+ DllExport void bson_array_delete_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonlocate_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bsonlocate(UDF_EXEC_ARGS);
+ DllExport void bsonlocate_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_locate_all_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_locate_all(UDF_EXEC_ARGS);
+ DllExport void bson_locate_all_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_contains_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bson_contains(UDF_INIT*, UDF_ARGS*, char*, char*);
+ DllExport void bson_contains_deinit(UDF_INIT*);
+
+ DllExport my_bool bsoncontains_path_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsoncontains_path(UDF_INIT*, UDF_ARGS*, char*, char*);
+ DllExport void bsoncontains_path_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_make_object_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_make_object(UDF_EXEC_ARGS);
+ DllExport void bson_make_object_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_nonull_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_nonull(UDF_EXEC_ARGS);
+ DllExport void bson_object_nonull_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_key_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_key(UDF_EXEC_ARGS);
+ DllExport void bson_object_key_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_add_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_add(UDF_EXEC_ARGS);
+ DllExport void bson_object_add_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_delete_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_delete(UDF_EXEC_ARGS);
+ DllExport void bson_object_delete_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_list_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_list(UDF_EXEC_ARGS);
+ DllExport void bson_object_list_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_values_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_object_values(UDF_EXEC_ARGS);
+ DllExport void bson_object_values_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_item_merge_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_item_merge(UDF_EXEC_ARGS);
+ DllExport void bson_item_merge_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_get_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bson_get_item(UDF_EXEC_ARGS);
+ DllExport void bson_get_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonget_string_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bsonget_string(UDF_EXEC_ARGS);
+ DllExport void bsonget_string_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonget_int_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsonget_int(UDF_INIT*, UDF_ARGS*, char*, char*);
+ DllExport void bsonget_int_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonget_real_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport double bsonget_real(UDF_INIT*, UDF_ARGS*, char*, char*);
+ DllExport void bsonget_real_deinit(UDF_INIT*);
+
+ DllExport my_bool bsonset_grp_size_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsonset_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*);
+
+ DllExport my_bool bsonget_grp_size_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport long long bsonget_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*);
+
+ DllExport my_bool bson_array_grp_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport void bson_array_grp_clear(UDF_INIT *, char *, char *);
+ DllExport void bson_array_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *);
+ DllExport char *bson_array_grp(UDF_EXEC_ARGS);
+ DllExport void bson_array_grp_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_object_grp_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport void bson_object_grp_clear(UDF_INIT *, char *, char *);
+ DllExport void bson_object_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *);
+ DllExport char *bson_object_grp(UDF_EXEC_ARGS);
+ DllExport void bson_object_grp_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_delete_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_delete_item(UDF_EXEC_ARGS);
+ DllExport void bson_delete_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_set_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_set_item(UDF_EXEC_ARGS);
+ DllExport void bson_set_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_insert_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_insert_item(UDF_EXEC_ARGS);
+ DllExport void bson_insert_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_update_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_update_item(UDF_EXEC_ARGS);
+ DllExport void bson_update_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_file_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_file(UDF_EXEC_ARGS);
+ DllExport void bson_file_deinit(UDF_INIT*);
+
+ DllExport my_bool bfile_make_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bfile_make(UDF_EXEC_ARGS);
+ DllExport void bfile_make_deinit(UDF_INIT*);
+
+ DllExport my_bool bfile_convert_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bfile_convert(UDF_EXEC_ARGS);
+ DllExport void bfile_convert_deinit(UDF_INIT*);
+
+ DllExport my_bool bfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bfile_bjson(UDF_EXEC_ARGS);
+ DllExport void bfile_bjson_deinit(UDF_INIT*);
+
+ DllExport my_bool bson_serialize_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bson_serialize(UDF_EXEC_ARGS);
+ DllExport void bson_serialize_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_make_array_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_make_array(UDF_EXEC_ARGS);
+ DllExport void bbin_make_array_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_array_add_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_array_add(UDF_EXEC_ARGS);
+ DllExport void bbin_array_add_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_array_add_values_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_array_add_values(UDF_EXEC_ARGS);
+ DllExport void bbin_array_add_values_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_array_delete_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_array_delete(UDF_EXEC_ARGS);
+ DllExport void bbin_array_delete_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_array_grp_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport void bbin_array_grp_clear(UDF_INIT *, char *, char *);
+ DllExport void bbin_array_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *);
+ DllExport char *bbin_array_grp(UDF_EXEC_ARGS);
+ DllExport void bbin_array_grp_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_grp_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport void bbin_object_grp_clear(UDF_INIT *, char *, char *);
+ DllExport void bbin_object_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *);
+ DllExport char *bbin_object_grp(UDF_EXEC_ARGS);
+ DllExport void bbin_object_grp_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_make_object_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_make_object(UDF_EXEC_ARGS);
+ DllExport void bbin_make_object_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_nonull_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_object_nonull(UDF_EXEC_ARGS);
+ DllExport void bbin_object_nonull_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_key_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_object_key(UDF_EXEC_ARGS);
+ DllExport void bbin_object_key_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_add_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_object_add(UDF_EXEC_ARGS);
+ DllExport void bbin_object_add_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_delete_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_object_delete(UDF_EXEC_ARGS);
+ DllExport void bbin_object_delete_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_list_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_object_list(UDF_EXEC_ARGS);
+ DllExport void bbin_object_list_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_object_values_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_object_values(UDF_EXEC_ARGS);
+ DllExport void bbin_object_values_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_get_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_get_item(UDF_EXEC_ARGS);
+ DllExport void bbin_get_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_item_merge_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_item_merge(UDF_EXEC_ARGS);
+ DllExport void bbin_item_merge_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_set_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_set_item(UDF_EXEC_ARGS);
+ DllExport void bbin_set_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_insert_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_insert_item(UDF_EXEC_ARGS);
+ DllExport void bbin_insert_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_update_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_update_item(UDF_EXEC_ARGS);
+ DllExport void bbin_update_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_delete_item_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_delete_item(UDF_EXEC_ARGS);
+ DllExport void bbin_delete_item_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_locate_all_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* bbin_locate_all(UDF_EXEC_ARGS);
+ DllExport void bbin_locate_all_deinit(UDF_INIT*);
+
+ DllExport my_bool bbin_file_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char *bbin_file(UDF_EXEC_ARGS);
+ DllExport void bbin_file_deinit(UDF_INIT*);
+} // extern "C"
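
Every scalar bson_xxx / bbin_xxx function above is declared as the usual MariaDB UDF triplet: an _init handler called once per statement, the execution function proper (UDF_EXEC_ARGS is CONNECT's shorthand for the standard string-returning UDF parameter list), and a _deinit handler; the aggregate variants add _clear and _add handlers. The minimal sketch below, with hypothetical names and not taken from this patch, only shows the shape such a triplet takes.

#include <cstring>
#include <mysql.h>                     // UDF_INIT, UDF_ARGS, my_bool

extern "C" {

my_bool sample_bson_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
  if (args->arg_count < 1) {           // validate arguments once per statement
    strcpy(message, "sample_bson requires at least one argument");
    return 1;                          // non-zero aborts with 'message' as error
  } // endif arg_count

  initid->maybe_null = 1;              // the result may be NULL
  return 0;
} // end of sample_bson_init

char *sample_bson(UDF_INIT *initid, UDF_ARGS *args, char *result,
                  unsigned long *res_length, char *is_null, char *error)
{
  if (!args->args[0]) {                // NULL input yields NULL output
    *is_null = 1;
    return NULL;
  } // endif args

  // A real UDF would build its JSON/BSON result here; this one echoes arg 0.
  *res_length = args->lengths[0];
  return args->args[0];
} // end of sample_bson

void sample_bson_deinit(UDF_INIT *initid)
{
  // Release anything allocated by the init function (nothing here).
} // end of sample_bson_deinit

} // extern "C"

Once the plugin library is built, each function is registered in SQL with CREATE FUNCTION ... SONAME, as is already done for the existing json_xxx UDFs.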
diff --git a/storage/connect/cmgfam.cpp b/storage/connect/cmgfam.cpp
index 579b5b919a7..690c087c2bb 100644
--- a/storage/connect/cmgfam.cpp
+++ b/storage/connect/cmgfam.cpp
@@ -1,11 +1,11 @@
/************** CMGFAM C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: cmgfam.cpp */
/* ------------- */
-/* Version 1.4 */
+/* Version 1.5 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 20017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -29,7 +29,11 @@
#include "reldef.h"
#include "filamtxt.h"
#include "tabdos.h"
+#if defined(BSON_SUPPORT)
+#include "tabbson.h"
+#else
#include "tabjson.h"
+#endif // BSON_SUPPORT
#include "cmgfam.h"
#if defined(UNIX) || defined(UNIV_LINUX)
@@ -53,6 +57,7 @@ CMGFAM::CMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL)
Pcg.Options = tdp->Options;
Pcg.Filter = tdp->Filter;
Pcg.Pipe = tdp->Pipe && tdp->Options != NULL;
+ Lrecl = tdp->Lrecl + tdp->Ending;
} else {
Pcg.Uristr = NULL;
Pcg.Db_name = NULL;
@@ -60,21 +65,55 @@ CMGFAM::CMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL)
Pcg.Options = NULL;
Pcg.Filter = NULL;
Pcg.Pipe = false;
+ Lrecl = 0;
} // endif tdp
To_Fbt = NULL;
Mode = MODE_ANY;
Done = false;
- Lrecl = tdp->Lrecl + tdp->Ending;
} // end of CMGFAM standard constructor
- CMGFAM::CMGFAM(PCMGFAM tdfp) : DOSFAM(tdfp)
+#if defined(BSON_SUPPORT)
+/***********************************************************************/
+/* Constructors. */
+/***********************************************************************/
+CMGFAM::CMGFAM(PBDEF tdp) : DOSFAM((PDOSDEF)NULL)
+{
+ Cmgp = NULL;
+ Pcg.Tdbp = NULL;
+
+ if (tdp) {
+ Pcg.Uristr = tdp->Uri;
+ Pcg.Db_name = tdp->Schema;
+ Pcg.Coll_name = tdp->Collname;
+ Pcg.Options = tdp->Options;
+ Pcg.Filter = tdp->Filter;
+ Pcg.Pipe = tdp->Pipe && tdp->Options != NULL;
+ Lrecl = tdp->Lrecl + tdp->Ending;
+ } else {
+ Pcg.Uristr = NULL;
+ Pcg.Db_name = NULL;
+ Pcg.Coll_name = NULL;
+ Pcg.Options = NULL;
+ Pcg.Filter = NULL;
+ Pcg.Pipe = false;
+ Lrecl = 0;
+ } // endif tdp
+
+ To_Fbt = NULL;
+ Mode = MODE_ANY;
+ Done = false;
+} // end of CMGFAM standard constructor
+#endif // BSON_SUPPORT
+
+CMGFAM::CMGFAM(PCMGFAM tdfp) : DOSFAM(tdfp)
{
+ Cmgp = tdfp->Cmgp;
Pcg = tdfp->Pcg;
To_Fbt = tdfp->To_Fbt;
Mode = tdfp->Mode;
Done = tdfp->Done;
- } // end of CMGFAM copy constructor
+} // end of CMGFAM copy constructor
/***********************************************************************/
/* Reset: reset position values at the beginning of file. */
diff --git a/storage/connect/cmgfam.h b/storage/connect/cmgfam.h
index 7571f5c5309..9c5f91f0d23 100644
--- a/storage/connect/cmgfam.h
+++ b/storage/connect/cmgfam.h
@@ -1,7 +1,7 @@
/*************** CMGFam H Declares Source Code File (.H) ***************/
-/* Name: cmgfam.h Version 1.5 */
+/* Name: cmgfam.h Version 1.6 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */
/* */
/* This file contains the MongoDB access method classes declares. */
/***********************************************************************/
@@ -20,6 +20,9 @@ class DllExport CMGFAM : public DOSFAM {
public:
// Constructor
CMGFAM(PJDEF tdp);
+#if defined(BSON_SUPPORT)
+ CMGFAM(PBDEF tdp);
+#endif // BSON_SUPPORT
CMGFAM(PCMGFAM txfp);
// Implementation
diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp
index 242e68b5905..e42d9703ad7 100644
--- a/storage/connect/colblk.cpp
+++ b/storage/connect/colblk.cpp
@@ -79,8 +79,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp)
if (trace(2))
htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this);
- if (tdbp)
- {
+ if (tdbp) {
// Attach the new column to the table block
if (!tdbp->GetColumns())
tdbp->SetColumns(this);
@@ -90,6 +89,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp)
colp->Next = this;
} // endelse
}
+
} // end of COLBLK copy constructor
/***********************************************************************/
diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc
index 3b58e8b5a8f..484f75d6a04 100644
--- a/storage/connect/connect.cc
+++ b/storage/connect/connect.cc
@@ -73,8 +73,7 @@ PGLOBAL CntExit(PGLOBAL g)
g->Activityp = NULL;
} // endif Activityp
- PlugExit(g);
- g= NULL;
+ g= PlugExit(g);
} // endif g
return g;
diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp
index 53150f9d8ae..f50290119ae 100644
--- a/storage/connect/filamap.cpp
+++ b/storage/connect/filamap.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -102,7 +102,7 @@ int MAPFAM::GetFileLength(PGLOBAL g)
bool MAPFAM::OpenTableFile(PGLOBAL g)
{
char filename[_MAX_PATH];
- int len;
+ size_t len;
MODE mode = Tdbp->GetMode();
PFBLOCK fp;
PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr;
@@ -170,13 +170,18 @@ bool MAPFAM::OpenTableFile(PGLOBAL g)
htrc("CreateFileMap: %s\n", g->Message);
return (mode == MODE_READ && rc == ENOENT)
- ? PushWarning(g, Tdbp) : true;
+ ? false : true;
+// ? PushWarning(g, Tdbp) : true; --> assert fails in MariaDB
} // endif hFile
/*******************************************************************/
- /* Get the file size (assuming file is smaller than 4 GB) */
+ /* Get the file size. */
/*******************************************************************/
- len = mm.lenL;
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+    len += ((size_t)mm.lenH * 0x100000000LL);
+
Memory = (char *)mm.memory;
if (!len) { // Empty or deleted file
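
The hunk above widens len to size_t and folds in mm.lenH, the high-order 32 bits of the mapped file size, so mapped files of 4 GB and larger are no longer truncated; the multiplier 0x100000000 is 2^32, which places lenH in the upper half of the result. A minimal sketch of that combination, assuming a 64-bit size_t and using lenL/lenH as stand-ins for the MEMMAP fields:

#include <cstddef>
#include <cstdint>

// Assemble a 64-bit length from the low/high 32-bit halves of a file size.
// Illustrative only; assumes size_t is 64 bits wide.
static inline std::size_t CombinedLen(std::uint32_t lenL, std::uint32_t lenH)
{
  return (std::size_t)lenL + ((std::size_t)lenH * 0x100000000ULL);
} // end of CombinedLen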
diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp
index 67ab120c499..84eab272cc5 100644
--- a/storage/connect/filamtxt.cpp
+++ b/storage/connect/filamtxt.cpp
@@ -1,11 +1,11 @@
/*********** File AM Txt C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: FILAMTXT */
/* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -48,6 +48,7 @@
#include "plgdbsem.h"
#include "filamtxt.h"
#include "tabdos.h"
+#include "tabjson.h"
#if defined(UNIX) || defined(UNIV_LINUX)
#include "osutil.h"
@@ -804,14 +805,14 @@ int DOSFAM::ReadBuffer(PGLOBAL g)
Placed = false;
if (trace(2))
- htrc(" About to read: stream=%p To_Buf=%p Buflen=%d\n",
- Stream, To_Buf, Buflen);
+ htrc(" About to read: stream=%p To_Buf=%p Buflen=%d Fpos=%d\n",
+ Stream, To_Buf, Buflen, Fpos);
if (fgets(To_Buf, Buflen, Stream)) {
p = To_Buf + strlen(To_Buf) - 1;
if (trace(2))
- htrc(" Read: To_Buf=%p p=%c\n", To_Buf, To_Buf, p);
+ htrc(" Read: To_Buf=%p p=%c\n", To_Buf, p);
#if defined(__WIN__)
if (Bin) {
@@ -1663,3 +1664,456 @@ void BLKFAM::Rewind(void)
//Rbuf = 0; commented out in case we reuse last read block
} // end of Rewind
+/* --------------------------- Class BINFAM -------------------------- */
+
+#if 0
+/***********************************************************************/
+/* BIN GetFileLength: returns file size in number of bytes. */
+/***********************************************************************/
+int BINFAM::GetFileLength(PGLOBAL g)
+{
+ int len;
+
+ if (!Stream)
+ len = TXTFAM::GetFileLength(g);
+ else
+ if ((len = _filelength(_fileno(Stream))) < 0)
+ sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", To_File);
+
+ xtrc(1, "File length=%d\n", len);
+ return len;
+} // end of GetFileLength
+
+/***********************************************************************/
+/* Cardinality: returns table cardinality in number of rows. */
+/* This function can be called with a null argument to test the */
+/* availability of Cardinality implementation (1 yes, 0 no). */
+/***********************************************************************/
+int BINFAM::Cardinality(PGLOBAL g)
+{
+ return (g) ? -1 : 0;
+} // end of Cardinality
+
+/***********************************************************************/
+/* OpenTableFile: Open a DOS/UNIX table file using C standard I/Os. */
+/***********************************************************************/
+bool BINFAM::OpenTableFile(PGLOBAL g) {
+ char opmode[4], filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
+ PDBUSER dbuserp = PlgGetUser(g);
+
+ switch (mode) {
+ case MODE_READ:
+ strcpy(opmode, "rb");
+ break;
+ case MODE_WRITE:
+ strcpy(opmode, "wb");
+ break;
+ default:
+ sprintf(g->Message, MSG(BAD_OPEN_MODE), mode);
+ return true;
+ } // endswitch Mode
+
+ // Now open the file stream
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+ if (!(Stream = PlugOpenFile(g, filename, opmode))) {
+ if (trace(1))
+ htrc("%s\n", g->Message);
+
+ return (mode == MODE_READ && errno == ENOENT)
+ ? PushWarning(g, Tdbp) : true;
+ } // endif Stream
+
+ if (trace(1))
+ htrc("File %s open Stream=%p mode=%s\n", filename, Stream, opmode);
+
+ To_Fb = dbuserp->Openlist; // Keep track of File block
+
+ /*********************************************************************/
+ /* Allocate the line buffer. */
+ /*********************************************************************/
+ return AllocateBuffer(g);
+} // end of OpenTableFile
+#endif // 0
+
+/***********************************************************************/
+/* Allocate the line buffer. For mode Delete a bigger buffer has to */
+/* be allocated because it is also used to move lines into the file.  */
+/***********************************************************************/
+bool BINFAM::AllocateBuffer(PGLOBAL g)
+{
+ MODE mode = Tdbp->GetMode();
+
+ // Lrecl is Ok
+ Buflen = Lrecl;
+
+ // Buffer will be allocated separately
+ if (mode == MODE_ANY) {
+ xtrc(1, "SubAllocating a buffer of %d bytes\n", Buflen);
+ To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen);
+ } else if (UseTemp || mode == MODE_DELETE) {
+ // Have a big buffer to move lines
+ Dbflen = Buflen * DOS_BUFF_LEN;
+ DelBuf = PlugSubAlloc(g, NULL, Dbflen);
+ } // endif mode
+
+ return false;
+#if 0
+ MODE mode = Tdbp->GetMode();
+
+ // Lrecl is Ok
+ Dbflen = Buflen = Lrecl;
+
+ if (trace(1))
+ htrc("SubAllocating a buffer of %d bytes\n", Buflen);
+
+ DelBuf = To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen);
+ return false;
+#endif // 0
+} // end of AllocateBuffer
+
+#if 0
+/***********************************************************************/
+/* GetRowID: return the RowID of last read record. */
+/***********************************************************************/
+int BINFAM::GetRowID(void) {
+ return Rows;
+} // end of GetRowID
+
+/***********************************************************************/
+/* GetPos: return the position of last read record. */
+/***********************************************************************/
+int BINFAM::GetPos(void) {
+ return Fpos;
+} // end of GetPos
+
+/***********************************************************************/
+/* GetNextPos: return the position of next record. */
+/***********************************************************************/
+int BINFAM::GetNextPos(void) {
+ return ftell(Stream);
+} // end of GetNextPos
+
+/***********************************************************************/
+/* SetPos: Replace the table at the specified position. */
+/***********************************************************************/
+bool BINFAM::SetPos(PGLOBAL g, int pos) {
+ Fpos = pos;
+
+ if (fseek(Stream, Fpos, SEEK_SET)) {
+ sprintf(g->Message, MSG(FSETPOS_ERROR), Fpos);
+ return true;
+ } // endif
+
+ Placed = true;
+ return false;
+} // end of SetPos
+
+/***********************************************************************/
+/* Record file position in case of UPDATE or DELETE. */
+/***********************************************************************/
+bool BINFAM::RecordPos(PGLOBAL g) {
+ if ((Fpos = ftell(Stream)) < 0) {
+ sprintf(g->Message, MSG(FTELL_ERROR), 0, strerror(errno));
+ // strcat(g->Message, " (possible wrong ENDING option value)");
+ return true;
+ } // endif Fpos
+
+ return false;
+} // end of RecordPos
+#endif // 0
+
+/***********************************************************************/
+/* ReadBuffer: Read one record of a binary file. */
+/***********************************************************************/
+int BINFAM::ReadBuffer(PGLOBAL g)
+{
+ int rc;
+
+ if (!Stream)
+ return RC_EF;
+
+ xtrc(2, "ReadBuffer: Tdbp=%p To_Line=%p Placed=%d\n",
+ Tdbp, Tdbp->GetLine(), Placed);
+
+ if (!Placed) {
+ /*******************************************************************/
+ /* Record file position in case of UPDATE or DELETE. */
+ /*******************************************************************/
+ if (RecordPos(g))
+ return RC_FX;
+
+ CurBlk = (int)Rows++;
+ xtrc(2, "ReadBuffer: CurBlk=%d\n", CurBlk);
+ } else
+ Placed = false;
+
+ xtrc(2, " About to read: bstream=%p To_Buf=%p Buflen=%d Fpos=%d\n",
+ Stream, To_Buf, Buflen, Fpos);
+
+ // Read the prefix giving the row length
+ if (!fread(&Recsize, sizeof(size_t), 1, Stream)) {
+ if (!feof(Stream)) {
+ strcpy(g->Message, "Error reading line prefix\n");
+ return RC_FX;
+ } else
+ return RC_EF;
+
+ } else if (Recsize > (unsigned)Buflen) {
+ sprintf(g->Message, "Record too big (Recsize=%zd Buflen=%d)\n", Recsize, Buflen);
+ return RC_FX;
+ } // endif Recsize
+
+ if (fread(To_Buf, Recsize, 1, Stream)) {
+ xtrc(2, " Read: To_Buf=%p Recsize=%zd\n", To_Buf, Recsize);
+ num_read++;
+ rc = RC_OK;
+ } else if (feof(Stream)) {
+ rc = RC_EF;
+ } else {
+#if defined(__WIN__)
+ sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL));
+#else
+ sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(0));
+#endif
+ xtrc(2, "%s\n", g->Message);
+ rc = RC_FX;
+ } // endif's fread
+
+ xtrc(2, "ReadBuffer: rc=%d\n", rc);
+ IsRead = true;
+ return rc;
+} // end of ReadBuffer
+
+/***********************************************************************/
+/* WriteBuffer: File write routine for BIN access method. */
+/***********************************************************************/
+int BINFAM::WriteBuffer(PGLOBAL g)
+{
+ int curpos = 0;
+ bool moved = true;
+
+ // T_Stream is the temporary stream or the table file stream itself
+ if (!T_Stream) {
+ if (UseTemp && Tdbp->GetMode() == MODE_UPDATE) {
+ if (OpenTempFile(g))
+ return RC_FX;
+
+ } else
+ T_Stream = Stream;
+
+ } // endif T_Stream
+
+ if (Tdbp->GetMode() == MODE_UPDATE) {
+ /*******************************************************************/
+ /* Here we simply rewrite a record on itself. There are two cases */
+    /* where another method should be used: a/ when Update applies to */
+    /* the whole file, b/ when updating the last field of a variable  */
+    /* length file. The method could be to rewrite a new file, then   */
+    /* to erase the old one and rename the new updated file.          */
+ /*******************************************************************/
+ curpos = ftell(Stream);
+
+ if (trace(1))
+ htrc("Last : %d cur: %d\n", Fpos, curpos);
+
+ if (UseTemp) {
+ /*****************************************************************/
+ /* We are using a temporary file. */
+      /* Before writing the updated record, we may first have to copy */
+      /* all the intermediate records that have not been updated.     */
+ /*****************************************************************/
+ if (MoveIntermediateLines(g, &moved))
+ return RC_FX;
+
+ Spos = curpos; // New start position
+ } else
+      // Update is directly written back into the file;
+      // with this (fast) method, the record size cannot change.
+ if (fseek(Stream, Fpos, SEEK_SET)) {
+ sprintf(g->Message, MSG(FSETPOS_ERROR), 0);
+ return RC_FX;
+ } // endif
+
+ } // endif mode
+
+ /*********************************************************************/
+ /* Prepare writing the line. */
+ /*********************************************************************/
+//memcpy(To_Buf, Tdbp->GetLine(), Recsize);
+
+ /*********************************************************************/
+ /* Now start the writing process. */
+ /*********************************************************************/
+ if (fwrite(&Recsize, sizeof(size_t), 1, T_Stream) != 1) {
+ sprintf(g->Message, "Error %d writing prefix to %s",
+ errno, To_File);
+ return RC_FX;
+ } else if (fwrite(To_Buf, Recsize, 1, T_Stream) != 1) {
+ sprintf(g->Message, "Error %d writing %zd bytes to %s",
+ errno, Recsize, To_File);
+ return RC_FX;
+ } // endif fwrite
+
+ if (Tdbp->GetMode() == MODE_UPDATE && moved)
+ if (fseek(Stream, curpos, SEEK_SET)) {
+ sprintf(g->Message, MSG(FSEEK_ERROR), strerror(errno));
+ return RC_FX;
+ } // endif
+
+ xtrc(1, "Binary write done\n");
+ return RC_OK;
+} // end of WriteBuffer
+
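
ReadBuffer and WriteBuffer above frame every variable-length BJSON record as a raw size_t length prefix followed immediately by the record bytes, with no line terminator. The stand-alone sketch below (hypothetical file name, no CONNECT types) illustrates only that framing; because the prefix is written as a raw size_t, such files are not portable between builds whose size_t widths differ.

#include <cstdio>
#include <cstddef>

// Write then read back one length-prefixed record, mirroring the framing
// used by BINFAM above: size_t prefix + payload, no separator.
int main()
{
  const char rec[] = "{\"sample\":1}";      // hypothetical payload
  size_t     len   = sizeof(rec) - 1;

  FILE *f = fopen("sample.bin", "wb");      // hypothetical file name
  if (!f) return 1;
  fwrite(&len, sizeof(size_t), 1, f);       // record length prefix
  fwrite(rec, len, 1, f);                   // record bytes
  fclose(f);

  char   buf[64];
  size_t rsz = 0;
  if (!(f = fopen("sample.bin", "rb"))) return 1;
  if (fread(&rsz, sizeof(size_t), 1, f) == 1 && rsz < sizeof(buf) &&
      fread(buf, rsz, 1, f) == 1)
    printf("read %zu bytes: %.*s\n", rsz, (int)rsz, buf);
  fclose(f);
  return 0;
} // end of main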
+#if 0
+/***********************************************************************/
+/* Data Base delete line routine for DOS and BLK access methods. */
+/***********************************************************************/
+int DOSFAM::DeleteRecords(PGLOBAL g, int irc)
+{
+ bool moved;
+ int curpos = ftell(Stream);
+
+ /*********************************************************************/
+  /* There are two alternatives here:                                 */
+  /* 1 - use a temporary file into which all lines that are not       */
+  /*     deleted are copied; at the end the original file is deleted  */
+  /*     and the temporary file renamed to the original file name.    */
+  /* 2 - directly move the lines that are not deleted inside the      */
+  /*     original file, and at the end erase all trailing records.    */
+  /* The second alternative is still being experimented with.         */
+ /*********************************************************************/
+ if (trace(1))
+ htrc(
+ "DOS DeleteDB: rc=%d UseTemp=%d curpos=%d Fpos=%d Tpos=%d Spos=%d\n",
+ irc, UseTemp, curpos, Fpos, Tpos, Spos);
+
+ if (irc != RC_OK) {
+ /*******************************************************************/
+ /* EOF: position Fpos at the end-of-file position. */
+ /*******************************************************************/
+ fseek(Stream, 0, SEEK_END);
+ Fpos = ftell(Stream);
+
+ if (trace(1))
+ htrc("Fpos placed at file end=%d\n", Fpos);
+
+ } // endif irc
+
+ if (Tpos == Spos) {
+ /*******************************************************************/
+ /* First line to delete, Open temporary file. */
+ /*******************************************************************/
+ if (UseTemp) {
+ if (OpenTempFile(g))
+ return RC_FX;
+
+ } else {
+ /*****************************************************************/
+      /* Moving any preceding lines is not required here.             */
+ /* Set the target file as being the source file itself. */
+ /* Set the future Tpos, and give Spos a value to block copying. */
+ /*****************************************************************/
+ T_Stream = Stream;
+ Spos = Tpos = Fpos;
+ } // endif UseTemp
+
+ } // endif Tpos == Spos
+
+ /*********************************************************************/
+ /* Move any intermediate lines. */
+ /*********************************************************************/
+ if (MoveIntermediateLines(g, &moved))
+ return RC_FX;
+
+ if (irc == RC_OK) {
+ /*******************************************************************/
+ /* Reposition the file pointer and set Spos. */
+ /*******************************************************************/
+ if (!UseTemp || moved)
+ if (fseek(Stream, curpos, SEEK_SET)) {
+ sprintf(g->Message, MSG(FSETPOS_ERROR), 0);
+ return RC_FX;
+ } // endif
+
+ Spos = GetNextPos(); // New start position
+
+ if (trace(1))
+ htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos);
+
+ } else {
+ /*******************************************************************/
+ /* Last call after EOF has been reached. */
+ /* The UseTemp case is treated in CloseTableFile. */
+ /*******************************************************************/
+ if (!UseTemp & !Abort) {
+ /*****************************************************************/
+      /* Because the chsize functionality is only accessible with a    */
+      /* system call, we must close the file and reopen it with the    */
+      /* open function (_fopen for MS ??). This is still to be checked */
+      /* for compatibility with text files and other OS's.             */
+ /*****************************************************************/
+ char filename[_MAX_PATH];
+ int h; // File handle, return code
+
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+ /*rc=*/ PlugCloseFile(g, To_Fb);
+
+ if ((h= global_open(g, MSGID_OPEN_STRERROR, filename, O_WRONLY)) <= 0)
+ return RC_FX;
+
+ /*****************************************************************/
+ /* Remove extra records. */
+ /*****************************************************************/
+#if defined(__WIN__)
+ if (chsize(h, Tpos)) {
+ sprintf(g->Message, MSG(CHSIZE_ERROR), strerror(errno));
+ close(h);
+ return RC_FX;
+ } // endif
+#else
+ if (ftruncate(h, (off_t)Tpos)) {
+ sprintf(g->Message, MSG(TRUNCATE_ERROR), strerror(errno));
+ close(h);
+ return RC_FX;
+ } // endif
+#endif
+
+ close(h);
+
+ if (trace(1))
+ htrc("done, h=%d irc=%d\n", h, irc);
+
+ } // endif !UseTemp
+
+ } // endif irc
+
+ return RC_OK; // All is correct
+} // end of DeleteRecords
+
+/***********************************************************************/
+/* Table file close routine for DOS access method. */
+/***********************************************************************/
+void BINFAM::CloseTableFile(PGLOBAL g, bool abort)
+{
+ int rc;
+
+ Abort = abort;
+ rc = PlugCloseFile(g, To_Fb);
+ xtrc(1, "BIN Close: closing %s rc=%d\n", To_File, rc);
+ Stream = NULL; // So we can know whether table is open
+} // end of CloseTableFile
+
+/***********************************************************************/
+/* Rewind routine for BIN access method. */
+/***********************************************************************/
+void BINFAM::Rewind(void)
+{
+ if (Stream) // Can be NULL when making index on void table
+ rewind(Stream);
+
+ Rows = 0;
+ OldBlk = CurBlk = -1;
+} // end of Rewind
+#endif // 0
diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h
index 1fdae8fcd37..353e06ad3bd 100644
--- a/storage/connect/filamtxt.h
+++ b/storage/connect/filamtxt.h
@@ -1,7 +1,7 @@
/************** FilAMTxt H Declares Source Code File (.H) **************/
-/* Name: FILAMTXT.H Version 1.3 */
+/* Name: FILAMTXT.H Version 1.4 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
/* */
/* This file contains the file access method classes declares. */
/***********************************************************************/
@@ -15,6 +15,7 @@
typedef class TXTFAM *PTXF;
typedef class DOSFAM *PDOSFAM;
typedef class BLKFAM *PBLKFAM;
+typedef class BINFAM *PBINFAM;
typedef class DOSDEF *PDOSDEF;
typedef class TDBDOS *PTDBDOS;
@@ -210,4 +211,44 @@ class DllExport BLKFAM : public DOSFAM {
bool Closing; // True when closing on Update
}; // end of class BLKFAM
+/***********************************************************************/
+/* This is the DOS/UNIX Access Method class declaration for binary */
+/* files with variable record format (BJSON) */
+/***********************************************************************/
+class DllExport BINFAM : public DOSFAM {
+public:
+ // Constructor
+ BINFAM(PDOSDEF tdp) : DOSFAM(tdp) {Recsize = 0;}
+ BINFAM(PBINFAM txfp) : DOSFAM(txfp) {Recsize = txfp->Recsize;}
+
+ // Implementation
+ virtual AMT GetAmType(void) {return TYPE_AM_BIN;}
+//virtual int GetPos(void);
+//virtual int GetNextPos(void);
+ virtual PTXF Duplicate(PGLOBAL g) { return (PTXF)new(g) BINFAM(this); }
+
+ // Methods
+//virtual void Reset(void) {TXTFAM::Reset();}
+//virtual int GetFileLength(PGLOBAL g);
+//virtual int Cardinality(PGLOBAL g);
+ virtual int MaxBlkSize(PGLOBAL g, int s) {return s;}
+ virtual bool AllocateBuffer(PGLOBAL g);
+//virtual int GetRowID(void);
+//virtual bool RecordPos(PGLOBAL g);
+//virtual bool SetPos(PGLOBAL g, int recpos);
+ virtual int SkipRecord(PGLOBAL g, bool header) {return RC_OK;}
+//virtual bool OpenTableFile(PGLOBAL g);
+ virtual int ReadBuffer(PGLOBAL g);
+ virtual int WriteBuffer(PGLOBAL g);
+//virtual int DeleteRecords(PGLOBAL g, int irc);
+//virtual void CloseTableFile(PGLOBAL g, bool abort);
+//virtual void Rewind(void);
+
+//protected:
+//virtual int InitDelete(PGLOBAL g, int fpos, int spos);
+
+ // Members
+ size_t Recsize; // Length of last read or next written record
+}; // end of class BINFAM
+
#endif // __FILAMTXT_H
diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp
index 49283f8c0c7..56e6e3715fb 100644
--- a/storage/connect/filamvct.cpp
+++ b/storage/connect/filamvct.cpp
@@ -5,7 +5,7 @@
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -1328,7 +1328,7 @@ VCMFAM::VCMFAM(PVCMFAM txfp) : VCTFAM(txfp)
bool VCMFAM::OpenTableFile(PGLOBAL g)
{
char filename[_MAX_PATH];
- int len;
+ size_t len;
MODE mode = Tdbp->GetMode();
PFBLOCK fp = NULL;
PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr;
@@ -1422,10 +1422,14 @@ bool VCMFAM::OpenTableFile(PGLOBAL g)
} // endif hFile
/*******************************************************************/
- /* Get the file size (assuming file is smaller than 4 GB) */
+ /* Get the file size. */
/*******************************************************************/
- len = mm.lenL;
- Memory = (char *)mm.memory;
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+    len += ((size_t)mm.lenH * 0x100000000LL);
+
+ Memory = (char *)mm.memory;
if (!len) { // Empty or deleted file
CloseFileHandle(hFile);
@@ -2763,7 +2767,7 @@ bool VMPFAM::OpenTableFile(PGLOBAL g)
bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i)
{
char filename[_MAX_PATH];
- int len;
+ size_t len;
HANDLE hFile;
MEMMAP mm;
PFBLOCK fp;
@@ -2817,8 +2821,12 @@ bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i)
/*****************************************************************/
/* Get the file size (assuming file is smaller than 4 GB) */
/*****************************************************************/
- len = mm.lenL;
- Memcol[i] = (char *)mm.memory;
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+    len += ((size_t)mm.lenH * 0x100000000LL);
+
+ Memcol[i] = (char *)mm.memory;
if (!len) { // Empty or deleted file
CloseFileHandle(hFile);
diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp
index eb14e846120..cb933449cbb 100644
--- a/storage/connect/filamzip.cpp
+++ b/storage/connect/filamzip.cpp
@@ -154,6 +154,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf)
strcpy(filename, pat);
#if defined(__WIN__)
+ int rc;
char drive[_MAX_DRIVE], direc[_MAX_DIR];
WIN32_FIND_DATA FileData;
HANDLE hSearch;
@@ -1207,7 +1208,7 @@ int UZDFAM::Cardinality(PGLOBAL g)
return 1;
int card = -1;
- int len = GetFileLength(g);
+ GetFileLength(g);
card = Records;
diff --git a/storage/connect/global.h b/storage/connect/global.h
index d17620861fa..8774285e54b 100644
--- a/storage/connect/global.h
+++ b/storage/connect/global.h
@@ -185,7 +185,7 @@ typedef struct _global { /* Global structure */
size_t Sarea_Size; /* Work area size */
PACTIVITY Activityp;
char Message[MAX_STR]; /* Message (result, error, trace) */
- ulong More; /* Used by jsonudf */
+ size_t More; /* Used by jsonudf */
size_t Saved_Size; /* Saved work area to_free */
bool Createas; /* To pass multi to ext tables */
void *Xchk; /* indexes in create/alter */
@@ -208,7 +208,7 @@ DllExport char *PlugGetMessage(PGLOBAL, int);
DllExport short GetLineLength(PGLOBAL); // Console line length
#endif // __WIN__
DllExport PGLOBAL PlugInit(LPCSTR, size_t); // Plug global initialization
-DllExport int PlugExit(PGLOBAL); // Plug global termination
+DllExport PGLOBAL PlugExit(PGLOBAL); // Plug global termination
DllExport LPSTR PlugRemoveType(LPSTR, LPCSTR);
DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR prefix, LPCSTR name, LPCSTR dir);
DllExport BOOL PlugIsAbsolutePath(LPCSTR path);
@@ -220,30 +220,11 @@ DllExport char *PlugDup(PGLOBAL g, const char *str);
DllExport void htrc(char const *fmt, ...);
DllExport void xtrc(uint, char const* fmt, ...);
DllExport uint GetTraceValue(void);
+DllExport void* MakePtr(void* memp, size_t offset);
+DllExport size_t MakeOff(void* memp, void* ptr);
#if defined(__cplusplus)
} // extern "C"
#endif
-/***********************************************************************/
-/* Inline routine definitions. */
-/***********************************************************************/
-/***********************************************************************/
-/* This routine makes a pointer from an offset to a memory pointer. */
-/***********************************************************************/
-inline void* MakePtr(void* memp, size_t offset) {
- // return ((offset == 0) ? NULL : &((char*)memp)[offset]);
- return (!offset) ? NULL : (char *)memp + offset;
-} /* end of MakePtr */
-
-/***********************************************************************/
-/* This routine makes an offset from a pointer new format. */
-/***********************************************************************/
-inline size_t MakeOff(void* memp, void* ptr) {
-#if defined(_DEBUG)
- assert(ptr > memp);
-#endif // _DEBUG
- return ((!ptr) ? 0 : (size_t)((char*)ptr - (size_t)memp));
-} /* end of MakeOff */
-
/*-------------------------- End of Global.H --------------------------*/
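
MakePtr and MakeOff, previously inline here, are now exported functions; their new definitions are outside this excerpt, but the deleted inline bodies show the intended contract: an offset/pointer round trip relative to the work-area base, with offset 0 and NULL mapping to each other. A short illustrative restatement under that assumption:

#include <cstddef>

// Offset <-> pointer round trip over a memory pool base; 0 <-> NULL.
// Illustrative only, restating the deleted inline versions shown above.
static void *MakePtrSketch(void *memp, std::size_t offset)
{
  return offset ? (char *)memp + offset : nullptr;
} // end of MakePtrSketch

static std::size_t MakeOffSketch(void *memp, void *ptr)
{
  return ptr ? (std::size_t)((char *)ptr - (char *)memp) : 0;
} // end of MakeOffSketch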
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index f8bf8804246..f87f1abddeb 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -170,7 +170,7 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
- char version[]= "Version 1.07.0002 October 18, 2020";
+ char version[]= "Version 1.07.0002 December 25, 2020";
#if defined(__WIN__)
char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__;
char slash= '\\';
@@ -230,6 +230,9 @@ char *GetUserVariable(PGLOBAL g, const uchar *varname)
PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
PQRYRES VirColumns(PGLOBAL g, bool info);
PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info);
+#ifdef BSON_SUPPORT
+PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info);
+#endif // BSON_SUPPORT
PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info);
#if defined(REST_SUPPORT)
PQRYRES RESTColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info);
@@ -251,11 +254,14 @@ bool ExactInfo(void);
USETEMP UseTemp(void);
int GetConvSize(void);
TYPCONV GetTypeConv(void);
+int GetDefaultDepth(void);
bool JsonAllPath(void);
char *GetJsonNull(void);
-int GetDefaultDepth(void);
uint GetJsonGrpSize(void);
char *GetJavaWrapper(void);
+#if defined(BSON_SUPPORT)
+bool Force_Bson(void);
+#endif // BSON_SUPPORT
size_t GetWorkSize(void);
void SetWorkSize(size_t);
extern "C" const char *msglang(void);
@@ -397,7 +403,7 @@ static MYSQL_THDVAR_ENUM(
// Adding JPATH to all Json table columns
static MYSQL_THDVAR_BOOL(json_all_path, PLUGIN_VAR_RQCMDARG,
"Adding JPATH to all Json table columns",
- NULL, NULL, 0); // NO by default
+ NULL, NULL, 1); // YES by default
// Null representation for JSON values
static MYSQL_THDVAR_STR(json_null,
@@ -410,7 +416,7 @@ static MYSQL_THDVAR_STR(json_null,
static MYSQL_THDVAR_INT(default_depth,
PLUGIN_VAR_RQCMDARG,
"Default depth used by Json, XML and Mongo discovery",
- NULL, NULL, 0, -1, 16, 1);
+ NULL, NULL, 5, -1, 16, 1); // Defaults to 5
// Estimate max number of rows for JSON aggregate functions
static MYSQL_THDVAR_UINT(json_grp_size,
@@ -439,6 +445,13 @@ static MYSQL_THDVAR_BOOL(enable_mongo, PLUGIN_VAR_RQCMDARG,
#endif // !version 2,3
#endif // JAVA_SUPPORT || CMGO_SUPPORT
+#if defined(BSON_SUPPORT)
+// Force using BSON for JSON tables
+static MYSQL_THDVAR_BOOL(force_bson, PLUGIN_VAR_RQCMDARG,
+ "Force using BSON for JSON tables",
+ NULL, NULL, 0); // NO by default
+#endif // BSON_SUPPORT
+
#if defined(XMSG) || defined(NEWMSG)
const char *language_names[]=
{
@@ -501,6 +514,10 @@ char *GetJavaWrapper(void)
bool MongoEnabled(void) {return THDVAR(current_thd, enable_mongo);}
#endif // JAVA_SUPPORT || CMGO_SUPPORT
+#if defined(BSON_SUPPORT)
+bool Force_Bson(void) {return THDVAR(current_thd, force_bson);}
+#endif // BSON_SUPPORT
+
#if defined(XMSG) || defined(NEWMSG)
extern "C" const char *msglang(void)
{return language_names[THDVAR(current_thd, msg_lang)];}
@@ -1051,12 +1068,12 @@ static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp)
/****************************************************************************/
TABTYPE ha_connect::GetRealType(PTOS pos)
{
- TABTYPE type;
+ TABTYPE type= TAB_UNDEF;
if (pos || (pos= GetTableOptionStruct())) {
type= GetTypeID(pos->type);
- if (type == TAB_UNDEF)
+ if (type == TAB_UNDEF && !pos->http)
type= pos->srcdef ? TAB_MYSQL : pos->tabname ? TAB_PRX : TAB_DOS;
#if defined(REST_SUPPORT)
else if (pos->http)
@@ -1064,7 +1081,8 @@ TABTYPE ha_connect::GetRealType(PTOS pos)
case TAB_JSON:
case TAB_XML:
case TAB_CSV:
- type = TAB_REST;
+ case TAB_UNDEF:
+ type = TAB_REST;
break;
case TAB_REST:
type = TAB_NIY;
@@ -1074,8 +1092,7 @@ TABTYPE ha_connect::GetRealType(PTOS pos)
} // endswitch type
#endif // REST_SUPPORT
- } else
- type= TAB_UNDEF;
+ } // endif pos
return type;
} // end of GetRealType
@@ -1387,7 +1404,7 @@ PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef)
PTOS options= GetTableOptionStruct();
if (!stricmp(opname, "Connect")) {
- LEX_CSTRING cnc= (tshp) ? tshp->connect_string
+ LEX_CSTRING cnc= (tshp) ? tshp->connect_string
: table->s->connect_string;
if (cnc.length)
@@ -1573,6 +1590,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
// Now get column information
pcf->Name= (char*)fp->field_name.str;
+ chset = (char*)fp->charset()->name;
if (fop && fop->special) {
pcf->Fieldfmt= (char*)fop->special;
@@ -1583,8 +1601,15 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Scale= 0;
pcf->Opt= (fop) ? (int)fop->opt : 0;
- if ((pcf->Length= fp->field_length) < 0)
- pcf->Length= 256; // BLOB?
+ if (fp->field_length >= 0) {
+ pcf->Length = fp->field_length;
+
+ // length is bytes for Connect, not characters
+ if (!strnicmp(chset, "utf8", 4))
+ pcf->Length /= 3;
+
+ } else
+ pcf->Length= 256; // BLOB?
pcf->Precision= pcf->Length;
@@ -1601,8 +1626,6 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Fieldfmt= NULL;
} // endif fop
- chset= (char *)fp->charset()->name;
-
if (!strcmp(chset, "binary"))
v = 'B'; // Binary string
@@ -2156,7 +2179,6 @@ int ha_connect::MakeRecord(char *buf)
int rc= 0;
Field* *field;
Field *fp;
- my_bitmap_map *org_bitmap;
CHARSET_INFO *charset= tdbp->data_charset();
//MY_BITMAP readmap;
MY_BITMAP *map;
@@ -2170,7 +2192,7 @@ int ha_connect::MakeRecord(char *buf)
*table->def_read_set.bitmap, *table->def_write_set.bitmap);
// Avoid asserts in field::store() for columns that are not updated
- org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set);
// This is for variable_length rows
memset(buf, 0, table->s->null_bytes);
@@ -2197,7 +2219,7 @@ int ha_connect::MakeRecord(char *buf)
continue;
htrc("Column %s not found\n", fp->field_name.str);
- dbug_tmp_restore_column_map(table->write_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->write_set, org_bitmap);
DBUG_RETURN(HA_ERR_WRONG_IN_RECORD);
} // endif colp
@@ -2257,7 +2279,7 @@ int ha_connect::MakeRecord(char *buf)
sprintf(buf, "Out of range value %.140s for column '%s' at row %ld",
value->GetCharString(val),
- fp->field_name.str,
+ fp->field_name.str,
thd->get_stmt_da()->current_row_for_warning());
push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, buf);
@@ -2280,7 +2302,7 @@ int ha_connect::MakeRecord(char *buf)
memcpy(buf, table->record[0], table->s->stored_rec_length);
// This is copied from ha_tina and is necessary to avoid asserts
- dbug_tmp_restore_column_map(table->write_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->write_set, org_bitmap);
DBUG_RETURN(rc);
} // end of MakeRecord
@@ -2300,7 +2322,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *)
//PTDBASE tp= (PTDBASE)tdbp;
String attribute(attr_buffer, sizeof(attr_buffer),
table->s->table_charset);
- my_bitmap_map *bmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *bmap= dbug_tmp_use_all_columns(table, &table->read_set);
const CHARSET_INFO *charset= tdbp->data_charset();
String data_charset_value(data_buffer, sizeof(data_buffer), charset);
@@ -2422,7 +2444,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *)
} // endfor field
err:
- dbug_tmp_restore_column_map(table->read_set, bmap);
+ dbug_tmp_restore_column_map(&table->read_set, bmap);
return rc;
} // end of ScanRecord
@@ -2470,7 +2492,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
OPVAL op;
Field *fp;
const key_range *ranges[2];
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
KEY *kfp;
KEY_PART_INFO *kpart;
@@ -2487,7 +2509,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
both= ranges[0] && ranges[1];
kfp= &table->key_info[active_index];
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ old_map= dbug_tmp_use_all_columns(table, &table->write_set);
for (i= 0; i <= 1; i++) {
if (ranges[i] == NULL)
@@ -2582,11 +2604,11 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q,
if ((oom= qry->IsTruncated()))
strcpy(g->Message, "Out of memory");
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return oom;
err:
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return true;
} // end of MakeKeyWhere
@@ -4501,7 +4523,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
case TAB_VEC:
case TAB_REST:
case TAB_JSON:
- if (options->filename && *options->filename) {
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
+ if (options->filename && *options->filename) {
if (!quick) {
char path[FN_REFLEN], dbpath[FN_REFLEN];
@@ -4532,11 +4557,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
case TAB_DIR:
case TAB_ZIP:
case TAB_OEM:
- if (table && table->pos_in_table_list) { // if SELECT
+ if (table && table->pos_in_table_list) { // if SELECT
#if MYSQL_VERSION_ID > 100200
Switch_to_definer_security_ctx backup_ctx(thd, table->pos_in_table_list);
#endif // VERSION_ID > 100200
-
return check_global_access(thd, FILE_ACL);
} else
return check_global_access(thd, FILE_ACL);
@@ -4552,9 +4576,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
case TAB_OCCUR:
case TAB_PIVOT:
case TAB_VIR:
+ default:
// This is temporary until a solution is found
return false;
- } // endswitch type
+ } // endswitch type
my_printf_error(ER_UNKNOWN_ERROR, "check_privileges failed", MYF(0));
return true;
@@ -4937,11 +4962,11 @@ int ha_connect::external_lock(THD *thd, int lock_type)
// Here we do make the new indexes
if (tdp->MakeIndex(g, adp, true) == RC_FX) {
// Make it a warning to avoid crash
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- 0, g->Message);
- rc= 0;
- //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0));
- //rc= HA_ERR_INTERNAL_ERROR;
+ //push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ // 0, g->Message);
+ //rc= 0;
+ my_message(ER_TOO_MANY_KEYS, g->Message, MYF(0));
+ rc= HA_ERR_INDEX_CORRUPT;
} // endif MakeIndex
} else if (tdbp->GetDef()->Indexable() == 3) {
@@ -5351,7 +5376,8 @@ static char *encode(PGLOBAL g, const char *cnm)
*/
static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ,
int len, int dec, char* key, uint tm, const char* rem,
- char* dft, char* xtra, char* fmt, int flag, bool dbf, char v) {
+ char* dft, char* xtra, char* fmt, int flag, bool dbf, char v)
+{
#if defined(DEVELOPMENT)
// Some client programs regard CHAR(36) as GUID
char var = (len > 255 || len == 36) ? 'V' : v;
@@ -5428,7 +5454,10 @@ static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ,
if (fmt && *fmt) {
switch (ttp) {
case TAB_JSON: error |= sql->append(" JPATH='"); break;
- case TAB_XML: error |= sql->append(" XPATH='"); break;
+#if defined(BSON_SUPPORT)
+ case TAB_BSON: error |= sql->append(" JPATH='"); break;
+#endif // BSON_SUPPORT
+ case TAB_XML: error |= sql->append(" XPATH='"); break;
default: error |= sql->append(" FIELD_FORMAT='");
} // endswitch ttp
@@ -5593,8 +5622,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
String sql(buf, sizeof(buf), system_charset_info);
sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info);
- user = host = pwd = tbl = src = col = ocl = pic = fcl = skc = rnk = zfn = NULL;
- dsn = url = NULL;
+ user= host= pwd= tbl= src= col= ocl= pic= fcl= skc= rnk= zfn= NULL;
+ dsn= url= NULL;
// Get the useful create options
ttp= GetTypeID(topt->type);
@@ -5655,7 +5684,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
try {
// Check table type
- if (ttp == TAB_UNDEF) {
+ if (ttp == TAB_UNDEF && !topt->http) {
topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS";
ttp= GetTypeID(topt->type);
sprintf(g->Message, "No table_type. Was set to %s", topt->type);
@@ -5666,11 +5695,21 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
goto err;
#if defined(REST_SUPPORT)
} else if (topt->http) {
- switch (ttp) {
+ if (ttp == TAB_UNDEF) {
+ topt->type = "JSON";
+ ttp= GetTypeID(topt->type);
+ sprintf(g->Message, "No table_type. Was set to %s", topt->type);
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message);
+ } // endif ttp
+
+ switch (ttp) {
case TAB_JSON:
- case TAB_XML:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
+ case TAB_XML:
case TAB_CSV:
- ttp = TAB_REST;
+ ttp = TAB_REST;
break;
default:
break;
@@ -5853,7 +5892,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
case TAB_XML:
#endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT
case TAB_JSON:
- dsn= strz(g, create_info->connect_string);
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
+ dsn= strz(g, create_info->connect_string);
if (!fn && !zfn && !mul && !dsn)
sprintf(g->Message, "Missing %s file name", topt->type);
@@ -6017,8 +6059,15 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
qrp= VirColumns(g, fnc == FNC_COL);
break;
case TAB_JSON:
+#if !defined(FORCE_BSON)
qrp= JSONColumns(g, db, dsn, topt, fnc == FNC_COL);
break;
+#endif // !FORCE_BSON
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+ qrp= BSONColumns(g, db, dsn, topt, fnc == FNC_COL);
+ break;
+#endif // BSON_SUPPORT
#if defined(JAVA_SUPPORT)
case TAB_MONGO:
url= strz(g, create_info->connect_string);
@@ -6083,6 +6132,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
goto err;
} // endif !nblin
+ // Restore language type
+ if (ttp == TAB_REST)
+ ttp = GetTypeID(topt->type);
+
for (i= 0; !rc && i < qrp->Nblin; i++) {
typ= len= prec= dec= flg= 0;
tm= NOT_NULL_FLAG;
@@ -6258,7 +6311,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
// Now add the field
if (add_field(&sql, ttp, cnm, typ, prec, dec, key, tm, rem, dft, xtra,
- fmt, flg, dbf, v))
+ fmt, flg, dbf, v))
rc= HA_ERR_OUT_OF_MEM;
} // endfor i
@@ -6382,6 +6435,9 @@ int ha_connect::create(const char *name, TABLE *table_arg,
// Check table type
if (type == TAB_UNDEF) {
options->type= (options->srcdef) ? "MYSQL" :
+#if defined(REST_SUPPORT)
+ (options->http) ? "JSON" :
+#endif // REST_SUPPORT
(options->tabname) ? "PROXY" : "DOS";
type= GetTypeID(options->type);
sprintf(g->Message, "No table_type. Will be set to %s", options->type);
@@ -6399,7 +6455,7 @@ int ha_connect::create(const char *name, TABLE *table_arg,
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
inward= IsFileType(type) && !options->filename &&
- (type != TAB_JSON || !cnc.length);
+ ((type != TAB_JSON && type != TAB_BSON) || !cnc.length);
if (options->data_charset) {
const CHARSET_INFO *data_charset;
@@ -6757,8 +6813,8 @@ int ha_connect::create(const char *name, TABLE *table_arg,
if (trace(1))
htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas);
-#if defined(ZIP_SUPPORT)
if (options->zipped) {
+#if defined(ZIP_SUPPORT)
// Check whether the zip entry must be made from a file
PCSZ fn= GetListOption(g, "Load", options->oplist, NULL);
@@ -6780,9 +6836,11 @@ int ha_connect::create(const char *name, TABLE *table_arg,
} // endif LoadFile
} // endif fn
-
+#else // !ZIP_SUPPORT
+ my_message(ER_UNKNOWN_ERROR, "Option ZIP not supported", MYF(0));
+ DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
+#endif // !ZIP_SUPPORT
} // endif zipped
-#endif // ZIP_SUPPORT
// To check whether indexes have to be made or remade
if (!g->Xchk) {
@@ -7394,7 +7452,10 @@ static struct st_mysql_sys_var* connect_system_variables[]= {
MYSQL_SYSVAR(enable_mongo),
#endif // JAVA_SUPPORT || CMGO_SUPPORT
MYSQL_SYSVAR(cond_push),
- NULL
+#if defined(BSON_SUPPORT)
+ MYSQL_SYSVAR(force_bson),
+#endif // BSON_SUPPORT
+ NULL
};
maria_declare_plugin(connect)
diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp
index 2dab385a36f..2cb75e0adc1 100644
--- a/storage/connect/jdbconn.cpp
+++ b/storage/connect/jdbconn.cpp
@@ -766,6 +766,7 @@ void JDBConn::AddJars(PSTRG jpop, char sep)
/***********************************************************************/
bool JDBConn::Connect(PJPARM sop)
{
+ int irc = RC_FX;
bool err = false;
jint rc;
jboolean jt = (trace(1));
diff --git a/storage/connect/jmgfam.cpp b/storage/connect/jmgfam.cpp
index 30f6279146d..2d45753ec63 100644
--- a/storage/connect/jmgfam.cpp
+++ b/storage/connect/jmgfam.cpp
@@ -1,15 +1,15 @@
/************ JMONGO FAM C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: jmgfam.cpp */
/* ------------- */
-/* Version 1.0 */
+/* Version 1.1 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 20017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
-/* This program are the Java MongoDB access method classes. */
+/* This program contains the Java MongoDB access method classes. */
/* */
/***********************************************************************/
@@ -49,7 +49,11 @@
#include "reldef.h"
#include "filamtxt.h"
#include "tabdos.h"
+#if defined(BSON_SUPPORT)
+#include "tabbson.h"
+#else
#include "tabjson.h"
+#endif // BSON_SUPPORT
#include "jmgfam.h"
#if defined(UNIX) || defined(UNIV_LINUX)
@@ -92,10 +96,38 @@ JMGFAM::JMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL)
Version = tdp->Version;
Lrecl = tdp->Lrecl + tdp->Ending;
Curpos = 0;
-} // end of JMGFAM standard constructor
+} // end of JMGFAM Json standard constructor
+
+#if defined(BSON_SUPPORT)
+JMGFAM::JMGFAM(PBDEF tdp) : DOSFAM((PDOSDEF)NULL)
+{
+ Jcp = NULL;
+ Ops.Driver = tdp->Schema;
+ Ops.Url = tdp->Uri;
+ Ops.User = NULL;
+ Ops.Pwd = NULL;
+ Ops.Scrollable = false;
+ Ops.Fsize = 0;
+ Ops.Version = tdp->Version;
+ To_Fbt = NULL;
+ Mode = MODE_ANY;
+ Uristr = tdp->Uri;
+ Db_name = tdp->Schema;
+ Coll_name = tdp->Collname;
+ Options = tdp->Options;
+ Filter = tdp->Filter;
+ Wrapname = tdp->Wrapname;
+ Done = false;
+ Pipe = tdp->Pipe;
+ Version = tdp->Version;
+ Lrecl = tdp->Lrecl + tdp->Ending;
+ Curpos = 0;
+} // end of JMGFAM Bson standard constructor
+#endif // BSON_SUPPORT
JMGFAM::JMGFAM(PJMGFAM tdfp) : DOSFAM(tdfp)
{
+ Jcp = tdfp->Jcp;
//Client = tdfp->Client;
//Database = NULL;
//Collection = tdfp->Collection;
@@ -114,6 +146,7 @@ JMGFAM::JMGFAM(PJMGFAM tdfp) : DOSFAM(tdfp)
Done = tdfp->Done;
Pipe = tdfp->Pipe;
Version = tdfp->Version;
+ Curpos = tdfp->Curpos;
} // end of JMGFAM copy constructor
/***********************************************************************/
diff --git a/storage/connect/jmgfam.h b/storage/connect/jmgfam.h
index 5c80d993833..c5d9d1f57e6 100644
--- a/storage/connect/jmgfam.h
+++ b/storage/connect/jmgfam.h
@@ -1,7 +1,7 @@
/************** MongoFam H Declares Source Code File (.H) **************/
-/* Name: jmgfam.h Version 1.0 */
+/* Name: jmgfam.h Version 1.1 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */
/* */
/* This file contains the JAVA MongoDB access method classes declares */
/***********************************************************************/
@@ -25,6 +25,9 @@ class DllExport JMGFAM : public DOSFAM {
public:
// Constructor
JMGFAM(PJDEF tdp);
+#if defined(BSON_SUPPORT)
+ JMGFAM(PBDEF tdp);
+#endif // BSON_SUPPORT
JMGFAM(PJMGFAM txfp);
// Implementation
diff --git a/storage/connect/jmgoconn.cpp b/storage/connect/jmgoconn.cpp
index c80800bd897..8a12fffbd05 100644
--- a/storage/connect/jmgoconn.cpp
+++ b/storage/connect/jmgoconn.cpp
@@ -121,7 +121,7 @@ JMgoConn::JMgoConn(PGLOBAL g, PCSZ collname, PCSZ wrapper)
/***********************************************************************/
void JMgoConn::AddJars(PSTRG jpop, char sep)
{
-#if defined(DEVELOPMENT)
+#if defined(BSON_SUPPORT)
if (m_Version == 2) {
jpop->Append(sep);
// jpop->Append("C:/Eclipse/workspace/MongoWrap2/bin");
@@ -134,7 +134,7 @@ void JMgoConn::AddJars(PSTRG jpop, char sep)
jpop->Append(sep);
jpop->Append("C:/mongo-java-driver/mongo-java-driver-3.4.2.jar");
} // endif m_Version
-#endif // DEVELOPMENT
+#endif // BSON_SUPPORT
} // end of AddJars
/***********************************************************************/
diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp
index f6dca8146d6..0152a44fffa 100644
--- a/storage/connect/json.cpp
+++ b/storage/connect/json.cpp
@@ -1,7 +1,7 @@
/*************** json CPP Declares Source Code File (.H) ***************/
-/* Name: json.cpp Version 1.4 */
+/* Name: json.cpp Version 1.5 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */
/* */
/* This file contains the JSON classes functions. */
/***********************************************************************/
@@ -21,7 +21,7 @@
#include "plgdbsem.h"
#include "json.h"
-#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0)
+#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0)
#if defined(__WIN__)
#define EL "\r\n"
@@ -38,16 +38,16 @@
class SE_Exception {
public:
- SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
- ~SE_Exception() {}
+ SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {}
+ ~SE_Exception() {}
- unsigned int nSE;
- PEXCEPTION_RECORD eRec;
+ unsigned int nSE;
+ PEXCEPTION_RECORD eRec;
}; // end of class SE_Exception
void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp)
{
- throw SE_Exception(u, pExp->ExceptionRecord);
+ throw SE_Exception(u, pExp->ExceptionRecord);
} // end of trans_func
char *GetExceptionDesc(PGLOBAL g, unsigned int e);
@@ -58,46 +58,60 @@ char *GetJsonNull(void);
/***********************************************************************/
/* IsNum: check whether this string is all digits. */
/***********************************************************************/
-bool IsNum(PSZ s)
-{
- for (char *p = s; *p; p++)
- if (*p == ']')
- break;
- else if (!isdigit(*p) || *p == '-')
- return false;
+bool IsNum(PSZ s) {
+ for (char* p = s; *p; p++)
+ if (*p == ']')
+ break;
+ else if (!isdigit(*p) || *p == '-')
+ return false;
- return true;
-} // end of IsNum
+ return true;
+} // end of IsNum
/***********************************************************************/
/* NextChr: return the first found '[' or Sep pointer. */
/***********************************************************************/
-char *NextChr(PSZ s, char sep)
+char* NextChr(PSZ s, char sep)
{
- char *p1 = strchr(s, '[');
- char *p2 = strchr(s, sep);
+ char* p1 = strchr(s, '[');
+ char* p2 = strchr(s, sep);
- if (!p2)
- return p1;
- else if (p1)
- return MY_MIN(p1, p2);
+ if (!p2)
+ return p1;
+ else if (p1)
+ return MY_MIN(p1, p2);
- return p2;
-} // end of NextChr
+ return p2;
+} // end of NextChr
+#if 0
+/***********************************************************************/
+/* Allocate a VAL structure, make sure common field and Nd are zeroed. */
+/***********************************************************************/
+PVL AllocVal(PGLOBAL g, JTYP type)
+{
+ PVL vlp = (PVL)PlugSubAlloc(g, NULL, sizeof(VAL));
+
+ vlp->LLn = 0;
+ vlp->Nd = 0;
+ vlp->Type = type;
+ return vlp;
+} // end of AllocVal
+#endif // 0
/***********************************************************************/
/* Parse a json string. */
/* Note: when pretty is not known, the caller sets pretty to 3.       */
/***********************************************************************/
-PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
+PJSON ParseJson(PGLOBAL g, char* s, size_t len, int* ptyp, bool* comma)
{
- int i, pretty = (ptyp) ? *ptyp : 3;
- bool b = false, pty[3] = {true,true,true};
- PJSON jsp = NULL, jp = NULL;
+ int i, pretty = (ptyp) ? *ptyp : 3;
+ bool b = false, pty[3] = { true,true,true };
+ PJSON jsp = NULL;
+ PJDOC jdp = NULL;
- if (trace(1))
- htrc("ParseJson: s=%.10s len=%d\n", s, len);
+ if (trace(1))
+ htrc("ParseJson: s=%.10s len=%zd\n", s, len);
if (!s || !len) {
strcpy(g->Message, "Void JSON object");
@@ -105,116 +119,402 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma)
} else if (comma)
*comma = false;
- // Trying to guess the pretty format
- if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
- pty[0] = false;
-
- try {
- jp = new(g) JSON();
- jp->s = s;
- jp->len = len;
- jp->pty = pty;
-
- for (i = 0; i < jp->len; i++)
- switch (s[i]) {
- case '[':
- if (jsp)
- jsp = jp->ParseAsArray(g, i, pretty, ptyp);
- else
- jsp = jp->ParseArray(g, ++i);
-
- break;
- case '{':
- if (jsp)
- jsp = jp->ParseAsArray(g, i, pretty, ptyp);
- else if (!(jsp = jp->ParseObject(g, ++i)))
- throw 2;
-
- break;
- case ' ':
- case '\t':
- case '\n':
- case '\r':
- break;
- case ',':
- if (jsp && (pretty == 1 || pretty == 3)) {
- if (comma)
- *comma = true;
-
- pty[0] = pty[2] = false;
- break;
- } // endif pretty
-
- sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
- throw 3;
- case '(':
- b = true;
- break;
- case ')':
- if (b) {
- b = false;
- break;
- } // endif b
- /* falls through */
- default:
- if (jsp)
- jsp = jp->ParseAsArray(g, i, pretty, ptyp);
- else if (!(jsp = jp->ParseValue(g, i)))
- throw 4;
-
- break;
- }; // endswitch s[i]
-
- if (!jsp)
- sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN(len, 50), s);
- else if (ptyp && pretty == 3) {
- *ptyp = 3; // Not recognized pretty
-
- for (i = 0; i < 3; i++)
- if (pty[i]) {
- *ptyp = i;
- break;
- } // endif pty
-
- } // endif ptyp
-
- } catch (int n) {
- if (trace(1))
- htrc("Exception %d: %s\n", n, g->Message);
- jsp = NULL;
- } catch (const char *msg) {
- strcpy(g->Message, msg);
- jsp = NULL;
- } // end catch
-
- return jsp;
+ // Trying to guess the pretty format
+ if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n')))
+ pty[0] = false;
+
+ try {
+ jdp = new(g) JDOC;
+ jdp->s = s;
+ jdp->len = len;
+ jdp->pty = pty;
+
+ for (i = 0; i < jdp->len; i++)
+ switch (s[i]) {
+ case '[':
+ if (jsp)
+ jsp = jdp->ParseAsArray(g, i, pretty, ptyp);
+ else
+ jsp = jdp->ParseArray(g, ++i);
+
+ break;
+ case '{':
+ if (jsp)
+ jsp = jdp->ParseAsArray(g, i, pretty, ptyp);
+ else if (!(jsp = jdp->ParseObject(g, ++i)))
+ throw 2;
+
+ break;
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
+ break;
+ case ',':
+ if (jsp && (pretty == 1 || pretty == 3)) {
+ if (comma)
+ *comma = true;
+
+ pty[0] = pty[2] = false;
+ break;
+ } // endif pretty
+
+ sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty);
+ throw 3;
+ case '(':
+ b = true;
+ break;
+ case ')':
+ if (b) {
+ b = false;
+ break;
+ } // endif b
+
+ default:
+ if (jsp)
+ jsp = jdp->ParseAsArray(g, i, pretty, ptyp);
+ else if (!(jsp = jdp->ParseValue(g, i)))
+ throw 4;
+
+ break;
+ }; // endswitch s[i]
+
+ if (!jsp)
+ sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s);
+ else if (ptyp && pretty == 3) {
+ *ptyp = 3; // Not recognized pretty
+
+ for (i = 0; i < 3; i++)
+ if (pty[i]) {
+ *ptyp = i;
+ break;
+ } // endif pty
+
+ } // endif ptyp
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+ jsp = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ jsp = NULL;
+ } // end catch
+
+ return jsp;
} // end of ParseJson
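
ParseJson is now built around a JDOC helper and takes the buffer length as size_t. A minimal usage sketch (not part of the patch), assuming an initialized PGLOBAL g supplied by the surrounding CONNECT code and using only functions visible in this file:

  // Parse a small document with the reworked entry point.
  char text[] = "{\"name\":\"foo\",\"qty\":3}";
  int  pretty = 3;                 // 3 = format not known, let the parser guess
  bool comma = false;
  PJSON jsp = ParseJson(g, text, strlen(text), &pretty, &comma);

  if (!jsp)
    htrc("parse failed: %s\n", g->Message);
  else
    htrc("parsed type=%d pretty=%d\n", jsp->GetType(), pretty);
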
/***********************************************************************/
+/* Serialize a JSON document tree: */
+/***********************************************************************/
+PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty) {
+ PSZ str = NULL;
+ bool b = false, err = true;
+ JOUT* jp;
+ FILE* fs = NULL;
+ PJDOC jdp = NULL;
+
+ g->Message[0] = 0;
+
+ try {
+ jdp = new(g) JDOC; // MUST BE ALLOCATED BEFORE jp !!!!!
+
+ if (!jsp) {
+ strcpy(g->Message, "Null json tree");
+ throw 1;
+ } else if (!fn) {
+ // Serialize to a string
+ jp = new(g) JOUTSTR(g);
+ b = pretty == 1;
+ } else {
+ if (!(fs = fopen(fn, "wb"))) {
+ sprintf(g->Message, MSG(OPEN_MODE_ERROR),
+ "w", (int)errno, fn);
+ strcat(strcat(g->Message, ": "), strerror(errno));
+ throw 2;
+ } else if (pretty >= 2) {
+ // Serialize to a pretty file
+ jp = new(g)JOUTPRT(g, fs);
+ } else {
+ // Serialize to a flat file
+ b = true;
+ jp = new(g)JOUTFILE(g, fs, pretty);
+ } // endif's
+
+ } // endif's
+
+ jdp->SetJp(jp);
+
+ switch (jsp->GetType()) {
+ case TYPE_JAR:
+ err = jdp->SerializeArray((PJAR)jsp, b);
+ break;
+ case TYPE_JOB:
+ err = ((b && jp->Prty()) && jp->WriteChr('\t'));
+ err |= jdp->SerializeObject((PJOB)jsp);
+ break;
+ case TYPE_JVAL:
+ err = jdp->SerializeValue((PJVAL)jsp);
+ break;
+ default:
+ strcpy(g->Message, "Invalid json tree");
+ } // endswitch Type
+
+ if (fs) {
+ fputs(EL, fs);
+ fclose(fs);
+ str = (err) ? NULL : strcpy(g->Message, "Ok");
+ } else if (!err) {
+ str = ((JOUTSTR*)jp)->Strp;
+ jp->WriteChr('\0');
+ PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
+ } else {
+ if (!g->Message[0])
+ strcpy(g->Message, "Error in Serialize");
+
+ } // endif's
+
+ } catch (int n) {
+ if (trace(1))
+ htrc("Exception %d: %s\n", n, g->Message);
+ str = NULL;
+ } catch (const char* msg) {
+ strcpy(g->Message, msg);
+ str = NULL;
+ } // end catch
+
+ return str;
+} // end of Serialize
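
Serialize keeps its old signature but now routes everything through a JDOC bound to the chosen JOUT sink (JOUTSTR for strings, JOUTFILE/JOUTPRT for files). A hedged sketch reusing the jsp tree from the previous sketch; the output path is illustrative only:

  // fn == NULL: serialize back to a sub-allocated string.
  PSZ str = Serialize(g, jsp, NULL, 0);
  if (str)
    htrc("flat: %s\n", str);

  // pretty >= 2 together with a file name selects the JOUTPRT pretty writer.
  if (!Serialize(g, jsp, (char*)"/tmp/out.json", 2))
    htrc("write failed: %s\n", g->Message);
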
+
+
+/* -------------------------- Class JOUTSTR -------------------------- */
+
+/***********************************************************************/
+/* JOUTSTR constructor. */
+/***********************************************************************/
+JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g) {
+ PPOOLHEADER pph = (PPOOLHEADER)g->Sarea;
+
+ N = 0;
+ Max = pph->FreeBlk;
+ Max = (Max > 32) ? Max - 32 : Max;
+  Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not known yet
+} // end of JOUTSTR constructor
+
+/***********************************************************************/
+/* Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::WriteStr(const char* s) {
+ if (s) {
+ size_t len = strlen(s);
+
+ if (N + len > Max)
+ return true;
+
+ memcpy(Strp + N, s, len);
+ N += len;
+ return false;
+ } else
+ return true;
+
+} // end of WriteStr
+
+/***********************************************************************/
+/* Concatenate a character to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::WriteChr(const char c) {
+ if (N + 1 > Max)
+ return true;
+
+ Strp[N++] = c;
+ return false;
+} // end of WriteChr
+
+/***********************************************************************/
+/* Escape and Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTSTR::Escape(const char* s)
+{
+ if (s) {
+ WriteChr('"');
+
+ for (unsigned int i = 0; s[i]; i++)
+ switch (s[i]) {
+ case '"':
+ case '\\':
+ case '\t':
+ case '\n':
+ case '\r':
+ case '\b':
+ case '\f': WriteChr('\\');
+ // fall through
+ default:
+ WriteChr(s[i]);
+ break;
+ } // endswitch s[i]
+
+ WriteChr('"');
+ } else
+ WriteStr("null");
+
+ return false;
+} // end of Escape
+
+/* ------------------------- Class JOUTFILE -------------------------- */
+
+/***********************************************************************/
+/* Write a string to the Serialize file. */
+/***********************************************************************/
+bool JOUTFILE::WriteStr(const char* s)
+{
+ // This is temporary
+ fputs(s, Stream);
+ return false;
+} // end of WriteStr
+
+/***********************************************************************/
+/* Write a character to the Serialize file. */
+/***********************************************************************/
+bool JOUTFILE::WriteChr(const char c)
+{
+ // This is temporary
+ fputc(c, Stream);
+ return false;
+} // end of WriteChr
+
+/***********************************************************************/
+/* Escape and Concatenate a string to the Serialize string. */
+/***********************************************************************/
+bool JOUTFILE::Escape(const char* s)
+{
+ // This is temporary
+ if (s) {
+ fputc('"', Stream);
+
+ for (unsigned int i = 0; s[i]; i++)
+ switch (s[i]) {
+ case '"': fputs("\\\"", Stream); break;
+ case '\\': fputs("\\\\", Stream); break;
+ case '\t': fputs("\\t", Stream); break;
+ case '\n': fputs("\\n", Stream); break;
+ case '\r': fputs("\\r", Stream); break;
+ case '\b': fputs("\\b", Stream); break;
+ case '\f': fputs("\\f", Stream); break;
+ default:
+ fputc(s[i], Stream);
+ break;
+ } // endswitch s[i]
+
+ fputc('"', Stream);
+ } else
+ fputs("null", Stream);
+
+ return false;
+} // end of Escape
+
+/* ------------------------- Class JOUTPRT --------------------------- */
+
+/***********************************************************************/
+/* Write a string to the Serialize pretty file. */
+/***********************************************************************/
+bool JOUTPRT::WriteStr(const char* s)
+{
+ // This is temporary
+ if (B) {
+ fputs(EL, Stream);
+ M--;
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ B = false;
+ } // endif B
+
+ fputs(s, Stream);
+ return false;
+} // end of WriteStr
+
+/***********************************************************************/
+/* Write a character to the Serialize pretty file. */
+/***********************************************************************/
+bool JOUTPRT::WriteChr(const char c)
+{
+ switch (c) {
+ case ':':
+ fputs(": ", Stream);
+ break;
+ case '{':
+ case '[':
+#if 0
+ if (M)
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+#endif // 0
+
+ fputc(c, Stream);
+ fputs(EL, Stream);
+ M++;
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ break;
+ case '}':
+ case ']':
+ M--;
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ fputc(c, Stream);
+ B = true;
+ break;
+ case ',':
+ fputc(c, Stream);
+ fputs(EL, Stream);
+
+ for (int i = 0; i < M; i++)
+ fputc('\t', Stream);
+
+ B = false;
+ break;
+ default:
+ fputc(c, Stream);
+ } // endswitch c
+
+ return false;
+} // end of WriteChr
+
+/* --------------------------- Class JDOC ---------------------------- */
+
+/***********************************************************************/
/* Parse several items as being in an array. */
/***********************************************************************/
-PJAR JSON::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp)
+PJAR JDOC::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp)
{
- if (pty[0] && (!pretty || pretty > 2)) {
- PJAR jsp;
+ if (pty[0] && (!pretty || pretty > 2)) {
+ PJAR jsp;
- if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3)
- *ptyp = (pty[0]) ? 0 : 3;
+ if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3)
+ *ptyp = (pty[0]) ? 0 : 3;
- return jsp;
- } else
- strcpy(g->Message, "More than one item in file");
+ return jsp;
+ } else
+ strcpy(g->Message, "More than one item in file");
- return NULL;
+ return NULL;
} // end of ParseAsArray
/***********************************************************************/
/* Parse a JSON Array. */
/***********************************************************************/
-PJAR JSON::ParseArray(PGLOBAL g, int& i)
+PJAR JDOC::ParseArray(PGLOBAL g, int& i)
{
- int level = 0;
- bool b = (!i);
+ int level = 0;
+ bool b = (!i);
PJAR jarp = new(g) JARRAY;
for (; i < len; i++)
@@ -235,11 +535,11 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i)
jarp->InitArray(g);
return jarp;
- case '\n':
- if (!b)
- pty[0] = pty[1] = false;
- case '\r':
- case ' ':
+ case '\n':
+ if (!b)
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
case '\t':
break;
default:
@@ -247,17 +547,17 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i)
sprintf(g->Message, "Unexpected value near %.*s", ARGS);
throw 1;
} else
- jarp->AddValue(g, ParseValue(g, i));
+ jarp->AddArrayValue(g, ParseValue(g, i));
level = (b) ? 1 : 2;
break;
}; // endswitch s[i]
- if (b) {
- // Case of Pretty == 0
- jarp->InitArray(g);
- return jarp;
- } // endif b
+ if (b) {
+ // Case of Pretty == 0
+ jarp->InitArray(g);
+ return jarp;
+ } // endif b
throw ("Unexpected EOF in array");
} // end of ParseArray
@@ -265,10 +565,10 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i)
/***********************************************************************/
/* Parse a JSON Object. */
/***********************************************************************/
-PJOB JSON::ParseObject(PGLOBAL g, int& i)
+PJOB JDOC::ParseObject(PGLOBAL g, int& i)
{
PSZ key;
- int level = 0;
+ int level = -1;
PJOB jobp = new(g) JOBJECT;
PJPR jpp = NULL;
@@ -276,7 +576,7 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i)
switch (s[i]) {
case '"':
if (level < 2) {
- key = ParseString(g, ++i);
+ key = ParseString(g, ++i);
jpp = jobp->AddPair(g, key);
level = 1;
} else {
@@ -287,7 +587,7 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i)
break;
case ':':
if (level == 1) {
- jpp->Val = ParseValue(g, ++i);
+ jpp->Val = ParseValue(g, ++i);
level = 2;
} else {
sprintf(g->Message, "Unexpected ':' near %.*s", ARGS);
@@ -304,16 +604,16 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i)
break;
case '}':
- if (level < 2) {
+ if (level == 0 || level == 1) {
sprintf(g->Message, "Unexpected '}' near %.*s", ARGS);
throw 2;
} // endif level
return jobp;
- case '\n':
- pty[0] = pty[1] = false;
- case '\r':
- case ' ':
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
case '\t':
break;
default:
@@ -329,38 +629,42 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i)
/***********************************************************************/
/* Parse a JSON Value. */
/***********************************************************************/
-PJVAL JSON::ParseValue(PGLOBAL g, int& i)
+PJVAL JDOC::ParseValue(PGLOBAL g, int& i)
{
- int n;
PJVAL jvp = new(g) JVALUE;
for (; i < len; i++)
- switch (s[i]) {
- case '\n':
- pty[0] = pty[1] = false;
- case '\r':
- case ' ':
- case '\t':
- break;
- default:
- goto suite;
- } // endswitch
+ switch (s[i]) {
+ case '\n':
+ pty[0] = pty[1] = false;
+ case '\r':
+ case ' ':
+ case '\t':
+ break;
+ default:
+ goto suite;
+ } // endswitch
suite:
switch (s[i]) {
case '[':
- jvp->Jsp = ParseArray(g, ++i);
+ jvp->Jsp = ParseArray(g, ++i);
+ jvp->DataType = TYPE_JSON;
break;
case '{':
- jvp->Jsp = ParseObject(g, ++i);
+ jvp->Jsp = ParseObject(g, ++i);
+ jvp->DataType = TYPE_JSON;
break;
case '"':
- jvp->Value = AllocateValue(g, ParseString(g, ++i), TYPE_STRING);
+// jvp->Val = AllocVal(g, TYPE_STRG);
+ jvp->Strp = ParseString(g, ++i);
+ jvp->DataType = TYPE_STRG;
break;
case 't':
if (!strncmp(s + i, "true", 4)) {
- n = 1;
- jvp->Value = AllocateValue(g, &n, TYPE_TINY);
+// jvp->Val = AllocVal(g, TYPE_BOOL);
+ jvp->B = true;
+ jvp->DataType = TYPE_BOOL;
i += 3;
} else
goto err;
@@ -368,24 +672,26 @@ PJVAL JSON::ParseValue(PGLOBAL g, int& i)
break;
case 'f':
if (!strncmp(s + i, "false", 5)) {
- n = 0;
- jvp->Value = AllocateValue(g, &n, TYPE_TINY);
+// jvp->Val = AllocVal(g, TYPE_BOOL);
+ jvp->B = false;
+ jvp->DataType = TYPE_BOOL;
i += 4;
} else
goto err;
break;
case 'n':
- if (!strncmp(s + i, "null", 4))
+ if (!strncmp(s + i, "null", 4)) {
+ jvp->DataType = TYPE_NULL;
i += 3;
- else
+ } else
goto err;
break;
case '-':
default:
if (s[i] == '-' || isdigit(s[i]))
- jvp->Value = ParseNumeric(g, i);
+ ParseNumeric(g, i, jvp);
else
goto err;
@@ -401,7 +707,7 @@ err:
/***********************************************************************/
/* Unescape and parse a JSON string. */
/***********************************************************************/
-char *JSON::ParseString(PGLOBAL g, int& i)
+char *JDOC::ParseString(PGLOBAL g, int& i)
{
uchar *p;
int n = 0;
@@ -488,15 +794,15 @@ char *JSON::ParseString(PGLOBAL g, int& i)
/***********************************************************************/
/* Parse a JSON numeric value. */
/***********************************************************************/
-PVAL JSON::ParseNumeric(PGLOBAL g, int& i)
+void JDOC::ParseNumeric(PGLOBAL g, int& i, PJVAL vlp)
{
char buf[50];
int n = 0;
short nd = 0;
- bool has_dot = false;
- bool has_e = false;
- bool found_digit = false;
- PVAL valp = NULL;
+ bool has_dot = false;
+ bool has_e = false;
+ bool found_digit = false;
+//PVL vlp = NULL;
for (; i < len; i++) {
switch (s[i]) {
@@ -545,15 +851,27 @@ PVAL JSON::ParseNumeric(PGLOBAL g, int& i)
if (has_dot || has_e) {
double dv = strtod(buf, NULL);
- valp = AllocateValue(g, &dv, TYPE_DOUBLE, nd);
+// vlp = AllocVal(g, TYPE_DBL);
+ vlp->F = dv;
+ vlp->Nd = nd;
+ vlp->DataType = TYPE_DBL;
} else {
long long iv = strtoll(buf, NULL, 10);
- valp = AllocateValue(g, &iv, TYPE_BIGINT);
+ if (iv > INT_MAX32 || iv < INT_MIN32) {
+// vlp = AllocVal(g, TYPE_BINT);
+ vlp->LLn = iv;
+ vlp->DataType = TYPE_BINT;
+ } else {
+// vlp = AllocVal(g, TYPE_INTG);
+ vlp->N = (int)iv;
+ vlp->DataType = TYPE_INTG;
+ } // endif iv
+
} // endif has
i--; // Unstack following character
- return valp;
+ return;
} else
throw("No digit found");
@@ -562,137 +880,59 @@ PVAL JSON::ParseNumeric(PGLOBAL g, int& i)
} // end of ParseNumeric
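
ParseNumeric now fills the caller's JVALUE directly and picks the narrowest type: TYPE_INTG when the value fits in 32 bits, TYPE_BINT otherwise, and TYPE_DBL with Nd decimals when a dot or exponent is present. A small sketch of the resulting value types (g as in the earlier sketches, only calls visible in this patch):

  char nums[] = "[1, 5000000000, 2.50]";
  PJSON jnum = ParseJson(g, nums, strlen(nums), NULL, NULL);

  if (jnum && jnum->GetType() == TYPE_JAR) {
    PJAR jarp = (PJAR)jnum;

    // Expected: TYPE_INTG, TYPE_BINT (exceeds INT_MAX32), TYPE_DBL (Nd == 2).
    for (int i = 0; i < jarp->size(); i++)
      htrc("item %d: valtype=%d\n", i, jarp->GetArrayValue(i)->GetValType());
  } // endif jnum
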
/***********************************************************************/
-/* Serialize a JSON tree: */
-/***********************************************************************/
-PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty)
-{
- PSZ str = NULL;
- bool b = false, err = true;
- JOUT *jp;
- FILE *fs = NULL;
-
- g->Message[0] = 0;
-
- try {
- if (!jsp) {
- strcpy(g->Message, "Null json tree");
- throw 1;
- } else if (!fn) {
- // Serialize to a string
- jp = new(g) JOUTSTR(g);
- b = pretty == 1;
- } else {
- if (!(fs = fopen(fn, "wb"))) {
- sprintf(g->Message, MSG(OPEN_MODE_ERROR),
- "w", (int)errno, fn);
- strcat(strcat(g->Message, ": "), strerror(errno));
- throw 2;
- } else if (pretty >= 2) {
- // Serialize to a pretty file
- jp = new(g)JOUTPRT(g, fs);
- } else {
- // Serialize to a flat file
- b = true;
- jp = new(g)JOUTFILE(g, fs, pretty);
- } // endif's
-
- } // endif's
-
- switch (jsp->GetType()) {
- case TYPE_JAR:
- err = SerializeArray(jp, (PJAR)jsp, b);
- break;
- case TYPE_JOB:
- err = ((b && jp->Prty()) && jp->WriteChr('\t'));
- err |= SerializeObject(jp, (PJOB)jsp);
- break;
- case TYPE_JVAL:
- err = SerializeValue(jp, (PJVAL)jsp);
- break;
- default:
- strcpy(g->Message, "Invalid json tree");
- } // endswitch Type
-
- if (fs) {
- fputs(EL, fs);
- fclose(fs);
- str = (err) ? NULL : strcpy(g->Message, "Ok");
- } else if (!err) {
- str = ((JOUTSTR*)jp)->Strp;
- jp->WriteChr('\0');
- PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N);
- } else {
- if (!g->Message[0])
- strcpy(g->Message, "Error in Serialize");
-
- } // endif's
-
- } catch (int n) {
- if (trace(1))
- htrc("Exception %d: %s\n", n, g->Message);
- str = NULL;
- } catch (const char *msg) {
- strcpy(g->Message, msg);
- str = NULL;
- } // end catch
-
- return str;
-} // end of Serialize
-
-/***********************************************************************/
/* Serialize a JSON Array. */
/***********************************************************************/
-bool SerializeArray(JOUT *js, PJAR jarp, bool b)
+bool JDOC::SerializeArray(PJAR jarp, bool b)
{
bool first = true;
- if (b) {
- if (js->Prty()) {
- if (js->WriteChr('['))
- return true;
- else if (js->Prty() == 1 && (js->WriteStr(EL) || js->WriteChr('\t')))
- return true;
+ if (b) {
+ if (js->Prty()) {
+ if (js->WriteChr('['))
+ return true;
+ else if (js->Prty() == 1 && (js->WriteStr(EL) || js->WriteChr('\t')))
+ return true;
- } // endif Prty
+ } // endif Prty
- } else if (js->WriteChr('['))
- return true;
+ } else if (js->WriteChr('['))
+ return true;
for (int i = 0; i < jarp->size(); i++) {
if (first)
first = false;
- else if ((!b || js->Prty()) && js->WriteChr(','))
+ else if ((!b || js->Prty()) && js->WriteChr(','))
return true;
- else if (b) {
- if (js->Prty() < 2 && js->WriteStr(EL))
- return true;
- else if (js->Prty() == 1 && js->WriteChr('\t'))
- return true;
+ else if (b) {
+ if (js->Prty() < 2 && js->WriteStr(EL))
+ return true;
+ else if (js->Prty() == 1 && js->WriteChr('\t'))
+ return true;
- } // endif b
+ } // endif b
- if (SerializeValue(js, jarp->GetValue(i)))
+ if (SerializeValue(jarp->GetArrayValue(i)))
return true;
} // endfor i
- if (b && js->Prty() == 1 && js->WriteStr(EL))
+ if (b && js->Prty() == 1 && js->WriteStr(EL))
return true;
- return ((!b || js->Prty()) && js->WriteChr(']'));
+ return ((!b || js->Prty()) && js->WriteChr(']'));
} // end of SerializeArray
/***********************************************************************/
/* Serialize a JSON Object. */
/***********************************************************************/
-bool SerializeObject(JOUT *js, PJOB jobp)
+bool JDOC::SerializeObject(PJOB jobp)
{
bool first = true;
if (js->WriteChr('{'))
return true;
- for (PJPR pair = jobp->First; pair; pair = pair->Next) {
+ for (PJPR pair = jobp->GetFirst(); pair; pair = pair->Next) {
if (first)
first = false;
else if (js->WriteChr(','))
@@ -702,7 +942,7 @@ bool SerializeObject(JOUT *js, PJOB jobp)
js->WriteStr(pair->Key) ||
js->WriteChr('"') ||
js->WriteChr(':') ||
- SerializeValue(js, pair->Val))
+ SerializeValue(pair->Val))
return true;
} // endfor i
@@ -713,259 +953,70 @@ bool SerializeObject(JOUT *js, PJOB jobp)
/***********************************************************************/
/* Serialize a JSON Value. */
/***********************************************************************/
-bool SerializeValue(JOUT *js, PJVAL jvp)
+bool JDOC::SerializeValue(PJVAL jvp)
{
+ char buf[64];
PJAR jap;
PJOB jop;
- PVAL valp;
+ //PVL vlp;
if ((jap = jvp->GetArray()))
- return SerializeArray(js, jap, false);
+ return SerializeArray(jap, false);
else if ((jop = jvp->GetObject()))
- return SerializeObject(js, jop);
- else if (!(valp = jvp->Value) || valp->IsNull())
- return js->WriteStr("null");
- else switch (valp->GetType()) {
- case TYPE_TINY:
- return js->WriteStr(valp->GetTinyValue() ? "true" : "false");
- case TYPE_STRING:
- return js->Escape(valp->GetCharValue());
+ return SerializeObject(jop);
+//else if (!(vlp = jvp->Val))
+// return js->WriteStr("null");
+ else switch (jvp->DataType) {
+ case TYPE_BOOL:
+ return js->WriteStr(jvp->B ? "true" : "false");
+ case TYPE_STRG:
+ case TYPE_DTM:
+ return js->Escape(jvp->Strp);
+ case TYPE_INTG:
+ sprintf(buf, "%d", jvp->N);
+ return js->WriteStr(buf);
+ case TYPE_BINT:
+ sprintf(buf, "%lld", jvp->LLn);
+ return js->WriteStr(buf);
+ case TYPE_DBL:
+ sprintf(buf, "%.*lf", jvp->Nd, jvp->F);
+ return js->WriteStr(buf);
+ case TYPE_NULL:
+ return js->WriteStr("null");
default:
- if (valp->IsTypeNum()) {
- char buf[32];
-
- return js->WriteStr(valp->GetCharString(buf));
- } // endif valp
-
- } // endswitch Type
+ return js->WriteStr("???"); // TODO
+ } // endswitch Type
- strcpy(js->g->Message, "Unrecognized value");
- return true;
+ strcpy(js->g->Message, "Unrecognized value");
+ return true;
} // end of SerializeValue
-/* -------------------------- Class JOUTSTR -------------------------- */
-
-/***********************************************************************/
-/* JOUTSTR constructor. */
-/***********************************************************************/
-JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g)
-{
- PPOOLHEADER pph = (PPOOLHEADER)g->Sarea;
-
- N = 0;
- Max = pph->FreeBlk;
- Max = (Max > 32) ? Max - 32 : Max;
- Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not know yet
-} // end of JOUTSTR constructor
-
-/***********************************************************************/
-/* Concatenate a string to the Serialize string. */
-/***********************************************************************/
-bool JOUTSTR::WriteStr(const char *s)
-{
- if (s) {
- size_t len = strlen(s);
-
- if (N + len > Max)
- return true;
-
- memcpy(Strp + N, s, len);
- N += len;
- return false;
- } else
- return true;
-
-} // end of WriteStr
-
-/***********************************************************************/
-/* Concatenate a character to the Serialize string. */
-/***********************************************************************/
-bool JOUTSTR::WriteChr(const char c)
-{
- if (N + 1 > Max)
- return true;
-
- Strp[N++] = c;
- return false;
-} // end of WriteChr
-
-/***********************************************************************/
-/* Escape and Concatenate a string to the Serialize string. */
-/***********************************************************************/
-bool JOUTSTR::Escape(const char *s)
-{
- WriteChr('"');
-
- for (unsigned int i = 0; s[i]; i++)
- switch (s[i]) {
- case '"':
- case '\\':
- case '\t':
- case '\n':
- case '\r':
- case '\b':
- case '\f': WriteChr('\\');
- // fall through
- default:
- WriteChr(s[i]);
- break;
- } // endswitch s[i]
-
- WriteChr('"');
- return false;
-} // end of Escape
-
-/* ------------------------- Class JOUTFILE -------------------------- */
-
-/***********************************************************************/
-/* Write a string to the Serialize file. */
-/***********************************************************************/
-bool JOUTFILE::WriteStr(const char *s)
-{
- // This is temporary
- fputs(s, Stream);
- return false;
-} // end of WriteStr
-
-/***********************************************************************/
-/* Write a character to the Serialize file. */
-/***********************************************************************/
-bool JOUTFILE::WriteChr(const char c)
-{
- // This is temporary
- fputc(c, Stream);
- return false;
-} // end of WriteChr
-
-/***********************************************************************/
-/* Escape and Concatenate a string to the Serialize string. */
-/***********************************************************************/
-bool JOUTFILE::Escape(const char *s)
-{
- // This is temporary
- fputc('"', Stream);
-
- for (unsigned int i = 0; s[i]; i++)
- switch (s[i]) {
- case '"': fputs("\\\"", Stream); break;
- case '\\': fputs("\\\\", Stream); break;
- case '\t': fputs("\\t", Stream); break;
- case '\n': fputs("\\n", Stream); break;
- case '\r': fputs("\\r", Stream); break;
- case '\b': fputs("\\b", Stream); break;
- case '\f': fputs("\\f", Stream); break;
- default:
- fputc(s[i], Stream);
- break;
- } // endswitch s[i]
-
- fputc('"', Stream);
- return false;
-} // end of Escape
-
-/* ------------------------- Class JOUTPRT --------------------------- */
-
-/***********************************************************************/
-/* Write a string to the Serialize pretty file. */
-/***********************************************************************/
-bool JOUTPRT::WriteStr(const char *s)
-{
- // This is temporary
- if (B) {
- fputs(EL, Stream);
- M--;
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-
- B = false;
- } // endif B
-
- fputs(s, Stream);
- return false;
-} // end of WriteStr
-
-/***********************************************************************/
-/* Write a character to the Serialize pretty file. */
-/***********************************************************************/
-bool JOUTPRT::WriteChr(const char c)
-{
- switch (c) {
- case ':':
- fputs(": ", Stream);
- break;
- case '{':
- case '[':
-#if 0
- if (M)
- fputs(EL, Stream);
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-#endif // 0
-
- fputc(c, Stream);
- fputs(EL, Stream);
- M++;
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-
- break;
- case '}':
- case ']':
- M--;
- fputs(EL, Stream);
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-
- fputc(c, Stream);
- B = true;
- break;
- case ',':
- fputc(c, Stream);
- fputs(EL, Stream);
-
- for (int i = 0; i < M; i++)
- fputc('\t', Stream);
-
- B = false;
- break;
- default:
- fputc(c, Stream);
- } // endswitch c
-
-return false;
-} // end of WriteChr
-
/* -------------------------- Class JOBJECT -------------------------- */
/***********************************************************************/
/* Return the number of pairs in this object. */
/***********************************************************************/
-int JOBJECT::GetSize(bool b)
-{
- if (b) {
- // Return only non null pairs
- int n = 0;
-
- for (PJPR jpp = First; jpp; jpp = jpp->Next)
- if (jpp->Val && !jpp->Val->IsNull())
- n++;
+int JOBJECT::GetSize(bool b) {
+ int n = 0;
- return n;
- } else
- return Size;
+ for (PJPR jpp = First; jpp; jpp = jpp->Next)
+ // If b return only non null pairs
+    if (!b || (jpp->Val && !jpp->Val->IsNull()))
+ n++;
-} // end of GetSize
+ return n;
+} // end of GetSize
/***********************************************************************/
/* Add a new pair to an Object. */
/***********************************************************************/
PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key)
{
- PJPR jpp = new(g) JPAIR(key);
+ PJPR jpp = (PJPR)PlugSubAlloc(g, NULL, sizeof(JPAIR));
+
+ jpp->Key = key;
+ jpp->Next = NULL;
+ jpp->Val = NULL;
if (Last)
Last->Next = jpp;
@@ -973,7 +1024,6 @@ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key)
First = jpp;
Last = jpp;
- Size++;
return jpp;
} // end of AddPair
@@ -982,13 +1032,13 @@ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key)
/***********************************************************************/
PJAR JOBJECT::GetKeyList(PGLOBAL g)
{
- PJAR jarp = new(g) JARRAY();
+ PJAR jarp = new(g) JARRAY();
- for (PJPR jpp = First; jpp; jpp = jpp->Next)
- jarp->AddValue(g, new(g) JVALUE(g, jpp->GetKey()));
+ for (PJPR jpp = First; jpp; jpp = jpp->Next)
+ jarp->AddArrayValue(g, new(g) JVALUE(g, jpp->Key));
- jarp->InitArray(g);
- return jarp;
+ jarp->InitArray(g);
+ return jarp;
} // end of GetKeyList
/***********************************************************************/
@@ -996,19 +1046,19 @@ PJAR JOBJECT::GetKeyList(PGLOBAL g)
/***********************************************************************/
PJAR JOBJECT::GetValList(PGLOBAL g)
{
- PJAR jarp = new(g) JARRAY();
+ PJAR jarp = new(g) JARRAY();
- for (PJPR jpp = First; jpp; jpp = jpp->Next)
- jarp->AddValue(g, jpp->GetVal());
+ for (PJPR jpp = First; jpp; jpp = jpp->Next)
+ jarp->AddArrayValue(g, jpp->Val);
- jarp->InitArray(g);
- return jarp;
+ jarp->InitArray(g);
+ return jarp;
} // end of GetValList
/***********************************************************************/
/* Get the value corresponding to the given key. */
/***********************************************************************/
-PJVAL JOBJECT::GetValue(const char* key)
+PJVAL JOBJECT::GetKeyValue(const char* key)
{
for (PJPR jp = First; jp; jp = jp->Next)
if (!strcmp(jp->Key, key))
@@ -1020,43 +1070,57 @@ PJVAL JOBJECT::GetValue(const char* key)
/***********************************************************************/
/* Return the text corresponding to all keys (XML like). */
/***********************************************************************/
-PSZ JOBJECT::GetText(PGLOBAL g, PSZ text)
+PSZ JOBJECT::GetText(PGLOBAL g, PSTRG text)
{
- int n;
+ if (First) {
+ bool b;
- if (!text) {
- text = (char*)PlugSubAlloc(g, NULL, 0);
- text[0] = 0;
- n = 1;
- } else
- n = 0;
+ if (!text) {
+ text = new(g) STRING(g, 256);
+ b = true;
+ } else {
+ if (text->GetLastChar() != ' ')
+ text->Append(' ');
- if (!First && n)
- return NULL;
- else if (n == 1 && Size == 1 && !strcmp(First->GetKey(), "$date")) {
- int i;
+ b = false;
+ } // endif text
- First->Val->GetText(g, text);
- i = (text[1] == '-' ? 2 : 1);
+ if (b && !First->Next && !strcmp(First->Key, "$date")) {
+ int i;
+ PSZ s;
- if (IsNum(text + i)) {
- // Date is in milliseconds
- int j = (int)strlen(text);
+ First->Val->GetText(g, text);
+ s = text->GetStr();
+ i = (s[1] == '-' ? 2 : 1);
- if (j >= 4 + i)
- text[j - 3] = 0; // Change it to seconds
- else
- strcpy(text, " 0");
+ if (IsNum(s + i)) {
+ // Date is in milliseconds
+ int j = text->GetLength();
- } // endif text
+ if (j >= 4 + i) {
+ s[j - 3] = 0; // Change it to seconds
+ text->SetLength((uint)strlen(s));
+ } else
+ text->Set(" 0");
+
+ } // endif text
+
+ } else for (PJPR jp = First; jp; jp = jp->Next) {
+ jp->Val->GetText(g, text);
- } else for (PJPR jp = First; jp; jp = jp->Next)
- jp->Val->GetText(g, text);
+ if (jp->Next)
+ text->Append(' ');
- if (n)
- PlugSubAlloc(g, NULL, strlen(text) + 1);
+ } // endfor jp
- return text + n;
+ if (b) {
+ text->Trim();
+ return text->GetStr();
+ } // endif b
+
+ } // endif First
+
+ return NULL;
} // end of GetText;
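
GetText now builds its result in a STRING object instead of concatenating into a raw buffer; only the outermost call (text == NULL) allocates the STRING and returns its trimmed content, while nested calls just append. A hedged sketch, reusing the object parsed in the first sketch above:

  // Outermost call: pass NULL, get back the concatenated pair values.
  PSZ txt = ((PJOB)jsp)->GetText(g, NULL);

  if (txt)
    htrc("text: %s\n", txt);   // e.g. "foo 3" for {"name":"foo","qty":3}
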
/***********************************************************************/
@@ -1064,25 +1128,25 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text)
/***********************************************************************/
bool JOBJECT::Merge(PGLOBAL g, PJSON jsp)
{
- if (jsp->GetType() != TYPE_JOB) {
- strcpy(g->Message, "Second argument is not an object");
- return true;
- } // endif Type
+ if (jsp->GetType() != TYPE_JOB) {
+ strcpy(g->Message, "Second argument is not an object");
+ return true;
+ } // endif Type
- PJOB jobp = (PJOB)jsp;
+ PJOB jobp = (PJOB)jsp;
- for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next)
- SetValue(g, jpp->GetVal(), jpp->GetKey());
+ for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next)
+ SetKeyValue(g, jpp->Val, jpp->Key);
- return false;
+ return false;
} // end of Merge;
/***********************************************************************/
/* Set or add a value corresponding to the given key. */
/***********************************************************************/
-void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key)
+void JOBJECT::SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key)
{
- PJPR jp;
+ PJPR jp;
for (jp = First; jp; jp = jp->Next)
if (!strcmp(jp->Key, key)) {
@@ -1102,15 +1166,14 @@ void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key)
/***********************************************************************/
void JOBJECT::DeleteKey(PCSZ key)
{
- PJPR jp, *pjp = &First;
+ PJPR jp, *pjp = &First;
- for (jp = First; jp; jp = jp->Next)
- if (!strcmp(jp->Key, key)) {
- *pjp = jp->Next;
- Size--;
- break;
- } else
- pjp = &jp->Next;
+ for (jp = First; jp; jp = jp->Next)
+ if (!strcmp(jp->Key, key)) {
+ *pjp = jp->Next;
+ break;
+ } else
+ pjp = &jp->Next;
} // end of DeleteKey
@@ -1129,23 +1192,35 @@ bool JOBJECT::IsNull(void)
/* -------------------------- Class JARRAY --------------------------- */
/***********************************************************************/
+/* JARRAY constructor. */
+/***********************************************************************/
+JARRAY::JARRAY(void) : JSON()
+{
+ Type = TYPE_JAR;
+ Size = 0;
+ Alloc = 0;
+ First = Last = NULL;
+ Mvals = NULL;
+} // end of JARRAY constructor
+
+/***********************************************************************/
/* Return the number of values in this object. */
/***********************************************************************/
int JARRAY::GetSize(bool b)
{
- if (b) {
- // Return only non null values
- int n = 0;
+ if (b) {
+ // Return only non null values
+ int n = 0;
- for (PJVAL jvp = First; jvp; jvp = jvp->Next)
- if (!jvp->IsNull())
- n++;
+ for (PJVAL jvp = First; jvp; jvp = jvp->Next)
+ if (!jvp->IsNull())
+ n++;
- return n;
- } else
- return Size;
+ return n;
+ } else
+ return Size;
-} // end of GetSize
+} // end of GetSize
/***********************************************************************/
/* Make the array of values from the values list. */
@@ -1166,19 +1241,19 @@ void JARRAY::InitArray(PGLOBAL g)
} // endif Size
for (i = 0, jvp = First; jvp; jvp = jvp->Next)
- if (!jvp->Del) {
- Mvals[i++] = jvp;
- pjvp = &jvp->Next;
- Last = jvp;
- } else
- *pjvp = jvp->Next;
+ if (!jvp->Del) {
+ Mvals[i++] = jvp;
+ pjvp = &jvp->Next;
+ Last = jvp;
+ } else
+ *pjvp = jvp->Next;
} // end of InitArray
/***********************************************************************/
/* Get the Nth value of an Array. */
/***********************************************************************/
-PJVAL JARRAY::GetValue(int i)
+PJVAL JARRAY::GetArrayValue(int i)
{
if (Mvals && i >= 0 && i < Size)
return Mvals[i];
@@ -1189,33 +1264,33 @@ PJVAL JARRAY::GetValue(int i)
/***********************************************************************/
/* Add a Value to the Array Value list. */
/***********************************************************************/
-PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x)
+PJVAL JARRAY::AddArrayValue(PGLOBAL g, PJVAL jvp, int *x)
{
if (!jvp)
jvp = new(g) JVALUE;
- if (x) {
- int i = 0, n = *x;
- PJVAL jp, *jpp = &First;
+ if (x) {
+ int i = 0, n = *x;
+ PJVAL jp, *jpp = &First;
- for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next));
+ for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next));
- (*jpp) = jvp;
+ (*jpp) = jvp;
- if (!(jvp->Next = jp))
- Last = jvp;
+ if (!(jvp->Next = jp))
+ Last = jvp;
- } else {
- if (!First)
- First = jvp;
- else if (Last == First)
- First->Next = Last = jvp;
- else
- Last->Next = jvp;
+ } else {
+ if (!First)
+ First = jvp;
+ else if (Last == First)
+ First->Next = Last = jvp;
+ else
+ Last->Next = jvp;
- Last = jvp;
- Last->Next = NULL;
- } // endif x
+ Last = jvp;
+ Last->Next = NULL;
+ } // endif x
return jvp;
} // end of AddArrayValue
@@ -1225,24 +1300,24 @@ PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x)
/***********************************************************************/
bool JARRAY::Merge(PGLOBAL g, PJSON jsp)
{
- if (jsp->GetType() != TYPE_JAR) {
- strcpy(g->Message, "Second argument is not an array");
- return true;
- } // endif Type
+ if (jsp->GetType() != TYPE_JAR) {
+ strcpy(g->Message, "Second argument is not an array");
+ return true;
+ } // endif Type
- PJAR arp = (PJAR)jsp;
+ PJAR arp = (PJAR)jsp;
- for (int i = 0; i < jsp->size(); i++)
- AddValue(g, arp->GetValue(i));
+ for (int i = 0; i < arp->size(); i++)
+ AddArrayValue(g, arp->GetArrayValue(i));
- InitArray(g);
- return false;
+ InitArray(g);
+ return false;
} // end of Merge
/***********************************************************************/
/* Set the nth Value of the Array Value list. */
/***********************************************************************/
-bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n)
+bool JARRAY::SetArrayValue(PGLOBAL g, PJVAL jvp, int n)
{
int i = 0;
PJVAL jp, *jpp = &First;
@@ -1259,25 +1334,42 @@ bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n)
/***********************************************************************/
/* Return the text corresponding to all values. */
/***********************************************************************/
-PSZ JARRAY::GetText(PGLOBAL g, PSZ text)
+PSZ JARRAY::GetText(PGLOBAL g, PSTRG text)
{
- int n;
- PJVAL jp;
+ if (First) {
+ bool b;
+ PJVAL jp;
+
+ if (!text) {
+ text = new(g) STRING(g, 256);
+ b = true;
+ } else {
+ if (text->GetLastChar() != ' ')
+ text->Append(" (");
+ else
+ text->Append('(');
+
+ b = false;
+ }
+
+ for (jp = First; jp; jp = jp->Next) {
+ jp->GetText(g, text);
- if (!text) {
- text = (char*)PlugSubAlloc(g, NULL, 0);
- text[0] = 0;
- n = 1;
- } else
- n = 0;
+ if (jp->Next)
+ text->Append(", ");
+ else if (!b)
+ text->Append(')');
- for (jp = First; jp; jp = jp->Next)
- jp->GetText(g, text);
+ } // endfor jp
- if (n)
- PlugSubAlloc(g, NULL, strlen(text) + 1);
+ if (b) {
+ text->Trim();
+ return text->GetStr();
+ } // endif b
- return text + n;
+ } // endif First
+
+ return NULL;
} // end of GetText;
/***********************************************************************/
@@ -1285,13 +1377,13 @@ PSZ JARRAY::GetText(PGLOBAL g, PSZ text)
/***********************************************************************/
bool JARRAY::DeleteValue(int n)
{
- PJVAL jvp = GetValue(n);
+ PJVAL jvp = GetArrayValue(n);
- if (jvp) {
- jvp->Del = true;
- return false;
- } else
- return true;
+ if (jvp) {
+ jvp->Del = true;
+ return false;
+ } else
+ return true;
} // end of DeleteValue
@@ -1310,32 +1402,60 @@ bool JARRAY::IsNull(void)
/* -------------------------- Class JVALUE- -------------------------- */
/***********************************************************************/
-/* Constructor for a JSON. */
+/* Constructor for a JVALUE. */
/***********************************************************************/
JVALUE::JVALUE(PJSON jsp) : JSON()
{
- if (jsp->GetType() == TYPE_JVAL) {
- Jsp = jsp->GetJsp();
- Value = jsp->GetValue();
- } else {
- Jsp = jsp;
- Value = NULL;
- } // endif Type
+ if (jsp->GetType() == TYPE_JVAL) {
+ PJVAL jvp = (PJVAL)jsp;
+
+// Val = ((PJVAL)jsp)->GetVal();
+ if (jvp->DataType == TYPE_JSON) {
+ Jsp = jvp->GetJsp();
+ DataType = TYPE_JSON;
+ Nd = 0;
+ } else {
+ LLn = jvp->LLn; // Must be LLn on 32 bit machines
+ Nd = jvp->Nd;
+ DataType = jvp->DataType;
+ } // endelse Jsp
+
+ } else {
+ Jsp = jsp;
+// Val = NULL;
+ DataType = TYPE_JSON;
+ Nd = 0;
+ } // endif Type
- Next = NULL;
- Del = false;
- Size = 1;
-} // end of JVALUE constructor
+ Next = NULL;
+ Del = false;
+ Type = TYPE_JVAL;
+} // end of JVALUE constructor
+#if 0
/***********************************************************************/
-/* Constructor for a Value with a given string or numeric value. */
+/* Constructor for a JVALUE with a given string or numeric value. */
/***********************************************************************/
-JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON()
+JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON()
{
Jsp = NULL;
- Value = AllocateValue(g, valp);
+ Val = vlp;
Next = NULL;
Del = false;
+ Type = TYPE_JVAL;
+} // end of JVALUE constructor
+#endif // 0
+
+/***********************************************************************/
+/* Constructor for a JVALUE with a given string or numeric value. */
+/***********************************************************************/
+JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() {
+ Jsp = NULL;
+//Val = NULL;
+ SetValue(g, valp);
+ Next = NULL;
+ Del = false;
+ Type = TYPE_JVAL;
} // end of JVALUE constructor
/***********************************************************************/
@@ -1343,23 +1463,40 @@ JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON()
/***********************************************************************/
JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON()
{
- Jsp = NULL;
- Value = AllocateValue(g, (void*)strp, TYPE_STRING);
- Next = NULL;
- Del = false;
+ Jsp = NULL;
+//Val = AllocVal(g, TYPE_STRG);
+ Strp = (char*)strp;
+ DataType = TYPE_STRG;
+ Nd = 0;
+ Next = NULL;
+ Del = false;
+ Type = TYPE_JVAL;
} // end of JVALUE constructor
/***********************************************************************/
+/* Set or reset all Jvalue members. */
+/***********************************************************************/
+void JVALUE::Clear(void)
+{
+ Jsp = NULL;
+ Next = NULL;
+ Type = TYPE_JVAL;
+ Del = false;
+ Nd = 0;
+ DataType = TYPE_NULL;
+} // end of Clear
+
+/***********************************************************************/
/* Returns the type of the Value's value. */
/***********************************************************************/
JTYP JVALUE::GetValType(void)
{
- if (Jsp)
+ if (DataType == TYPE_JSON)
return Jsp->GetType();
- else if (Value)
- return (JTYP)Value->GetType();
+//else if (Val)
+// return Val->Type;
else
- return TYPE_NULL;
+ return DataType;
} // end of GetValType
@@ -1368,7 +1505,7 @@ JTYP JVALUE::GetValType(void)
/***********************************************************************/
PJOB JVALUE::GetObject(void)
{
- if (Jsp && Jsp->GetType() == TYPE_JOB)
+ if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JOB)
return (PJOB)Jsp;
return NULL;
@@ -1379,18 +1516,46 @@ PJOB JVALUE::GetObject(void)
/***********************************************************************/
PJAR JVALUE::GetArray(void)
{
- if (Jsp && Jsp->GetType() == TYPE_JAR)
+ if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JAR)
return (PJAR)Jsp;
return NULL;
} // end of GetArray
/***********************************************************************/
-/* Return the Value's Integer value. */
+/* Return the Value's content as a Value (PVAL) class.                */
/***********************************************************************/
-int JVALUE::GetInteger(void)
+PVAL JVALUE::GetValue(PGLOBAL g)
{
- return (Value) ? Value->GetIntValue() : 0;
+ PVAL valp = NULL;
+
+ if (DataType != TYPE_JSON)
+ if (DataType == TYPE_STRG)
+ valp = AllocateValue(g, Strp, DataType, Nd);
+ else
+ valp = AllocateValue(g, &LLn, DataType, Nd);
+
+ return valp;
+} // end of GetValue
+
+/***********************************************************************/
+/* Return the Value's Integer value. */
+/***********************************************************************/
+int JVALUE::GetInteger(void) {
+ int n;
+
+ switch (DataType) {
+ case TYPE_INTG: n = N; break;
+ case TYPE_DBL: n = (int)F; break;
+ case TYPE_DTM:
+ case TYPE_STRG: n = atoi(Strp); break;
+ case TYPE_BOOL: n = (B) ? 1 : 0; break;
+ case TYPE_BINT: n = (int)LLn; break;
+ default:
+ n = 0;
+ } // endswitch Type
+
+ return n;
} // end of GetInteger
/***********************************************************************/
@@ -1398,7 +1563,20 @@ int JVALUE::GetInteger(void)
/***********************************************************************/
long long JVALUE::GetBigint(void)
{
- return (Value) ? Value->GetBigintValue() : 0;
+ long long lln;
+
+ switch (DataType) {
+ case TYPE_BINT: lln = LLn; break;
+ case TYPE_INTG: lln = (long long)N; break;
+ case TYPE_DBL: lln = (long long)F; break;
+ case TYPE_DTM:
+ case TYPE_STRG: lln = atoll(Strp); break;
+ case TYPE_BOOL: lln = (B) ? 1 : 0; break;
+ default:
+ lln = 0;
+ } // endswitch Type
+
+ return lln;
} // end of GetBigint
/***********************************************************************/
@@ -1406,75 +1584,157 @@ long long JVALUE::GetBigint(void)
/***********************************************************************/
double JVALUE::GetFloat(void)
{
- return (Value) ? Value->GetFloatValue() : 0.0;
+ double d;
+
+ switch (DataType) {
+ case TYPE_DBL: d = F; break;
+ case TYPE_BINT: d = (double)LLn; break;
+ case TYPE_INTG: d = (double)N; break;
+ case TYPE_DTM:
+ case TYPE_STRG: d = atof(Strp); break;
+ case TYPE_BOOL: d = (B) ? 1.0 : 0.0; break;
+ default:
+ d = 0.0;
+ } // endswitch Type
+
+ return d;
} // end of GetFloat
/***********************************************************************/
/* Return the Value's String value. */
/***********************************************************************/
-PSZ JVALUE::GetString(PGLOBAL g)
+PSZ JVALUE::GetString(PGLOBAL g, char *buff)
{
- char *p;
-
- if (Value) {
- char buf[32];
-
- if ((p = Value->GetCharString(buf)) == buf)
- p = PlugDup(g, buf);
-
- } else
- p = NULL;
-
- return p;
+ char buf[32];
+ char *p = (buff) ? buff : buf;
+
+ switch (DataType) {
+ case TYPE_DTM:
+ case TYPE_STRG:
+ p = Strp;
+ break;
+ case TYPE_INTG:
+ sprintf(p, "%d", N);
+ break;
+ case TYPE_BINT:
+ sprintf(p, "%lld", LLn);
+ break;
+ case TYPE_DBL:
+ sprintf(p, "%.*lf", Nd, F);
+ break;
+ case TYPE_BOOL:
+ p = (char*)((B) ? "true" : "false");
+ break;
+ case TYPE_NULL:
+ p = (char*)"null";
+ break;
+ default:
+ p = NULL;
+ } // endswitch Type
+
+
+ return (p == buf) ? (char*)PlugDup(g, buf) : p;
} // end of GetString
/***********************************************************************/
/* Return the Value's text representation.                            */
/***********************************************************************/
-PSZ JVALUE::GetText(PGLOBAL g, PSZ text)
+PSZ JVALUE::GetText(PGLOBAL g, PSTRG text)
{
- if (Jsp)
+ if (DataType == TYPE_JSON)
return Jsp->GetText(g, text);
- char buf[32];
- PSZ s = (Value) ? Value->GetCharString(buf) : NULL;
+ char buff[32];
+ PSZ s = (DataType == TYPE_NULL) ? NULL : GetString(g, buff);
- if (s)
- strcat(strcat(text, " "), s);
- else if (GetJsonNull())
- strcat(strcat(text, " "), GetJsonNull());
+ if (s)
+ text->Append(s);
+ else if (GetJsonNull())
+ text->Append(GetJsonNull());
- return text;
+ return NULL;
} // end of GetText
void JVALUE::SetValue(PJSON jsp)
{
- if (jsp && jsp->GetType() == TYPE_JVAL) {
- Jsp = jsp->GetJsp();
- Value = jsp->GetValue();
- } else {
- Jsp = jsp;
- Value = NULL;
- } // endif Type
+ if (DataType == TYPE_JSON && jsp->GetType() == TYPE_JVAL) {
+ Jsp = jsp->GetJsp();
+ Nd = ((PJVAL)jsp)->Nd;
+ DataType = ((PJVAL)jsp)->DataType;
+ // Val = ((PJVAL)jsp)->GetVal();
+ } else {
+ Jsp = jsp;
+ DataType = TYPE_JSON;
+ } // endif Type
+
+} // end of SetValue;
+
+void JVALUE::SetValue(PGLOBAL g, PVAL valp)
+{
+//if (!Val)
+// Val = AllocVal(g, TYPE_VAL);
+
+ if (!valp || valp->IsNull()) {
+ DataType = TYPE_NULL;
+ } else switch (valp->GetType()) {
+ case TYPE_DATE:
+ if (((DTVAL*)valp)->IsFormatted())
+ Strp = PlugDup(g, valp->GetCharValue());
+ else {
+ char buf[32];
+
+ Strp = PlugDup(g, valp->GetCharString(buf));
+ } // endif Formatted
+
+ DataType = TYPE_DTM;
+ break;
+ case TYPE_STRING:
+ Strp = PlugDup(g, valp->GetCharValue());
+ DataType = TYPE_STRG;
+ break;
+ case TYPE_DOUBLE:
+ case TYPE_DECIM:
+ F = valp->GetFloatValue();
+
+ if (IsTypeNum(valp->GetType()))
+ Nd = valp->GetValPrec();
+
+ DataType = TYPE_DBL;
+ break;
+ case TYPE_TINY:
+ B = valp->GetTinyValue() != 0;
+      DataType = TYPE_BOOL;
+      break;
+ case TYPE_INT:
+ N = valp->GetIntValue();
+ DataType = TYPE_INTG;
+ break;
+ case TYPE_BIGINT:
+ LLn = valp->GetBigintValue();
+ DataType = TYPE_BINT;
+ break;
+ default:
+ sprintf(g->Message, "Unsupported typ %d\n", valp->GetType());
+ throw(777);
+ } // endswitch Type
-} // end of SetValue;
+} // end of SetValue
/***********************************************************************/
/* Set the Value's value as the given integer. */
/***********************************************************************/
void JVALUE::SetInteger(PGLOBAL g, int n)
{
- Value = AllocateValue(g, &n, TYPE_INT);
- Jsp = NULL;
+ N = n;
+ DataType = TYPE_INTG;
} // end of SetInteger
/***********************************************************************/
/* Set the Value's Boolean value as a tiny integer. */
/***********************************************************************/
-void JVALUE::SetTiny(PGLOBAL g, char n)
+void JVALUE::SetBool(PGLOBAL g, bool b)
{
- Value = AllocateValue(g, &n, TYPE_TINY);
- Jsp = NULL;
+ B = b;
+ DataType = TYPE_BOOL;
} // end of SetBool
/***********************************************************************/
@@ -1482,8 +1742,8 @@ void JVALUE::SetTiny(PGLOBAL g, char n)
/***********************************************************************/
void JVALUE::SetBigint(PGLOBAL g, long long ll)
{
- Value = AllocateValue(g, &ll, TYPE_BIGINT);
- Jsp = NULL;
+ LLn = ll;
+ DataType = TYPE_BINT;
} // end of SetBigint
/***********************************************************************/
@@ -1491,17 +1751,19 @@ void JVALUE::SetBigint(PGLOBAL g, long long ll)
/***********************************************************************/
void JVALUE::SetFloat(PGLOBAL g, double f)
{
- Value = AllocateValue(g, &f, TYPE_DOUBLE, 6);
- Jsp = NULL;
+ F = f;
+ Nd = 6;
+ DataType = TYPE_DBL;
} // end of SetFloat
/***********************************************************************/
/* Set the Value's value as the given string. */
/***********************************************************************/
-void JVALUE::SetString(PGLOBAL g, PSZ s, short c)
+void JVALUE::SetString(PGLOBAL g, PSZ s, int ci)
{
- Value = AllocateValue(g, s, TYPE_STRING, c);
- Jsp = NULL;
+ Strp = s;
+ Nd = ci;
+ DataType = TYPE_STRG;
} // end of SetString
/***********************************************************************/
@@ -1509,6 +1771,239 @@ void JVALUE::SetString(PGLOBAL g, PSZ s, short c)
/***********************************************************************/
bool JVALUE::IsNull(void)
{
- return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsNull() : true;
+ return (DataType == TYPE_JSON) ? Jsp->IsNull() : DataType == TYPE_NULL;
} // end of IsNull
+
+/* ---------------------------- Class SWAP --------------------------- */
+
+/***********************************************************************/
+/* Replace all pointers by offsets or the opposite. */
+/***********************************************************************/
+void SWAP::SwapJson(PJSON jsp, bool move)
+{
+ if (move)
+ MoffJson(jsp);
+ else
+ MptrJson((PJSON)MakeOff(Base, jsp));
+
+ return;
+} // end of SwapJson
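
SWAP rewrites every pointer inside a sub-allocated JSON tree as an offset from a base address (move == true) or turns the offsets back into pointers (move == false), so the whole tree can be relocated, for instance when the work area holding it is moved or remapped. How SWAP itself is constructed is outside this hunk; a purely illustrative sketch assuming a SWAP *swp already bound to that base:

  // Hypothetical swp set up elsewhere with the area's base address.
  swp->SwapJson(jsp, true);     // pointers -> offsets (MoffJson)
  // ... relocate or remap the block holding the tree ...
  swp->SwapJson(jsp, false);    // offsets -> pointers again (MptrJson),
                                // called with the tree's (new) root address
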
+
+/***********************************************************************/
+/* Replace all pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffJson(PJSON jsp) {
+ size_t res = 0;
+
+ if (jsp)
+ switch (jsp->Type) {
+ case TYPE_JAR:
+ res = MoffArray((PJAR)jsp);
+ break;
+ case TYPE_JOB:
+ res = MoffObject((PJOB)jsp);
+ break;
+ case TYPE_JVAL:
+ res = MoffJValue((PJVAL)jsp);
+ break;
+ default:
+ throw "Invalid json tree";
+ } // endswitch Type
+
+ return res;
+} // end of MoffJson
+
+/***********************************************************************/
+/* Replace all array pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffArray(PJAR jarp)
+{
+ if (jarp->First) {
+ for (int i = 0; i < jarp->Size; i++)
+ jarp->Mvals[i] = (PJVAL)MakeOff(Base, jarp->Mvals[i]);
+
+ jarp->Mvals = (PJVAL*)MakeOff(Base, jarp->Mvals);
+ jarp->First = (PJVAL)MoffJValue(jarp->First);
+ jarp->Last = (PJVAL)MakeOff(Base, jarp->Last);
+ } // endif First
+
+ return MakeOff(Base, jarp);
+} // end of MoffArray
+
+/***********************************************************************/
+/* Replace all object pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffObject(PJOB jobp) {
+ if (jobp->First) {
+ jobp->First = (PJPR)MoffPair(jobp->First);
+ jobp->Last = (PJPR)MakeOff(Base, jobp->Last);
+ } // endif First
+
+ return MakeOff(Base, jobp);
+} // end of MoffObject
+
+/***********************************************************************/
+/* Replace all pair pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffPair(PJPR jpp) {
+ jpp->Key = (PCSZ)MakeOff(Base, (void*)jpp->Key);
+
+ if (jpp->Val)
+ jpp->Val = (PJVAL)MoffJValue(jpp->Val);
+
+ if (jpp->Next)
+ jpp->Next = (PJPR)MoffPair(jpp->Next);
+
+ return MakeOff(Base, jpp);
+} // end of MoffPair
+
+/***********************************************************************/
+/* Replace all json value pointers by offsets. */
+/***********************************************************************/
+size_t SWAP::MoffJValue(PJVAL jvp) {
+ if (!jvp->Del) {
+ if (jvp->DataType == TYPE_JSON)
+ jvp->Jsp = (PJSON)MoffJson(jvp->Jsp);
+ else if (jvp->DataType == TYPE_STRG)
+ jvp->Strp = (PSZ)MakeOff(Base, (jvp->Strp));
+
+// if (jvp->Val)
+// jvp->Val = (PVL)MoffVal(jvp->Val);
+
+ } // endif Del
+
+ if (jvp->Next)
+ jvp->Next = (PJVAL)MoffJValue(jvp->Next);
+
+ return MakeOff(Base, jvp);
+} // end of MoffJValue
+
+#if 0
+/***********************************************************************/
+/* Replace string pointers by offset. */
+/***********************************************************************/
+size_t SWAP::MoffVal(PVL vlp) {
+ if (vlp->Type == TYPE_STRG)
+ vlp->Strp = (PSZ)MakeOff(Base, (vlp->Strp));
+
+ return MakeOff(Base, vlp);
+} // end of MoffVal
+#endif // 0
+
+/***********************************************************************/
+/* Replace all offsets by pointers. */
+/***********************************************************************/
+PJSON SWAP::MptrJson(PJSON ojp) { // ojp is an offset
+ PJSON jsp = (PJSON)MakePtr(Base, (size_t)ojp);
+
+ if (ojp)
+ switch (jsp->Type) {
+ case TYPE_JAR:
+ jsp = MptrArray((PJAR)ojp);
+ break;
+ case TYPE_JOB:
+ jsp = MptrObject((PJOB)ojp);
+ break;
+ case TYPE_JVAL:
+ jsp = MptrJValue((PJVAL)ojp);
+ break;
+ default:
+ throw "Invalid json tree";
+ } // endswitch Type
+
+ return jsp;
+} // end of MptrJson
+
+/***********************************************************************/
+/* Replace all array offsets by pointers. */
+/***********************************************************************/
+PJAR SWAP::MptrArray(PJAR ojar) {
+ PJAR jarp = (PJAR)MakePtr(Base, (size_t)ojar);
+
+ jarp = (PJAR)new((long long)jarp) JARRAY(0);
+
+ if (jarp->First) {
+ jarp->Mvals = (PJVAL*)MakePtr(Base, (size_t)jarp->Mvals);
+
+ for (int i = 0; i < jarp->Size; i++)
+ jarp->Mvals[i] = (PJVAL)MakePtr(Base, (size_t)jarp->Mvals[i]);
+
+ jarp->First = (PJVAL)MptrJValue(jarp->First);
+ jarp->Last = (PJVAL)MakePtr(Base, (size_t)jarp->Last);
+ } // endif First
+
+ return jarp;
+} // end of MptrArray
+
+/***********************************************************************/
+/* Replace all object offsets by pointers. */
+/***********************************************************************/
+PJOB SWAP::MptrObject(PJOB ojob) {
+ PJOB jobp = (PJOB)MakePtr(Base, (size_t)ojob);
+
+ jobp = (PJOB)new((long long)jobp) JOBJECT(0);
+
+ if (jobp->First) {
+ jobp->First = (PJPR)MptrPair(jobp->First);
+ jobp->Last = (PJPR)MakePtr(Base, (size_t)jobp->Last);
+ } // endif First
+
+ return jobp;
+} // end of MptrObject
+
+/***********************************************************************/
+/* Replace all pair offsets by pointers. */
+/***********************************************************************/
+PJPR SWAP::MptrPair(PJPR ojp) {
+ PJPR jpp = (PJPR)MakePtr(Base, (size_t)ojp);
+
+ jpp->Key = (PCSZ)MakePtr(Base, (size_t)jpp->Key);
+
+ if (jpp->Val)
+ jpp->Val = (PJVAL)MptrJValue(jpp->Val);
+
+ if (jpp->Next)
+ jpp->Next = (PJPR)MptrPair(jpp->Next);
+
+ return jpp;
+} // end of MptrPair
+
+/***********************************************************************/
+/* Replace all value offsets by pointers. */
+/***********************************************************************/
+PJVAL SWAP::MptrJValue(PJVAL ojv) {
+ PJVAL jvp = (PJVAL)MakePtr(Base, (size_t)ojv);
+
+ jvp = (PJVAL)new((long long)jvp) JVALUE(0);
+
+ if (!jvp->Del) {
+ if (jvp->DataType == TYPE_JSON)
+ jvp->Jsp = (PJSON)MptrJson(jvp->Jsp);
+ else if (jvp->DataType == TYPE_STRG)
+ jvp->Strp = (PSZ)MakePtr(Base, (size_t)jvp->Strp);
+
+// if (jvp->Val)
+// jvp->Val = (PVL)MptrVal(jvp->Val);
+
+ } // endif Del
+
+ if (jvp->Next)
+ jvp->Next = (PJVAL)MptrJValue(jvp->Next);
+
+ return jvp;
+} // end of MptrJValue
+
+#if 0
+/***********************************************************************/
+/* Replace string offsets by a pointer. */
+/***********************************************************************/
+PVL SWAP::MptrVal(PVL ovl) {
+ PVL vlp = (PVL)MakePtr(Base, (size_t)ovl);
+
+ if (vlp->Type == TYPE_STRG)
+ vlp->Strp = (PSZ)MakePtr(Base, (size_t)vlp->Strp);
+
+ return vlp;
+} // end of MptrVal
+#endif // 0
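+
The Moff*/Mptr* pairs above apply that conversion recursively over arrays, objects, pairs and values, storing each offset back into the pointer field it replaces. A rough standalone illustration of the same round trip on a two-node linked list (invented names, not the CONNECT code):

  #include <cstddef>
  #include <cstdio>
  #include <cstring>
  #include <new>

  struct Node { Node *next; int v; };

  static size_t to_off(void *base, void *p) { return p ? (size_t)((char *)p - (char *)base) : 0; }
  static void  *to_ptr(void *base, size_t o) { return o ? (void *)((char *)base + o) : nullptr; }

  int main() {
    alignas(Node) char arena[128];

    // Build a tiny list inside the arena (offsets 16 and 64 are arbitrary).
    Node *a = new (arena + 16) Node{nullptr, 1};
    Node *b = new (arena + 64) Node{nullptr, 2};
    a->next = b;

    // "Moff" pass: each pointer field is overwritten with its offset,
    // in the way MoffJValue stores (PJVAL)MakeOff(...) back into Next.
    a->next = (Node *)to_off(arena, a->next);
    size_t head_off = to_off(arena, a);

    // The arena is now position independent: move it somewhere else.
    alignas(Node) char moved[128];
    std::memcpy(moved, arena, sizeof arena);

    // "Mptr" pass against the new base: offsets become valid pointers again.
    Node *head = (Node *)to_ptr(moved, head_off);
    head->next = (Node *)to_ptr(moved, (size_t)head->next);

    for (Node *n = head; n; n = n->next)
      std::printf("%d\n", n->v);                     // prints 1 then 2
    return 0;
  }

In the real class, the constructor sets Base to (char*)jsp - 8, presumably to cover a small header that precedes the first sub-allocated node, so the whole tree plus that header travels as one relocatable blob.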
diff --git a/storage/connect/json.h b/storage/connect/json.h
index bc94b372133..3a026f5df22 100644
--- a/storage/connect/json.h
+++ b/storage/connect/json.h
@@ -5,8 +5,10 @@
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
+#pragma once
#include <mysql_com.h>
#include "value.h"
+#include "xobject.h"
#if defined(_DEBUG)
#define X assert(false);
@@ -14,240 +16,147 @@
#define X
#endif
-enum JTYP {TYPE_NULL = TYPE_VOID,
- TYPE_STRG = TYPE_STRING,
- TYPE_DBL = TYPE_DOUBLE,
- TYPE_BOOL = TYPE_TINY,
- TYPE_BINT = TYPE_BIGINT,
- TYPE_DTM = TYPE_DATE,
- TYPE_INTG = TYPE_INT,
- TYPE_VAL = 12,
- TYPE_JSON,
- TYPE_JAR,
- TYPE_JOB,
- TYPE_JVAL};
-
+enum JTYP {
+ TYPE_NULL = TYPE_VOID,
+ TYPE_STRG = TYPE_STRING,
+ TYPE_DBL = TYPE_DOUBLE,
+ TYPE_BOOL = TYPE_TINY,
+ TYPE_BINT = TYPE_BIGINT,
+ TYPE_INTG = TYPE_INT,
+ TYPE_DTM = TYPE_DATE,
+ TYPE_FLOAT,
+ TYPE_JAR,
+ TYPE_JOB,
+ TYPE_JVAL,
+ TYPE_JSON,
+ TYPE_DEL,
+ TYPE_UNKNOWN
+};
+
+class JDOC;
class JOUT;
class JSON;
-class JMAP;
class JVALUE;
class JOBJECT;
class JARRAY;
-typedef class JPAIR *PJPR;
+typedef class JDOC *PJDOC;
typedef class JSON *PJSON;
typedef class JVALUE *PJVAL;
typedef class JOBJECT *PJOB;
typedef class JARRAY *PJAR;
-typedef struct {
- char *str;
- int len;
- } STRG, *PSG;
-
-// BSON size should be equal on Linux and Windows
-#define BMX 255
-typedef struct BSON* PBSON;
+typedef struct JPAIR *PJPR;
+//typedef struct VAL *PVL;
/***********************************************************************/
-/* Structure used to return binary json to Json UDF functions. */
+/* Structure JPAIR. The pairs of a json Object. */
/***********************************************************************/
-struct BSON {
- char Msg[BMX + 1];
- char *Filename;
- PGLOBAL G;
- int Pretty;
- ulong Reslen;
- my_bool Changed;
- PJSON Top;
- PJSON Jsp;
- PBSON Bsp;
-}; // end of struct BSON
-
-PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp);
+struct JPAIR {
+ PCSZ Key; // This pair key name
+ PJVAL Val; // To the value of the pair
+ PJPR Next; // To the next pair
+}; // end of struct JPAIR
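+
Since JPAIR is now a plain struct, an object is just a singly linked list of pairs reached from the owning JOBJECT's First and Last members. A minimal standalone sketch of that build-and-walk pattern (hypothetical mini types, not the real classes):

  #include <cstdio>

  struct Pair { const char *key; int val; Pair *next; };
  struct Obj  { Pair *first; Pair *last; };

  static void append(Obj &o, Pair *p) {        // First/Last bookkeeping in the style of AddPair
    if (o.last) o.last->next = p; else o.first = p;
    o.last = p;
  }

  int main() {
    Pair a{"x", 1, nullptr}, b{"y", 2, nullptr};
    Obj  o{nullptr, nullptr};
    append(o, &a);
    append(o, &b);

    for (Pair *p = o.first; p; p = p->next)    // the First/Next walk used throughout json.cpp
      std::printf("%s=%d\n", p->key, p->val);  // x=1  y=2
    return 0;
  }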
+//PVL AllocVal(PGLOBAL g, JTYP type);
char *NextChr(PSZ s, char sep);
char *GetJsonNull(void);
+const char* GetFmt(int type, bool un);
-PJSON ParseJson(PGLOBAL g, char* s, int n, int* prty = NULL, bool* b = NULL);
+PJSON ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL);
PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty);
-bool SerializeArray(JOUT *js, PJAR jarp, bool b);
-bool SerializeObject(JOUT *js, PJOB jobp);
-bool SerializeValue(JOUT *js, PJVAL jvp);
-char *NextChr(PSZ s, char sep);
DllExport bool IsNum(PSZ s);
/***********************************************************************/
-/* Class JOUT. Used by Serialize. */
-/***********************************************************************/
-class JOUT : public BLOCK {
- public:
- JOUT(PGLOBAL gp) : BLOCK() {g = gp; Pretty = 3;}
-
- virtual bool WriteStr(const char *s) = 0;
- virtual bool WriteChr(const char c) = 0;
- virtual bool Escape(const char *s) = 0;
- int Prty(void) {return Pretty;}
-
- // Member
- PGLOBAL g;
- int Pretty;
-}; // end of class JOUT
-
-/***********************************************************************/
-/* Class JOUTSTR. Used to Serialize to a string. */
-/***********************************************************************/
-class JOUTSTR : public JOUT {
- public:
- JOUTSTR(PGLOBAL g);
-
- virtual bool WriteStr(const char *s);
- virtual bool WriteChr(const char c);
- virtual bool Escape(const char *s);
-
- // Member
- char *Strp; // The serialized string
- size_t N; // Position of next char
- size_t Max; // String max size
-}; // end of class JOUTSTR
-
-/***********************************************************************/
-/* Class JOUTFILE. Used to Serialize to a file. */
-/***********************************************************************/
-class JOUTFILE : public JOUT {
- public:
- JOUTFILE(PGLOBAL g, FILE *str, int pty) : JOUT(g) {Stream = str; Pretty = pty;}
-
- virtual bool WriteStr(const char *s);
- virtual bool WriteChr(const char c);
- virtual bool Escape(const char *s);
-
- // Member
- FILE *Stream;
-}; // end of class JOUTFILE
-
-/***********************************************************************/
-/* Class JOUTPRT. Used to Serialize to a pretty file. */
-/***********************************************************************/
-class JOUTPRT : public JOUTFILE {
- public:
- JOUTPRT(PGLOBAL g, FILE *str) : JOUTFILE(g, str, 2) {M = 0; B = false;}
-
- virtual bool WriteStr(const char *s);
- virtual bool WriteChr(const char c);
-
- // Member
- int M;
- bool B;
-}; // end of class JOUTPRT
-
-/***********************************************************************/
-/* Class PAIR. The pairs of a json Object. */
+/* Class JDOC. The class for parsing and serializing json documents. */
/***********************************************************************/
-class JPAIR : public BLOCK {
- friend class JOBJECT;
- friend class JSNX;
- friend class JSON;
- friend bool SerializeObject(JOUT *, PJOB);
- public:
- JPAIR(PCSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;}
-
- inline PCSZ GetKey(void) {return Key;}
- inline PJVAL GetVal(void) {return Val;}
- inline PJPR GetNext(void) {return Next;}
+class JDOC: public BLOCK {
+ friend PJSON ParseJson(PGLOBAL, char*, size_t, int*, bool*);
+ friend PSZ Serialize(PGLOBAL, PJSON, char*, int);
+public:
+ JDOC(void) : js(NULL), s(NULL), len(0), pty(NULL) {}
- protected:
- PCSZ Key; // This pair key name
- PJVAL Val; // To the value of the pair
- PJPR Next; // To the next pair
-}; // end of class JPAIR
-
-/***********************************************************************/
-/* Class JSON. The base class for all other json classes. */
-/***********************************************************************/
-class JSON : public BLOCK {
- friend PJSON ParseJson(PGLOBAL, char*, int, int*, bool*);
- public:
- JSON(void) : s(NULL), len(0), pty(NULL) {Size = 0;}
-
- int size(void) {return Size;}
- virtual int GetSize(bool b) {return Size;}
- virtual void Clear(void) {Size = 0;}
- virtual JTYP GetType(void) {return TYPE_JSON;}
- virtual JTYP GetValType(void) {X return TYPE_JSON;}
- virtual void InitArray(PGLOBAL g) {X}
-//virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;}
- virtual PJPR AddPair(PGLOBAL g, PCSZ key) {X return NULL;}
- virtual PJAR GetKeyList(PGLOBAL g) {X return NULL;}
- virtual PJAR GetValList(PGLOBAL g) {X return NULL;}
- virtual PJVAL GetValue(const char *key) {X return NULL;}
- virtual PJOB GetObject(void) {return NULL;}
- virtual PJAR GetArray(void) {return NULL;}
- virtual PJVAL GetValue(int i) {X return NULL;}
- virtual PVAL GetValue(void) {X return NULL;}
- virtual PJSON GetJsp(void) { X return NULL; }
- virtual PJSON GetJson(void) { X return NULL; }
- virtual PJPR GetFirst(void) {X return NULL;}
- virtual int GetInteger(void) {X return 0;}
- virtual double GetFloat() {X return 0.0;}
- virtual PSZ GetString(PGLOBAL g) {X return NULL;}
- virtual PSZ GetText(PGLOBAL g, PSZ text) {X return NULL;}
- virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; }
- virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; }
- virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) {X}
- virtual void SetValue(PVAL valp) {X}
- virtual void SetValue(PJSON jsp) {X}
- virtual void SetString(PGLOBAL g, PSZ s, short c) {X}
- virtual void SetInteger(PGLOBAL g, int n) {X}
- virtual void SetFloat(PGLOBAL g, double f) {X}
- virtual void DeleteKey(PCSZ k) {X}
- virtual bool DeleteValue(int i) {X return true;}
- virtual bool IsNull(void) {X return true;}
+ void SetJp(JOUT* jp) { js = jp; }
protected:
PJAR ParseArray(PGLOBAL g, int& i);
PJOB ParseObject(PGLOBAL g, int& i);
PJVAL ParseValue(PGLOBAL g, int& i);
char *ParseString(PGLOBAL g, int& i);
- PVAL ParseNumeric(PGLOBAL g, int& i);
+ void ParseNumeric(PGLOBAL g, int& i, PJVAL jvp);
PJAR ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp);
+ bool SerializeArray(PJAR jarp, bool b);
+ bool SerializeObject(PJOB jobp);
+ bool SerializeValue(PJVAL jvp);
- // Members
- int Size;
-
- // Only used when parsing
+ // Members used when parsing and serializing
private:
+ JOUT* js;
char *s;
int len;
bool *pty;
+}; // end of class JDOC
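+
All of JDOC's ParseXxx helpers take the scan position as int& i, so each one consumes its slice of the input and leaves the cursor where the caller should continue. A tiny standalone example of that cursor-by-reference style on a toy bracket grammar (this is not CONNECT's parser, only the calling convention):

  #include <cctype>
  #include <cstdio>
  #include <cstring>

  static int parse_value(const char *s, int &i);       // forward declaration

  static int parse_array(const char *s, int &i) {      // assumes s[i] == '['
    int count = 0;
    for (++i; s[i] && s[i] != ']'; ) {
      count += parse_value(s, i);
      if (s[i] == ',') ++i;                            // skip separators
    }
    if (s[i] == ']') ++i;                              // consume the closing bracket
    return count;
  }

  static int parse_value(const char *s, int &i) {
    if (s[i] == '[') return parse_array(s, i);
    while (std::isdigit((unsigned char)s[i])) ++i;     // a bare number
    return 1;
  }

  int main() {
    const char *s = "[1,[2,3],4]";
    int i = 0;
    int n = parse_array(s, i);
    std::printf("%d scalars, cursor at %d of %zu\n", n, i, std::strlen(s));  // 4 scalars, 11 of 11
    return 0;
  }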
+
+/***********************************************************************/
+/* Class JSON. The base class for all other json classes. */
+/***********************************************************************/
+class JSON : public BLOCK {
+public:
+ // Constructor
+ JSON(void) { Type = TYPE_JSON; }
+ JSON(int) {}
+
+ // Implementation
+ inline JTYP GetType(void) { return Type; }
+
+ // Methods
+ virtual int size(void) { return 1; }
+ virtual void Clear(void) { X }
+ virtual PJOB GetObject(void) { return NULL; }
+ virtual PJAR GetArray(void) { return NULL; }
+ virtual PJVAL GetArrayValue(int i) { X return NULL; }
+ virtual int GetSize(bool b) { X return 0; }
+ virtual PJSON GetJsp(void) { X return NULL; }
+ virtual PJPR GetFirst(void) { X return NULL; }
+ virtual PSZ GetText(PGLOBAL g, PSTRG text) { X return NULL; }
+ virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; }
+ virtual void SetValue(PJSON jsp) { X }
+ virtual bool DeleteValue(int i) { X return true; }
+ virtual bool IsNull(void) { X return true; }
+
+ // Members
+ JTYP Type;
}; // end of class JSON
/***********************************************************************/
/* Class JOBJECT: contains a list of value pairs. */
/***********************************************************************/
class JOBJECT : public JSON {
- friend bool SerializeObject(JOUT *, PJOB);
+ friend class JDOC;
friend class JSNX;
- public:
- JOBJECT(void) : JSON() {First = Last = NULL;}
-
- using JSON::GetValue;
- using JSON::SetValue;
- virtual void Clear(void) {First = Last = NULL; Size = 0;}
- virtual JTYP GetType(void) {return TYPE_JOB;}
+ friend class SWAP;
+public:
+ JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; }
+ JOBJECT(int i) : JSON(i) {}
+
+ // Methods
+ virtual void Clear(void) {First = Last = NULL;}
+//virtual JTYP GetValType(void) {return TYPE_JOB;}
virtual PJPR GetFirst(void) {return First;}
virtual int GetSize(bool b);
- virtual PJPR AddPair(PGLOBAL g, PCSZ key);
virtual PJOB GetObject(void) {return this;}
- virtual PJVAL GetValue(const char* key);
- virtual PJAR GetKeyList(PGLOBAL g);
- virtual PJAR GetValList(PGLOBAL g);
- virtual PSZ GetText(PGLOBAL g, PSZ text);
+ virtual PSZ GetText(PGLOBAL g, PSTRG text);
virtual bool Merge(PGLOBAL g, PJSON jsp);
- virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key);
- virtual void DeleteKey(PCSZ k);
virtual bool IsNull(void);
+ // Specific
+ PJPR AddPair(PGLOBAL g, PCSZ key);
+ PJVAL GetKeyValue(const char* key);
+ PJAR GetKeyList(PGLOBAL g);
+ PJAR GetValList(PGLOBAL g);
+ void SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key);
+ void DeleteKey(PCSZ k);
+
protected:
PJPR First;
PJPR Last;
@@ -257,27 +166,30 @@ class JOBJECT : public JSON {
/* Class JARRAY. */
/***********************************************************************/
class JARRAY : public JSON {
- friend PJAR ParseArray(PGLOBAL, int&, STRG&, bool*);
+ friend class SWAP;
public:
- JARRAY(void) : JSON() {Alloc = 0; First = Last = NULL; Mvals = NULL;}
+ JARRAY(void);
+ JARRAY(int i) : JSON(i) {}
- using JSON::GetValue;
- using JSON::SetValue;
+ // Methods
virtual void Clear(void) {First = Last = NULL; Size = 0;}
- virtual JTYP GetType(void) {return TYPE_JAR;}
+ virtual int size(void) { return Size; }
virtual PJAR GetArray(void) {return this;}
virtual int GetSize(bool b);
- PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL);
- virtual void InitArray(PGLOBAL g);
- virtual PJVAL GetValue(int i);
- virtual PSZ GetText(PGLOBAL g, PSZ text);
+ virtual PJVAL GetArrayValue(int i);
+ virtual PSZ GetText(PGLOBAL g, PSTRG text);
virtual bool Merge(PGLOBAL g, PJSON jsp);
- virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i);
virtual bool DeleteValue(int n);
virtual bool IsNull(void);
+ // Specific
+ PJVAL AddArrayValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL);
+ bool SetArrayValue(PGLOBAL g, PJVAL jvp, int i);
+ void InitArray(PGLOBAL g);
+
protected:
// Members
+ int Size; // The number of items in the array
int Alloc; // The Mvals allocated size
PJVAL First; // Used when constructing
PJVAL Last; // Last constructed value
@@ -290,43 +202,161 @@ class JARRAY : public JSON {
class JVALUE : public JSON {
friend class JARRAY;
friend class JSNX;
+ friend class JSONDISC;
friend class JSONCOL;
friend class JSON;
- friend bool SerializeValue(JOUT*, PJVAL);
- public:
- JVALUE(void) : JSON() {Clear();}
+ friend class JDOC;
+ friend class SWAP;
+public:
+ JVALUE(void) : JSON() { Type = TYPE_JVAL; Clear(); }
JVALUE(PJSON jsp);
+//JVALUE(PGLOBAL g, PVL vlp);
JVALUE(PGLOBAL g, PVAL valp);
JVALUE(PGLOBAL g, PCSZ strp);
+ JVALUE(int i) : JSON(i) {}
+
+ //using JSON::GetVal;
+ //using JSON::SetVal;
- using JSON::GetValue;
- using JSON::SetValue;
- virtual void Clear(void)
- {Jsp = NULL; Value = NULL; Next = NULL; Del = false; Size = 1;}
- virtual JTYP GetType(void) {return TYPE_JVAL;}
+ // Methods
+ virtual void Clear(void);
+//virtual JTYP GetType(void) {return TYPE_JVAL;}
virtual JTYP GetValType(void);
virtual PJOB GetObject(void);
virtual PJAR GetArray(void);
- virtual PVAL GetValue(void) {return Value;}
- virtual PJSON GetJsp(void) {return Jsp;}
- virtual PJSON GetJson(void) { return (Jsp ? Jsp : this); }
- virtual int GetInteger(void);
- virtual long long GetBigint(void);
- virtual double GetFloat(void);
- virtual PSZ GetString(PGLOBAL g);
- virtual PSZ GetText(PGLOBAL g, PSZ text);
- virtual void SetValue(PJSON jsp);
- virtual void SetValue(PVAL valp) { Value = valp; Jsp = NULL; }
- virtual void SetString(PGLOBAL g, PSZ s, short c = 0);
- virtual void SetInteger(PGLOBAL g, int n);
- virtual void SetBigint(PGLOBAL g, longlong ll);
- virtual void SetFloat(PGLOBAL g, double f);
- virtual void SetTiny(PGLOBAL g, char f);
+ virtual PJSON GetJsp(void) {return (DataType == TYPE_JSON ? Jsp : NULL);}
+ virtual PSZ GetText(PGLOBAL g, PSTRG text);
virtual bool IsNull(void);
+ // Specific
+ //inline PVL GetVal(void) { return Val; }
+ //inline void SetVal(PVL vlp) { Val = vlp; }
+ inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? Jsp : this); }
+ PSZ GetString(PGLOBAL g, char* buff = NULL);
+ int GetInteger(void);
+ long long GetBigint(void);
+ double GetFloat(void);
+ PVAL GetValue(PGLOBAL g);
+ void SetValue(PJSON jsp);
+ void SetValue(PGLOBAL g, PVAL valp);
+ void SetString(PGLOBAL g, PSZ s, int ci = 0);
+ void SetInteger(PGLOBAL g, int n);
+ void SetBigint(PGLOBAL g, longlong ll);
+ void SetFloat(PGLOBAL g, double f);
+ void SetBool(PGLOBAL g, bool b);
+
protected:
- PJSON Jsp; // To the json value
- PVAL Value; // The numeric value
- PJVAL Next; // Next value in array
- bool Del; // True when deleted
+ union {
+ PJSON Jsp; // To the json value
+ char *Strp; // Ptr to a string
+ int N; // An integer value
+ long long LLn; // A big integer value
+ double F; // A (double) float value
+ bool B; // True or false
+ };
+//PVL Val; // To the string or numeric value
+ PJVAL Next; // Next value in array
+ JTYP DataType; // The data value type
+ int Nd; // Decimal number
+ bool Del; // True when deleted
}; // end of class JVALUE
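+
The old PVAL member is gone: JVALUE now keeps its scalar directly in the union and tags the live member with DataType, which is why SetBigint, SetFloat and SetString above no longer call AllocateValue. A miniature, purely illustrative version of that tagged-union discipline:

  #include <cstdio>

  // Hypothetical mini value: exactly one union member is meaningful at a time,
  // and the tag (DataType in the real class) says which one.
  struct MiniVal {
    enum Tag { NUL, INT, BIG, STR } tag = NUL;
    union { int n; long long lln; const char *str; };

    void set_int(int v)         { n = v;   tag = INT; }
    void set_big(long long v)   { lln = v; tag = BIG; }
    void set_str(const char *s) { str = s; tag = STR; }

    long long as_big() const {               // widening read, like GetBigint presumably does
      switch (tag) {
        case INT: return n;
        case BIG: return lln;
        default:  return 0;                  // not a number: behave as null/0
      }
    }
  };

  int main() {
    MiniVal v;
    v.set_int(7);
    std::printf("%lld\n", v.as_big());       // 7
    v.set_str("hello");
    std::printf("%lld\n", v.as_big());       // 0, because the tag says it is a string
    return 0;
  }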
+
+
+/***********************************************************************/
+/* Class JOUT. Used by Serialize. */
+/***********************************************************************/
+class JOUT : public BLOCK {
+public:
+ JOUT(PGLOBAL gp) : BLOCK() { g = gp; Pretty = 3; }
+
+ virtual bool WriteStr(const char* s) = 0;
+ virtual bool WriteChr(const char c) = 0;
+ virtual bool Escape(const char* s) = 0;
+ int Prty(void) { return Pretty; }
+
+ // Member
+ PGLOBAL g;
+ int Pretty;
+}; // end of class JOUT
+
+/***********************************************************************/
+/* Class JOUTSTR. Used to Serialize to a string. */
+/***********************************************************************/
+class JOUTSTR : public JOUT {
+public:
+ JOUTSTR(PGLOBAL g);
+
+ virtual bool WriteStr(const char* s);
+ virtual bool WriteChr(const char c);
+ virtual bool Escape(const char* s);
+
+ // Member
+ char* Strp; // The serialized string
+ size_t N; // Position of next char
+ size_t Max; // String max size
+}; // end of class JOUTSTR
+
+/***********************************************************************/
+/* Class JOUTFILE. Used to Serialize to a file. */
+/***********************************************************************/
+class JOUTFILE : public JOUT {
+public:
+ JOUTFILE(PGLOBAL g, FILE* str, int pty) : JOUT(g) { Stream = str; Pretty = pty; }
+
+ virtual bool WriteStr(const char* s);
+ virtual bool WriteChr(const char c);
+ virtual bool Escape(const char* s);
+
+ // Member
+ FILE* Stream;
+}; // end of class JOUTFILE
+
+/***********************************************************************/
+/* Class JOUTPRT. Used to Serialize to a pretty file. */
+/***********************************************************************/
+class JOUTPRT : public JOUTFILE {
+public:
+ JOUTPRT(PGLOBAL g, FILE* str) : JOUTFILE(g, str, 2) { M = 0; B = false; }
+
+ virtual bool WriteStr(const char* s);
+ virtual bool WriteChr(const char c);
+
+ // Member
+ int M;
+ bool B;
+}; // end of class JOUTPRT
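+
The JOUT hierarchy, moved here from the top of the header, is a plain sink interface: the serializing code only ever calls WriteStr, WriteChr and Escape, and choosing JOUTSTR, JOUTFILE or JOUTPRT decides whether the output lands in memory, in a file, or in a pretty-printed file. A minimal standalone sketch of the same pattern (hypothetical classes; the real JOUTSTR grows a buffer from the plugin's memory pool rather than a std::string):

  #include <cstdio>
  #include <string>

  struct Sink {                                         // what a serializer needs from a target
    virtual bool WriteStr(const char *s) = 0;           // here, true signals a failed write
    virtual bool WriteChr(char c) = 0;
    virtual ~Sink() = default;
  };

  struct StringSink : Sink {                            // ~ JOUTSTR
    std::string out;
    bool WriteStr(const char *s) override { out += s; return false; }
    bool WriteChr(char c) override { out += c; return false; }
  };

  struct FileSink : Sink {                              // ~ JOUTFILE
    FILE *f;
    explicit FileSink(FILE *fp) : f(fp) {}
    bool WriteStr(const char *s) override { return std::fputs(s, f) < 0; }
    bool WriteChr(char c) override { return std::fputc(c, f) == EOF; }
  };

  static bool emit_pair(Sink &js, const char *key, const char *val) {
    return js.WriteChr('"') || js.WriteStr(key) || js.WriteStr("\":\"")
        || js.WriteStr(val) || js.WriteChr('"');
  }

  int main() {
    StringSink ss;
    emit_pair(ss, "name", "connect");
    std::printf("%s\n", ss.out.c_str());                // "name":"connect"

    FileSink fs(stdout);
    emit_pair(fs, "name", "connect");                   // same code, different sink
    std::printf("\n");
    return 0;
  }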
+
+
+/***********************************************************************/
+/* Class SWAP. Used to make or unmake a JSON tree movable. */
+/* This is done by converting all pointers to offsets and back. */
+/***********************************************************************/
+class SWAP : public BLOCK {
+public:
+ // Constructor
+ SWAP(PGLOBAL g, PJSON jsp)
+ {
+ G = g, Base = (char*)jsp - 8;
+ }
+
+ // Methods
+ void SwapJson(PJSON jsp, bool move);
+
+protected:
+ size_t MoffJson(PJSON jnp);
+ size_t MoffArray(PJAR jarp);
+ size_t MoffObject(PJOB jobp);
+ size_t MoffJValue(PJVAL jvp);
+ size_t MoffPair(PJPR jpp);
+//size_t MoffVal(PVL vlp);
+ PJSON MptrJson(PJSON jnp);
+ PJAR MptrArray(PJAR jarp);
+ PJOB MptrObject(PJOB jobp);
+ PJVAL MptrJValue(PJVAL jvp);
+ PJPR MptrPair(PJPR jpp);
+//PVL MptrVal(PVL vlp);
+
+ // Member
+ PGLOBAL G;
+ void *Base;
+}; // end of class SWAP
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 44028a32564..9104277489d 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -27,12 +27,6 @@
#endif
#define M 9
-bool IsNum(PSZ s);
-char *NextChr(PSZ s, char sep);
-char *GetJsonNull(void);
-uint GetJsonGrpSize(void);
-static int IsJson(UDF_ARGS *args, uint i, bool b = false);
-static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i);
static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error);
static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
@@ -40,8 +34,10 @@ static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
static PJSON JsonNew(PGLOBAL g, JTYP type);
static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp = NULL);
static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len = 64);
+uint GetJsonGroupSize(void);
+static void SetChanged(PBSON bsp);
-static uint JsonGrpSize = 10;
+uint JsonGrpSize = 10;
/*********************************************************************************/
/* SubAlloc a new JSNX class with protection against memory exhaustion. */
@@ -63,7 +59,7 @@ static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len)
return jsx;
} /* end of JsnxNew */
- /* ----------------------------------- JSNX ------------------------------------ */
+/* ----------------------------------- JSNX ------------------------------------ */
/*********************************************************************************/
/* JSNX public constructor. */
@@ -347,7 +343,7 @@ PVAL JSNX::MakeJson(PGLOBAL g, PJSON jsp)
/*********************************************************************************/
/* SetValue: Set a value from what a JVALUE contains. */
/*********************************************************************************/
-void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
+void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val)
{
if (val) {
vp->SetNull(false);
@@ -355,11 +351,20 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
if (Jb) {
vp->SetValue_psz(Serialize(g, val->GetJsp(), NULL, 0));
} else switch (val->GetValType()) {
+ case TYPE_DTM:
case TYPE_STRG:
+ vp->SetValue_psz(val->GetString(g));
+ break;
case TYPE_INTG:
case TYPE_BINT:
+ vp->SetValue(val->GetInteger());
+ break;
case TYPE_DBL:
- vp->SetValue_pval(val->GetValue());
+ if (vp->IsTypeNum())
+ vp->SetValue(val->GetFloat());
+ else // Get the proper number of decimals
+ vp->SetValue_psz(val->GetString(g));
+
break;
case TYPE_BOOL:
if (vp->IsTypeNum())
@@ -369,14 +374,11 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
break;
case TYPE_JAR:
- SetJsonValue(g, vp, val->GetArray()->GetValue(0), n);
+ vp->SetValue_psz(val->GetArray()->GetText(g, NULL));
break;
case TYPE_JOB:
-// if (!vp->IsTypeNum() || !Strict) {
vp->SetValue_psz(val->GetObject()->GetText(g, NULL));
break;
-// } // endif Type
-
case TYPE_NULL:
vp->SetNull(true);
/* falls through */
@@ -412,11 +414,10 @@ void JSNX::ReadValue(PGLOBAL g)
/*********************************************************************************/
PVAL JSNX::GetColumnValue(PGLOBAL g, PJSON row, int i)
{
- int n = Nod - 1;
PJVAL val = NULL;
val = GetRowValue(g, row, i);
- SetJsonValue(g, Value, val, n);
+ SetJsonValue(g, Value, val);
return Value;
} // end of GetColumnValue
@@ -430,7 +431,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
for (; i < Nod && row; i++) {
if (Nodes[i].Op == OP_NUM) {
- Value->SetValue(row->GetType() == TYPE_JAR ? row->size() : 1);
+ Value->SetValue(row->GetType() == TYPE_JAR ? ((PJAR)row)->size() : 1);
val = new(g) JVALUE(g, Value);
return val;
} else if (Nodes[i].Op == OP_XX) {
@@ -452,7 +453,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
} //endif Op
} else
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
@@ -460,7 +461,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
if (!Nodes[i].Key) {
if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE)
- val = arp->GetValue(Nodes[i].Rank);
+ val = arp->GetArrayValue(Nodes[i].Rank);
else if (Nodes[i].Op == OP_EXP)
return (PJVAL)ExpandArray(g, arp, i);
else
@@ -468,7 +469,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
} else {
// Unexpected array, unwrap it as [0]
- val = arp->GetValue(0);
+ val = arp->GetArrayValue(0);
i--;
} // endif's
@@ -488,7 +489,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
} // endfor i
- // SetJsonValue(g, Value, val, n);
+ // SetJsonValue(g, Value, val);
return val;
} // end of GetRowValue
@@ -519,17 +520,17 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
htrc("CalculateArray size=%d op=%d\n", ars, op);
for (i = 0; i < ars; i++) {
- jvrp = arp->GetValue(i);
+ jvrp = arp->GetArrayValue(i);
if (trace(1))
htrc("i=%d nv=%d\n", i, nv);
if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) {
if (jvrp->IsNull()) {
- jvrp->Value = AllocateValue(g, GetJsonNull(), TYPE_STRING);
+ jvrp->SetString(g, GetJsonNull(), 0);
jvp = jvrp;
} else if (n < Nod - 1 && jvrp->GetJson()) {
- jval.SetValue(GetColumnValue(g, jvrp->GetJson(), n + 1));
+ jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1));
jvp = &jval;
} else
jvp = jvrp;
@@ -539,10 +540,10 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
jvp->GetString(g), jvp->IsNull() ? 1 : 0);
if (!nv++) {
- SetJsonValue(g, vp, jvp, n);
+ SetJsonValue(g, vp, jvp);
continue;
} else
- SetJsonValue(g, MulVal, jvp, n);
+ SetJsonValue(g, MulVal, jvp);
if (!MulVal->IsNull()) {
switch (op) {
@@ -612,13 +613,13 @@ my_bool JSNX::CheckPath(PGLOBAL g)
} else switch (row->GetType()) {
case TYPE_JOB:
if (Nodes[i].Key)
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
if (!Nodes[i].Key)
if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE)
- val = ((PJAR)row)->GetValue(Nodes[i].Rank);
+ val = ((PJAR)row)->GetArrayValue(Nodes[i].Rank);
break;
case TYPE_JVAL:
@@ -655,20 +656,20 @@ PJSON JSNX::GetRow(PGLOBAL g)
// Expected Array was not there, wrap the value
continue;
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
arp = (PJAR)row;
if (!Nodes[i].Key) {
if (Nodes[i].Op == OP_EQ)
- val = arp->GetValue(Nodes[i].Rank);
+ val = arp->GetArrayValue(Nodes[i].Rank);
else
- val = arp->GetValue(Nodes[i].Rx);
+ val = arp->GetArrayValue(Nodes[i].Rx);
} else {
// Unexpected array, unwrap it as [0]
- val = arp->GetValue(0);
+ val = arp->GetArrayValue(0);
i--;
} // endif Nodes
@@ -695,9 +696,9 @@ PJSON JSNX::GetRow(PGLOBAL g)
nwr = new(g)JOBJECT;
if (row->GetType() == TYPE_JOB) {
- ((PJOB)row)->SetValue(g, new(g)JVALUE(nwr), Nodes[i-1].Key);
+ ((PJOB)row)->SetKeyValue(g, new(g)JVALUE(nwr), Nodes[i-1].Key);
} else if (row->GetType() == TYPE_JAR) {
- ((PJAR)row)->AddValue(g, new(g)JVALUE(nwr));
+ ((PJAR)row)->AddArrayValue(g, new(g)JVALUE(nwr));
((PJAR)row)->InitArray(g);
} else {
strcpy(g->Message, "Wrong type when writing new row");
@@ -740,16 +741,16 @@ my_bool JSNX::WriteValue(PGLOBAL g, PJVAL jvalp)
if (arp) {
if (!Nodes[Nod-1].Key) {
if (Nodes[Nod-1].Op == OP_EQ)
- arp->SetValue(g, jvalp, Nodes[Nod-1].Rank);
+ arp->SetArrayValue(g, jvalp, Nodes[Nod-1].Rank);
else
- arp->AddValue(g, jvalp);
+ arp->AddArrayValue(g, jvalp);
arp->InitArray(g);
} // endif Key
} else if (objp) {
if (Nodes[Nod-1].Key)
- objp->SetValue(g, jvalp, Nodes[Nod-1].Key);
+ objp->SetKeyValue(g, jvalp, Nodes[Nod-1].Key);
} else if (jvp)
jvp->SetValue(jvalp);
@@ -781,13 +782,13 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k)
switch (jsp->GetType()) {
case TYPE_JAR:
- err = LocateArray((PJAR)jsp);
+ err = LocateArray(g, (PJAR)jsp);
break;
case TYPE_JOB:
- err = LocateObject((PJOB)jsp);
+ err = LocateObject(g, (PJOB)jsp);
break;
case TYPE_JVAL:
- err = LocateValue((PJVAL)jsp);
+ err = LocateValue(g, (PJVAL)jsp);
break;
default:
err = true;
@@ -818,7 +819,7 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k)
/*********************************************************************************/
/* Locate in a JSON Array. */
/*********************************************************************************/
-my_bool JSNX::LocateArray(PJAR jarp)
+my_bool JSNX::LocateArray(PGLOBAL g, PJAR jarp)
{
char s[16];
size_t m = Jp->N;
@@ -830,7 +831,7 @@ my_bool JSNX::LocateArray(PJAR jarp)
if (Jp->WriteStr(s))
return true;
- if (LocateValue(jarp->GetValue(i)))
+ if (LocateValue(g, jarp->GetArrayValue(i)))
return true;
} // endfor i
@@ -841,7 +842,7 @@ my_bool JSNX::LocateArray(PJAR jarp)
/*********************************************************************************/
/* Locate in a JSON Object. */
/*********************************************************************************/
-my_bool JSNX::LocateObject(PJOB jobp)
+my_bool JSNX::LocateObject(PGLOBAL g, PJOB jobp)
{
size_t m;
@@ -856,7 +857,7 @@ my_bool JSNX::LocateObject(PJOB jobp)
if (Jp->WriteStr(pair->Key))
return true;
- if (LocateValue(pair->Val))
+ if (LocateValue(g, pair->Val))
return true;
} // endfor i
@@ -867,14 +868,14 @@ my_bool JSNX::LocateObject(PJOB jobp)
/*********************************************************************************/
/* Locate a JSON Value. */
/*********************************************************************************/
-my_bool JSNX::LocateValue(PJVAL jvp)
+my_bool JSNX::LocateValue(PGLOBAL g, PJVAL jvp)
{
- if (CompareTree(Jvalp, jvp))
+ if (CompareTree(g, Jvalp, jvp))
Found = (--K == 0);
else if (jvp->GetArray())
- return LocateArray(jvp->GetArray());
+ return LocateArray(g, jvp->GetArray());
else if (jvp->GetObject())
- return LocateObject(jvp->GetObject());
+ return LocateObject(g, jvp->GetObject());
return false;
} // end of LocateValue
@@ -907,13 +908,13 @@ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx)
switch (jsp->GetType()) {
case TYPE_JAR:
- err = LocateArrayAll((PJAR)jsp);
+ err = LocateArrayAll(g, (PJAR)jsp);
break;
case TYPE_JOB:
- err = LocateObjectAll((PJOB)jsp);
+ err = LocateObjectAll(g, (PJOB)jsp);
break;
case TYPE_JVAL:
- err = LocateValueAll((PJVAL)jsp);
+ err = LocateValueAll(g, (PJVAL)jsp);
break;
default:
err = true;
@@ -945,7 +946,7 @@ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx)
/*********************************************************************************/
/* Locate in a JSON Array. */
/*********************************************************************************/
-my_bool JSNX::LocateArrayAll(PJAR jarp)
+my_bool JSNX::LocateArrayAll(PGLOBAL g, PJAR jarp)
{
if (I < Imax) {
Jpnp[++I].Type = TYPE_JAR;
@@ -953,7 +954,7 @@ my_bool JSNX::LocateArrayAll(PJAR jarp)
for (int i = 0; i < jarp->size(); i++) {
Jpnp[I].N = i;
- if (LocateValueAll(jarp->GetValue(i)))
+ if (LocateValueAll(g, jarp->GetArrayValue(i)))
return true;
} // endfor i
@@ -967,7 +968,7 @@ my_bool JSNX::LocateArrayAll(PJAR jarp)
/*********************************************************************************/
/* Locate in a JSON Object. */
/*********************************************************************************/
-my_bool JSNX::LocateObjectAll(PJOB jobp)
+my_bool JSNX::LocateObjectAll(PGLOBAL g, PJOB jobp)
{
if (I < Imax) {
Jpnp[++I].Type = TYPE_JOB;
@@ -975,7 +976,7 @@ my_bool JSNX::LocateObjectAll(PJOB jobp)
for (PJPR pair = jobp->First; pair; pair = pair->Next) {
Jpnp[I].Key = pair->Key;
- if (LocateValueAll(pair->Val))
+ if (LocateValueAll(g, pair->Val))
return true;
} // endfor i
@@ -989,14 +990,14 @@ my_bool JSNX::LocateObjectAll(PJOB jobp)
/*********************************************************************************/
/* Locate a JSON Value. */
/*********************************************************************************/
-my_bool JSNX::LocateValueAll(PJVAL jvp)
+my_bool JSNX::LocateValueAll(PGLOBAL g, PJVAL jvp)
{
- if (CompareTree(Jvalp, jvp))
+ if (CompareTree(g, Jvalp, jvp))
return AddPath();
else if (jvp->GetArray())
- return LocateArrayAll(jvp->GetArray());
+ return LocateArrayAll(g, jvp->GetArray());
else if (jvp->GetObject())
- return LocateObjectAll(jvp->GetObject());
+ return LocateObjectAll(g, jvp->GetObject());
return false;
} // end of LocateValueAll
@@ -1004,7 +1005,7 @@ my_bool JSNX::LocateValueAll(PJVAL jvp)
/*********************************************************************************/
/* Compare two JSON trees. */
/*********************************************************************************/
-my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2)
+my_bool JSNX::CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2)
{
if (!jp1 || !jp2 || jp1->GetType() != jp2->GetType()
|| jp1->size() != jp2->size())
@@ -1013,26 +1014,22 @@ my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2)
my_bool found = true;
if (jp1->GetType() == TYPE_JVAL) {
- PVAL v1 = jp1->GetValue(), v2 = jp2->GetValue();
-
- if (v1 && v2) {
- if (v1->GetType() == v2->GetType())
- found = !v1->CompareValue(v2);
- else
- found = false;
+// PVL v1 = ((PJVAL)jp1)->GetVal(), v2 = ((PJVAL)jp2)->GetVal();
- } else
- found = CompareTree(jp1->GetJsp(), jp2->GetJsp());
+ if (((PJVAL)jp1)->DataType == TYPE_JSON && ((PJVAL)jp2)->DataType == TYPE_JSON)
+ found = CompareTree(g, jp1->GetJsp(), jp2->GetJsp());
+ else
+ found = CompareValues(((PJVAL)jp1), ((PJVAL)jp2));
} else if (jp1->GetType() == TYPE_JAR) {
for (int i = 0; found && i < jp1->size(); i++)
- found = (CompareTree(jp1->GetValue(i), jp2->GetValue(i)));
+ found = (CompareTree(g, jp1->GetArrayValue(i), jp2->GetArrayValue(i)));
} else if (jp1->GetType() == TYPE_JOB) {
PJPR p1 = jp1->GetFirst(), p2 = jp2->GetFirst();
for (; found && p1 && p2; p1 = p1->Next, p2 = p2->Next)
- found = CompareTree(p1->Val, p2->Val);
+ found = CompareTree(g, p1->Val, p2->Val);
} else
found = false;
@@ -1041,10 +1038,68 @@ my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2)
} // end of CompareTree
/*********************************************************************************/
-/* Add the found path to the list. */
+/* Compare two JSON values and return true if they are equal. */
/*********************************************************************************/
-my_bool JSNX::AddPath(void)
+my_bool JSNX::CompareValues(PJVAL v1, PJVAL v2)
{
+ my_bool b = false;
+
+ switch (v1->DataType) {
+ case TYPE_STRG:
+ if (v2->DataType == TYPE_STRG) {
+ if (v1->Nd || v2->Nd) // Case insensitive
+ b = (!stricmp(v1->Strp, v2->Strp));
+ else
+ b = (!strcmp(v1->Strp, v2->Strp));
+
+ } // endif Type
+
+ break;
+ case TYPE_DTM:
+ if (v2->DataType == TYPE_DTM)
+ b = (!strcmp(v1->Strp, v2->Strp));
+
+ break;
+ case TYPE_INTG:
+ if (v2->DataType == TYPE_INTG)
+ b = (v1->N == v2->N);
+ else if (v2->DataType == TYPE_BINT)
+ b = (v1->N == v2->LLn);
+
+ break;
+ case TYPE_BINT:
+ if (v2->DataType == TYPE_INTG)
+ b = (v1->LLn == v2->N);
+ else if (v2->DataType == TYPE_BINT)
+ b = (v1->LLn == v2->LLn);
+
+ break;
+ case TYPE_DBL:
+ if (v2->DataType == TYPE_DBL)
+ b = (v1->F == v2->F);
+
+ break;
+ case TYPE_BOOL:
+ if (v2->DataType == TYPE_BOOL)
+ b = (v1->B == v2->B);
+
+ break;
+ case TYPE_NULL:
+ if (v2->DataType == TYPE_NULL)
+ b = true;
+
+ break;
+ default:
+ break;
+ } // endswitch Type
+
+ return b;
+} // end of CompareValues
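+
CompareValues pairs each type only with itself, except that TYPE_INTG and TYPE_BINT are compared across widths and strings honour the case-insensitive flag carried in Nd. For readers who prefer the rules out of context, a standalone restatement over std::variant (hypothetical helper, not the UDF code; the case-insensitive path is omitted):

  #include <cstdio>
  #include <cstring>
  #include <variant>

  using Val = std::variant<std::monostate, int, long long, double, const char *>;

  static bool equal(const Val &a, const Val &b) {
    if (auto s1 = std::get_if<const char *>(&a))         // strings compare as text
      if (auto s2 = std::get_if<const char *>(&b))
        return std::strcmp(*s1, *s2) == 0;

    auto num = [](const Val &v, long long &out) {        // widen int against long long
      if (auto p = std::get_if<int>(&v))       { out = *p; return true; }
      if (auto p = std::get_if<long long>(&v)) { out = *p; return true; }
      return false;
    };
    long long n1, n2;
    if (num(a, n1) && num(b, n2)) return n1 == n2;

    return a == b;                                       // otherwise: same alternative, same value
  }

  int main() {
    std::printf("%d %d %d\n",
                equal(Val(5), Val(5LL)),                 // 1: INTG vs BINT
                equal(Val("x"), Val("x")),               // 1: equal strings
                equal(Val(5), Val(5.0)));                // 0: int and double do not mix
    return 0;
  }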
+
+/*********************************************************************************/
+/* Add the found path to the list. */
+/*********************************************************************************/
+my_bool JSNX::AddPath(void) {
char s[16];
if (Jp->WriteStr("\"$"))
@@ -1113,7 +1168,7 @@ static void SetChanged(PBSON bsp)
/*********************************************************************************/
/* Replaces GetJsonGrpSize not usable when CONNECT is not installed. */
/*********************************************************************************/
-static uint GetJsonGroupSize(void)
+uint GetJsonGroupSize(void)
{
return (JsonGrpSize) ? JsonGrpSize : GetJsonGrpSize();
} // end of GetJsonGroupSize
@@ -1121,12 +1176,16 @@ static uint GetJsonGroupSize(void)
/*********************************************************************************/
/* Program for SubSet re-initialization of the memory pool. */
/*********************************************************************************/
-static my_bool JsonSubSet(PGLOBAL g)
+my_bool JsonSubSet(PGLOBAL g, my_bool b)
{
PPOOLHEADER pph = (PPOOLHEADER)g->Sarea;
- pph->To_Free = (g->Saved_Size) ? g->Saved_Size : (size_t)sizeof(POOLHEADER);
+ pph->To_Free = (g->Saved_Size) ? g->Saved_Size : sizeof(POOLHEADER);
pph->FreeBlk = g->Sarea_Size - pph->To_Free;
+
+ if (b)
+ g->Saved_Size = 0;
+
return FALSE;
} /* end of JsonSubSet */
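
Together with JsonMemSave (shown in the next hunk), JsonSubSet gives the UDFs a mark-and-release discipline over the plugin's sub-allocation pool: save the free pointer once, then snap back to it between records, which is how the jfile_bjson loop further down reuses its work area. A standalone sketch of that bump-allocator idea (hypothetical Arena; PlugSubAlloc and POOLHEADER are the real counterparts):

  #include <cstddef>
  #include <cstdio>

  struct Arena {
    char  *buf;
    size_t size, used = 0, saved = 0;

    void *alloc(size_t n) {                  // bump allocation, no individual free
      if (used + n > size) return nullptr;   // pool exhausted
      void *p = buf + used;
      used += n;
      return p;
    }
    void mark(void)  { saved = used; }       // ~ JsonMemSave
    void reset(void) { used = saved; }       // ~ JsonSubSet
  };

  int main() {
    char  pool[1024];
    Arena a{pool, sizeof pool};

    a.alloc(64);                             // long-lived state
    a.mark();

    for (int rec = 0; rec < 3; rec++) {      // per-record scratch allocations
      a.alloc(200);
      std::printf("record %d: used=%zu\n", rec, a.used);
      a.reset();                             // scratch gone, long-lived part kept
    }
    std::printf("after loop: used=%zu\n", a.used);   // 64
    return 0;
  }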
@@ -1144,7 +1203,7 @@ inline void JsonMemSave(PGLOBAL g)
inline void JsonFreeMem(PGLOBAL g)
{
g->Activityp = NULL;
- PlugExit(g);
+ g = PlugExit(g);
} /* end of JsonFreeMem */
/*********************************************************************************/
@@ -1193,9 +1252,10 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp)
case TYPE_JOB:
jvp = new(g) JVALUE((PJSON)vp);
break;
- case TYPE_VAL:
- jvp = new(g) JVALUE(g, (PVAL)vp);
- break;
+// case TYPE_VAL:
+// jvp = new(g) JVALUE(g, (PVAL)vp);
+// break;
+ case TYPE_DTM:
case TYPE_STRG:
jvp = new(g) JVALUE(g, (PCSZ)vp);
break;
@@ -1211,24 +1271,22 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp)
} // end try/catch
return jvp;
-} /* end of JsonNew */
+} /* end of JvalNew */
/*********************************************************************************/
/* Allocate and initialise the memory area. */
/*********************************************************************************/
-static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args,
- char *message, my_bool mbn,
- unsigned long reslen, unsigned long memlen,
- unsigned long more = 0)
+my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, char *message, my_bool mbn,
+ unsigned long reslen, unsigned long memlen, unsigned long more)
{
- PGLOBAL g = PlugInit(NULL, memlen + more + 500); // +500 to avoid CheckMem
+ PGLOBAL g = PlugInit(NULL, (size_t)memlen + more + 500); // +500 to avoid CheckMem
if (!g) {
strcpy(message, "Allocation error");
return true;
} else if (g->Sarea_Size == 0) {
strcpy(message, g->Message);
- PlugExit(g);
+ g = PlugExit(g);
return true;
} // endif g
@@ -1382,7 +1440,7 @@ static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n)
/*********************************************************************************/
/* Returns not 0 if the argument is a JSON item or file name. */
/*********************************************************************************/
-static int IsJson(UDF_ARGS *args, uint i, bool b)
+int IsJson(UDF_ARGS *args, uint i, bool b)
{
int n = 0;
@@ -1405,7 +1463,7 @@ static int IsJson(UDF_ARGS *args, uint i, bool b)
char *sap;
PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024);
- JsonSubSet(g);
+// JsonSubSet(g);
sap = MakePSZ(g, args, i);
if (ParseJson(g, sap, strlen(sap)))
@@ -1449,9 +1507,8 @@ static long GetFileLength(char *fn)
/*********************************************************************************/
/* Calculate the reslen and memlen needed by a function. */
/*********************************************************************************/
-static my_bool CalcLen(UDF_ARGS *args, my_bool obj,
- unsigned long& reslen, unsigned long& memlen,
- my_bool mod = false)
+my_bool CalcLen(UDF_ARGS *args, my_bool obj, unsigned long& reslen,
+ unsigned long& memlen, my_bool mod)
{
char fn[_MAX_PATH];
unsigned long i, k, m, n;
@@ -1568,8 +1625,8 @@ static my_bool CalcLen(UDF_ARGS *args, my_bool obj,
/*********************************************************************************/
/* Check if the calculated memory is enough. */
/*********************************************************************************/
-static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
- my_bool m, my_bool obj = false, my_bool mod = false)
+my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
+ my_bool m, my_bool obj, my_bool mod)
{
unsigned long rl, ml;
my_bool b = false;
@@ -1621,7 +1678,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n,
/*********************************************************************************/
/* Make a zero terminated string from the passed argument. */
/*********************************************************************************/
-static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i)
+PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i)
{
if (args->arg_count > (unsigned)i && args->args[i]) {
int n = args->lengths[i];
@@ -1690,7 +1747,7 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i)
/*********************************************************************************/
/* Parse a json file. */
/*********************************************************************************/
-static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len)
+static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, size_t& len)
{
char *memory;
HANDLE hFile;
@@ -1712,9 +1769,13 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len)
} // endif hFile
/*******************************************************************************/
- /* Get the file size (assuming file is smaller than 4 GB) */
+ /* Get the file size. */
/*******************************************************************************/
- len = mm.lenL;
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+ len += ((size_t)mm.lenH * 0x100000000LL);
+
memory = (char *)mm.memory;
if (!len) { // Empty or deleted file
@@ -1742,7 +1803,7 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len)
/*********************************************************************************/
/* Return a json file's contents. */
/*********************************************************************************/
-static char *GetJsonFile(PGLOBAL g, char *fn)
+char *GetJsonFile(PGLOBAL g, char *fn)
{
char *str;
int h, n, len;
@@ -1784,7 +1845,7 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL)
{
char *sap = (args->arg_count > i) ? args->args[i] : NULL;
int n, len;
- short c;
+ int ci;
long long bigint;
PJSON jsp;
PJVAL jvp = new(g) JVALUE;
@@ -1827,8 +1888,8 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL)
jvp->SetValue(jsp);
} else {
- c = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1;
- jvp->SetString(g, sap, c);
+ ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1;
+ jvp->SetString(g, sap, ci);
} // endif n
} // endif len
@@ -1839,7 +1900,7 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL)
if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) ||
(bigint == 1LL && !strcmp(args->attributes[i], "TRUE")))
- jvp->SetTiny(g, (char)bigint);
+ jvp->SetBool(g, (char)bigint);
else
jvp->SetBigint(g, bigint);
@@ -1894,6 +1955,8 @@ static PJVAL MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i,
return jvp;
} // end of MakeTypedValue
+/* ------------------------------ The JSON UDF's ------------------------------- */
+
/*********************************************************************************/
/* Make a Json value containing the parameter. */
/*********************************************************************************/
@@ -1962,7 +2025,7 @@ char *json_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
PJAR arp = new(g)JARRAY;
for (uint i = 0; i < args->arg_count; i++)
- arp->AddValue(g, MakeValue(g, args, i));
+ arp->AddArrayValue(g, MakeValue(g, args, i));
arp->InitArray(g);
@@ -2032,13 +2095,13 @@ char *json_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (jvp->GetValType() != TYPE_JAR) {
arp = new(g)JARRAY;
- arp->AddValue(g, jvp);
+ arp->AddArrayValue(g, jvp);
top = arp;
} else
arp = jvp->GetArray();
for (uint i = 1; i < args->arg_count; i++)
- arp->AddValue(g, MakeValue(g, args, i));
+ arp->AddArrayValue(g, MakeValue(g, args, i));
arp->InitArray(g);
str = MakeResult(g, args, top, args->arg_count);
@@ -2130,7 +2193,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (jvp->GetValType() != TYPE_JAR) {
if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) {
- arp->AddValue(gb, JvalNew(gb, TYPE_JVAL, jvp));
+ arp->AddArrayValue(gb, JvalNew(gb, TYPE_JVAL, jvp));
jvp->SetValue(arp);
if (!top)
@@ -2142,7 +2205,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
arp = jvp->GetArray();
if (arp) {
- arp->AddValue(gb, MakeValue(gb, args, 1), x);
+ arp->AddArrayValue(gb, MakeValue(gb, args, 1), x);
arp->InitArray(gb);
str = MakeResult(g, args, top, n);
} else
@@ -2311,7 +2374,7 @@ long long jsonsum_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *err
PJAR arp = jvp->GetArray();
for (int i = 0; i < arp->size(); i++)
- n += arp->GetValue(i)->GetBigint();
+ n += arp->GetArrayValue(i)->GetBigint();
} else {
PUSH_WARNING("First argument target is not an array");
@@ -2386,7 +2449,7 @@ double jsonsum_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error
PJAR arp = jvp->GetArray();
for (int i = 0; i < arp->size(); i++)
- n += arp->GetValue(i)->GetFloat();
+ n += arp->GetArrayValue(i)->GetFloat();
} else {
PUSH_WARNING("First argument target is not an array");
@@ -2451,7 +2514,7 @@ double jsonavg_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error
if (arp->size()) {
for (int i = 0; i < arp->size(); i++)
- n += arp->GetValue(i)->GetFloat();
+ n += arp->GetArrayValue(i)->GetFloat();
n /= arp->size();
} // endif size
@@ -2510,7 +2573,7 @@ char *json_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i++)
- objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i));
+ objp->SetKeyValue(g, MakeValue(g, args, i), MakeKey(g, args, i));
str = Serialize(g, objp, NULL, 0);
} // endif objp
@@ -2560,7 +2623,7 @@ char *json_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i++)
if (!(jvp = MakeValue(g, args, i))->IsNull())
- objp->SetValue(g, jvp, MakeKey(g, args, i));
+ objp->SetKeyValue(g, jvp, MakeKey(g, args, i));
str = Serialize(g, objp, NULL, 0);
} // endif objp
@@ -2612,7 +2675,7 @@ char *json_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i += 2)
- objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
+ objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
str = Serialize(g, objp, NULL, 0);
} // endif objp
@@ -2696,7 +2759,7 @@ char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
jobp = jvp->GetObject();
jvp = MakeValue(gb, args, 1);
key = MakeKey(gb, args, 1);
- jobp->SetValue(gb, jvp, key);
+ jobp->SetKeyValue(gb, jvp, key);
str = MakeResult(g, args, top);
} else {
PUSH_WARNING("First argument target is not an object");
@@ -3049,7 +3112,7 @@ void json_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
PJAR arp = (PJAR)g->Activityp;
if (arp && g->N-- > 0)
- arp->AddValue(g, MakeValue(g, args, 0));
+ arp->AddArrayValue(g, MakeValue(g, args, 0));
} // end of json_array_grp_add
@@ -3126,7 +3189,7 @@ void json_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
PJOB objp = (PJOB)g->Activityp;
if (g->N-- > 0)
- objp->SetValue(g, MakeValue(g, args, 1), MakePSZ(g, args, 0));
+ objp->SetKeyValue(g, MakeValue(g, args, 1), MakePSZ(g, args, 0));
} // end of json_object_grp_add
@@ -4007,17 +4070,14 @@ my_bool jsoncontains_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
return JsonInit(initid, args, message, false, reslen, memlen, more);
} // end of jsoncontains_init
-long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *result,
- unsigned long *res_length, char *is_null, char *error)
+long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
{
- char *p __attribute__((unused)), res[256];
- long long n;
+ char isn, res[256];
unsigned long reslen;
- *is_null = 0;
- p = jsonlocate(initid, args, res, &reslen, is_null, error);
- n = (*is_null) ? 0LL : 1LL;
- return n;
+ isn = 0;
+ jsonlocate(initid, args, res, &reslen, &isn, error);
+ return (isn) ? 0LL : 1LL;
} // end of jsoncontains
void jsoncontains_deinit(UDF_INIT* initid)
@@ -4059,8 +4119,7 @@ my_bool jsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
return JsonInit(initid, args, message, true, reslen, memlen, more);
} // end of jsoncontains_path_init
-long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result,
- unsigned long *res_length, char *is_null, char *error)
+long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *error)
{
char *p, *path;
long long n;
@@ -4071,7 +4130,6 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (g->N) {
if (!g->Activityp) {
- *is_null = 1;
return 0LL;
} else
return *(long long*)g->Activityp;
@@ -4129,7 +4187,6 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result,
err:
if (g->Mrr) *error = 1;
- *is_null = 1;
return 0LL;
} // end of jsoncontains_path
@@ -4406,7 +4463,8 @@ char *json_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
fn = MakePSZ(g, args, 0);
if (args->arg_count > 1) {
- int len, pretty = 3, pty = 3;
+ int pretty = 3, pty = 3;
+ size_t len;
PJSON jsp;
PJVAL jvp = NULL;
@@ -4609,7 +4667,7 @@ char *jbin_array(UDF_INIT *initid, UDF_ARGS *args, char *result,
strcat(bsp->Msg, " array");
for (uint i = 0; i < args->arg_count; i++)
- arp->AddValue(g, MakeValue(g, args, i));
+ arp->AddArrayValue(g, MakeValue(g, args, i));
arp->InitArray(g);
} // endif arp && bsp
@@ -4670,7 +4728,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (jvp->GetValType() != TYPE_JAR) {
if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) {
- arp->AddValue(gb, jvp);
+ arp->AddArrayValue(gb, jvp);
top = arp;
} // endif arp
@@ -4678,7 +4736,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result,
arp = jvp->GetArray();
for (uint i = 1; i < args->arg_count; i++)
- arp->AddValue(gb, MakeValue(gb, args, i));
+ arp->AddArrayValue(gb, MakeValue(gb, args, i));
arp->InitArray(gb);
@@ -4761,7 +4819,7 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
if (jvp->GetValType() != TYPE_JAR) {
if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) {
- arp->AddValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp));
+ arp->AddArrayValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp));
jvp->SetValue(arp);
if (!top)
@@ -4772,7 +4830,7 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
} else
arp = jvp->GetArray();
- arp->AddValue(gb, MakeValue(gb, args, 1), x);
+ arp->AddArrayValue(gb, MakeValue(gb, args, 1), x);
arp->InitArray(gb);
} else {
PUSH_WARNING("First argument target is not an array");
@@ -4900,7 +4958,7 @@ char *jbin_object(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i++)
- objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i));
+ objp->SetKeyValue(g, MakeValue(g, args, i), MakeKey(g, args, i));
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
@@ -4957,7 +5015,7 @@ char *jbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i++)
if (!(jvp = MakeValue(g, args, i))->IsNull())
- objp->SetValue(g, jvp, MakeKey(g, args, i));
+ objp->SetKeyValue(g, jvp, MakeKey(g, args, i));
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
strcat(bsp->Msg, " object");
@@ -5016,7 +5074,7 @@ char *jbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result,
if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) {
for (uint i = 0; i < args->arg_count; i += 2)
- objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
+ objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i));
if ((bsp = JbinAlloc(g, args, initid->max_length, objp)))
strcat(bsp->Msg, " object");
@@ -5094,7 +5152,7 @@ char *jbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result,
jobp = jvp->GetObject();
jvp = MakeValue(gb, args, 1);
key = MakeKey(gb, args, 1);
- jobp->SetValue(gb, jvp, key);
+ jobp->SetKeyValue(gb, jvp, key);
} else {
PUSH_WARNING("First argument target is not an object");
// if (g->Mrr) *error = 1; (only if no path)
@@ -5313,7 +5371,7 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
// Get the json tree
if ((jvp = jsx->GetRowValue(g, jsp, 0, false))) {
- jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_VAL, jvp->GetValue());
+ jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_JVAL, jvp->GetValue(g));
if ((bsp = JbinAlloc(g, args, initid->max_length, jsp)))
strcat(bsp->Msg, " item");
@@ -5639,7 +5697,8 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result,
unsigned long *res_length, char *is_null, char *error)
{
char *fn;
- int pretty = 3, len = 0, pty = 3;
+ int pretty = 3, pty = 3;
+ size_t len = 0;
PJSON jsp;
PJVAL jvp = NULL;
PGLOBAL g = (PGLOBAL)initid->ptr;
@@ -5782,11 +5841,11 @@ my_bool jfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
} // endif args
CalcLen(args, false, reslen, memlen);
- return JsonInit(initid, args, message, false, reslen, memlen);
+ return JsonInit(initid, args, message, true, reslen, memlen);
} // end of jfile_convert_init
char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
- unsigned long *res_length, char *, char *error) {
+ unsigned long *res_length, char *is_null, char *error) {
char *str, *fn, *ofn;
int lrecl = (int)*(longlong*)args->args[2];
PGLOBAL g = (PGLOBAL)initid->ptr;
@@ -5804,10 +5863,15 @@ char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result,
str = (char*)g->Xchk;
if (!str) {
- str = PlugDup(g, g->Message);
- } // endif str
+ PUSH_WARNING(g->Message ? g->Message : "Unexpected error");
+ *is_null = 1;
+ *error = 1;
+ *res_length = 0;
+ } else {
+ strcpy(result, str);
+ *res_length = strlen(str);
+ } // endif str
- *res_length = strlen(str);
return str;
} // end of jfile_convert
@@ -5815,9 +5879,136 @@ void jfile_convert_deinit(UDF_INIT* initid) {
JsonFreeMem((PGLOBAL)initid->ptr);
} // end of jfile_convert_deinit
+/*********************************************************************************/
+/* Convert a pretty Json file to a binary (BJSON) file. */
+/*********************************************************************************/
+my_bool jfile_bjson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) {
+ unsigned long reslen, memlen;
+
+ if (args->arg_count != 2 && args->arg_count != 3) {
+ strcpy(message, "This function must have 2 or 3 arguments");
+ return true;
+ } else if (args->arg_count == 3 && args->arg_type[2] != INT_RESULT) {
+ strcpy(message, "Third Argument must be an integer (LRECL)");
+ return true;
+ } else for (int i = 0; i < 2; i++)
+ if (args->arg_type[i] != STRING_RESULT) {
+ sprintf(message, "Argument %d must be a string (file name)", i + 1);
+ return true;
+ } // endif args
+
+ CalcLen(args, false, reslen, memlen);
+ memlen = memlen * M;
+ memlen += (args->arg_count == 3) ? (ulong)*(longlong*)args->args[2] : 1024;
+ return JsonInit(initid, args, message, false, reslen, memlen);
+} // end of jfile_bjson_init
+
+char *jfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result,
+ unsigned long *res_length, char*, char *error) {
+ char *fn, *ofn, *buf, *str = NULL;
+ bool loop;
+ ssize_t len, newloc;
+ size_t lrecl, *binszp;
+ PJSON jsp;
+ SWAP *swp;
+ PGLOBAL g = (PGLOBAL)initid->ptr;
+
+ PlugSubSet(g->Sarea, g->Sarea_Size);
+ fn = MakePSZ(g, args, 0);
+ ofn = MakePSZ(g, args, 1);
+
+ if (args->arg_count == 3)
+ lrecl = (size_t)*(longlong*)args->args[2];
+ else
+ lrecl = 1024;
+
+ if (!g->Xchk) {
+ int msgid = MSGID_OPEN_MODE_STRERROR;
+ FILE *fout = NULL; // initialized so the fclose() calls below are safe if an open fails
+ FILE *fin = NULL;
+
+ if (!(fin = global_fopen(g, msgid, fn, "rt")))
+ str = strcpy(result, g->Message);
+ else if (!(fout = global_fopen(g, msgid, ofn, "wb")))
+ str = strcpy(result, g->Message);
+ else if ((buf = (char*)PlgDBSubAlloc(g, NULL, lrecl)) &&
+ (binszp = (size_t*)PlgDBSubAlloc(g, NULL, sizeof(size_t)))) {
+ JsonMemSave(g);
+
+ try {
+ do {
+ loop = false;
+ JsonSubSet(g);
+
+ if (!fgets(buf, lrecl, fin)) {
+ if (!feof(fin)) {
+ sprintf(g->Message, "Error %d reading %zd bytes from %s", errno, lrecl, fn);
+ str = strcpy(result, g->Message);
+ } else
+ str = strcpy(result, ofn);
+
+ } else if ((len = strlen(buf))) {
+ if ((jsp = ParseJson(g, buf, len))) {
+ newloc = (size_t)PlugSubAlloc(g, NULL, 0);
+ *binszp = newloc - (size_t)jsp;
+
+ swp = new(g) SWAP(g, jsp);
+ swp->SwapJson(jsp, true);
+
+ if (fwrite(binszp, sizeof(*binszp), 1, fout) != 1) {
+ sprintf(g->Message, "Error %d writing %zd bytes to %s",
+ errno, sizeof(*binszp), ofn);
+ str = strcpy(result, g->Message);
+ } else if (fwrite(jsp, *binszp, 1, fout) != 1) {
+ sprintf(g->Message, "Error %d writing %zd bytes to %s",
+ errno, *binszp, ofn);
+ str = strcpy(result, g->Message);
+ } else
+ loop = true;
+
+ } else {
+ str = strcpy(result, g->Message);
+ } // endif jsp
+
+ } else
+ loop = true;
+
+ } while (loop);
+
+ } catch (int) {
+ str = strcpy(result, g->Message);
+ } catch (const char* msg) {
+ str = strcpy(result, msg);
+ } // end catch
+
+ } else
+ str = strcpy(result, g->Message);
+
+ if (fin) fclose(fin);
+ if (fout) fclose(fout);
+ g->Xchk = str;
+ } else
+ str = (char*)g->Xchk;
+
+ if (!str) {
+ if (g->Message)
+ str = strcpy(result, g->Message);
+ else
+ str = strcpy(result, "Unexpected error");
+
+ } // endif str
+
+ *res_length = strlen(str);
+ return str;
+} // end of jfile_bjson
+
+void jfile_bjson_deinit(UDF_INIT* initid) {
+ JsonFreeMem((PGLOBAL)initid->ptr);
+} // end of jfile_bjson_deinit
+
/* --------------------------------- Class JUP --------------------------------- */
-#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0)
+#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0)
/*********************************************************************************/
/* JUP public constructor. */
@@ -5825,7 +6016,9 @@ void jfile_convert_deinit(UDF_INIT* initid) {
JUP::JUP(PGLOBAL g) {
fs = NULL;
s = buff = NULL;
- i = k = len = recl = 0;
+ len = 0;
+ k = recl = 0;
+ i = 0;
} // end of JUP constructor
/*********************************************************************************/
@@ -5853,11 +6046,16 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
/*******************************************************************************/
/* Get the file size (assuming file is smaller than 4 GB) */
/*******************************************************************************/
- if (!mm.lenL) { // Empty or deleted file
+ if (!mm.lenL && !mm.lenH) { // Empty or deleted file
CloseFileHandle(hFile);
return NULL;
- } else
- len = (int)mm.lenL;
+ } else {
+ len = (size_t)mm.lenL;
+
+ if (mm.lenH)
+ len += ((size_t)mm.lenH * 0x100000000LL);
+
+ } // endif size
if (!mm.memory) {
CloseFileHandle(hFile);
@@ -5875,7 +6073,7 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
sprintf(g->Message, MSG(OPEN_MODE_ERROR),
"w", (int)errno, outfn);
strcat(strcat(g->Message, ": "), strerror(errno));
- CloseMemMap(mm.memory, (size_t)mm.lenL);
+ CloseMemMap(mm.memory, len);
return NULL;
} // endif fs
@@ -5884,7 +6082,7 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) {
if (!unPretty(g, lrecl))
ret = outfn;
- CloseMemMap(mm.memory, (size_t)mm.lenL);
+ CloseMemMap(mm.memory, len);
fclose(fs);
return ret;
} // end of UnprettyJsonFile
@@ -6329,8 +6527,7 @@ my_bool countin_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
return false;
} // end of countin_init
-long long countin(UDF_INIT *initid, UDF_ARGS *args, char *result,
- unsigned long *res_length, char *is_null, char *)
+long long countin(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *)
{
PSZ str1, str2;
char *s;
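A minimal usage sketch (not part of this diff), assuming the new and updated file UDFs are registered from the CONNECT plugin library like the other JSON UDFs; the library name ('ha_connect.so') and the file names are illustrative only. Per the init functions above, jfile_bjson takes two file names plus an optional integer LRECL, and jfile_convert takes two file names plus a mandatory LRECL:

CREATE FUNCTION jfile_convert RETURNS STRING SONAME 'ha_connect.so';
CREATE FUNCTION jfile_bjson RETURNS STRING SONAME 'ha_connect.so';
-- Rewrite a pretty JSON file as a one-record-per-line (Pretty=0) file
SELECT jfile_convert('biblio.json', 'bib0.json', 320);
-- Convert a JSON file to a binary (BJSON) file with a 1K record buffer
SELECT jfile_bjson('biblio.json', 'biblio.bjs', 1024);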
diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h
index 897b0fe9919..689a02ebbc5 100644
--- a/storage/connect/jsonudf.h
+++ b/storage/connect/jsonudf.h
@@ -1,10 +1,11 @@
/******************** tabjson H Declares Source Code File (.H) *******************/
-/* Name: jsonudf.h Version 1.3 */
+/* Name: jsonudf.h Version 1.4 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2015-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2015-2020 */
/* */
/* This file contains the JSON UDF function and class declares. */
/*********************************************************************************/
+#pragma once
#include "global.h"
#include "plgdbsem.h"
#include "block.h"
@@ -15,6 +16,27 @@
#define UDF_EXEC_ARGS \
UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char*
+// BSON size should be equal on Linux and Windows
+#define BMX 255
+typedef struct BSON* PBSON;
+
+/***********************************************************************/
+/* Structure used to return binary json to Json UDF functions. */
+/***********************************************************************/
+struct BSON {
+ char Msg[BMX + 1];
+ char *Filename;
+ PGLOBAL G;
+ int Pretty;
+ ulong Reslen;
+ my_bool Changed;
+ PJSON Top;
+ PJSON Jsp;
+ PBSON Bsp;
+}; // end of struct BSON
+
+PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp);
+
/*********************************************************************************/
/* The JSON tree node. Can be an Object or an Array. */
/*********************************************************************************/
@@ -29,9 +51,29 @@ typedef struct _jnode {
} JNODE, *PJNODE;
typedef class JSNX *PJSNX;
-typedef class JOUTPATH *PJTP;
-typedef class JOUTALL *PJTA;
+/*********************************************************************************/
+/* The JSON utility functions. */
+/*********************************************************************************/
+bool IsNum(PSZ s);
+char *NextChr(PSZ s, char sep);
+char *GetJsonNull(void);
+uint GetJsonGrpSize(void);
+my_bool JsonSubSet(PGLOBAL g, my_bool b = false);
+my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen,
+ unsigned long& memlen, my_bool mod = false);
+my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn,
+ unsigned long reslen, unsigned long memlen,
+ unsigned long more = 0);
+my_bool CheckMemory(PGLOBAL g, UDF_INIT* initid, UDF_ARGS* args, uint n,
+ my_bool m, my_bool obj = false, my_bool mod = false);
+PSZ MakePSZ(PGLOBAL g, UDF_ARGS* args, int i);
+int IsJson(UDF_ARGS* args, uint i, bool b = false);
+char *GetJsonFile(PGLOBAL g, char* fn);
+
+/*********************************************************************************/
+/* The JSON UDF functions. */
+/*********************************************************************************/
extern "C" {
DllExport my_bool jsonvalue_init(UDF_INIT*, UDF_ARGS*, char*);
DllExport char *jsonvalue(UDF_EXEC_ARGS);
@@ -132,7 +174,7 @@ extern "C" {
DllExport void jsonget_real_deinit(UDF_INIT*);
DllExport my_bool jsoncontains_init(UDF_INIT*, UDF_ARGS*, char*);
- DllExport long long jsoncontains(UDF_EXEC_ARGS);
+ DllExport long long jsoncontains(UDF_INIT*, UDF_ARGS*, char*, char*);
DllExport void jsoncontains_deinit(UDF_INIT*);
DllExport my_bool jsonlocate_init(UDF_INIT*, UDF_ARGS*, char*);
@@ -144,7 +186,7 @@ extern "C" {
DllExport void json_locate_all_deinit(UDF_INIT*);
DllExport my_bool jsoncontains_path_init(UDF_INIT*, UDF_ARGS*, char*);
- DllExport long long jsoncontains_path(UDF_EXEC_ARGS);
+ DllExport long long jsoncontains_path(UDF_INIT*, UDF_ARGS*, char*, char*);
DllExport void jsoncontains_path_deinit(UDF_INIT*);
DllExport my_bool json_set_item_init(UDF_INIT*, UDF_ARGS*, char*);
@@ -239,6 +281,10 @@ extern "C" {
DllExport char* jfile_convert(UDF_EXEC_ARGS);
DllExport void jfile_convert_deinit(UDF_INIT*);
+ DllExport my_bool jfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*);
+ DllExport char* jfile_bjson(UDF_EXEC_ARGS);
+ DllExport void jfile_bjson_deinit(UDF_INIT*);
+
DllExport my_bool envar_init(UDF_INIT*, UDF_ARGS*, char*);
DllExport char *envar(UDF_EXEC_ARGS);
@@ -248,17 +294,17 @@ extern "C" {
#endif // DEVELOPMENT
DllExport my_bool countin_init(UDF_INIT*, UDF_ARGS*, char*);
- DllExport long long countin(UDF_EXEC_ARGS);
-} // extern "C"
+ DllExport long long countin(UDF_INIT*, UDF_ARGS*, char*, char*);
+} // extern "C"
/*********************************************************************************/
/* Structure JPN. Used to make the locate path. */
/*********************************************************************************/
typedef struct _jpn {
- enum JTYP Type;
- PCSZ Key;
- int N;
+ int Type;
+ PCSZ Key;
+ int N;
} JPN, *PJPN;
/*********************************************************************************/
@@ -290,15 +336,16 @@ protected:
PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
PVAL MakeJson(PGLOBAL g, PJSON jsp);
- void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
PJSON GetRow(PGLOBAL g);
- my_bool LocateArray(PJAR jarp);
- my_bool LocateObject(PJOB jobp);
- my_bool LocateValue(PJVAL jvp);
- my_bool LocateArrayAll(PJAR jarp);
- my_bool LocateObjectAll(PJOB jobp);
- my_bool LocateValueAll(PJVAL jvp);
- my_bool CompareTree(PJSON jp1, PJSON jp2);
+ my_bool CompareValues(PJVAL v1, PJVAL v2);
+ my_bool LocateArray(PGLOBAL g, PJAR jarp);
+ my_bool LocateObject(PGLOBAL g, PJOB jobp);
+ my_bool LocateValue(PGLOBAL g, PJVAL jvp);
+ my_bool LocateArrayAll(PGLOBAL g, PJAR jarp);
+ my_bool LocateObjectAll(PGLOBAL g, PJOB jobp);
+ my_bool LocateValueAll(PGLOBAL g, PJVAL jvp);
+ my_bool CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2);
my_bool AddPath(void);
// Default constructor not to be used
@@ -355,11 +402,10 @@ public:
void CopyNumeric(PGLOBAL g);
// Members
- FILE* fs;
- char* s;
- char* buff;
- int len;
- int recl;
- int i, k;
+ FILE *fs;
+ char *s;
+ char *buff;
+ size_t len;
+ uint i;
+ int k, recl;
}; // end of class JUP
-
diff --git a/storage/connect/libdoc.cpp b/storage/connect/libdoc.cpp
index 69bbe980eba..61921555ad7 100644
--- a/storage/connect/libdoc.cpp
+++ b/storage/connect/libdoc.cpp
@@ -378,7 +378,7 @@ bool LIBXMLDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped)
if (zipped && InitZip(g, entry))
return true;
- int n __attribute__((unused))= xmlKeepBlanksDefault(1);
+ xmlKeepBlanksDefault(1);
return MakeNSlist(g);
} // end of Initialize
diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc
index f8b3dc03aa5..e3fa00e119f 100644
--- a/storage/connect/mycat.cc
+++ b/storage/connect/mycat.cc
@@ -16,9 +16,9 @@
/*************** Mycat CC Program Source Code File (.CC) ***************/
/* PROGRAM NAME: MYCAT */
/* ------------- */
-/* Version 1.7 */
+/* Version 1.8 */
/* */
-/* Author: Olivier Bertrand 2012 - 2019 */
+/* Author: Olivier Bertrand 2012 - 2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -82,7 +82,11 @@
#endif // JAVA_SUPPORT
#include "tabpivot.h"
#include "tabvir.h"
+#if defined(BSON_SUPPORT)
+#include "tabbson.h"
+#else
#include "tabjson.h"
+#endif // BSON_SUPPORT
#include "ha_connect.h"
#if defined(XML_SUPPORT)
#include "tabxml.h"
@@ -107,6 +111,9 @@ extern "C" HINSTANCE s_hModule; // Saved module handle
#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
bool MongoEnabled(void);
#endif // JAVA_SUPPORT || CMGO_SUPPORT
+#if defined(BSON_SUPPORT)
+bool Force_Bson(void);
+#endif // BSON_SUPPORT
/***********************************************************************/
/* Get the plugin directory. */
@@ -130,25 +137,25 @@ TABTYPE GetTypeID(const char *type)
: (!stricmp(type, "DBF")) ? TAB_DBF
#if defined(XML_SUPPORT)
: (!stricmp(type, "XML")) ? TAB_XML
-#endif
+#endif // XML_SUPPORT
: (!stricmp(type, "INI")) ? TAB_INI
: (!stricmp(type, "VEC")) ? TAB_VEC
#if defined(ODBC_SUPPORT)
: (!stricmp(type, "ODBC")) ? TAB_ODBC
-#endif
+#endif // ODBC_SUPPORT
#if defined(JAVA_SUPPORT)
: (!stricmp(type, "JDBC")) ? TAB_JDBC
-#endif
+#endif // JAVA_SUPPORT
#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
: (!stricmp(type, "MONGO") && MongoEnabled()) ? TAB_MONGO
-#endif
+#endif // JAVA_SUPPORT || CMGO_SUPPORT
: (!stricmp(type, "MYSQL")) ? TAB_MYSQL
: (!stricmp(type, "MYPRX")) ? TAB_MYSQL
: (!stricmp(type, "DIR")) ? TAB_DIR
#if defined(__WIN__)
: (!stricmp(type, "MAC")) ? TAB_MAC
: (!stricmp(type, "WMI")) ? TAB_WMI
-#endif
+#endif // __WIN__
: (!stricmp(type, "TBL")) ? TAB_TBL
: (!stricmp(type, "XCOL")) ? TAB_XCL
: (!stricmp(type, "OCCUR")) ? TAB_OCCUR
@@ -157,9 +164,12 @@ TABTYPE GetTypeID(const char *type)
: (!stricmp(type, "PIVOT")) ? TAB_PIVOT
: (!stricmp(type, "VIR")) ? TAB_VIR
: (!stricmp(type, "JSON")) ? TAB_JSON
+#if defined(BSON_SUPPORT)
+ : (!stricmp(type, "BSON")) ? TAB_BSON
+#endif // BSON_SUPPORT
#if defined(ZIP_SUPPORT)
: (!stricmp(type, "ZIP")) ? TAB_ZIP
-#endif
+#endif // ZIP_SUPPORT
: (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY;
} // end of GetTypeID
@@ -181,6 +191,9 @@ bool IsFileType(TABTYPE type)
case TAB_INI:
case TAB_VEC:
case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
case TAB_REST:
// case TAB_ZIP:
isfile= true;
@@ -276,6 +289,9 @@ bool IsTypeIndexable(TABTYPE type)
case TAB_VEC:
case TAB_DBF:
case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
idx= true;
break;
default:
@@ -302,6 +318,9 @@ int GetIndexType(TABTYPE type)
case TAB_VEC:
case TAB_DBF:
case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ case TAB_BSON:
+#endif // BSON_SUPPORT
xtyp= 1;
break;
case TAB_MYSQL:
@@ -445,7 +464,7 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am)
case TAB_XML: tdp= new(g) XMLDEF; break;
#endif // XML_SUPPORT
#if defined(VCT_SUPPORT)
- case TAB_VEC: tdp = new(g) VCTDEF; break;
+ case TAB_VEC: tdp= new(g) VCTDEF; break;
#endif // VCT_SUPPORT
#if defined(ODBC_SUPPORT)
case TAB_ODBC: tdp= new(g) ODBCDEF; break;
@@ -465,9 +484,20 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am)
case TAB_MYSQL: tdp= new(g) MYSQLDEF; break;
case TAB_PIVOT: tdp= new(g) PIVOTDEF; break;
case TAB_VIR: tdp= new(g) VIRDEF; break;
- case TAB_JSON: tdp= new(g) JSONDEF; break;
+ case TAB_JSON:
+#if defined(BSON_SUPPORT)
+ if (Force_Bson())
+ tdp= new(g) BSONDEF;
+ else
+#endif // BSON_SUPPORT
+ tdp= new(g) JSONDEF;
+
+ break;
+#if defined(BSON_SUPPORT)
+ case TAB_BSON: tdp= new(g) BSONDEF; break;
+#endif // BSON_SUPPORT
#if defined(ZIP_SUPPORT)
- case TAB_ZIP: tdp = new(g) ZIPDEF; break;
+ case TAB_ZIP: tdp= new(g) ZIPDEF; break;
#endif // ZIP_SUPPORT
#if defined(REST_SUPPORT)
case TAB_REST: tdp= new (g) RESTDEF; break;
diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def
index a4d629fc3d1..5107de7a930 100644
--- a/storage/connect/mysql-test/connect/disabled.def
+++ b/storage/connect/mysql-test/connect/disabled.def
@@ -16,9 +16,12 @@ jdbc_postgresql : Variable settings depend on machine configuration
json_mongo_c : Need MongoDB running and its C Driver installed
json_java_2 : Need MongoDB running and its Java Driver installed
json_java_3 : Need MongoDB running and its Java Driver installed
+bson_mongo_c : Need MongoDB running and its C Driver installed
+bson_java_2 : Need MongoDB running and its Java Driver installed
+bson_java_3 : Need MongoDB running and its Java Driver installed
mongo_c : Need MongoDB running and its C Driver installed
mongo_java_2 : Need MongoDB running and its Java Driver installed
mongo_java_3 : Need MongoDB running and its Java Driver installed
tbl_thread : Bug MDEV-9844,10179,14214 03/01/2018 OB Option THREAD removed
-grant2 : Until fixed
+#bson : Development
#vcol : Different error code on different versions
diff --git a/storage/connect/mysql-test/connect/r/alter_xml.result b/storage/connect/mysql-test/connect/r/alter_xml.result
index 7cdb1e5d21c..d2f882f1287 100644
--- a/storage/connect/mysql-test/connect/r/alter_xml.result
+++ b/storage/connect/mysql-test/connect/r/alter_xml.result
@@ -54,7 +54,7 @@ line
</t1>
# NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
# Testing field option modification
-ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
+ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0;
SELECT * FROM t1;
c d
1 One
@@ -64,7 +64,7 @@ SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c` int(11) NOT NULL,
- `d` char(10) NOT NULL `FIELD_FORMAT`='@'
+ `d` char(10) NOT NULL `XPATH`='@'
) ENGINE=CONNECT DEFAULT CHARSET=latin1 `QUOTED`=1 `TABLE_TYPE`=XML `TABNAME`=t1 `OPTION_LIST`='xmlsup=domdoc,rownode=row' `HEADER`=0
SELECT * FROM t2;
line
diff --git a/storage/connect/mysql-test/connect/r/alter_xml2.result b/storage/connect/mysql-test/connect/r/alter_xml2.result
index 8eb56e3dcc3..a15be966aa8 100644
--- a/storage/connect/mysql-test/connect/r/alter_xml2.result
+++ b/storage/connect/mysql-test/connect/r/alter_xml2.result
@@ -56,7 +56,7 @@ line
</t1>
# NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
# Testing field option modification
-ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
+ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0;
SELECT * FROM t1;
c d
1 One
@@ -66,7 +66,7 @@ SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c` int(11) NOT NULL,
- `d` char(10) NOT NULL `FIELD_FORMAT`='@'
+ `d` char(10) NOT NULL `XPATH`='@'
) ENGINE=CONNECT DEFAULT CHARSET=latin1 `QUOTED`=1 `TABLE_TYPE`=XML `TABNAME`=t1 `OPTION_LIST`='xmlsup=libxml2,rownode=row' `HEADER`=0
SELECT * FROM t2;
line
diff --git a/storage/connect/mysql-test/connect/r/bson.result b/storage/connect/mysql-test/connect/r/bson.result
new file mode 100644
index 00000000000..fd15e020aac
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson.result
@@ -0,0 +1,517 @@
+#
+# Testing doc samples
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+LANG CHAR(2),
+SUBJECT CHAR(32),
+AUTHOR CHAR(64),
+TITLE CHAR(32),
+TRANSLATION CHAR(32),
+TRANSLATOR CHAR(80),
+PUBLISHER CHAR(32),
+DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB
+9782212090819 fr applications Jean-Christophe Bernadac, François Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Testing Jpath. Get the number of authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+Authors INT(2) JPATH='$.AUTHOR[#]',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+ISBN Language Subject Authors Title Translation Translator Publisher Location Year
+9782212090819 fr applications 2 Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications 1 XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Concatenates the authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe and François Bernadac and Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+#
+# Testing expanding authors
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15),
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications François Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
+SELECT * FROM t1 WHERE ISBN = '9782212090819';
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+#
+# To add an author a new table must be created
+#
+CREATE TABLE t2 (
+FIRSTNAME CHAR(32),
+LASTNAME CHAR(32))
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR';
+SELECT * FROM t2;
+FIRSTNAME LASTNAME
+William J. Pardi
+INSERT INTO t2 VALUES('Charles','Dickens');
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year
+9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+9782840825685 fr applications Charles Dickens XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
+DROP TABLE t1;
+DROP TABLE t2;
+#
+# Check the biblio file has the good format
+#
+CREATE TABLE t1
+(
+line char(255)
+)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json';
+SELECT * FROM t1;
+line
+[
+ {
+ "ISBN": "9782212090819",
+ "LANG": "fr",
+ "SUBJECT": "applications",
+ "AUTHOR": [
+ {
+ "FIRSTNAME": "Jean-Christophe",
+ "LASTNAME": "Bernadac"
+ },
+ {
+ "FIRSTNAME": "Philippe",
+ "LASTNAME": "Knab"
+ }
+ ],
+ "TITLE": "Construire une application XML",
+ "PUBLISHER": {
+ "NAME": "Eyrolles",
+ "PLACE": "Paris"
+ },
+ "DATEPUB": 1999
+ },
+ {
+ "ISBN": "9782840825685",
+ "LANG": "fr",
+ "SUBJECT": "applications",
+ "AUTHOR": [
+ {
+ "FIRSTNAME": "William J.",
+ "LASTNAME": "Pardi"
+ },
+ {
+ "FIRSTNAME": "Charles",
+ "LASTNAME": "Dickens"
+ }
+ ],
+ "TITLE": "XML en Action",
+ "TRANSLATION": "adapté de l'anglais par",
+ "TRANSLATOR": {
+ "FIRSTNAME": "James",
+ "LASTNAME": "Guerin"
+ },
+ "PUBLISHER": {
+ "NAME": "Microsoft Press",
+ "PLACE": "Paris"
+ },
+ "DATEPUB": 1999
+ }
+]
+DROP TABLE t1;
+#
+# Testing a pretty=0 file
+#
+CREATE TABLE t1
+(
+ISBN CHAR(15) NOT NULL,
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX',
+TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
+TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB',
+INDEX IX(ISBN)
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0';
+SHOW INDEX FROM t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 1 IX 1 ISBN A NULL NULL NULL XINDEX
+SELECT * FROM t1;
+ISBN Language Subject AuthorFN AuthorLN Title Translation TranslatorFN TranslatorLN Publisher Location Year
+9782212090819 fr applications Jean-Michel Bernadac Construire une application XML NULL NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications François Knab Construire une application XML NULL NULL NULL Eyrolles Paris 1999
+9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 2001
+DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref IX IX 15 const 1 Using where
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819';
+ERROR HY000: Got error 122 'Cannot write expanded column when Pretty is not 2' from CONNECT
+DROP TABLE t1;
+#
+# A file with 2 arrays
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer+Food+Food+Car 69.00
+Joe 4 Beer+Beer+Food+Food+Beer 83.00
+Joe 5 Beer+Food 26.00
+Beth 3 Beer 16.00
+Beth 4 Food+Beer 32.00
+Beth 5 Food+Beer 32.00
+Janet 3 Car+Food+Beer 55.00
+Janet 4 Car 17.00
+Janet 5 Beer+Car+Beer+Food 57.00
+DROP TABLE t1;
+#
+# Now it can be fully expanded
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 3 Beer 16.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+Janet 4 Car 17.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+DROP TABLE t1;
+#
+# A table showing many calculated results
+#
+CREATE TABLE t1 (
+WHO CHAR(12) NOT NULL,
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',
+SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',
+SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
+AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT',
+SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT',
+AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT',
+AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT',
+AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE
+Joe 3, 4, 5 69.00+83.00+26.00 178.00 17.25+16.60+13.00 46.85 59.33 15.62 16.18
+Beth 3, 4, 5 16.00+32.00+32.00 80.00 16.00+16.00+16.00 48.00 26.67 16.00 16.00
+Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.12
+DROP TABLE t1;
+#
+# Expand expense in 3 one week tables
+#
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[0].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t2;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[1].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t3;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[2].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t4;
+WHO WEEK WHAT AMOUNT
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+#
+# The expanded table is made as a TBL table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32),
+AMOUNT DOUBLE(8,2))
+ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4';
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+DROP TABLE t1, t2, t3, t4;
+#
+# Three partial JSON tables
+#
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json';
+SELECT * FROM t2;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json';
+SELECT * FROM t3;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json';
+SELECT * FROM t4;
+WHO WEEK WHAT AMOUNT
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+#
+# The complete table can be a multiple JSON table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1;
+SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
+WHO WEEK WHAT AMOUNT
+Beth 3 Beer 16.00
+Beth 4 Beer 15.00
+Beth 4 Food 17.00
+Beth 5 Beer 20.00
+Beth 5 Food 12.00
+Janet 3 Beer 18.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 4 Car 17.00
+Janet 5 Beer 14.00
+Janet 5 Beer 19.00
+Janet 5 Car 12.00
+Janet 5 Food 12.00
+Joe 3 Beer 18.00
+Joe 3 Car 20.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 4 Beer 14.00
+Joe 4 Beer 16.00
+Joe 4 Beer 19.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+DROP TABLE t1;
+#
+# Or also a partition JSON table
+#
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json';
+ALTER TABLE t1
+PARTITION BY LIST COLUMNS(WEEK) (
+PARTITION `3` VALUES IN(3),
+PARTITION `4` VALUES IN(4),
+PARTITION `5` VALUES IN(5));
+Warnings:
+Warning 1105 Data repartition in 3 is unchecked
+Warning 1105 Data repartition in 4 is unchecked
+Warning 1105 Data repartition in 5 is unchecked
+SHOW WARNINGS;
+Level Code Message
+Warning 1105 Data repartition in 3 is unchecked
+Warning 1105 Data repartition in 4 is unchecked
+Warning 1105 Data repartition in 5 is unchecked
+SELECT * FROM t1;
+WHO WEEK WHAT AMOUNT
+Joe 3 Beer 18.00
+Joe 3 Food 12.00
+Joe 3 Food 19.00
+Joe 3 Car 20.00
+Beth 3 Beer 16.00
+Janet 3 Car 19.00
+Janet 3 Food 18.00
+Janet 3 Beer 18.00
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+Joe 5 Beer 14.00
+Joe 5 Food 12.00
+Beth 5 Food 12.00
+Beth 5 Beer 20.00
+Janet 5 Beer 14.00
+Janet 5 Car 12.00
+Janet 5 Beer 19.00
+Janet 5 Food 12.00
+SELECT * FROM t1 WHERE WEEK = 4;
+WHO WEEK WHAT AMOUNT
+Joe 4 Beer 19.00
+Joe 4 Beer 16.00
+Joe 4 Food 17.00
+Joe 4 Food 17.00
+Joe 4 Beer 14.00
+Beth 4 Food 17.00
+Beth 4 Beer 15.00
+Janet 4 Car 17.00
+DROP TABLE t1, t2, t3, t4;
diff --git a/storage/connect/mysql-test/connect/r/bson_java_2.result b/storage/connect/mysql-test/connect/r/bson_java_2.result
new file mode 100644
index 00000000000..1c21fc7c54f
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson_java_2.result
@@ -0,0 +1,385 @@
+set connect_enable_mongo=1;
+set connect_json_all_path=0;
+#
+# Test the MONGO table type
+#
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096
+OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8;
+SELECT * from t1 limit 3;
+Document
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.856077,40.848447],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":"2014-03-03T00:00:00.000Z"},"grade":"A","score":2},{"date":{"$date":"2013-09-11T00:00:00.000Z"},"grade":"A","score":6},{"date":{"$date":"2013-01-24T00:00:00.000Z"},"grade":"A","score":10},{"date":{"$date":"2011-11-23T00:00:00.000Z"},"grade":"A","score":9},{"date":{"$date":"2011-03-10T00:00:00.000Z"},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.961704,40.662942],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":"2014-12-30T00:00:00.000Z"},"grade":"A","score":8},{"date":{"$date":"2014-07-01T00:00:00.000Z"},"grade":"B","score":23},{"date":{"$date":"2013-04-30T00:00:00.000Z"},"grade":"A","score":12},{"date":{"$date":"2012-05-08T00:00:00.000Z"},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.98513559999999,40.7676919],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":"2014-09-06T00:00:00.000Z"},"grade":"A","score":2},{"date":{"$date":"2013-07-22T00:00:00.000Z"},"grade":"A","score":11},{"date":{"$date":"2012-07-31T00:00:00.000Z"},"grade":"A","score":12},{"date":{"$date":"2011-12-29T00:00:00.000Z"},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"}
+DROP TABLE t1;
+#
+# Test catfunc
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns
+OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * from t1;
+Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
+_id 1 CHAR 24 24 0 0 _id
+address_building 1 CHAR 10 10 0 0 address.building
+address_coord 1 CHAR 1024 1024 0 1 address.coord
+address_street 1 CHAR 38 38 0 0 address.street
+address_zipcode 1 CHAR 5 5 0 0 address.zipcode
+borough 1 CHAR 13 13 0 0
+cuisine 1 CHAR 64 64 0 0
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
+grades_grade 1 CHAR 14 14 0 1 grades.0.grade
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
+name 1 CHAR 98 98 0 0
+restaurant_id 1 CHAR 8 8 0 0
+DROP TABLE t1;
+#
+# Explicit columns
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(255) NOT NULL,
+cuisine VARCHAR(255) NOT NULL,
+borough VARCHAR(255) NOT NULL,
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=2';
+SELECT * FROM t1 LIMIT 10;
+_id name cuisine borough restaurant_id
+58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445
+58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340
+58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841
+58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018
+58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068
+58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151
+58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442
+58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483
+58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649
+58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731
+DROP TABLE t1;
+#
+# Test discovery
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+OPTION_LIST='Depth=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `cuisine` char(64) NOT NULL,
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096
+SELECT * FROM t1 LIMIT 5;
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068
+DROP TABLE t1;
+#
+# Dropping a column
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=2,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 10;
+_id address borough cuisine name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+DROP TABLE t1;
+#
+# Specifying Jpath
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(64) NOT NULL,
+cuisine CHAR(200) NOT NULL,
+borough CHAR(16) NOT NULL,
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 1;
+_id 58ada47de5a51ddfcd5ed51c
+name Morris Park Bake Shop
+cuisine Bakery
+borough Bronx
+street Morris Park Ave
+building 1007
+zipcode 10462
+grade A
+score 2
+date 1970-01-01
+restaurant_id 30075445
+SELECT name, street, score, date FROM t1 LIMIT 5;
+name street score date
+Morris Park Bake Shop Morris Park Ave 2 1970-01-01
+Wendy'S Flatbush Avenue 8 1970-01-01
+Dj Reynolds Pub And Restaurant West 57 Street 2 1970-01-01
+Riviera Caterer Stillwell Avenue 5 1970-01-01
+Tov Kosher Kitchen 63 Road 20 1970-01-01
+SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10;
+name cuisine borough
+Morris Park Bake Shop Bakery Bronx
+Wendy'S Hamburgers Brooklyn
+Dj Reynolds Pub And Restaurant Irish Manhattan
+Riviera Caterer American Brooklyn
+Kosher Island Jewish/Kosher Staten Island
+Wilken'S Fine Food Delicatessen Brooklyn
+Regina Caterers American Brooklyn
+Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn
+Wild Asia American Bronx
+C & C Catering Service American Brooklyn
+SELECT COUNT(*) FROM t1 WHERE grade = 'A';
+COUNT(*)
+20687
+SELECT * FROM t1 WHERE cuisine = 'English';
+_id name cuisine borough street building zipcode grade score date restaurant_id
+58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 1970-01-01 40391531
+58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 1970-01-01 40392496
+58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 1970-01-01 40816202
+58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 1970-01-01 41022701
+58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 1970-01-01 41076583
+58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 1970-01-01 41443706
+58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 1970-01-01 41448559
+58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 1970-01-01 41513545
+58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 1970-01-01 41557377
+58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 1970-01-01 41625263
+58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 1970-01-01 41633327
+58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 1970-01-01 41660253
+58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 1970-01-01 41664704
+58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 1970-01-01 41690534
+58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 1970-01-01 50000290
+58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 1970-01-01 50011097
+SELECT * FROM t1 WHERE score = building;
+_id name cuisine borough street building zipcode grade score date restaurant_id
+DROP TABLE t1;
+#
+# Specifying Filter
+#
+CREATE TABLE t1 (
+_id CHAR(24) NOT NULL,
+name CHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+restaurant_id CHAR(8) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT name FROM t1 WHERE borough = 'Queens';
+name
+La Baraka Restaurant
+Air France Lounge
+Tournesol
+Winegasm
+Cafe Henri
+Bistro 33
+Domaine Wine Bar
+Cafe Triskell
+Cannelle Patisserie
+La Vie
+Dirty Pierres Bistro
+Fresca La Crepe
+Bliss 46 Bistro
+Bear
+Cuisine By Claudette
+Paris Baguette
+The Baroness Bar
+Francis Cafe
+Madame Sou Sou
+Crepe 'N' Tearia
+Aperitif Bayside Llc
+DROP TABLE t1;
+#
+# Testing pipeline
+#
+CREATE TABLE t1 (
+name VARCHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+date DATETIME NOT NULL,
+grade CHAR(1) NOT NULL,
+score INT(4) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}'
+OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 10;
+name borough date grade score
+Tout Va Bien Manhattan 1970-01-01 01:33:34 B 15
+Tout Va Bien Manhattan 1970-01-01 01:33:34 A 13
+Tout Va Bien Manhattan 1970-01-01 01:33:33 C 36
+Tout Va Bien Manhattan 1970-01-01 01:33:33 B 22
+Tout Va Bien Manhattan 1970-01-01 01:33:32 C 36
+Tout Va Bien Manhattan 1970-01-01 01:33:32 C 7
+La Grenouille Manhattan 1970-01-01 01:33:34 A 10
+La Grenouille Manhattan 1970-01-01 01:33:33 A 9
+La Grenouille Manhattan 1970-01-01 01:33:32 A 13
+Le Perigord Manhattan 1970-01-01 01:33:34 B 14
+SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx';
+name grade score date
+Bistro Sk A 10 1970-01-01 01:33:34
+Bistro Sk A 12 1970-01-01 01:33:34
+Bistro Sk B 18 1970-01-01 01:33:33
+DROP TABLE t1;
+#
+# try level 2 discovery
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096
+OPTION_LIST='Driver=Java,level=2,version=2';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096
+SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
+name borough address_street score
+Le Gamin Brooklyn Vanderbilt Avenue 24
+Bistro 33 Queens Ditmars Boulevard 15
+Dirty Pierres Bistro Queens Station Square 22
+Santos Anne Brooklyn Union Avenue 26
+Le Paddock Brooklyn Prospect Avenue 17
+La Crepe Et La Vie Brooklyn Foster Avenue 24
+Francis Cafe Queens Ditmars Boulevard 19
+DROP TABLE t1;
+#
+# try CRUD operations
+#
+false
+CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64))
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+DELETE FROM t1;
+INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three');
+SELECT * FROM t1;
+_id msg
+0 NULL
+1 One
+2 Two
+3 Three
+UPDATE t1 SET msg = 'Deux' WHERE _id = 2;
+DELETE FROM t1 WHERE msg IS NULL;
+SELECT * FROM t1;
+_id msg
+1 One
+2 Deux
+3 Three
+DELETE FROM t1;
+DROP TABLE t1;
+true
+#
+# List states whose population is equal or more than 10 millions
+#
+false
+CREATE TABLE t1 (
+_id char(5) NOT NULL,
+city char(16) NOT NULL,
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
+pop int(11) NOT NULL,
+state char(2) NOT NULL)
+ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities'
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET='utf8';
+# Using SQL for grouping
+SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC;
+state totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+# Using a pipeline for grouping
+CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}'
+OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1;
+_id totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+true
+#
+# Test making array
+#
+CREATE TABLE t1 (
+_id int(4) NOT NULL,
+item CHAR(8) NOT NULL,
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+INSERT INTO t1 VALUES
+(1,'journal',87,45,63,12,78),
+(2,'notebook',123,456,789,NULL,NULL),
+(3,'paper',5,7,3,8,NULL),
+(4,'planner',25,71,NULL,44,27),
+(5,'postcard',5,7,3,8,NULL);
+SELECT * FROM t1;
+_id item prices_0 prices_1 prices_2 prices_3 prices_4
+1 journal 87 45 63 12 78
+2 notebook 123 456 789 NULL NULL
+3 paper 5 7 3 8 NULL
+4 planner 25 71 NULL 44 27
+5 postcard 5 7 3 8 NULL
+DROP TABLE t1;
+#
+# Test array aggregation
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}'
+OPTION_LIST='Driver=Java,Version=2,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1;
+item total average
+journal 285 57.00
+notebook 1368 456.00
+paper 23 5.75
+planner 167 41.75
+postcard 23 5.75
+DROP TABLE t1;
+true
+set connect_enable_mongo=0;
diff --git a/storage/connect/mysql-test/connect/r/bson_java_3.result b/storage/connect/mysql-test/connect/r/bson_java_3.result
new file mode 100644
index 00000000000..d198ee3faa4
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson_java_3.result
@@ -0,0 +1,385 @@
+set connect_enable_mongo=1;
+set connect_json_all_path=0;
+#
+# Test the MONGO table type
+#
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096
+OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8;
+SELECT * from t1 limit 3;
+Document
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.856077,40.848447],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":1393804800000},"grade":"A","score":2},{"date":{"$date":1378857600000},"grade":"A","score":6},{"date":{"$date":1358985600000},"grade":"A","score":10},{"date":{"$date":1322006400000},"grade":"A","score":9},{"date":{"$date":1299715200000},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.961704,40.662942],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":1419897600000},"grade":"A","score":8},{"date":{"$date":1404172800000},"grade":"B","score":23},{"date":{"$date":1367280000000},"grade":"A","score":12},{"date":{"$date":1336435200000},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.98513559999999,40.7676919],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":1409961600000},"grade":"A","score":2},{"date":{"$date":1374451200000},"grade":"A","score":11},{"date":{"$date":1343692800000},"grade":"A","score":12},{"date":{"$date":1325116800000},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"}
+DROP TABLE t1;
+#
+# Test catfunc
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns
+OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * from t1;
+Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
+_id 1 CHAR 24 24 0 0 _id
+address_building 1 CHAR 10 10 0 0 address.building
+address_coord 1 CHAR 1024 1024 0 1 address.coord
+address_street 1 CHAR 38 38 0 0 address.street
+address_zipcode 1 CHAR 5 5 0 0 address.zipcode
+borough 1 CHAR 13 13 0 0
+cuisine 1 CHAR 64 64 0 0
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
+grades_grade 1 CHAR 14 14 0 1 grades.0.grade
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
+name 1 CHAR 98 98 0 0
+restaurant_id 1 CHAR 8 8 0 0
+DROP TABLE t1;
+#
+# Explicit columns
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(255) NOT NULL,
+cuisine VARCHAR(255) NOT NULL,
+borough VARCHAR(255) NOT NULL,
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=3';
+SELECT * FROM t1 LIMIT 10;
+_id name cuisine borough restaurant_id
+58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445
+58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340
+58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841
+58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018
+58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068
+58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151
+58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442
+58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483
+58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649
+58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731
+DROP TABLE t1;
+#
+# Test discovery
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `cuisine` char(64) NOT NULL,
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096
+SELECT * FROM t1 LIMIT 5;
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
+DROP TABLE t1;
+#
+# Dropping a column
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 10;
+_id address borough cuisine name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+DROP TABLE t1;
+#
+# Specifying Jpath
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(64) NOT NULL,
+cuisine CHAR(200) NOT NULL,
+borough CHAR(16) NOT NULL,
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 1;
+_id 58ada47de5a51ddfcd5ed51c
+name Morris Park Bake Shop
+cuisine Bakery
+borough Bronx
+street Morris Park Ave
+building 1007
+zipcode 10462
+grade A
+score 2
+date 2014-03-03
+restaurant_id 30075445
+SELECT name, street, score, date FROM t1 LIMIT 5;
+name street score date
+Morris Park Bake Shop Morris Park Ave 2 2014-03-03
+Wendy'S Flatbush Avenue 8 2014-12-30
+Dj Reynolds Pub And Restaurant West 57 Street 2 2014-09-06
+Riviera Caterer Stillwell Avenue 5 2014-06-10
+Tov Kosher Kitchen 63 Road 20 2014-11-24
+SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10;
+name cuisine borough
+Morris Park Bake Shop Bakery Bronx
+Wendy'S Hamburgers Brooklyn
+Dj Reynolds Pub And Restaurant Irish Manhattan
+Riviera Caterer American Brooklyn
+Kosher Island Jewish/Kosher Staten Island
+Wilken'S Fine Food Delicatessen Brooklyn
+Regina Caterers American Brooklyn
+Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn
+Wild Asia American Bronx
+C & C Catering Service American Brooklyn
+SELECT COUNT(*) FROM t1 WHERE grade = 'A';
+COUNT(*)
+20687
+SELECT * FROM t1 WHERE cuisine = 'English';
+_id name cuisine borough street building zipcode grade score date restaurant_id
+58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 2014-10-23 40391531
+58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 2014-08-14 40392496
+58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 2014-09-29 40816202
+58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 2014-02-11 41022701
+58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 2014-10-08 41076583
+58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 2014-06-09 41443706
+58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 2014-10-22 41448559
+58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 2014-07-26 41513545
+58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 2014-12-03 41557377
+58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 2015-01-16 41625263
+58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 2014-08-27 41633327
+58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 2014-06-03 41660253
+58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 2014-08-07 41664704
+58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 2014-12-27 41690534
+58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 2014-10-28 50000290
+58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 2014-08-18 50011097
+SELECT * FROM t1 WHERE score = building;
+_id name cuisine borough street building zipcode grade score date restaurant_id
+DROP TABLE t1;
+#
+# Specifying Filter
+#
+CREATE TABLE t1 (
+_id CHAR(24) NOT NULL,
+name CHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+restaurant_id CHAR(8) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT name FROM t1 WHERE borough = 'Queens';
+name
+La Baraka Restaurant
+Air France Lounge
+Tournesol
+Winegasm
+Cafe Henri
+Bistro 33
+Domaine Wine Bar
+Cafe Triskell
+Cannelle Patisserie
+La Vie
+Dirty Pierres Bistro
+Fresca La Crepe
+Bliss 46 Bistro
+Bear
+Cuisine By Claudette
+Paris Baguette
+The Baroness Bar
+Francis Cafe
+Madame Sou Sou
+Crepe 'N' Tearia
+Aperitif Bayside Llc
+DROP TABLE t1;
+#
+# Testing pipeline
+#
+CREATE TABLE t1 (
+name VARCHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+date DATETIME NOT NULL,
+grade CHAR(1) NOT NULL,
+score INT(4) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}'
+OPTION_LIST='Driver=Java,Version=3,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1 LIMIT 10;
+name borough date grade score
+Tout Va Bien Manhattan 2014-11-10 01:00:00 B 15
+Tout Va Bien Manhattan 2014-04-03 02:00:00 A 13
+Tout Va Bien Manhattan 2013-07-17 02:00:00 C 36
+Tout Va Bien Manhattan 2013-02-06 01:00:00 B 22
+Tout Va Bien Manhattan 2012-07-16 02:00:00 C 36
+Tout Va Bien Manhattan 2012-03-08 01:00:00 C 7
+La Grenouille Manhattan 2014-04-09 02:00:00 A 10
+La Grenouille Manhattan 2013-03-05 01:00:00 A 9
+La Grenouille Manhattan 2012-02-02 01:00:00 A 13
+Le Perigord Manhattan 2014-07-14 02:00:00 B 14
+SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx';
+name grade score date
+Bistro Sk A 10 2014-11-21 01:00:00
+Bistro Sk A 12 2014-02-19 01:00:00
+Bistro Sk B 18 2013-06-12 02:00:00
+DROP TABLE t1;
+#
+# try level 2 discovery
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096
+OPTION_LIST='Driver=Java,level=2,version=3';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096
+SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
+name borough address_street score
+Le Gamin Brooklyn Vanderbilt Avenue 24
+Bistro 33 Queens Ditmars Boulevard 15
+Dirty Pierres Bistro Queens Station Square 22
+Santos Anne Brooklyn Union Avenue 26
+Le Paddock Brooklyn Prospect Avenue 17
+La Crepe Et La Vie Brooklyn Foster Avenue 24
+Francis Cafe Queens Ditmars Boulevard 19
+DROP TABLE t1;
+#
+# try CRUD operations
+#
+false
+CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64))
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+DELETE FROM t1;
+INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three');
+SELECT * FROM t1;
+_id msg
+0 NULL
+1 One
+2 Two
+3 Three
+UPDATE t1 SET msg = 'Deux' WHERE _id = 2;
+DELETE FROM t1 WHERE msg IS NULL;
+SELECT * FROM t1;
+_id msg
+1 One
+2 Deux
+3 Three
+DELETE FROM t1;
+DROP TABLE t1;
+true
+#
+# List states whose population is equal to or greater than 10 million
+#
+false
+CREATE TABLE t1 (
+_id char(5) NOT NULL,
+city char(16) NOT NULL,
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
+pop int(11) NOT NULL,
+state char(2) NOT NULL)
+ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities'
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET='utf8';
+# Using SQL for grouping
+SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC;
+state totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+# Using a pipeline for grouping
+CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}'
+OPTION_LIST='Driver=Java,Version=3,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1;
+_id totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+true
+#
+# Test making array
+#
+CREATE TABLE t1 (
+_id int(4) NOT NULL,
+item CHAR(8) NOT NULL,
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8
+OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+INSERT INTO t1 VALUES
+(1,'journal',87,45,63,12,78),
+(2,'notebook',123,456,789,NULL,NULL),
+(3,'paper',5,7,3,8,NULL),
+(4,'planner',25,71,NULL,44,27),
+(5,'postcard',5,7,3,8,NULL);
+SELECT * FROM t1;
+_id item prices_0 prices_1 prices_2 prices_3 prices_4
+1 journal 87 45 63 12 78
+2 notebook 123 456 789 NULL NULL
+3 paper 5 7 3 8 NULL
+4 planner 25 71 NULL 44 27
+5 postcard 5 7 3 8 NULL
+DROP TABLE t1;
+#
+# Test array aggregation
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}'
+OPTION_LIST='Driver=Java,Version=3,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=4096;
+SELECT * FROM t1;
+item total average
+journal 285 57.00
+notebook 1368 456.00
+paper 23 5.75
+planner 167 41.75
+postcard 23 5.75
+DROP TABLE t1;
+true
+set connect_enable_mongo=0;
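The two population listings above are equivalent in result but not in where the work happens: the first table exposes one row per cities document and lets MariaDB evaluate GROUP BY ... HAVING, while the second hands MongoDB a $group/$match/$sort pipeline so only the already-aggregated state totals are returned. A minimal sketch of the pushed-down variant, assuming the same local server, the Java driver (Version=3), and a cities collection; the table name big_states is only illustrative:

# Group by state inside MongoDB and keep only states with a total
# population of at least 10 million, sorted by descending population.
CREATE TABLE big_states (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}'
OPTION_LIST='Driver=Java,Version=3,Pipeline=1'
CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * FROM big_states;
DROP TABLE big_states;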
diff --git a/storage/connect/mysql-test/connect/r/bson_mongo_c.result b/storage/connect/mysql-test/connect/r/bson_mongo_c.result
new file mode 100644
index 00000000000..83bf7cd1974
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson_mongo_c.result
@@ -0,0 +1,385 @@
+set connect_enable_mongo=1;
+set connect_json_all_path=0;
+#
+# Test the MONGO table type
+#
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=1024
+OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8;
+SELECT * from t1 limit 3;
+Document
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.8560769999999991,40.8484470000000002],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":1393804800000},"grade":"A","score":2},{"date":{"$date":1378857600000},"grade":"A","score":6},{"date":{"$date":1358985600000},"grade":"A","score":10},{"date":{"$date":1322006400000},"grade":"A","score":9},{"date":{"$date":1299715200000},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.9617039999999974,40.6629420000000010],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":1419897600000},"grade":"A","score":8},{"date":{"$date":1404172800000},"grade":"B","score":23},{"date":{"$date":1367280000000},"grade":"A","score":12},{"date":{"$date":1336435200000},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"}
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.9851355999999925,40.7676919000000026],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":1409961600000},"grade":"A","score":2},{"date":{"$date":1374451200000},"grade":"A","score":11},{"date":{"$date":1343692800000},"grade":"A","score":12},{"date":{"$date":1325116800000},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"}
+DROP TABLE t1;
+#
+# Test catfunc
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns
+OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * from t1;
+Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
+_id 1 CHAR 24 24 0 0 _id
+address_building 1 CHAR 10 10 0 0 address.building
+address_coord 1 CHAR 1024 1024 0 1 address.coord
+address_street 1 CHAR 38 38 0 0 address.street
+address_zipcode 1 CHAR 5 5 0 0 address.zipcode
+borough 1 CHAR 13 13 0 0
+cuisine 1 CHAR 64 64 0 0
+grades_date 1 CHAR 1024 1024 0 1 grades.0.date
+grades_grade 1 CHAR 14 14 0 1 grades.0.grade
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
+name 1 CHAR 98 98 0 0
+restaurant_id 1 CHAR 8 8 0 0
+DROP TABLE t1;
+#
+# Explicit columns
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(255) NOT NULL,
+cuisine VARCHAR(255) NOT NULL,
+borough VARCHAR(255) NOT NULL,
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8
+OPTION_LIST='Driver=C,Version=0';
+SELECT * FROM t1 LIMIT 10;
+_id name cuisine borough restaurant_id
+58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445
+58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340
+58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841
+58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018
+58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068
+58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151
+58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442
+58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483
+58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649
+58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731
+DROP TABLE t1;
+#
+# Test discovery
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+OPTION_LIST='Depth=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `cuisine` char(64) NOT NULL,
+ `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024
+SELECT * FROM t1 LIMIT 5;
+_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 -73.8560769999999991, 40.8484470000000002 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.9617039999999974, 40.6629420000000010 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.9851355999999925, 40.7676919000000026 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.9824199999999905, 40.5795049999999975 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601151999999956, 40.7311739000000017 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
+DROP TABLE t1;
+#
+# Dropping a column
+#
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1 LIMIT 10;
+_id address borough cuisine name restaurant_id
+58ada47de5a51ddfcd5ed51c 1007 (-73.8560769999999991, 40.8484470000000002) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.9617039999999974, 40.6629420000000010) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.9851355999999925, 40.7676919000000026) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.9824199999999905, 40.5795049999999975) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601151999999956, 40.7311739000000017) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803826999999984, 40.7643124000000014) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286000000026, 40.6119571999999991) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068505999999985, 40.6199033999999983) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.0052889999999906, 40.6288860000000014) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482608999999940, 40.6408271000000028) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+DROP TABLE t1;
+#
+# Specifying Jpath
+#
+CREATE TABLE t1 (
+_id VARCHAR(24) NOT NULL,
+name VARCHAR(64) NOT NULL,
+cuisine CHAR(200) NOT NULL,
+borough CHAR(16) NOT NULL,
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
+restaurant_id VARCHAR(255) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1 LIMIT 1;
+_id 58ada47de5a51ddfcd5ed51c
+name Morris Park Bake Shop
+cuisine Bakery
+borough Bronx
+street Morris Park Ave
+building 1007
+zipcode 10462
+grade A
+score 2
+date 2014-03-03
+restaurant_id 30075445
+SELECT name, street, score, date FROM t1 LIMIT 5;
+name street score date
+Morris Park Bake Shop Morris Park Ave 2 2014-03-03
+Wendy'S Flatbush Avenue 8 2014-12-30
+Dj Reynolds Pub And Restaurant West 57 Street 2 2014-09-06
+Riviera Caterer Stillwell Avenue 5 2014-06-10
+Tov Kosher Kitchen 63 Road 20 2014-11-24
+SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10;
+name cuisine borough
+Morris Park Bake Shop Bakery Bronx
+Wendy'S Hamburgers Brooklyn
+Dj Reynolds Pub And Restaurant Irish Manhattan
+Riviera Caterer American Brooklyn
+Kosher Island Jewish/Kosher Staten Island
+Wilken'S Fine Food Delicatessen Brooklyn
+Regina Caterers American Brooklyn
+Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn
+Wild Asia American Bronx
+C & C Catering Service American Brooklyn
+SELECT COUNT(*) FROM t1 WHERE grade = 'A';
+COUNT(*)
+20687
+SELECT * FROM t1 WHERE cuisine = 'English';
+_id name cuisine borough street building zipcode grade score date restaurant_id
+58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 2014-10-23 40391531
+58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 2014-08-14 40392496
+58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 2014-09-29 40816202
+58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 2014-02-11 41022701
+58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 2014-10-08 41076583
+58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 2014-06-09 41443706
+58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 2014-10-22 41448559
+58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 2014-07-26 41513545
+58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 2014-12-03 41557377
+58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 2015-01-16 41625263
+58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 2014-08-27 41633327
+58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 2014-06-03 41660253
+58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 2014-08-07 41664704
+58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 2014-12-27 41690534
+58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 2014-10-28 50000290
+58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 2014-08-18 50011097
+SELECT * FROM t1 WHERE score = building;
+_id name cuisine borough street building zipcode grade score date restaurant_id
+DROP TABLE t1;
+#
+# Specifying Filter
+#
+CREATE TABLE t1 (
+_id CHAR(24) NOT NULL,
+name CHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+restaurant_id CHAR(8) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT name FROM t1 WHERE borough = 'Queens';
+name
+La Baraka Restaurant
+Air France Lounge
+Tournesol
+Winegasm
+Cafe Henri
+Bistro 33
+Domaine Wine Bar
+Cafe Triskell
+Cannelle Patisserie
+La Vie
+Dirty Pierres Bistro
+Fresca La Crepe
+Bliss 46 Bistro
+Bear
+Cuisine By Claudette
+Paris Baguette
+The Baroness Bar
+Francis Cafe
+Madame Sou Sou
+Crepe 'N' Tearia
+Aperitif Bayside Llc
+DROP TABLE t1;
+#
+# Testing pipeline
+#
+CREATE TABLE t1 (
+name VARCHAR(64) NOT NULL,
+borough CHAR(16) NOT NULL,
+date DATETIME NOT NULL,
+grade CHAR(1) NOT NULL,
+score INT(4) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}'
+OPTION_LIST='Driver=C,Version=0,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1 LIMIT 10;
+name borough date grade score
+Tout Va Bien Manhattan 2014-11-10 01:00:00 B 15
+Tout Va Bien Manhattan 2014-04-03 02:00:00 A 13
+Tout Va Bien Manhattan 2013-07-17 02:00:00 C 36
+Tout Va Bien Manhattan 2013-02-06 01:00:00 B 22
+Tout Va Bien Manhattan 2012-07-16 02:00:00 C 36
+Tout Va Bien Manhattan 2012-03-08 01:00:00 C 7
+La Grenouille Manhattan 2014-04-09 02:00:00 A 10
+La Grenouille Manhattan 2013-03-05 01:00:00 A 9
+La Grenouille Manhattan 2012-02-02 01:00:00 A 13
+Le Perigord Manhattan 2014-07-14 02:00:00 B 14
+SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx';
+name grade score date
+Bistro Sk A 10 2014-11-21 01:00:00
+Bistro Sk A 12 2014-02-19 01:00:00
+Bistro Sk B 18 2013-06-12 02:00:00
+DROP TABLE t1;
+#
+# try level 2 discovery
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants
+FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}'
+COLIST='{"projection":{"cuisine":0}}' CONNECTION='mongodb://localhost:27017' LRECL=1024
+OPTION_LIST='Driver=C,level=2,version=0';
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `_id` char(24) NOT NULL `JPATH`='_id',
+ `address_building` char(10) NOT NULL `JPATH`='address.building',
+ `address_coord` double(21,16) DEFAULT NULL `JPATH`='address.coord.0',
+ `address_street` char(38) NOT NULL `JPATH`='address.street',
+ `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode',
+ `borough` char(13) NOT NULL,
+ `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
+ `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `name` char(98) NOT NULL,
+ `restaurant_id` char(8) NOT NULL
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024
+SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
+name borough address_street score
+Le Gamin Brooklyn Vanderbilt Avenue 24
+Bistro 33 Queens Ditmars Boulevard 15
+Dirty Pierres Bistro Queens Station Square 22
+Santos Anne Brooklyn Union Avenue 26
+Le Paddock Brooklyn Prospect Avenue 17
+La Crepe Et La Vie Brooklyn Foster Avenue 24
+Francis Cafe Queens Ditmars Boulevard 19
+DROP TABLE t1;
+#
+# try CRUD operations
+#
+false
+CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64))
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+DELETE FROM t1;
+INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three');
+SELECT * FROM t1;
+_id msg
+0 NULL
+1 One
+2 Two
+3 Three
+UPDATE t1 SET msg = 'Deux' WHERE _id = 2;
+DELETE FROM t1 WHERE msg IS NULL;
+SELECT * FROM t1;
+_id msg
+1 One
+2 Deux
+3 Three
+DELETE FROM t1;
+DROP TABLE t1;
+true
+#
+# List states whose population is equal to or greater than 10 million
+#
+false
+CREATE TABLE t1 (
+_id char(5) NOT NULL,
+city char(16) NOT NULL,
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
+pop int(11) NOT NULL,
+state char(2) NOT NULL)
+ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities'
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET='utf8';
+# Using SQL for grouping
+SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC;
+state totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+# Using a pipeline for grouping
+CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL)
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8
+COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}'
+OPTION_LIST='Driver=C,Version=0,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1;
+_id totalPop
+CA 29754890
+NY 17990402
+TX 16984601
+FL 12686644
+PA 11881643
+IL 11427576
+OH 10846517
+DROP TABLE t1;
+true
+#
+# Test making array
+#
+CREATE TABLE t1 (
+_id int(4) NOT NULL,
+item CHAR(8) NOT NULL,
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8
+OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+INSERT INTO t1 VALUES
+(1,'journal',87,45,63,12,78),
+(2,'notebook',123,456,789,NULL,NULL),
+(3,'paper',5,7,3,8,NULL),
+(4,'planner',25,71,NULL,44,27),
+(5,'postcard',5,7,3,8,NULL);
+SELECT * FROM t1;
+_id item prices_0 prices_1 prices_2 prices_3 prices_4
+1 journal 87 45 63 12 78
+2 notebook 123 456 789 NULL NULL
+3 paper 5 7 3 8 NULL
+4 planner 25 71 44 27 NULL
+5 postcard 5 7 3 8 NULL
+DROP TABLE t1;
+#
+# Test array aggregation
+#
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll'
+COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}'
+OPTION_LIST='Driver=C,Version=0,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=1024;
+SELECT * FROM t1;
+item total average
+journal 285 57.00
+notebook 1368 456.00
+paper 23 5.75
+planner 167 41.75
+postcard 23 5.75
+DROP TABLE t1;
+true
+set connect_enable_mongo=0;
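The C-driver run repeats the Java scenarios with three visible differences: OPTION_LIST selects Driver=C,Version=0, LRECL drops to 1024, and column exclusions in COLIST are wrapped in a "projection" object instead of being given as a bare document. A minimal sketch of the dropped-column case for the C driver, assuming a CONNECT build with the MongoDB C driver and the restaurants collection loaded; the table name restaurants_c is only illustrative:

# Same restaurants mapping as above, read through the MongoDB C driver;
# the projection strips the grades array before documents reach CONNECT.
CREATE TABLE restaurants_c
ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8
COLIST='{"projection":{"grades":0}}'
OPTION_LIST='Driver=C,Version=0,level=0'
CONNECTION='mongodb://localhost:27017' LRECL=1024;
SELECT * FROM restaurants_c LIMIT 10;
DROP TABLE restaurants_c;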
diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result b/storage/connect/mysql-test/connect/r/bson_udf.result
new file mode 100644
index 00000000000..a0b93f2e547
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/bson_udf.result
@@ -0,0 +1,708 @@
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=VIR BLOCK_SIZE=5;
+#
+# Test UDF's with constant arguments
+#
+SELECT BsonValue(56, 3.1416, 'foo', NULL);
+ERROR HY000: Can't initialize function 'bsonvalue'; Cannot accept more than 1 argument
+SELECT BsonValue(3.1416);
+BsonValue(3.1416)
+3.1416
+SELECT BsonValue(-80);
+BsonValue(-80)
+-80
+SELECT BsonValue('foo');
+BsonValue('foo')
+foo
+SELECT BsonValue(9223372036854775807);
+BsonValue(9223372036854775807)
+9223372036854775807
+SELECT BsonValue(NULL);
+BsonValue(NULL)
+null
+SELECT BsonValue(TRUE);
+BsonValue(TRUE)
+true
+SELECT BsonValue(FALSE);
+BsonValue(FALSE)
+false
+SELECT BsonValue();
+BsonValue()
+null
+SELECT BsonValue('[11, 22, 33]' json_) FROM t1;
+BsonValue('[11, 22, 33]' json_)
+[11,22,33]
+[11,22,33]
+[11,22,33]
+[11,22,33]
+[11,22,33]
+SELECT Bson_Make_Array();
+Bson_Make_Array()
+[]
+SELECT Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL);
+Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL)
+[56,3.1416,"My name is \"Foo\"",null]
+SELECT Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE);
+Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE)
+[[56,3.1416,"foo"],true]
+SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL)) Array;
+ERROR HY000: Can't initialize function 'bson_array_add'; This function must have at least 2 arguments
+SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array;
+Array
+[56,3.1416,"foo",null,"One more"]
+SELECT Bson_Array_Add(BsonValue('one value'), 'One more');
+Bson_Array_Add(BsonValue('one value'), 'One more')
+["one value","One more"]
+SELECT Bson_Array_Add('one value', 'One more');
+Bson_Array_Add('one value', 'One more')
+["one value","One more"]
+SELECT Bson_Array_Add('one value' json_, 'One more');
+Bson_Array_Add('one value' json_, 'One more')
+["one value","One more"]
+SELECT Bson_Array_Add(5 json_, 'One more');
+Bson_Array_Add(5 json_, 'One more')
+[5,"One more"]
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0);
+Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0)
+[4,5,3,8,7,9]
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array;
+Array
+[5,3,4,8,7,9]
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9);
+Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9)
+[5,3,8,7,9,4]
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1);
+Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1)
+[1,2,[11,22],"[2]"]
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1);
+Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1)
+[1,2,[11,33,22]]
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]');
+Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]')
+[1,2,[11,33,22]]
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin', NULL), 'One more', 'Two more') Array;
+Array
+[56,3.1416,"machin",null,"One more","Two more"]
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), 'One more', 'Two more') Array FROM t1;
+Array
+[56,3.1416,"machin","One more","Two more"]
+[56,3.1416,"machin","One more","Two more"]
+[56,3.1416,"machin","One more","Two more"]
+[56,3.1416,"machin","One more","Two more"]
+[56,3.1416,"machin","One more","Two more"]
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), n) Array FROM t1;
+Array
+[56,3.1416,"machin",1]
+[56,3.1416,"machin",2]
+[56,3.1416,"machin",3]
+[56,3.1416,"machin",4]
+[56,3.1416,"machin",5]
+SELECT Bson_Array_Add_Values(Bson_Make_Array(n, 3.1416, 'machin'), n) Array FROM t1;
+Array
+[1,3.1416,"machin",1]
+[2,3.1416,"machin",2]
+[3,3.1416,"machin",3]
+[4,3.1416,"machin",4]
+[5,3.1416,"machin",5]
+SELECT Bson_Array_Add_Values('[56]', 3.1416, 'machin') Array;
+Array
+[56,3.1416,"machin"]
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0);
+Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0)
+[3.1416,"My name is \"Foo\"",null]
+SELECT Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2);
+Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2)
+{"56":56,"3.1416":3.1416,"My name is Foo":"My name is Foo","NULL":null}
+Warnings:
+Warning 1105 First argument target is not an array
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2');
+Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2')
+[56,3.1416,"My name is \"Foo\"",null]
+Warnings:
+Warning 1105 Missing or null array index
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2);
+Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2)
+[56,3.1416,"My name is \"Foo\"",null]
+Warnings:
+Warning 1105 First argument target is not an array
+/* WARNING VOID */
+#
+SELECT Bson_Make_Object(56, 3.1416, 'foo', NULL);
+Bson_Make_Object(56, 3.1416, 'foo', NULL)
+{"56":56,"3.1416":3.1416,"foo":"foo","NULL":null}
+SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty);
+Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty)
+{"qty":56,"price":3.1416,"truc":"foo","garanty":null}
+SELECT Bson_Make_Object();
+Bson_Make_Object()
+{}
+SELECT Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL);
+Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL)
+{"Make_Array(56, 3.1416, 'foo')":[56,3.1416,"foo"],"NULL":null}
+SELECT Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL);
+Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL)
+[{"qty":56,"price":3.1416,"foo":"foo"},null]
+SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL);
+Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL)
+{"qty":56,"price":3.1416,"truc":"machin","garanty":null}
+SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty');
+ERROR HY000: Can't initialize function 'bson_object_key'; This function must have an even number of arguments
+SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color);
+Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color)
+{"qty":56,"price":3.1416,"truc":"machin","garanty":null,"color":"blue"}
+SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price);
+Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price)
+{"qty":56,"price":45.99,"truc":"machin","garanty":null}
+SELECT Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1);
+Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1)
+NULL
+Warnings:
+Warning 1105 Error 2 opening notexist.json
+Warning 1105 No sub-item at '[1]'
+SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc');
+Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc')
+{"qty":56,"price":3.1416,"garanty":null}
+SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose');
+Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose')
+{"qty":56,"price":3.1416,"truc":"machin","garanty":null}
+SELECT Bson_Object_List(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty)) "Key List";
+Key List
+["qty","price","truc","garanty"]
+SELECT Bson_Object_List('{"qty":56, "price":3.1416, "truc":"machin", "garanty":null}') "Key List";
+Key List
+["qty","price","truc","garanty"]
+SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List";
+Value List
+[1,2,3]
+#
+# Test UDF's with column arguments
+#
+CREATE TABLE t2
+(
+ISBN CHAR(15),
+LANG CHAR(2),
+SUBJECT CHAR(32),
+AUTHOR CHAR(64),
+TITLE CHAR(32),
+TRANSLATION CHAR(32),
+TRANSLATOR CHAR(80),
+PUBLISHER CHAR(32),
+DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT Bson_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2;
+Bson_Make_Array(AUTHOR, TITLE, DATEPUB)
+[" Jean-Christophe Bernadac, François Knab","Construire une application XML",1999]
+["William J. Pardi","XML en Action",1999]
+SELECT Bson_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2;
+Bson_Make_Object(AUTHOR, TITLE, DATEPUB)
+{"AUTHOR":" Jean-Christophe Bernadac, François Knab","TITLE":"Construire une application XML","DATEPUB":1999}
+{"AUTHOR":"William J. Pardi","TITLE":"XML en Action","DATEPUB":1999}
+SELECT Bson_Array_Grp(TITLE, DATEPUB) FROM t2;
+ERROR HY000: Can't initialize function 'bson_array_grp'; This function can only accept 1 argument
+SELECT Bson_Array_Grp(TITLE) FROM t2;
+Bson_Array_Grp(TITLE)
+["Construire une application XML","XML en Action"]
+CREATE TABLE t3 (
+SERIALNO CHAR(5) NOT NULL,
+NAME VARCHAR(12) NOT NULL FLAG=6,
+SEX SMALLINT(1) NOT NULL,
+TITLE VARCHAR(15) NOT NULL FLAG=20,
+MANAGER CHAR(5) DEFAULT NULL,
+DEPARTMENT CHAr(4) NOT NULL FLAG=41,
+SECRETARY CHAR(5) DEFAULT NULL FLAG=46,
+SALARY DOUBLE(8,2) NOT NULL FLAG=52
+) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1;
+SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT';
+Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)
+{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.0000000000000000}
+SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT;
+DEPARTMENT Bson_Array_Grp(NAME)
+0021 ["STRONG","SHORTSIGHT"]
+0318 ["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]
+0319 ["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL"]
+2452 ["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]
+Warnings:
+Warning 1105 Result truncated to json_grp_size values
+SELECT BsonSet_Grp_Size(30);
+BsonSet_Grp_Size(30)
+30
+SELECT Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) from t3 GROUP BY title;
+Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`)
+{"title":"ADMINISTRATOR","names":["GOOSEPEN","FUNNIGUY","SHRINKY"]}
+{"title":"DIRECTOR","names":["QUINN","WERTHER","STRONG"]}
+{"title":"ENGINEER","names":["BROWNY","ORELLY","MARTIN","TONGHO","WALTER","SMITH"]}
+{"title":"PROGRAMMER","names":["BUGHAPPY"]}
+{"title":"SALESMAN","names":["WHEELFOR","MERCHANT","BULLOZER","BANCROFT","FODDERMAN"]}
+{"title":"SCIENTIST","names":["BIGHEAD","BIGHORN"]}
+{"title":"SECRETARY","names":["MESSIFUL","HONEY","SHORTSIGHT","CHERRY","MONAPENNY"]}
+{"title":"TYPIST","names":["KITTY","PLUMHEAD"]}
+SELECT Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) FROM t3 GROUP BY DEPARTMENT;
+Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME))
+["0021",["STRONG","SHORTSIGHT"]]
+["0318",["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]]
+["0319",["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL","GOOSEPEN"]]
+["2452",["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]]
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) FROM t3 GROUP BY DEPARTMENT;
+Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES)
+{"DEPARTMENT":"0021","NAMES":["STRONG","SHORTSIGHT"]}
+{"DEPARTMENT":"0318","NAMES":["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]}
+{"DEPARTMENT":"0319","NAMES":["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL","GOOSEPEN"]}
+{"DEPARTMENT":"2452","NAMES":["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]}
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT;
+Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES)
+{"DEPARTMENT":"0021","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","TITLE":"DIRECTOR","SALARY":23000.0000000000000000},{"SERIALNO":"22222","NAME":"SHORTSIGHT","TITLE":"SECRETARY","SALARY":5500.0000000000000000}]}
+{"DEPARTMENT":"0318","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","TITLE":"SALESMAN","SALARY":9600.0000000000000000},{"SERIALNO":"24888","NAME":"PLUMHEAD","TITLE":"TYPIST","SALARY":2800.0000000000000000},{"SERIALNO":"27845","NAME":"HONEY","TITLE":"SECRETARY","SALARY":4900.0000000000000000},{"SERIALNO":"73452","NAME":"TONGHO","TITLE":"ENGINEER","SALARY":6800.0000000000000000},{"SERIALNO":"74234","NAME":"WALTER","TITLE":"ENGINEER","SALARY":7400.0000000000000000},{"SERIALNO":"77777","NAME":"SHRINKY","TITLE":"ADMINISTRATOR","SALARY":7500.0000000000000000},{"SERIALNO":"70012","NAME":"WERTHER","TITLE":"DIRECTOR","SALARY":14500.0000000000000000},{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.0000000000000000},{"SERIALNO":"73111","NAME":"WHEELFOR","TITLE":"SALESMAN","SALARY":10030.0000000000000000}]}
+{"DEPARTMENT":"0319","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","TITLE":"SALESMAN","SALARY":14800.0000000000000000},{"SERIALNO":"40567","NAME":"QUINN","TITLE":"DIRECTOR","SALARY":14000.0000000000000000},{"SERIALNO":"00137","NAME":"BROWNY","TITLE":"ENGINEER","SALARY":10500.0000000000000000},{"SERIALNO":"12345","NAME":"KITTY","TITLE":"TYPIST","SALARY":3000.4499999999998181},{"SERIALNO":"33333","NAME":"MONAPENNY","TITLE":"SECRETARY","SALARY":3800.0000000000000000},{"SERIALNO":"00023","NAME":"MARTIN","TITLE":"ENGINEER","SALARY":10000.0000000000000000},{"SERIALNO":"07654","NAME":"FUNNIGUY","TITLE":"ADMINISTRATOR","SALARY":8500.0000000000000000},{"SERIALNO":"45678","NAME":"BUGHAPPY","TITLE":"PROGRAMMER","SALARY":8500.0000000000000000},{"SERIALNO":"56789","NAME":"FODDERMAN","TITLE":"SALESMAN","SALARY":7000.0000000000000000},{"SERIALNO":"55555","NAME":"MESSIFUL","TITLE":"SECRETARY","SALARY":5000.5000000000000000},{"SERIALNO":"98765","NAME":"GOOSEPEN","TITLE":"ADMINISTRATOR","SALARY":4700.0000000000000000}]}
+{"DEPARTMENT":"2452","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","TITLE":"SCIENTIST","SALARY":8000.0000000000000000},{"SERIALNO":"31416","NAME":"ORELLY","TITLE":"ENGINEER","SALARY":13400.0000000000000000},{"SERIALNO":"36666","NAME":"BIGHORN","TITLE":"SCIENTIST","SALARY":11000.0000000000000000},{"SERIALNO":"02345","NAME":"SMITH","TITLE":"ENGINEER","SALARY":9000.0000000000000000},{"SERIALNO":"11111","NAME":"CHERRY","TITLE":"SECRETARY","SALARY":4500.0000000000000000}]}
+SELECT Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE;
+Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES)
+{"DEPARTMENT":"0021","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","SALARY":23000.0000000000000000}]}
+{"DEPARTMENT":"0021","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"22222","NAME":"SHORTSIGHT","SALARY":5500.0000000000000000}]}
+{"DEPARTMENT":"0318","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"77777","NAME":"SHRINKY","SALARY":7500.0000000000000000}]}
+{"DEPARTMENT":"0318","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"70012","NAME":"WERTHER","SALARY":14500.0000000000000000}]}
+{"DEPARTMENT":"0318","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"73452","NAME":"TONGHO","SALARY":6800.0000000000000000},{"SERIALNO":"74234","NAME":"WALTER","SALARY":7400.0000000000000000}]}
+{"DEPARTMENT":"0318","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","SALARY":9600.0000000000000000},{"SERIALNO":"78943","NAME":"MERCHANT","SALARY":8700.0000000000000000},{"SERIALNO":"73111","NAME":"WHEELFOR","SALARY":10030.0000000000000000}]}
+{"DEPARTMENT":"0318","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"27845","NAME":"HONEY","SALARY":4900.0000000000000000}]}
+{"DEPARTMENT":"0318","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"24888","NAME":"PLUMHEAD","SALARY":2800.0000000000000000}]}
+{"DEPARTMENT":"0319","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"98765","NAME":"GOOSEPEN","SALARY":4700.0000000000000000},{"SERIALNO":"07654","NAME":"FUNNIGUY","SALARY":8500.0000000000000000}]}
+{"DEPARTMENT":"0319","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"40567","NAME":"QUINN","SALARY":14000.0000000000000000}]}
+{"DEPARTMENT":"0319","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"00023","NAME":"MARTIN","SALARY":10000.0000000000000000},{"SERIALNO":"00137","NAME":"BROWNY","SALARY":10500.0000000000000000}]}
+{"DEPARTMENT":"0319","TITLE":"PROGRAMMER","EMPLOYES":[{"SERIALNO":"45678","NAME":"BUGHAPPY","SALARY":8500.0000000000000000}]}
+{"DEPARTMENT":"0319","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","SALARY":14800.0000000000000000},{"SERIALNO":"56789","NAME":"FODDERMAN","SALARY":7000.0000000000000000}]}
+{"DEPARTMENT":"0319","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"33333","NAME":"MONAPENNY","SALARY":3800.0000000000000000},{"SERIALNO":"55555","NAME":"MESSIFUL","SALARY":5000.5000000000000000}]}
+{"DEPARTMENT":"0319","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"12345","NAME":"KITTY","SALARY":3000.4499999999998181}]}
+{"DEPARTMENT":"2452","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"31416","NAME":"ORELLY","SALARY":13400.0000000000000000},{"SERIALNO":"02345","NAME":"SMITH","SALARY":9000.0000000000000000}]}
+{"DEPARTMENT":"2452","TITLE":"SCIENTIST","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","SALARY":8000.0000000000000000},{"SERIALNO":"36666","NAME":"BIGHORN","SALARY":11000.0000000000000000}]}
+{"DEPARTMENT":"2452","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"11111","NAME":"CHERRY","SALARY":4500.0000000000000000}]}
+SELECT Bson_Object_Grp(SALARY) FROM t3;
+ERROR HY000: Can't initialize function 'bson_object_grp'; This function requires 2 arguments (key, value)
+SELECT Bson_Object_Grp(NAME, SALARY) FROM t3;
+Bson_Object_Grp(NAME, SALARY)
+{"":"MARTIN","ffffæp§@":"KITTY"}
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT;
+Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES")
+{"DEPARTMENT":"0021","SALARIES":{"":"SHORTSIGHT"}}
+{"DEPARTMENT":"0318","SALARIES":{"":"WHEELFOR"}}
+{"DEPARTMENT":"0319","SALARIES":{"":"GOOSEPEN","ffffæp§@":"KITTY"}}
+{"DEPARTMENT":"2452","SALARIES":{"":"CHERRY"}}
+SELECT Bson_Array_Grp(NAME) FROM t3;
+Bson_Array_Grp(NAME)
+["BANCROFT","SMITH","MERCHANT","FUNNIGUY","BUGHAPPY","BIGHEAD","SHRINKY","WALTER","FODDERMAN","TONGHO","SHORTSIGHT","MESSIFUL","HONEY","GOOSEPEN","CHERRY","MONAPENNY","KITTY","PLUMHEAD","STRONG","BULLOZER","WERTHER","QUINN","ORELLY","BIGHORN","BROWNY","WHEELFOR","MARTIN"]
+SELECT Bson_Object_Key(name, title) FROM t3 WHERE DEPARTMENT = 318;
+Bson_Object_Key(name, title)
+{"BANCROFT":"SALESMAN"}
+{"MERCHANT":"SALESMAN"}
+{"SHRINKY":"ADMINISTRATOR"}
+{"WALTER":"ENGINEER"}
+{"TONGHO":"ENGINEER"}
+{"HONEY":"SECRETARY"}
+{"PLUMHEAD":"TYPIST"}
+{"WERTHER":"DIRECTOR"}
+{"WHEELFOR":"SALESMAN"}
+SELECT Bson_Object_Grp(name, title) FROM t3 WHERE DEPARTMENT = 318;
+Bson_Object_Grp(name, title)
+{"SALESMAN":"WHEELFOR","ADMINISTRATOR":"SHRINKY","ENGINEER":"TONGHO","SECRETARY":"HONEY","TYPIST":"PLUMHEAD","DIRECTOR":"WERTHER"}
+#
+# Test value getting UDF's
+#
+SELECT BsonGet_String(Bson_Array_Grp(name),'[#]') FROM t3;
+BsonGet_String(Bson_Array_Grp(name),'[#]')
+27
+SELECT BsonGet_String(Bson_Array_Grp(name),'[","]') FROM t3;
+BsonGet_String(Bson_Array_Grp(name),'[","]')
+BANCROFT,SMITH,MERCHANT,FUNNIGUY,BUGHAPPY,BIGHEAD,SHRINKY,WALTER,FODDERMAN,TONGHO,SHORTSIGHT,MESSIFUL,HONEY,GOOSEPEN,CHERRY,MONAPENNY,KITTY,PLUMHEAD,STRONG,BULLOZER,WERTHER,QUINN,ORELLY,BIGHORN,BROWNY,WHEELFOR,MARTIN
+SELECT BsonGet_String(Bson_Array_Grp(name),'[>]') FROM t3;
+BsonGet_String(Bson_Array_Grp(name),'[>]')
+WHEELFOR
+SET @j1 = '[45,28,36,45,89]';
+SELECT BsonGet_String(@j1,'1');
+BsonGet_String(@j1,'1')
+28
+SELECT BsonGet_String(@j1 json_,'3');
+BsonGet_String(@j1 json_,'3')
+45
+SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3');
+BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3')
+45
+SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum";
+list egal sum
+45+28+36+45+89 = 243.00
+SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0');
+BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0')
+36
+SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*');
+BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*')
+[36,45,89]
+SELECT BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc');
+BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc')
+machin
+SET @j2 = '{"qty":56,"price":3.141600,"truc":"machin","garanty":null}';
+SELECT BsonGet_String(@j2 json_,'truc');
+BsonGet_String(@j2 json_,'truc')
+machin
+SELECT BsonGet_String(@j2,'truc');
+BsonGet_String(@j2,'truc')
+machin
+SELECT BsonGet_String(@j2,'chose');
+BsonGet_String(@j2,'chose')
+NULL
+SELECT BsonGet_String(NULL json_, NULL);
+BsonGet_String(NULL json_, NULL)
+NULL
+Warnings:
+Warning 1105
+/* NULL WARNING */
+SELECT department, BsonGet_String(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+department Sumsal
+0021 28500.00
+0318 72230.00
+0319 89800.95
+2452 45900.00
+SELECT BsonGet_Int(@j1, '4');
+BsonGet_Int(@j1, '4')
+89
+SELECT BsonGet_Int(@j1, '[#]');
+BsonGet_Int(@j1, '[#]')
+5
+SELECT BsonGet_Int(@j1, '[+]');
+BsonGet_Int(@j1, '[+]')
+243
+SELECT BsonGet_Int(@j1 json_, '3');
+BsonGet_Int(@j1 json_, '3')
+45
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3');
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3')
+45
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]');
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]')
+45
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]');
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]')
+243
+SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0');
+BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0')
+36
+SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1');
+BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1')
+28
+SELECT BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty');
+BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty')
+56
+SELECT BsonGet_Int(@j2 json_, 'price');
+BsonGet_Int(@j2 json_, 'price')
+3
+SELECT BsonGet_Int(@j2, 'qty');
+BsonGet_Int(@j2, 'qty')
+56
+SELECT BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose');
+BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose')
+NULL
+SELECT BsonGet_Int(BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)), '1.*'), '[+]') sum;
+sum
+170
+SELECT department, BsonGet_Int(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"), 'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+department Sumsal
+0021 28500
+0318 72230
+0319 89800
+2452 45900
+SELECT BsonGet_Real(@j1, '2');
+BsonGet_Real(@j1, '2')
+36.000000000000000
+SELECT BsonGet_Real(@j1 json_, '3', 2);
+BsonGet_Real(@j1 json_, '3', 2)
+45.00
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3');
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3')
+45.000000000000000
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]');
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]')
+45.000000000000000
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]');
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]')
+243.000000000000000
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]');
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]')
+48.600000000000000
+SELECT BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0');
+BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0')
+36.000000000000000
+SELECT BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price');
+BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price')
+3.141600000000000
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty');
+BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty')
+56.000000000000000
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price');
+BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price')
+3.141600000000000
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4);
+BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4)
+3.1416
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose');
+BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose')
+NULL
+SELECT department, BsonGet_Real(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+department Sumsal
+0021 28500.000000000000000
+0318 72230.000000000000000
+0319 89800.950000000000000
+2452 45900.000000000000000
+#
+# Documentation examples
+#
+SELECT
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '4') "Rank",
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[#]') "Number",
+BsonGet_String(Bson_Make_Array(45,28,36,45,89), '[","]') "Concat",
+BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') "Sum",
+BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]', 2) "Avg";
+Rank Number Concat Sum Avg
+89 5 45,28,36,45,89 243 48.60
+SELECT
+BsonGet_String('{"qty":7,"price":29.50,"garanty":null}', 'price') "String",
+BsonGet_Int('{"qty":7,"price":29.50,"garanty":null}', 'price') "Int",
+BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price') "Real";
+String Int Real
+29.50 29 29.500000000000000
+SELECT BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price', 3) "Real";
+Real
+29.500
+#
+# Testing Locate
+#
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin');
+BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin')
+$.truc
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56);
+BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56)
+$.qty
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416);
+BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416)
+$.price
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose');
+BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose')
+NULL
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'Jack') Path;
+Path
+$.AUTHORS[1].FN
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'jack' ci) Path;
+Path
+$.AUTHORS[1].FN
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"Jack", "LN":"London"}' json_) Path;
+Path
+$.AUTHORS[1]
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"jack", "LN":"London"}' json_) Path;
+Path
+NULL
+SELECT BsonLocate('[45,28,36,45,89]',36);
+BsonLocate('[45,28,36,45,89]',36)
+$[2]
+SELECT BsonLocate('[45,28,36,45,89]' json_,28.0);
+BsonLocate('[45,28,36,45,89]' json_,28.0)
+NULL
+SELECT Bson_Locate_All('[45,28,36,45,89]',10);
+Bson_Locate_All('[45,28,36,45,89]',10)
+[]
+SELECT Bson_Locate_All('[45,28,36,45,89]',45);
+Bson_Locate_All('[45,28,36,45,89]',45)
+["$[0]","$[3]"]
+SELECT Bson_Locate_All('[[45,28],36,45,89]',45);
+Bson_Locate_All('[[45,28],36,45,89]',45)
+["$[0][0]","$[2]"]
+SELECT Bson_Locate_All('[[45,28,45],36,45,89]',45);
+Bson_Locate_All('[[45,28,45],36,45,89]',45)
+["$[0][0]","$[0][2]","$[2]"]
+SELECT Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]'));
+Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]'))
+["$[0][0]","$[0][2]","$[2]"]
+SELECT BsonLocate('[[45,28,45],36,45,89]',45,n) from t1;
+BsonLocate('[[45,28,45],36,45,89]',45,n)
+$[0][0]
+$[0][2]
+$[2]
+NULL
+NULL
+SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) FROM t1;
+BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']'))
+$[0][0]
+$[0][2]
+$[2]
+NULL
+NULL
+SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) AS `Path` FROM t1 GROUP BY n HAVING `Path` IS NOT NULL;
+Path
+$[0][0]
+$[0][2]
+$[2]
+SELECT Bson_Locate_All('[45,28,[36,45,89]]',45);
+Bson_Locate_All('[45,28,[36,45,89]]',45)
+["$[0]","$[2][1]"]
+SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0));
+Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0))
+[]
+SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0);
+Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0)
+["$[1][1]"]
+SELECT BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_);
+BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_)
+$[1]
+SELECT BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_);
+BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_)
+$[0]
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','45') "All paths";
+All paths
+[]
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_);
+Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_)
+["$[1][0]"]
+SELECT BsonGet_Int(Bson_Locate_All('[[45,28],[[36,45],89]]',45), '[#]') "Nb of occurs";
+Nb of occurs
+2
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]',45,2);
+Bson_Locate_All('[[45,28],[[36,45],89]]',45,2)
+["$[0][0]"]
+SELECT BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0');
+BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0')
+$[0]
+SELECT BsonLocate(Bson_File('test/biblio.json'), 'Knab');
+BsonLocate(Bson_File('test/biblio.json'), 'Knab')
+$[0].AUTHOR[1].LASTNAME
+SELECT Bson_Locate_All('test/biblio.json' jfile_, 'Knab');
+Bson_Locate_All('test/biblio.json' jfile_, 'Knab')
+["$[0].AUTHOR[1].LASTNAME"]
+#
+# Testing json files
+#
+SELECT Bfile_Make('[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},
+{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},
+{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]', 'test/fx.json', 0) AS NewFile;
+NewFile
+test/fx.json
+SELECT Bfile_Make('test/fx.json', 1);
+Bfile_Make('test/fx.json', 1)
+test/fx.json
+SELECT Bfile_Make('test/fx.json' jfile_);
+Bfile_Make('test/fx.json' jfile_)
+test/fx.json
+SELECT Bfile_Make(Bbin_File('test/fx.json'), 0);
+Bfile_Make(Bbin_File('test/fx.json'), 0)
+test/fx.json
+SELECT Bson_File('test/fx.json', 1);
+Bson_File('test/fx.json', 1)
+[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]
+Warnings:
+Warning 1105 File pretty format doesn't match the specified pretty value
+SELECT Bson_File('test/fx.json', 2);
+Bson_File('test/fx.json', 2)
+[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]
+Warnings:
+Warning 1105 File pretty format doesn't match the specified pretty value
+SELECT Bson_File('test/fx.json', 0);
+Bson_File('test/fx.json', 0)
+[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]
+SELECT Bson_File('test/fx.json', '0');
+Bson_File('test/fx.json', '0')
+NULL
+Warnings:
+Warning 1105
+SELECT Bson_File('test/fx.json', '[?]');
+Bson_File('test/fx.json', '[?]')
+NULL
+Warnings:
+Warning 1105
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*');
+BsonGet_String(Bson_File('test/fx.json'), '1.*')
+{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]}
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1');
+BsonGet_String(Bson_File('test/fx.json'), '1')
+6 car roadster 56000 (6, 9)
+SELECT BsonGet_Int(Bson_File('test/fx.json'), '1.mileage') AS Mileage;
+Mileage
+56000
+SELECT BsonGet_Real(Bson_File('test/fx.json'), '0.price', 2) AS Price;
+Price
+5.65
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings');
+Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings')
+NULL
+Warnings:
+Warning 1105
+Warning 1105 No sub-item at 'ratings'
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings');
+Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings')
+NULL
+Warnings:
+Warning 1105
+Warning 1105 No sub-item at 'ratings'
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1);
+Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1)
+NULL
+Warnings:
+Warning 1105
+Warning 1105 No sub-item at 'ratings'
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0);
+Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0)
+[6,null]
+Warnings:
+Warning 1105
+SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1);
+Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1)
+NULL
+Warnings:
+Warning 1105
+Warning 1105 No sub-item at 'ratings'
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin);
+Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin)
+NULL
+Warnings:
+Warning 1105
+Warning 1105 First argument target is not an object
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size');
+Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size')
+NULL
+Warnings:
+Warning 1105
+Warning 1105 No sub-item at 'size'
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size');
+Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size')
+NULL
+Warnings:
+Warning 1105
+Warning 1105 No sub-item at 'size'
+SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size'));
+Bson_Object_List(Bson_File('test/fx.json', '3.size'))
+NULL
+Warnings:
+Warning 1105
+Warning 1105 First argument is not an object
+#
+# Testing new functions
+#
+SELECT Bson_Item_Merge('["a","b","c"]','["d","e","f"]') as "Result";
+Result
+["a","b","c","d","e","f"]
+SELECT Bson_Item_Merge(Bson_Make_Array('a','b','c'), Bson_Make_Array('d','e','f')) as "Result";
+Result
+["a","b","c","d","e","f"]
+SELECT
+Bson_Set_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Set",
+Bson_Insert_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Insert",
+Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]',5,'$[3].cinq') "Update";
+Set Insert Update
+[1,"foo",3,{"quatre":4,"cinq":5}] [1,2,3,{"quatre":4,"cinq":5}] [1,"foo",3,{"quatre":4}]
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux');
+bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux')
+[1,3,{"quatre":4}]
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]');
+bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]')
+[1,3,{"quatre":4}]
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux');
+bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux')
+[1,2,3,{"quatre":4}]
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+SELECT BsonSet_Grp_Size(10);
+BsonSet_Grp_Size(10)
+10
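The bson_udf results above exercise the path mini-language shared by the Bson UDFs: a second argument of '[#]' counts array items, '[+]' sums them, '[!]' averages them, '[","]' concatenates them with the given separator, and the json_ / jfile_ suffixes mark a string argument as JSON text or as a file name. A minimal sketch restating a few of the expected values from this result file (column aliases are illustrative only):

SELECT BsonGet_Int('[45,28,36,45,89]', '[#]')       AS n_items,   # 5
       BsonGet_Int('[45,28,36,45,89]', '[+]')       AS total,     # 243
       BsonGet_Real('[45,28,36,45,89]', '[!]', 2)   AS average,   # 48.60
       BsonGet_String('[45,28,36,45,89]', '[","]')  AS csv;       # 45,28,36,45,89
SELECT BsonLocate('[45,28,36,45,89]', 36)           AS one_path,  # $[2]
       Bson_Locate_All('[[45,28],36,45,89]', 45)    AS all_paths; # ["$[0][0]","$[2]"]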
diff --git a/storage/connect/mysql-test/connect/r/jdbc_oracle.result b/storage/connect/mysql-test/connect/r/jdbc_oracle.result
index ec314c5f072..d895a9aed87 100644
--- a/storage/connect/mysql-test/connect/r/jdbc_oracle.result
+++ b/storage/connect/mysql-test/connect/r/jdbc_oracle.result
@@ -3,7 +3,7 @@ command varchar(128) not null,
number int(5) not null flag=1,
message varchar(255) flag=2)
ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager,Execsrc=1';
+OPTION_LIST='User=system,Password=Choupy01,Execsrc=1';
SELECT * FROM t2 WHERE command = 'drop table employee';
command number message
drop table employee 0 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist
@@ -23,14 +23,14 @@ Warnings:
Warning 1105 Affected rows
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager';
+OPTION_LIST='User=system,Password=Choupy01';
SELECT * FROM t1 WHERE table_name='employee';
Table_Cat Table_Schema Table_Name Table_Type Remark
NULL SYSTEM EMPLOYEE TABLE NULL
DROP TABLE t1;
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager';
+OPTION_LIST='User=system,Password=Choupy01';
SELECT * FROM t1;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
NULL SYSTEM EMPLOYEE ID 3 NUMBER 38 0 0 10 0 NULL
@@ -42,7 +42,7 @@ CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OP
HOST 'jdbc:oracle:thin:@localhost:1521:xe',
DATABASE 'SYSTEM',
USER 'system',
-PASSWORD 'manager',
+PASSWORD 'Choupy01',
PORT 0,
SOCKET '',
OWNER 'SYSTEM');
diff --git a/storage/connect/mysql-test/connect/r/json.result b/storage/connect/mysql-test/connect/r/json.result
index 6b6f40d2c47..dc527acd4a3 100644
--- a/storage/connect/mysql-test/connect/r/json.result
+++ b/storage/connect/mysql-test/connect/r/json.result
@@ -15,7 +15,7 @@ DATEPUB int(4)
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB
-9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999
+9782212090819 fr applications Jean-Christophe Bernadac, François Knab Construire une application XML NULL NULL Eyrolles Paris 1999
9782840825685 fr applications William J. Pardi XML en Action adapté de l'anglais par James Guerin Microsoft Press Paris 1999
DROP TABLE t1;
#
@@ -24,15 +24,15 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
-Language CHAR(2) FIELD_FORMAT='$.LANG',
-Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
-Authors INT(2) FIELD_FORMAT='$.AUTHOR[#]',
-Title CHAR(32) FIELD_FORMAT='$.TITLE',
-Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
-Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
-Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
-Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
-Year int(4) FIELD_FORMAT='$.DATEPUB'
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+Authors INT(2) JPATH='$.AUTHOR[#]',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -46,16 +46,16 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
-Language CHAR(2) FIELD_FORMAT='$.LANG',
-Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
-AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].FIRSTNAME',
-AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].LASTNAME',
-Title CHAR(32) FIELD_FORMAT='$.TITLE',
-Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
-Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
-Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
-Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
-Year int(4) FIELD_FORMAT='$.DATEPUB'
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -69,16 +69,16 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
-Language CHAR(2) FIELD_FORMAT='$.LANG',
-Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
-AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME',
-AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME',
-Title CHAR(32) FIELD_FORMAT='$.TITLE',
-Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
-Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
-Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
-Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
-Year int(4) FIELD_FORMAT='$.DATEPUB'
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATION',
+Translator CHAR(80) JPATH='$.TRANSLATOR',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -176,17 +176,17 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15) NOT NULL,
-Language CHAR(2) FIELD_FORMAT='$.LANG',
-Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
-AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME',
-AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME',
-Title CHAR(32) FIELD_FORMAT='$.TITLE',
-Translation CHAR(32) FIELD_FORMAT='$.TRANSLATED.PREFIX',
-TranslatorFN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
-TranslatorLN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.LASTNAME',
-Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
-Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
-Year int(4) FIELD_FORMAT='$.DATEPUB',
+Language CHAR(2) JPATH='$.LANG',
+Subject CHAR(32) JPATH='$.SUBJECT',
+AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+Title CHAR(32) JPATH='$.TITLE',
+Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX',
+TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
+TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME',
+Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+Year int(4) JPATH='$.DATEPUB',
INDEX IX(ISBN)
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0';
@@ -209,9 +209,9 @@ DROP TABLE t1;
#
CREATE TABLE t1 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[].EXPENSE["+"].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[].EXPENSE[+].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
WHO WEEK WHAT AMOUNT
@@ -230,9 +230,9 @@ DROP TABLE t1;
#
CREATE TABLE t1 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
WHO WEEK WHAT AMOUNT
@@ -266,14 +266,14 @@ DROP TABLE t1;
#
CREATE TABLE t1 (
WHO CHAR(12) NOT NULL,
-WEEKS CHAR(12) NOT NULL FIELD_FORMAT='$.WEEK[", "].NUMBER',
-SUMS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[+].AMOUNT',
-SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[+].AMOUNT',
-AVGS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[!].AMOUNT',
-SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[!].AMOUNT',
-AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[+].AMOUNT',
-AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[!].AMOUNT',
-AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[*].AMOUNT')
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',
+SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',
+SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
+AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT',
+SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT',
+AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT',
+AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT',
+AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE
@@ -286,9 +286,9 @@ DROP TABLE t1;
#
CREATE TABLE t2 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[0].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[0].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
WHO WEEK WHAT AMOUNT
@@ -302,9 +302,9 @@ Janet 3 Food 18.00
Janet 3 Beer 18.00
CREATE TABLE t3 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[1].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[1].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
WHO WEEK WHAT AMOUNT
@@ -318,9 +318,9 @@ Beth 4 Beer 15.00
Janet 4 Car 17.00
CREATE TABLE t4 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[2].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[2].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
WHO WEEK WHAT AMOUNT
@@ -374,8 +374,8 @@ DROP TABLE t1, t2, t3, t4;
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json';
SELECT * FROM t2;
WHO WEEK WHAT AMOUNT
@@ -390,8 +390,8 @@ Janet 3 Beer 18.00
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json';
SELECT * FROM t3;
WHO WEEK WHAT AMOUNT
@@ -406,8 +406,8 @@ Janet 4 Car 17.00
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json';
SELECT * FROM t4;
WHO WEEK WHAT AMOUNT
@@ -425,8 +425,8 @@ Janet 5 Food 12.00
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1;
SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
WHO WEEK WHAT AMOUNT
@@ -461,8 +461,8 @@ DROP TABLE t1;
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json';
ALTER TABLE t1
PARTITION BY LIST COLUMNS(WEEK) (
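Aside from the changed AUTHOR concatenation in the first hunk, the json.result changes are a uniform rename of the per-column FIELD_FORMAT option to JPATH; the path expressions themselves ($.TITLE, $.AUTHOR[*].FIRSTNAME, aggregate markers such as [+] and [!]) are unchanged. A minimal sketch of the new spelling, reusing the biblio.json layout from this file (the table name is illustrative):

CREATE TABLE biblio_authors (
  ISBN     CHAR(15),
  AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
  AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
  Title    CHAR(32)  JPATH='$.TITLE'
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';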
diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result
index 47fc4abbd28..e0b08889f40 100644
--- a/storage/connect/mysql-test/connect/r/json_java_2.result
+++ b/storage/connect/mysql-test/connect/r/json_java_2.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096
OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
+OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
@@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
-grades_score 5 BIGINT 2 2 0 1 grades.0.score
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
restaurant_id 1 CHAR 8 8 0 0
DROP TABLE t1;
@@ -60,7 +61,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` (
`cuisine` char(64) NOT NULL,
`grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET=
COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=2,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * FROM t1 LIMIT 10;
_id address borough cuisine name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 40.848447 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 40.7676919 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 40.579505 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 40.7311739 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
-58ada47de5a51ddfcd5ed521 8825 -73.8803827 40.7643124 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
-58ada47de5a51ddfcd5ed522 2206 -74.1377286 40.6119572 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
-58ada47de5a51ddfcd5ed523 7114 -73.9068506 40.6199034 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
-58ada47de5a51ddfcd5ed524 6409 -74.00528899999999 40.628886 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
-58ada47de5a51ddfcd5ed525 1839 -73.9482609 40.6408271 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
DROP TABLE t1;
#
# Specifying Jpath
@@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
@@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` (
`borough` char(13) NOT NULL,
`grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096
@@ -305,8 +306,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities'
@@ -344,11 +345,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096;
INSERT INTO t1 VALUES
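For the Java-driver JSON tests, the expected results now spell the discovery option Depth=1 where they previously used Level=1, the discovered grades.0.score column comes back as int(2) (type code 7 INTEGER) instead of bigint(2) (5 BIGINT), and address_coord is reported with both coordinates. A minimal discovery sketch under those expectations (table name illustrative, connection details as in the result file):

CREATE TABLE restaurants_disc ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
  OPTION_LIST='Depth=1,Driver=Java,Version=2'
  CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
SHOW CREATE TABLE restaurants_disc;   # grades_score expected as int(2) `JPATH`='grades.0.score'
DROP TABLE restaurants_disc;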
diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result
index 720c82cd7f9..b9ba919507d 100644
--- a/storage/connect/mysql-test/connect/r/json_java_3.result
+++ b/storage/connect/mysql-test/connect/r/json_java_3.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096
OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
+OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
@@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
-grades_score 5 BIGINT 2 2 0 1 grades.0.score
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
restaurant_id 1 CHAR 8 8 0 0
DROP TABLE t1;
@@ -60,7 +61,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` (
`cuisine` char(64) NOT NULL,
`grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET=
COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096;
SELECT * FROM t1 LIMIT 10;
_id address borough cuisine name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856077 40.848447 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.961704 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 40.7676919 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 40.579505 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.8601152 40.7311739 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
-58ada47de5a51ddfcd5ed521 8825 -73.8803827 40.7643124 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
-58ada47de5a51ddfcd5ed522 2206 -74.1377286 40.6119572 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
-58ada47de5a51ddfcd5ed523 7114 -73.9068506 40.6199034 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
-58ada47de5a51ddfcd5ed524 6409 -74.00528899999999 40.628886 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
-58ada47de5a51ddfcd5ed525 1839 -73.9482609 40.6408271 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
DROP TABLE t1;
#
# Specifying Jpath
@@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
@@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` (
`borough` char(13) NOT NULL,
`grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096
@@ -305,8 +306,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities'
@@ -344,11 +345,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result
index f9bfc01763e..482ccc85b57 100644
--- a/storage/connect/mysql-test/connect/r/json_mongo_c.result
+++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=1024
OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024;
+OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath
_id 1 CHAR 24 24 0 0 _id
@@ -27,7 +28,7 @@ borough 1 CHAR 13 13 0 0
cuisine 1 CHAR 64 64 0 0
grades_date 1 CHAR 1024 1024 0 1 grades.0.date
grades_grade 1 CHAR 14 14 0 1 grades.0.grade
-grades_score 5 BIGINT 2 2 0 1 grades.0.score
+grades_score 7 INTEGER 2 2 0 1 grades.0.score
name 1 CHAR 98 98 0 0
restaurant_id 1 CHAR 8 8 0 0
DROP TABLE t1;
@@ -60,7 +61,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -73,17 +74,17 @@ t1 CREATE TABLE `t1` (
`cuisine` char(64) NOT NULL,
`grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089, 40.848447000000000173 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745, 40.66294200000000103 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451, 40.767691900000002647 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523, 40.579504999999997494 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639, 40.731173900000001709 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068
DROP TABLE t1;
#
# Dropping a column
@@ -92,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET=
COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
SELECT * FROM t1 LIMIT 10;
_id address borough cuisine name restaurant_id
-58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 40.848447000000000173 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
-58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 40.66294200000000103 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
-58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 40.767691900000002647 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
-58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 40.579504999999997494 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
-58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 40.731173900000001709 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
-58ada47de5a51ddfcd5ed521 8825 -73.880382699999998408 40.764312400000001446 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
-58ada47de5a51ddfcd5ed522 2206 -74.137728600000002643 40.611957199999999091 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
-58ada47de5a51ddfcd5ed523 7114 -73.906850599999998508 40.619903399999998328 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
-58ada47de5a51ddfcd5ed524 6409 -74.005288999999990551 40.628886000000001388 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
-58ada47de5a51ddfcd5ed525 1839 -73.948260899999993967 40.640827100000002758 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
+58ada47de5a51ddfcd5ed51c 1007 (-73.856076999999999089, 40.848447000000000173) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445
+58ada47de5a51ddfcd5ed51d 469 (-73.96170399999999745, 40.66294200000000103) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340
+58ada47de5a51ddfcd5ed51e 351 (-73.985135599999992451, 40.767691900000002647) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841
+58ada47de5a51ddfcd5ed51f 2780 (-73.982419999999990523, 40.579504999999997494) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018
+58ada47de5a51ddfcd5ed520 97-22 (-73.860115199999995639, 40.731173900000001709) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068
+58ada47de5a51ddfcd5ed521 8825 (-73.880382699999998408, 40.764312400000001446) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151
+58ada47de5a51ddfcd5ed522 2206 (-74.137728600000002643, 40.611957199999999091) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442
+58ada47de5a51ddfcd5ed523 7114 (-73.906850599999998508, 40.619903399999998328) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483
+58ada47de5a51ddfcd5ed524 6409 (-74.005288999999990551, 40.628886000000001388) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649
+58ada47de5a51ddfcd5ed525 1839 (-73.948260899999993967, 40.640827100000002758) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731
DROP TABLE t1;
#
# Specifying Jpath
@@ -111,12 +112,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
@@ -259,7 +260,7 @@ t1 CREATE TABLE `t1` (
`borough` char(13) NOT NULL,
`grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date',
`grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade',
- `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score',
+ `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024
@@ -305,8 +306,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities'
@@ -344,11 +345,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/json_udf.result b/storage/connect/mysql-test/connect/r/json_udf.result
index 09544bb1ecb..8315fc3f3bf 100644
--- a/storage/connect/mysql-test/connect/r/json_udf.result
+++ b/storage/connect/mysql-test/connect/r/json_udf.result
@@ -187,11 +187,11 @@ DATEPUB int(4)
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT Json_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2;
Json_Make_Array(AUTHOR, TITLE, DATEPUB)
-["Jean-Christophe Bernadac","Construire une application XML",1999]
+[" Jean-Christophe Bernadac, François Knab","Construire une application XML",1999]
["William J. Pardi","XML en Action",1999]
SELECT Json_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2;
Json_Make_Object(AUTHOR, TITLE, DATEPUB)
-{"AUTHOR":"Jean-Christophe Bernadac","TITLE":"Construire une application XML","DATEPUB":1999}
+{"AUTHOR":" Jean-Christophe Bernadac, François Knab","TITLE":"Construire une application XML","DATEPUB":1999}
{"AUTHOR":"William J. Pardi","TITLE":"XML en Action","DATEPUB":1999}
SELECT Json_Array_Grp(TITLE, DATEPUB) FROM t2;
ERROR HY000: Can't initialize function 'json_array_grp'; This function can only accept 1 argument
@@ -610,7 +610,7 @@ JsonGet_String(Json_File('test/fx.json'), '1.*')
{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]}
SELECT JsonGet_String(Json_File('test/fx.json'), '1');
JsonGet_String(Json_File('test/fx.json'), '1')
-6 car roadster 56000 6 9
+6 car roadster 56000 (6, 9)
SELECT JsonGet_Int(Json_File('test/fx.json'), '1.mileage') AS Mileage;
Mileage
56000
diff --git a/storage/connect/mysql-test/connect/r/json_udf_bin.result b/storage/connect/mysql-test/connect/r/json_udf_bin.result
index d0819619c33..c20cf7ce632 100644
--- a/storage/connect/mysql-test/connect/r/json_udf_bin.result
+++ b/storage/connect/mysql-test/connect/r/json_udf_bin.result
@@ -87,7 +87,7 @@ Json_Get_Item(Jbin_File('gloss.json'),'$.glossary.GlossDiv')
{"title":"S","GlossList":{"GlossEntry":{"ID":"SGML","SortAs":"SGML","GlossTerm":"Standard Generalized Markup Language","Acronym":"SGML","Abbrev":"ISO 8879:1986","GlossDef":{"para":"A meta-markup language, used to create markup languages such as DocBook.","GlossSeeAlso":["GML","XML"]},"GlossSee":"markup"}}}
SELECT JsonGet_String(Json_File('gloss.json'),'$.glossary.GlossDiv.GlossList.GlossEntry.GlossDef.GlossSeeAlso') lang;
lang
-GML
+GML, XML
SELECT Json_Get_Item(Jbin_File('gloss.json'),'$.glossary.GlossDiv.GlossList.GlossEntry.GlossDef.GlossSeeAlso') "See also";
See also
["GML","XML"]
diff --git a/storage/connect/mysql-test/connect/r/mongo_c.result b/storage/connect/mysql-test/connect/r/mongo_c.result
index 132bb34ce64..8b86ce32943 100644
--- a/storage/connect/mysql-test/connect/r/mongo_c.result
+++ b/storage/connect/mysql-test/connect/r/mongo_c.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8 ;
+OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 ;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath
_id 1 CHAR 24 24 0 0
@@ -58,7 +59,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` (
`grades_0` varchar(512) DEFAULT NULL `FIELD_FORMAT`='grades.0',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8'
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id
58ada47de5a51ddfcd5ed51c 1007 Morris Park Ave 10462 Bronx Bakery {"date":{"$date":1393804800000},"grade":"A","score":2} Morris Park Bake Shop 30075445
@@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=C,Version=0' ;
@@ -301,8 +302,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities'
@@ -340,11 +341,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=C,Version=0' ;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/mongo_java_2.result b/storage/connect/mysql-test/connect/r/mongo_java_2.result
index bc186d7137e..cccda2760d6 100644
--- a/storage/connect/mysql-test/connect/r/mongo_java_2.result
+++ b/storage/connect/mysql-test/connect/r/mongo_java_2.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8 ;
+OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 ;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath
_id 1 CHAR 24 24 0 0
@@ -58,7 +59,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` (
`grades_0` char(99) DEFAULT NULL `FIELD_FORMAT`='grades.0',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8'
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id
58ada47de5a51ddfcd5ed51c 1007 [ -73.856077 , 40.848447] Morris Park Ave 10462 Bronx Bakery { "date" : { "$date" : "2014-03-03T00:00:00.000Z"} , "grade" : "A" , "score" : 2} Morris Park Bake Shop 30075445
@@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' ;
@@ -301,8 +302,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities'
@@ -340,11 +341,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=2' ;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/mongo_java_3.result b/storage/connect/mysql-test/connect/r/mongo_java_3.result
index 30c696fc9eb..ae39148a156 100644
--- a/storage/connect/mysql-test/connect/r/mongo_java_3.result
+++ b/storage/connect/mysql-test/connect/r/mongo_java_3.result
@@ -1,8 +1,9 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
#
# Test the MONGO table type
#
-CREATE TABLE t1 (Document varchar(1024) field_format='*')
+CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -15,7 +16,7 @@ DROP TABLE t1;
# Test catfunc
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8 ;
+OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 ;
SELECT * from t1;
Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath
_id 1 CHAR 24 24 0 0
@@ -58,7 +59,7 @@ DROP TABLE t1;
# Test discovery
#
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -72,7 +73,7 @@ t1 CREATE TABLE `t1` (
`grades_0` char(84) DEFAULT NULL `FIELD_FORMAT`='grades.0',
`name` char(98) NOT NULL,
`restaurant_id` char(8) NOT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8'
SELECT * FROM t1 LIMIT 5;
_id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id
58ada47de5a51ddfcd5ed51c 1007 [-73.856077, 40.848447] Morris Park Ave 10462 Bronx Bakery { "date" : { "$date" : 1393804800000 }, "grade" : "A", "score" : 2 } Morris Park Bake Shop 30075445
@@ -107,12 +108,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=3' ;
@@ -301,8 +302,8 @@ false
CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
-loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
-loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities'
@@ -340,11 +341,11 @@ true
CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
-prices_0 INT(6) FIELD_FORMAT='prices.0',
-prices_1 INT(6) FIELD_FORMAT='prices.1',
-prices_2 INT(6) FIELD_FORMAT='prices.2',
-prices_3 INT(6) FIELD_FORMAT='prices.3',
-prices_4 INT(6) FIELD_FORMAT='prices.4')
+prices_0 INT(6) JPATH='prices.0',
+prices_1 INT(6) JPATH='prices.1',
+prices_2 INT(6) JPATH='prices.2',
+prices_3 INT(6) JPATH='prices.3',
+prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=Java,Version=3' ;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/r/odbc_oracle.result b/storage/connect/mysql-test/connect/r/odbc_oracle.result
index 8dc7dc07bb1..acb7d9a74c9 100644
--- a/storage/connect/mysql-test/connect/r/odbc_oracle.result
+++ b/storage/connect/mysql-test/connect/r/odbc_oracle.result
@@ -10,7 +10,7 @@ SET NAMES utf8;
# All tables in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables;
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -20,7 +20,7 @@ NULL MTR V1 VIEW NULL
DROP TABLE t1;
# All tables in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='%.%';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -30,7 +30,7 @@ NULL MTR V1 VIEW NULL
DROP TABLE t1;
# All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -38,7 +38,7 @@ NULL MTR T1 TABLE NULL
DROP TABLE t1;
# All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -46,7 +46,7 @@ NULL MTR T1 TABLE NULL
DROP TABLE t1;
# Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -54,7 +54,7 @@ NULL MTR T1 TABLE NULL
DROP TABLE t1;
# All tables in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='MTR.%';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Table_Type Remark
@@ -68,7 +68,7 @@ DROP TABLE t1;
# All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns;
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
@@ -80,7 +80,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu
DROP TABLE t1;
# All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='%.%';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
@@ -91,7 +91,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu
MTR V1 B 6 NUMBER 38 40 NULL NULL 1
DROP TABLE t1;
# All tables "T1" in all schemas (limited with WHERE)
-CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1';
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
MTR T1 A 3 DECIMAL 38 40 0 10 1
@@ -99,7 +99,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu
DROP TABLE t1;
# Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
@@ -108,7 +108,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu
DROP TABLE t1;
# All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks
@@ -121,14 +121,14 @@ DROP TABLE t1;
# Table "T1" in the default schema ("MTR")
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='T1';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`A` decimal(40,0) DEFAULT NULL,
`B` double DEFAULT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='T1'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='T1'
SELECT * FROM t1 ORDER BY A;
A B
10 1000000000
@@ -157,14 +157,14 @@ DROP VIEW v1;
DROP TABLE t1;
# Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.T1';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`A` decimal(40,0) DEFAULT NULL,
`B` double DEFAULT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T1'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T1'
SELECT * FROM t1;
A B
10 1000000000
@@ -173,14 +173,14 @@ A B
DROP TABLE t1;
# View "V1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.V1';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`A` decimal(40,0) DEFAULT NULL,
`B` double DEFAULT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.V1'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.V1'
SELECT * FROM t1;
A B
10 1000000000
@@ -209,13 +209,13 @@ DROP VIEW v1;
DROP TABLE t1;
# Table "T2" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.T2';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`A` varchar(64) DEFAULT NULL
-) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T2'
+) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T2'
SELECT * FROM t1;
A
test
diff --git a/storage/connect/mysql-test/connect/r/rest.result b/storage/connect/mysql-test/connect/r/rest.result
new file mode 100644
index 00000000000..3c4ec80ce71
--- /dev/null
+++ b/storage/connect/mysql-test/connect/r/rest.result
@@ -0,0 +1,19 @@
+#
+# Testing REST query
+#
+CREATE TABLE t1
+ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json'
+HTTP='http://jsonplaceholder.typicode.com/users';
+SELECT * FROM t1;
+id name username email address_street address_suite address_city address_zipcode address_geo_lat address_geo_lng phone website company_name company_catchPhrase company_bs
+1 Leanne Graham Bret Sincere@april.biz Kulas Light Apt. 556 Gwenborough 92998-3874 -37.3159 81.1496 1-770-736-8031 x56442 hildegard.org Romaguera-Crona Multi-layered client-server neural-net harness real-time e-markets
+2 Ervin Howell Antonette Shanna@melissa.tv Victor Plains Suite 879 Wisokyburgh 90566-7771 -43.9509 -34.4618 010-692-6593 x09125 anastasia.net Deckow-Crist Proactive didactic contingency synergize scalable supply-chains
+3 Clementine Bauch Samantha Nathan@yesenia.net Douglas Extension Suite 847 McKenziehaven 59590-4157 -68.6102 -47.0653 1-463-123-4447 ramiro.info Romaguera-Jacobson Face to face bifurcated interface e-enable strategic applications
+4 Patricia Lebsack Karianne Julianne.OConner@kory.org Hoeger Mall Apt. 692 South Elvis 53919-4257 29.4572 -164.2990 493-170-9623 x156 kale.biz Robel-Corkery Multi-tiered zero tolerance productivity transition cutting-edge web services
+5 Chelsey Dietrich Kamren Lucio_Hettinger@annie.ca Skiles Walks Suite 351 Roscoeview 33263 -31.8129 62.5342 (254)954-1289 demarco.info Keebler LLC User-centric fault-tolerant solution revolutionize end-to-end systems
+6 Mrs. Dennis Schulist Leopoldo_Corkery Karley_Dach@jasper.info Norberto Crossing Apt. 950 South Christy 23505-1337 -71.4197 71.7478 1-477-935-8478 x6430 ola.org Considine-Lockman Synchronised bottom-line interface e-enable innovative applications
+7 Kurtis Weissnat Elwyn.Skiles Telly.Hoeger@billy.biz Rex Trail Suite 280 Howemouth 58804-1099 24.8918 21.8984 210.067.6132 elvis.io Johns Group Configurable multimedia task-force generate enterprise e-tailers
+8 Nicholas Runolfsdottir V Maxime_Nienow Sherwood@rosamond.me Ellsworth Summit Suite 729 Aliyaview 45169 -14.3990 -120.7677 586.493.6943 x140 jacynthe.com Abernathy Group Implemented secondary concept e-enable extensible e-tailers
+9 Glenna Reichert Delphine Chaim_McDermott@dana.io Dayna Park Suite 449 Bartholomebury 76495-3109 24.6463 -168.8889 (775)976-6794 x41206 conrad.com Yost and Sons Switchable contextually-based project aggregate real-time technologies
+10 Clementina DuBuque Moriah.Stanton Rey.Padberg@karina.biz Kattie Turnpike Suite 198 Lebsackbury 31428-2261 -38.2386 57.2232 024-648-3804 ambrose.net Hoeger LLC Centralized empowering task-force target end-to-end models
+DROP TABLE t1;
diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result
index 6a0c9db27b3..575c903bbbc 100644
--- a/storage/connect/mysql-test/connect/r/xml.result
+++ b/storage/connect/mysql-test/connect/r/xml.result
@@ -374,8 +374,7 @@ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3);
Warnings:
Level Warning
Code 1105
-Message Com error: Unable to save character to 'iso-8859-1' encoding.
-
+Message warning about characters outside of iso-8859-1
INSERT INTO t1 VALUES ('&<>"\'');
SELECT node, hex(node) FROM t1;
node &<>"'
diff --git a/storage/connect/mysql-test/connect/r/xml2.result b/storage/connect/mysql-test/connect/r/xml2.result
index f7bbc17c8a0..891c6e6f8dd 100644
--- a/storage/connect/mysql-test/connect/r/xml2.result
+++ b/storage/connect/mysql-test/connect/r/xml2.result
@@ -87,9 +87,9 @@ DROP TABLE t1;
# Testing mixed tag and attribute values
#
CREATE TABLE t1 (
-ISBN CHAR(15) FIELD_FORMAT='@',
-LANG CHAR(2) FIELD_FORMAT='@',
-SUBJECT CHAR(32) FIELD_FORMAT='@',
+ISBN CHAR(15) XPATH='@',
+LANG CHAR(2) XPATH='@',
+SUBJECT CHAR(32) XPATH='@',
AUTHOR CHAR(50),
TITLE CHAR(32),
TRANSLATOR CHAR(40),
@@ -120,9 +120,9 @@ DROP TABLE t1;
# Testing INSERT on mixed tag and attribute values
#
CREATE TABLE t1 (
-ISBN CHAR(15) FIELD_FORMAT='@',
-LANG CHAR(2) FIELD_FORMAT='@',
-SUBJECT CHAR(32) FIELD_FORMAT='@',
+ISBN CHAR(15) XPATH='@',
+LANG CHAR(2) XPATH='@',
+SUBJECT CHAR(32) XPATH='@',
AUTHOR CHAR(50),
TITLE CHAR(32),
TRANSLATOR CHAR(40),
@@ -207,18 +207,18 @@ DROP TABLE t1;
# Testing XPath
#
CREATE TABLE t1 (
-isbn CHAR(15) FIELD_FORMAT='@ISBN',
-language CHAR(2) FIELD_FORMAT='@LANG',
-subject CHAR(32) FIELD_FORMAT='@SUBJECT',
-authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME',
-authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME',
-title CHAR(32) FIELD_FORMAT='TITLE',
-translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX',
-tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME',
-publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME',
-location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE',
-year INT(4) FIELD_FORMAT='DATEPUB'
+isbn CHAR(15) XPATH='@ISBN',
+language CHAR(2) XPATH='@LANG',
+subject CHAR(32) XPATH='@SUBJECT',
+authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME',
+authorln CHAR(20) XPATH='AUTHOR/LASTNAME',
+title CHAR(32) XPATH='TITLE',
+translated CHAR(32) XPATH='TRANSLATOR/@PREFIX',
+tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME',
+tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME',
+publisher CHAR(20) XPATH='PUBLISHER/NAME',
+location CHAR(20) XPATH='PUBLISHER/PLACE',
+year INT(4) XPATH='DATEPUB'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM t1;
@@ -260,7 +260,7 @@ DROP TABLE t1;
#
CREATE TABLE t1
(
-isbn CHAR(15) FIELD_FORMAT='@isbn'
+isbn CHAR(15) XPATH='@isbn'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM t1;
diff --git a/storage/connect/mysql-test/connect/r/xml2_html.result b/storage/connect/mysql-test/connect/r/xml2_html.result
index 143f46529f6..499108b724d 100644
--- a/storage/connect/mysql-test/connect/r/xml2_html.result
+++ b/storage/connect/mysql-test/connect/r/xml2_html.result
@@ -5,9 +5,9 @@ SET NAMES utf8;
# Testing HTML like XML file
#
CREATE TABLE beers (
-`Name` CHAR(16) FIELD_FORMAT='brandName',
-`Origin` CHAR(16) FIELD_FORMAT='origin',
-`Description` CHAR(32) FIELD_FORMAT='details')
+`Name` CHAR(16) XPATH='brandName',
+`Origin` CHAR(16) XPATH='origin',
+`Description` CHAR(32) XPATH='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td';
SELECT * FROM beers;
diff --git a/storage/connect/mysql-test/connect/r/xml2_mult.result b/storage/connect/mysql-test/connect/r/xml2_mult.result
index 07c86d961e1..0146baa89c0 100644
--- a/storage/connect/mysql-test/connect/r/xml2_mult.result
+++ b/storage/connect/mysql-test/connect/r/xml2_mult.result
@@ -5,9 +5,9 @@ SET NAMES utf8;
# Testing expanded values
#
CREATE TABLE `bookstore` (
-`category` CHAR(16) NOT NULL FIELD_FORMAT='@',
+`category` CHAR(16) NOT NULL XPATH='@',
`title` VARCHAR(50) NOT NULL,
-`lang` char(2) NOT NULL FIELD_FORMAT='title/@',
+`lang` char(2) NOT NULL XPATH='title/@',
`author` VARCHAR(24) NOT NULL,
`year` INT(4) NOT NULL,
`price` DOUBLE(8,2) NOT NULL)
diff --git a/storage/connect/mysql-test/connect/r/xml2_zip.result b/storage/connect/mysql-test/connect/r/xml2_zip.result
index f176149c53f..e743af32418 100644
--- a/storage/connect/mysql-test/connect/r/xml2_zip.result
+++ b/storage/connect/mysql-test/connect/r/xml2_zip.result
@@ -4,20 +4,20 @@ Warning 1105 No file name. Table will use t1.xml
# Testing zipped XML tables
#
CREATE TABLE t1 (
-ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
-LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
-SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
-AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
-AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
-TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
-TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+ISBN CHAR(13) NOT NULL XPATH='@',
+LANG CHAR(2) NOT NULL XPATH='@',
+SUBJECT CHAR(12) NOT NULL XPATH='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME',
TITLE CHAR(30) NOT NULL,
-PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
-PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE',
DATEPUB CHAR(4) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
+OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
SELECT * FROM t1;
ISBN 9782212090819
LANG fr
@@ -69,7 +69,7 @@ PUBLISHER_PLACE Paris
DATEPUB 2003
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='xmlsup=libxml2';
+OPTION_LIST='depth=0,xmlsup=libxml2';
SELECT * FROM t2;
ISBN 9782212090819
LANG fr
diff --git a/storage/connect/mysql-test/connect/r/xml_html.result b/storage/connect/mysql-test/connect/r/xml_html.result
index 4b984a49901..308c67ffc28 100644
--- a/storage/connect/mysql-test/connect/r/xml_html.result
+++ b/storage/connect/mysql-test/connect/r/xml_html.result
@@ -3,9 +3,9 @@ SET NAMES utf8;
# Testing HTML like XML file
#
CREATE TABLE beers (
-`Name` CHAR(16) FIELD_FORMAT='brandName',
-`Origin` CHAR(16) FIELD_FORMAT='origin',
-`Description` CHAR(32) FIELD_FORMAT='details')
+`Name` CHAR(16) XPATH='brandName',
+`Origin` CHAR(16) XPATH='origin',
+`Description` CHAR(32) XPATH='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=domdoc,rownode=tr,colnode=td';
SELECT * FROM beers;
diff --git a/storage/connect/mysql-test/connect/r/xml_mult.result b/storage/connect/mysql-test/connect/r/xml_mult.result
index c786a80819c..9cdc36dea6b 100644
--- a/storage/connect/mysql-test/connect/r/xml_mult.result
+++ b/storage/connect/mysql-test/connect/r/xml_mult.result
@@ -3,9 +3,9 @@ SET NAMES utf8;
# Testing expanded values
#
CREATE TABLE `bookstore` (
-`category` CHAR(16) NOT NULL FIELD_FORMAT='@',
+`category` CHAR(16) NOT NULL XPATH='@',
`title` VARCHAR(50) NOT NULL,
-`lang` char(2) NOT NULL FIELD_FORMAT='title/@',
+`lang` char(2) NOT NULL XPATH='title/@',
`author` VARCHAR(24) NOT NULL,
`year` INT(4) NOT NULL,
`price` DOUBLE(8,2) NOT NULL)
diff --git a/storage/connect/mysql-test/connect/r/xml_zip.result b/storage/connect/mysql-test/connect/r/xml_zip.result
index f7790e4cfff..5f17249b390 100644
--- a/storage/connect/mysql-test/connect/r/xml_zip.result
+++ b/storage/connect/mysql-test/connect/r/xml_zip.result
@@ -2,20 +2,20 @@
# Testing zipped XML tables
#
CREATE TABLE t1 (
-ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
-LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
-SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
-AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
-AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
-TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
-TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+ISBN CHAR(13) NOT NULL XPATH='@',
+LANG CHAR(2) NOT NULL XPATH='@',
+SUBJECT CHAR(12) NOT NULL XPATH='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME',
TITLE CHAR(30) NOT NULL,
-PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
-PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE',
DATEPUB CHAR(4) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR';
+OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR';
SELECT * FROM t1;
ISBN 9782212090819
LANG fr
@@ -67,7 +67,7 @@ PUBLISHER_PLACE Paris
DATEPUB 2003
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='xmlsup=domdoc';
+OPTION_LIST='depth=0,xmlsup=domdoc';
SELECT * FROM t2;
ISBN 9782212090819
LANG fr
diff --git a/storage/connect/mysql-test/connect/r/zip.result b/storage/connect/mysql-test/connect/r/zip.result
index c81546a4689..c696252ca43 100644
--- a/storage/connect/mysql-test/connect/r/zip.result
+++ b/storage/connect/mysql-test/connect/r/zip.result
@@ -171,32 +171,32 @@ DROP TABLE t1,t2,t3,t4;
#
CREATE TABLE t1 (
_id INT(2) NOT NULL,
-name_first CHAR(9) NOT NULL FIELD_FORMAT='$.name.first',
-name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka',
-name_last CHAR(10) NOT NULL FIELD_FORMAT='$.name.last',
+name_first CHAR(9) NOT NULL JPATH='$.name.first',
+name_aka CHAR(4) DEFAULT NULL JPATH='$.name.aka',
+name_last CHAR(10) NOT NULL JPATH='$.name.last',
title CHAR(12) DEFAULT NULL,
birth CHAR(20) DEFAULT NULL,
death CHAR(20) DEFAULT NULL,
-contribs CHAR(7) NOT NULL FIELD_FORMAT='$.contribs',
-awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards.award',
-awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards.year',
-awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards.by'
+contribs CHAR(50) NOT NULL JPATH='$.contribs',
+awards_award CHAR(42) DEFAULT NULL JPATH='$.awards.award',
+awards_year CHAR(4) DEFAULT NULL JPATH='$.awards.year',
+awards_by CHAR(38) DEFAULT NULL JPATH='$.awards.by'
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES;
SELECT * FROM t1;
_id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by
-1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society
-2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp Turing Award 1971 ACM
-3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC Computer Sciences Man of the Year 1969 Data Processing Management Association
-4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
-5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association
+1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran, ALGOL, Backus-Naur Form, FP W.W. McDowell Award 1967 IEEE Computer Society
+2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp, Artificial Intelligence, ALGOL Turing Award 1971 ACM
+3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC, compiler, FLOW-MATIC, COBOL Computer Sciences Man of the Year 1969 Data Processing Management Association
+4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP, Simula Rosing Prize 1999 Norwegian Data Association
+5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP, Simula Rosing Prize 1999 Norwegian Data Association
6 Guido NULL van Rossum NULL 1956-01-31T05:00:00Z NULL Python Award for the Advancement of Free Software 2001 Free Software Foundation
-7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX Turing Award 1983 ACM
+7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX, C Turing Award 1983 ACM
8 Yukihiro Matz Matsumoto NULL 1965-04-14T04:00:00Z NULL Ruby Award for the Advancement of Free Software 2011 Free Software Foundation
9 James NULL Gosling NULL 1955-05-19T04:00:00Z NULL Java The Economist Innovation Award 2002 The Economist
10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
-OPTION_LIST='LEVEL=5';
+OPTION_LIST='DEPTH=5';
SELECT * FROM t2;
_id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by
1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society
@@ -211,16 +211,16 @@ _id name_first name_aka name_last title birth death contribs awards_award awards
10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL
CREATE TABLE t3 (
_id INT(2) NOT NULL,
-firstname CHAR(9) NOT NULL FIELD_FORMAT='$.name.first',
-aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka',
-lastname CHAR(10) NOT NULL FIELD_FORMAT='$.name.last',
+firstname CHAR(9) NOT NULL JPATH='$.name.first',
+aka CHAR(4) DEFAULT NULL JPATH='$.name.aka',
+lastname CHAR(10) NOT NULL JPATH='$.name.last',
title CHAR(12) DEFAULT NULL,
birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
-contribs CHAR(64) NOT NULL FIELD_FORMAT='$.contribs.[", "]',
-award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards[*].award',
-year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards[*].year',
-`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards[*].by'
+contribs CHAR(64) NOT NULL JPATH='$.contribs.[", "]',
+award CHAR(42) DEFAULT NULL JPATH='$.awards[*].award',
+year CHAR(4) DEFAULT NULL JPATH='$.awards[*].year',
+`by` CHAR(38) DEFAULT NULL JPATH='$.awards[*].by'
) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES;
SELECT * FROM t3 WHERE _id = 1;
_id firstname aka lastname title birth death contribs award year by
diff --git a/storage/connect/mysql-test/connect/t/alter_xml.test b/storage/connect/mysql-test/connect/t/alter_xml.test
index 8b2164d5548..4c2e1670f4c 100644
--- a/storage/connect/mysql-test/connect/t/alter_xml.test
+++ b/storage/connect/mysql-test/connect/t/alter_xml.test
@@ -21,7 +21,7 @@ SELECT * FROM t2;
--echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
--echo # Testing field option modification
-ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
+ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0;
SELECT * FROM t1;
SHOW CREATE TABLE t1;
SELECT * FROM t2;
diff --git a/storage/connect/mysql-test/connect/t/alter_xml2.test b/storage/connect/mysql-test/connect/t/alter_xml2.test
index d67c80c4e9f..ec4065baa47 100644
--- a/storage/connect/mysql-test/connect/t/alter_xml2.test
+++ b/storage/connect/mysql-test/connect/t/alter_xml2.test
@@ -21,7 +21,7 @@ SELECT * FROM t2;
--echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option.
--echo # Testing field option modification
-ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0;
+ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0;
SELECT * FROM t1;
SHOW CREATE TABLE t1;
SELECT * FROM t2;
diff --git a/storage/connect/mysql-test/connect/t/bson.test b/storage/connect/mysql-test/connect/t/bson.test
new file mode 100644
index 00000000000..ab38cab73fc
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson.test
@@ -0,0 +1,294 @@
+--source include/not_embedded.inc
+--source include/have_partition.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json
+--copy_file $MTR_SUITE_DIR/std_data/bib0.json $MYSQLD_DATADIR/test/bib0.json
+--copy_file $MTR_SUITE_DIR/std_data/expense.json $MYSQLD_DATADIR/test/expense.json
+--copy_file $MTR_SUITE_DIR/std_data/mulexp3.json $MYSQLD_DATADIR/test/mulexp3.json
+--copy_file $MTR_SUITE_DIR/std_data/mulexp4.json $MYSQLD_DATADIR/test/mulexp4.json
+--copy_file $MTR_SUITE_DIR/std_data/mulexp5.json $MYSQLD_DATADIR/test/mulexp5.json
+
+--echo #
+--echo # Testing doc samples
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ LANG CHAR(2),
+ SUBJECT CHAR(32),
+ AUTHOR CHAR(64),
+ TITLE CHAR(32),
+ TRANSLATION CHAR(32),
+ TRANSLATOR CHAR(80),
+ PUBLISHER CHAR(32),
+ DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Testing Jpath. Get the number of authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ Authors INT(2) JPATH='$.AUTHOR[#]',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Concatenates the authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing expanding authors
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15),
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+SELECT * FROM t1;
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab';
+SELECT * FROM t1 WHERE ISBN = '9782212090819';
+
+--echo #
+--echo # To add an author a new table must be created
+--echo #
+CREATE TABLE t2 (
+FIRSTNAME CHAR(32),
+LASTNAME CHAR(32))
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR';
+SELECT * FROM t2;
+INSERT INTO t2 VALUES('Charles','Dickens');
+SELECT * FROM t1;
+DROP TABLE t1;
+DROP TABLE t2;
+
+--echo #
+--echo # Check the biblio file has the good format
+--echo #
+CREATE TABLE t1
+(
+ line char(255)
+)
+ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Testing a pretty=0 file
+--echo #
+CREATE TABLE t1
+(
+ ISBN CHAR(15) NOT NULL,
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX',
+ TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
+ TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB',
+ INDEX IX(ISBN)
+)
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0';
+SHOW INDEX FROM t1;
+SELECT * FROM t1;
+DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819';
+--error ER_GET_ERRMSG
+UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819';
+DROP TABLE t1;
+
+--echo #
+--echo # A file with 2 arrays
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Now it can be fully expanded
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+#--error ER_GET_ERRMSG
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # A table showing many calculated results
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12) NOT NULL,
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',
+SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',
+SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
+AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT',
+SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT',
+AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT',
+AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT',
+AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Expand expense in 3 one week tables
+--echo #
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[0].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[1].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2) JPATH='$.WEEK[2].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
+SELECT * FROM t4;
+
+--echo #
+--echo # The expanded table is made as a TBL table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32),
+AMOUNT DOUBLE(8,2))
+ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4';
+SELECT * FROM t1;
+DROP TABLE t1, t2, t3, t4;
+
+--echo #
+--echo # Three partial JSON tables
+--echo #
+CREATE TABLE t2 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json';
+SELECT * FROM t2;
+
+CREATE TABLE t3 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json';
+SELECT * FROM t3;
+
+CREATE TABLE t4 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json';
+SELECT * FROM t4;
+
+--echo #
+--echo # The complete table can be a multiple JSON table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1;
+SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
+DROP TABLE t1;
+
+--echo #
+--echo # Or also a partition JSON table
+--echo #
+CREATE TABLE t1 (
+WHO CHAR(12),
+WEEK INT(2),
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
+ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json';
+ALTER TABLE t1
+PARTITION BY LIST COLUMNS(WEEK) (
+PARTITION `3` VALUES IN(3),
+PARTITION `4` VALUES IN(4),
+PARTITION `5` VALUES IN(5));
+SHOW WARNINGS;
+SELECT * FROM t1;
+SELECT * FROM t1 WHERE WEEK = 4;
+DROP TABLE t1, t2, t3, t4;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/biblio.json
+--remove_file $MYSQLD_DATADIR/test/bib0.dnx
+--remove_file $MYSQLD_DATADIR/test/bib0.json
+--remove_file $MYSQLD_DATADIR/test/expense.json
+--remove_file $MYSQLD_DATADIR/test/mulexp3.json
+--remove_file $MYSQLD_DATADIR/test/mulexp4.json
+--remove_file $MYSQLD_DATADIR/test/mulexp5.json
diff --git a/storage/connect/mysql-test/connect/t/bson_java_2.test b/storage/connect/mysql-test/connect/t/bson_java_2.test
new file mode 100644
index 00000000000..2188d9c2c91
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_java_2.test
@@ -0,0 +1,14 @@
+-- source jdbconn.inc
+-- source mongo.inc
+
+--disable_query_log
+eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo2.jar';
+set connect_json_all_path=0;
+--enable_query_log
+let $DRV= Java;
+let $VERS= 2;
+let $TYPE= BSON;
+let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=4096;
+
+-- source mongo_test.inc
+-- source jdbconn_cleanup.inc
diff --git a/storage/connect/mysql-test/connect/t/bson_java_3.test b/storage/connect/mysql-test/connect/t/bson_java_3.test
new file mode 100644
index 00000000000..e7dd90b3563
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_java_3.test
@@ -0,0 +1,14 @@
+-- source jdbconn.inc
+-- source mongo.inc
+
+--disable_query_log
+eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo3.jar';
+set connect_json_all_path=0;
+--enable_query_log
+let $DRV= Java;
+let $VERS= 3;
+let $TYPE= BSON;
+let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=4096;
+
+-- source mongo_test.inc
+-- source jdbconn_cleanup.inc
diff --git a/storage/connect/mysql-test/connect/t/bson_mongo_c.test b/storage/connect/mysql-test/connect/t/bson_mongo_c.test
new file mode 100644
index 00000000000..938d77c7c95
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_mongo_c.test
@@ -0,0 +1,10 @@
+-- source mongo.inc
+
+let $DRV= C;
+let $VERS= 0;
+let $PROJ= {"projection":;
+let $ENDP= };
+let $TYPE= BSON;
+let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=1024;
+
+-- source mongo_test.inc
diff --git a/storage/connect/mysql-test/connect/t/bson_udf.inc b/storage/connect/mysql-test/connect/t/bson_udf.inc
new file mode 100644
index 00000000000..366f48e5861
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_udf.inc
@@ -0,0 +1,70 @@
+--disable_query_log
+#
+# Check if server has support for loading plugins
+#
+if (`SELECT @@have_dynamic_loading != 'YES'`) {
+ --skip UDF requires dynamic loading
+}
+if (!$HA_CONNECT_SO) {
+ --skip Needs a dynamically built ha_connect.so
+}
+
+--eval CREATE FUNCTION bson_test RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonvalue RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_make_array RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_array_add_values RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_array_add RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_array_delete RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_make_object RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_nonull RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_key RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_add RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_list RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_object_values RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonset_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE AGGREGATE FUNCTION bson_array_grp RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE AGGREGATE FUNCTION bson_object_grp RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonlocate RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_locate_all RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_contains RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsoncontains_path RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_item_merge RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_get_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_delete_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_string RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_int RETURNS INTEGER SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bsonget_real RETURNS REAL SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_set_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_insert_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_update_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_file RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bson_serialize RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bfile_make RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bfile_convert RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bfile_bjson RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_make_array RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_array_add RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_array_add_values RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_array_delete RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE AGGREGATE FUNCTION bbin_array_grp RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE AGGREGATE FUNCTION bbin_object_grp RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_make_object RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_nonull RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_key RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_add RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_list RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_object_values RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_get_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_item_merge RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_set_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_insert_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_update_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_delete_item RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_locate_all RETURNS STRING SONAME '$HA_CONNECT_SO';
+--eval CREATE FUNCTION bbin_file RETURNS STRING SONAME '$HA_CONNECT_SO';
+
+--enable_query_log
+
diff --git a/storage/connect/mysql-test/connect/t/bson_udf.test b/storage/connect/mysql-test/connect/t/bson_udf.test
new file mode 100644
index 00000000000..84a3db6d061
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_udf.test
@@ -0,0 +1,281 @@
+--source bson_udf.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json
+--copy_file $MTR_SUITE_DIR/std_data/employee.dat $MYSQLD_DATADIR/test/employee.dat
+
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=VIR BLOCK_SIZE=5;
+
+--echo #
+--echo # Test UDF's with constant arguments
+--echo #
+--error ER_CANT_INITIALIZE_UDF
+SELECT BsonValue(56, 3.1416, 'foo', NULL);
+SELECT BsonValue(3.1416);
+SELECT BsonValue(-80);
+SELECT BsonValue('foo');
+SELECT BsonValue(9223372036854775807);
+SELECT BsonValue(NULL);
+SELECT BsonValue(TRUE);
+SELECT BsonValue(FALSE);
+SELECT BsonValue();
+SELECT BsonValue('[11, 22, 33]' json_) FROM t1;
+#
+SELECT Bson_Make_Array();
+SELECT Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL);
+SELECT Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE);
+#
+--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL)) Array;
+SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array;
+#--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Add(BsonValue('one value'), 'One more');
+#--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Add('one value', 'One more');
+SELECT Bson_Array_Add('one value' json_, 'One more');
+#--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Add(5 json_, 'One more');
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0);
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array;
+SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9);
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1);
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1);
+SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]');
+#
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin', NULL), 'One more', 'Two more') Array;
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), 'One more', 'Two more') Array FROM t1;
+SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), n) Array FROM t1;
+SELECT Bson_Array_Add_Values(Bson_Make_Array(n, 3.1416, 'machin'), n) Array FROM t1;
+SELECT Bson_Array_Add_Values('[56]', 3.1416, 'machin') Array;
+#
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0);
+SELECT Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2);
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2');
+SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2); /* WARNING VOID */
+#
+SELECT Bson_Make_Object(56, 3.1416, 'foo', NULL);
+SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty);
+SELECT Bson_Make_Object();
+SELECT Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL);
+SELECT Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL);
+SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL);
+--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty');
+#
+SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color);
+SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price);
+SELECT Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1);
+#
+SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc');
+SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose');
+#
+SELECT Bson_Object_List(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty)) "Key List";
+SELECT Bson_Object_List('{"qty":56, "price":3.1416, "truc":"machin", "garanty":null}') "Key List";
+SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List";
+
+--echo #
+--echo # Test UDF's with column arguments
+--echo #
+CREATE TABLE t2
+(
+ ISBN CHAR(15),
+ LANG CHAR(2),
+ SUBJECT CHAR(32),
+ AUTHOR CHAR(64),
+ TITLE CHAR(32),
+ TRANSLATION CHAR(32),
+ TRANSLATOR CHAR(80),
+ PUBLISHER CHAR(32),
+ DATEPUB int(4)
+) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
+
+SELECT Bson_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2;
+SELECT Bson_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2;
+--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Array_Grp(TITLE, DATEPUB) FROM t2;
+SELECT Bson_Array_Grp(TITLE) FROM t2;
+
+CREATE TABLE t3 (
+ SERIALNO CHAR(5) NOT NULL,
+ NAME VARCHAR(12) NOT NULL FLAG=6,
+ SEX SMALLINT(1) NOT NULL,
+ TITLE VARCHAR(15) NOT NULL FLAG=20,
+ MANAGER CHAR(5) DEFAULT NULL,
+ DEPARTMENT CHAr(4) NOT NULL FLAG=41,
+ SECRETARY CHAR(5) DEFAULT NULL FLAG=46,
+ SALARY DOUBLE(8,2) NOT NULL FLAG=52
+) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1;
+
+SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT';
+SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT;
+# SET connect_json_grp_size=30; is deprecated, BsonSet_Grp_Size() is used instead:
+SELECT BsonSet_Grp_Size(30);
+SELECT Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) from t3 GROUP BY title;
+SELECT Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) FROM t3 GROUP BY DEPARTMENT;
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) FROM t3 GROUP BY DEPARTMENT;
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT;
+SELECT Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE;
+--error ER_CANT_INITIALIZE_UDF
+SELECT Bson_Object_Grp(SALARY) FROM t3;
+SELECT Bson_Object_Grp(NAME, SALARY) FROM t3;
+SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT;
+SELECT Bson_Array_Grp(NAME) FROM t3;
+#
+SELECT Bson_Object_Key(name, title) FROM t3 WHERE DEPARTMENT = 318;
+SELECT Bson_Object_Grp(name, title) FROM t3 WHERE DEPARTMENT = 318;
+
+--echo #
+--echo # Test value getting UDF's
+--echo #
+SELECT BsonGet_String(Bson_Array_Grp(name),'[#]') FROM t3;
+SELECT BsonGet_String(Bson_Array_Grp(name),'[","]') FROM t3;
+SELECT BsonGet_String(Bson_Array_Grp(name),'[>]') FROM t3;
+SET @j1 = '[45,28,36,45,89]';
+SELECT BsonGet_String(@j1,'1');
+SELECT BsonGet_String(@j1 json_,'3');
+SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3');
+SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum";
+SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0');
+SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*');
+SELECT BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc');
+SET @j2 = '{"qty":56,"price":3.141600,"truc":"machin","garanty":null}';
+SELECT BsonGet_String(@j2 json_,'truc');
+SELECT BsonGet_String(@j2,'truc');
+SELECT BsonGet_String(@j2,'chose');
+SELECT BsonGet_String(NULL json_, NULL); /* NULL WARNING */
+SELECT department, BsonGet_String(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+#
+SELECT BsonGet_Int(@j1, '4');
+SELECT BsonGet_Int(@j1, '[#]');
+SELECT BsonGet_Int(@j1, '[+]');
+SELECT BsonGet_Int(@j1 json_, '3');
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3');
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]');
+SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]');
+SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0');
+SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1');
+SELECT BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty');
+SELECT BsonGet_Int(@j2 json_, 'price');
+SELECT BsonGet_Int(@j2, 'qty');
+SELECT BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose');
+SELECT BsonGet_Int(BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)), '1.*'), '[+]') sum;
+SELECT department, BsonGet_Int(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"), 'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+#
+SELECT BsonGet_Real(@j1, '2');
+SELECT BsonGet_Real(@j1 json_, '3', 2);
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3');
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]');
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]');
+SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]');
+SELECT BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0');
+SELECT BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price');
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty');
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price');
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4);
+SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose');
+SELECT department, BsonGet_Real(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
+
+--echo #
+--echo # Documentation examples
+--echo #
+SELECT
+ BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '4') "Rank",
+ BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[#]') "Number",
+ BsonGet_String(Bson_Make_Array(45,28,36,45,89), '[","]') "Concat",
+ BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') "Sum",
+ BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]', 2) "Avg";
+SELECT
+ BsonGet_String('{"qty":7,"price":29.50,"garanty":null}', 'price') "String",
+ BsonGet_Int('{"qty":7,"price":29.50,"garanty":null}', 'price') "Int",
+ BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price') "Real";
+SELECT BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price', 3) "Real";
+
+--echo #
+--echo # Testing Locate
+--echo #
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin');
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56);
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416);
+SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose');
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'Jack') Path;
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'jack' ci) Path;
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"Jack", "LN":"London"}' json_) Path;
+SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"jack", "LN":"London"}' json_) Path;
+SELECT BsonLocate('[45,28,36,45,89]',36);
+SELECT BsonLocate('[45,28,36,45,89]' json_,28.0);
+SELECT Bson_Locate_All('[45,28,36,45,89]',10);
+SELECT Bson_Locate_All('[45,28,36,45,89]',45);
+SELECT Bson_Locate_All('[[45,28],36,45,89]',45);
+SELECT Bson_Locate_All('[[45,28,45],36,45,89]',45);
+SELECT Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]'));
+SELECT BsonLocate('[[45,28,45],36,45,89]',45,n) from t1;
+SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) FROM t1;
+SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) AS `Path` FROM t1 GROUP BY n HAVING `Path` IS NOT NULL;
+SELECT Bson_Locate_All('[45,28,[36,45,89]]',45);
+SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0));
+SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0);
+SELECT BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_);
+SELECT BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_);
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','45') "All paths";
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_);
+SELECT BsonGet_Int(Bson_Locate_All('[[45,28],[[36,45],89]]',45), '[#]') "Nb of occurs";
+SELECT Bson_Locate_All('[[45,28],[[36,45],89]]',45,2);
+SELECT BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0');
+SELECT BsonLocate(Bson_File('test/biblio.json'), 'Knab');
+SELECT Bson_Locate_All('test/biblio.json' jfile_, 'Knab');
+
+--echo #
+--echo # Testing json files
+--echo #
+SELECT Bfile_Make('[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},
+{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},
+{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},
+{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]', 'test/fx.json', 0) AS NewFile;
+SELECT Bfile_Make('test/fx.json', 1);
+SELECT Bfile_Make('test/fx.json' jfile_);
+SELECT Bfile_Make(Bbin_File('test/fx.json'), 0);
+SELECT Bson_File('test/fx.json', 1);
+SELECT Bson_File('test/fx.json', 2);
+SELECT Bson_File('test/fx.json', 0);
+SELECT Bson_File('test/fx.json', '0');
+SELECT Bson_File('test/fx.json', '[?]');
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*');
+SELECT BsonGet_String(Bson_File('test/fx.json'), '1');
+SELECT BsonGet_Int(Bson_File('test/fx.json'), '1.mileage') AS Mileage;
+SELECT BsonGet_Real(Bson_File('test/fx.json'), '0.price', 2) AS Price;
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings');
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings');
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1);
+SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0);
+SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1);
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin);
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size');
+SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size');
+SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size'));
+
+--echo #
+--echo # Testing new functions
+--echo #
+SELECT Bson_Item_Merge('["a","b","c"]','["d","e","f"]') as "Result";
+SELECT Bson_Item_Merge(Bson_Make_Array('a','b','c'), Bson_Make_Array('d','e','f')) as "Result";
+SELECT
+Bson_Set_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Set",
+Bson_Insert_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Insert",
+Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]',5,'$[3].cinq') "Update";
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux');
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]');
+SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux');
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+SELECT BsonSet_Grp_Size(10);
+
+#
+# Clean up
+#
+--source bson_udf2.inc
+--remove_file $MYSQLD_DATADIR/test/biblio.json
+--remove_file $MYSQLD_DATADIR/test/employee.dat
+--remove_file $MYSQLD_DATADIR/test/fx.json
+
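The value-getting calls in this test lean on CONNECT's special array operators, whose meaning the "Documentation examples" block spells out: '[#]' yields the number of array items, '[+]' their sum, '[!]' their average, and '[","]' a separator-joined concatenation. A minimal sketch of those operators on a literal array, assuming the UDFs registered by bson_udf.inc are loaded; the expected values in the comments follow from the documentation examples above, not from a recorded result file:

SELECT
  BsonGet_Int('[10,20,30]', '[#]')      AS item_count,  # number of items: 3
  BsonGet_Int('[10,20,30]', '[+]')      AS item_sum,    # sum: 60
  BsonGet_Real('[10,20,30]', '[!]', 2)  AS item_avg,    # average, 2 decimals: 20.00
  BsonGet_String('[10,20,30]', '[","]') AS item_list;   # items joined with ","
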
diff --git a/storage/connect/mysql-test/connect/t/bson_udf2.inc b/storage/connect/mysql-test/connect/t/bson_udf2.inc
new file mode 100644
index 00000000000..ceddf8b0632
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/bson_udf2.inc
@@ -0,0 +1,61 @@
+--disable_query_log
+
+DROP FUNCTION bson_test;
+DROP FUNCTION bsonvalue;
+DROP FUNCTION bson_make_array;
+DROP FUNCTION bson_array_add_values;
+DROP FUNCTION bson_array_add;
+DROP FUNCTION bson_array_delete;
+DROP FUNCTION bson_make_object;
+DROP FUNCTION bson_object_nonull;
+DROP FUNCTION bson_object_key;
+DROP FUNCTION bson_object_add;
+DROP FUNCTION bson_object_delete;
+DROP FUNCTION bson_object_list;
+DROP FUNCTION bson_object_values;
+DROP FUNCTION bsonset_grp_size;
+DROP FUNCTION bsonget_grp_size;
+DROP FUNCTION bson_array_grp;
+DROP FUNCTION bson_object_grp;
+DROP FUNCTION bsonlocate;
+DROP FUNCTION bson_locate_all;
+DROP FUNCTION bson_contains;
+DROP FUNCTION bsoncontains_path;
+DROP FUNCTION bson_item_merge;
+DROP FUNCTION bson_get_item;
+DROP FUNCTION bson_delete_item;
+DROP FUNCTION bsonget_string;
+DROP FUNCTION bsonget_int;
+DROP FUNCTION bsonget_real;
+DROP FUNCTION bson_set_item;
+DROP FUNCTION bson_insert_item;
+DROP FUNCTION bson_update_item;
+DROP FUNCTION bson_serialize;
+DROP FUNCTION bson_file;
+DROP FUNCTION bfile_make;
+DROP FUNCTION bfile_convert;
+DROP FUNCTION bfile_bjson;
+DROP FUNCTION bbin_make_array;
+DROP FUNCTION bbin_array_add;
+DROP FUNCTION bbin_array_add_values;
+DROP FUNCTION bbin_array_delete;
+DROP FUNCTION bbin_array_grp;
+DROP FUNCTION bbin_object_grp;
+DROP FUNCTION bbin_make_object;
+DROP FUNCTION bbin_object_nonull;
+DROP FUNCTION bbin_object_key;
+DROP FUNCTION bbin_object_add;
+DROP FUNCTION bbin_object_delete;
+DROP FUNCTION bbin_object_list;
+DROP FUNCTION bbin_object_values;
+DROP FUNCTION bbin_get_item;
+DROP FUNCTION bbin_set_item;
+DROP FUNCTION bbin_insert_item;
+DROP FUNCTION bbin_update_item;
+DROP FUNCTION bbin_item_merge;
+DROP FUNCTION bbin_delete_item;
+DROP FUNCTION bbin_locate_all;
+DROP FUNCTION bbin_file;
+
+--enable_query_log
+
diff --git a/storage/connect/mysql-test/connect/t/ini_grant.result b/storage/connect/mysql-test/connect/t/ini_grant.result
deleted file mode 100644
index 96d5e192c7d..00000000000
--- a/storage/connect/mysql-test/connect/t/ini_grant.result
+++ /dev/null
@@ -1,89 +0,0 @@
-#
-# Checking FILE privileges
-#
-set sql_mode="";
-GRANT ALL PRIVILEGES ON *.* TO user@localhost;
-REVOKE FILE ON *.* FROM user@localhost;
-set sql_mode=default;
-connect user,localhost,user,,;
-connection user;
-SELECT user();
-user()
-user@localhost
-CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI;
-Warnings:
-Warning 1105 No file name. Table will use t1.ini
-INSERT INTO t1 VALUES ('sec1','val1');
-SELECT * FROM t1;
-sec val
-sec1 val1
-UPDATE t1 SET val='val11';
-SELECT * FROM t1;
-sec val
-sec1 val11
-DELETE FROM t1;
-SELECT * FROM t1;
-sec val
-INSERT INTO t1 VALUES('sec2','val2');
-TRUNCATE TABLE t1;
-SELECT * FROM t1;
-sec val
-CREATE VIEW v1 AS SELECT * FROM t1;
-SELECT * FROM v1;
-sec val
-DROP VIEW v1;
-DROP TABLE t1;
-CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI FILE_NAME='t1.EXT';
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-connection default;
-SELECT user();
-user()
-root@localhost
-CREATE TABLE t1 (sec CHAR(10) NOT NULL FLAG=1, val CHAR(10) NOT NULL) ENGINE=CONNECT TABLE_TYPE=INI FILE_NAME='t1.EXT';
-INSERT INTO t1 VALUES ('sec1','val1');
-connection user;
-SELECT user();
-user()
-user@localhost
-INSERT INTO t1 VALUES ('sec2','val2');
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-SELECT * FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-UPDATE t1 SET val='val11';
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-DELETE FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-TRUNCATE TABLE t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-ALTER TABLE t1 READONLY=1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-DROP TABLE t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-CREATE VIEW v1 AS SELECT * FROM t1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-# Testing a VIEW created with FILE privileges but accessed with no FILE
-connection default;
-SELECT user();
-user()
-root@localhost
-CREATE SQL SECURITY INVOKER VIEW v1 AS SELECT * FROM t1;
-connection user;
-SELECT user();
-user()
-user@localhost
-SELECT * FROM v1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-INSERT INTO v1 VALUES ('sec3','val3');
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-UPDATE v1 SET val='val11';
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-DELETE FROM v1;
-ERROR 42000: Access denied; you need (at least one of) the FILE privilege(s) for this operation
-disconnect user;
-connection default;
-DROP VIEW v1;
-DROP TABLE t1;
-DROP USER user@localhost;
-#
-# Checking FILE privileges: done
-#
diff --git a/storage/connect/mysql-test/connect/t/jdbc_oracle.test b/storage/connect/mysql-test/connect/t/jdbc_oracle.test
index 10cb7a7b77d..1316352d4f5 100644
--- a/storage/connect/mysql-test/connect/t/jdbc_oracle.test
+++ b/storage/connect/mysql-test/connect/t/jdbc_oracle.test
@@ -8,20 +8,20 @@ CREATE TABLE t2 (
number int(5) not null flag=1,
message varchar(255) flag=2)
ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager,Execsrc=1';
+OPTION_LIST='User=system,Password=Choupy01,Execsrc=1';
SELECT * FROM t2 WHERE command = 'drop table employee';
SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary number(8,2))';
SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)";
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager';
+OPTION_LIST='User=system,Password=Choupy01';
SELECT * FROM t1 WHERE table_name='employee';
DROP TABLE t1;
CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns
CONNECTION='jdbc:oracle:thin:@localhost:1521:xe'
-OPTION_LIST='User=system,Password=manager';
+OPTION_LIST='User=system,Password=Choupy01';
SELECT * FROM t1;
DROP TABLE t1;
@@ -32,7 +32,7 @@ CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OP
HOST 'jdbc:oracle:thin:@localhost:1521:xe',
DATABASE 'SYSTEM',
USER 'system',
-PASSWORD 'manager',
+PASSWORD 'Choupy01',
PORT 0,
SOCKET '',
OWNER 'SYSTEM');
diff --git a/storage/connect/mysql-test/connect/t/json.test b/storage/connect/mysql-test/connect/t/json.test
index 018489525f7..8b42ef9cfab 100644
--- a/storage/connect/mysql-test/connect/t/json.test
+++ b/storage/connect/mysql-test/connect/t/json.test
@@ -35,15 +35,15 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
- Language CHAR(2) FIELD_FORMAT='$.LANG',
- Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
- Authors INT(2) FIELD_FORMAT='$.AUTHOR[#]',
- Title CHAR(32) FIELD_FORMAT='$.TITLE',
- Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
- Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
- Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
- Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
- Year int(4) FIELD_FORMAT='$.DATEPUB'
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ Authors INT(2) JPATH='$.AUTHOR[#]',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -55,16 +55,16 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
- Language CHAR(2) FIELD_FORMAT='$.LANG',
- Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
- AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].FIRSTNAME',
- AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].LASTNAME',
- Title CHAR(32) FIELD_FORMAT='$.TITLE',
- Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
- Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
- Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
- Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
- Year int(4) FIELD_FORMAT='$.DATEPUB'
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -76,16 +76,16 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15),
- Language CHAR(2) FIELD_FORMAT='$.LANG',
- Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
- AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME',
- AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME',
- Title CHAR(32) FIELD_FORMAT='$.TITLE',
- Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION',
- Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR',
- Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
- Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
- Year int(4) FIELD_FORMAT='$.DATEPUB'
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATION',
+ Translator CHAR(80) JPATH='$.TRANSLATOR',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB'
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT * FROM t1;
@@ -122,17 +122,17 @@ DROP TABLE t1;
CREATE TABLE t1
(
ISBN CHAR(15) NOT NULL,
- Language CHAR(2) FIELD_FORMAT='$.LANG',
- Subject CHAR(32) FIELD_FORMAT='$.SUBJECT',
- AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME',
- AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME',
- Title CHAR(32) FIELD_FORMAT='$.TITLE',
- Translation CHAR(32) FIELD_FORMAT='$.TRANSLATED.PREFIX',
- TranslatorFN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
- TranslatorLN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.LASTNAME',
- Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME',
- Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE',
- Year int(4) FIELD_FORMAT='$.DATEPUB',
+ Language CHAR(2) JPATH='$.LANG',
+ Subject CHAR(32) JPATH='$.SUBJECT',
+ AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME',
+ AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME',
+ Title CHAR(32) JPATH='$.TITLE',
+ Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX',
+ TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME',
+ TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME',
+ Publisher CHAR(20) JPATH='$.PUBLISHER.NAME',
+ Location CHAR(16) JPATH='$.PUBLISHER.PLACE',
+ Year int(4) JPATH='$.DATEPUB',
INDEX IX(ISBN)
)
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0';
@@ -148,9 +148,9 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[].EXPENSE["+"].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[].EXPENSE[+].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
DROP TABLE t1;
@@ -160,9 +160,9 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[*].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
#--error ER_GET_ERRMSG
SELECT * FROM t1;
@@ -173,14 +173,14 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (
WHO CHAR(12) NOT NULL,
-WEEKS CHAR(12) NOT NULL FIELD_FORMAT='$.WEEK[", "].NUMBER',
-SUMS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[+].AMOUNT',
-SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[+].AMOUNT',
-AVGS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[!].AMOUNT',
-SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[!].AMOUNT',
-AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[+].AMOUNT',
-AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[!].AMOUNT',
-AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[*].AMOUNT')
+WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',
+SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',
+SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
+AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT',
+SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT',
+AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT',
+AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT',
+AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t1;
DROP TABLE t1;
@@ -190,25 +190,25 @@ DROP TABLE t1;
--echo #
CREATE TABLE t2 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[0].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[0].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[1].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[1].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
-WEEK INT(2) FIELD_FORMAT='$.WEEK[2].NUMBER',
-WHAT CHAR(32) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].AMOUNT')
+WEEK INT(2) JPATH='$.WEEK[2].NUMBER',
+WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json';
SELECT * FROM t4;
@@ -230,24 +230,24 @@ DROP TABLE t1, t2, t3, t4;
CREATE TABLE t2 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json';
SELECT * FROM t2;
CREATE TABLE t3 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json';
SELECT * FROM t3;
CREATE TABLE t4 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json';
SELECT * FROM t4;
@@ -257,8 +257,8 @@ SELECT * FROM t4;
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1;
SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT;
DROP TABLE t1;
@@ -269,8 +269,8 @@ DROP TABLE t1;
CREATE TABLE t1 (
WHO CHAR(12),
WEEK INT(2),
-WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT',
-AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT')
+WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT',
+AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT')
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json';
ALTER TABLE t1
PARTITION BY LIST COLUMNS(WEEK) (
diff --git a/storage/connect/mysql-test/connect/t/json_java_2.test b/storage/connect/mysql-test/connect/t/json_java_2.test
index 2f64d8e2eed..03202828bb1 100644
--- a/storage/connect/mysql-test/connect/t/json_java_2.test
+++ b/storage/connect/mysql-test/connect/t/json_java_2.test
@@ -3,6 +3,7 @@
--disable_query_log
eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo2.jar';
+set connect_json_all_path=0;
--enable_query_log
let $DRV= Java;
let $VERS= 2;
diff --git a/storage/connect/mysql-test/connect/t/json_java_3.test b/storage/connect/mysql-test/connect/t/json_java_3.test
index cee8343772a..238808a833f 100644
--- a/storage/connect/mysql-test/connect/t/json_java_3.test
+++ b/storage/connect/mysql-test/connect/t/json_java_3.test
@@ -3,6 +3,7 @@
--disable_query_log
eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo3.jar';
+set connect_json_all_path=0;
--enable_query_log
let $DRV= Java;
let $VERS= 3;
diff --git a/storage/connect/mysql-test/connect/t/mongo_test.inc b/storage/connect/mysql-test/connect/t/mongo_test.inc
index 357fa55240b..6e7c78e81ac 100644
--- a/storage/connect/mysql-test/connect/t/mongo_test.inc
+++ b/storage/connect/mysql-test/connect/t/mongo_test.inc
@@ -1,9 +1,10 @@
set connect_enable_mongo=1;
+set connect_json_all_path=0;
--echo #
--echo # Test the MONGO table type
--echo #
-eval CREATE TABLE t1 (Document varchar(1024) field_format='*')
+eval CREATE TABLE t1 (Document varchar(1024) JPATH='*')
ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants $CONN
OPTION_LIST='Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8;
SELECT * from t1 limit 3;
@@ -13,7 +14,7 @@ DROP TABLE t1;
--echo # Test catfunc
--echo #
eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants CATFUNC=columns
-OPTION_LIST='Level=1,Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8 $CONN;
+OPTION_LIST='Depth=1,Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8 $CONN;
SELECT * from t1;
DROP TABLE t1;
@@ -36,7 +37,7 @@ DROP TABLE t1;
--echo # Test discovery
--echo #
eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants
-OPTION_LIST='Level=1,Driver=$DRV,Version=$VERS' $CONN DATA_CHARSET=utf8;
+OPTION_LIST='Depth=1,Driver=$DRV,Version=$VERS' $CONN DATA_CHARSET=utf8;
SHOW CREATE TABLE t1;
SELECT * FROM t1 LIMIT 5;
DROP TABLE t1;
@@ -58,12 +59,12 @@ _id VARCHAR(24) NOT NULL,
name VARCHAR(64) NOT NULL,
cuisine CHAR(200) NOT NULL,
borough CHAR(16) NOT NULL,
-street VARCHAR(65) FIELD_FORMAT='address.street',
-building CHAR(16) FIELD_FORMAT='address.building',
-zipcode CHAR(5) FIELD_FORMAT='address.zipcode',
-grade CHAR(1) FIELD_FORMAT='grades.0.grade',
-score INT(4) NOT NULL FIELD_FORMAT='grades.0.score',
-`date` DATE FIELD_FORMAT='grades.0.date',
+street VARCHAR(65) JPATH='address.street',
+building CHAR(16) JPATH='address.building',
+zipcode CHAR(5) JPATH='address.zipcode',
+grade CHAR(1) JPATH='grades.0.grade',
+score INT(4) NOT NULL JPATH='grades.0.score',
+`date` DATE JPATH='grades.0.date',
restaurant_id VARCHAR(255) NOT NULL)
ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='restaurants' DATA_CHARSET=utf8
OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN;
@@ -125,6 +126,10 @@ IF ($TYPE == JSON)
{
SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
}
+IF ($TYPE == BSON)
+{
+SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B';
+}
DROP TABLE t1;
--echo #
@@ -156,8 +161,8 @@ DROP TABLE t1;
eval CREATE TABLE t1 (
_id char(5) NOT NULL,
city char(16) NOT NULL,
- loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0',
- loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1',
+ loc_0 double(12,6) NOT NULL `JPATH`='loc.0',
+ loc_1 char(12) NOT NULL `JPATH`='loc.1',
pop int(11) NOT NULL,
state char(2) NOT NULL)
ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=$TYPE TABNAME='cities'
@@ -181,11 +186,11 @@ DROP TABLE t1;
eval CREATE TABLE t1 (
_id int(4) NOT NULL,
item CHAR(8) NOT NULL,
- prices_0 INT(6) FIELD_FORMAT='prices.0',
- prices_1 INT(6) FIELD_FORMAT='prices.1',
- prices_2 INT(6) FIELD_FORMAT='prices.2',
- prices_3 INT(6) FIELD_FORMAT='prices.3',
- prices_4 INT(6) FIELD_FORMAT='prices.4')
+ prices_0 INT(6) JPATH='prices.0',
+ prices_1 INT(6) JPATH='prices.1',
+ prices_2 INT(6) JPATH='prices.2',
+ prices_3 INT(6) JPATH='prices.3',
+ prices_4 INT(6) JPATH='prices.4')
ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='testcoll' DATA_CHARSET=utf8
OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN;
INSERT INTO t1 VALUES
diff --git a/storage/connect/mysql-test/connect/t/odbc_oracle.test b/storage/connect/mysql-test/connect/t/odbc_oracle.test
index 9de742a2647..18d29f69f1a 100644
--- a/storage/connect/mysql-test/connect/t/odbc_oracle.test
+++ b/storage/connect/mysql-test/connect/t/odbc_oracle.test
@@ -78,42 +78,42 @@ SET NAMES utf8;
--echo # All tables in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables;
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='%.%';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Tables TABNAME='MTR.%';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
@@ -127,7 +127,7 @@ DROP TABLE t1;
--echo # All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns;
# Disable warnings to avoid "Result limited to 20000 lines"
--disable_warnings
@@ -137,7 +137,7 @@ DROP TABLE t1;
--echo # All columns in all schemas (limited with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='%.%';
# Disable warnings to avoid "Result limited to 20000 lines"
--disable_warnings
@@ -146,20 +146,20 @@ SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables "T1" in all schemas (limited with WHERE)
-CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1';
+CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='MTR.T1';
SELECT * FROM t1 ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
--echo # All tables "T1" in all schemas (filtered with WHERE)
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
CATFUNC=Columns TABNAME='%.T1';
SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name;
DROP TABLE t1;
@@ -172,7 +172,7 @@ DROP TABLE t1;
--echo # Table "T1" in the default schema ("MTR")
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='T1';
SHOW CREATE TABLE t1;
SELECT * FROM t1 ORDER BY A;
@@ -189,7 +189,7 @@ DROP TABLE t1;
--echo # Table "T1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.T1';
SHOW CREATE TABLE t1;
SELECT * FROM t1;
@@ -197,7 +197,7 @@ DROP TABLE t1;
--echo # View "V1" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.V1';
SHOW CREATE TABLE t1;
SELECT * FROM t1;
@@ -214,7 +214,7 @@ DROP TABLE t1;
--echo # Table "T2" in the schema "MTR"
CREATE TABLE t1 ENGINE=CONNECT
-TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr'
+TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr'
TABNAME='MTR.T2';
SHOW CREATE TABLE t1;
SELECT * FROM t1;
diff --git a/storage/connect/mysql-test/connect/t/rest.inc b/storage/connect/mysql-test/connect/t/rest.inc
new file mode 100644
index 00000000000..6848e4b6965
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/rest.inc
@@ -0,0 +1,17 @@
+--disable_query_log
+--error 0,ER_UNKNOWN_ERROR
+CREATE TABLE t1
+ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='users.json'
+HTTP='http://jsonplaceholder.typicode.com/users';
+
+if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES
+ WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1'
+ AND ENGINE='CONNECT'
+ AND CREATE_OPTIONS LIKE "%`table_type`='JSON'%"`)
+{
+ DROP TABLE IF EXISTS t1;
+ Skip Need Curl or Casablanca;
+}
+DROP TABLE t1;
+--enable_query_log
+
diff --git a/storage/connect/mysql-test/connect/t/rest.test b/storage/connect/mysql-test/connect/t/rest.test
new file mode 100644
index 00000000000..67066ed4639
--- /dev/null
+++ b/storage/connect/mysql-test/connect/t/rest.test
@@ -0,0 +1,17 @@
+--source rest.inc
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+--echo #
+--echo # Testing REST query
+--echo #
+CREATE TABLE t1
+ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json'
+HTTP='http://jsonplaceholder.typicode.com/users';
+SELECT * FROM t1;
+DROP TABLE t1;
+
+#
+# Clean up
+#
+--remove_file $MYSQLD_DATADIR/test/users.json
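As the clean-up step above implies, the HTTP option makes CONNECT retrieve the remote document into the datadir under the name given by FILE_NAME, after which it is read like any local JSON table. A hedged sketch of mapping individual fields from that downloaded file with explicit JPATH columns; the field names (id, email) are assumptions about the remote users document, not something recorded in this commit:

CREATE TABLE users_email (
  id INT(4),
  email CHAR(64) JPATH='$.email'
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='users.json'
HTTP='http://jsonplaceholder.typicode.com/users';
SELECT id, email FROM users_email;
DROP TABLE users_email;
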
diff --git a/storage/connect/mysql-test/connect/t/xml.test b/storage/connect/mysql-test/connect/t/xml.test
index 0fdf8e90b6e..e837ec79604 100644
--- a/storage/connect/mysql-test/connect/t/xml.test
+++ b/storage/connect/mysql-test/connect/t/xml.test
@@ -300,6 +300,7 @@ CREATE TABLE t1 (node VARCHAR(50))
ENGINE=connect TABLE_TYPE=xml FILE_NAME='t1.xml'
OPTION_LIST='xmlsup=domdoc,rownode=line,encoding=iso-8859-1';
INSERT INTO t1 VALUES (_latin1 0xC0C1C2C3);
+--replace_regex /.*iso-8859-1.*/warning about characters outside of iso-8859-1/
INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3);
INSERT INTO t1 VALUES ('&<>"\'');
SELECT node, hex(node) FROM t1;
diff --git a/storage/connect/mysql-test/connect/t/xml2.test b/storage/connect/mysql-test/connect/t/xml2.test
index 7bbc3dbd87c..9c5f685d399 100644
--- a/storage/connect/mysql-test/connect/t/xml2.test
+++ b/storage/connect/mysql-test/connect/t/xml2.test
@@ -77,9 +77,9 @@ DROP TABLE t1;
--echo # Testing mixed tag and attribute values
--echo #
CREATE TABLE t1 (
- ISBN CHAR(15) FIELD_FORMAT='@',
- LANG CHAR(2) FIELD_FORMAT='@',
- SUBJECT CHAR(32) FIELD_FORMAT='@',
+ ISBN CHAR(15) XPATH='@',
+ LANG CHAR(2) XPATH='@',
+ SUBJECT CHAR(32) XPATH='@',
AUTHOR CHAR(50),
TITLE CHAR(32),
TRANSLATOR CHAR(40),
@@ -98,9 +98,9 @@ DROP TABLE t1;
--copy_file $MTR_SUITE_DIR/std_data/xsample.xml $MYSQLD_DATADIR/test/xsample2.xml
--chmod 0644 $MYSQLD_DATADIR/test/xsample2.xml
CREATE TABLE t1 (
- ISBN CHAR(15) FIELD_FORMAT='@',
- LANG CHAR(2) FIELD_FORMAT='@',
- SUBJECT CHAR(32) FIELD_FORMAT='@',
+ ISBN CHAR(15) XPATH='@',
+ LANG CHAR(2) XPATH='@',
+ SUBJECT CHAR(32) XPATH='@',
AUTHOR CHAR(50),
TITLE CHAR(32),
TRANSLATOR CHAR(40),
@@ -123,18 +123,18 @@ DROP TABLE t1;
--echo # Testing XPath
--echo #
CREATE TABLE t1 (
- isbn CHAR(15) FIELD_FORMAT='@ISBN',
- language CHAR(2) FIELD_FORMAT='@LANG',
- subject CHAR(32) FIELD_FORMAT='@SUBJECT',
- authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME',
- authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME',
- title CHAR(32) FIELD_FORMAT='TITLE',
- translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX',
- tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
- tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME',
- publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME',
- location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE',
- year INT(4) FIELD_FORMAT='DATEPUB'
+ isbn CHAR(15) XPATH='@ISBN',
+ language CHAR(2) XPATH='@LANG',
+ subject CHAR(32) XPATH='@SUBJECT',
+ authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME',
+ authorln CHAR(20) XPATH='AUTHOR/LASTNAME',
+ title CHAR(32) XPATH='TITLE',
+ translated CHAR(32) XPATH='TRANSLATOR/@PREFIX',
+ tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME',
+ tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME',
+ publisher CHAR(20) XPATH='PUBLISHER/NAME',
+ location CHAR(20) XPATH='PUBLISHER/PLACE',
+ year INT(4) XPATH='DATEPUB'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM t1;
@@ -150,8 +150,8 @@ DROP TABLE t1;
#--echo # Relative paths are not supported
#--echo #
#CREATE TABLE t1 (
-# authorfn CHAR(20) FIELD_FORMAT='//FIRSTNAME',
-# authorln CHAR(20) FIELD_FORMAT='//LASTNAME'
+# authorfn CHAR(20) XPATH='//FIRSTNAME',
+# authorln CHAR(20) XPATH='//LASTNAME'
#) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
# TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1';
#SELECT * FROM t1;
@@ -165,8 +165,8 @@ DROP TABLE t1;
#--echo # Absolute path is not supported
#--echo #
#CREATE TABLE t1 (
-# authorfn CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/FIRSTNAME',
-# authorln CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/LASTNAME'
+# authorfn CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/FIRSTNAME',
+# authorln CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/LASTNAME'
#) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
# TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1';
#SELECT * FROM t1;
@@ -178,7 +178,7 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1
(
- isbn CHAR(15) FIELD_FORMAT='@isbn'
+ isbn CHAR(15) XPATH='@isbn'
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml'
TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2';
SELECT * FROM t1;
diff --git a/storage/connect/mysql-test/connect/t/xml2_html.test b/storage/connect/mysql-test/connect/t/xml2_html.test
index 1c84b46ec38..2f4fc50e5e6 100644
--- a/storage/connect/mysql-test/connect/t/xml2_html.test
+++ b/storage/connect/mysql-test/connect/t/xml2_html.test
@@ -11,9 +11,9 @@ SET NAMES utf8;
--echo # Testing HTML like XML file
--echo #
CREATE TABLE beers (
-`Name` CHAR(16) FIELD_FORMAT='brandName',
-`Origin` CHAR(16) FIELD_FORMAT='origin',
-`Description` CHAR(32) FIELD_FORMAT='details')
+`Name` CHAR(16) XPATH='brandName',
+`Origin` CHAR(16) XPATH='origin',
+`Description` CHAR(32) XPATH='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td';
SELECT * FROM beers;
diff --git a/storage/connect/mysql-test/connect/t/xml2_mult.test b/storage/connect/mysql-test/connect/t/xml2_mult.test
index cd83827fe34..e9914c71aad 100644
--- a/storage/connect/mysql-test/connect/t/xml2_mult.test
+++ b/storage/connect/mysql-test/connect/t/xml2_mult.test
@@ -15,9 +15,9 @@ SET NAMES utf8;
--echo # Testing expanded values
--echo #
CREATE TABLE `bookstore` (
- `category` CHAR(16) NOT NULL FIELD_FORMAT='@',
+ `category` CHAR(16) NOT NULL XPATH='@',
`title` VARCHAR(50) NOT NULL,
- `lang` char(2) NOT NULL FIELD_FORMAT='title/@',
+ `lang` char(2) NOT NULL XPATH='title/@',
`author` VARCHAR(24) NOT NULL,
`year` INT(4) NOT NULL,
`price` DOUBLE(8,2) NOT NULL)
diff --git a/storage/connect/mysql-test/connect/t/xml2_zip.test b/storage/connect/mysql-test/connect/t/xml2_zip.test
index d8c7894f861..df69f9dace3 100644
--- a/storage/connect/mysql-test/connect/t/xml2_zip.test
+++ b/storage/connect/mysql-test/connect/t/xml2_zip.test
@@ -11,26 +11,26 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--echo # Testing zipped XML tables
--echo #
CREATE TABLE t1 (
-ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
-LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
-SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
-AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
-AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
-TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
-TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+ISBN CHAR(13) NOT NULL XPATH='@',
+LANG CHAR(2) NOT NULL XPATH='@',
+SUBJECT CHAR(12) NOT NULL XPATH='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME',
TITLE CHAR(30) NOT NULL,
-PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
-PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE',
DATEPUB CHAR(4) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
+OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR';
SELECT * FROM t1;
#testing discovery
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='xmlsup=libxml2';
+OPTION_LIST='depth=0,xmlsup=libxml2';
SELECT * FROM t2;
DROP TABLE t1,t2;
diff --git a/storage/connect/mysql-test/connect/t/xml_html.test b/storage/connect/mysql-test/connect/t/xml_html.test
index 34d29953f68..1430f68d2b2 100644
--- a/storage/connect/mysql-test/connect/t/xml_html.test
+++ b/storage/connect/mysql-test/connect/t/xml_html.test
@@ -11,9 +11,9 @@ SET NAMES utf8;
--echo # Testing HTML like XML file
--echo #
CREATE TABLE beers (
-`Name` CHAR(16) FIELD_FORMAT='brandName',
-`Origin` CHAR(16) FIELD_FORMAT='origin',
-`Description` CHAR(32) FIELD_FORMAT='details')
+`Name` CHAR(16) XPATH='brandName',
+`Origin` CHAR(16) XPATH='origin',
+`Description` CHAR(32) XPATH='details')
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml'
TABNAME='table' OPTION_LIST='xmlsup=domdoc,rownode=tr,colnode=td';
SELECT * FROM beers;
diff --git a/storage/connect/mysql-test/connect/t/xml_mult.test b/storage/connect/mysql-test/connect/t/xml_mult.test
index cf703e90da4..221d6734546 100644
--- a/storage/connect/mysql-test/connect/t/xml_mult.test
+++ b/storage/connect/mysql-test/connect/t/xml_mult.test
@@ -15,9 +15,9 @@ SET NAMES utf8;
--echo # Testing expanded values
--echo #
CREATE TABLE `bookstore` (
- `category` CHAR(16) NOT NULL FIELD_FORMAT='@',
+ `category` CHAR(16) NOT NULL XPATH='@',
`title` VARCHAR(50) NOT NULL,
- `lang` char(2) NOT NULL FIELD_FORMAT='title/@',
+ `lang` char(2) NOT NULL XPATH='title/@',
`author` VARCHAR(24) NOT NULL,
`year` INT(4) NOT NULL,
`price` DOUBLE(8,2) NOT NULL)
diff --git a/storage/connect/mysql-test/connect/t/xml_zip.test b/storage/connect/mysql-test/connect/t/xml_zip.test
index ad31ca46d4c..29ee2e0e607 100644
--- a/storage/connect/mysql-test/connect/t/xml_zip.test
+++ b/storage/connect/mysql-test/connect/t/xml_zip.test
@@ -11,26 +11,26 @@ let $MYSQLD_DATADIR= `select @@datadir`;
--echo # Testing zipped XML tables
--echo #
CREATE TABLE t1 (
-ISBN CHAR(13) NOT NULL FIELD_FORMAT='@',
-LANG CHAR(2) NOT NULL FIELD_FORMAT='@',
-SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@',
-AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME',
-AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME',
-TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX',
-TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME',
-TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME',
+ISBN CHAR(13) NOT NULL XPATH='@',
+LANG CHAR(2) NOT NULL XPATH='@',
+SUBJECT CHAR(12) NOT NULL XPATH='@',
+AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME',
+AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME',
+TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX',
+TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME',
+TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME',
TITLE CHAR(30) NOT NULL,
-PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME',
-PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE',
+PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME',
+PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE',
DATEPUB CHAR(4) NOT NULL
) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR';
+OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR';
SELECT * FROM t1;
#testing discovery
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES
-OPTION_LIST='xmlsup=domdoc';
+OPTION_LIST='depth=0,xmlsup=domdoc';
SELECT * FROM t2;
DROP TABLE t1,t2;
diff --git a/storage/connect/mysql-test/connect/t/zip.test b/storage/connect/mysql-test/connect/t/zip.test
index dce68c17eee..1f0a4eedee9 100644
--- a/storage/connect/mysql-test/connect/t/zip.test
+++ b/storage/connect/mysql-test/connect/t/zip.test
@@ -83,37 +83,37 @@ DROP TABLE t1,t2,t3,t4;
--echo #
CREATE TABLE t1 (
_id INT(2) NOT NULL,
-name_first CHAR(9) NOT NULL FIELD_FORMAT='$.name.first',
-name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka',
-name_last CHAR(10) NOT NULL FIELD_FORMAT='$.name.last',
+name_first CHAR(9) NOT NULL JPATH='$.name.first',
+name_aka CHAR(4) DEFAULT NULL JPATH='$.name.aka',
+name_last CHAR(10) NOT NULL JPATH='$.name.last',
title CHAR(12) DEFAULT NULL,
birth CHAR(20) DEFAULT NULL,
death CHAR(20) DEFAULT NULL,
-contribs CHAR(7) NOT NULL FIELD_FORMAT='$.contribs',
-awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards.award',
-awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards.year',
-awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards.by'
+contribs CHAR(50) NOT NULL JPATH='$.contribs',
+awards_award CHAR(42) DEFAULT NULL JPATH='$.awards.award',
+awards_year CHAR(4) DEFAULT NULL JPATH='$.awards.year',
+awards_by CHAR(38) DEFAULT NULL JPATH='$.awards.by'
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES;
SELECT * FROM t1;
# Test discovery
CREATE TABLE t2
ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1
-OPTION_LIST='LEVEL=5';
+OPTION_LIST='DEPTH=5';
SELECT * FROM t2;
CREATE TABLE t3 (
_id INT(2) NOT NULL,
-firstname CHAR(9) NOT NULL FIELD_FORMAT='$.name.first',
-aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka',
-lastname CHAR(10) NOT NULL FIELD_FORMAT='$.name.last',
+firstname CHAR(9) NOT NULL JPATH='$.name.first',
+aka CHAR(4) DEFAULT NULL JPATH='$.name.aka',
+lastname CHAR(10) NOT NULL JPATH='$.name.last',
title CHAR(12) DEFAULT NULL,
birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'",
-contribs CHAR(64) NOT NULL FIELD_FORMAT='$.contribs.[", "]',
-award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards[*].award',
-year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards[*].year',
-`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards[*].by'
+contribs CHAR(64) NOT NULL JPATH='$.contribs.[", "]',
+award CHAR(42) DEFAULT NULL JPATH='$.awards[*].award',
+year CHAR(4) DEFAULT NULL JPATH='$.awards[*].year',
+`by` CHAR(38) DEFAULT NULL JPATH='$.awards[*].by'
) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES;
SELECT * FROM t3 WHERE _id = 1;
diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp
index 89b18f86323..e53ee1310e4 100644
--- a/storage/connect/myutil.cpp
+++ b/storage/connect/myutil.cpp
@@ -169,10 +169,9 @@ const char *PLGtoMYSQLtype(int type, bool dbf, char v)
case TYPE_BIGINT: return "BIGINT";
case TYPE_TINY: return "TINYINT";
case TYPE_DECIM: return "DECIMAL";
- default: return "CHAR(0)";
+ default: return (v) ? "VARCHAR" : "CHAR";
} // endswitch mytype
- return "CHAR(0)";
} // end of PLGtoMYSQLtype
/************************************************************************/
diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h
index a40e32bcfb2..dd204d065ed 100644
--- a/storage/connect/plgdbsem.h
+++ b/storage/connect/plgdbsem.h
@@ -83,7 +83,8 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */
TAB_ZIP = 27, /* ZIP file info table */
TAB_MONGO = 28, /* Table retrieved from MongoDB */
TAB_REST = 29, /* Table retrieved from Rest */
- TAB_NIY = 30}; /* Table not implemented yet */
+ TAB_BSON = 30, /* BSON Table (development) */
+ TAB_NIY = 31}; /* Table not implemented yet */
enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */
TYPE_AM_ROWID = 1, /* ROWID type (special column) */
@@ -160,7 +161,7 @@ enum RECFM {RECFM_DFLT = 0, /* Default table type */
RECFM_FMT = 8, /* FMT formatted file */
RECFM_VCT = 9, /* VCT formatted files */
RECFM_XML = 10, /* XML formatted files */
- RECFM_JASON = 11, /* JASON formatted files */
+ RECFM_JSON = 11, /* JSON formatted files */
RECFM_DIR = 12, /* DIR table */
RECFM_ODBC = 13, /* Table accessed via ODBC */
RECFM_JDBC = 14, /* Table accessed via JDBC */
diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp
index 522366dda9c..2ade1488ec3 100644
--- a/storage/connect/plugutil.cpp
+++ b/storage/connect/plugutil.cpp
@@ -96,7 +96,7 @@ char *msglang(void);
typedef struct {
ushort Segsize;
ushort Size;
- } AREASIZE;
+} AREASIZE;
ACTIVITY defActivity = { /* Describes activity and language */
NULL, /* Points to user work area(s) */
@@ -184,7 +184,7 @@ PGLOBAL PlugInit(LPCSTR Language, size_t worksize)
/***********************************************************************/
/* PlugExit: Terminate Plug operations. */
/***********************************************************************/
-int PlugExit(PGLOBAL g)
+PGLOBAL PlugExit(PGLOBAL g)
{
if (g) {
PDBUSER dup = PlgGetUser(g);
@@ -196,7 +196,7 @@ int PlugExit(PGLOBAL g)
delete g;
} // endif g
- return 0;
+ return NULL;
} // end of PlugExit
/***********************************************************************/
@@ -204,7 +204,7 @@ int PlugExit(PGLOBAL g)
/* Note: this routine is not really implemented for Unix. */
/***********************************************************************/
LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName)
- {
+{
#if defined(__WIN__)
char drive[_MAX_DRIVE];
#else
@@ -228,8 +228,7 @@ LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName)
htrc("buff='%-.256s'\n", pBuff);
return pBuff;
- } // end of PlugRemoveType
-
+} // end of PlugRemoveType
BOOL PlugIsAbsolutePath(LPCSTR path)
{
@@ -246,7 +245,7 @@ BOOL PlugIsAbsolutePath(LPCSTR path)
/* Note: this routine is not really implemented for Unix. */
/***********************************************************************/
LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath)
- {
+{
char newname[_MAX_PATH];
char direc[_MAX_DIR], defdir[_MAX_DIR], tmpdir[_MAX_DIR];
char fname[_MAX_FNAME];
@@ -347,14 +346,14 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath)
} else
return FileName; // Error, return unchanged name
- } // end of PlugSetPath
+} // end of PlugSetPath
#if defined(XMSG)
/***********************************************************************/
/* PlugGetMessage: get a message from the message file. */
/***********************************************************************/
char *PlugReadMessage(PGLOBAL g, int mid, char *m)
- {
+{
char msgfile[_MAX_PATH], msgid[32], buff[256];
char *msg;
FILE *mfile = NULL;
@@ -405,14 +404,14 @@ char *PlugReadMessage(PGLOBAL g, int mid, char *m)
msg = stmsg;
return msg;
- } // end of PlugReadMessage
+} // end of PlugReadMessage
#elif defined(NEWMSG)
/***********************************************************************/
/* PlugGetMessage: get a message from the resource string table. */
/***********************************************************************/
char *PlugGetMessage(PGLOBAL g, int mid)
- {
+{
char *msg;
#if 0 // was !defined(UNIX) && !defined(UNIV_LINUX)
@@ -440,7 +439,7 @@ char *PlugGetMessage(PGLOBAL g, int mid)
msg = stmsg;
return msg;
- } // end of PlugGetMessage
+} // end of PlugGetMessage
#endif // NEWMSG
#if defined(__WIN__)
@@ -448,13 +447,13 @@ char *PlugGetMessage(PGLOBAL g, int mid)
/* Return the line length of the console screen buffer. */
/***********************************************************************/
short GetLineLength(PGLOBAL g)
- {
+{
CONSOLE_SCREEN_BUFFER_INFO coninfo;
HANDLE hcons = GetStdHandle(STD_OUTPUT_HANDLE);
BOOL b = GetConsoleScreenBufferInfo(hcons, &coninfo);
return (b) ? coninfo.dwSize.X : 0;
- } // end of GetLineLength
+} // end of GetLineLength
#endif // __WIN__
/***********************************************************************/
@@ -475,17 +474,19 @@ bool AllocSarea(PGLOBAL g, size_t size)
if (!g->Sarea) {
sprintf(g->Message, MSG(MALLOC_ERROR), "malloc");
g->Sarea_Size = 0;
- } else
- g->Sarea_Size = size;
+ } else {
+ g->Sarea_Size = size;
+ PlugSubSet(g->Sarea, size);
+ } // endif Sarea
#if defined(DEVELOPMENT)
if (true) {
#else
if (trace(8)) {
#endif
- if (g->Sarea)
+ if (g->Sarea) {
htrc("Work area of %zd allocated at %p\n", size, g->Sarea);
- else
+ } else
htrc("SareaAlloc: %-.256s\n", g->Message);
} // endif trace
@@ -526,13 +527,13 @@ void FreeSarea(PGLOBAL g)
/* the address and size not larger than memory size. */
/***********************************************************************/
BOOL PlugSubSet(void *memp, size_t size)
- {
+{
PPOOLHEADER pph = (PPOOLHEADER)memp;
pph->To_Free = (size_t)sizeof(POOLHEADER);
pph->FreeBlk = size - pph->To_Free;
return FALSE;
- } /* end of PlugSubSet */
+} /* end of PlugSubSet */
/***********************************************************************/
/* Use it to export a function that do throwing. */
@@ -595,7 +596,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
/* Program for sub-allocating and copying a string in a storage area. */
/***********************************************************************/
char *PlugDup(PGLOBAL g, const char *str)
- {
+{
if (str) {
char *sm = (char*)PlugSubAlloc(g, NULL, strlen(str) + 1);
@@ -604,6 +605,33 @@ char *PlugDup(PGLOBAL g, const char *str)
} else
return NULL;
- } // end of PlugDup
+} // end of PlugDup
+
+/*************************************************************************/
+/* This routine makes a pointer from an offset to a memory pointer. */
+/*************************************************************************/
+void* MakePtr(void* memp, size_t offset)
+{
+ // return ((offset == 0) ? NULL : &((char*)memp)[offset]);
+ return (!offset) ? NULL : (char *)memp + offset;
+} /* end of MakePtr */
+
+/*************************************************************************/
+/* This routine makes an offset from a pointer new format. */
+/*************************************************************************/
+size_t MakeOff(void* memp, void* ptr)
+{
+ if (ptr) {
+#if defined(_DEBUG) || defined(DEVELOPMENT)
+ if (ptr <= memp) {
+ fprintf(stderr, "ptr %p <= memp %p", ptr, memp);
+ DoThrow(999);
+ } // endif ptr
+#endif // _DEBUG || DEVELOPMENT
+ return (size_t)((char*)ptr - (size_t)memp);
+ } else
+ return 0;
+
+} /* end of MakeOff */
-/*--------------------- End of PLUGUTIL program -----------------------*/
+/*---------------------- End of PLUGUTIL program ------------------------*/
diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp
new file mode 100644
index 00000000000..ba4380c5f89
--- /dev/null
+++ b/storage/connect/tabbson.cpp
@@ -0,0 +1,2542 @@
+/************* tabbson C++ Program Source Code File (.CPP) *************/
+/* PROGRAM NAME: tabbson Version 1.0 */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* This program implements the BSON class DB execution routines. */
+/***********************************************************************/
+
+/***********************************************************************/
+/* Include relevant sections of the MariaDB header file. */
+/***********************************************************************/
+#include <my_global.h>
+
+/***********************************************************************/
+/* Include application header files: */
+/* global.h is the header containing all global declarations. */
+/* plgdbsem.h is the header containing the DB application declarations. */
+/* tdbdos.h is the header containing the TDBDOS declarations. */
+/* json.h is the header containing the JSON classes declarations. */
+/***********************************************************************/
+#include "global.h"
+#include "plgdbsem.h"
+#include "maputil.h"
+#include "filamtxt.h"
+#include "tabdos.h"
+#include "tabbson.h"
+#include "filamap.h"
+#if defined(GZ_SUPPORT)
+#include "filamgz.h"
+#endif // GZ_SUPPORT
+#if defined(ZIP_SUPPORT)
+#include "filamzip.h"
+#endif // ZIP_SUPPORT
+#if defined(JAVA_SUPPORT)
+#include "jmgfam.h"
+#endif // JAVA_SUPPORT
+#if defined(CMGO_SUPPORT)
+#include "cmgfam.h"
+#endif // CMGO_SUPPORT
+#include "tabmul.h"
+#include "checklvl.h"
+#include "resource.h"
+#include "mycat.h" // for FNC_COL
+
+/***********************************************************************/
+/* This should be an option. */
+/***********************************************************************/
+#define MAXCOL 200 /* Default max column nb in result */
+//#define TYPE_UNKNOWN 12 /* Must be greater than other types */
+
+/***********************************************************************/
+/* External functions. */
+/***********************************************************************/
+USETEMP UseTemp(void);
+bool JsonAllPath(void);
+int GetDefaultDepth(void);
+char *GetJsonNull(void);
+
+/***********************************************************************/
+/* BSONColumns: construct the result blocks containing the description */
+/* of all the columns of a table contained inside a JSON file. */
+/***********************************************************************/
+PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
+{
+ static int buftyp[] = { TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT,
+ TYPE_INT, TYPE_SHORT, TYPE_SHORT, TYPE_STRING };
+ static XFLD fldtyp[] = { FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC,
+ FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT };
+ static unsigned int length[] = { 0, 6, 8, 10, 10, 6, 6, 0 };
+ int i, n = 0;
+ int ncol = sizeof(buftyp) / sizeof(int);
+ PJCL jcp;
+ BSONDISC* pjdc = NULL;
+ PQRYRES qrp;
+ PCOLRES crp;
+
+ if (info) {
+ length[0] = 128;
+ length[7] = 256;
+ goto skipit;
+ } // endif info
+
+ if (GetIntegerTableOption(g, topt, "Multiple", 0)) {
+ strcpy(g->Message, "Cannot find column definition for multiple table");
+ return NULL;
+ } // endif Multiple
+
+ pjdc = new(g) BSONDISC(g, length);
+
+ if (!(n = pjdc->GetColumns(g, db, dsn, topt)))
+ return NULL;
+
+skipit:
+ if (trace(1))
+ htrc("BSONColumns: n=%d len=%d\n", n, length[0]);
+
+ /*********************************************************************/
+ /* Allocate the structures used to refer to the result set. */
+ /*********************************************************************/
+ qrp = PlgAllocResult(g, ncol, n, IDS_COLUMNS + 3,
+ buftyp, fldtyp, length, false, false);
+
+ crp = qrp->Colresp->Next->Next->Next->Next->Next->Next;
+ crp->Name = PlugDup(g, "Nullable");
+ crp->Next->Name = PlugDup(g, "Jpath");
+
+ if (info || !qrp)
+ return qrp;
+
+ qrp->Nblin = n;
+
+ /*********************************************************************/
+ /* Now get the results into blocks. */
+ /*********************************************************************/
+ for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) {
+ if (jcp->Type == TYPE_UNKNOWN)
+ jcp->Type = TYPE_STRG; // Void column
+
+ crp = qrp->Colresp; // Column Name
+ crp->Kdata->SetValue(jcp->Name, i);
+ crp = crp->Next; // Data Type
+ crp->Kdata->SetValue(jcp->Type, i);
+ crp = crp->Next; // Type Name
+ crp->Kdata->SetValue(GetTypeName(jcp->Type), i);
+ crp = crp->Next; // Precision
+ crp->Kdata->SetValue(jcp->Len, i);
+ crp = crp->Next; // Length
+ crp->Kdata->SetValue(jcp->Len, i);
+ crp = crp->Next; // Scale (precision)
+ crp->Kdata->SetValue(jcp->Scale, i);
+ crp = crp->Next; // Nullable
+ crp->Kdata->SetValue(jcp->Cbn ? 1 : 0, i);
+ crp = crp->Next; // Field format
+
+ if (crp->Kdata)
+ crp->Kdata->SetValue(jcp->Fmt, i);
+
+ } // endfor i
+
+/*********************************************************************/
+/* Return the result pointer. */
+/*********************************************************************/
+ return qrp;
+} // end of BSONColumns
+
+/* -------------------------- Class BSONDISC ------------------------- */
+
+/***********************************************************************/
+/* Class used to get the columns of a JSON table. */
+/***********************************************************************/
+BSONDISC::BSONDISC(PGLOBAL g, uint* lg)
+{
+ length = lg;
+ jcp = fjcp = pjcp = NULL;
+ tdp = NULL;
+ tjnp = NULL;
+ jpp = NULL;
+ tjsp = NULL;
+ jsp = NULL;
+ bp = NULL;
+ row = NULL;
+ sep = NULL;
+ i = n = bf = ncol = lvl = sz = limit = 0;
+ all = strfy = false;
+} // end of BSONDISC constructor
+
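+/***********************************************************************/
+/* GetColumns: open the data source, analyse its rows and build the */
+/* list of column descriptions (returns the number of columns found). */
+/***********************************************************************/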
+int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
+{
+ char filename[_MAX_PATH];
+ bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
+ PBVAL bdp = NULL;
+
+ lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
+ lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
+ sep = GetStringTableOption(g, topt, "Separator", ".");
+ sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
+ limit = GetIntegerTableOption(g, topt, "Limit", 10);
+ strfy = GetBooleanTableOption(g, topt, "Stringify", false);
+
+ /*********************************************************************/
+ /* Open the input file. */
+ /*********************************************************************/
+ tdp = new(g) BSONDEF;
+ tdp->G = NULL;
+#if defined(ZIP_SUPPORT)
+ tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL);
+ tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false);
+#endif // ZIP_SUPPORT
+ tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
+
+ if (!(tdp->Database = SetPath(g, db)))
+ return 0;
+
+ tdp->Objname = GetStringTableOption(g, topt, "Object", NULL);
+ tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0;
+ tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2);
+ tdp->Xcol = GetStringTableOption(g, topt, "Expand", NULL);
+ tdp->Accept = GetBooleanTableOption(g, topt, "Accept", false);
+ tdp->Uri = (dsn && *dsn ? dsn : NULL);
+
+ if (!tdp->Fn && !tdp->Uri) {
+ strcpy(g->Message, MSG(MISSING_FNAME));
+ return 0;
+ } // endif Fn
+
+ if (tdp->Fn) {
+    // Use the file name relative to the recorded datapath
+ PlugSetPath(filename, tdp->Fn, tdp->GetPath());
+ tdp->Fn = PlugDup(g, filename);
+ } // endif Fn
+
+ if (trace(1))
+ htrc("File %s objname=%s pretty=%d lvl=%d\n",
+ tdp->Fn, tdp->Objname, tdp->Pretty, lvl);
+
+ if (tdp->Uri) {
+#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
+ tdp->Collname = GetStringTableOption(g, topt, "Name", NULL);
+ tdp->Collname = GetStringTableOption(g, topt, "Tabname", tdp->Collname);
+ tdp->Schema = GetStringTableOption(g, topt, "Dbname", "test");
+ tdp->Options = (PSZ)GetStringTableOption(g, topt, "Colist", "all");
+ tdp->Pipe = GetBooleanTableOption(g, topt, "Pipeline", false);
+ tdp->Driver = (PSZ)GetStringTableOption(g, topt, "Driver", NULL);
+ tdp->Version = GetIntegerTableOption(g, topt, "Version", 3);
+ tdp->Wrapname = (PSZ)GetStringTableOption(g, topt, "Wrapper",
+ (tdp->Version == 2) ? "Mongo2Interface" : "Mongo3Interface");
+ tdp->Pretty = 0;
+#else // !MONGO_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+ return 0;
+#endif // !MONGO_SUPPORT
+ } // endif Uri
+
+ if (tdp->Pretty == 2) {
+ tdp->G = g;
+
+ if (tdp->Zipped) {
+#if defined(ZIP_SUPPORT)
+ tjsp = new(g) TDBBSON(g, tdp, new(g) UNZFAM(tdp));
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+ return 0;
+#endif // !ZIP_SUPPORT
+ } else
+ tjsp = new(g) TDBBSON(g, tdp, new(g) MAPFAM(tdp));
+
+ if (tjsp->MakeDocument(g))
+ return 0;
+
+ bp = tjsp->Bp;
+// bdp = tjsp->GetDoc() ? bp->GetBson(tjsp->GetDoc()) : NULL;
+ bdp = tjsp->GetDoc();
+ jsp = bdp ? bp->GetArrayValue(bdp, 0) : NULL;
+ } else {
+ if (!((tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))) {
+ if (!mgo) {
+ sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty);
+ return 0;
+ } else
+ tdp->Lrecl = 8192; // Should be enough
+
+ } // endif Lrecl
+
+ // Allocate the parse work memory
+ tdp->G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 4 : 2));
+ tdp->Ending = GetIntegerTableOption(g, topt, "Ending", CRLF);
+
+ if (tdp->Zipped) {
+#if defined(ZIP_SUPPORT)
+ tjnp = new(g)TDBBSN(g, tdp, new(g) UNZFAM(tdp));
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+      return 0;
+#endif // !ZIP_SUPPORT
+ } else if (tdp->Uri) {
+ if (tdp->Driver && toupper(*tdp->Driver) == 'C') {
+#if defined(CMGO_SUPPORT)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) CMGFAM(tdp));
+#else
+ sprintf(g->Message, "Mongo %s Driver not available", "C");
+ return 0;
+#endif
+ } else if (tdp->Driver && toupper(*tdp->Driver) == 'J') {
+#if defined(JAVA_SUPPORT)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) JMGFAM(tdp));
+#else
+ sprintf(g->Message, "Mongo %s Driver not available", "Java");
+ return 0;
+#endif
+ } else { // Driver not specified
+#if defined(CMGO_SUPPORT)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) CMGFAM(tdp));
+#elif defined(JAVA_SUPPORT)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) JMGFAM(tdp));
+#else
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+ return 0;
+#endif
+ } // endif Driver
+
+ } else if (tdp->Pretty >= 0)
+ tjnp = new(g) TDBBSN(g, tdp, new(g) DOSFAM(tdp));
+ else
+ tjnp = new(g) TDBBSN(g, tdp, new(g) BINFAM(tdp));
+
+ tjnp->SetMode(MODE_READ);
+ bp = tjnp->Bp;
+
+ if (tjnp->OpenDB(g))
+ return 0;
+
+ switch (tjnp->ReadDB(g)) {
+ case RC_EF:
+ strcpy(g->Message, "Void json table");
+ case RC_FX:
+ goto err;
+ default:
+ jsp = tjnp->Row;
+ } // endswitch ReadDB
+
+ } // endif pretty
+
+ if (!(row = (jsp) ? bp->GetObject(jsp) : NULL)) {
+ strcpy(g->Message, "Can only retrieve columns from object rows");
+ goto err;
+ } // endif row
+
+ all = GetBooleanTableOption(g, topt, "Fullarray", false);
+ jcol.Name = jcol.Fmt = NULL;
+ jcol.Next = NULL;
+ jcol.Found = true;
+ colname[0] = 0;
+
+ if (!tdp->Uri) {
+ fmt[0] = '$';
+ fmt[1] = '.';
+ bf = 2;
+ } // endif Uri
+
+ /*********************************************************************/
+ /* Analyse the JSON tree and define columns. */
+ /*********************************************************************/
+ for (i = 1; ; i++) {
+ for (jpp = row; jpp; jpp = bp->GetNext(jpp)) {
+ strncpy(colname, bp->GetKey(jpp), 64);
+ fmt[bf] = 0;
+
+ if (Find(g, bp->GetVlp(jpp), colname, MY_MIN(lvl, 0)))
+ goto err;
+
+ } // endfor jpp
+
+ // Missing column can be null
+ for (jcp = fjcp; jcp; jcp = jcp->Next) {
+ jcp->Cbn |= !jcp->Found;
+ jcp->Found = false;
+ } // endfor jcp
+
+ if (tdp->Pretty != 2) {
+ // Read next record
+ switch (tjnp->ReadDB(g)) {
+ case RC_EF:
+ jsp = NULL;
+ break;
+ case RC_FX:
+ goto err;
+ default:
+ jsp = tjnp->Row;
+ } // endswitch ReadDB
+
+ } else
+ jsp = bp->GetArrayValue(bdp, i);
+
+ if (!(row = (jsp) ? bp->GetObject(jsp) : NULL))
+ break;
+
+ } // endfor i
+
+ if (tdp->Pretty != 2)
+ tjnp->CloseDB(g);
+
+ return n;
+
+err:
+ if (tdp->Pretty != 2)
+ tjnp->CloseDB(g);
+
+ return 0;
+} // end of GetColumns
+
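+/***********************************************************************/
+/* Find: recursively analyse a BSON value to determine the column */
+/* type, length and Jpath, descending into objects and arrays up to */
+/* the Depth level. */
+/***********************************************************************/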
+bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
+{
+ char *p, *pc = colname + strlen(colname), buf[32];
+ int ars;
+ size_t n;
+ PBVAL job;
+ PBVAL jar;
+
+ if (jvp && !bp->IsJson(jvp)) {
+ if (JsonAllPath() && !fmt[bf])
+ strcat(fmt, colname);
+
+ jcol.Type = (JTYP)jvp->Type;
+
+ switch (jvp->Type) {
+ case TYPE_STRG:
+ case TYPE_DTM:
+ jcol.Len = (int)strlen(bp->GetString(jvp));
+ break;
+ case TYPE_INTG:
+ case TYPE_BINT:
+ jcol.Len = (int)strlen(bp->GetString(jvp, buf));
+ break;
+ case TYPE_DBL:
+ case TYPE_FLOAT:
+ jcol.Len = (int)strlen(bp->GetString(jvp, buf));
+ jcol.Scale = jvp->Nd;
+ break;
+ case TYPE_BOOL:
+ jcol.Len = 1;
+ break;
+ default:
+ jcol.Len = 0;
+ break;
+ } // endswitch Type
+
+ jcol.Scale = jvp->Nd;
+ jcol.Cbn = jvp->Type == TYPE_NULL;
+ } else if (!jvp || bp->IsValueNull(jvp)) {
+ jcol.Type = TYPE_UNKNOWN;
+ jcol.Len = jcol.Scale = 0;
+ jcol.Cbn = true;
+ } else if (j < lvl) {
+ if (!fmt[bf])
+ strcat(fmt, colname);
+
+ p = fmt + strlen(fmt);
+ jsp = jvp;
+
+ switch (jsp->Type) {
+ case TYPE_JOB:
+ job = jsp;
+
+ for (PBPR jrp = bp->GetObject(job); jrp; jrp = bp->GetNext(jrp)) {
+ PCSZ k = bp->GetKey(jrp);
+
+ if (*k != '$') {
+ n = sizeof(fmt) - strlen(fmt) - 1;
+ strncat(strncat(fmt, sep, n), k, n - strlen(sep));
+ n = sizeof(colname) - strlen(colname) - 1;
+ strncat(strncat(colname, "_", n), k, n - 1);
+ } // endif Key
+
+ if (Find(g, bp->GetVlp(jrp), k, j + 1))
+ return true;
+
+ *p = *pc = 0;
+ } // endfor jrp
+
+ return false;
+ case TYPE_JAR:
+ jar = jsp;
+
+ if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key)))
+ ars = MY_MIN(bp->GetArraySize(jar), limit);
+ else
+ ars = MY_MIN(bp->GetArraySize(jar), 1);
+
+ for (int k = 0; k < ars; k++) {
+ n = sizeof(fmt) - (strlen(fmt) + 1);
+
+ if (!tdp->Xcol || stricmp(tdp->Xcol, key)) {
+ sprintf(buf, "%d", k);
+
+ if (tdp->Uri) {
+ strncat(strncat(fmt, sep, n), buf, n - strlen(sep));
+ } else {
+ strncat(strncat(fmt, "[", n), buf, n - 1);
+ strncat(fmt, "]", n - (strlen(buf) + 1));
+ } // endif uri
+
+ if (all) {
+ n = sizeof(colname) - (strlen(colname) + 1);
+ strncat(strncat(colname, "_", n), buf, n - 1);
+ } // endif all
+
+ } else {
+ strncat(fmt, (tdp->Uri ? sep : "[*]"), n);
+ }
+
+ if (Find(g, bp->GetArrayValue(jar, k), "", j))
+ return true;
+
+ *p = *pc = 0;
+ } // endfor k
+
+ return false;
+ default:
+ sprintf(g->Message, "Logical error after %s", fmt);
+ return true;
+ } // endswitch Type
+
+ } else if (lvl >= 0) {
+ if (strfy) {
+ if (!fmt[bf])
+ strcat(fmt, colname);
+
+ strcat(fmt, ".*");
+ } else if (JsonAllPath() && !fmt[bf])
+ strcat(fmt, colname);
+
+ jcol.Type = TYPE_STRG;
+ jcol.Len = sz;
+ jcol.Scale = 0;
+ jcol.Cbn = true;
+ } else
+ return false;
+
+ AddColumn(g);
+ return false;
+} // end of Find
+
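+/***********************************************************************/
+/* AddColumn: add a new column description or merge the attributes */
+/* of the value just found into the matching existing one. */
+/***********************************************************************/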
+void BSONDISC::AddColumn(PGLOBAL g)
+{
+ bool b = fmt[bf] != 0; // True if formatted
+
+ // Check whether this column was already found
+ for (jcp = fjcp; jcp; jcp = jcp->Next)
+ if (!strcmp(colname, jcp->Name))
+ break;
+
+ if (jcp) {
+ if (jcp->Type != jcol.Type) {
+ if (jcp->Type == TYPE_UNKNOWN || jcp->Type == TYPE_NULL)
+ jcp->Type = jcol.Type;
+ // else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID)
+ // jcp->Type = TYPE_STRING;
+ else if (jcp->Type != TYPE_STRG)
+ switch (jcol.Type) {
+ case TYPE_STRG:
+ case TYPE_DBL:
+ jcp->Type = jcol.Type;
+ break;
+ case TYPE_BINT:
+ if (jcp->Type == TYPE_INTG || jcp->Type == TYPE_BOOL)
+ jcp->Type = jcol.Type;
+
+ break;
+ case TYPE_INTG:
+ if (jcp->Type == TYPE_BOOL)
+ jcp->Type = jcol.Type;
+
+ break;
+ default:
+ break;
+          } // endswitch Type
+
+ } // endif Type
+
+ if (b && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) {
+ jcp->Fmt = PlugDup(g, fmt);
+ length[7] = MY_MAX(length[7], strlen(fmt));
+ } // endif fmt
+
+ jcp->Len = MY_MAX(jcp->Len, jcol.Len);
+ jcp->Scale = MY_MAX(jcp->Scale, jcol.Scale);
+ jcp->Cbn |= jcol.Cbn;
+ jcp->Found = true;
+ } else if (jcol.Type != TYPE_UNKNOWN || tdp->Accept) {
+ // New column
+ jcp = (PJCL)PlugSubAlloc(g, NULL, sizeof(JCOL));
+ *jcp = jcol;
+ jcp->Cbn |= (i > 1);
+ jcp->Name = PlugDup(g, colname);
+ length[0] = MY_MAX(length[0], strlen(colname));
+
+ if (b) {
+ jcp->Fmt = PlugDup(g, fmt);
+ length[7] = MY_MAX(length[7], strlen(fmt));
+ } else
+ jcp->Fmt = NULL;
+
+ if (pjcp) {
+ jcp->Next = pjcp->Next;
+ pjcp->Next = jcp;
+ } else
+ fjcp = jcp;
+
+ n++;
+ } // endif jcp
+
+ if (jcp)
+ pjcp = jcp;
+
+} // end of AddColumn
+
+/* -------------------------- Class BTUTIL --------------------------- */
+
+/***********************************************************************/
+/* Find the row in the tree structure. */
+/***********************************************************************/
+PBVAL BTUTIL::FindRow(PGLOBAL g)
+{
+ char *p, *objpath;
+ PBVAL jsp = Tp->Row;
+ PBVAL val = NULL;
+
+ for (objpath = PlugDup(g, Tp->Objname); jsp && objpath; objpath = p) {
+ if ((p = strchr(objpath, Tp->Sep)))
+ *p++ = 0;
+
+    if (*objpath != '[' && !IsNum(objpath)) { // objpath is a key
+ val = (jsp->Type == TYPE_JOB) ?
+ GetKeyValue(jsp, objpath) : NULL;
+ } else {
+ if (*objpath == '[') {
+ if (objpath[strlen(objpath) - 1] == ']')
+ objpath++;
+ else
+ return NULL;
+ } // endif [
+
+ val = (jsp->Type == TYPE_JAR) ?
+ GetArrayValue(GetArray(jsp), atoi(objpath) - Tp->B) : NULL;
+ } // endif objpath
+
+ // jsp = (val) ? val->GetJson() : NULL;
+ jsp = val;
+ } // endfor objpath
+
+ return jsp;
+} // end of FindRow
+
+/***********************************************************************/
+/* Parse the read line. */
+/***********************************************************************/
+PBVAL BTUTIL::ParseLine(PGLOBAL g, int prty, bool cma)
+{
+ pretty = prty;
+ comma = cma;
+ return ParseJson(g, Tp->To_Line, strlen(Tp->To_Line));
+} // end of ParseLine
+
+/***********************************************************************/
+/* Make the top tree from the object path. */
+/***********************************************************************/
+PBVAL BTUTIL::MakeTopTree(PGLOBAL g, int type)
+{
+ PBVAL top = NULL, val = NULL;
+
+ if (Tp->Objname) {
+ if (!Tp->Row) {
+ // Parse and allocate Objpath item(s)
+ char* p;
+ char *objpath = PlugDup(g, Tp->Objname);
+ int i;
+ PBVAL objp = NULL;
+ PBVAL arp = NULL;
+
+ for (; objpath; objpath = p) {
+ if ((p = strchr(objpath, Tp->Sep)))
+ *p++ = 0;
+
+ if (*objpath != '[' && !IsNum(objpath)) {
+ objp = NewVal(TYPE_JOB);
+
+ if (!top)
+ top = objp;
+
+ if (val)
+ SetValueObj(val, objp);
+
+ val = NewVal();
+ SetKeyValue(objp, MOF(val), objpath);
+ } else {
+ if (*objpath == '[') {
+ // Old style
+ if (objpath[strlen(objpath) - 1] != ']') {
+ sprintf(g->Message, "Invalid Table path %s", Tp->Objname);
+ return NULL;
+ } else
+ objpath++;
+
+ } // endif objpath
+
+ if (!top)
+ top = NewVal(TYPE_JAR);
+
+ if (val)
+ SetValueArr(val, arp);
+
+ val = NewVal();
+ i = atoi(objpath) - Tp->B;
+ SetArrayValue(arp, val, i);
+ } // endif objpath
+
+ } // endfor p
+
+ } // endif Val
+
+ Tp->Row = val;
+ if (Tp->Row) Tp->Row->Type = type;
+ } else
+ top = Tp->Row = NewVal(type);
+
+ return top;
+} // end of MakeTopTree
+
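+/***********************************************************************/
+/* SerialVal: serialize a BSON value as a string. */
+/***********************************************************************/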
+PSZ BTUTIL::SerialVal(PGLOBAL g, PBVAL vlp, int pretty)
+{
+ return Serialize(g, vlp, NULL, pretty);
+} // end of SerialVal
+
+/* -------------------------- Class BCUTIL --------------------------- */
+
+/***********************************************************************/
+/* SetJsonValue: set a column value from the contents of a BSON value. */
+/***********************************************************************/
+void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp)
+{
+ if (jvp) {
+ vp->SetNull(false);
+
+ switch (jvp->Type) {
+ case TYPE_STRG:
+ case TYPE_INTG:
+ case TYPE_BINT:
+ case TYPE_DBL:
+ case TYPE_DTM:
+ case TYPE_FLOAT:
+ switch (vp->GetType()) {
+ case TYPE_STRING:
+ case TYPE_DATE:
+ case TYPE_DECIM:
+ vp->SetValue_psz(GetString(jvp));
+ break;
+ case TYPE_INT:
+ case TYPE_SHORT:
+ case TYPE_TINY:
+ vp->SetValue(GetInteger(jvp));
+ break;
+ case TYPE_BIGINT:
+ vp->SetValue(GetBigint(jvp));
+ break;
+ case TYPE_DOUBLE:
+ vp->SetValue(GetDouble(jvp));
+
+ if (jvp->Type == TYPE_DBL || jvp->Type == TYPE_FLOAT)
+ vp->SetPrec(jvp->Nd);
+
+ break;
+ default:
+ sprintf(G->Message, "Unsupported column type %d", vp->GetType());
+ throw 888;
+ } // endswitch Type
+
+ break;
+ case TYPE_BOOL:
+ if (vp->IsTypeNum())
+ vp->SetValue(GetInteger(jvp) ? 1 : 0);
+ else
+ vp->SetValue_psz((PSZ)(GetInteger(jvp) ? "true" : "false"));
+
+ break;
+ case TYPE_JAR:
+ case TYPE_JOB:
+ // SetJsonValue(g, vp, val->GetArray()->GetValue(0));
+ vp->SetValue_psz(GetValueText(g, jvp, NULL));
+ break;
+ default:
+ vp->Reset();
+ vp->SetNull(true);
+ } // endswitch Type
+
+ } else {
+ vp->Reset();
+ vp->SetNull(true);
+ } // endif val
+
+} // end of SetJsonValue
+
+/***********************************************************************/
+/* MakeBson: serialize the BSON item and set the column value to it. */
+/***********************************************************************/
+PVAL BCUTIL::MakeBson(PGLOBAL g, PBVAL jsp)
+{
+ if (Cp->Value->IsTypeNum()) {
+ strcpy(g->Message, "Cannot make Json for a numeric column");
+
+ if (!Cp->Warned) {
+ PushWarning(g, Tp);
+ Cp->Warned = true;
+ } // endif Warned
+
+ Cp->Value->Reset();
+#if 0
+ } else if (Value->GetType() == TYPE_BIN) {
+ if ((unsigned)Value->GetClen() >= sizeof(BSON)) {
+ ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500;
+ PBSON bsp = JbinAlloc(g, NULL, len, jsp);
+
+ strcat(bsp->Msg, " column");
+ ((BINVAL*)Value)->SetBinValue(bsp, sizeof(BSON));
+ } else {
+ strcpy(g->Message, "Column size too small");
+ Value->SetValue_char(NULL, 0);
+ } // endif Clen
+#endif // 0
+ } else
+ Cp->Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
+
+ return Cp->Value;
+} // end of MakeBson
+
+/***********************************************************************/
+/* GetColumnValue: get the column value by following its Jpath nodes. */
+/***********************************************************************/
+PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i)
+{
+ int nod = Cp->Nod, n = nod - 1;
+ JNODE *nodes = Cp->Nodes;
+ PVAL value = Cp->Value;
+ PBVAL arp;
+ PBVAL bvp = NULL;
+
+ for (; i < nod && row; i++) {
+ if (nodes[i].Op == OP_NUM) {
+ value->SetValue(row->Type == TYPE_JAR ? GetSize(row) : 1);
+ return(value);
+ } else if (nodes[i].Op == OP_XX) {
+ return MakeBson(g, row);
+ } else switch (row->Type) {
+ case TYPE_JOB:
+ if (!nodes[i].Key) {
+ // Expected Array was not there, wrap the value
+ if (i < nod - 1)
+ continue;
+ else
+ bvp = row;
+
+ } else
+ bvp = GetKeyValue(row, nodes[i].Key);
+
+ break;
+ case TYPE_JAR:
+ arp = row;
+
+ if (!nodes[i].Key) {
+ if (nodes[i].Op == OP_EQ)
+ bvp = GetArrayValue(arp, nodes[i].Rank);
+ else if (nodes[i].Op == OP_EXP)
+ return ExpandArray(g, arp, i);
+ else
+ return CalculateArray(g, arp, i);
+
+ } else {
+ // Unexpected array, unwrap it as [0]
+ bvp = GetArrayValue(arp, 0);
+ i--;
+ } // endif's
+
+ break;
+ case TYPE_JVAL:
+ bvp = row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ bvp = NULL;
+ } // endswitch Type
+
+ if (i < nod - 1)
+ row = bvp;
+
+ } // endfor i
+
+ SetJsonValue(g, value, bvp);
+ return value;
+} // end of GetColumnValue
+
+/***********************************************************************/
+/* ExpandArray: expand an array column, returning one array item per row. */
+/***********************************************************************/
+PVAL BCUTIL::ExpandArray(PGLOBAL g, PBVAL arp, int n)
+{
+ int nod = Cp->Nod, ars = MY_MIN(Tp->Limit, GetArraySize(arp));
+ JNODE *nodes = Cp->Nodes;
+ PVAL value = Cp->Value;
+ PBVAL bvp;
+ BVAL bval;
+
+ if (!ars) {
+ value->Reset();
+ value->SetNull(true);
+ Tp->NextSame = 0;
+ return value;
+ } // endif ars
+
+ if (!(bvp = GetArrayValue(arp, (nodes[n].Rx = nodes[n].Nx)))) {
+ strcpy(g->Message, "Logical error expanding array");
+ throw 666;
+ } // endif jvp
+
+ if (n < nod - 1 && IsJson(bvp)) {
+ SetValue(&bval, GetColumnValue(g, bvp, n + 1));
+ bvp = &bval;
+ } // endif n
+
+ if (n >= Tp->NextSame) {
+ if (++nodes[n].Nx == ars) {
+ nodes[n].Nx = 0;
+ Cp->Xnod = 0;
+ } else
+ Cp->Xnod = n;
+
+ Tp->NextSame = Cp->Xnod;
+ } // endif NextSame
+
+ SetJsonValue(g, value, bvp);
+ return value;
+} // end of ExpandArray
+
+/***********************************************************************/
+/* CalculateArray: aggregate the array values (add, mult, min, max, concat, avg). */
+/***********************************************************************/
+PVAL BCUTIL::CalculateArray(PGLOBAL g, PBVAL arp, int n)
+{
+ int i, ars, nv = 0, nextsame = Tp->NextSame;
+ bool err;
+ int nod = Cp->Nod;
+ JNODE *nodes = Cp->Nodes;
+ OPVAL op = nodes[n].Op;
+ PVAL val[2], vp = nodes[n].Valp, mulval = Cp->MulVal;
+ PBVAL jvrp, jvp;
+ BVAL jval;
+
+ vp->Reset();
+ ars = MY_MIN(Tp->Limit, GetArraySize(arp));
+ xtrc(1,"CalculateArray: size=%d op=%d nextsame=%d\n", ars, op, nextsame);
+
+ for (i = 0; i < ars; i++) {
+ jvrp = GetArrayValue(arp, i);
+ xtrc(1, "i=%d nv=%d\n", i, nv);
+
+ if (!IsValueNull(jvrp) || (op == OP_CNC && GetJsonNull())) do {
+ if (IsValueNull(jvrp)) {
+ SetString(jvrp, PlugDup(G, GetJsonNull()));
+ jvp = jvrp;
+ } else if (n < nod - 1 && IsJson(jvrp)) {
+ Tp->NextSame = nextsame;
+ SetValue(&jval, GetColumnValue(g, jvrp, n + 1));
+ jvp = &jval;
+ } else
+ jvp = jvrp;
+
+ xtrc(1, "jvp=%s null=%d\n", GetString(jvp), IsValueNull(jvp) ? 1 : 0);
+
+ if (!nv++) {
+ SetJsonValue(g, vp, jvp);
+ continue;
+ } else
+ SetJsonValue(g, mulval, jvp);
+
+ if (!mulval->IsNull()) {
+ switch (op) {
+ case OP_CNC:
+ if (nodes[n].CncVal) {
+ val[0] = nodes[n].CncVal;
+ err = vp->Compute(g, val, 1, op);
+ } // endif CncVal
+
+ val[0] = mulval;
+ err = vp->Compute(g, val, 1, op);
+ break;
+ // case OP_NUM:
+ case OP_SEP:
+ val[0] = nodes[n].Valp;
+ val[1] = mulval;
+ err = vp->Compute(g, val, 2, OP_ADD);
+ break;
+ default:
+ val[0] = nodes[n].Valp;
+ val[1] = mulval;
+ err = vp->Compute(g, val, 2, op);
+ } // endswitch Op
+
+ if (err)
+ vp->Reset();
+
+ if (trace(1)) {
+          char buf[32];
+
+          htrc("vp='%s' err=%d\n",
+            vp->GetCharString(buf), err ? 1 : 0);
+
+ } // endif trace
+
+ } // endif Null
+
+ } while (Tp->NextSame > nextsame);
+
+ } // endfor i
+
+ if (op == OP_SEP) {
+ // Calculate average
+ mulval->SetValue(nv);
+ val[0] = vp;
+ val[1] = mulval;
+
+ if (vp->Compute(g, val, 2, OP_DIV))
+ vp->Reset();
+
+ } // endif Op
+
+ Tp->NextSame = nextsame;
+ return vp;
+} // end of CalculateArray
+
+/***********************************************************************/
+/* GetRow: Get the object containing this column. */
+/***********************************************************************/
+PBVAL BCUTIL::GetRow(PGLOBAL g)
+{
+ int nod = Cp->Nod;
+ JNODE *nodes = Cp->Nodes;
+ PBVAL val = NULL;
+ PBVAL arp;
+ PBVAL nwr, row = Tp->Row;
+
+ for (int i = 0; i < nod && row; i++) {
+ if (i < nod-1 && nodes[i+1].Op == OP_XX)
+ break;
+ else switch (row->Type) {
+ case TYPE_JOB:
+ if (!nodes[i].Key)
+ // Expected Array was not there, wrap the value
+ continue;
+
+ val = GetKeyValue(row, nodes[i].Key);
+ break;
+ case TYPE_JAR:
+ arp = row;
+
+ if (!nodes[i].Key) {
+ if (nodes[i].Op == OP_EQ)
+ val = GetArrayValue(arp, nodes[i].Rank);
+ else
+ val = GetArrayValue(arp, nodes[i].Rx);
+
+ } else {
+ // Unexpected array, unwrap it as [0]
+ val = GetArrayValue(arp, 0);
+ i--;
+ } // endif Nodes
+
+ break;
+ case TYPE_JVAL:
+ val = row;
+ break;
+ default:
+ sprintf(g->Message, "Invalid row JSON type %d", row->Type);
+ val = NULL;
+ } // endswitch Type
+
+ if (val) {
+ row = val;
+ } else {
+ // Construct missing objects
+ for (i++; row && i < nod; i++) {
+ int type;
+
+ if (nodes[i].Op == OP_XX)
+ break;
+ else if (!nodes[i].Key)
+ // Construct intermediate array
+ type = TYPE_JAR;
+ else
+ type = TYPE_JOB;
+
+ if (row->Type == TYPE_JOB) {
+ nwr = AddPair(row, nodes[i - 1].Key, type);
+ } else if (row->Type == TYPE_JAR) {
+ AddArrayValue(row, (nwr = NewVal(type)));
+ } else {
+ strcpy(g->Message, "Wrong type when writing new row");
+ nwr = NULL;
+ } // endif's
+
+ row = nwr;
+ } // endfor i
+
+ break;
+ } // endelse
+
+ } // endfor i
+
+ return row;
+} // end of GetRow
+
+
+/* -------------------------- Class BSONDEF -------------------------- */
+
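+/***********************************************************************/
+/* BSONDEF constructor: set the default table description values. */
+/***********************************************************************/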
+BSONDEF::BSONDEF(void)
+{
+ Jmode = MODE_OBJECT;
+ Objname = NULL;
+ Xcol = NULL;
+ Pretty = 2;
+ Limit = 1;
+ Base = 0;
+ Strict = false;
+ Sep = '.';
+ Uri = NULL;
+ Collname = Options = Filter = NULL;
+ Pipe = false;
+ Driver = NULL;
+ Version = 0;
+ Wrapname = NULL;
+} // end of BSONDEF constructor
+
+/***********************************************************************/
+/* DefineAM: define specific AM block values. */
+/***********************************************************************/
+bool BSONDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
+{
+ G = g;
+ Schema = GetStringCatInfo(g, "DBname", Schema);
+ Jmode = (JMODE)GetIntCatInfo("Jmode", MODE_OBJECT);
+ Objname = GetStringCatInfo(g, "Object", NULL);
+ Xcol = GetStringCatInfo(g, "Expand", NULL);
+ Pretty = GetIntCatInfo("Pretty", 2);
+ Limit = GetIntCatInfo("Limit", 10);
+ Base = GetIntCatInfo("Base", 0) ? 1 : 0;
+ Sep = *GetStringCatInfo(g, "Separator", ".");
+ Accept = GetBoolCatInfo("Accept", false);
+
+ // Don't use url as MONGO uri when called from REST
+ if (stricmp(am, "REST") && (Uri = GetStringCatInfo(g, "Connect", NULL))) {
+#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
+ Collname = GetStringCatInfo(g, "Name",
+ (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name);
+ Collname = GetStringCatInfo(g, "Tabname", Collname);
+ Options = GetStringCatInfo(g, "Colist", NULL);
+ Filter = GetStringCatInfo(g, "Filter", NULL);
+ Pipe = GetBoolCatInfo("Pipeline", false);
+ Driver = GetStringCatInfo(g, "Driver", NULL);
+ Version = GetIntCatInfo("Version", 3);
+ Pretty = 0;
+#if defined(JAVA_SUPPORT)
+ if (Version == 2)
+ Wrapname = GetStringCatInfo(g, "Wrapper", "Mongo2Interface");
+ else
+ Wrapname = GetStringCatInfo(g, "Wrapper", "Mongo3Interface");
+#endif // JAVA_SUPPORT
+#else // !MONGO_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+ return true;
+#endif // !MONGO_SUPPORT
+ } // endif Uri
+
+ return DOSDEF::DefineAM(g, (Uri ? "XMGO" : "DOS"), poff);
+} // end of DefineAM
+
+/***********************************************************************/
+/* GetTable: makes a new Table Description Block. */
+/***********************************************************************/
+PTDB BSONDEF::GetTable(PGLOBAL g, MODE m)
+{
+ if (trace(1))
+ htrc("BSON GetTable Pretty=%d Uri=%s\n", Pretty, SVP(Uri));
+
+ if (Catfunc == FNC_COL)
+ return new(g)TDBBCL(this);
+
+ PTDBASE tdbp;
+ PTXF txfp = NULL;
+
+ // JSN not used for pretty=1 for insert or delete
+ if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) {
+ USETEMP tmp = UseTemp();
+ bool map = Mapped && Pretty >= 0 && m != MODE_INSERT &&
+ !(tmp != TMP_NO && m == MODE_UPDATE) &&
+ !(tmp == TMP_FORCE && (m == MODE_UPDATE || m == MODE_DELETE));
+
+ if (Lrecl) {
+ // Allocate the parse work memory
+ G = PlugInit(NULL, (size_t)Lrecl * (Pretty < 0 ? 2 : 4));
+ } else {
+ strcpy(g->Message, "LRECL is not defined");
+ return NULL;
+ } // endif Lrecl
+
+ if (Pretty < 0) { // BJsonfile
+ txfp = new(g) BINFAM(this);
+ } else if (Uri) {
+ if (Driver && toupper(*Driver) == 'C') {
+#if defined(CMGO_SUPPORT)
+ txfp = new(g) CMGFAM(this);
+#else
+ sprintf(g->Message, "Mongo %s Driver not available", "C");
+ return NULL;
+#endif
+ } else if (Driver && toupper(*Driver) == 'J') {
+#if defined(JAVA_SUPPORT)
+ txfp = new(g) JMGFAM(this);
+#else
+ sprintf(g->Message, "Mongo %s Driver not available", "Java");
+ return NULL;
+#endif
+ } else { // Driver not specified
+#if defined(CMGO_SUPPORT)
+ txfp = new(g) CMGFAM(this);
+#elif defined(JAVA_SUPPORT)
+ txfp = new(g) JMGFAM(this);
+#else // !MONGO_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO");
+ return NULL;
+#endif // !MONGO_SUPPORT
+ } // endif Driver
+
+ } else if (Zipped) {
+#if defined(ZIP_SUPPORT)
+ if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) {
+ txfp = new(g) UNZFAM(this);
+ } else if (m == MODE_INSERT) {
+ txfp = new(g) ZIPFAM(this);
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's m
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+ return NULL;
+#endif // !ZIP_SUPPORT
+ } else if (Compressed) {
+#if defined(GZ_SUPPORT)
+ if (Compressed == 1)
+ txfp = new(g) GZFAM(this);
+ else
+ txfp = new(g) ZLBFAM(this);
+#else // !GZ_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ");
+ return NULL;
+#endif // !GZ_SUPPORT
+ } else if (map) {
+ txfp = new(g) MAPFAM(this);
+ } else
+ txfp = new(g) DOSFAM(this);
+
+ // Txfp must be set for TDBBSN
+ tdbp = new(g) TDBBSN(g, this, txfp);
+ } else {
+ if (Zipped) {
+#if defined(ZIP_SUPPORT)
+ if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) {
+ txfp = new(g) UNZFAM(this);
+ } else if (m == MODE_INSERT) {
+ strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0");
+ return NULL;
+ } else {
+ strcpy(g->Message, "UPDATE/DELETE not supported for ZIP");
+ return NULL;
+ } // endif's m
+#else // !ZIP_SUPPORT
+ sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
+ return NULL;
+#endif // !ZIP_SUPPORT
+ } else
+ txfp = new(g) MAPFAM(this);
+
+ tdbp = new(g) TDBBSON(g, this, txfp);
+ } // endif Pretty
+
+ if (Multiple)
+ tdbp = new(g) TDBMUL(tdbp);
+
+ return tdbp;
+} // end of GetTable
+
+/* --------------------------- Class TDBBSN -------------------------- */
+
+/***********************************************************************/
+/* Implementation of the TDBBSN class (Pretty < 2) */
+/***********************************************************************/
+TDBBSN::TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
+{
+ Bp = new(g) BTUTIL(tdp->G, this);
+ Top = NULL;
+ Row = NULL;
+ Colp = NULL;
+
+ if (tdp) {
+ Jmode = tdp->Jmode;
+ Objname = tdp->Objname;
+ Xcol = tdp->Xcol;
+ Limit = tdp->Limit;
+ Pretty = tdp->Pretty;
+ B = tdp->Base ? 1 : 0;
+ Sep = tdp->Sep;
+ Strict = tdp->Strict;
+ } else {
+ Jmode = MODE_OBJECT;
+ Objname = NULL;
+ Xcol = NULL;
+ Limit = 1;
+ Pretty = 0;
+ B = 0;
+ Sep = '.';
+ Strict = false;
+ } // endif tdp
+
+ Fpos = -1;
+ N = M = 0;
+ NextSame = 0;
+ SameRow = 0;
+ Xval = -1;
+ Comma = false;
+ Bp->SetPretty(Pretty);
+} // end of TDBBSN standard constructor
+
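+/***********************************************************************/
+/* TDBBSN copy constructor, used by the Clone function (for update). */
+/***********************************************************************/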
+TDBBSN::TDBBSN(TDBBSN* tdbp) : TDBDOS(NULL, tdbp)
+{
+ Bp = tdbp->Bp;
+ Top = tdbp->Top;
+ Row = tdbp->Row;
+ Colp = tdbp->Colp;
+ Jmode = tdbp->Jmode;
+ Objname = tdbp->Objname;
+ Xcol = tdbp->Xcol;
+ Fpos = tdbp->Fpos;
+ N = tdbp->N;
+ M = tdbp->M;
+ Limit = tdbp->Limit;
+ NextSame = tdbp->NextSame;
+ SameRow = tdbp->SameRow;
+ Xval = tdbp->Xval;
+ B = tdbp->B;
+ Sep = tdbp->Sep;
+ Pretty = tdbp->Pretty;
+ Strict = tdbp->Strict;
+ Comma = tdbp->Comma;
+} // end of TDBBSN copy constructor
+
+// Used for update
+PTDB TDBBSN::Clone(PTABS t)
+{
+ PTDB tp;
+ PBSCOL cp1, cp2;
+ PGLOBAL g = t->G;
+
+ tp = new(g) TDBBSN(this);
+
+ for (cp1 = (PBSCOL)Columns; cp1; cp1 = (PBSCOL)cp1->GetNext()) {
+ cp2 = new(g) BSONCOL(cp1, tp); // Make a copy
+ NewPointer(t, cp1, cp2);
+ } // endfor cp1
+
+ return tp;
+} // end of Clone
+
+/***********************************************************************/
+/* Allocate JSN column description block. */
+/***********************************************************************/
+PCOL TDBBSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
+{
+ PBSCOL colp = new(g) BSONCOL(g, cdp, this, cprec, n);
+
+ return (colp->ParseJpath(g)) ? NULL : colp;
+} // end of MakeCol
+
+/***********************************************************************/
+/* InsertSpecialColumn: Put a special column ahead of the column list.*/
+/***********************************************************************/
+PCOL TDBBSN::InsertSpecialColumn(PCOL colp)
+{
+ if (!colp->IsSpecial())
+ return NULL;
+
+ //if (Xcol && ((SPCBLK*)colp)->GetRnm())
+ // colp->SetKey(0); // Rownum is no more a key
+
+ colp->SetNext(Columns);
+ Columns = colp;
+ return colp;
+} // end of InsertSpecialColumn
+
+/***********************************************************************/
+/* JSON Cardinality: returns table size in number of rows. */
+/***********************************************************************/
+int TDBBSN::Cardinality(PGLOBAL g)
+{
+ if (!g)
+ return 0;
+ else if (Cardinal < 0) {
+ Cardinal = TDBDOS::Cardinality(g);
+
+ } // endif Cardinal
+
+ return Cardinal;
+} // end of Cardinality
+
+/***********************************************************************/
+/* JSON GetMaxSize: returns file size estimate in number of lines. */
+/***********************************************************************/
+int TDBBSN::GetMaxSize(PGLOBAL g)
+{
+ if (MaxSize < 0)
+ MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1);
+
+ return MaxSize;
+} // end of GetMaxSize
+
+/***********************************************************************/
+/* JSON EstimatedLength. Returns an estimated minimum line length. */
+/***********************************************************************/
+int TDBBSN::EstimatedLength(void)
+{
+ if (AvgLen <= 0)
+ return (Lrecl ? Lrecl : 1024) / 8; // TODO: make it better
+ else
+ return AvgLen;
+
+} // end of EstimatedLength
+
+/***********************************************************************/
+/* OpenDB: Data Base open routine for JSN access method. */
+/***********************************************************************/
+bool TDBBSN::OpenDB(PGLOBAL g)
+{
+ TUSE use = Use;
+
+ if (Use == USE_OPEN) {
+ /*******************************************************************/
+    /* Table already open: replace it at its beginning. ??? */
+ /*******************************************************************/
+ Fpos = -1;
+ NextSame = 0;
+ SameRow = 0;
+ } // endif Use
+
+ /*********************************************************************/
+ /* Open according to logical input/output mode required. */
+ /*********************************************************************/
+ if (TDBDOS::OpenDB(g))
+ return true;
+
+ if (use == USE_OPEN)
+ return false;
+
+ if (Pretty < 0) {
+ /*********************************************************************/
+ /* Binary BJSON table. */
+ /*********************************************************************/
+ xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n",
+ this, Tdb_No, Use, Mode);
+
+ // Lrecl is Ok
+ size_t linelen = Lrecl;
+ MODE mode = Mode;
+
+ // Buffer must be allocated in G->Sarea
+ Mode = MODE_ANY;
+ Txfp->AllocateBuffer(Bp->G);
+ Mode = mode;
+
+ if (Mode == MODE_INSERT)
+ Bp->SubSet(true);
+ else
+ Bp->MemSave();
+
+ To_Line = Txfp->GetBuf();
+ memset(To_Line, 0, linelen);
+ xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line);
+ } // endif Pretty
+
+ /***********************************************************************/
+ /* First opening. */
+ /***********************************************************************/
+ if (Mode == MODE_INSERT) {
+ int type;
+
+ switch (Jmode) {
+ case MODE_OBJECT: type = TYPE_JOB; break;
+ case MODE_ARRAY: type = TYPE_JAR; break;
+ case MODE_VALUE: type = TYPE_JVAL; break;
+ default:
+ sprintf(g->Message, "Invalid Jmode %d", Jmode);
+ return true;
+ } // endswitch Jmode
+
+ Top = Bp->MakeTopTree(g, type);
+ Bp->MemSave();
+ } // endif Mode
+
+ if (Xcol)
+ To_Filter = NULL; // Not compatible
+
+ return false;
+} // end of OpenDB
+
+/***********************************************************************/
+/* SkipHeader: Physically skip first header line if applicable. */
+/* This is called from TDBDOS::OpenDB and must be executed before */
+/* Kindex construction if the file is accessed using an index. */
+/***********************************************************************/
+bool TDBBSN::SkipHeader(PGLOBAL g)
+{
+ int len = GetFileLength(g);
+ bool rc = false;
+
+#if defined(_DEBUG)
+ if (len < 0)
+ return true;
+#endif // _DEBUG
+
+ if (Pretty == 1) {
+ if (Mode == MODE_INSERT || Mode == MODE_DELETE) {
+      // Insert and Delete modes are no longer handled here
+ DBUG_ASSERT(false);
+ } else if (len > 0) // !Insert && !Delete
+ rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g));
+
+ } // endif Pretty
+
+ return rc;
+} // end of SkipHeader
+
+/***********************************************************************/
+/* ReadDB: Data Base read routine for JSN access method. */
+/***********************************************************************/
+int TDBBSN::ReadDB(PGLOBAL g)
+{
+ int rc;
+
+ N++;
+
+ if (NextSame) {
+ SameRow = NextSame;
+ NextSame = 0;
+ M++;
+ return RC_OK;
+ } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) {
+ if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK))
+ return rc; // Deferred reading failed
+
+ if (Pretty >= 0) {
+ // Recover the memory used for parsing
+ Bp->SubSet();
+
+ if ((Row = Bp->ParseLine(g, Pretty, Comma))) {
+ Top = Row;
+ Row = Bp->FindRow(g);
+ SameRow = 0;
+ Fpos++;
+ M = 1;
+ rc = RC_OK;
+ } else if (Pretty != 1 || strcmp(To_Line, "]")) {
+ Bp->GetMsg(g);
+ rc = RC_FX;
+ } else
+ rc = RC_EF;
+
+ } else { // Here we get a movable Json binary tree
+ Bp->MemSet(((BINFAM*)Txfp)->Recsize); // Useful when updating
+ Row = Top = (PBVAL)To_Line;
+ Row = Bp->FindRow(g);
+ SameRow = 0;
+ Fpos++;
+ M = 1;
+ rc = RC_OK;
+ } // endif Pretty
+
+ } // endif ReadDB
+
+ return rc;
+} // end of ReadDB
+
+/***********************************************************************/
+/* PrepareWriting: Prepare the line for WriteDB. */
+/***********************************************************************/
+bool TDBBSN::PrepareWriting(PGLOBAL g)
+{
+ if (Pretty >= 0) {
+ PSZ s;
+
+// if (!(Top = Bp->MakeTopTree(g, Row->Type)))
+// return true;
+
+ if ((s = Bp->SerialVal(g, Top, Pretty))) {
+ if (Comma)
+ strcat(s, ",");
+
+ if ((signed)strlen(s) > Lrecl) {
+ strncpy(To_Line, s, Lrecl);
+ sprintf(g->Message, "Line truncated (lrecl=%d)", Lrecl);
+ return PushWarning(g, this);
+ } else
+ strcpy(To_Line, s);
+
+ return false;
+ } else
+ return true;
+ } else
+ ((BINFAM*)Txfp)->Recsize = ((size_t)PlugSubAlloc(Bp->G, NULL, 0)
+ - (size_t)To_Line);
+ return false;
+} // end of PrepareWriting
+
+/***********************************************************************/
+/* WriteDB: Data Base write routine for JSON access method. */
+/***********************************************************************/
+int TDBBSN::WriteDB(PGLOBAL g) {
+ int rc = TDBDOS::WriteDB(g);
+
+ Bp->SubSet();
+ Bp->Clear(Row);
+ return rc;
+} // end of WriteDB
+
+/***********************************************************************/
+/* Data Base close routine for JSON access method. */
+/***********************************************************************/
+void TDBBSN::CloseDB(PGLOBAL g)
+{
+ TDBDOS::CloseDB(g);
+ Bp->G = PlugExit(Bp->G);
+} // end of CloseDB
+
+/* ---------------------------- BSONCOL ------------------------------ */
+
+/***********************************************************************/
+/* BSONCOL public constructor. */
+/***********************************************************************/
+BSONCOL::BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
+ : DOSCOL(g, cdp, tdbp, cprec, i, "DOS")
+{
+ Tbp = (TDBBSN*)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp);
+ Cp = new(g) BCUTIL(((PBDEF)Tbp->To_Def)->G, this, Tbp);
+ Jpath = cdp->GetFmt();
+ MulVal = NULL;
+ Nodes = NULL;
+ Nod = 0;
+ Sep = Tbp->Sep;
+ Xnod = -1;
+ Xpd = false;
+ Parsed = false;
+ Warned = false;
+} // end of BSONCOL constructor
+
+/***********************************************************************/
+/* BSONCOL constructor used for copying columns. */
+/* tdbp is the pointer to the new table descriptor. */
+/***********************************************************************/
+BSONCOL::BSONCOL(BSONCOL* col1, PTDB tdbp) : DOSCOL(col1, tdbp)
+{
+ Tbp = col1->Tbp;
+ Cp = col1->Cp;
+ Jpath = col1->Jpath;
+ MulVal = col1->MulVal;
+ Nodes = col1->Nodes;
+ Nod = col1->Nod;
+ Sep = col1->Sep;
+ Xnod = col1->Xnod;
+ Xpd = col1->Xpd;
+ Parsed = col1->Parsed;
+ Warned = col1->Warned;
+} // end of BSONCOL copy constructor
+
+/***********************************************************************/
+/* SetBuffer: prepare a column block for write operation. */
+/***********************************************************************/
+bool BSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
+{
+ if (DOSCOL::SetBuffer(g, value, ok, check))
+ return true;
+
+ // Parse the json path
+ if (ParseJpath(g))
+ return true;
+
+ Tbp = (TDBBSN*)To_Tdb;
+ return false;
+} // end of SetBuffer
+
+/***********************************************************************/
+/* Check whether this object is expanded. */
+/***********************************************************************/
+bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
+{
+ if ((Tbp->Xcol && nm && !strcmp(nm, Tbp->Xcol) &&
+ (Tbp->Xval < 0 || Tbp->Xval == i)) || Xpd) {
+ Xpd = true; // Expandable object
+ Nodes[i].Op = OP_EXP;
+ } else if (b) {
+ strcpy(g->Message, "Cannot expand more than one branch");
+ return true;
+ } // endif Xcol
+
+ return false;
+} // end of CheckExpand
+
+/***********************************************************************/
+/* Analyse array processing options. */
+/***********************************************************************/
+bool BSONCOL::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm)
+{
+ int n;
+ bool dg = true, b = false;
+ PJNODE jnp = &Nodes[i];
+
+ //if (*p == '[') p++; // Old syntax .[ or :[
+ n = (int)strlen(p);
+
+ if (*p) {
+ if (p[n - 1] == ']') {
+ p[--n] = 0;
+ } else if (!IsNum(p)) {
+ // Wrong array specification
+ sprintf(g->Message, "Invalid array specification %s for %s", p, Name);
+ return true;
+ } // endif p
+
+ } else
+ b = true;
+
+ // To check whether a numeric Rank was specified
+ dg = IsNum(p);
+
+ if (!n) {
+ // Default specifications
+ if (CheckExpand(g, i, nm, false))
+ return true;
+ else if (jnp->Op != OP_EXP) {
+ if (b) {
+ // Return 1st value (B is the index base)
+ jnp->Rank = Tbp->B;
+ jnp->Op = OP_EQ;
+ } else if (!Value->IsTypeNum()) {
+ jnp->CncVal = AllocateValue(g, (void*)", ", TYPE_STRING);
+ jnp->Op = OP_CNC;
+ } else
+ jnp->Op = OP_ADD;
+
+ } // endif OP
+
+ } else if (dg) {
+ // Return nth value
+ jnp->Rank = atoi(p) - Tbp->B;
+ jnp->Op = OP_EQ;
+ } else if (n == 1) {
+ // Set the Op value;
+ if (Sep == ':')
+ switch (*p) {
+ case '*': *p = 'x'; break;
+ case 'x':
+ case 'X': *p = '*'; break; // Expand this array
+ default: break;
+ } // endswitch p
+
+ switch (*p) {
+ case '+': jnp->Op = OP_ADD; break;
+ case 'x': jnp->Op = OP_MULT; break;
+ case '>': jnp->Op = OP_MAX; break;
+ case '<': jnp->Op = OP_MIN; break;
+ case '!': jnp->Op = OP_SEP; break; // Average
+ case '#': jnp->Op = OP_NUM; break;
+ case '*': // Expand this array
+ if (!Tbp->Xcol && nm) {
+ Xpd = true;
+ jnp->Op = OP_EXP;
+ Tbp->Xval = i;
+ Tbp->Xcol = nm;
+ } else if (CheckExpand(g, i, nm, true))
+ return true;
+
+ break;
+ default:
+ sprintf(g->Message,
+ "Invalid function specification %c for %s", *p, Name);
+ return true;
+ } // endswitch *p
+
+ } else if (*p == '"' && p[n - 1] == '"') {
+ // This is a concat specification
+ jnp->Op = OP_CNC;
+
+ if (n > 2) {
+ // Set concat intermediate string
+ p[n - 1] = 0;
+ jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING);
+ } // endif n
+
+ } else {
+ sprintf(g->Message, "Wrong array specification for %s", Name);
+ return true;
+ } // endif's
+
+ // For calculated arrays, a local Value must be used
+ switch (jnp->Op) {
+ case OP_NUM:
+ jnp->Valp = AllocateValue(g, TYPE_INT);
+ break;
+ case OP_ADD:
+ case OP_MULT:
+ case OP_SEP:
+ if (!IsTypeChar(Buf_Type))
+ jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision());
+ else
+ jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2);
+
+ break;
+ case OP_MIN:
+ case OP_MAX:
+ jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision());
+ break;
+ case OP_CNC:
+ if (IsTypeChar(Buf_Type))
+ jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision());
+ else
+ jnp->Valp = AllocateValue(g, TYPE_STRING, 512);
+
+ break;
+ default:
+ break;
+ } // endswitch Op
+
+ if (jnp->Valp)
+ MulVal = AllocateValue(g, jnp->Valp);
+
+ return false;
+} // end of SetArrayOptions
+
+/***********************************************************************/
+/* Parse the Jpath information, if any. */
+/* This information can be specified in the Fieldfmt column option */
+/* when creating the table. It indicates the position of the node */
+/* corresponding to that column. */
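+/* For instance (illustrative example) Fieldfmt='$.item.prices.1' */
+/* addresses one element of the prices array nested in the item */
+/* object, the exact element depending on the Base table option. */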
+/***********************************************************************/
+bool BSONCOL::ParseJpath(PGLOBAL g)
+{
+ char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL;
+ int i;
+ bool a;
+
+ if (Parsed)
+ return false; // Already done
+ else if (InitValue(g))
+ return true;
+ else if (!Jpath)
+ Jpath = Name;
+
+ if (To_Tdb->GetOrig()) {
+ // This is an updated column, get nodes from origin
+ for (PBSCOL colp = (PBSCOL)Tbp->GetColumns(); colp;
+ colp = (PBSCOL)colp->GetNext())
+ if (!stricmp(Name, colp->GetName())) {
+ Nod = colp->Nod;
+ Nodes = colp->Nodes;
+ Xpd = colp->Xpd;
+ goto fin;
+ } // endif Name
+
+ sprintf(g->Message, "Cannot parse updated column %s", Name);
+ return true;
+ } // endif To_Orig
+
+ pbuf = PlugDup(g, Jpath);
+ if (*pbuf == '$') pbuf++;
+ if (*pbuf == Sep) pbuf++;
+ if (*pbuf == '[') p1 = pbuf++;
+
+ // Estimate the required number of nodes
+ for (i = 0, p = pbuf; (p = NextChr(p, Sep)); i++, p++)
+ Nod++; // One path node found
+
+ Nodes = (PJNODE)PlugSubAlloc(g, NULL, (++Nod) * sizeof(JNODE));
+ memset(Nodes, 0, (Nod) * sizeof(JNODE));
+
+ // Analyze the Jpath for this column
+ for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) {
+ a = (p1 != NULL);
+ p1 = strchr(p, '[');
+ p2 = strchr(p, Sep);
+
+ if (!p2)
+ p2 = p1;
+ else if (p1) {
+ if (p1 < p2)
+ p2 = p1;
+ else if (p1 == p2 + 1)
+ *p2++ = 0; // Old syntax .[ or :[
+ else
+ p1 = NULL;
+
+ } // endif p1
+
+ if (p2)
+ *p2++ = 0;
+
+ // Jpath must be explicit
+ if (a || *p == 0 || *p == '[' || IsNum(p)) {
+ // Analyse intermediate array processing
+ if (SetArrayOptions(g, p, i, Nodes[i - 1].Key))
+ return true;
+
+ } else if (*p == '*') {
+ // Return JSON
+ Nodes[i].Op = OP_XX;
+ } else {
+ Nodes[i].Key = p;
+ Nodes[i].Op = OP_EXIST;
+ } // endif's
+
+ } // endfor i, p
+
+ Nod = i;
+
+fin:
+ MulVal = AllocateValue(g, Value);
+ Parsed = true;
+ return false;
+} // end of ParseJpath
+
+/***********************************************************************/
+/* Get Jpath converted to Mongo path. */
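+/* For instance (illustrative) a Jpath such as 'item[1].price' gives */
+/* the dotted path 'item.1.price', or 'item.price' when proj is true */
+/* and array indexes must be dropped. */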
+/***********************************************************************/
+PSZ BSONCOL::GetJpath(PGLOBAL g, bool proj)
+{
+ if (Jpath) {
+ char* p1, * p2, * mgopath;
+ int i = 0;
+
+ if (strcmp(Jpath, "*")) {
+ p1 = Jpath;
+ if (*p1 == '$') p1++;
+ if (*p1 == '.') p1++;
+ mgopath = PlugDup(g, p1);
+ } else
+ return NULL;
+
+ for (p1 = p2 = mgopath; *p1; p1++)
+ if (i) { // Inside []
+ if (isdigit(*p1)) {
+ if (!proj)
+ *p2++ = *p1;
+
+ } else if (*p1 == ']' && i == 1) {
+ if (proj && p1[1] == '.')
+ p1++;
+
+ i = 0;
+ } else if (*p1 == '.' && i == 2) {
+ if (!proj)
+ *p2++ = '.';
+
+ i = 0;
+ } else if (!proj)
+ return NULL;
+
+ } else switch (*p1) {
+ case ':':
+ case '.':
+ if (isdigit(p1[1]))
+ i = 2;
+
+ *p2++ = '.';
+ break;
+ case '[':
+ if (*(p2 - 1) != '.')
+ *p2++ = '.';
+
+ i = 1;
+ break;
+ case '*':
+ if (*(p2 - 1) == '.' && !*(p1 + 1)) {
+ p2--; // Suppress last :*
+ break;
+ } // endif p2
+
+ default:
+ *p2++ = *p1;
+ break;
+ } // endswitch p1;
+
+ *p2 = 0;
+ return mgopath;
+ } else
+ return NULL;
+
+} // end of GetJpath
+
+/***********************************************************************/
+/* ReadColumn: */
+/***********************************************************************/
+void BSONCOL::ReadColumn(PGLOBAL g)
+{
+ if (!Tbp->SameRow || Xnod >= Tbp->SameRow)
+ Value->SetValue_pval(Cp->GetColumnValue(g, Tbp->Row, 0));
+
+#if defined(DEVELOPMENT)
+ if (Xpd && Value->IsNull() && !((PBDEF)Tbp->To_Def)->Accept)
+ htrc("Null expandable JSON value for column %s\n", Name);
+#endif // DEVELOPMENT
+
+ // Set null when applicable
+ if (!Nullable)
+ Value->SetNull(false);
+
+} // end of ReadColumn
+
+/***********************************************************************/
+/* WriteColumn: */
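+/* When the last path node is OP_XX ('*') the string value is parsed */
+/* as JSON and inserted as a subtree; otherwise a scalar value is set */
+/* or added at the position given by the last path node. */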
+/***********************************************************************/
+void BSONCOL::WriteColumn(PGLOBAL g)
+{
+ if (Xpd && Tbp->Pretty < 2) {
+ strcpy(g->Message, "Cannot write expanded column when Pretty is not 2");
+ throw 666;
+ } // endif Xpd
+
+ /*********************************************************************/
+ /* Check whether this node must be written. */
+ /*********************************************************************/
+ if (Value != To_Val)
+ Value->SetValue_pval(To_Val, FALSE); // Convert the updated value
+
+ /*********************************************************************/
+ /* On INSERT Null values are represented by no node. */
+ /*********************************************************************/
+ if (Value->IsNull() && Tbp->Mode == MODE_INSERT)
+ return;
+
+ PBVAL jsp, row = Cp->GetRow(g);
+
+ if (row) switch (Buf_Type) {
+ case TYPE_STRING:
+ case TYPE_DATE:
+ case TYPE_INT:
+ case TYPE_TINY:
+ case TYPE_SHORT:
+ case TYPE_BIGINT:
+ case TYPE_DOUBLE:
+ if (Buf_Type == TYPE_STRING && Nodes[Nod - 1].Op == OP_XX) {
+ char *s = Value->GetCharValue();
+
+ if (!(jsp = Cp->ParseJson(g, s, strlen(s)))) {
+ strcpy(g->Message, s);
+ throw 666;
+ } // endif jsp
+
+ switch (row->Type) {
+ case TYPE_JAR:
+ if (Nod > 1 && Nodes[Nod - 2].Op == OP_EQ)
+ Cp->SetArrayValue(row, jsp, Nodes[Nod - 2].Rank);
+ else
+ Cp->AddArrayValue(row, jsp);
+
+ break;
+ case TYPE_JOB:
+ if (Nod > 1 && Nodes[Nod - 2].Key)
+ Cp->SetKeyValue(row, jsp, Nodes[Nod - 2].Key);
+
+ break;
+ case TYPE_JVAL:
+ default:
+ Cp->SetValueVal(row, jsp);
+ } // endswitch Type
+
+ break;
+ } else
+ jsp = Cp->NewVal(Value);
+
+ switch (row->Type) {
+ case TYPE_JAR:
+ if (Nodes[Nod - 1].Op == OP_EQ)
+ Cp->SetArrayValue(row, jsp, Nodes[Nod - 1].Rank);
+ else
+ Cp->AddArrayValue(row, jsp);
+
+ break;
+ case TYPE_JOB:
+ if (Nodes[Nod - 1].Key)
+ Cp->SetKeyValue(row, jsp, Nodes[Nod - 1].Key);
+
+ break;
+ case TYPE_JVAL:
+ default:
+ Cp->SetValueVal(row, jsp);
+ } // endswitch Type
+
+ break;
+ default: // Invalid buffer type
+ sprintf(g->Message, "Invalid column type %d", Buf_Type);
+ } // endswitch Type
+
+} // end of WriteColumn
+
+/* -------------------------- Class TDBBSON -------------------------- */
+
+/***********************************************************************/
+/* Implementation of the TDBBSON class. */
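+/* TDBBSON handles pretty = 2 files: the whole document is parsed in */
+/* memory as an array (Docp) and, when changed, is serialized back to */
+/* the file by CloseDB. */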
+/***********************************************************************/
+TDBBSON::TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBBSN(g, tdp, txfp)
+{
+ Docp = NULL;
+ Multiple = tdp->Multiple;
+ Done = Changed = false;
+ Bp->SetPretty(2);
+} // end of TDBBSON standard constructor
+
+TDBBSON::TDBBSON(PBTDB tdbp) : TDBBSN(tdbp)
+{
+ Docp = tdbp->Docp;
+ Multiple = tdbp->Multiple;
+ Done = tdbp->Done;
+ Changed = tdbp->Changed;
+} // end of TDBBSON copy constructor
+
+// Used for update
+PTDB TDBBSON::Clone(PTABS t)
+{
+ PTDB tp;
+ PBSCOL cp1, cp2;
+ PGLOBAL g = t->G;
+
+ tp = new(g) TDBBSON(this);
+
+ for (cp1 = (PBSCOL)Columns; cp1; cp1 = (PBSCOL)cp1->GetNext()) {
+ cp2 = new(g) BSONCOL(cp1, tp); // Make a copy
+ NewPointer(t, cp1, cp2);
+ } // endfor cp1
+
+ return tp;
+} // end of Clone
+
+/***********************************************************************/
+/* Make the document tree from the object path. */
+/***********************************************************************/
+int TDBBSON::MakeNewDoc(PGLOBAL g)
+{
+ // Create a void table that will be populated
+ Docp = Bp->NewVal(TYPE_JAR);
+
+ if (!(Top = Bp->MakeTopTree(g, TYPE_JAR)))
+ return RC_FX;
+
+ Docp = Row;
+ Done = true;
+ return RC_OK;
+} // end of MakeNewDoc
+
+/***********************************************************************/
+/* Make the document tree from a file. */
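+/* When the Objname table option is set, it gives the path of the */
+/* array (or object) holding the rows: for instance (illustrative) */
+/* Objname='data.items' drills down to the items array nested in the */
+/* data object. */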
+/***********************************************************************/
+int TDBBSON::MakeDocument(PGLOBAL g)
+{
+ char *p, *p1, *p2, *memory, *objpath, *key = NULL;
+ int i = 0;
+ size_t len;
+ my_bool a;
+ MODE mode = Mode;
+ PBVAL jsp;
+ PBVAL objp = NULL;
+ PBVAL arp = NULL;
+ PBVAL val = NULL;
+
+ if (Done)
+ return RC_OK;
+
+ /*********************************************************************/
+ /* Create the mapping file object in read mode. */
+ /*********************************************************************/
+ Mode = MODE_READ;
+
+ if (!Txfp->OpenTableFile(g)) {
+ PFBLOCK fp = Txfp->GetTo_Fb();
+
+ if (fp) {
+ len = fp->Length;
+ memory = fp->Memory;
+ } else {
+ Mode = mode; // Restore saved Mode
+ return MakeNewDoc(g);
+ } // endif fp
+
+ } else
+ return RC_FX;
+
+ /*********************************************************************/
+ /* Parse the json file and allocate its tree structure. */
+ /*********************************************************************/
+ g->Message[0] = 0;
+ jsp = Top = Bp->ParseJson(g, memory, len);
+ Txfp->CloseTableFile(g, false);
+ Mode = mode; // Restore saved Mode
+
+ if (!jsp && g->Message[0])
+ return RC_FX;
+
+ if ((objpath = PlugDup(g, Objname))) {
+ if (*objpath == '$') objpath++;
+ if (*objpath == '.') objpath++;
+ p1 = (*objpath == '[') ? objpath++ : NULL;
+
+ /*********************************************************************/
+ /* Find the table in the tree structure. */
+ /*********************************************************************/
+ for (p = objpath; jsp && p; p = (p2 ? p2 : NULL)) {
+ a = (p1 != NULL);
+ p1 = strchr(p, '[');
+ p2 = strchr(p, '.');
+
+ if (!p2)
+ p2 = p1;
+ else if (p1) {
+ if (p1 < p2)
+ p2 = p1;
+ else if (p1 == p2 + 1)
+ *p2++ = 0; // Old syntax .[
+ else
+ p1 = NULL;
+
+ } // endif p1
+
+ if (p2)
+ *p2++ = 0;
+
+ if (!a && *p && *p != '[' && !IsNum(p)) {
+ // obj is a key
+ if (jsp->Type != TYPE_JOB) {
+ strcpy(g->Message, "Table path does not match the json file");
+ return RC_FX;
+ } // endif Type
+
+ key = p;
+ objp = jsp;
+ arp = NULL;
+ val = Bp->GetKeyValue(objp, key);
+
+ if (!val || !(jsp = Bp->GetBson(val))) {
+ sprintf(g->Message, "Cannot find object key %s", key);
+ return RC_FX;
+ } // endif val
+
+ } else {
+ if (*p == '[') {
+ // Old style
+ if (p[strlen(p) - 1] != ']') {
+ sprintf(g->Message, "Invalid Table path near %s", p);
+ return RC_FX;
+ } else
+ p++;
+
+ } // endif p
+
+ if (jsp->Type != TYPE_JAR) {
+ strcpy(g->Message, "Table path does not match the json file");
+ return RC_FX;
+ } // endif Type
+
+ arp = jsp;
+ objp = NULL;
+ i = atoi(p) - B;
+ val = Bp->GetArrayValue(arp, i);
+
+ if (!val) {
+ sprintf(g->Message, "Cannot find array value %d", i);
+ return RC_FX;
+ } // endif val
+
+ } // endif
+
+ jsp = val;
+ } // endfor p
+
+ } // endif objpath
+
+ if (jsp && jsp->Type == TYPE_JAR)
+ Docp = jsp;
+ else {
+ // The table is void or is just one object or one value
+ if (objp) {
+ Docp = Bp->GetKeyValue(objp, key);
+ Docp->To_Val = Bp->MOF(Bp->DupVal(Docp));
+ Docp->Type = TYPE_JAR;
+ } else if (arp) {
+ Docp = Bp->NewVal(TYPE_JAR);
+ Bp->AddArrayValue(Docp, jsp);
+ Bp->SetArrayValue(arp, Docp, i);
+ } else {
+ Top = Docp = Bp->NewVal(TYPE_JAR);
+ Bp->AddArrayValue(Docp, jsp);
+ } // endif's
+
+ } // endif jsp
+
+ Done = true;
+ return RC_OK;
+} // end of MakeDocument
+
+/***********************************************************************/
+/* JSON Cardinality: returns table size in number of rows. */
+/***********************************************************************/
+int TDBBSON::Cardinality(PGLOBAL g)
+{
+ if (!g)
+ return (Xcol || Multiple) ? 0 : 1;
+ else if (Cardinal < 0) {
+ if (!Multiple) {
+ if (MakeDocument(g) == RC_OK)
+ Cardinal = Bp->GetSize(Docp);
+
+ } else
+ return 10;
+
+ } // endif Cardinal
+
+ return Cardinal;
+} // end of Cardinality
+
+/***********************************************************************/
+/* JSON GetMaxSize: returns table size estimate in number of rows. */
+/***********************************************************************/
+int TDBBSON::GetMaxSize(PGLOBAL g)
+{
+ if (MaxSize < 0)
+ MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1);
+
+ return MaxSize;
+} // end of GetMaxSize
+
+/***********************************************************************/
+/* ResetSize: called by TDBMUL when calculating the size estimate. */
+/***********************************************************************/
+void TDBBSON::ResetSize(void)
+{
+ MaxSize = Cardinal = -1;
+ Fpos = -1;
+ N = 0;
+ Done = false;
+} // end of ResetSize
+
+/***********************************************************************/
+/* TDBBSON is not indexable. */
+/***********************************************************************/
+int TDBBSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool)
+{
+ if (pxdf) {
+ strcpy(g->Message, "JSON not indexable when pretty = 2");
+ return RC_FX;
+ } else
+ return RC_OK;
+
+} // end of MakeIndex
+
+/***********************************************************************/
+/* Return the position in the table. */
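+/* The disabled code below packed NextSame into the returned value; */
+/* this version simply uses Fpos as the record position. */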
+/***********************************************************************/
+int TDBBSON::GetRecpos(void)
+{
+#if 0
+ union {
+ uint Rpos;
+ BYTE Spos[4];
+ };
+
+ Rpos = htonl(Fpos);
+ Spos[0] = (BYTE)NextSame;
+ return Rpos;
+#endif // 0
+ return Fpos;
+} // end of GetRecpos
+
+/***********************************************************************/
+/* Set the position in the table. */
+/***********************************************************************/
+bool TDBBSON::SetRecpos(PGLOBAL, int recpos)
+{
+#if 0
+ union {
+ uint Rpos;
+ BYTE Spos[4];
+ };
+
+ Rpos = recpos;
+ NextSame = Spos[0];
+ Spos[0] = 0;
+ Fpos = (signed)ntohl(Rpos);
+
+ //if (Fpos != (signed)ntohl(Rpos)) {
+ // Fpos = ntohl(Rpos);
+ // same = false;
+ //} else
+ // same = true;
+#endif // 0
+
+ Fpos = recpos - 1;
+ return false;
+} // end of SetRecpos
+
+/***********************************************************************/
+/* JSON Access Method opening routine. */
+/***********************************************************************/
+bool TDBBSON::OpenDB(PGLOBAL g)
+{
+ if (Use == USE_OPEN) {
+ /*******************************************************************/
+ /* Table already open replace it at its beginning. */
+ /*******************************************************************/
+ Fpos = -1;
+ NextSame = false;
+ SameRow = 0;
+ return false;
+ } // endif use
+
+/*********************************************************************/
+/* OpenDB: initialize the JSON file processing. */
+/*********************************************************************/
+ if (MakeDocument(g) != RC_OK)
+ return true;
+
+ if (Mode == MODE_INSERT)
+ switch (Jmode) {
+ case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break;
+ case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break;
+ case MODE_VALUE: Row = Bp->NewVal(TYPE_JVAL); break;
+ default:
+ sprintf(g->Message, "Invalid Jmode %d", Jmode);
+ return true;
+ } // endswitch Jmode
+
+ if (Xcol)
+ To_Filter = NULL; // Incompatible with expanded columns
+
+ Use = USE_OPEN;
+ return false;
+} // end of OpenDB
+
+/***********************************************************************/
+/* ReadDB: Data Base read routine for JSON access method. */
+/***********************************************************************/
+int TDBBSON::ReadDB(PGLOBAL)
+{
+ int rc;
+
+ N++;
+
+ if (NextSame) {
+ SameRow = NextSame;
+ NextSame = false;
+ M++;
+ rc = RC_OK;
+ } else if (++Fpos < (signed)Bp->GetSize(Docp)) {
+ Row = Bp->GetArrayValue(Docp, Fpos);
+
+ if (Row->Type == TYPE_JVAL)
+ Row = Bp->GetBson(Row);
+
+ SameRow = 0;
+ M = 1;
+ rc = RC_OK;
+ } else
+ rc = RC_EF;
+
+ return rc;
+} // end of ReadDB
+
+/***********************************************************************/
+/* WriteDB: Data Base write routine for JSON access method. */
+/***********************************************************************/
+int TDBBSON::WriteDB(PGLOBAL g)
+{
+ if (Mode == MODE_INSERT) {
+ Bp->AddArrayValue(Docp, Row);
+
+ switch(Jmode) {
+ case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break;
+ case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break;
+ default: Row = Bp->NewVal(); break;
+ } // endswitch Jmode
+
+ } else
+ Bp->SetArrayValue(Docp, Row, Fpos);
+
+ Changed = true;
+ return RC_OK;
+} // end of WriteDB
+
+/***********************************************************************/
+/* Data Base delete line routine for JSON access method. */
+/***********************************************************************/
+int TDBBSON::DeleteDB(PGLOBAL g, int irc)
+{
+ if (irc == RC_OK)
+ // Delete the current row
+ Bp->DeleteValue(Docp, Fpos);
+ else if (irc == RC_FX)
+ // Delete all
+ Docp->To_Val = 0;
+
+ Changed = true;
+ return RC_OK;
+} // end of DeleteDB
+
+/***********************************************************************/
+/* Data Base close routine for JSON access methods. */
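+/* If the document was modified, it is serialized back to the data */
+/* file using the table Pretty setting. */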
+/***********************************************************************/
+void TDBBSON::CloseDB(PGLOBAL g)
+{
+ if (!Changed)
+ return;
+
+ // Save the modified document
+ char filename[_MAX_PATH];
+
+//Docp->InitArray(g);
+
+ // Use the file name relative to the recorded datapath
+ PlugSetPath(filename, ((PBDEF)To_Def)->Fn, GetPath());
+
+ // Serialize the modified table
+ if (!Bp->Serialize(g, Top, filename, Pretty))
+ puts(g->Message);
+
+} // end of CloseDB
+
+/* ---------------------------TDBBCL class --------------------------- */
+
+/***********************************************************************/
+/* TDBBCL class constructor. */
+/***********************************************************************/
+TDBBCL::TDBBCL(PBDEF tdp) : TDBCAT(tdp) {
+ Topt = tdp->GetTopt();
+ Db = tdp->Schema;
+ Dsn = tdp->Uri;
+} // end of TDBBCL constructor
+
+/***********************************************************************/
+/* GetResult: Get the list of the JSON file columns. */
+/***********************************************************************/
+PQRYRES TDBBCL::GetResult(PGLOBAL g) {
+ return BSONColumns(g, Db, Dsn, Topt, false);
+} // end of GetResult
+
+/* --------------------------- End of json --------------------------- */
diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h
new file mode 100644
index 00000000000..bb3f32bd945
--- /dev/null
+++ b/storage/connect/tabbson.h
@@ -0,0 +1,336 @@
+/*************** tabbson H Declares Source Code File (.H) **************/
+/* Name: tabbson.h Version 1.0 */
+/* */
+/* (C) Copyright to the author Olivier BERTRAND 2020 */
+/* */
+/* This file contains the BSON class declarations. */
+/***********************************************************************/
+#pragma once
+#include "block.h"
+#include "colblk.h"
+#include "bson.h"
+#include "tabjson.h"
+
+typedef class BTUTIL* PBTUT;
+typedef class BCUTIL* PBCUT;
+typedef class BSONDEF* PBDEF;
+typedef class TDBBSON* PBTDB;
+typedef class BSONCOL* PBSCOL;
+class TDBBSN;
+DllExport PQRYRES BSONColumns(PGLOBAL, PCSZ, PCSZ, PTOS, bool);
+
+/***********************************************************************/
+/* Class used to get the columns of a mongo collection. */
+/***********************************************************************/
+class BSONDISC : public BLOCK {
+public:
+ // Constructor
+ BSONDISC(PGLOBAL g, uint* lg);
+
+ // Functions
+ int GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt);
+ bool Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j);
+ void AddColumn(PGLOBAL g);
+
+ // Members
+ JCOL jcol;
+ PJCL jcp, fjcp, pjcp;
+ //PVL vlp;
+ PBDEF tdp;
+ TDBBSN *tjnp;
+ PBTDB tjsp;
+ PBPR jpp;
+ PBVAL jsp;
+ PBPR row;
+ PBTUT bp;
+ PCSZ sep;
+ char colname[65], fmt[129], buf[16];
+ uint *length;
+ int i, n, bf, ncol, lvl, sz, limit;
+ bool all, strfy;
+}; // end of BSONDISC
+
+/***********************************************************************/
+/* BSON table definition. */
+/***********************************************************************/
+class DllExport BSONDEF : public DOSDEF { /* Table description */
+ friend class TDBBSON;
+ friend class TDBBSN;
+ friend class TDBBCL;
+ friend class BSONDISC;
+ friend class BSONCOL;
+#if defined(CMGO_SUPPORT)
+ friend class CMGFAM;
+#endif // CMGO_SUPPORT
+#if defined(JAVA_SUPPORT)
+ friend class JMGFAM;
+#endif // JAVA_SUPPORT
+public:
+ // Constructor
+ BSONDEF(void);
+
+ // Implementation
+ virtual const char* GetType(void) { return "BSON"; }
+
+ // Methods
+ virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff);
+ virtual PTDB GetTable(PGLOBAL g, MODE m);
+
+protected:
+ // Members
+ PGLOBAL G; /* Bson utility memory */
+ JMODE Jmode; /* MODE_OBJECT by default */
+ PCSZ Objname; /* Name of first level object */
+ PCSZ Xcol; /* Name of expandable column */
+ int Limit; /* Limit of multiple values */
+ int Pretty; /* Depends on file structure */
+ int Base; /* The array index base */
+ bool Strict; /* Strict syntax checking */
+ char Sep; /* The Jpath separator */
+ const char* Uri; /* MongoDB connection URI */
+ PCSZ Collname; /* External collection name */
+ PSZ Options; /* Colist ; Pipe */
+ PSZ Filter; /* Filter */
+ PSZ Driver; /* MongoDB Driver (C or JAVA) */
+ bool Pipe; /* True if Colist is a pipeline */
+ int Version; /* Driver version */
+ PSZ Wrapname; /* MongoDB java wrapper name */
+}; // end of BSONDEF
+
+
+/* -------------------------- BTUTIL class --------------------------- */
+
+/***********************************************************************/
+/* Handles all BJSON actions for a BSON table. */
+/***********************************************************************/
+class BTUTIL : public BDOC {
+public:
+ // Constructor
+ BTUTIL(PGLOBAL G, TDBBSN* tp) : BDOC(G) { Tp = tp; }
+
+ // Utility functions
+ PBVAL FindRow(PGLOBAL g);
+ PBVAL ParseLine(PGLOBAL g, int prty, bool cma);
+ PBVAL MakeTopTree(PGLOBAL g, int type);
+ PSZ SerialVal(PGLOBAL g, PBVAL top, int pretty);
+
+protected:
+ // Members
+ TDBBSN* Tp;
+}; // end of class BTUTIL
+
+/* -------------------------- BCUTIL class --------------------------- */
+
+/***********************************************************************/
+/* Handles all BJSON actions for BSON columns. */
+/***********************************************************************/
+class BCUTIL : public BTUTIL {
+public:
+ // Constructor
+ BCUTIL(PGLOBAL G, PBSCOL cp, TDBBSN* tp) : BTUTIL(G, tp) { Cp = cp; }
+
+ // Utility functions
+ void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp);
+ PVAL MakeBson(PGLOBAL g, PBVAL jsp);
+ PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i);
+ PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n);
+ PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n);
+ PBVAL GetRow(PGLOBAL g);
+
+protected:
+ // Member
+ PBSCOL Cp;
+}; // end of class BCUTIL
+
+/* -------------------------- TDBBSN class --------------------------- */
+
+/***********************************************************************/
+/* This is the BSN Access Method class declaration. */
+/* The table is a DOS file, each record being a JSON object. */
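+/* (Pretty < 2; whole-document pretty = 2 files use TDBBSON below.) */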
+/***********************************************************************/
+class DllExport TDBBSN : public TDBDOS {
+ friend class BSONCOL;
+ friend class BSONDEF;
+ friend class BTUTIL;
+ friend class BCUTIL;
+ friend class BSONDISC;
+#if defined(CMGO_SUPPORT)
+ friend class CMGFAM;
+#endif // CMGO_SUPPORT
+#if defined(JAVA_SUPPORT)
+ friend class JMGFAM;
+#endif // JAVA_SUPPORT
+public:
+ // Constructor
+ TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp);
+ TDBBSN(TDBBSN* tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) { return TYPE_AM_JSN; }
+ virtual bool SkipHeader(PGLOBAL g);
+ virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g) TDBBSN(this); }
+ PBVAL GetRow(void) { return Row; }
+
+ // Methods
+ virtual PTDB Clone(PTABS t);
+ virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
+ virtual PCOL InsertSpecialColumn(PCOL colp);
+ virtual int RowNumber(PGLOBAL g, bool b = FALSE) {return (b) ? M : N;}
+ virtual bool CanBeFiltered(void)
+ {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;}
+
+ // Database routines
+ virtual int Cardinality(PGLOBAL g);
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual bool PrepareWriting(PGLOBAL g);
+ virtual int WriteDB(PGLOBAL g);
+ virtual void CloseDB(PGLOBAL g);
+
+ // Specific routine
+ virtual int EstimatedLength(void);
+
+protected:
+ PBVAL FindRow(PGLOBAL g);
+//int MakeTopTree(PGLOBAL g, PBVAL jsp);
+
+ // Members
+ PBTUT Bp; // The BSUTIL handling class
+ PBVAL Top; // The top JSON tree
+ PBVAL Row; // The current row
+ PBSCOL Colp; // The multiple column
+ JMODE Jmode; // MODE_OBJECT by default
+ PCSZ Objname; // The table object name
+ PCSZ Xcol; // Name of expandable column
+ int Fpos; // The current row index
+ int N; // The current Rownum
+ int M; // Index of multiple value
+ int Limit; // Limit of multiple values
+ int Pretty; // Depends on file structure
+ int NextSame; // Same next row
+ int SameRow; // Same row nb
+ int Xval; // Index of expandable array
+ int B; // Array index base
+ char Sep; // The Jpath separator
+ bool Strict; // Strict syntax checking
+ bool Comma; // Row has final comma
+}; // end of class TDBBSN
+
+/* -------------------------- BSONCOL class -------------------------- */
+
+/***********************************************************************/
+/* Class BSONCOL: JSON access method column descriptor. */
+/***********************************************************************/
+class DllExport BSONCOL : public DOSCOL {
+ friend class TDBBSN;
+ friend class TDBBSON;
+ friend class BCUTIL;
+#if defined(CMGO_SUPPORT)
+ friend class CMGFAM;
+#endif // CMGO_SUPPORT
+#if defined(JAVA_SUPPORT)
+ friend class JMGFAM;
+#endif // JAVA_SUPPORT
+public:
+ // Constructors
+ BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i);
+ BSONCOL(BSONCOL* colp, PTDB tdbp); // Constructor used in copy process
+
+ // Implementation
+ virtual int GetAmType(void) { return Tbp->GetAmType(); }
+
+ // Methods
+ virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check);
+ bool ParseJpath(PGLOBAL g);
+ virtual PSZ GetJpath(PGLOBAL g, bool proj);
+ virtual void ReadColumn(PGLOBAL g);
+ virtual void WriteColumn(PGLOBAL g);
+
+protected:
+ bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b);
+ bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm);
+
+ // Default constructor not to be used
+ BSONCOL(void) {}
+
+ // Members
+ TDBBSN *Tbp; // To the JSN table block
+ PBCUT Cp; // To the BCUTIL handling class
+ PVAL MulVal; // To value used by multiple column
+ char *Jpath; // The json path
+ JNODE *Nodes; // The intermediate objects
+ int Nod; // The number of intermediate objects
+ int Xnod; // Index of multiple values
+ char Sep; // The Jpath separator
+ bool Xpd; // True for expandable column
+ bool Parsed; // True when parsed
+ bool Warned; // True when warning issued
+}; // end of class BSONCOL
+
+/* -------------------------- TDBBSON class -------------------------- */
+
+/***********************************************************************/
+/* This is the JSON Access Method class declaration. */
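+/* It handles pretty = 2 files that are entirely parsed in memory. */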
+/***********************************************************************/
+class DllExport TDBBSON : public TDBBSN {
+ friend class BSONDEF;
+ friend class BSONCOL;
+public:
+ // Constructor
+ TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp);
+ TDBBSON(PBTDB tdbp);
+
+ // Implementation
+ virtual AMT GetAmType(void) { return TYPE_AM_JSON; }
+ virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g) TDBBSON(this); }
+ PBVAL GetDoc(void) { return Docp; }
+
+ // Methods
+ virtual PTDB Clone(PTABS t);
+
+ // Database routines
+ virtual int Cardinality(PGLOBAL g);
+ virtual int GetMaxSize(PGLOBAL g);
+ virtual void ResetSize(void);
+ virtual int GetProgCur(void) { return N; }
+ virtual int GetRecpos(void);
+ virtual bool SetRecpos(PGLOBAL g, int recpos);
+ virtual bool OpenDB(PGLOBAL g);
+ virtual int ReadDB(PGLOBAL g);
+ virtual bool PrepareWriting(PGLOBAL g) { return false; }
+ virtual int WriteDB(PGLOBAL g);
+ virtual int DeleteDB(PGLOBAL g, int irc);
+ virtual void CloseDB(PGLOBAL g);
+ int MakeDocument(PGLOBAL g);
+
+ // Optimization routines
+ virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add);
+
+protected:
+ int MakeNewDoc(PGLOBAL g);
+
+ // Members
+ PBVAL Docp; // The document array
+ int Multiple; // 0: No 1: DIR 2: Section 3: filelist
+ bool Done; // True when document parsing is done
+ bool Changed; // After Update, Insert or Delete
+}; // end of class TDBBSON
+
+/***********************************************************************/
+/* This is the class declaration for the JSON catalog table. */
+/***********************************************************************/
+class DllExport TDBBCL : public TDBCAT {
+public:
+ // Constructor
+ TDBBCL(PBDEF tdp);
+
+protected:
+ // Specific routines
+ virtual PQRYRES GetResult(PGLOBAL g);
+
+ // Members
+ PTOS Topt;
+ PCSZ Db;
+ PCSZ Dsn;
+}; // end of class TDBBCL
diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp
index 3002f8906ed..fa764b1f84d 100644
--- a/storage/connect/tabdos.cpp
+++ b/storage/connect/tabdos.cpp
@@ -1995,7 +1995,7 @@ int TDBDOS::Cardinality(PGLOBAL g)
if (Mode == MODE_ANY && ExactInfo()) {
// Using index impossible or failed, do it the hard way
Mode = MODE_READ;
- To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1);
+ To_Line = (char*)PlugSubAlloc(g, NULL, (size_t)Lrecl + 1);
if (Txfp->OpenTableFile(g))
return (Cardinal = Txfp->Cardinality(g));
@@ -2145,6 +2145,9 @@ bool TDBDOS::OpenDB(PGLOBAL g)
} // endif use
if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS
+#if defined(BSON_SUPPORT)
+ && Txfp->GetAmType() != TYPE_AM_BIN
+#endif // BSON_SUPPORT
&& Txfp->GetAmType() != TYPE_AM_MGO) {
// Delete all lines. Not handled in MAP or block mode
Txfp = new(g) DOSFAM((PDOSDEF)To_Def);
@@ -2229,7 +2232,7 @@ int TDBDOS::ReadDB(PGLOBAL g)
return RC_EF;
case -2: // No match for join
return RC_NF;
- case -3: // Same record as last non null one
+ case -3: // Same record as non null last one
num_there++;
return RC_OK;
default:
diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp
index eed67525f78..37d28b96517 100644
--- a/storage/connect/tabfmt.cpp
+++ b/storage/connect/tabfmt.cpp
@@ -67,7 +67,7 @@
/* This should be an option. */
/***********************************************************************/
#define MAXCOL 200 /* Default max column nb in result */
-#define TYPE_UNKNOWN 10 /* Must be greater than other types */
+#define TYPE_UNKNOWN 12 /* Must be greater than other types */
/***********************************************************************/
/* External function. */
@@ -311,14 +311,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
} else if (*p == q) {
if (phase == 0) {
- if (blank)
- {
+ if (blank) {
if (++nerr > mxr) {
sprintf(g->Message, MSG(MISPLACED_QUOTE), num_read);
goto err;
} else
goto skip;
}
+
n = 0;
phase = digit = 1;
} else if (phase == 1) {
@@ -342,14 +342,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
goto skip;
} else {
- if (phase == 2)
- {
+ if (phase == 2) {
if (++nerr > mxr) {
sprintf(g->Message, MSG(MISPLACED_QUOTE), num_read);
goto err;
} else
goto skip;
}
+
// isdigit cannot be used here because of debug assert
if (!strchr("0123456789", *p)) {
if (!digit && *p == dechar)
@@ -364,14 +364,14 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info)
blank = 1;
} // endif's *p
- if (phase == 1)
- {
+ if (phase == 1) {
if (++nerr > mxr) {
sprintf(g->Message, MSG(UNBALANCE_QUOTE), num_read);
goto err;
} else
goto skip;
}
+
if (n) {
len[i] = MY_MAX(len[i], n);
type = (digit || n == 0 || (dec && n == 1)) ? TYPE_STRING
@@ -744,8 +744,7 @@ bool TDBCSV::OpenDB(PGLOBAL g)
int i, len;
PCSVCOL colp;
- if (!Fields) // May have been set in TABFMT::OpenDB
- {
+ if (!Fields) { // May have been set in TABFMT::OpenDB
if (Mode != MODE_UPDATE && Mode != MODE_INSERT) {
for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next)
if (!colp->IsSpecial() && !colp->IsVirtual())
@@ -759,6 +758,7 @@ bool TDBCSV::OpenDB(PGLOBAL g)
if (!cdp->IsSpecial() && !cdp->IsVirtual())
Fields++;
}
+
Offset = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields);
Fldlen = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields);
@@ -778,8 +778,7 @@ bool TDBCSV::OpenDB(PGLOBAL g)
} // endfor i
- if (Field)
- {
+ if (Field) {
// Prepare writing fields
if (Mode != MODE_UPDATE) {
for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next)
@@ -803,6 +802,7 @@ bool TDBCSV::OpenDB(PGLOBAL g)
Fldtyp[i] = IsTypeNum(cdp->GetType());
} // endif cdp
}
+
} // endif Use
if (Header) {
@@ -1051,8 +1051,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
if (i)
strcat(To_Line, sep);
- if (Field[i])
- {
+ if (Field[i]) {
if (!strlen(Field[i])) {
// Generally null fields are not quoted
if (Quoted > 2)
@@ -1060,7 +1059,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
strcat(strcat(To_Line, qot), qot);
} else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot
- || Quoted > 1 || (Quoted == 1 && !Fldtyp[i])))
+ || Quoted > 1 || (Quoted == 1 && !Fldtyp[i]))) {
if (strchr(Field[i], Qot)) {
// Field contains quotes that must be doubled
int j, k = strlen(To_Line), n = strlen(Field[i]);
@@ -1078,10 +1077,12 @@ bool TDBCSV::PrepareWriting(PGLOBAL g)
To_Line[k] = '\0';
} else
strcat(strcat(strcat(To_Line, qot), Field[i]), qot);
+ }
else
strcat(To_Line, Field[i]);
}
+
} // endfor i
#if defined(_DEBUG)
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index dbcd590c3de..c79776163b5 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -1,8 +1,9 @@
/************* tabjson C++ Program Source Code File (.CPP) *************/
-/* PROGRAM NAME: tabjson Version 1.7 */
-/* (C) Copyright to the author Olivier BERTRAND 2014 - 2019 */
+/* PROGRAM NAME: tabjson Version 1.8 */
+/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */
/* This program are the JSON class DB execution routines. */
/***********************************************************************/
+#undef BSON_SUPPORT
/***********************************************************************/
/* Include relevant sections of the MariaDB header file. */
@@ -46,7 +47,7 @@
/* This should be an option. */
/***********************************************************************/
#define MAXCOL 200 /* Default max column nb in result */
-#define TYPE_UNKNOWN 12 /* Must be greater than other types */
+//#define TYPE_UNKNOWN 12 /* Must be greater than other types */
/***********************************************************************/
/* External functions. */
@@ -114,7 +115,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info)
/*********************************************************************/
for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) {
if (jcp->Type == TYPE_UNKNOWN)
- jcp->Type = TYPE_STRING; // Void column
+ jcp->Type = TYPE_STRG; // Void column
crp = qrp->Colresp; // Column Name
crp->Kdata->SetValue(jcp->Name, i);
@@ -152,26 +153,29 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg)
{
length = lg;
jcp = fjcp = pjcp = NULL;
+ tdp = NULL;
tjnp = NULL;
jpp = NULL;
tjsp = NULL;
jsp = NULL;
row = NULL;
sep = NULL;
- i = n = bf = ncol = lvl = sz = 0;
+ i = n = bf = ncol = lvl = sz = limit = 0;
all = strfy = false;
} // end of JSONDISC constructor
int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
{
- char filename[_MAX_PATH];
- bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
+ char filename[_MAX_PATH];
+ bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
+ PGLOBAL G = NULL;
lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
sep = GetStringTableOption(g, topt, "Separator", ".");
sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
- strfy = GetBooleanTableOption(g, topt, "Stringify", false);
+ limit = GetIntegerTableOption(g, topt, "Limit", 10);
+ strfy = GetBooleanTableOption(g, topt, "Stringify", false);
/*********************************************************************/
/* Open the input file. */
@@ -240,7 +244,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
if (tjsp->MakeDocument(g))
return 0;
- jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetValue(0) : NULL;
+ jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetArrayValue(0) : NULL;
} else {
if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))
{
@@ -286,18 +290,15 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
#endif
} // endif Driver
- } else
+ } else if (tdp->Pretty >= 0)
tjnp = new(g) TDBJSN(tdp, new(g) DOSFAM(tdp));
+ else
+ tjnp = new(g) TDBJSN(tdp, new(g) BINFAM(tdp));
tjnp->SetMode(MODE_READ);
// Allocate the parse work memory
- PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL));
- memset(G, 0, sizeof(GLOBAL));
- G->Sarea_Size = (size_t)tdp->Lrecl * 10;
- G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
- PlugSubSet(G->Sarea, G->Sarea_Size);
- G->jump_level = 0;
+ G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 10 : 2));
tjnp->SetG(G);
if (tjnp->OpenDB(g))
@@ -309,7 +310,8 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
case RC_FX:
goto err;
default:
- jsp = tjnp->GetRow();
+// jsp = tjnp->FindRow(g); // FindRow was done in ReadDB
+ jsp = tjnp->Row;
} // endswitch ReadDB
} // endif pretty
@@ -335,11 +337,11 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
/* Analyse the JSON tree and define columns. */
/*********************************************************************/
for (i = 1; ; i++) {
- for (jpp = row->GetFirst(); jpp; jpp = jpp->GetNext()) {
- strncpy(colname, jpp->GetKey(), 64);
+ for (jpp = row->GetFirst(); jpp; jpp = jpp->Next) {
+ strncpy(colname, jpp->Key, 64);
fmt[bf] = 0;
- if (Find(g, jpp->GetVal(), colname, MY_MIN(lvl, 0)))
+ if (Find(g, jpp->Val, colname, MY_MIN(lvl, 0)))
goto err;
} // endfor jpp
@@ -359,11 +361,12 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
case RC_FX:
goto err;
default:
- jsp = tjnp->GetRow();
+// jsp = tjnp->FindRow(g);
+ jsp = tjnp->Row;
} // endswitch ReadDB
} else
- jsp = tjsp->GetDoc()->GetValue(i);
+ jsp = tjsp->GetDoc()->GetArrayValue(i);
if (!(row = (jsp) ? jsp->GetObject() : NULL))
break;
@@ -390,14 +393,35 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
PJOB job;
PJAR jar;
- if ((valp = jvp ? jvp->GetValue() : NULL)) {
- if (JsonAllPath() && !fmt[bf])
+ if (jvp && jvp->DataType != TYPE_JSON) {
+ if (JsonAllPath() && !fmt[bf])
strcat(fmt, colname);
- jcol.Type = valp->GetType();
- jcol.Len = valp->GetValLen();
- jcol.Scale = valp->GetValPrec();
- jcol.Cbn = valp->IsNull();
+ jcol.Type = jvp->DataType;
+
+ switch (jvp->DataType) {
+ case TYPE_STRG:
+ case TYPE_DTM:
+ jcol.Len = (int)strlen(jvp->Strp);
+ break;
+ case TYPE_INTG:
+ case TYPE_BINT:
+ jcol.Len = (int)strlen(jvp->GetString(g));
+ break;
+ case TYPE_DBL:
+ jcol.Len = (int)strlen(jvp->GetString(g));
+ jcol.Scale = jvp->Nd;
+ break;
+ case TYPE_BOOL:
+ jcol.Len = 1;
+ break;
+ default:
+ jcol.Len = 0;
+ break;
+ } // endswitch Type
+
+ jcol.Scale = jvp->Nd;
+ jcol.Cbn = jvp->DataType == TYPE_NULL;
} else if (!jvp || jvp->IsNull()) {
jcol.Type = TYPE_UNKNOWN;
jcol.Len = jcol.Scale = 0;
@@ -413,8 +437,8 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
case TYPE_JOB:
job = (PJOB)jsp;
- for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->GetNext()) {
- PCSZ k = jrp->GetKey();
+ for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->Next) {
+ PCSZ k = jrp->Key;
if (*k != '$') {
n = sizeof(fmt) - strlen(fmt) -1;
@@ -423,7 +447,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
strncat(strncat(colname, "_", n), k, n - 1);
} // endif Key
- if (Find(g, jrp->GetVal(), k, j + 1))
+ if (Find(g, jrp->Val, k, j + 1))
return true;
*p = *pc = 0;
@@ -434,7 +458,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
jar = (PJAR)jsp;
if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key)))
- ars = jar->GetSize(false);
+ ars = MY_MIN(jar->GetSize(false), limit);
else
ars = MY_MIN(jar->GetSize(false), 1);
@@ -460,7 +484,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
strncat(fmt, (tdp->Uri ? sep : "[*]"), n);
}
- if (Find(g, jar->GetValue(k), "", j))
+ if (Find(g, jar->GetArrayValue(k), "", j))
return true;
*p = *pc = 0;
@@ -481,7 +505,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
} else if (JsonAllPath() && !fmt[bf])
strcat(fmt, colname);
- jcol.Type = TYPE_STRING;
+ jcol.Type = TYPE_STRG;
jcol.Len = sz;
jcol.Scale = 0;
jcol.Cbn = true;
@@ -503,10 +527,29 @@ void JSONDISC::AddColumn(PGLOBAL g)
if (jcp) {
if (jcp->Type != jcol.Type) {
- if (jcp->Type == TYPE_UNKNOWN)
+ if (jcp->Type == TYPE_UNKNOWN || jcp->Type == TYPE_NULL)
jcp->Type = jcol.Type;
- else if (jcol.Type != TYPE_UNKNOWN)
- jcp->Type = TYPE_STRING;
+// else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID)
+// jcp->Type = TYPE_STRING;
+ else if (jcp->Type != TYPE_STRG)
+ switch (jcol.Type) {
+ case TYPE_STRG:
+ case TYPE_DBL:
+ jcp->Type = jcol.Type;
+ break;
+ case TYPE_BINT:
+ if (jcp->Type == TYPE_INTG || jcp->Type == TYPE_BOOL)
+ jcp->Type = jcol.Type;
+
+ break;
+ case TYPE_INTG:
+ if (jcp->Type == TYPE_BOOL)
+ jcp->Type = jcol.Type;
+
+ break;
+ default:
+ break;
+ } // endswitch Type
} // endif Type
@@ -625,9 +668,9 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
PTXF txfp = NULL;
// JSN not used for pretty=1 for insert or delete
- if (!Pretty || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) {
+ if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) {
USETEMP tmp = UseTemp();
- bool map = Mapped && m != MODE_INSERT &&
+ bool map = Mapped && Pretty >= 0 && m != MODE_INSERT &&
!(tmp != TMP_NO && m == MODE_UPDATE) &&
!(tmp == TMP_FORCE &&
(m == MODE_UPDATE || m == MODE_DELETE));
@@ -684,21 +727,26 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
#endif // !GZ_SUPPORT
} else if (map)
txfp = new(g) MAPFAM(this);
- else
+ else if (Pretty < 0) // BJsonfile
+ txfp = new(g) BINFAM(this);
+ else
txfp = new(g) DOSFAM(this);
- // Txfp must be set for TDBDOS
+ // Txfp must be set for TDBJSN
tdbp = new(g) TDBJSN(this, txfp);
if (Lrecl) {
// Allocate the parse work memory
+#if 0
PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL));
memset(G, 0, sizeof(GLOBAL));
- G->Sarea_Size = Lrecl * 10;
+ G->Sarea_Size = (size_t)Lrecl * 10;
G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size);
PlugSubSet(G->Sarea, G->Sarea_Size);
G->jump_level = 0;
((TDBJSN*)tdbp)->G = G;
+#endif // 0
+ ((TDBJSN*)tdbp)->G = PlugInit(NULL, (size_t)Lrecl * (Pretty >= 0 ? 10 : 2));
} else {
strcpy(g->Message, "LRECL is not defined");
return NULL;
@@ -736,10 +784,10 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
/* --------------------------- Class TDBJSN -------------------------- */
/***********************************************************************/
-/* Implementation of the TDBJSN class. */
+/* Implementation of the TDBJSN class (Pretty < 2) */
/***********************************************************************/
TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
- {
+{
G = NULL;
Top = NULL;
Row = NULL;
@@ -772,35 +820,35 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
SameRow = 0;
Xval = -1;
Comma = false;
- } // end of TDBJSN standard constructor
+} // end of TDBJSN standard constructor
-TDBJSN::TDBJSN(TDBJSN *tdbp) : TDBDOS(NULL, tdbp)
- {
- G = NULL;
- Top = tdbp->Top;
- Row = tdbp->Row;
- Val = tdbp->Val;
- Colp = tdbp->Colp;
- Jmode = tdbp->Jmode;
- Objname = tdbp->Objname;
- Xcol = tdbp->Xcol;
- Fpos = tdbp->Fpos;
- N = tdbp->N;
- M = tdbp->M;
- Limit = tdbp->Limit;
- NextSame = tdbp->NextSame;
- SameRow = tdbp->SameRow;
- Xval = tdbp->Xval;
- B = tdbp->B;
- Sep = tdbp->Sep;
- Pretty = tdbp->Pretty;
- Strict = tdbp->Strict;
- Comma = tdbp->Comma;
- } // end of TDBJSN copy constructor
+TDBJSN::TDBJSN(TDBJSN* tdbp) : TDBDOS(NULL, tdbp)
+{
+ G = NULL;
+ Top = tdbp->Top;
+ Row = tdbp->Row;
+ Val = tdbp->Val;
+ Colp = tdbp->Colp;
+ Jmode = tdbp->Jmode;
+ Objname = tdbp->Objname;
+ Xcol = tdbp->Xcol;
+ Fpos = tdbp->Fpos;
+ N = tdbp->N;
+ M = tdbp->M;
+ Limit = tdbp->Limit;
+ NextSame = tdbp->NextSame;
+ SameRow = tdbp->SameRow;
+ Xval = tdbp->Xval;
+ B = tdbp->B;
+ Sep = tdbp->Sep;
+ Pretty = tdbp->Pretty;
+ Strict = tdbp->Strict;
+ Comma = tdbp->Comma;
+} // end of TDBJSN copy constructor
// Used for update
PTDB TDBJSN::Clone(PTABS t)
- {
+{
G = NULL;
PTDB tp;
PJCOL cp1, cp2;
@@ -814,23 +862,23 @@ PTDB TDBJSN::Clone(PTABS t)
} // endfor cp1
return tp;
- } // end of Clone
+} // end of Clone
/***********************************************************************/
/* Allocate JSN column description block. */
/***********************************************************************/
PCOL TDBJSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
- {
+{
PJCOL colp = new(g) JSONCOL(g, cdp, this, cprec, n);
return (colp->ParseJpath(g)) ? NULL : colp;
- } // end of MakeCol
+} // end of MakeCol
/***********************************************************************/
/* InsertSpecialColumn: Put a special column ahead of the column list.*/
/***********************************************************************/
PCOL TDBJSN::InsertSpecialColumn(PCOL colp)
- {
+{
if (!colp->IsSpecial())
return NULL;
@@ -840,31 +888,47 @@ PCOL TDBJSN::InsertSpecialColumn(PCOL colp)
colp->SetNext(Columns);
Columns = colp;
return colp;
- } // end of InsertSpecialColumn
+} // end of InsertSpecialColumn
+#if 0
/***********************************************************************/
/* JSON Cardinality: returns table size in number of rows. */
/***********************************************************************/
int TDBJSN::Cardinality(PGLOBAL g)
- {
+{
if (!g)
return 0;
- else if (Cardinal < 0)
- Cardinal = TDBDOS::Cardinality(g);
+ else if (Cardinal < 0) {
+ Cardinal = TDBDOS::Cardinality(g);
+
+ } // endif Cardinal
return Cardinal;
- } // end of Cardinality
+} // end of Cardinality
/***********************************************************************/
/* JSON GetMaxSize: returns file size estimate in number of lines. */
/***********************************************************************/
int TDBJSN::GetMaxSize(PGLOBAL g)
- {
- if (MaxSize < 0)
- MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1);
+{
+ if (MaxSize < 0)
+ MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1);
return MaxSize;
- } // end of GetMaxSize
+} // end of GetMaxSize
+#endif // 0
+
+/***********************************************************************/
+/* JSON EstimatedLength. Returns an estimated minimum line length. */
+/***********************************************************************/
+int TDBJSN::EstimatedLength(void)
+{
+ if (AvgLen <= 0)
+ return (Lrecl ? Lrecl : 1024) / 8; // TODO: make it better
+ else
+ return AvgLen;
+
+} // end of EstimatedLength
/***********************************************************************/
/* Find the row in the tree structure. */
@@ -881,7 +945,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g)
if (*objpath != '[' && !IsNum(objpath)) { // objpass is a key
val = (jsp->GetType() == TYPE_JOB) ?
- jsp->GetObject()->GetValue(objpath) : NULL;
+ jsp->GetObject()->GetKeyValue(objpath) : NULL;
} else {
if (*objpath == '[') {
if (objpath[strlen(objpath) - 1] == ']')
@@ -891,7 +955,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g)
} // endif [
val = (jsp->GetType() == TYPE_JAR) ?
- jsp->GetArray()->GetValue(atoi(objpath) - B) : NULL;
+ jsp->GetArray()->GetArrayValue(atoi(objpath) - B) : NULL;
} // endif objpath
jsp = (val) ? val->GetJson() : NULL;
@@ -904,7 +968,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g)
/* OpenDB: Data Base open routine for JSN access method. */
/***********************************************************************/
bool TDBJSN::OpenDB(PGLOBAL g)
- {
+{
if (Use == USE_OPEN) {
/*******************************************************************/
/* Table already open replace it at its beginning. */
@@ -928,7 +992,45 @@ bool TDBJSN::OpenDB(PGLOBAL g)
} // endif Use
- if (TDBDOS::OpenDB(g))
+ if (Pretty < 0) {
+ /*******************************************************************/
+ /* Binary BJSON table. */
+ /*******************************************************************/
+ xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n",
+ this, Tdb_No, Use, Mode);
+
+ if (Use == USE_OPEN) {
+ /*******************************************************************/
+ /* Table already open, just replace it at its beginning. */
+ /*******************************************************************/
+ if (!To_Kindex) {
+ Txfp->Rewind(); // see comment in Work.log
+ } else // Table is to be accessed through a sorted index table
+ To_Kindex->Reset();
+
+ return false;
+ } // endif use
+
+ /*********************************************************************/
+ /* Open according to logical input/output mode required. */
+ /* Use conventionnal input/output functions. */
+ /*********************************************************************/
+ if (Txfp->OpenTableFile(g))
+ return true;
+
+ Use = USE_OPEN; // Do it now in case we are recursively called
+
+ /*********************************************************************/
+ /* Lrecl is Ok. */
+ /*********************************************************************/
+ size_t linelen = Lrecl;
+
+ //To_Line = (char*)PlugSubAlloc(g, NULL, linelen);
+ //memset(To_Line, 0, linelen);
+ To_Line = Txfp->GetBuf();
+ xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line);
+ return false;
+ } else if (TDBDOS::OpenDB(g))
return true;
if (Xcol)
@@ -943,7 +1045,7 @@ bool TDBJSN::OpenDB(PGLOBAL g)
/* Kindex construction if the file is accessed using an index. */
/***********************************************************************/
bool TDBJSN::SkipHeader(PGLOBAL g)
- {
+{
int len = GetFileLength(g);
bool rc = false;
@@ -952,62 +1054,71 @@ bool TDBJSN::SkipHeader(PGLOBAL g)
return true;
#endif // _DEBUG
-#if defined(__WIN__)
-#define Ending 2
-#else // !__WIN__
-#define Ending 1
-#endif // !__WIN__
-
if (Pretty == 1) {
if (Mode == MODE_INSERT || Mode == MODE_DELETE) {
// Mode Insert and delete are no more handled here
- assert(false);
- } else if (len) // !Insert && !Delete
+ DBUG_ASSERT(false);
+ } else if (len > 0) // !Insert && !Delete
rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g));
- } // endif Pretty
+ } // endif Pretty
return rc;
- } // end of SkipHeader
+} // end of SkipHeader
/***********************************************************************/
/* ReadDB: Data Base read routine for JSN access method. */
/***********************************************************************/
-int TDBJSN::ReadDB(PGLOBAL g)
- {
- int rc;
+int TDBJSN::ReadDB(PGLOBAL g) {
+ int rc;
+
+ N++;
+
+ if (NextSame) {
+ SameRow = NextSame;
+ NextSame = 0;
+ M++;
+ return RC_OK;
+ } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) {
+ if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK))
+ return rc; // Deferred reading failed
+
+ if (Pretty >= 0) {
+ // Recover the memory used for parsing
+ PlugSubSet(G->Sarea, G->Sarea_Size);
+
+ if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) {
+ Row = FindRow(g);
+ SameRow = 0;
+ Fpos++;
+ M = 1;
+ rc = RC_OK;
+ } else if (Pretty != 1 || strcmp(To_Line, "]")) {
+ strcpy(g->Message, G->Message);
+ rc = RC_FX;
+ } else
+ rc = RC_EF;
- N++;
+ } else {
+ // Here we get a movable Json binary tree
+ PJSON jsp;
+ SWAP* swp;
- if (NextSame) {
- SameRow = NextSame;
- NextSame = 0;
- M++;
- return RC_OK;
- } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) {
- if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK))
- // Deferred reading failed
- return rc;
-
- // Recover the memory used for parsing
- PlugSubSet(G->Sarea, G->Sarea_Size);
-
- if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) {
- Row = FindRow(g);
- SameRow = 0;
- Fpos++;
- M = 1;
- rc = RC_OK;
- } else if (Pretty != 1 || strcmp(To_Line, "]")) {
- strcpy(g->Message, G->Message);
- rc = RC_FX;
- } else
- rc = RC_EF;
+ jsp = (PJSON)To_Line;
+ swp = new(g) SWAP(G, jsp);
+ swp->SwapJson(jsp, false); // Restore pointers from offsets
+ Row = jsp;
+ Row = FindRow(g);
+ SameRow = 0;
+ Fpos++;
+ M = 1;
+ rc = RC_OK;
+ } // endif Pretty
- } // endif ReadDB
+ } // endif ReadDB
- return rc;
- } // end of ReadDB
+ return rc;
+} // end of ReadDB
/***********************************************************************/
/* Make the top tree from the object path. */
@@ -1040,7 +1151,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
val->SetValue(objp);
val = new(g) JVALUE;
- objp->SetValue(g, val, objpath);
+ objp->SetKeyValue(g, val, objpath);
} else {
if (*objpath == '[') {
// Old style
@@ -1062,7 +1173,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
val = new(g) JVALUE;
i = atoi(objpath) - B;
- arp->SetValue(g, val, i);
+ arp->SetArrayValue(g, val, i);
arp->InitArray(g);
} // endif objpath
@@ -1081,8 +1192,8 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
/***********************************************************************/
/* PrepareWriting: Prepare the line for WriteDB. */
/***********************************************************************/
- bool TDBJSN::PrepareWriting(PGLOBAL g)
- {
+bool TDBJSN::PrepareWriting(PGLOBAL g)
+{
PSZ s;
if (MakeTopTree(g, Row))
@@ -1103,7 +1214,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp)
} else
return true;
- } // end of PrepareWriting
+} // end of PrepareWriting
/***********************************************************************/
/* WriteDB: Data Base write routine for JSON access method. */
@@ -1117,7 +1228,16 @@ int TDBJSN::WriteDB(PGLOBAL g)
return rc;
} // end of WriteDB
-/* ---------------------------- JSONCOL ------------------------------ */
+/***********************************************************************/
+/* Data Base close routine for JSON access method. */
+/***********************************************************************/
+void TDBJSN::CloseDB(PGLOBAL g)
+{
+ TDBDOS::CloseDB(g);
+ G = PlugExit(G);
+} // end of CloseDB
+
+ /* ---------------------------- JSONCOL ------------------------------ */
/***********************************************************************/
/* JSONCOL public constructor. */
@@ -1125,7 +1245,7 @@ int TDBJSN::WriteDB(PGLOBAL g)
JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
: DOSCOL(g, cdp, tdbp, cprec, i, "DOS")
{
- Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp);
+ Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp);
G = Tjp->G;
Jpath = cdp->GetFmt();
MulVal = NULL;
@@ -1135,6 +1255,7 @@ JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i)
Xnod = -1;
Xpd = false;
Parsed = false;
+ Warned = false;
} // end of JSONCOL constructor
/***********************************************************************/
@@ -1153,13 +1274,14 @@ JSONCOL::JSONCOL(JSONCOL *col1, PTDB tdbp) : DOSCOL(col1, tdbp)
Xnod = col1->Xnod;
Xpd = col1->Xpd;
Parsed = col1->Parsed;
+ Warned = col1->Warned;
} // end of JSONCOL copy constructor
/***********************************************************************/
/* SetBuffer: prepare a column block for write operation. */
/***********************************************************************/
bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
- {
+{
if (DOSCOL::SetBuffer(g, value, ok, check))
return true;
@@ -1170,13 +1292,13 @@ bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check)
Tjp = (TDBJSN*)To_Tdb;
G = Tjp->G;
return false;
- } // end of SetBuffer
+} // end of SetBuffer
/***********************************************************************/
/* Check whether this object is expanded. */
/***********************************************************************/
bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
- {
+{
if ((Tjp->Xcol && nm && !strcmp(nm, Tjp->Xcol) &&
(Tjp->Xval < 0 || Tjp->Xval == i)) || Xpd) {
Xpd = true; // Expandable object
@@ -1187,7 +1309,7 @@ bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b)
} // endif Xcol
return false;
- } // end of CheckExpand
+} // end of CheckExpand
/***********************************************************************/
/* Analyse array processing options. */
@@ -1487,7 +1609,14 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
{
if (Value->IsTypeNum()) {
strcpy(g->Message, "Cannot make Json for a numeric column");
+
+ if (!Warned) {
+ PushWarning(g, Tjp);
+ Warned = true;
+ } // endif Warned
+
Value->Reset();
+#if 0
} else if (Value->GetType() == TYPE_BIN) {
if ((unsigned)Value->GetClen() >= sizeof(BSON)) {
ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500;
@@ -1499,41 +1628,67 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
strcpy(g->Message, "Column size too small");
Value->SetValue_char(NULL, 0);
} // endif Clen
+#endif // 0
} else
Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
return Value;
- } // end of MakeJson
+} // end of MakeJson
/***********************************************************************/
/* SetValue: Set a value from a JVALUE contains. */
/***********************************************************************/
-void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
- {
- if (val) {
+void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL jvp)
+{
+ if (jvp) {
vp->SetNull(false);
- switch (val->GetValType()) {
+ switch (jvp->GetValType()) {
case TYPE_STRG:
case TYPE_INTG:
case TYPE_BINT:
case TYPE_DBL:
case TYPE_DTM:
- vp->SetValue_pval(val->GetValue());
+ switch (vp->GetType()) {
+ case TYPE_STRING:
+ case TYPE_DATE:
+ vp->SetValue_psz(jvp->GetString(g));
+ break;
+ case TYPE_INT:
+ case TYPE_SHORT:
+ case TYPE_TINY:
+ vp->SetValue(jvp->GetInteger());
+ break;
+ case TYPE_BIGINT:
+ vp->SetValue(jvp->GetBigint());
+ break;
+ case TYPE_DOUBLE:
+ vp->SetValue(jvp->GetFloat());
+
+ if (jvp->GetValType() == TYPE_DBL)
+ vp->SetPrec(jvp->Nd);
+
+ break;
+ default:
+ sprintf(g->Message, "Unsupported column type %d\n", vp->GetType());
+ throw 888;
+ } // endswitch Type
+
break;
case TYPE_BOOL:
if (vp->IsTypeNum())
- vp->SetValue(val->GetInteger() ? 1 : 0);
+ vp->SetValue(jvp->GetInteger() ? 1 : 0);
else
- vp->SetValue_psz((PSZ)(val->GetInteger() ? "true" : "false"));
+ vp->SetValue_psz((PSZ)(jvp->GetInteger() ? "true" : "false"));
break;
case TYPE_JAR:
- SetJsonValue(g, vp, val->GetArray()->GetValue(0), n);
- break;
+// SetJsonValue(g, vp, val->GetArray()->GetValue(0));
+ vp->SetValue_psz(jvp->GetArray()->GetText(g, NULL));
+ break;
case TYPE_JOB:
// if (!vp->IsTypeNum() || !Strict) {
- vp->SetValue_psz(val->GetObject()->GetText(g, NULL));
+ vp->SetValue_psz(jvp->GetObject()->GetText(g, NULL));
break;
// } // endif Type
@@ -1547,37 +1702,37 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n)
vp->SetNull(true);
} // endif val
- } // end of SetJsonValue
+} // end of SetJsonValue
/***********************************************************************/
/* ReadColumn: */
/***********************************************************************/
void JSONCOL::ReadColumn(PGLOBAL g)
- {
+{
if (!Tjp->SameRow || Xnod >= Tjp->SameRow)
Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0));
- if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept)
- throw("Null expandable JSON value");
+// if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept)
+// throw("Null expandable JSON value");
// Set null when applicable
if (!Nullable)
Value->SetNull(false);
- } // end of ReadColumn
+} // end of ReadColumn
/***********************************************************************/
/* GetColumnValue: */
/***********************************************************************/
PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
- {
+{
int n = Nod - 1;
PJAR arp;
PJVAL val = NULL;
for (; i < Nod && row; i++) {
if (Nodes[i].Op == OP_NUM) {
- Value->SetValue(row->GetType() == TYPE_JAR ? row->size() : 1);
+ Value->SetValue(row->GetType() == TYPE_JAR ? ((PJAR)row)->size() : 1);
return(Value);
} else if (Nodes[i].Op == OP_XX) {
return MakeJson(G, row);
@@ -1591,7 +1746,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
val = new(G) JVALUE(row);
} else
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
@@ -1599,7 +1754,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
if (!Nodes[i].Key) {
if (Nodes[i].Op == OP_EQ)
- val = arp->GetValue(Nodes[i].Rank);
+ val = arp->GetArrayValue(Nodes[i].Rank);
else if (Nodes[i].Op == OP_EXP)
return ExpandArray(g, arp, i);
else
@@ -1607,7 +1762,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
} else {
// Unexpected array, unwrap it as [0]
- val = arp->GetValue(0);
+ val = arp->GetArrayValue(0);
i--;
} // endif's
@@ -1625,15 +1780,15 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
} // endfor i
- SetJsonValue(g, Value, val, n);
+ SetJsonValue(g, Value, val);
return Value;
- } // end of GetColumnValue
+} // end of GetColumnValue
/***********************************************************************/
/* ExpandArray: */
/***********************************************************************/
PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n)
- {
+{
int ars = MY_MIN(Tjp->Limit, arp->size());
PJVAL jvp;
JVALUE jval;
@@ -1645,13 +1800,13 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n)
return Value;
} // endif ars
- if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) {
+ if (!(jvp = arp->GetArrayValue((Nodes[n].Rx = Nodes[n].Nx)))) {
strcpy(g->Message, "Logical error expanding array");
throw 666;
} // endif jvp
if (n < Nod - 1 && jvp->GetJson()) {
- jval.SetValue(GetColumnValue(g, jvp->GetJson(), n + 1));
+ jval.SetValue(g, GetColumnValue(g, jvp->GetJson(), n + 1));
jvp = &jval;
} // endif n
@@ -1665,15 +1820,15 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n)
Tjp->NextSame = Xnod;
} // endif NextSame
- SetJsonValue(g, Value, jvp, n);
+ SetJsonValue(g, Value, jvp);
return Value;
- } // end of ExpandArray
+} // end of ExpandArray
/***********************************************************************/
/* CalculateArray: */
/***********************************************************************/
PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n)
- {
+{
int i, ars, nv = 0, nextsame = Tjp->NextSame;
bool err;
OPVAL op = Nodes[n].Op;
@@ -1689,18 +1844,19 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n)
ars, op, nextsame);
for (i = 0; i < ars; i++) {
- jvrp = arp->GetValue(i);
+ jvrp = arp->GetArrayValue(i);
if (trace(1))
htrc("i=%d nv=%d\n", i, nv);
if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) do {
if (jvrp->IsNull()) {
- jvrp->Value = AllocateValue(g, GetJsonNull(), TYPE_STRING);
+ jvrp->Strp = PlugDup(g, GetJsonNull());
+ jvrp->DataType = TYPE_STRG;
jvp = jvrp;
} else if (n < Nod - 1 && jvrp->GetJson()) {
Tjp->NextSame = nextsame;
- jval.SetValue(GetColumnValue(g, jvrp->GetJson(), n + 1));
+ jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1));
jvp = &jval;
} else
jvp = jvrp;
@@ -1710,10 +1866,10 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n)
jvp->GetString(g), jvp->IsNull() ? 1 : 0);
if (!nv++) {
- SetJsonValue(g, vp, jvp, n);
+ SetJsonValue(g, vp, jvp);
continue;
} else
- SetJsonValue(g, MulVal, jvp, n);
+ SetJsonValue(g, MulVal, jvp);
if (!MulVal->IsNull()) {
switch (op) {
@@ -1768,19 +1924,19 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n)
Tjp->NextSame = nextsame;
return vp;
- } // end of CalculateArray
+} // end of CalculateArray
/***********************************************************************/
/* GetRow: Get the object containing this column. */
/***********************************************************************/
PJSON JSONCOL::GetRow(PGLOBAL g)
- {
+{
PJVAL val = NULL;
PJAR arp;
PJSON nwr, row = Tjp->Row;
for (int i = 0; i < Nod && row; i++) {
- if (Nodes[i+1].Op == OP_XX)
+ if (i < Nod-1 && Nodes[i+1].Op == OP_XX)
break;
else switch (row->GetType()) {
case TYPE_JOB:
@@ -1788,20 +1944,20 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
// Expected Array was not there, wrap the value
continue;
- val = ((PJOB)row)->GetValue(Nodes[i].Key);
+ val = ((PJOB)row)->GetKeyValue(Nodes[i].Key);
break;
case TYPE_JAR:
arp = (PJAR)row;
if (!Nodes[i].Key) {
if (Nodes[i].Op == OP_EQ)
- val = arp->GetValue(Nodes[i].Rank);
+ val = arp->GetArrayValue(Nodes[i].Rank);
else
- val = arp->GetValue(Nodes[i].Rx);
+ val = arp->GetArrayValue(Nodes[i].Rx);
} else {
// Unexpected array, unwrap it as [0]
- val = arp->GetValue(0);
+ val = arp->GetArrayValue(0);
i--;
} // endif Nodes
@@ -1828,9 +1984,9 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
nwr = new(G) JOBJECT;
if (row->GetType() == TYPE_JOB) {
- ((PJOB)row)->SetValue(G, new(G) JVALUE(nwr), Nodes[i-1].Key);
+ ((PJOB)row)->SetKeyValue(G, new(G) JVALUE(nwr), Nodes[i-1].Key);
} else if (row->GetType() == TYPE_JAR) {
- ((PJAR)row)->AddValue(G, new(G) JVALUE(nwr));
+ ((PJAR)row)->AddArrayValue(G, new(G) JVALUE(nwr));
((PJAR)row)->InitArray(G);
} else {
strcpy(g->Message, "Wrong type when writing new row");
@@ -1846,13 +2002,13 @@ PJSON JSONCOL::GetRow(PGLOBAL g)
} // endfor i
return row;
- } // end of GetRow
+} // end of GetRow
/***********************************************************************/
/* WriteColumn: */
/***********************************************************************/
void JSONCOL::WriteColumn(PGLOBAL g)
- {
+{
if (Xpd && Tjp->Pretty < 2) {
strcpy(g->Message, "Cannot write expanded column when Pretty is not 2");
throw 666;
@@ -1888,21 +2044,21 @@ void JSONCOL::WriteColumn(PGLOBAL g)
if (Nodes[Nod-1].Op == OP_XX) {
s = Value->GetCharValue();
- if (!(jsp = ParseJson(G, s, (int)strlen(s)))) {
+ if (!(jsp = ParseJson(G, s, strlen(s)))) {
strcpy(g->Message, s);
throw 666;
} // endif jsp
if (arp) {
if (Nod > 1 && Nodes[Nod-2].Op == OP_EQ)
- arp->SetValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Rank);
+ arp->SetArrayValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Rank);
else
- arp->AddValue(G, new(G) JVALUE(jsp));
+ arp->AddArrayValue(G, new(G) JVALUE(jsp));
arp->InitArray(G);
} else if (objp) {
if (Nod > 1 && Nodes[Nod-2].Key)
- objp->SetValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Key);
+ objp->SetKeyValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Key);
} else if (jvp)
jvp->SetValue(jsp);
@@ -1919,24 +2075,24 @@ void JSONCOL::WriteColumn(PGLOBAL g)
case TYPE_DOUBLE:
if (arp) {
if (Nodes[Nod-1].Op == OP_EQ)
- arp->SetValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Rank);
+ arp->SetArrayValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Rank);
else
- arp->AddValue(G, new(G) JVALUE(G, Value));
+ arp->AddArrayValue(G, new(G) JVALUE(G, Value));
arp->InitArray(G);
} else if (objp) {
if (Nodes[Nod-1].Key)
- objp->SetValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Key);
+ objp->SetKeyValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Key);
} else if (jvp)
- jvp->SetValue(Value);
+ jvp->SetValue(g, Value);
break;
default: // ??????????
sprintf(g->Message, "Invalid column type %d", Buf_Type);
} // endswitch Type
- } // end of WriteColumn
+} // end of WriteColumn
/* -------------------------- Class TDBJSON -------------------------- */
@@ -1944,23 +2100,23 @@ void JSONCOL::WriteColumn(PGLOBAL g)
/* Implementation of the TDBJSON class. */
/***********************************************************************/
TDBJSON::TDBJSON(PJDEF tdp, PTXF txfp) : TDBJSN(tdp, txfp)
- {
+{
Doc = NULL;
Multiple = tdp->Multiple;
Done = Changed = false;
- } // end of TDBJSON standard constructor
+} // end of TDBJSON standard constructor
TDBJSON::TDBJSON(PJTDB tdbp) : TDBJSN(tdbp)
- {
+{
Doc = tdbp->Doc;
Multiple = tdbp->Multiple;
Done = tdbp->Done;
Changed = tdbp->Changed;
- } // end of TDBJSON copy constructor
+} // end of TDBJSON copy constructor
// Used for update
PTDB TDBJSON::Clone(PTABS t)
- {
+{
PTDB tp;
PJCOL cp1, cp2;
PGLOBAL g = t->G;
@@ -1973,13 +2129,13 @@ PTDB TDBJSON::Clone(PTABS t)
} // endfor cp1
return tp;
- } // end of Clone
+} // end of Clone
/***********************************************************************/
/* Make the document tree from the object path. */
/***********************************************************************/
int TDBJSON::MakeNewDoc(PGLOBAL g)
- {
+{
// Create a void table that will be populated
Doc = new(g) JARRAY;
@@ -1988,15 +2144,16 @@ int TDBJSON::MakeNewDoc(PGLOBAL g)
Done = true;
return RC_OK;
- } // end of MakeNewDoc
+} // end of MakeNewDoc
/***********************************************************************/
/* Make the document tree from a file. */
/***********************************************************************/
int TDBJSON::MakeDocument(PGLOBAL g)
- {
+{
char *p, *p1, *p2, *memory, *objpath, *key = NULL;
- int len, i = 0;
+ int i = 0;
+ size_t len;
my_bool a;
MODE mode = Mode;
PJSON jsp;
@@ -2075,7 +2232,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
key = p;
objp = jsp->GetObject();
arp = NULL;
- val = objp->GetValue(key);
+ val = objp->GetKeyValue(key);
if (!val || !(jsp = val->GetJson())) {
sprintf(g->Message, "Cannot find object key %s", key);
@@ -2101,7 +2258,7 @@ int TDBJSON::MakeDocument(PGLOBAL g)
arp = jsp->GetArray();
objp = NULL;
i = atoi(p) - B;
- val = arp->GetValue(i);
+ val = arp->GetArrayValue(i);
if (!val) {
sprintf(g->Message, "Cannot find array value %d", i);
@@ -2122,17 +2279,17 @@ int TDBJSON::MakeDocument(PGLOBAL g)
Doc = new(g) JARRAY;
if (val) {
- Doc->AddValue(g, val);
+ Doc->AddArrayValue(g, val);
Doc->InitArray(g);
} else if (jsp) {
- Doc->AddValue(g, new(g) JVALUE(jsp));
+ Doc->AddArrayValue(g, new(g) JVALUE(jsp));
Doc->InitArray(g);
} // endif val
if (objp)
- objp->SetValue(g, new(g) JVALUE(Doc), key);
+ objp->SetKeyValue(g, new(g) JVALUE(Doc), key);
else if (arp)
- arp->SetValue(g, new(g) JVALUE(Doc), i);
+ arp->SetArrayValue(g, new(g) JVALUE(Doc), i);
else
Top = Doc;
@@ -2140,13 +2297,13 @@ int TDBJSON::MakeDocument(PGLOBAL g)
Done = true;
return RC_OK;
- } // end of MakeDocument
+} // end of MakeDocument
/***********************************************************************/
/* JSON Cardinality: returns table size in number of rows. */
/***********************************************************************/
int TDBJSON::Cardinality(PGLOBAL g)
- {
+{
if (!g)
return (Xcol || Multiple) ? 0 : 1;
else if (Cardinal < 0)
@@ -2159,48 +2316,48 @@ int TDBJSON::Cardinality(PGLOBAL g)
return 10;
}
return Cardinal;
- } // end of Cardinality
+} // end of Cardinality
/***********************************************************************/
/* JSON GetMaxSize: returns table size estimate in number of rows. */
/***********************************************************************/
int TDBJSON::GetMaxSize(PGLOBAL g)
- {
+{
if (MaxSize < 0)
MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1);
return MaxSize;
- } // end of GetMaxSize
+} // end of GetMaxSize
/***********************************************************************/
/* ResetSize: call by TDBMUL when calculating size estimate. */
/***********************************************************************/
void TDBJSON::ResetSize(void)
- {
+{
MaxSize = Cardinal = -1;
Fpos = -1;
N = 0;
Done = false;
- } // end of ResetSize
+} // end of ResetSize
/***********************************************************************/
/* TDBJSON is not indexable. */
/***********************************************************************/
int TDBJSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool)
- {
+{
if (pxdf) {
strcpy(g->Message, "JSON not indexable when pretty = 2");
return RC_FX;
} else
return RC_OK;
- } // end of MakeIndex
+} // end of MakeIndex
/***********************************************************************/
/* Return the position in the table. */
/***********************************************************************/
int TDBJSON::GetRecpos(void)
- {
+{
#if 0
union {
uint Rpos;
@@ -2212,13 +2369,13 @@ int TDBJSON::GetRecpos(void)
return Rpos;
#endif // 0
return Fpos;
- } // end of GetRecpos
+} // end of GetRecpos
/***********************************************************************/
/* Set the position in the table. */
/***********************************************************************/
bool TDBJSON::SetRecpos(PGLOBAL, int recpos)
- {
+{
#if 0
union {
uint Rpos;
@@ -2239,13 +2396,13 @@ bool TDBJSON::SetRecpos(PGLOBAL, int recpos)
Fpos = recpos - 1;
return false;
- } // end of SetRecpos
+} // end of SetRecpos
/***********************************************************************/
/* JSON Access Method opening routine. */
/***********************************************************************/
bool TDBJSON::OpenDB(PGLOBAL g)
- {
+{
if (Use == USE_OPEN) {
/*******************************************************************/
/* Table already open replace it at its beginning. */
@@ -2277,13 +2434,13 @@ bool TDBJSON::OpenDB(PGLOBAL g)
Use = USE_OPEN;
return false;
- } // end of OpenDB
+} // end of OpenDB
/***********************************************************************/
/* ReadDB: Data Base read routine for JSON access method. */
/***********************************************************************/
int TDBJSON::ReadDB(PGLOBAL)
- {
+{
int rc;
N++;
@@ -2294,61 +2451,61 @@ int TDBJSON::ReadDB(PGLOBAL)
M++;
rc = RC_OK;
} else if (++Fpos < (signed)Doc->size()) {
- Row = Doc->GetValue(Fpos);
+ Row = Doc->GetArrayValue(Fpos);
if (Row->GetType() == TYPE_JVAL)
Row = ((PJVAL)Row)->GetJson();
SameRow = 0;
M = 1;
- rc = RC_OK;
+ rc = RC_OK;
} else
rc = RC_EF;
return rc;
- } // end of ReadDB
+} // end of ReadDB
/***********************************************************************/
/* WriteDB: Data Base write routine for JSON access method. */
/***********************************************************************/
int TDBJSON::WriteDB(PGLOBAL g)
- {
+{
if (Jmode == MODE_OBJECT) {
PJVAL vp = new(g) JVALUE(Row);
if (Mode == MODE_INSERT) {
- Doc->AddValue(g, vp);
+ Doc->AddArrayValue(g, vp);
Row = new(g) JOBJECT;
- } else if (Doc->SetValue(g, vp, Fpos))
+ } else if (Doc->SetArrayValue(g, vp, Fpos))
return RC_FX;
} else if (Jmode == MODE_ARRAY) {
PJVAL vp = new(g) JVALUE(Row);
if (Mode == MODE_INSERT) {
- Doc->AddValue(g, vp);
+ Doc->AddArrayValue(g, vp);
Row = new(g) JARRAY;
- } else if (Doc->SetValue(g, vp, Fpos))
+ } else if (Doc->SetArrayValue(g, vp, Fpos))
return RC_FX;
} else { // if (Jmode == MODE_VALUE)
if (Mode == MODE_INSERT) {
- Doc->AddValue(g, (PJVAL)Row);
+ Doc->AddArrayValue(g, (PJVAL)Row);
Row = new(g) JVALUE;
- } else if (Doc->SetValue(g, (PJVAL)Row, Fpos))
+ } else if (Doc->SetArrayValue(g, (PJVAL)Row, Fpos))
return RC_FX;
} // endif Jmode
Changed = true;
return RC_OK;
- } // end of WriteDB
+} // end of WriteDB
/***********************************************************************/
/* Data Base delete line routine for JSON access method. */
/***********************************************************************/
int TDBJSON::DeleteDB(PGLOBAL g, int irc)
- {
+{
if (irc == RC_OK) {
// Deleted current row
if (Doc->DeleteValue(Fpos)) {
@@ -2365,13 +2522,13 @@ int TDBJSON::DeleteDB(PGLOBAL g, int irc)
} // endfor i
return RC_OK;
- } // end of DeleteDB
+} // end of DeleteDB
/***********************************************************************/
/* Data Base close routine for JSON access methods. */
/***********************************************************************/
void TDBJSON::CloseDB(PGLOBAL g)
- {
+{
if (!Changed)
return;
@@ -2387,7 +2544,7 @@ void TDBJSON::CloseDB(PGLOBAL g)
if (!Serialize(g, Top, filename, Pretty))
puts(g->Message);
- } // end of CloseDB
+} // end of CloseDB
/* ---------------------------TDBJCL class --------------------------- */
@@ -2395,18 +2552,18 @@ void TDBJSON::CloseDB(PGLOBAL g)
/* TDBJCL class constructor. */
/***********************************************************************/
TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp)
- {
+{
Topt = tdp->GetTopt();
Db = tdp->Schema;
Dsn = tdp->Uri;
- } // end of TDBJCL constructor
+} // end of TDBJCL constructor
/***********************************************************************/
/* GetResult: Get the list the JSON file columns. */
/***********************************************************************/
PQRYRES TDBJCL::GetResult(PGLOBAL g)
- {
+{
return JSONColumns(g, Db, Dsn, Topt, false);
- } // end of GetResult
+} // end of GetResult
/* --------------------------- End of json --------------------------- */
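Sketch: the reworked JSONCOL::SetJsonValue above now dispatches on both the JSON value type and the target column type instead of copying through a single PVAL. The standalone C++ below only illustrates that double dispatch; JsonVal and Column are invented stand-ins, not the CONNECT classes.

// Illustrative double dispatch, analogous to the new SetJsonValue switch.
#include <cstdio>
#include <stdexcept>
#include <string>

enum class JType { Str, Int, Dbl };
enum class CType { String, Int, BigInt, Double };

struct JsonVal {
  JType type;
  std::string s;
  long long   i  = 0;
  double      d  = 0.0;
  int         nd = 0;              // decimal digits, like JVALUE::Nd
};

struct Column {
  CType ctype;
  std::string str;
  long long   ival = 0;
  double      dval = 0.0;
  int         prec = 0;

  void Set(const JsonVal &jv) {
    switch (ctype) {
    case CType::String:            // string-ish targets take the textual form
      str = (jv.type == JType::Str) ? jv.s
          : (jv.type == JType::Int) ? std::to_string(jv.i)
                                    : std::to_string(jv.d);
      break;
    case CType::Int:
    case CType::BigInt:
      ival = (jv.type == JType::Int) ? jv.i
           : (jv.type == JType::Dbl) ? (long long)jv.d
                                     : std::stoll(jv.s);
      break;
    case CType::Double:
      dval = (jv.type == JType::Dbl) ? jv.d
           : (jv.type == JType::Int) ? (double)jv.i
                                     : std::stod(jv.s);
      if (jv.type == JType::Dbl)
        prec = jv.nd;              // keep the source precision, as the patch does
      break;
    default:
      throw std::runtime_error("Unsupported column type");
    }
  }
};

int main() {
  Column c{CType::Double};
  c.Set(JsonVal{JType::Dbl, "", 0, 3.25, 2});
  std::printf("%.*f\n", c.prec, c.dval);   // prints 3.25
}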
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index 88aa5e2ee8b..b47dc9b0665 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -5,6 +5,7 @@
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
+#pragma once
//#include "osutil.h" // Unuseful and bad for OEM
#include "block.h"
#include "colblk.h"
@@ -35,7 +36,7 @@ typedef struct _jncol {
struct _jncol *Next;
char *Name;
char *Fmt;
- int Type;
+ JTYP Type;
int Len;
int Scale;
bool Cbn;
@@ -58,7 +59,7 @@ public:
// Members
JCOL jcol;
PJCL jcp, fjcp, pjcp;
- PVAL valp;
+//PVL vlp;
PJDEF tdp;
TDBJSN *tjnp;
PJTDB tjsp;
@@ -68,7 +69,7 @@ public:
PCSZ sep;
char colname[65], fmt[129], buf[16];
uint *length;
- int i, n, bf, ncol, lvl, sz;
+ int i, n, bf, ncol, lvl, sz, limit;
bool all, strfy;
}; // end of JSONDISC
@@ -126,6 +127,7 @@ public:
class DllExport TDBJSN : public TDBDOS {
friend class JSONCOL;
friend class JSONDEF;
+ friend class JSONDISC;
#if defined(CMGO_SUPPORT)
friend class CMGFAM;
#endif // CMGO_SUPPORT
@@ -154,14 +156,18 @@ public:
{return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;}
// Database routines
- virtual int Cardinality(PGLOBAL g);
- virtual int GetMaxSize(PGLOBAL g);
+ //virtual int Cardinality(PGLOBAL g);
+ //virtual int GetMaxSize(PGLOBAL g);
virtual bool OpenDB(PGLOBAL g);
virtual int ReadDB(PGLOBAL g);
virtual bool PrepareWriting(PGLOBAL g);
virtual int WriteDB(PGLOBAL g);
+ virtual void CloseDB(PGLOBAL g);
- protected:
+ // Specific routine
+ virtual int EstimatedLength(void);
+
+protected:
PJSON FindRow(PGLOBAL g);
int MakeTopTree(PGLOBAL g, PJSON jsp);
@@ -169,7 +175,7 @@ public:
PGLOBAL G; // Support of parse memory
PJSON Top; // The top JSON tree
PJSON Row; // The current row
- PJSON Val; // The value of the current row
+ PJVAL Val; // The value of the current row
PJCOL Colp; // The multiple column
JMODE Jmode; // MODE_OBJECT by default
PCSZ Objname; // The table object name
@@ -186,7 +192,8 @@ public:
char Sep; // The Jpath separator
bool Strict; // Strict syntax checking
bool Comma; // Row has final comma
- }; // end of class TDBJSN
+ bool Xpdable; // False: expandable columns are NULL
+}; // end of class TDBJSN
/* -------------------------- JSONCOL class -------------------------- */
@@ -224,8 +231,8 @@ public:
PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
PVAL MakeJson(PGLOBAL g, PJSON jsp);
- void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n);
- PJSON GetRow(PGLOBAL g);
+ void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
+ PJSON GetRow(PGLOBAL g);
// Default constructor not to be used
JSONCOL(void) {}
@@ -241,7 +248,8 @@ public:
char Sep; // The Jpath separator
bool Xpd; // True for expandable column
bool Parsed; // True when parsed
- }; // end of class JSONCOL
+ bool Warned; // True when warning issued
+}; // end of class JSONCOL
/* -------------------------- TDBJSON class -------------------------- */
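Sketch: the new Warned member declared above backs the warn-once behaviour added to MakeJson in the .cpp hunks, where the first unsupported use pushes a warning and later rows stay silent. A generic version of that guard, with invented names:

// Warn-once guard, analogous to JSONCOL::Warned; names are illustrative.
#include <cstdio>

struct Col {
  bool warned = false;

  void warn_once(const char *msg) {
    if (!warned) {                 // only the first offending row reports
      std::fprintf(stderr, "Warning: %s\n", msg);
      warned = true;
    }
  }
};

int main() {
  Col c;
  for (int row = 0; row < 3; row++)
    c.warn_once("Cannot make Json for a numeric column");  // printed once
}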
diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp
index b1bdeffc880..1efda6e3bca 100644
--- a/storage/connect/tabrest.cpp
+++ b/storage/connect/tabrest.cpp
@@ -1,8 +1,11 @@
/************** tabrest C++ Program Source Code File (.CPP) ************/
-/* PROGRAM NAME: tabrest Version 1.7 */
-/* (C) Copyright to the author Olivier BERTRAND 2018 - 2019 */
+/* PROGRAM NAME: tabrest Version 1.8 */
+/* (C) Copyright to the author Olivier BERTRAND 2018 - 2020 */
/* This program is the REST Web API support for MariaDB. */
/* When compiled without MARIADB defined, it is the EOM module code. */
+/* The way Connect handles NOSQL data returned by REST queries is */
+/* simply to retrieve it as a file and then let the existing data */
+/* type tables (JSON, XML or CSV) process it as usual. */
/***********************************************************************/
/***********************************************************************/
@@ -10,6 +13,8 @@
/***********************************************************************/
#if defined(MARIADB)
#include <my_global.h> // All MariaDB stuff
+#include <mysqld.h>
+#include <sql_error.h>
#else // !MARIADB OEM module
#include "mini-global.h"
#define _MAX_PATH 260
@@ -42,7 +47,19 @@
#include "tabfmt.h"
#include "tabrest.h"
+#if defined(connect_EXPORTS)
+#define PUSH_WARNING(M) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, M)
+#else
+#define PUSH_WARNING(M) htrc(M)
+#endif
+
+#if defined(__WIN__) || defined(_WINDOWS)
+#define popen _popen
+#define pclose _pclose
+#endif
+
static XGETREST getRestFnc = NULL;
+static int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename);
#if !defined(MARIADB)
/***********************************************************************/
@@ -72,7 +89,41 @@ PTABDEF __stdcall GetREST(PGLOBAL g, void *memp)
#endif // !MARIADB
/***********************************************************************/
-/* GetREST: get the external TABDEF from OEM module. */
+/* Xcurl: retrieve the REST answer by executing cURL. */
+/***********************************************************************/
+int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename)
+{
+ char buf[1024];
+ int rc;
+ FILE *pipe;
+
+ if (Uri) {
+ if (*Uri == '/' || Http[strlen(Http) - 1] == '/')
+ sprintf(buf, "curl %s%s -o %s", Http, Uri, filename);
+ else
+ sprintf(buf, "curl %s/%s -o %s", Http, Uri, filename);
+
+ } else
+ sprintf(buf, "curl %s -o %s", Http, filename);
+
+ if ((pipe = popen(buf, "rt"))) {
+ if (trace(515))
+ while (fgets(buf, sizeof(buf), pipe)) {
+ htrc("%s", buf);
+ } // endwhile
+
+ pclose(pipe);
+ rc = 0;
+ } else {
+ sprintf(g->Message, "curl failed, errno =%d", errno);
+ rc = 1;
+ } // endif pipe
+
+ return rc;
+} // end of Xcurl
+
+/***********************************************************************/
+/* GetREST: load the Rest lib and get the Rest function. */
/***********************************************************************/
XGETREST GetRestFunction(PGLOBAL g)
{
@@ -148,13 +199,15 @@ PQRYRES RESTColumns(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
#endif // !MARIADB
{
- PQRYRES qrp= NULL;
- char filename[_MAX_PATH + 1]; // MAX PATH ???
- PCSZ http, uri, fn, ftype;
+ PQRYRES qrp= NULL;
+ char filename[_MAX_PATH + 1]; // MAX PATH ???
+ int rc;
+ bool curl = false;
+ PCSZ http, uri, fn, ftype;
XGETREST grf = GetRestFunction(g);
if (!grf)
- return NULL;
+ curl = true;
http = GetStringTableOption(g, tp, "Http", NULL);
uri = GetStringTableOption(g, tp, "Uri", NULL);
@@ -178,17 +231,27 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
fn = filename;
tp->filename = PlugDup(g, fn);
+ sprintf(g->Message, "No file name. Table will use %s", fn);
+ PUSH_WARNING(g->Message);
} // endif fn
// We used the file name relative to recorded datapath
PlugSetPath(filename, fn, db);
- //strcat(strcat(strcat(strcpy(filename, "."), slash), db), slash);
- //strncat(filename, fn, _MAX_PATH - strlen(filename));
+ curl = GetBooleanTableOption(g, tp, "Curl", curl);
// Retrieve the file from the web and copy it locally
- if (http && grf(g->Message, trace(515), http, uri, filename)) {
- // sprintf(g->Message, "Failed to get file at %s", http);
- } else if (!stricmp(ftype, "JSON"))
+ if (curl)
+ rc = Xcurl(g, http, uri, filename);
+ else if (grf)
+ rc = grf(g->Message, trace(515), http, uri, filename);
+ else {
+ strcpy(g->Message, "Cannot access to curl nor casablanca");
+ rc = 1;
+ } // endif !grf
+
+ if (rc)
+ return NULL;
+ else if (!stricmp(ftype, "JSON"))
qrp = JSONColumns(g, db, NULL, tp, info);
else if (!stricmp(ftype, "CSV"))
qrp = CSVColumns(g, NULL, tp, info);
@@ -209,14 +272,14 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
/***********************************************************************/
bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
- char filename[_MAX_PATH + 1];
- int rc = 0, n;
- bool xt = trace(515);
- LPCSTR ftype;
+ char filename[_MAX_PATH + 1];
+ int rc = 0, n;
+ bool curl = false, xt = trace(515);
+ LPCSTR ftype;
XGETREST grf = GetRestFunction(g);
if (!grf)
- return true;
+ curl = true;
#if defined(MARIADB)
ftype = GetStringCatInfo(g, "Type", "JSON");
@@ -235,8 +298,8 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
: (!stricmp(ftype, "CSV")) ? 3 : 0;
if (n == 0) {
- htrc("DefineAM: Unsupported REST table type %s", am);
- sprintf(g->Message, "Unsupported REST table type %s", am);
+ htrc("DefineAM: Unsupported REST table type %s\n", ftype);
+ sprintf(g->Message, "Unsupported REST table type %s", ftype);
return true;
} // endif n
@@ -247,11 +310,19 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
// We used the file name relative to recorded datapath
PlugSetPath(filename, Fn, GetPath());
- // Retrieve the file from the web and copy it locally
- rc = grf(g->Message, xt, Http, Uri, filename);
+ curl = GetBoolCatInfo("Curl", curl);
- if (xt)
- htrc("Return from restGetFile: rc=%d\n", rc);
+ // Retrieve the file from the web and copy it locally
+ if (curl) {
+ rc = Xcurl(g, Http, Uri, filename);
+ xtrc(515, "Return from Xcurl: rc=%d\n", rc);
+ } else if (grf) {
+ rc = grf(g->Message, xt, Http, Uri, filename);
+ xtrc(515, "Return from restGetFile: rc=%d\n", rc);
+ } else {
+ strcpy(g->Message, "Cannot access to curl nor casablanca");
+ rc = 1;
+ } // endif !grf
if (rc)
return true;
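Sketch: the tabrest.cpp changes fall back to an external curl process whenever the casablanca-based restGetFile function is unavailable (or the Curl option is set). The essence of that fallback is a composed command line run through popen; the trimmed, standalone C++ below mirrors the shape of Xcurl, with the URL and output path as illustrative values only.

// Standalone sketch of the popen-based curl fallback used by Xcurl.
#include <cstdio>
#include <cstring>
#include <cerrno>

#if defined(_WIN32)
#define popen  _popen
#define pclose _pclose
#endif

static int fetch_with_curl(const char *http, const char *uri, const char *out)
{
  char cmd[1024];

  if (uri)                                   // join Http and Uri with one '/'
    std::snprintf(cmd, sizeof(cmd), "curl %s%s%s -o %s", http,
                  (uri[0] == '/' || http[std::strlen(http) - 1] == '/') ? "" : "/",
                  uri, out);
  else
    std::snprintf(cmd, sizeof(cmd), "curl %s -o %s", http, out);

  FILE *pipe = popen(cmd, "r");
  if (!pipe) {
    std::fprintf(stderr, "curl failed, errno=%d\n", errno);
    return 1;
  }

  char buf[256];
  while (std::fgets(buf, sizeof(buf), pipe)) // drain curl's output (trace only)
    ;
  return pclose(pipe) ? 1 : 0;
}

int main() {
  return fetch_with_curl("http://example.com", "data.json", "rest.json");
}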
diff --git a/storage/connect/tabrest.h b/storage/connect/tabrest.h
index f08ac7984c9..9cf2d10a6b8 100644
--- a/storage/connect/tabrest.h
+++ b/storage/connect/tabrest.h
@@ -5,7 +5,10 @@
/***********************************************************************/
#pragma once
-#ifndef __WIN__
+#if defined(__WIN__)
+static PCSZ slash = "\\";
+#else // !__WIN__
+static PCSZ slash = "/";
#define stricmp strcasecmp
#endif // !__WIN__
diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc
index c8f38b68015..5268651d080 100644
--- a/storage/connect/user_connect.cc
+++ b/storage/connect/user_connect.cc
@@ -112,8 +112,7 @@ bool user_connect::user_init()
if (g)
printf("%s\n", g->Message);
- int rc __attribute__((unused))= PlugExit(g);
- g= NULL;
+ g= PlugExit(g);
if (dup)
free(dup);
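Sketch: both this hunk and the new TDBJSN::CloseDB above replace "call PlugExit, then null the pointer by hand" with reassigning the pointer from PlugExit's return value, which suggests PlugExit now hands back the resulting PGLOBAL (normally NULL). A tiny illustration of that release-and-reassign idiom with an invented resource type:

// Release-and-reassign idiom, as in g = PlugExit(g). Types are invented.
#include <cstdlib>

struct Area { char *mem; };

// Frees the area and returns the value the caller should keep (NULL here),
// so the caller cannot forget to clear its pointer.
static Area *area_exit(Area *a)
{
  if (a) {
    std::free(a->mem);
    std::free(a);
  }
  return nullptr;
}

int main() {
  Area *g = static_cast<Area*>(std::calloc(1, sizeof(Area)));
  g->mem = static_cast<char*>(std::malloc(64));
  g = area_exit(g);         // one statement releases and clears the handle
  return g == nullptr ? 0 : 1;
}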
diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp
index 5951b26e81e..412cb808936 100644
--- a/storage/connect/value.cpp
+++ b/storage/connect/value.cpp
@@ -197,7 +197,7 @@ const char *GetFormatType(int type)
case TYPE_DOUBLE: c = "F"; break;
case TYPE_DATE: c = "D"; break;
case TYPE_TINY: c = "T"; break;
- case TYPE_DECIM: c = "M"; break;
+ case TYPE_DECIM: c = "F"; break;
case TYPE_BIN: c = "B"; break;
case TYPE_PCHAR: c = "P"; break;
} // endswitch type
@@ -380,8 +380,8 @@ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec,
case TYPE_STRING:
valp = new(g) TYPVAL<PSZ>(g, (PSZ)NULL, len, prec);
break;
- case TYPE_DATE:
- valp = new(g) DTVAL(g, len, prec, fmt);
+ case TYPE_DATE:
+ valp = new(g) DTVAL(g, len, prec, fmt);
break;
case TYPE_INT:
if (uns)
diff --git a/storage/connect/value.h b/storage/connect/value.h
index ee7a1c8032f..df6a55501b6 100644
--- a/storage/connect/value.h
+++ b/storage/connect/value.h
@@ -65,7 +65,8 @@ DllExport BYTE OpBmp(PGLOBAL g, OPVAL opc);
/***********************************************************************/
class DllExport VALUE : public BLOCK {
friend class CONSTANT; // The only object allowed to use SetConstFormat
- public:
+ friend class SWAP; // The only class allowed to access protected members
+public:
// Constructors
// Implementation
@@ -260,7 +261,8 @@ class DllExport TYPVAL : public VALUE {
/***********************************************************************/
template <>
class DllExport TYPVAL<PSZ>: public VALUE {
- public:
+ friend class SWAP; // The only class allowed to offset Strg
+public:
// Constructors
TYPVAL(PSZ s, short c = 0);
TYPVAL(PGLOBAL g, PSZ s, int n, int c);
@@ -346,7 +348,8 @@ class DllExport DECVAL: public TYPVAL<PSZ> {
/* Specific BINARY class. */
/***********************************************************************/
class DllExport BINVAL: public VALUE {
- public:
+ friend class SWAP; // The only class allowed to offset pointers
+public:
// Constructors
//BINVAL(void *p);
BINVAL(PGLOBAL g, void *p, int cl, int n);
@@ -415,7 +418,8 @@ class DllExport DTVAL : public TYPVAL<int> {
virtual bool SetValue_char(const char *p, int n);
virtual void SetValue_psz(PCSZ s);
virtual void SetValue_pvblk(PVBLK blk, int n);
- virtual char *GetCharString(char *p);
+ virtual PSZ GetCharValue(void) { return Sdate; }
+ virtual char *GetCharString(char *p);
virtual int ShowValue(char *buf, int len);
virtual bool FormatValue(PVAL vp, PCSZ fmt);
bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0);
diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h
index bc5912d3054..5b50e9320f5 100644
--- a/storage/connect/xobject.h
+++ b/storage/connect/xobject.h
@@ -130,6 +130,7 @@ class DllExport STRING : public BLOCK {
inline void SetLength(uint n) {Length = n;}
inline PSZ GetStr(void) {return Strp;}
inline uint32 GetSize(void) {return Size;}
+ inline char GetLastChar(void) {return Length ? Strp[Length - 1] : 0;}
inline bool IsTruncated(void) {return Trc;}
// Methods
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index badb515c2b2..6e7ee48d2eb 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -528,7 +528,7 @@ int ha_tina::encode_quote(const uchar *buf)
String attribute(attribute_buffer, sizeof(attribute_buffer),
&my_charset_bin);
bool ietf_quotes= table_share->option_struct->ietf_quotes;
- my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set);
buffer.length(0);
for (Field **field=table->field ; *field ; field++)
@@ -606,7 +606,7 @@ int ha_tina::encode_quote(const uchar *buf)
//buffer.replace(buffer.length(), 0, "\n", 1);
- dbug_tmp_restore_column_map(table->read_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->read_set, org_bitmap);
return (buffer.length());
}
@@ -659,7 +659,6 @@ int ha_tina::find_current_row(uchar *buf)
{
my_off_t end_offset, curr_offset= current_position;
int eoln_len;
- my_bitmap_map *org_bitmap;
int error;
bool read_all;
bool ietf_quotes= table_share->option_struct->ietf_quotes;
@@ -679,7 +678,7 @@ int ha_tina::find_current_row(uchar *buf)
/* We must read all columns in case a table is opened for update */
read_all= !bitmap_is_clear_all(table->write_set);
/* Avoid asserts in ::store() for columns that are not going to be updated */
- org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set);
error= HA_ERR_CRASHED_ON_USAGE;
memset(buf, 0, table->s->null_bytes);
@@ -857,7 +856,7 @@ int ha_tina::find_current_row(uchar *buf)
error= 0;
err:
- dbug_tmp_restore_column_map(table->write_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->write_set, org_bitmap);
DBUG_RETURN(error);
}
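Sketch: this hunk, and the federated/federatedx hunks below, track an API change where dbug_tmp_use_all_columns now takes the bitmap slot by address and returns the previous MY_BITMAP*, which is later handed back to dbug_tmp_restore_column_map. The save/switch/restore shape of that pattern, reduced to plain C++ with invented types (not the MariaDB API):

// Save/switch/restore pattern behind dbug_tmp_use_all_columns and
// dbug_tmp_restore_column_map; Bitmap and the helpers are invented here.
#include <cstdint>
#include <cstdio>

struct Bitmap { std::uint64_t bits; };

static Bitmap all_columns{~0ULL};

// Point *slot at the "all columns" map and return what it pointed to before.
static Bitmap *use_all_columns(Bitmap **slot)
{
  Bitmap *old = *slot;
  *slot = &all_columns;
  return old;
}

static void restore_column_map(Bitmap **slot, Bitmap *old)
{
  *slot = old;
}

int main() {
  Bitmap read_set{0x5};
  Bitmap *current = &read_set;

  Bitmap *org = use_all_columns(&current);   // widen for the row copy
  std::printf("%llx\n", (unsigned long long)current->bits);  // ffffffffffffffff
  restore_column_map(&current, org);         // narrow back before returning
  std::printf("%llx\n", (unsigned long long)current->bits);  // 5
}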
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index ec34cf16858..00407730c31 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -936,7 +936,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record,
{
ulong *lengths;
Field **field;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
DBUG_ENTER("ha_federated::convert_row_to_internal_format");
lengths= mysql_fetch_lengths(result);
@@ -965,7 +965,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record,
}
(*field)->move_field_offset(-old_ptr);
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(0);
}
@@ -1293,14 +1293,13 @@ bool ha_federated::create_where_from_key(String *to,
char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE];
String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
const key_range *ranges[2]= { start_key, end_key };
- my_bitmap_map *old_map;
DBUG_ENTER("ha_federated::create_where_from_key");
tmp.length(0);
if (start_key == NULL && end_key == NULL)
DBUG_RETURN(1);
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
for (uint i= 0; i <= 1; i++)
{
bool needs_quotes;
@@ -1477,7 +1476,7 @@ prepare_for_next_key_part:
tmp.c_ptr_quick()));
}
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
if (both_not_null)
if (tmp.append(STRING_WITH_LEN(") ")))
@@ -1492,7 +1491,7 @@ prepare_for_next_key_part:
DBUG_RETURN(0);
err:
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(1);
}
@@ -1841,7 +1840,7 @@ int ha_federated::write_row(const uchar *buf)
String insert_field_value_string(insert_field_value_buffer,
sizeof(insert_field_value_buffer),
&my_charset_bin);
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
DBUG_ENTER("ha_federated::write_row");
values_string.length(0);
@@ -1895,7 +1894,7 @@ int ha_federated::write_row(const uchar *buf)
values_string.append(STRING_WITH_LEN(", "));
}
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
/*
if there were no fields, we don't want to add a closing paren
@@ -2203,7 +2202,7 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data)
else
{
/* otherwise = */
- my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set);
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&field_value);
if (needs_quote)
@@ -2212,7 +2211,7 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data)
if (needs_quote)
update_string.append(value_quote_char);
field_value.length(0);
- tmp_restore_column_map(table->read_set, old_map);
+ tmp_restore_column_map(&table->read_set, old_map);
}
update_string.append(STRING_WITH_LEN(", "));
}
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index bc2cdef6509..de4e20ee112 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -871,7 +871,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record,
ulong *lengths;
Field **field;
int column= 0;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
Time_zone *saved_time_zone= table->in_use->variables.time_zone;
DBUG_ENTER("ha_federatedx::convert_row_to_internal_format");
@@ -900,7 +900,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record,
(*field)->move_field_offset(-old_ptr);
}
table->in_use->variables.time_zone= saved_time_zone;
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(0);
}
@@ -1229,7 +1229,6 @@ bool ha_federatedx::create_where_from_key(String *to,
String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info);
const key_range *ranges[2]= { start_key, end_key };
Time_zone *saved_time_zone= table->in_use->variables.time_zone;
- my_bitmap_map *old_map;
DBUG_ENTER("ha_federatedx::create_where_from_key");
tmp.length(0);
@@ -1237,7 +1236,7 @@ bool ha_federatedx::create_where_from_key(String *to,
DBUG_RETURN(1);
table->in_use->variables.time_zone= UTC;
- old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
for (uint i= 0; i <= 1; i++)
{
bool needs_quotes;
@@ -1413,7 +1412,7 @@ prepare_for_next_key_part:
tmp.c_ptr_quick()));
}
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
table->in_use->variables.time_zone= saved_time_zone;
if (both_not_null)
@@ -1429,7 +1428,7 @@ prepare_for_next_key_part:
DBUG_RETURN(0);
err:
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
table->in_use->variables.time_zone= saved_time_zone;
DBUG_RETURN(1);
}
@@ -2004,7 +2003,7 @@ int ha_federatedx::write_row(const uchar *buf)
sizeof(insert_field_value_buffer),
&my_charset_bin);
Time_zone *saved_time_zone= table->in_use->variables.time_zone;
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
DBUG_ENTER("ha_federatedx::write_row");
table->in_use->variables.time_zone= UTC;
@@ -2059,7 +2058,7 @@ int ha_federatedx::write_row(const uchar *buf)
values_string.append(STRING_WITH_LEN(", "));
}
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
table->in_use->variables.time_zone= saved_time_zone;
/*
@@ -2384,7 +2383,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data)
else
{
/* otherwise = */
- my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set);
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&field_value);
if (needs_quote)
@@ -2393,7 +2392,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data)
if (needs_quote)
update_string.append(value_quote_char);
field_value.length(0);
- tmp_restore_column_map(table->read_set, old_map);
+ tmp_restore_column_map(&table->read_set, old_map);
}
update_string.append(STRING_WITH_LEN(", "));
}
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 59c6d06d5af..041cd8cd1b4 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -3,7 +3,7 @@
Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -67,6 +67,9 @@ Created 10/16/1994 Heikki Tuuri
#include "srv0start.h"
#include "mysql_com.h"
#include "dict0stats.h"
+#ifdef WITH_WSREP
+#include "mysql/service_wsrep.h"
+#endif /* WITH_WSREP */
/** Buffered B-tree operation types, introduced as part of delete buffering. */
enum btr_op_t {
@@ -3288,7 +3291,8 @@ btr_cur_ins_lock_and_undo(
/* Check if there is predicate or GAP lock preventing the insertion */
if (!(flags & BTR_NO_LOCKING_FLAG)) {
- if (dict_index_is_spatial(index)) {
+ const unsigned type = index->type;
+ if (UNIV_UNLIKELY(type & DICT_SPATIAL)) {
lock_prdt_t prdt;
rtr_mbr_t mbr;
@@ -3305,9 +3309,30 @@ btr_cur_ins_lock_and_undo(
index, thr, mtr, &prdt);
*inherit = false;
} else {
+#ifdef WITH_WSREP
+ trx_t* trx= thr_get_trx(thr);
+ /* If transaction scanning an unique secondary
+ key is wsrep high priority thread (brute
+ force) this scanning may involve GAP-locking
+ in the index. As this locking happens also
+ when applying replication events in high
+ priority applier threads, there is a
+ probability for lock conflicts between two
+ wsrep high priority threads. To avoid this
+ GAP-locking we mark that this transaction
+ is using unique key scan here. */
+ if ((type & (DICT_CLUSTERED | DICT_UNIQUE)) == DICT_UNIQUE
+ && trx->is_wsrep()
+ && wsrep_thd_is_BF(trx->mysql_thd, false)) {
+ trx->wsrep_UK_scan= true;
+ }
+#endif /* WITH_WSREP */
err = lock_rec_insert_check_and_lock(
flags, rec, btr_cur_get_block(cursor),
index, thr, mtr, inherit);
+#ifdef WITH_WSREP
+ trx->wsrep_UK_scan= false;
+#endif /* WITH_WSREP */
}
}
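Sketch: the btr0cur.cc hunk brackets lock_rec_insert_check_and_lock with a manual set and clear of trx->wsrep_UK_scan so that unique-secondary-key scans by high-priority (BF) threads can skip GAP locking. One way to picture that set/clear contract is a scope guard; the C++ below is only an illustration of the shape, not how the patch itself is written, and Trx is an invented type.

// Scope-guard view of the wsrep_UK_scan set/clear in the patch above.
#include <cassert>

struct Trx { bool wsrep_UK_scan = false; };

struct UKScanGuard {
  Trx &trx;
  explicit UKScanGuard(Trx &t) : trx(t) { trx.wsrep_UK_scan = true; }
  ~UKScanGuard() { trx.wsrep_UK_scan = false; }   // cleared even on early return
};

static void check_and_lock(const Trx &trx)
{
  // a GAP lock would be skipped here whenever the flag is set
  assert(trx.wsrep_UK_scan);
}

int main() {
  Trx trx;
  {
    UKScanGuard guard(trx);   // corresponds to trx->wsrep_UK_scan = true
    check_and_lock(trx);
  }                           // corresponds to trx->wsrep_UK_scan = false
  assert(!trx.wsrep_UK_scan);
}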
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 5442ee285db..d6277ae13d8 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -2802,7 +2802,6 @@ fil_rename_tablespace(
ut_ad(strchr(new_file_name, OS_PATH_SEPARATOR) != NULL);
if (!recv_recovery_is_on()) {
- fil_name_write_rename(id, old_file_name, new_file_name);
log_mutex_enter();
}
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 4bdc556cee8..0adec8eb304 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -1708,7 +1708,7 @@ fts_drop_tables(
error = fts_drop_common_tables(trx, &fts_table);
- if (error == DB_SUCCESS) {
+ if (error == DB_SUCCESS && table->fts) {
error = fts_drop_all_index_tables(trx, table->fts);
}
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index d4c15c0b254..d5eb4ac6bcd 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -62,6 +62,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <my_service_manager.h>
#include <key.h>
+#include <sql_manager.h>
/* Include necessary InnoDB headers */
#include "btr0btr.h"
@@ -1849,9 +1850,7 @@ thd_to_trx_id(
return(thd_to_trx(thd)->id);
}
-static int
-wsrep_abort_transaction(handlerton* hton, THD *bf_thd, THD *victim_thd,
- my_bool signal);
+static void wsrep_abort_transaction(handlerton*, THD *, THD *, my_bool);
static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid);
static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid);
#endif /* WITH_WSREP */
@@ -3496,6 +3495,12 @@ static int innodb_init_abort()
DBUG_RETURN(1);
}
+/** Deprecation message about innodb_idle_flush_pct */
+static const char* deprecated_idle_flush_pct
+ = "innodb_idle_flush_pct is DEPRECATED and has no effect.";
+
+static ulong innodb_idle_flush_pct;
+
/** If applicable, emit a message that log checksums cannot be disabled.
@param[in,out] thd client session, or NULL if at startup
@param[in] check whether redo log block checksums are enabled
@@ -5028,6 +5033,7 @@ innobase_close_connection(
if (trx) {
+ thd_set_ha_data(thd, hton, NULL);
if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) {
sql_print_error("Transaction not registered for MariaDB 2PC, "
@@ -17111,7 +17117,8 @@ innodb_io_capacity_update(
" higher than innodb_io_capacity_max %lu",
in_val, srv_max_io_capacity);
- srv_max_io_capacity = in_val * 2;
+ srv_max_io_capacity = (in_val & ~(~0UL >> 1))
+ ? in_val : in_val * 2;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_ARGUMENTS,
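Note on the replacement expression in innodb_io_capacity_update above: ~(~0UL >> 1) is a mask with only the most significant bit of ulong set, so when in_val already has that bit set (doubling would wrap around), srv_max_io_capacity is left at in_val instead of in_val * 2. A few lines showing the same test:

// The MSB test used above: double only when it cannot wrap around.
#include <cstdio>

static unsigned long cap_max(unsigned long in_val)
{
  const unsigned long msb = ~(~0UL >> 1);          // 0x8000...0
  return (in_val & msb) ? in_val : in_val * 2;     // avoid ulong overflow
}

int main() {
  std::printf("%lu\n", cap_max(200UL));            // 400
  std::printf("%lu\n", cap_max(~0UL));             // unchanged: doubling would wrap
}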
@@ -18549,6 +18556,7 @@ static struct st_mysql_storage_engine innobase_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
#ifdef WITH_WSREP
+<<<<<<< HEAD
/** This function is used to kill one transaction.
@@ -18584,9 +18592,72 @@ wsrep_innobase_kill_one_trx(
ut_ad(victim_trx);
ut_ad(lock_mutex_own());
ut_ad(trx_mutex_own(victim_trx));
+||||||| 75538f94ca0
+void
+wsrep_abort_slave_trx(
+/*==================*/
+ wsrep_seqno_t bf_seqno,
+ wsrep_seqno_t victim_seqno)
+{
+ WSREP_ERROR("Trx %lld tries to abort slave trx %lld. This could be "
+ "caused by:\n\t"
+ "1) unsupported configuration options combination, please check documentation.\n\t"
+ "2) a bug in the code.\n\t"
+ "3) a database corruption.\n Node consistency compromized, "
+ "need to abort. Restart the node to resync with cluster.",
+ (long long)bf_seqno, (long long)victim_seqno);
+ abort();
+}
+/*******************************************************************//**
+This function is used to kill one transaction in BF. */
+UNIV_INTERN
+int
+wsrep_innobase_kill_one_trx(
+/*========================*/
+ void * const bf_thd_ptr,
+ const trx_t * const bf_trx,
+ trx_t *victim_trx,
+ ibool signal)
+{
+ ut_ad(lock_mutex_own());
+ ut_ad(trx_mutex_own(victim_trx));
+ ut_ad(bf_thd_ptr);
+ ut_ad(victim_trx);
+=======
+void
+wsrep_abort_slave_trx(
+/*==================*/
+ wsrep_seqno_t bf_seqno,
+ wsrep_seqno_t victim_seqno)
+{
+ WSREP_ERROR("Trx %lld tries to abort slave trx %lld. This could be "
+ "caused by:\n\t"
+ "1) unsupported configuration options combination, please check documentation.\n\t"
+ "2) a bug in the code.\n\t"
+ "3) a database corruption.\n Node consistency compromized, "
+ "need to abort. Restart the node to resync with cluster.",
+ (long long)bf_seqno, (long long)victim_seqno);
+ abort();
+}
+>>>>>>> bb-10.3-release
+<<<<<<< HEAD
+ DBUG_ENTER("wsrep_innobase_kill_one_trx");
+||||||| 75538f94ca0
DBUG_ENTER("wsrep_innobase_kill_one_trx");
+ THD *bf_thd = bf_thd_ptr ? (THD*) bf_thd_ptr : NULL;
+ THD *thd = (THD *) victim_trx->mysql_thd;
+ int64_t bf_seqno = (bf_thd) ? wsrep_thd_trx_seqno(bf_thd) : 0;
+=======
+struct bg_wsrep_kill_trx_arg {
+ my_thread_id thd_id;
+ trx_id_t trx_id;
+ int64_t bf_seqno;
+ ibool signal;
+};
+>>>>>>> bb-10.3-release
+<<<<<<< HEAD
THD *thd= (THD *) victim_trx->mysql_thd;
ut_ad(thd);
/* Note that bf_trx might not exist here e.g. on MDL conflict
@@ -18596,11 +18667,104 @@ wsrep_innobase_kill_one_trx(
for BF transaction (test: galera_many_columns)*/
trx_t* bf_trx= thd_to_trx(bf_thd);
DBUG_ASSERT(wsrep_on(bf_thd));
+||||||| 75538f94ca0
+ if (!thd) {
+ DBUG_PRINT("wsrep", ("no thd for conflicting lock"));
+ WSREP_WARN("no THD for trx: " TRX_ID_FMT, victim_trx->id);
+ DBUG_RETURN(1);
+ }
+
+ if (!bf_thd) {
+ DBUG_PRINT("wsrep", ("no BF thd for conflicting lock"));
+ WSREP_WARN("no BF THD for trx: " TRX_ID_FMT,
+ bf_trx ? bf_trx->id : 0);
+ DBUG_RETURN(1);
+ }
+
+ WSREP_LOG_CONFLICT(bf_thd, thd, TRUE);
+
+ WSREP_DEBUG("BF kill (" ULINTPF ", seqno: " INT64PF
+ "), victim: (%lu) trx: " TRX_ID_FMT,
+ signal, bf_seqno,
+ thd_get_thread_id(thd),
+ victim_trx->id);
+
+ WSREP_DEBUG("Aborting query: %s conf %d trx: %" PRId64,
+ (thd && wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void",
+ wsrep_thd_conflict_state(thd, FALSE),
+ wsrep_thd_ws_handle(thd)->trx_id);
+=======
+static void bg_wsrep_kill_trx(
+ void *void_arg)
+{
+ bg_wsrep_kill_trx_arg *arg = (bg_wsrep_kill_trx_arg*)void_arg;
+ THD *thd = find_thread_by_id(arg->thd_id, false);
+ trx_t *victim_trx = NULL;
+ bool awake = false;
+ DBUG_ENTER("bg_wsrep_kill_trx");
+
+ if (thd) {
+ victim_trx = thd_to_trx(thd);
+ lock_mutex_enter();
+ trx_mutex_enter(victim_trx);
+ if (victim_trx->id != arg->trx_id)
+ {
+ trx_mutex_exit(victim_trx);
+ lock_mutex_exit();
+ wsrep_thd_UNLOCK(thd);
+ victim_trx = NULL;
+ }
+ }
+
+ if (!victim_trx) {
+ /* it can happen that trx_id was meanwhile rolled back */
+ DBUG_PRINT("wsrep", ("no thd for conflicting lock"));
+ goto ret;
+ }
+
+ WSREP_DEBUG("BF kill (" ULINTPF ", seqno: " INT64PF
+ "), victim: (%lu) trx: " TRX_ID_FMT,
+ arg->signal, arg->bf_seqno,
+ thd_get_thread_id(thd),
+ victim_trx->id);
+ WSREP_DEBUG("Aborting query: %s conf %d trx: %" PRId64,
+ (wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void",
+ wsrep_thd_conflict_state(thd, FALSE),
+ wsrep_thd_ws_handle(thd)->trx_id);
+>>>>>>> bb-10.3-release
+
+<<<<<<< HEAD
+ wsrep_thd_LOCK(thd);
+||||||| 75538f94ca0
wsrep_thd_LOCK(thd);
+ DBUG_EXECUTE_IF("sync.wsrep_after_BF_victim_lock",
+ {
+ const char act[]=
+ "now "
+ "wait_for signal.wsrep_after_BF_victim_lock";
+ DBUG_ASSERT(!debug_sync_set_action(bf_thd,
+ STRING_WITH_LEN(act)));
+ };);
+
+
+ if (wsrep_thd_query_state(thd) == QUERY_EXITING) {
+ WSREP_DEBUG("kill trx EXITING for " TRX_ID_FMT,
+ victim_trx->id);
+ wsrep_thd_UNLOCK(thd);
+ DBUG_RETURN(0);
+ }
+=======
+ if (wsrep_thd_query_state(thd) == QUERY_EXITING) {
+ WSREP_DEBUG("kill trx EXITING for " TRX_ID_FMT,
+ victim_trx->id);
+ goto ret_unlock;
+ }
+>>>>>>> bb-10.3-release
WSREP_LOG_CONFLICT(bf_thd, thd, TRUE);
+<<<<<<< HEAD
WSREP_DEBUG("Aborter %s trx_id: " TRX_ID_FMT " thread: %ld "
"seqno: %lld client_state: %s client_mode: %s transaction_mode: %s "
"query: %s",
@@ -18632,6 +18796,42 @@ wsrep_innobase_kill_one_trx(
WSREP_DEBUG("innodb kill transaction skipped due to wsrep_aborter set");
wsrep_thd_UNLOCK(thd);
DBUG_RETURN(0);
+||||||| 75538f94ca0
+ switch (wsrep_thd_get_conflict_state(thd)) {
+ case NO_CONFLICT:
+ wsrep_thd_set_conflict_state(thd, MUST_ABORT);
+ break;
+ case MUST_ABORT:
+ WSREP_DEBUG("victim " TRX_ID_FMT " in MUST ABORT state",
+ victim_trx->id);
+ wsrep_thd_UNLOCK(thd);
+ wsrep_thd_awake(thd, signal);
+ DBUG_RETURN(0);
+ break;
+ case ABORTED:
+ case ABORTING: // fall through
+ default:
+ WSREP_DEBUG("victim " TRX_ID_FMT " in state %d",
+ victim_trx->id, wsrep_thd_get_conflict_state(thd));
+ wsrep_thd_UNLOCK(thd);
+ DBUG_RETURN(0);
+ break;
+=======
+ switch (wsrep_thd_get_conflict_state(thd)) {
+ case NO_CONFLICT:
+ wsrep_thd_set_conflict_state(thd, MUST_ABORT);
+ break;
+ case MUST_ABORT:
+ WSREP_DEBUG("victim " TRX_ID_FMT " in MUST ABORT state",
+ victim_trx->id);
+ goto ret_awake;
+ case ABORTED:
+ case ABORTING: // fall through
+ default:
+ WSREP_DEBUG("victim " TRX_ID_FMT " in state %d",
+ victim_trx->id, wsrep_thd_get_conflict_state(thd));
+ goto ret_unlock;
+>>>>>>> bb-10.3-release
}
/* Note that we need to release this as it will be acquired
@@ -18639,21 +18839,290 @@ wsrep_innobase_kill_one_trx(
wsrep_thd_UNLOCK(thd);
DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort");
+<<<<<<< HEAD
if (wsrep_thd_bf_abort(bf_thd, thd, signal))
+||||||| 75538f94ca0
+ WSREP_DEBUG("kill query for: %ld",
+ thd_get_thread_id(thd));
+ WSREP_DEBUG("kill trx QUERY_COMMITTING for " TRX_ID_FMT,
+ victim_trx->id);
+
+ if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
+ wsrep_abort_slave_trx(bf_seqno,
+ wsrep_thd_trx_seqno(thd));
+ } else {
+ wsrep_t *wsrep= get_wsrep();
+ rcode = wsrep->abort_pre_commit(
+ wsrep, bf_seqno,
+ (wsrep_trx_id_t)wsrep_thd_ws_handle(thd)->trx_id
+ );
+
+ switch (rcode) {
+ case WSREP_WARNING:
+ WSREP_DEBUG("cancel commit warning: "
+ TRX_ID_FMT,
+ victim_trx->id);
+ wsrep_thd_UNLOCK(thd);
+ wsrep_thd_awake(thd, signal);
+ DBUG_RETURN(1);
+ break;
+ case WSREP_OK:
+ break;
+ default:
+ WSREP_ERROR(
+ "cancel commit bad exit: %d "
+ TRX_ID_FMT,
+ rcode, victim_trx->id);
+ /* unable to interrupt, must abort */
+ /* note: kill_mysql() will block, if we cannot.
+ * kill the lock holder first.
+ */
+ abort();
+ break;
+ }
+ }
+ wsrep_thd_UNLOCK(thd);
+ wsrep_thd_awake(thd, signal);
+ break;
+ case QUERY_EXEC:
+ /* it is possible that victim trx is itself waiting for some
+ * other lock. We need to cancel this waiting
+ */
+ WSREP_DEBUG("kill trx QUERY_EXEC for " TRX_ID_FMT,
+ victim_trx->id);
+
+ victim_trx->lock.was_chosen_as_deadlock_victim= TRUE;
+
+ if (victim_trx->lock.wait_lock) {
+ WSREP_DEBUG("victim has wait flag: %ld",
+ thd_get_thread_id(thd));
+ lock_t* wait_lock = victim_trx->lock.wait_lock;
+
+ if (wait_lock) {
+ WSREP_DEBUG("canceling wait lock");
+ victim_trx->lock.was_chosen_as_deadlock_victim= TRUE;
+ lock_cancel_waiting_and_release(wait_lock);
+ }
+
+ wsrep_thd_UNLOCK(thd);
+ wsrep_thd_awake(thd, signal);
+ } else {
+ /* abort currently executing query */
+ DBUG_PRINT("wsrep",("sending KILL_QUERY to: %lu",
+ thd_get_thread_id(thd)));
+ WSREP_DEBUG("kill query for: %ld",
+ thd_get_thread_id(thd));
+ /* Note that innobase_kill_query will take lock_mutex
+ and trx_mutex */
+ wsrep_thd_UNLOCK(thd);
+ wsrep_thd_awake(thd, signal);
+
+ /* for BF thd, we need to prevent him from committing */
+ if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
+ wsrep_abort_slave_trx(bf_seqno,
+ wsrep_thd_trx_seqno(thd));
+ }
+ }
+ break;
+ case QUERY_IDLE:
+=======
+ WSREP_DEBUG("kill query for: %ld",
+ thd_get_thread_id(thd));
+ WSREP_DEBUG("kill trx QUERY_COMMITTING for " TRX_ID_FMT,
+ victim_trx->id);
+
+ if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
+ wsrep_abort_slave_trx(arg->bf_seqno,
+ wsrep_thd_trx_seqno(thd));
+ } else {
+ wsrep_t *wsrep= get_wsrep();
+ rcode = wsrep->abort_pre_commit(
+ wsrep, arg->bf_seqno,
+ (wsrep_trx_id_t)wsrep_thd_ws_handle(thd)->trx_id
+ );
+
+ switch (rcode) {
+ case WSREP_WARNING:
+ WSREP_DEBUG("cancel commit warning: "
+ TRX_ID_FMT,
+ victim_trx->id);
+ goto ret_awake;
+ case WSREP_OK:
+ break;
+ default:
+ WSREP_ERROR(
+ "cancel commit bad exit: %d "
+ TRX_ID_FMT,
+ rcode, victim_trx->id);
+ /* unable to interrupt, must abort */
+ /* note: kill_mysql() will block, if we cannot.
+ * kill the lock holder first.
+ */
+ abort();
+ }
+ }
+ goto ret_awake;
+ case QUERY_EXEC:
+ /* it is possible that victim trx is itself waiting for some
+ * other lock. We need to cancel this waiting
+ */
+ WSREP_DEBUG("kill trx QUERY_EXEC for " TRX_ID_FMT,
+ victim_trx->id);
+
+ victim_trx->lock.was_chosen_as_deadlock_victim= TRUE;
+
+ if (victim_trx->lock.wait_lock) {
+ WSREP_DEBUG("victim has wait flag: %ld",
+ thd_get_thread_id(thd));
+ lock_t* wait_lock = victim_trx->lock.wait_lock;
+
+ if (wait_lock) {
+ WSREP_DEBUG("canceling wait lock");
+ victim_trx->lock.was_chosen_as_deadlock_victim= TRUE;
+ lock_cancel_waiting_and_release(wait_lock);
+ }
+
+ } else {
+ /* abort currently executing query */
+ DBUG_PRINT("wsrep",("sending KILL_QUERY to: %lu",
+ thd_get_thread_id(thd)));
+ WSREP_DEBUG("kill query for: %ld",
+ thd_get_thread_id(thd));
+
+ /* for BF thd, we need to prevent him from committing */
+ if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
+ wsrep_abort_slave_trx(arg->bf_seqno,
+ wsrep_thd_trx_seqno(thd));
+ }
+ }
+ goto ret_awake;
+ case QUERY_IDLE:
+>>>>>>> bb-10.3-release
{
+<<<<<<< HEAD
lock_t* wait_lock = victim_trx->lock.wait_lock;
if (wait_lock) {
DBUG_ASSERT(victim_trx->is_wsrep());
WSREP_DEBUG("victim has wait flag: %lu",
thd_get_thread_id(thd));
+||||||| 75538f94ca0
+ WSREP_DEBUG("kill IDLE for " TRX_ID_FMT, victim_trx->id);
+
+ if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
+ WSREP_DEBUG("kill BF IDLE, seqno: %lld",
+ (long long)wsrep_thd_trx_seqno(thd));
+ wsrep_thd_UNLOCK(thd);
+ wsrep_abort_slave_trx(bf_seqno,
+ wsrep_thd_trx_seqno(thd));
+ DBUG_RETURN(0);
+ }
+ /* This will lock thd from proceeding after net_read() */
+ wsrep_thd_set_conflict_state(thd, ABORTING);
+
+ wsrep_lock_rollback();
+=======
+ WSREP_DEBUG("kill IDLE for " TRX_ID_FMT, victim_trx->id);
+
+ if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
+ WSREP_DEBUG("kill BF IDLE, seqno: %lld",
+ (long long)wsrep_thd_trx_seqno(thd));
+ wsrep_abort_slave_trx(arg->bf_seqno,
+ wsrep_thd_trx_seqno(thd));
+ goto ret_unlock;
+ }
+ /* This will lock thd from proceeding after net_read() */
+ wsrep_thd_set_conflict_state(thd, ABORTING);
+
+ wsrep_lock_rollback();
+>>>>>>> bb-10.3-release
WSREP_DEBUG("canceling wait lock");
victim_trx->lock.was_chosen_as_deadlock_victim= TRUE;
lock_cancel_waiting_and_release(wait_lock);
}
+<<<<<<< HEAD
+||||||| 75538f94ca0
+
+ DBUG_PRINT("wsrep",("signalling wsrep rollbacker"));
+ WSREP_DEBUG("signaling aborter");
+ wsrep_unlock_rollback();
+ wsrep_thd_UNLOCK(thd);
+
+ break;
}
+ default:
+ WSREP_WARN("bad wsrep query state: %d",
+ wsrep_thd_query_state(thd));
+ wsrep_thd_UNLOCK(thd);
+ break;
+=======
+
+ DBUG_PRINT("wsrep",("signalling wsrep rollbacker"));
+ WSREP_DEBUG("signaling aborter");
+ wsrep_unlock_rollback();
+ goto ret_unlock;
+ }
+ default:
+ WSREP_WARN("bad wsrep query state: %d",
+ wsrep_thd_query_state(thd));
+ goto ret_unlock;
+>>>>>>> bb-10.3-release
+ }
+
+ret_awake:
+ awake = true;
+
+ret_unlock:
+ trx_mutex_exit(victim_trx);
+ lock_mutex_exit();
+ if (awake)
+ wsrep_thd_awake(thd, arg->signal);
+ wsrep_thd_UNLOCK(thd);
+
+ret:
+ free(arg);
+ DBUG_VOID_RETURN;
- DBUG_RETURN(0);
+}
+
+/*******************************************************************//**
+This function is used to kill one transaction in BF. */
+UNIV_INTERN
+void
+wsrep_innobase_kill_one_trx(
+/*========================*/
+ MYSQL_THD const bf_thd,
+ const trx_t * const bf_trx,
+ trx_t *victim_trx,
+ ibool signal)
+{
+ ut_ad(bf_thd);
+ ut_ad(victim_trx);
+ ut_ad(lock_mutex_own());
+ ut_ad(trx_mutex_own(victim_trx));
+
+ bg_wsrep_kill_trx_arg *arg = (bg_wsrep_kill_trx_arg*)malloc(sizeof(*arg));
+ arg->thd_id = thd_get_thread_id(victim_trx->mysql_thd);
+ arg->trx_id = victim_trx->id;
+ arg->bf_seqno = wsrep_thd_trx_seqno((THD*)bf_thd);
+ arg->signal = signal;
+
+ DBUG_ENTER("wsrep_innobase_kill_one_trx");
+
+ WSREP_LOG_CONFLICT(bf_thd, victim_trx->mysql_thd, TRUE);
+
+ DBUG_EXECUTE_IF("sync.wsrep_after_BF_victim_lock",
+ {
+ const char act[]=
+ "now "
+ "wait_for signal.wsrep_after_BF_victim_lock";
+ DBUG_ASSERT(!debug_sync_set_action(bf_thd,
+ STRING_WITH_LEN(act)));
+ };);
+
+
+ mysql_manager_submit(bg_wsrep_kill_trx, arg);
+ DBUG_VOID_RETURN;
}
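
With this change wsrep_innobase_kill_one_trx() no longer aborts the victim inline: it only records the identifiers the background job needs and lets the manager thread run bg_wsrep_kill_trx(), which re-resolves the THD and trx, walks the query-state machine above, and frees the argument at its ret: label. The hand-off shape, condensed from the assignments above (member types are inferred; the exact struct definition lives earlier in this file):

    // Copy plain identifiers, never raw THD*/trx_t* pointers, so the manager
    // thread can look the victim up again and take its own locks.
    struct bg_wsrep_kill_trx_arg {
      my_thread_id thd_id;    /* victim connection id                   */
      trx_id_t     trx_id;    /* victim transaction id                  */
      long long    bf_seqno;  /* seqno of the brute-force (BF) killer   */
      ibool        signal;    /* whether to awake the victim afterwards */
    };

    /* submitter side, as in wsrep_innobase_kill_one_trx() above */
    bg_wsrep_kill_trx_arg *arg=
      (bg_wsrep_kill_trx_arg*) malloc(sizeof *arg);
    arg->thd_id  = thd_get_thread_id(victim_trx->mysql_thd);
    arg->trx_id  = victim_trx->id;
    arg->bf_seqno= wsrep_thd_trx_seqno((THD*) bf_thd);
    arg->signal  = signal;
    mysql_manager_submit(bg_wsrep_kill_trx, arg);  /* worker calls free(arg) */
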
/** This function forces the victim transaction to abort. Aborting the
@@ -18666,16 +19135,22 @@ wsrep_innobase_kill_one_trx(
@return -1 victim thread was aborted (no transaction)
*/
static
-int
+void
wsrep_abort_transaction(
handlerton*,
THD *bf_thd,
THD *victim_thd,
my_bool signal)
{
+<<<<<<< HEAD
DBUG_ENTER("wsrep_innobase_abort_thd");
ut_ad(bf_thd);
ut_ad(victim_thd);
+||||||| 75538f94ca0
+ DBUG_ENTER("wsrep_innobase_abort_thd");
+=======
+ DBUG_ENTER("wsrep_abort_transaction");
+>>>>>>> bb-10.3-release
trx_t* victim_trx = thd_to_trx(victim_thd);
@@ -18687,17 +19162,26 @@ wsrep_abort_transaction(
if (victim_trx) {
lock_mutex_enter();
trx_mutex_enter(victim_trx);
+<<<<<<< HEAD
int rcode= wsrep_innobase_kill_one_trx(bf_thd,
victim_trx, signal);
+||||||| 75538f94ca0
+ int rcode = wsrep_innobase_kill_one_trx(bf_thd, bf_trx,
+ victim_trx, signal);
+ lock_mutex_exit();
+=======
+ wsrep_innobase_kill_one_trx(bf_thd, bf_trx, victim_trx, signal);
+ lock_mutex_exit();
+>>>>>>> bb-10.3-release
trx_mutex_exit(victim_trx);
lock_mutex_exit();
wsrep_srv_conc_cancel_wait(victim_trx);
- DBUG_RETURN(rcode);
+ DBUG_VOID_RETURN;
} else {
wsrep_thd_bf_abort(bf_thd, victim_thd, signal);
}
- DBUG_RETURN(-1);
+ DBUG_VOID_RETURN;
}
static
@@ -18732,6 +19216,14 @@ innobase_wsrep_get_checkpoint(
}
#endif /* WITH_WSREP */
+static void innodb_idle_flush_pct_update(THD *thd, st_mysql_sys_var *var,
+ void*, const void *save)
+{
+ innodb_idle_flush_pct = *static_cast<const ulong*>(save);
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_WRONG_COMMAND, deprecated_idle_flush_pct);
+}
+
/* plugin options */
static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm,
@@ -18821,12 +19313,10 @@ static MYSQL_SYSVAR_ULONG(io_capacity_max, srv_max_io_capacity,
SRV_MAX_IO_CAPACITY_DUMMY_DEFAULT, 100,
SRV_MAX_IO_CAPACITY_LIMIT, 0);
-static MYSQL_SYSVAR_ULONG(idle_flush_pct,
- srv_idle_flush_pct,
+static MYSQL_SYSVAR_ULONG(idle_flush_pct, innodb_idle_flush_pct,
PLUGIN_VAR_RQCMDARG,
- "Up to what percentage of dirty pages should be flushed when innodb "
- "finds it has spare resources to do so.",
- NULL, NULL, 100, 0, 100, 0);
+ "DEPRECATED. This setting has no effect.",
+ NULL, innodb_idle_flush_pct_update, 100, 0, 100, 0);
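
innodb_idle_flush_pct is thereby turned into a no-op: the option still parses and accepts values 0-100, but the update hook only stores into a dummy ulong and pushes a deprecation warning. The general shape of such a hook, with hypothetical names standing in for the InnoDB ones:

    // "Deprecated, no effect" plugin variable: keep old configs working,
    // warn the session that sets it, change no behaviour.
    static ulong dummy_backing;

    static void deprecated_pct_update(THD *thd, st_mysql_sys_var*,
                                      void*, const void *save)
    {
      dummy_backing= *static_cast<const ulong*>(save);
      push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
                   HA_ERR_WRONG_COMMAND,
                   "This setting is deprecated and has no effect");
    }

    static MYSQL_SYSVAR_ULONG(some_deprecated_pct, dummy_backing,
      PLUGIN_VAR_RQCMDARG,
      "DEPRECATED. This setting has no effect.",
      NULL,                    /* check hook                     */
      deprecated_pct_update,   /* update hook: warn, then ignore */
      100, 0, 100, 0);         /* default, min, max, block size  */

SET GLOBAL on the variable still succeeds, so upgrade scripts and existing configuration files keep working; the only visible effect is the warning.
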
#ifdef UNIV_DEBUG
static MYSQL_SYSVAR_BOOL(background_drop_list_empty,
@@ -20359,7 +20849,7 @@ static bool table_name_parse(
memcpy(tbl_buf, tbl_name.m_name + dbnamelen + 1, tblnamelen);
tbl_buf[tblnamelen] = 0;
- filename_to_tablename(db_buf, dbname, MAX_DATABASE_NAME_LEN + 1, true);
+ dbnamelen = filename_to_tablename(db_buf, dbname, MAX_DATABASE_NAME_LEN + 1, true);
if (tblnamelen > TEMP_FILE_PREFIX_LENGTH
&& !strncmp(tbl_buf, TEMP_FILE_PREFIX, TEMP_FILE_PREFIX_LENGTH)) {
@@ -20371,7 +20861,7 @@ static bool table_name_parse(
tblnamelen = is_part - tbl_buf;
}
- filename_to_tablename(tbl_buf, tblname, MAX_TABLE_NAME_LEN + 1, true);
+ tblnamelen = filename_to_tablename(tbl_buf, tblname, MAX_TABLE_NAME_LEN + 1, true);
return true;
}
@@ -20803,11 +21293,11 @@ innobase_get_computed_value(
field = dtuple_get_nth_v_field(row, col->v_pos);
- my_bitmap_map* old_write_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->write_set);
- my_bitmap_map* old_read_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->read_set);
+ MY_BITMAP *old_write_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->write_set);
+ MY_BITMAP *old_read_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->read_set);
ret = mysql_table->update_virtual_field(mysql_table->field[col->m_col.ind]);
- dbug_tmp_restore_column_map(mysql_table->read_set, old_read_set);
- dbug_tmp_restore_column_map(mysql_table->write_set, old_write_set);
+ dbug_tmp_restore_column_map(&mysql_table->read_set, old_read_set);
+ dbug_tmp_restore_column_map(&mysql_table->write_set, old_write_set);
if (ret != 0) {
DBUG_RETURN(NULL);
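
The hunk above is the first of many identical conversions in this merge: dbug_tmp_use_all_columns() now takes the address of the TABLE's bitmap pointer and hands back the previous MY_BITMAP*, which the caller later passes to dbug_tmp_restore_column_map() together with the same address. A minimal, self-contained sketch of the save/restore idiom (the surrounding function is hypothetical; the two dbug_* helpers and the TABLE/Field members are the ones used throughout this diff):

    // Temporarily mark every column readable around a raw field access in
    // debug builds, then restore the caller's original read_set.
    static longlong read_one_field(TABLE *table, Field *field)
    {
      MY_BITMAP *old_read_set=
        dbug_tmp_use_all_columns(table, &table->read_set);  // swap in all-columns map
      longlong value= field->val_int();                     // column is now "readable"
      dbug_tmp_restore_column_map(&table->read_set, old_read_set);
      return value;
    }
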
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 97714e9dfa7..c996fc58827 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -3433,9 +3433,9 @@ innobase_row_to_mysql(
}
}
if (table->vfield) {
- my_bitmap_map* old_read_set = tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_read_set = tmp_use_all_columns(table, &table->read_set);
table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ);
- tmp_restore_column_map(table->read_set, old_read_set);
+ tmp_restore_column_map(&table->read_set, old_read_set);
}
}
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 28e5d1d4f56..b54c0dfe384 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -233,7 +233,7 @@ innobase_casedn_str(
UNIV_INTERN
int
wsrep_innobase_kill_one_trx(
- THD* bf_thd,
+ MYSQL_THD bf_thd,
trx_t *victim_trx,
bool signal);
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index e1d37613dc9..f7a5cb5b29e 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, 2009, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2019, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -410,8 +410,6 @@ extern double srv_defragment_fill_factor;
extern uint srv_defragment_frequency;
extern ulonglong srv_defragment_interval;
-extern ulong srv_idle_flush_pct;
-
extern uint srv_change_buffer_max_size;
/* Number of IO operations per second the server can do */
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index c32234d923d..daffbacdfe6 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -799,6 +799,9 @@ public:
/** whether wsrep_on(mysql_thd) held at the start of transaction */
bool wsrep;
bool is_wsrep() const { return UNIV_UNLIKELY(wsrep); }
+ /** true, if BF thread is performing unique secondary index scanning */
+ bool wsrep_UK_scan;
+ bool is_wsrep_UK_scan() const { return UNIV_UNLIKELY(wsrep_UK_scan); }
#else /* WITH_WSREP */
bool is_wsrep() const { return false; }
#endif /* WITH_WSREP */
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index 99e493acfb4..b66ea937ec2 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index abee7cde995..04522e6e304 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2020, MariaDB Corporation.
+Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -651,6 +651,11 @@ static void wsrep_assert_no_bf_bf_wait(
{
ut_ad(!lock_rec1 || lock_get_type_low(lock_rec1) == LOCK_REC);
ut_ad(lock_get_type_low(lock_rec2) == LOCK_REC);
+ ut_ad(lock_mutex_own());
+
+ /* Note that we are holding lock_sys->mutex, thus we should
+ not acquire THD::LOCK_thd_data mutex below to avoid mutexing
+ order violation. */
if (!trx1->is_wsrep() || !lock_rec2->trx->is_wsrep())
return;
@@ -666,12 +671,8 @@ static void wsrep_assert_no_bf_bf_wait(
/* avoiding BF-BF conflict assert, if victim is already aborting
or rolling back for replaying
*/
- wsrep_thd_LOCK(lock_rec2->trx->mysql_thd);
if (wsrep_thd_is_aborting(lock_rec2->trx->mysql_thd)) {
- wsrep_thd_UNLOCK(lock_rec2->trx->mysql_thd);
return;
- }
- wsrep_thd_UNLOCK(lock_rec2->trx->mysql_thd);
mtr_t mtr;
@@ -728,6 +729,7 @@ lock_rec_has_to_wait(
{
ut_ad(trx && lock2);
ut_ad(lock_get_type_low(lock2) == LOCK_REC);
+ ut_ad(lock_mutex_own());
if (trx == lock2->trx
|| lock_mode_compatible(
@@ -808,9 +810,23 @@ lock_rec_has_to_wait(
}
#ifdef WITH_WSREP
- /* There should not be two conflicting locks that are
- brute force. If there is it is a bug. */
- wsrep_assert_no_bf_bf_wait(NULL, lock2, trx);
+ /* New lock request from a transaction is using unique key
+ scan and this transaction is a wsrep high priority transaction
+ (brute force). If conflicting transaction is also wsrep high
+ priority transaction we should avoid lock conflict because
+ ordering of these transactions is already decided and
+ conflicting transaction will be later replayed. Note
+ that thread holding conflicting lock can't be
+ committed or rolled back while we hold
+ lock_sys->mutex. */
+ if (trx->is_wsrep_UK_scan()
+ && wsrep_thd_is_BF(lock2->trx->mysql_thd, false)) {
+ return false;
+ }
+
+ /* There should not be two conflicting locks that are
+ brute force. If there is it is a bug. */
+ wsrep_assert_no_bf_bf_wait(NULL, lock2, trx);
#endif /* WITH_WSREP */
return true;
@@ -5649,6 +5665,19 @@ lock_sec_rec_modify_check_and_lock(
heap_no = page_rec_get_heap_no(rec);
+#ifdef WITH_WSREP
+ trx_t *trx= thr_get_trx(thr);
+ /* If transaction scanning an unique secondary key is wsrep
+ high priority thread (brute force) this scanning may involve
+ GAP-locking in the index. As this locking happens also when
+ applying replication events in high priority applier threads,
+ there is a probability for lock conflicts between two wsrep
+ high priority threads. To avoid this GAP-locking we mark that
+ this transaction is using unique key scan here. */
+ if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false))
+ trx->wsrep_UK_scan= true;
+#endif /* WITH_WSREP */
+
/* Another transaction cannot have an implicit lock on the record,
because when we come here, we already have modified the clustered
index record, and this would not have been possible if another active
@@ -5657,6 +5686,10 @@ lock_sec_rec_modify_check_and_lock(
err = lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
block, heap_no, index, thr);
+#ifdef WITH_WSREP
+ trx->wsrep_UK_scan= false;
+#endif /* WITH_WSREP */
+
#ifdef UNIV_DEBUG
{
mem_heap_t* heap = NULL;
@@ -5748,9 +5781,26 @@ lock_sec_rec_read_check_and_lock(
return DB_SUCCESS;
}
+#ifdef WITH_WSREP
+ trx_t *trx= thr_get_trx(thr);
+ /* If transaction scanning an unique secondary key is wsrep
+ high priority thread (brute force) this scanning may involve
+ GAP-locking in the index. As this locking happens also when
+ applying replication events in high priority applier threads,
+ there is a probability for lock conflicts between two wsrep
+ high priority threads. To avoid this GAP-locking we mark that
+ this transaction is using unique key scan here. */
+ if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false))
+ trx->wsrep_UK_scan= true;
+#endif /* WITH_WSREP */
+
err = lock_rec_lock(FALSE, ulint(mode) | gap_mode,
block, heap_no, index, thr);
+#ifdef WITH_WSREP
+ trx->wsrep_UK_scan= false;
+#endif /* WITH_WSREP */
+
ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets));
return(err);
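
Together with the lock_rec_has_to_wait() hunk earlier in this file, these two call sites form one mechanism: a high-priority (brute-force) applier sets trx->wsrep_UK_scan only for the duration of a unique secondary-index lock request, and while the flag is set a conflict with another BF transaction's lock is not treated as a reason to wait, since replication has already ordered the two writes. The flag scoping is identical in both functions; a condensed sketch of the modify path:

    trx_t *trx= thr_get_trx(thr);
    if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false))
      trx->wsrep_UK_scan= true;     /* lock_rec_has_to_wait() skips BF-BF waits */

    dberr_t err= lock_rec_lock(TRUE, LOCK_X | LOCK_REC_NOT_GAP,
                               block, heap_no, index, thr);

    trx->wsrep_UK_scan= false;      /* never leave the request with the flag set */
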
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index d2f2fe822c8..461cf470d62 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -3871,6 +3871,7 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn)
mutex_enter(&recv_sys.mutex);
recv_sys.apply_log_recs = true;
+ recv_no_ibuf_operations = is_mariabackup_restore_or_export();
mutex_exit(&recv_sys.mutex);
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 7c05084d4d9..6dd3ce9ce2a 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2306,7 +2306,9 @@ row_upd_sec_index_entry(
break;
}
- if (!index->is_committed()) {
+ bool uncommitted = !index->is_committed();
+
+ if (uncommitted) {
/* The index->online_status may change if the index is
or was being created online, but not committed yet. It
is protected by index->lock. */
@@ -2503,11 +2505,38 @@ row_upd_sec_index_entry(
mem_heap_empty(heap);
+ DEBUG_SYNC_C_IF_THD(trx->mysql_thd,
+ "before_row_upd_sec_new_index_entry");
+
+ uncommitted = !index->is_committed();
+ if (uncommitted) {
+ mtr.start();
+ /* The index->online_status may change if the index is
+ being rollbacked. It is protected by index->lock. */
+
+ mtr_s_lock_index(index, &mtr);
+
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_COMPLETE:
+ case ONLINE_INDEX_CREATION:
+ break;
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ mtr_commit(&mtr);
+ goto func_exit;
+ }
+
+ }
+
/* Build a new index entry */
entry = row_build_index_entry(node->upd_row, node->upd_ext,
index, heap);
ut_a(entry);
+ if (uncommitted) {
+ mtr_commit(&mtr);
+ }
+
/* Insert new index entry */
err = row_ins_sec_index_entry(index, entry, thr, !node->is_delete);
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index a5305684935..2f930694821 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -3,7 +3,7 @@
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -248,9 +248,6 @@ ulong srv_buf_pool_load_pages_abort = LONG_MAX;
/** Lock table size in bytes */
ulint srv_lock_table_size = ULINT_MAX;
-/** innodb_idle_flush_pct */
-ulong srv_idle_flush_pct;
-
/** innodb_read_io_threads */
ulong srv_n_read_io_threads;
/** innodb_write_io_threads */
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 6a47751e891..940d3e00670 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -3,7 +3,7 @@
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2020, MariaDB Corporation.
+Copyright (c) 2013, 2021, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -2548,7 +2548,9 @@ void innodb_shutdown()
srv_sys_space.shutdown();
if (srv_tmp_space.get_sanity_check_status()) {
- fil_system.temp_space->close();
+ if (fil_system.temp_space) {
+ fil_system.temp_space->close();
+ }
srv_tmp_space.delete_files();
}
srv_tmp_space.shutdown();
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 8170170c193..14bd684e2ce 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -161,6 +161,11 @@ trx_init(
trx->lock.rec_cached = 0;
trx->lock.table_cached = 0;
+#ifdef WITH_WSREP
+ ut_ad(!trx->wsrep);
+ ut_ad(!trx->wsrep_event);
+ ut_ad(!trx->wsrep_UK_scan);
+#endif /* WITH_WSREP */
ut_ad(trx->get_flush_observer() == NULL);
}
@@ -369,6 +374,7 @@ trx_t *trx_create()
#ifdef WITH_WSREP
trx->wsrep_event= NULL;
+ ut_ad(!trx->wsrep_UK_scan);
#endif /* WITH_WSREP */
trx_sys.register_trx(trx);
@@ -475,6 +481,8 @@ void trx_t::free()
MEM_NOACCESS(&flush_observer, sizeof flush_observer);
#ifdef WITH_WSREP
MEM_NOACCESS(&wsrep_event, sizeof wsrep_event);
+ ut_ad(!wsrep_UK_scan);
+ MEM_NOACCESS(&wsrep_UK_scan, sizeof wsrep_UK_scan);
#endif /* WITH_WSREP */
MEM_NOACCESS(&magic_n, sizeof magic_n);
trx_pools->mem_free(this);
diff --git a/storage/maria/ma_recovery_util.c b/storage/maria/ma_recovery_util.c
index fe43d812600..3bbda614991 100644
--- a/storage/maria/ma_recovery_util.c
+++ b/storage/maria/ma_recovery_util.c
@@ -98,7 +98,6 @@ void eprint(FILE *trace_file __attribute__ ((unused)),
fputc('\n', trace_file);
if (trace_file != stderr)
{
- va_start(args, format);
my_printv_error(HA_ERR_INITIALIZATION, format, MYF(0), args);
}
va_end(args);
diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp
index 8638399717e..128bc9b3583 100644
--- a/storage/mroonga/ha_mroonga.cpp
+++ b/storage/mroonga/ha_mroonga.cpp
@@ -5919,7 +5919,7 @@ int ha_mroonga::wrapper_write_row_index(const uchar *buf)
DBUG_RETURN(0);
}
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
for (i = 0; i < n_keys; i++) {
@@ -5994,7 +5994,7 @@ int ha_mroonga::storage_write_row(const uchar *buf)
DBUG_RETURN(error);
}
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
for (i = 0; i < n_columns; i++) {
Field *field = table->field[i];
@@ -6275,7 +6275,7 @@ int ha_mroonga::storage_write_row_multiple_column_indexes(const uchar *buf,
int error = 0;
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
for (i = 0; i < n_keys; i++) {
@@ -6569,7 +6569,7 @@ int ha_mroonga::wrapper_update_row_index(const uchar *old_data,
DBUG_RETURN(0);
}
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
for (i = 0; i < n_keys; i++) {
@@ -6690,7 +6690,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data,
grn_obj new_value;
GRN_VOID_INIT(&new_value);
{
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
generic_store_bulk(field, &new_value);
}
grn_obj casted_value;
@@ -6719,7 +6719,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data,
storage_store_fields_for_prep_update(old_data, new_data, record_id);
{
mrn::Lock lock(&(share->record_mutex), have_unique_index());
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
if ((error = storage_prepare_delete_row_unique_indexes(old_data,
record_id))) {
DBUG_RETURN(error);
@@ -6744,7 +6744,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data,
#endif
if (bitmap_is_set(table->write_set, field->field_index)) {
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
DBUG_PRINT("info", ("mroonga: update column %d(%d)",i,field->field_index));
if (field->is_null()) continue;
@@ -6821,7 +6821,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data,
if (table->found_next_number_field &&
!table->s->next_number_keypart &&
new_data == table->record[0]) {
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
Field_num *field = (Field_num *) table->found_next_number_field;
if (field->unsigned_flag || field->val_int() > 0) {
MRN_LONG_TERM_SHARE *long_term_share = share->long_term_share;
@@ -6878,7 +6878,7 @@ int ha_mroonga::storage_update_row_index(const uchar *old_data,
my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(old_data, table->record[0]);
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
mrn_change_encoding(ctx, NULL);
@@ -7094,7 +7094,7 @@ int ha_mroonga::wrapper_delete_row_index(const uchar *buf)
DBUG_RETURN(0);
}
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
for (i = 0; i < n_keys; i++) {
@@ -7245,7 +7245,7 @@ int ha_mroonga::storage_delete_row_index(const uchar *buf)
GRN_TEXT_INIT(&key, 0);
GRN_TEXT_INIT(&encoded_key, 0);
- mrn::DebugColumnAccess debug_column_access(table, table->read_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->read_set);
uint i;
uint n_keys = table->s->keys;
mrn_change_encoding(ctx, NULL);
@@ -11436,7 +11436,7 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id)
}
}
- mrn::DebugColumnAccess debug_column_access(table, table->write_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->write_set);
DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index));
field->move_field_offset(ptr_diff);
if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) {
@@ -11501,7 +11501,7 @@ void ha_mroonga::storage_store_fields_for_prep_update(const uchar *old_data,
)
#endif
) {
- mrn::DebugColumnAccess debug_column_access(table, table->write_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->write_set);
DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index));
grn_obj value;
GRN_OBJ_INIT(&value, GRN_BULK, 0, grn_obj_get_range(ctx, grn_columns[i]));
@@ -11537,7 +11537,7 @@ void ha_mroonga::storage_store_fields_by_index(uchar *buf)
if (KEY_N_KEY_PARTS(key_info) == 1) {
my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(buf, table->record[0]);
Field *field = key_info->key_part->field;
- mrn::DebugColumnAccess debug_column_access(table, table->write_set);
+ mrn::DebugColumnAccess debug_column_access(table, &table->write_set);
field->move_field_offset(ptr_diff);
storage_store_field(field, (const char *)key, key_length);
field->move_field_offset(-ptr_diff);
diff --git a/storage/mroonga/lib/mrn_debug_column_access.cpp b/storage/mroonga/lib/mrn_debug_column_access.cpp
index 778300a33d6..cb2ce7e35ca 100644
--- a/storage/mroonga/lib/mrn_debug_column_access.cpp
+++ b/storage/mroonga/lib/mrn_debug_column_access.cpp
@@ -20,7 +20,7 @@
#include "mrn_debug_column_access.hpp"
namespace mrn {
- DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap)
+ DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap)
: table_(table),
bitmap_(bitmap) {
#ifdef DBUG_ASSERT_EXISTS
diff --git a/storage/mroonga/lib/mrn_debug_column_access.hpp b/storage/mroonga/lib/mrn_debug_column_access.hpp
index 7c2fd60344e..954e04135f8 100644
--- a/storage/mroonga/lib/mrn_debug_column_access.hpp
+++ b/storage/mroonga/lib/mrn_debug_column_access.hpp
@@ -25,12 +25,12 @@
namespace mrn {
class DebugColumnAccess {
TABLE *table_;
- MY_BITMAP *bitmap_;
+ MY_BITMAP **bitmap_;
#ifdef DBUG_ASSERT_EXISTS
- my_bitmap_map *map_;
+ MY_BITMAP *map_;
#endif
public:
- DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap);
+ DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap);
~DebugColumnAccess();
};
}
diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc
index fd715c57a1f..e0e81f7cddc 100644
--- a/storage/oqgraph/ha_oqgraph.cc
+++ b/storage/oqgraph/ha_oqgraph.cc
@@ -908,7 +908,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key,
bmove_align(buf, table->s->default_values, table->s->reclength);
key_restore(buf, (byte*) key, key_info, key_len);
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set);
my_ptrdiff_t ptrdiff= buf - table->record[0];
if (ptrdiff)
@@ -937,7 +937,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key,
field[1]->move_field_offset(-ptrdiff);
field[2]->move_field_offset(-ptrdiff);
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
return error_code(oqgraph::NO_MORE_DATA);
}
}
@@ -962,7 +962,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key,
field[1]->move_field_offset(-ptrdiff);
field[2]->move_field_offset(-ptrdiff);
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
// Keep the latch around so we can use it in the query result later -
// See fill_record().
@@ -995,7 +995,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row)
bmove_align(record, table->s->default_values, table->s->reclength);
- my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set);
my_ptrdiff_t ptrdiff= record - table->record[0];
if (ptrdiff)
@@ -1071,7 +1071,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row)
field[4]->move_field_offset(-ptrdiff);
field[5]->move_field_offset(-ptrdiff);
}
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return 0;
}
diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc
index 144ebcddff4..e2ad190c435 100644
--- a/storage/perfschema/pfs_engine_table.cc
+++ b/storage/perfschema/pfs_engine_table.cc
@@ -188,17 +188,15 @@ ha_rows PFS_engine_table_share::get_row_count(void) const
int PFS_engine_table_share::write_row(TABLE *table, const unsigned char *buf,
Field **fields) const
{
- my_bitmap_map *org_bitmap;
-
if (m_write_row == NULL)
{
return HA_ERR_WRONG_COMMAND;
}
/* We internally read from Fields to support the write interface */
- org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set);
int result= m_write_row(table, buf, fields);
- dbug_tmp_restore_column_map(table->read_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->read_set, org_bitmap);
return result;
}
@@ -256,7 +254,6 @@ int PFS_engine_table::read_row(TABLE *table,
unsigned char *buf,
Field **fields)
{
- my_bitmap_map *org_bitmap;
Field *f;
Field **fields_reset;
@@ -264,7 +261,7 @@ int PFS_engine_table::read_row(TABLE *table,
bool read_all= !bitmap_is_clear_all(table->write_set);
/* We internally write to Fields to support the read interface */
- org_bitmap= dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set);
/*
Some callers of the storage engine interface do not honor the
@@ -276,7 +273,7 @@ int PFS_engine_table::read_row(TABLE *table,
f->reset();
int result= read_row_values(table, buf, fields, read_all);
- dbug_tmp_restore_column_map(table->write_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->write_set, org_bitmap);
return result;
}
@@ -294,12 +291,10 @@ int PFS_engine_table::update_row(TABLE *table,
const unsigned char *new_buf,
Field **fields)
{
- my_bitmap_map *org_bitmap;
-
/* We internally read from Fields to support the write interface */
- org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set);
int result= update_row_values(table, old_buf, new_buf, fields);
- dbug_tmp_restore_column_map(table->read_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->read_set, org_bitmap);
return result;
}
@@ -308,12 +303,10 @@ int PFS_engine_table::delete_row(TABLE *table,
const unsigned char *buf,
Field **fields)
{
- my_bitmap_map *org_bitmap;
-
/* We internally read from Fields to support the delete interface */
- org_bitmap= dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set);
int result= delete_row_values(table, buf, fields);
- dbug_tmp_restore_column_map(table->read_set, org_bitmap);
+ dbug_tmp_restore_column_map(&table->read_set, org_bitmap);
return result;
}
diff --git a/storage/perfschema/table_accounts.cc b/storage/perfschema/table_accounts.cc
index 708f8269a69..550f6614abb 100644
--- a/storage/perfschema/table_accounts.cc
+++ b/storage/perfschema/table_accounts.cc
@@ -43,8 +43,8 @@ table_accounts::m_share=
sizeof(PFS_simple_index), /* ref length */
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE accounts("
- "USER CHAR(16) collate utf8_bin default null,"
- "HOST CHAR(60) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"CURRENT_CONNECTIONS bigint not null,"
"TOTAL_CONNECTIONS bigint not null)") }
};
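
Every performance-schema DDL string below gets the same treatment: the hard-coded CHAR(16)/CHAR(60) widths are replaced with the USERNAME_CHAR_LENGTH and HOSTNAME_LENGTH limits, stringified at compile time so the column definitions follow any future change to those constants. The splicing relies on two-level macro stringification plus adjacent string-literal concatenation; a self-contained illustration (the helper macro name and the value 128 are placeholders, the real definitions live in the server headers):

    #define STRINGIFY_HELPER(x) #x
    #define STRINGIFY_ARG(x)    STRINGIFY_HELPER(x)   /* expand x first, then stringify */

    #define USERNAME_CHAR_LENGTH 128                  /* placeholder value */

    static const char ddl_fragment[]=
      "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,";
    /* ddl_fragment == "USER CHAR(128) collate utf8_bin default null," */
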
diff --git a/storage/perfschema/table_esgs_by_account_by_event_name.cc b/storage/perfschema/table_esgs_by_account_by_event_name.cc
index 22e4e0040f1..9a983eb076e 100644
--- a/storage/perfschema/table_esgs_by_account_by_event_name.cc
+++ b/storage/perfschema/table_esgs_by_account_by_event_name.cc
@@ -49,8 +49,8 @@ table_esgs_by_account_by_event_name::m_share=
sizeof(pos_esgs_by_account_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_account_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
- "HOST CHAR(60) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esgs_by_host_by_event_name.cc b/storage/perfschema/table_esgs_by_host_by_event_name.cc
index 86cc2eb1b86..5ff9faf0c1e 100644
--- a/storage/perfschema/table_esgs_by_host_by_event_name.cc
+++ b/storage/perfschema/table_esgs_by_host_by_event_name.cc
@@ -50,7 +50,7 @@ table_esgs_by_host_by_event_name::m_share=
sizeof(pos_esgs_by_host_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_host_by_event_name("
- "HOST CHAR(60) collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esgs_by_user_by_event_name.cc b/storage/perfschema/table_esgs_by_user_by_event_name.cc
index af73c1fc5fd..23b7b0f6689 100644
--- a/storage/perfschema/table_esgs_by_user_by_event_name.cc
+++ b/storage/perfschema/table_esgs_by_user_by_event_name.cc
@@ -50,7 +50,7 @@ table_esgs_by_user_by_event_name::m_share=
sizeof(pos_esgs_by_user_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_user_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esms_by_account_by_event_name.cc b/storage/perfschema/table_esms_by_account_by_event_name.cc
index 7afdabcbbfe..312050aa9c9 100644
--- a/storage/perfschema/table_esms_by_account_by_event_name.cc
+++ b/storage/perfschema/table_esms_by_account_by_event_name.cc
@@ -49,8 +49,8 @@ table_esms_by_account_by_event_name::m_share=
sizeof(pos_esms_by_account_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_account_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
- "HOST CHAR(60) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esms_by_host_by_event_name.cc b/storage/perfschema/table_esms_by_host_by_event_name.cc
index 42629ab6c09..b390d1e17a4 100644
--- a/storage/perfschema/table_esms_by_host_by_event_name.cc
+++ b/storage/perfschema/table_esms_by_host_by_event_name.cc
@@ -50,7 +50,7 @@ table_esms_by_host_by_event_name::m_share=
sizeof(pos_esms_by_host_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_host_by_event_name("
- "HOST CHAR(60) collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_esms_by_user_by_event_name.cc b/storage/perfschema/table_esms_by_user_by_event_name.cc
index f8708ac9a14..1fa1289aa8c 100644
--- a/storage/perfschema/table_esms_by_user_by_event_name.cc
+++ b/storage/perfschema/table_esms_by_user_by_event_name.cc
@@ -50,7 +50,7 @@ table_esms_by_user_by_event_name::m_share=
sizeof(pos_esms_by_user_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_user_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_ews_by_account_by_event_name.cc b/storage/perfschema/table_ews_by_account_by_event_name.cc
index fa6258ec9ac..40e0152f889 100644
--- a/storage/perfschema/table_ews_by_account_by_event_name.cc
+++ b/storage/perfschema/table_ews_by_account_by_event_name.cc
@@ -49,8 +49,8 @@ table_ews_by_account_by_event_name::m_share=
sizeof(pos_ews_by_account_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_account_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
- "HOST CHAR(60) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_ews_by_host_by_event_name.cc b/storage/perfschema/table_ews_by_host_by_event_name.cc
index e3ef7ca3720..d22d6fc8d79 100644
--- a/storage/perfschema/table_ews_by_host_by_event_name.cc
+++ b/storage/perfschema/table_ews_by_host_by_event_name.cc
@@ -50,7 +50,7 @@ table_ews_by_host_by_event_name::m_share=
sizeof(pos_ews_by_host_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_host_by_event_name("
- "HOST CHAR(60) collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_ews_by_user_by_event_name.cc b/storage/perfschema/table_ews_by_user_by_event_name.cc
index cb99f749a9c..b2f8e1da824 100644
--- a/storage/perfschema/table_ews_by_user_by_event_name.cc
+++ b/storage/perfschema/table_ews_by_user_by_event_name.cc
@@ -50,7 +50,7 @@ table_ews_by_user_by_event_name::m_share=
sizeof(pos_ews_by_user_by_event_name),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_user_by_event_name("
- "USER CHAR(16) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
diff --git a/storage/perfschema/table_hosts.cc b/storage/perfschema/table_hosts.cc
index 8bc5310817c..221e0664590 100644
--- a/storage/perfschema/table_hosts.cc
+++ b/storage/perfschema/table_hosts.cc
@@ -44,7 +44,7 @@ table_hosts::m_share=
sizeof(PFS_simple_index), /* ref length */
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE hosts("
- "HOST CHAR(60) collate utf8_bin default null,"
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null,"
"CURRENT_CONNECTIONS bigint not null,"
"TOTAL_CONNECTIONS bigint not null)") }
};
diff --git a/storage/perfschema/table_setup_actors.cc b/storage/perfschema/table_setup_actors.cc
index b05f6ad004b..f18d6ceee20 100644
--- a/storage/perfschema/table_setup_actors.cc
+++ b/storage/perfschema/table_setup_actors.cc
@@ -49,9 +49,9 @@ table_setup_actors::m_share=
sizeof(PFS_simple_index),
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE setup_actors("
- "HOST CHAR(60) collate utf8_bin default '%' not null,"
- "USER CHAR(16) collate utf8_bin default '%' not null,"
- "ROLE CHAR(16) collate utf8_bin default '%' not null)") }
+ "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default '%' not null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default '%' not null,"
+ "ROLE CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default '%' not null)") }
};
PFS_engine_table* table_setup_actors::create()
diff --git a/storage/perfschema/table_threads.cc b/storage/perfschema/table_threads.cc
index b396db1a814..59b0af453fb 100644
--- a/storage/perfschema/table_threads.cc
+++ b/storage/perfschema/table_threads.cc
@@ -46,8 +46,8 @@ table_threads::m_share=
"NAME VARCHAR(128) not null,"
"TYPE VARCHAR(10) not null,"
"PROCESSLIST_ID BIGINT unsigned,"
- "PROCESSLIST_USER VARCHAR(16),"
- "PROCESSLIST_HOST VARCHAR(60),"
+ "PROCESSLIST_USER VARCHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) "),"
+ "PROCESSLIST_HOST VARCHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) "),"
"PROCESSLIST_DB VARCHAR(64),"
"PROCESSLIST_COMMAND VARCHAR(16),"
"PROCESSLIST_TIME BIGINT,"
diff --git a/storage/perfschema/table_users.cc b/storage/perfschema/table_users.cc
index 883ebd36633..e9592c55f55 100644
--- a/storage/perfschema/table_users.cc
+++ b/storage/perfschema/table_users.cc
@@ -44,7 +44,7 @@ table_users::m_share=
sizeof(PFS_simple_index), /* ref length */
&m_table_lock,
{ C_STRING_WITH_LEN("CREATE TABLE users("
- "USER CHAR(16) collate utf8_bin default null,"
+ "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null,"
"CURRENT_CONNECTIONS bigint not null,"
"TOTAL_CONNECTIONS bigint not null)") }
};
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
index 52c93c37806..b4fcb097b71 100644
--- a/storage/rocksdb/ha_rocksdb.cc
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -6116,8 +6116,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() {
Field *field =
table->key_info[table->s->next_number_index].key_part[0].field;
ulonglong max_val = rdb_get_int_col_max_value(field);
- my_bitmap_map *const old_map =
- dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *const old_map = dbug_tmp_use_all_columns(table, &table->read_set);
last_val = field->val_int();
if (last_val != max_val) {
last_val++;
@@ -6132,7 +6131,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() {
}
}
#endif
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
}
m_keyread_only = save_keyread_only;
@@ -6169,15 +6168,15 @@ void ha_rocksdb::update_auto_incr_val_from_field() {
field = table->key_info[table->s->next_number_index].key_part[0].field;
max_val = rdb_get_int_col_max_value(field);
- my_bitmap_map *const old_map =
- dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *const old_map =
+ dbug_tmp_use_all_columns(table, &table->read_set);
new_val = field->val_int();
// don't increment if we would wrap around
if (new_val != max_val) {
new_val++;
}
- dbug_tmp_restore_column_map(table->read_set, old_map);
+ dbug_tmp_restore_column_map(&table->read_set, old_map);
// Only update if positive value was set for auto_incr column.
if (new_val <= max_val) {
diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc
index f0bc8a49761..99f1178d897 100644
--- a/storage/rocksdb/rdb_datadic.cc
+++ b/storage/rocksdb/rdb_datadic.cc
@@ -1489,12 +1489,12 @@ void Rdb_key_def::pack_with_make_sort_key(
DBUG_ASSERT(*dst != nullptr);
const int max_len = fpi->m_max_image_len;
- my_bitmap_map *old_map;
+ MY_BITMAP *old_map;
old_map= dbug_tmp_use_all_columns(field->table,
- field->table->read_set);
+ &field->table->read_set);
field->sort_string(*dst, max_len);
- dbug_tmp_restore_column_map(field->table->read_set, old_map);
+ dbug_tmp_restore_column_map(&field->table->read_set, old_map);
*dst += max_len;
}
diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc
index b9f5d02bd51..8eae98955c3 100644
--- a/storage/sequence/sequence.cc
+++ b/storage/sequence/sequence.cc
@@ -115,13 +115,13 @@ THR_LOCK_DATA **ha_seq::store_lock(THD *thd, THR_LOCK_DATA **to,
void ha_seq::set(unsigned char *buf)
{
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set);
my_ptrdiff_t offset = (my_ptrdiff_t) (buf - table->record[0]);
Field *field = table->field[0];
field->move_field_offset(offset);
field->store(cur, true);
field->move_field_offset(-offset);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
}
int ha_seq::rnd_init(bool scan)
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index 75558d333e0..f2bc24c47d4 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -3052,7 +3052,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
}
#if MYSQL_VERSION_ID>50100
- my_bitmap_map * org_bitmap = dbug_tmp_use_all_columns ( table, table->write_set );
+ MY_BITMAP * org_bitmap = dbug_tmp_use_all_columns ( table, &table->write_set );
#endif
Field ** field = table->field;
@@ -3198,7 +3198,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
m_iCurrentPos++;
#if MYSQL_VERSION_ID > 50100
- dbug_tmp_restore_column_map ( table->write_set, org_bitmap );
+ dbug_tmp_restore_column_map ( &table->write_set, org_bitmap );
#endif
SPH_RET(0);
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index b2600859b88..988c2305b04 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -10112,12 +10112,12 @@ int ha_spider::write_row(
if (!table->auto_increment_field_not_null)
{
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
table->next_number_field->store((longlong) 0, TRUE);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
force_auto_increment = FALSE;
table->file->insert_id_for_cur_row = 0;
@@ -10125,13 +10125,13 @@ int ha_spider::write_row(
} else if (auto_increment_mode == 2)
{
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
table->next_number_field->store((longlong) 0, TRUE);
table->auto_increment_field_not_null = FALSE;
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
force_auto_increment = FALSE;
table->file->insert_id_for_cur_row = 0;
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index 6d2afc1fd55..264f85d74cb 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -1733,7 +1733,7 @@ int spider_db_append_key_where_internal(
DBUG_PRINT("info", ("spider end_key_part_map=%lu", end_key_part_map));
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set);
#endif
if (sql_kind == SPIDER_SQL_KIND_HANDLER)
@@ -2663,7 +2663,7 @@ end:
if (sql_kind == SPIDER_SQL_KIND_SQL)
dbton_hdl->set_order_pos(sql_type);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
DBUG_RETURN(0);
}
@@ -3200,8 +3200,8 @@ int spider_db_fetch_table(
bitmap_is_set(table->write_set, (*field)->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(*field)));
@@ -3209,7 +3209,7 @@ int spider_db_fetch_table(
spider_db_fetch_row(share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
} else {
DBUG_PRINT("info", ("spider bitmap is not set %s",
@@ -3380,8 +3380,8 @@ int spider_db_fetch_key(
bitmap_is_set(table->write_set, field->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(field)));
@@ -3389,7 +3389,7 @@ int spider_db_fetch_key(
spider_db_fetch_row(share, field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
row->next();
@@ -3504,15 +3504,15 @@ int spider_db_fetch_minimum_columns(
bitmap_is_set(table->write_set, (*field)->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(*field)));
if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
row->next();
@@ -5670,8 +5670,8 @@ int spider_db_seek_tmp_table(
bitmap_is_set(table->write_set, (*field)->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(*field)));
@@ -5679,7 +5679,7 @@ int spider_db_seek_tmp_table(
spider_db_fetch_row(spider->share, *field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
row->next();
@@ -5758,8 +5758,8 @@ int spider_db_seek_tmp_key(
bitmap_is_set(table->write_set, field->field_index)
)) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(field)));
@@ -5767,7 +5767,7 @@ int spider_db_seek_tmp_key(
spider_db_fetch_row(spider->share, field, row, ptr_diff)))
DBUG_RETURN(error_num);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
row->next();
@@ -5849,8 +5849,8 @@ int spider_db_seek_tmp_minimum_columns(
bitmap_is_set(table->write_set, (*field)->field_index)));
*/
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
DBUG_PRINT("info", ("spider bitmap is set %s",
SPIDER_field_name_str(*field)));
@@ -5859,7 +5859,7 @@ int spider_db_seek_tmp_minimum_columns(
DBUG_RETURN(error_num);
row->next();
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
}
else if (bitmap_is_set(table->read_set, (*field)->field_index))
@@ -9668,7 +9668,7 @@ int spider_db_open_item_string(
{
THD *thd = NULL;
TABLE *table;
- my_bitmap_map *saved_map;
+ MY_BITMAP *saved_map;
Time_zone *saved_time_zone;
String str_value;
char tmp_buf[MAX_FIELD_WIDTH];
@@ -9697,7 +9697,7 @@ int spider_db_open_item_string(
*/
table = field->table;
thd = table->in_use;
- saved_map = dbug_tmp_use_all_columns(table, table->write_set);
+ saved_map = dbug_tmp_use_all_columns(table, &table->write_set);
item->save_in_field(field, FALSE);
saved_time_zone = thd->variables.time_zone;
thd->variables.time_zone = UTC;
@@ -9742,7 +9742,7 @@ end:
if (thd)
{
thd->variables.time_zone = saved_time_zone;
- dbug_tmp_restore_column_map(table->write_set, saved_map);
+ dbug_tmp_restore_column_map(&table->write_set, saved_map);
}
}
@@ -9784,7 +9784,7 @@ int spider_db_open_item_int(
{
THD *thd = NULL;
TABLE *table;
- my_bitmap_map *saved_map;
+ MY_BITMAP *saved_map;
Time_zone *saved_time_zone;
String str_value;
bool print_quoted_string;
@@ -9812,7 +9812,7 @@ int spider_db_open_item_int(
*/
table = field->table;
thd = table->in_use;
- saved_map = dbug_tmp_use_all_columns(table, table->write_set);
+ saved_map = dbug_tmp_use_all_columns(table, &table->write_set);
item->save_in_field(field, FALSE);
saved_time_zone = thd->variables.time_zone;
thd->variables.time_zone = UTC;
@@ -9858,7 +9858,7 @@ end:
if (thd)
{
thd->variables.time_zone = saved_time_zone;
- dbug_tmp_restore_column_map(table->write_set, saved_map);
+ dbug_tmp_restore_column_map(&table->write_set, saved_map);
}
}
@@ -10178,8 +10178,8 @@ int spider_db_udf_fetch_table(
DBUG_RETURN(HA_ERR_END_OF_FILE);
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->write_set);
#endif
for (
roop_count = 0,
@@ -10192,7 +10192,7 @@ int spider_db_udf_fetch_table(
spider_db_udf_fetch_row(trx, *field, row)))
{
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
DBUG_RETURN(error_num);
}
@@ -10202,7 +10202,7 @@ int spider_db_udf_fetch_table(
for (; roop_count < set_off; roop_count++, field++)
(*field)->set_default();
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->write_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->write_set, tmp_map);
#endif
table->status = 0;
DBUG_RETURN(0);
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index 420f14f9919..86ce0c530b1 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -9690,8 +9690,7 @@ int spider_mbase_handler::append_update_set(
mysql_share->append_column_name(str, (*fields)->field_index);
str->q_append(SPIDER_SQL_EQUAL_STR, SPIDER_SQL_EQUAL_LEN);
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table,
- table->read_set);
+ MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set);
#endif
if (
spider_db_mbase_utility->
@@ -9700,12 +9699,12 @@ int spider_mbase_handler::append_update_set(
str->reserve(SPIDER_SQL_COMMA_LEN)
) {
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
}
str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN);
@@ -12426,8 +12425,8 @@ int spider_mbase_handler::append_insert_values(
bitmap_is_set(table->read_set, (*field)->field_index)
) {
#ifndef DBUG_OFF
- my_bitmap_map *tmp_map =
- dbug_tmp_use_all_columns(table, table->read_set);
+ MY_BITMAP *tmp_map =
+ dbug_tmp_use_all_columns(table, &table->read_set);
#endif
add_value = TRUE;
DBUG_PRINT("info",("spider is_null()=%s",
@@ -12449,7 +12448,7 @@ int spider_mbase_handler::append_insert_values(
if (str->reserve(SPIDER_SQL_NULL_LEN + SPIDER_SQL_COMMA_LEN))
{
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
str->length(0);
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -12463,7 +12462,7 @@ int spider_mbase_handler::append_insert_values(
str->reserve(SPIDER_SQL_COMMA_LEN)
) {
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
str->length(0);
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -12471,7 +12470,7 @@ int spider_mbase_handler::append_insert_values(
}
str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN);
#ifndef DBUG_OFF
- dbug_tmp_restore_column_map(table->read_set, tmp_map);
+ dbug_tmp_restore_column_map(&table->read_set, tmp_map);
#endif
}
}
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index 3734233552d..d36f7624adf 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -2313,7 +2313,7 @@ int ha_tokudb::pack_row_in_buff(
int r = ENOSYS;
memset((void *) row, 0, sizeof(*row));
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set);
// Copy null bytes
memcpy(row_buff, record, table_share->null_bytes);
@@ -2362,7 +2362,7 @@ int ha_tokudb::pack_row_in_buff(
row->size = (size_t) (var_field_data_ptr - row_buff);
r = 0;
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return r;
}
@@ -2758,7 +2758,7 @@ DBT* ha_tokudb::create_dbt_key_from_key(
{
uint32_t size = 0;
uchar* tmp_buff = buff;
- my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set);
key->data = buff;
@@ -2797,7 +2797,7 @@ DBT* ha_tokudb::create_dbt_key_from_key(
key->size = size;
DBUG_DUMP("key", (uchar *) key->data, key->size);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
return key;
}
@@ -2890,7 +2890,7 @@ DBT* ha_tokudb::pack_key(
KEY* key_info = &table->key_info[keynr];
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
- my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
@@ -2927,7 +2927,7 @@ DBT* ha_tokudb::pack_key(
key->size = (buff - (uchar *) key->data);
DBUG_DUMP("key", (uchar *) key->data, key->size);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(key);
}
@@ -2955,7 +2955,7 @@ DBT* ha_tokudb::pack_ext_key(
KEY* key_info = &table->key_info[keynr];
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
- my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
+ MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
@@ -3034,7 +3034,7 @@ DBT* ha_tokudb::pack_ext_key(
key->size = (buff - (uchar *) key->data);
DBUG_DUMP("key", (uchar *) key->data, key->size);
- dbug_tmp_restore_column_map(table->write_set, old_map);
+ dbug_tmp_restore_column_map(&table->write_set, old_map);
DBUG_RETURN(key);
}
#endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS
diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c
index 586289c51fd..0c153793e8e 100644
--- a/strings/ctype-ucs2.c
+++ b/strings/ctype-ucs2.c
@@ -1162,9 +1162,12 @@ static size_t
my_snprintf_mb2(CHARSET_INFO *cs __attribute__((unused)),
char* to, size_t n, const char* fmt, ...)
{
+ size_t ret;
va_list args;
va_start(args,fmt);
- return my_vsnprintf_mb2(to, n, fmt, args);
+ ret= my_vsnprintf_mb2(to, n, fmt, args);
+ va_end(args);
+ return ret;
}
@@ -2391,9 +2394,12 @@ static size_t
my_snprintf_utf32(CHARSET_INFO *cs __attribute__((unused)),
char* to, size_t n, const char* fmt, ...)
{
+ size_t ret;
va_list args;
va_start(args,fmt);
- return my_vsnprintf_utf32(to, n, fmt, args);
+ ret= my_vsnprintf_utf32(to, n, fmt, args);
+ va_end(args);
+ return ret;
}
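
Both ctype-ucs2.c hunks fix the same defect: the old wrappers returned directly out of a va_start()'d region, so va_end() was never reached, which the C standard leaves undefined on ABIs where va_end performs real cleanup. The wrapper below is not the MariaDB function (it calls the standard vsnprintf rather than my_vsnprintf_mb2/my_vsnprintf_utf32); it only demonstrates the capture-result / va_end / return shape the patch introduces.

#include <stdarg.h>
#include <stdio.h>

/* Illustrative wrapper only: same shape as the fixed my_snprintf_mb2 and
   my_snprintf_utf32, but built on the standard library. */
static size_t snprintf_wrapper(char *to, size_t n, const char *fmt, ...)
{
  size_t ret;
  va_list args;
  va_start(args, fmt);
  ret= (size_t) vsnprintf(to, n, fmt, args);
  va_end(args);               /* every va_start must be paired with va_end */
  return ret;
}

int main(void)
{
  char buf[32];
  size_t len= snprintf_wrapper(buf, sizeof(buf), "%s=%d", "answer", 42);
  printf("%s (%zu bytes)\n", buf, len);
  return 0;
}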
diff --git a/unittest/mysys/stacktrace-t.c b/unittest/mysys/stacktrace-t.c
index 8fa0db15b36..d8408f80d76 100644
--- a/unittest/mysys/stacktrace-t.c
+++ b/unittest/mysys/stacktrace-t.c
@@ -29,6 +29,7 @@ void test_my_safe_print_str()
memcpy(b_stack, "LEGAL", 6);
memcpy(b_bss, "LEGAL", 6);
+#ifdef HAVE_STACKTRACE
#ifndef __SANITIZE_ADDRESS__
fprintf(stderr, "\n===== stack =====\n");
my_safe_print_str(b_stack, 65535);
@@ -48,6 +49,7 @@ void test_my_safe_print_str()
fprintf(stderr, "\n===== (const char*) 1 =====\n");
my_safe_print_str((const char*)1, 5);
#endif /*__SANITIZE_ADDRESS__*/
+#endif /*HAVE_STACKTRACE*/
free(b_heap);
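
The stacktrace-t.c hunk wraps the body of test_my_safe_print_str() in an additional HAVE_STACKTRACE guard, nested outside the existing __SANITIZE_ADDRESS__ exclusion, so builds without a stacktrace implementation still compile and the deliberately invalid reads stay disabled under AddressSanitizer. A minimal stand-alone sketch of that nesting follows, with a hypothetical print helper in place of my_safe_print_str().

#include <stdio.h>

/* Hypothetical miniature of the guard added above: the probing code is only
   compiled when a stacktrace implementation exists and ASan is not active. */
static void probe_string(const char *s)
{
#ifdef HAVE_STACKTRACE
#ifndef __SANITIZE_ADDRESS__
  fprintf(stderr, "===== probe =====\n%s\n", s);  /* stand-in for my_safe_print_str() */
#endif /* __SANITIZE_ADDRESS__ */
#endif /* HAVE_STACKTRACE */
  (void) s;   /* keep the parameter referenced when the body is compiled out */
}

int main(void)
{
  probe_string("LEGAL");
  return 0;
}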