author      Alexey Botchkov <holyfoot@askmonty.org>    2021-02-10 16:25:55 +0400
committer   Alexey Botchkov <holyfoot@askmonty.org>    2021-02-10 16:25:55 +0400
commit      76b26a165b2e69e53c951abf43cae51f79837c4c (patch)
tree        d1dbe60b8413be77dbe3149113392bad7ab51bef
parent      1015cbde5985e7e89c887ab30ce7c772671194c5 (diff)
download    mariadb-git-bb-17399-hf.tar.gz

MDEV-17399 Add support for JSON_TABLE. (bb-17399-hf)
A dedicated table handler for table functions is introduced and used to implement JSON_TABLE.
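For reference, a minimal query using the syntax this patch enables (taken from the new test cases below):

    select * from
    json_table(
      '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
      '$[*]' columns (id for ordinality,
                      jpath varchar(100) path '$.a',
                      jexst int exists path '$.b')
    ) as tt;

Each element matched by the row path '$[*]' becomes one row: FOR ORDINALITY numbers the rows, PATH extracts a value into a typed column, and EXISTS reports whether the path matched.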
-rw-r--r--  libmysqld/CMakeLists.txt                        1
-rw-r--r--  mysql-test/main/json_table_mysql.result      2173
-rw-r--r--  mysql-test/main/json_table_mysql.test        1538
-rw-r--r--  mysql-test/main/opt_trace.test                 15
-rw-r--r--  mysql-test/main/query_cache.result             16
-rw-r--r--  mysql-test/main/query_cache.test               11
-rw-r--r--  mysql-test/suite/json/r/json_table.result     371
-rw-r--r--  mysql-test/suite/json/r/json_table2.result    266
-rw-r--r--  mysql-test/suite/json/t/json_table.test       284
-rw-r--r--  mysql-test/suite/json/t/json_table2.test     1563
-rw-r--r--  sql/CMakeLists.txt                              1
-rw-r--r--  sql/field.cc                                    3
-rw-r--r--  sql/field.h                                     7
-rw-r--r--  sql/handler.cc                                  6
-rw-r--r--  sql/handler.h                                   2
-rw-r--r--  sql/item_geofunc.cc                             7
-rw-r--r--  sql/item_jsonfunc.cc                           46
-rw-r--r--  sql/item_jsonfunc.h                             7
-rw-r--r--  sql/lex.h                                       5
-rw-r--r--  sql/opt_subselect.cc                           12
-rw-r--r--  sql/opt_trace.cc                                6
-rw-r--r--  sql/share/errmsg-utf8.txt                       2
-rw-r--r--  sql/sql_acl.cc                                 10
-rw-r--r--  sql/sql_base.cc                                12
-rw-r--r--  sql/sql_class.h                                 4
-rw-r--r--  sql/sql_explain.cc                              6
-rw-r--r--  sql/sql_explain.h                               1
-rw-r--r--  sql/sql_lex.h                                   6
-rw-r--r--  sql/sql_parse.cc                               12
-rw-r--r--  sql/sql_select.cc                              25
-rw-r--r--  sql/sql_select.h                                1
-rw-r--r--  sql/sql_view.cc                                 2
-rw-r--r--  sql/sql_yacc.yy                               220
-rw-r--r--  sql/table.cc                                   16
-rw-r--r--  sql/table.h                                     4
-rw-r--r--  sql/table_function.cc                        1249
-rw-r--r--  sql/table_function.h                          234
37 files changed, 8101 insertions, 43 deletions
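Judging from the diffstat, the bulk of the implementation is the new table-function handler in sql/table_function.cc and sql/table_function.h, plus the grammar additions in sql/sql_yacc.yy; the remaining files are small hooks into the optimizer, EXPLAIN and privilege code. The new test suites also exercise ON EMPTY / ON ERROR defaults and NESTED PATH columns; an illustrative query combining those clauses (assembled from the test cases below, not a verbatim test) would be:

    select * from
    json_table(
      '[{"a":2, "n": [{"l":1},{"l":11}]}]',
      '$[*]' columns (
        id for ordinality,
        jpath varchar(100) path '$.a' default '33' on empty default '66' on error,
        nested path '$.n[*]' columns (
          id_n for ordinality,
          jpath_n varchar(50) path '$.l')
      )
    ) as tt;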
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index e370ac80a8d..5944d370f91 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -134,6 +134,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/item_vers.cc
../sql/opt_trace.cc
../sql/xa.cc
+ ../sql/table_function.cc
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
)
diff --git a/mysql-test/main/json_table_mysql.result b/mysql-test/main/json_table_mysql.result
new file mode 100644
index 00000000000..025a0455976
--- /dev/null
+++ b/mysql-test/main/json_table_mysql.result
@@ -0,0 +1,2173 @@
+#
+# WL#8867: Add JSON_TABLE table function
+#
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b')
+) as tt;
+id jpath jexst
+1 3 0
+2 2 0
+3 NULL 1
+4 0 0
+explain select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b')
+) as tt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id` AS `id`,`tt`.`jpath` AS `jpath`,`tt`.`jexst` AS `jexst` from json_table('[{"a":"3"},{"a":2},{"b":1},{"a":0}]', '$[*]' columns (`id` for ordinality, `jpath` varchar(100) character set utf8mb4 path '$.a', `jexst` int exists path '$.b')) `tt`
+explain format=tree select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b')
+) as tt;
+EXPLAIN
+-> Materialize table function
+
+select * from
+json_table(
+'[{"x":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a' default '[99]' on error,
+jexst int exists path '$.b')
+) as tt;
+ERROR 42000: Invalid default value for 'jpath'
+select * from
+json_table(
+'[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a'
+ default '33' on empty
+default '66' on error,
+jsn_path json path '$.a' default '{"x":33}' on empty,
+jexst int exists path '$.b')
+) as tt;
+id jpath jsn_path jexst
+1 33 {"x": 33} 0
+2 2 2 0
+3 33 {"x": 33} 1
+4 0 0 0
+5 66 [1, 2] 0
+explain select * from
+json_table(
+'[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a'
+ default '33' on empty
+default '66' on error,
+jsn_path json path '$.a' default '{"x":33}' on empty,
+jexst int exists path '$.b')
+) as tt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id` AS `id`,`tt`.`jpath` AS `jpath`,`tt`.`jsn_path` AS `jsn_path`,`tt`.`jexst` AS `jexst` from json_table('[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]', '$[*]' columns (`id` for ordinality, `jpath` varchar(100) character set utf8mb4 path '$.a' default '33' on empty default '66' on error, `jsn_path` json path '$.a' default '{"x":33}' on empty, `jexst` int exists path '$.b')) `tt`
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0.33},{"a":"asd"}]',
+'$[*]' columns (id for ordinality,
+jpath_i int path '$.a'
+ default '33' on empty
+default '66' on error,
+jpath_r real path '$.a'
+ default '33.3' on empty
+default '77.7' on error,
+jsn_path json path '$.a' default '{"x":33}' on empty,
+jexst int exists path '$.b')
+) as tt;
+id jpath_i jpath_r jsn_path jexst
+1 3 3 "3" 0
+2 2 2 2 0
+3 33 33.3 {"x": 33} 1
+4 0 0.33 0.33 0
+5 66 77.7 "asd" 0
+explain select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0.33},{"a":"asd"}]',
+'$[*]' columns (id for ordinality,
+jpath_i int path '$.a'
+ default '33' on empty
+default '66' on error,
+jpath_r real path '$.a'
+ default '33.3' on empty
+default '77.7' on error,
+jsn_path json path '$.a' default '{"x":33}' on empty,
+jexst int exists path '$.b')
+) as tt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id` AS `id`,`tt`.`jpath_i` AS `jpath_i`,`tt`.`jpath_r` AS `jpath_r`,`tt`.`jsn_path` AS `jsn_path`,`tt`.`jexst` AS `jexst` from json_table('[{"a":"3"},{"a":2},{"b":1},{"a":0.33},{"a":"asd"}]', '$[*]' columns (`id` for ordinality, `jpath_i` int path '$.a' default '33' on empty default '66' on error, `jpath_r` double path '$.a' default '33.3' on empty default '77.7' on error, `jsn_path` json path '$.a' default '{"x":33}' on empty, `jexst` int exists path '$.b')) `tt`
+select * from
+json_table(
+'[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a'
+ default '33' on empty
+default '66' on error,
+jsn_path json path '$.a' default '{"x":33}' on empty,
+jexst int exists path '$.b')
+) as tt;
+id jpath jsn_path jexst
+1 33 {"x": 33} 0
+2 2 2 0
+3 33 {"x": 33} 1
+4 0 0 0
+5 66 [1, 2] 0
+explain select * from
+json_table(
+'[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a'
+ default '33' on empty
+default '66' on error,
+jsn_path json path '$.a' default '{"x":33}' on empty,
+jexst int exists path '$.b')
+) as tt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id` AS `id`,`tt`.`jpath` AS `jpath`,`tt`.`jsn_path` AS `jsn_path`,`tt`.`jexst` AS `jexst` from json_table('[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]', '$[*]' columns (`id` for ordinality, `jpath` varchar(100) character set utf8mb4 path '$.a' default '33' on empty default '66' on error, `jsn_path` json path '$.a' default '{"x":33}' on empty, `jexst` int exists path '$.b')) `tt`
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a',
+json_path json path '$.a',
+jexst int exists path '$.b')
+) as tt;
+id jpath json_path jexst
+1 3 "3" 0
+2 2 2 0
+3 NULL NULL 1
+4 0 0 0
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a',
+json_path json path '$.a',
+jexst int exists path '$.b')
+) as tt
+where id = 3;
+id jpath json_path jexst
+3 NULL NULL 1
+explain select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a',
+json_path json path '$.a',
+jexst int exists path '$.b')
+) as tt
+where id = 3;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ref <auto_key0> <auto_key0> 9 const 1 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id` AS `id`,`tt`.`jpath` AS `jpath`,`tt`.`json_path` AS `json_path`,`tt`.`jexst` AS `jexst` from json_table('[{"a":"3"},{"a":2},{"b":1},{"a":0}]', '$[*]' columns (`id` for ordinality, `jpath` varchar(100) character set utf8mb4 path '$.a', `json_path` json path '$.a', `jexst` int exists path '$.b')) `tt` where (`tt`.`id` = 3)
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a' error on empty,
+jexst int exists path '$.b')
+) as tt;
+ERROR 22035: Missing value for JSON_TABLE column 'jpath'
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"a":1},{"a":[0,1]}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a' error on error,
+jexst int exists path '$.b')
+) as tt;
+ERROR 2203F: Can't store an array or an object in the scalar JSON_TABLE column 'jpath'
+select * from
+json_table(
+'!#@$!@#$',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b')
+) as tt;
+ERROR 22032: Invalid JSON text in argument 1 to function json_table: "Invalid value." at position 0.
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+"!@#$!@#$" columns (id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b')
+) as tt;
+ERROR 42000: Invalid JSON path expression. The error is around character position 1.
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+jpath varchar(100) path "!@#$!@#$",
+jexst int exists path '$.b')
+) as tt;
+ERROR 42000: Invalid JSON path expression. The error is around character position 1.
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+id for ordinality)
+) as tt;
+ERROR 42S21: Duplicate column name 'id'
+select * from
+json_table(
+'[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+'$[*]' columns (id for ordinality,
+_id for ordinality)
+) as tt;
+id _id
+1 1
+2 2
+3 3
+4 4
+select * from
+json_table(
+'[
+ {"a":"3", "n": { "l": 1}},
+ {"a":2, "n": { "l": 1}},
+ {"b":1, "n": { "l": 1}},
+ {"a":0, "n": { "l": 1}}
+ ]',
+'$[*]' columns (
+id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b',
+nested path '$.n' columns (
+id_n for ordinality,
+jpath_n varchar(50) path '$.l')
+)
+) as tt;
+id jpath jexst id_n jpath_n
+1 3 0 1 1
+2 2 0 1 1
+3 NULL 1 1 1
+4 0 0 1 1
+explain select * from
+json_table(
+'[
+ {"a":"3", "n": { "l": 1}},
+ {"a":2, "n": { "l": 1}},
+ {"b":1, "n": { "l": 1}},
+ {"a":0, "n": { "l": 1}}
+ ]',
+'$[*]' columns (
+id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b',
+nested path '$.n' columns (
+id_n for ordinality,
+jpath_n varchar(50) path '$.l')
+)
+) as tt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id` AS `id`,`tt`.`jpath` AS `jpath`,`tt`.`jexst` AS `jexst`,`tt`.`id_n` AS `id_n`,`tt`.`jpath_n` AS `jpath_n` from json_table('[\n {"a":"3", "n": { "l": 1}},\n {"a":2, "n": { "l": 1}},\n {"b":1, "n": { "l": 1}},\n {"a":0, "n": { "l": 1}}\n ]', '$[*]' columns (`id` for ordinality, `jpath` varchar(100) character set utf8mb4 path '$.a', `jexst` int exists path '$.b', nested path '$.n' columns (`id_n` for ordinality, `jpath_n` varchar(50) character set utf8mb4 path '$.l'))) `tt`
+explain format=json select * from
+json_table(
+'[
+ {"a":"3", "n": { "l": 1}},
+ {"a":2, "n": { "l": 1}},
+ {"b":1, "n": { "l": 1}},
+ {"a":0, "n": { "l": 1}}
+ ]',
+'$[*]' columns (
+id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b',
+nested path '$.n' columns (
+id_n for ordinality,
+jpath_n varchar(50) path '$.l')
+)
+) as tt;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "cost_info": {
+ "query_cost": "2.72"
+ },
+ "table": {
+ "table_name": "tt",
+ "access_type": "ALL",
+ "rows_examined_per_scan": 2,
+ "rows_produced_per_join": 2,
+ "filtered": "100.00",
+ "table_function": "json_table",
+ "using_temporary_table": true,
+ "cost_info": {
+ "read_cost": "2.52",
+ "eval_cost": "0.20",
+ "prefix_cost": "2.73",
+ "data_read_per_join": "1K"
+ },
+ "used_columns": [
+ "id",
+ "jpath",
+ "jexst",
+ "id_n",
+ "jpath_n"
+ ]
+ }
+ }
+}
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id` AS `id`,`tt`.`jpath` AS `jpath`,`tt`.`jexst` AS `jexst`,`tt`.`id_n` AS `id_n`,`tt`.`jpath_n` AS `jpath_n` from json_table('[\n {"a":"3", "n": { "l": 1}},\n {"a":2, "n": { "l": 1}},\n {"b":1, "n": { "l": 1}},\n {"a":0, "n": { "l": 1}}\n ]', '$[*]' columns (`id` for ordinality, `jpath` varchar(100) character set utf8mb4 path '$.a', `jexst` int exists path '$.b', nested path '$.n' columns (`id_n` for ordinality, `jpath_n` varchar(50) character set utf8mb4 path '$.l'))) `tt`
+select * from
+json_table(
+'[
+ {"a":2, "n": [{ "l": 1}, {"l": 11}]},
+ {"a":1, "n": [{ "l": 2}, {"l": 22}]}
+ ]',
+'$[*]' columns (
+id for ordinality,
+jpath varchar(50) path '$.a',
+nested path '$.n[*]' columns (
+id_n for ordinality,
+jpath_n varchar(50) path '$.l'),
+nested path '$.n[*]' columns (
+id_m for ordinality,
+jpath_m varchar(50) path '$.l')
+)
+) as tt;
+id jpath id_n jpath_n id_m jpath_m
+1 2 1 1 NULL NULL
+1 2 2 11 NULL NULL
+1 2 NULL NULL 1 1
+1 2 NULL NULL 2 11
+2 1 1 2 NULL NULL
+2 1 2 22 NULL NULL
+2 1 NULL NULL 1 2
+2 1 NULL NULL 2 22
+explain select * from
+json_table(
+'[
+ {"a":2, "n": [{ "l": 1}, {"l": 11}]},
+ {"a":1, "n": [{ "l": 2}, {"l": 22}]}
+ ]',
+'$[*]' columns (
+id for ordinality,
+jpath varchar(50) path '$.a',
+nested path '$.n[*]' columns (
+id_n for ordinality,
+jpath_n varchar(50) path '$.l'),
+nested path '$.n[*]' columns (
+id_m for ordinality,
+jpath_m varchar(50) path '$.l')
+)
+) as tt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id` AS `id`,`tt`.`jpath` AS `jpath`,`tt`.`id_n` AS `id_n`,`tt`.`jpath_n` AS `jpath_n`,`tt`.`id_m` AS `id_m`,`tt`.`jpath_m` AS `jpath_m` from json_table('[\n {"a":2, "n": [{ "l": 1}, {"l": 11}]},\n {"a":1, "n": [{ "l": 2}, {"l": 22}]}\n ]', '$[*]' columns (`id` for ordinality, `jpath` varchar(50) character set utf8mb4 path '$.a', nested path '$.n[*]' columns (`id_n` for ordinality, `jpath_n` varchar(50) character set utf8mb4 path '$.l'), nested path '$.n[*]' columns (`id_m` for ordinality, `jpath_m` varchar(50) character set utf8mb4 path '$.l'))) `tt`
+select * from json_table(
+'[
+ {"a":"3", "n": ["b","a","c"]},
+ {"a":2, "n": [1,2]},
+ {"b":1, "n": ["zzz"]},
+ {"a":0, "n": [0.1, 0.02]}
+ ]',
+'$[*]' columns (
+id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b',
+nested path '$.n[*]' columns (
+id_n for ordinality,
+jpath_n varchar(50) path '$')
+)
+) as tt;
+id jpath jexst id_n jpath_n
+1 3 0 1 b
+1 3 0 2 a
+1 3 0 3 c
+2 2 0 1 1
+2 2 0 2 2
+3 NULL 1 1 zzz
+4 0 0 1 0.1
+4 0 0 2 0.02
+select * from json_table(
+'[
+ {"a":"3", "n": ["b","a","c"]},
+ {"a":2, "n": [1,2]},
+ {"b":1, "n": ["zzz"]},
+ {"a":0, "n": [0.1, 0.02]}
+ ]',
+'$[*]' columns (
+id for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b',
+nested path '$.n[*]' columns (
+id_n1 for ordinality,
+jpath_n1 varchar(50) path '$') ,
+nested path '$.n[*]' columns (
+id_n2 for ordinality,
+jpath_n2 varchar(50) path '$')
+)
+) as tt;
+id jpath jexst id_n1 jpath_n1 id_n2 jpath_n2
+1 3 0 1 b NULL NULL
+1 3 0 2 a NULL NULL
+1 3 0 3 c NULL NULL
+1 3 0 NULL NULL 1 b
+1 3 0 NULL NULL 2 a
+1 3 0 NULL NULL 3 c
+2 2 0 1 1 NULL NULL
+2 2 0 2 2 NULL NULL
+2 2 0 NULL NULL 1 1
+2 2 0 NULL NULL 2 2
+3 NULL 1 1 zzz NULL NULL
+3 NULL 1 NULL NULL 1 zzz
+4 0 0 1 0.1 NULL NULL
+4 0 0 2 0.02 NULL NULL
+4 0 0 NULL NULL 1 0.1
+4 0 0 NULL NULL 2 0.02
+select * from json_table(
+'[
+ {"a":"3", "n": [ {"ll":["b1","b2","b3"]}, {"ll": ["a1","a2"]},
+ {"ll":["c"]} ]},
+ {"a":2, "n": [{"ll":[1,11,111]},{"ll":[2]}]},
+ {"b":1, "n": [{"ll":["zzz"]}]},
+ {"a":0, "n": [{"ll":[0.1,0.01]}, {"ll":[0.02,0.002,0.0002]}]}
+ ]',
+'$[*]' columns (
+id1 for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b',
+nested path '$.n[*]' columns (
+id2 for ordinality,
+nested path '$.ll[*]' columns (
+id3 for ordinality,
+jpath_3 varchar(50) path '$')
+),
+nested path '$.n[*]' columns (
+id4 for ordinality,
+jpath_4 json path '$')
+)
+) as tt;
+id1 jpath jexst id2 id3 jpath_3 id4 jpath_4
+1 3 0 1 1 b1 NULL NULL
+1 3 0 1 2 b2 NULL NULL
+1 3 0 1 3 b3 NULL NULL
+1 3 0 2 1 a1 NULL NULL
+1 3 0 2 2 a2 NULL NULL
+1 3 0 3 1 c NULL NULL
+1 3 0 NULL NULL NULL 1 {"ll": ["b1", "b2", "b3"]}
+1 3 0 NULL NULL NULL 2 {"ll": ["a1", "a2"]}
+1 3 0 NULL NULL NULL 3 {"ll": ["c"]}
+2 2 0 1 1 1 NULL NULL
+2 2 0 1 2 11 NULL NULL
+2 2 0 1 3 111 NULL NULL
+2 2 0 2 1 2 NULL NULL
+2 2 0 NULL NULL NULL 1 {"ll": [1, 11, 111]}
+2 2 0 NULL NULL NULL 2 {"ll": [2]}
+3 NULL 1 1 1 zzz NULL NULL
+3 NULL 1 NULL NULL NULL 1 {"ll": ["zzz"]}
+4 0 0 1 1 0.1 NULL NULL
+4 0 0 1 2 0.01 NULL NULL
+4 0 0 2 1 0.02 NULL NULL
+4 0 0 2 2 0.002 NULL NULL
+4 0 0 2 3 0.0002 NULL NULL
+4 0 0 NULL NULL NULL 1 {"ll": [0.1, 0.01]}
+4 0 0 NULL NULL NULL 2 {"ll": [0.02, 0.002, 0.0002]}
+explain select * from json_table(
+'[
+ {"a":"3", "n": [ {"ll":["b1","b2","b3"]}, {"ll": ["a1","a2"]},
+ {"ll":["c"]} ]},
+ {"a":2, "n": [{"ll":[1,11,111]},{"ll":[2]}]},
+ {"b":1, "n": [{"ll":["zzz"]}]},
+ {"a":0, "n": [{"ll":[0.1,0.01]}, {"ll":[0.02,0.002,0.0002]}]}
+ ]',
+'$[*]' columns (
+id1 for ordinality,
+jpath varchar(100) path '$.a',
+jexst int exists path '$.b',
+nested path '$.n[*]' columns (
+id2 for ordinality,
+nested path '$.ll[*]' columns (
+id3 for ordinality,
+jpath_3 varchar(50) path '$')
+),
+nested path '$.n[*]' columns (
+id4 for ordinality,
+jpath_4 json path '$')
+)
+) as tt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`id1` AS `id1`,`tt`.`jpath` AS `jpath`,`tt`.`jexst` AS `jexst`,`tt`.`id2` AS `id2`,`tt`.`id3` AS `id3`,`tt`.`jpath_3` AS `jpath_3`,`tt`.`id4` AS `id4`,`tt`.`jpath_4` AS `jpath_4` from json_table('[\n {"a":"3", "n": [ {"ll":["b1","b2","b3"]}, {"ll": ["a1","a2"]},\n {"ll":["c"]} ]},\n {"a":2, "n": [{"ll":[1,11,111]},{"ll":[2]}]},\n {"b":1, "n": [{"ll":["zzz"]}]},\n {"a":0, "n": [{"ll":[0.1,0.01]}, {"ll":[0.02,0.002,0.0002]}]}\n ]', '$[*]' columns (`id1` for ordinality, `jpath` varchar(100) character set utf8mb4 path '$.a', `jexst` int exists path '$.b', nested path '$.n[*]' columns (`id2` for ordinality, nested path '$.ll[*]' columns (`id3` for ordinality, `jpath_3` varchar(50) character set utf8mb4 path '$')), nested path '$.n[*]' columns (`id4` for ordinality, `jpath_4` json path '$'))) `tt`
+ord should be 1,1,1,2, which tells that the first two values of 'l' are
+from the same object, and the next two are from different objects
+SELECT *
+FROM JSON_TABLE(
+'[{"a": "a_val",
+ "b": [
+ {"c": "c_val",
+ "l": [1,2]}
+ ]
+ }, {"a": "a_val",
+ "b": [
+ {"c": "c_val",
+ "l": [11]},
+ {"c": "c_val",
+ "l": [22]}
+ ]
+ }]',
+'$[*]' COLUMNS (
+apath VARCHAR(10) PATH '$.a',
+NESTED PATH '$.b[*]' COLUMNS (
+bpath VARCHAR(10) PATH '$.c',
+ord FOR ORDINALITY,
+NESTED PATH '$.l[*]' COLUMNS (
+lpath varchar(10) PATH '$'
+ )
+)
+)) as jt;
+apath bpath ord lpath
+a_val c_val 1 1
+a_val c_val 1 2
+a_val c_val 1 11
+a_val c_val 2 22
+explain SELECT *
+FROM JSON_TABLE(
+'[{"a": "a_val",
+ "b": [
+ {"c": "c_val",
+ "l": [1,2]}
+ ]
+ }, {"a": "a_val",
+ "b": [
+ {"c": "c_val",
+ "l": [11]},
+ {"c": "c_val",
+ "l": [22]}
+ ]
+ }]',
+'$[*]' COLUMNS (
+apath VARCHAR(10) PATH '$.a',
+NESTED PATH '$.b[*]' COLUMNS (
+bpath VARCHAR(10) PATH '$.c',
+ord FOR ORDINALITY,
+NESTED PATH '$.l[*]' COLUMNS (
+lpath varchar(10) PATH '$'
+ )
+)
+)) as jt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `jt`.`apath` AS `apath`,`jt`.`bpath` AS `bpath`,`jt`.`ord` AS `ord`,`jt`.`lpath` AS `lpath` from json_table('[{"a": "a_val",\n "b": [\n {"c": "c_val",\n "l": [1,2]}\n ]\n }, {"a": "a_val",\n "b": [\n {"c": "c_val",\n "l": [11]},\n {"c": "c_val",\n "l": [22]}\n ]\n }]', '$[*]' columns (`apath` varchar(10) character set utf8mb4 path '$.a', nested path '$.b[*]' columns (`bpath` varchar(10) character set utf8mb4 path '$.c', `ord` for ordinality, nested path '$.l[*]' columns (`lpath` varchar(10) character set utf8mb4 path '$')))) `jt`
+CREATE TABLE jt( i JSON );
+SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+WHERE a=1;
+i a
+EXPLAIN SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+WHERE a=1;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 1 100.00 NULL
+1 SIMPLE tt NULL ref <auto_key0> <auto_key0> 5 const 1 100.00 Table function: json_table; Using temporary; Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`jt`.`i` AS `i`,`tt`.`a` AS `a` from `test`.`jt` join json_table(`test`.`jt`.`i`, '$' columns (`a` int path '$')) `tt` where (`tt`.`a` = 1)
+SELECT * FROM (
+SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+WHERE a=1) AS ttt;
+i a
+EXPLAIN SELECT * FROM (
+SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+WHERE a=1) AS ttt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 1 100.00 NULL
+1 SIMPLE tt NULL ref <auto_key0> <auto_key0> 5 const 1 100.00 Table function: json_table; Using temporary; Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`jt`.`i` AS `i`,`tt`.`a` AS `a` from `test`.`jt` join json_table(`test`.`jt`.`i`, '$' columns (`a` int path '$')) `tt` where (`tt`.`a` = 1)
+DROP TABLE jt;
+SELECT * FROM JSON_TABLE(CAST(CAST("11:22:33" AS TIME) AS JSON),
+'$' COLUMNS (dt DATE PATH '$')) as tt;
+dt
+NULL
+SELECT * FROM JSON_TABLE(CAST(CAST("11:22:33" AS TIME) AS JSON),
+'$' COLUMNS (dt TIME PATH '$')) as tt;
+dt
+11:22:33
+SELECT * FROM JSON_TABLE(CAST(CAST("2001.02.03" AS DATE) AS JSON),
+'$' COLUMNS (dt DATE PATH '$')) as tt;
+dt
+2001-02-03
+CREATE VIEW v AS
+SELECT * FROM JSON_TABLE('[1,2,3]',
+'$[*]' COLUMNS (num INT PATH '$.a'
+ DEFAULT '123' ON EMPTY
+DEFAULT '456' ON ERROR)) AS jt;
+SELECT * FROM v;
+num
+123
+123
+123
+SHOW CREATE VIEW v;
+View Create View character_set_client collation_connection
+v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `jt`.`num` AS `num` from json_table('[1,2,3]', '$[*]' columns (`num` int path '$.a' default '123' on empty default '456' on error)) `jt` utf8mb4 utf8mb4_0900_ai_ci
+DROP VIEW v;
+SELECT * FROM JSON_TABLE('"asdf"',
+'$' COLUMNS (a INT PATH '$' ERROR ON ERROR)) AS jt;
+ERROR 22018: Invalid JSON value for CAST to INTEGER from column a at row 1
+SELECT * FROM
+JSON_TABLE('[{"a":1},{"a":2}]',
+'$' COLUMNS (a INT PATH '$[*].a' ERROR ON ERROR)) AS jt;
+ERROR 2203F: Can't store an array or an object in the scalar JSON_TABLE column 'a'
+SELECT * FROM
+JSON_TABLE('[{"a":1},{"a":2}]',
+'$' COLUMNS (a JSON PATH '$[*].a' ERROR ON ERROR)) AS jt;
+a
+[1, 2]
+SELECT * FROM
+JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$' ERROR ON ERROR)) AS jt;
+ERROR 22003: Value is out of range for JSON_TABLE's column 'a'
+SELECT * FROM
+JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$')) AS jt;
+a
+NULL
+SELECT * FROM
+JSON_TABLE('{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{}}}}}}}}}}}}}}}}}}}',
+'$' COLUMNS (i0 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i1 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i2 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i3 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i4 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i5 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i6 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i7 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i8 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i9 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i10 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i11 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i12 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i13 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i14 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i15 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i16 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i17 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i18 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i19 INT PATH '$.a',
+NESTED PATH '$.b' COLUMNS (i20 INT PATH '$.a'
+ )))))))))))))))))))))) jt;
+ERROR 42000: More than supported 16 NESTED PATHs were found in JSON_TABLE 'jt'
+CREATE TABLE t1(id int, jd JSON);
+INSERT INTO t1 values (1, '[1,3,5]'),(2,'[2,4,6]');
+SELECT id, jt.* FROM t1,
+JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+val INT PATH '$')) AS jt;
+id jid val
+1 1 1
+1 2 3
+1 3 5
+2 1 2
+2 2 4
+2 3 6
+SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.*
+FROM t1,
+JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+val INT PATH '$')) AS jt;
+id jid val
+1 1 1
+1 2 3
+1 3 5
+2 1 2
+2 2 4
+2 3 6
+EXPLAIN SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.*
+FROM t1,
+JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+val INT PATH '$')) AS jt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 2 100.00 NULL
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`jt`.`jid` AS `jid`,`jt`.`val` AS `val` from `test`.`t1` join json_table(`test`.`t1`.`jd`, '$[*]' columns (`jid` for ordinality, `val` int path '$')) `jt`
+SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.*
+FROM t1,
+JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+val INT PATH '$')) AS jt,
+t1 AS t2;
+id id jid val
+1 1 1 1
+1 1 2 3
+1 1 3 5
+1 2 1 1
+1 2 2 3
+1 2 3 5
+2 1 1 2
+2 1 2 4
+2 1 3 6
+2 2 1 2
+2 2 2 4
+2 2 3 6
+EXPLAIN SELECT t1.id, t2.id, jt.*
+FROM t1,
+JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+val INT PATH '$')) AS jt,
+t1 AS t2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 2 100.00 NULL
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+1 SIMPLE t2 NULL ALL NULL NULL NULL NULL 2 100.00 Using join buffer (hash join)
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t2`.`id` AS `id`,`jt`.`jid` AS `jid`,`jt`.`val` AS `val` from `test`.`t1` join json_table(`test`.`t1`.`jd`, '$[*]' columns (`jid` for ordinality, `val` int path '$')) `jt` join `test`.`t1` `t2`
+EXPLAIN SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.*
+FROM t1,
+JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+val INT PATH '$')) AS jt,
+t1 AS t2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 2 100.00 NULL
+1 SIMPLE t2 NULL ALL NULL NULL NULL NULL 2 100.00 Using join buffer (hash join)
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select /*+ JOIN_ORDER(@`select#1` `t2`,`jt`) */ `test`.`t1`.`id` AS `id`,`test`.`t2`.`id` AS `id`,`jt`.`jid` AS `jid`,`jt`.`val` AS `val` from `test`.`t1` join json_table(`test`.`t1`.`jd`, '$[*]' columns (`jid` for ordinality, `val` int path '$')) `jt` join `test`.`t1` `t2`
+SELECT * FROM t1 WHERE id IN
+(SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS
+(id INT PATH '$')) AS jt);
+id jd
+1 [1, 3, 5]
+2 [2, 4, 6]
+EXPLAIN SELECT * FROM t1 WHERE id IN
+(SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS
+(id INT PATH '$')) AS jt);
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 2 100.00 Using where
+1 SIMPLE jt NULL ref <auto_key0> <auto_key0> 5 test.t1.id 2 100.00 Table function: json_table; Using temporary; Using index; FirstMatch(t1)
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`jd` AS `jd` from `test`.`t1` semi join (json_table('[1,2]', '$[*]' columns (`id` int path '$')) `jt`) where (`jt`.`id` = `test`.`t1`.`id`)
+SELECT * FROM t1 WHERE id IN
+(SELECT * FROM JSON_TABLE(t1.jd, '$[*]' COLUMNS
+(id INT PATH '$')) AS jt);
+id jd
+1 [1, 3, 5]
+2 [2, 4, 6]
+EXPLAIN SELECT * FROM t1 WHERE id IN
+(SELECT * FROM JSON_TABLE(t1.jd, '$[*]' COLUMNS
+(id INT PATH '$')) AS jt);
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 2 100.00 Using where
+1 SIMPLE jt NULL ref <auto_key0> <auto_key0> 5 test.t1.id 2 100.00 Table function: json_table; Using temporary; Using index; FirstMatch(t1)
+Warnings:
+Note 1276 Field or reference 'test.t1.jd' of SELECT #2 was resolved in SELECT #1
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`jd` AS `jd` from `test`.`t1` semi join (json_table(`test`.`t1`.`jd`, '$[*]' columns (`id` int path '$')) `jt`) where (`jt`.`id` = `test`.`t1`.`id`)
+SELECT id, jt1.*, jt2.*
+FROM t1, JSON_TABLE(jd, '$' COLUMNS (data1 JSON PATH '$')) AS jt1,
+JSON_TABLE(data1, '$[*]' COLUMNS (id2 INT PATH '$')) AS jt2;
+id data1 id2
+1 [1, 3, 5] 1
+1 [1, 3, 5] 3
+1 [1, 3, 5] 5
+2 [2, 4, 6] 2
+2 [2, 4, 6] 4
+2 [2, 4, 6] 6
+EXPLAIN SELECT id, jt1.*, jt2.*
+FROM t1, JSON_TABLE(jd, '$' COLUMNS (data1 JSON PATH '$')) AS jt1,
+JSON_TABLE(data1, '$[*]' COLUMNS (id2 INT PATH '$')) AS jt2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 2 100.00 NULL
+1 SIMPLE jt1 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+1 SIMPLE jt2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`jt1`.`data1` AS `data1`,`jt2`.`id2` AS `id2` from `test`.`t1` join json_table(`test`.`t1`.`jd`, '$' columns (`data1` json path '$')) `jt1` join json_table(`jt1`.`data1`, '$[*]' columns (`id2` int path '$')) `jt2`
+DROP TABLE t1;
+SELECT * FROM JSON_TABLE ('"asdf"', '$' COLUMNS(
+tm TIME PATH '$',
+dt DATE PATH '$',
+i INT PATH '$',
+f FLOAT PATH '$',
+d DECIMAL PATH '$')) AS jt;
+tm dt i f d
+NULL NULL NULL NULL NULL
+SELECT * FROM
+JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON EMPTY)) jt;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'NULL ON EMPTY)) jt' at line 2
+SELECT * FROM
+JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON ERROR)) jt;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'NULL ON ERROR)) jt' at line 2
+SELECT * FROM
+JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON EMPTY)) jt;
+ERROR 42000: This version of MySQL doesn't yet support 'non-string DEFAULT value for a column in a JSON_TABLE expression'
+SELECT * FROM
+JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON ERROR)) jt;
+ERROR 42000: This version of MySQL doesn't yet support 'non-string DEFAULT value for a column in a JSON_TABLE expression'
+SELECT * FROM
+JSON_TABLE('{}', '$' COLUMNS (x DATE
+PATH '$.x'
+ DEFAULT DATE'2020-01-01' ON EMPTY)) jt;
+ERROR 42000: This version of MySQL doesn't yet support 'non-string DEFAULT value for a column in a JSON_TABLE expression'
+SELECT * FROM
+JSON_TABLE('{}', '$' COLUMNS (x DATE
+PATH '$.x'
+ DEFAULT DATE'2020-01-01' ON ERROR)) jt;
+ERROR 42000: This version of MySQL doesn't yet support 'non-string DEFAULT value for a column in a JSON_TABLE expression'
+#
+# Bug#25413069: SIG11 IN CHECK_COLUMN_GRANT_IN_TABLE_REF
+#
+SELECT a FROM JSON_TABLE(abc, '$[*]' COLUMNS ( a int path '$.a')) AS jt;
+ERROR 42S22: Unknown column 'abc' in 'a table function argument'
+#
+# Bug#25420680: ASSERTION `THD->IS_ERROR()' FAILED IN SQL/SQL_SELECT.CC
+#
+SELECT * FROM JSON_TABLE('{"a":"2017-11-1"}',
+'$' COLUMNS (jpath DATE PATH '$.a')) AS jt;
+jpath
+2017-11-01
+#
+# Bug#25413826: ASSERTION `TABLE_LIST->ALIAS' FAILED
+#
+SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]',
+'$[*]' COLUMNS ( a int path '$.b'));
+ERROR 42000: Every table function must have an alias
+#
+# Bug#25421464: ASSERTION `!STRCMP(TABLE_REF->TABLE_NAME, ...
+#
+CREATE VIEW v1 AS
+SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]',
+'$[*]' COLUMNS ( a INT PATH '$.b')) AS jt;
+SELECT * FROM v1;
+a
+2
+SHOW CREATE VIEW v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `jt`.`a` AS `a` from json_table('[{"a": 1, "b": 2}]', '$[*]' columns (`a` int path '$.b')) `jt` utf8mb4 utf8mb4_0900_ai_ci
+DROP VIEW v1;
+#
+# Bug#25427457: ASSERTION `!((*REG_FIELD)->FLAGS & 16)'
+#
+SELECT * FROM JSON_TABLE('{"a":"1"}',
+'$' COLUMNS (jpath JSON PATH '$.a',
+o FOR ORDINALITY)) AS jt
+WHERE o = 1;
+jpath o
+"1" 1
+#
+# Bug#25427982: ASSERTION `DERIVED' FAILED IN SQL/TABLE.H
+#
+SELECT je,o FROM JSON_TABLE('{"a":"1"}',
+'$' COLUMNS (o FOR ORDINALITY,
+je BIGINT EXISTS PATH '$.a')) AS jt
+GROUP BY je;
+ERROR 42000: Expression #2 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'jt.o' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by
+SELECT je,COUNT(o) FROM JSON_TABLE('{"a":"1"}',
+'$' COLUMNS (o FOR ORDINALITY,
+je BIGINT EXISTS PATH '$.a')) AS jt
+GROUP BY je;
+je COUNT(o)
+1 1
+#
+# Bug#25413194: ASSERTION `!(WANT_PRIVILEGE & ~(GRANT->WANT_PRIVILEGE
+#
+CREATE TABLE t1 (j JSON);
+SELECT * FROM t1,JSON_TABLE(t1.j, '$[*]' COLUMNS ( a int path '$.b')) AS jt;
+j a
+DROP TABLE t1;
+#
+# Bug#25460537:SIG 11 IN NEXT_FAST AT SQL/SQL_LIST.H
+#
+PREPARE STMT FROM
+"SELECT * FROM JSON_TABLE(
+ \'[{\"a\":\"3\"},{\"a\":2},{\"b\":1},{\"a\":0}]\',
+ \'$[*]\' COLUMNS (id
+ FOR ORDINALITY,
+ jpath VARCHAR(100) PATH \'$.a\',
+ jexst INT EXISTS PATH \'$.b\')
+ ) as tt";
+EXECUTE STMT;
+id jpath jexst
+1 3 0
+2 2 0
+3 NULL 1
+4 0 0
+EXECUTE STMT;
+id jpath jexst
+1 3 0
+2 2 0
+3 NULL 1
+4 0 0
+DEALLOCATE PREPARE stmt;
+#
+# Bug#25522353: SIG 11 IN JOIN::MAKE_JOIN_PLAN | SQL/SQL_OPTIMIZER.CC
+#
+CREATE TABLE t1 (id INT, jc JSON);
+SELECT * FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+ERROR 42S02: Unknown table 't1' in a table function argument
+SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt LEFT JOIN t1 ON t1.jc=jt.id;
+ERROR 42S02: Unknown table 't1' in a table function argument
+SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt RIGHT JOIN t1 ON t1.jc=jt.id;
+id id jc
+SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+id jc id
+EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 1 100.00 NULL
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary; Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`jc` AS `jc`,`jt`.`id` AS `id` from `test`.`t1` left join json_table(`test`.`t1`.`jc`, '$' columns (`id` for ordinality)) `jt` on((`test`.`t1`.`jc` = `jt`.`id`)) where true
+SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id
+LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+id jc id jc id
+EXPLAIN SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id
+LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 1 100.00 NULL
+1 SIMPLE t1o NULL ALL NULL NULL NULL NULL 1 100.00 Using where; Using join buffer (hash join)
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary; Using where
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1o`.`id` AS `id`,`test`.`t1o`.`jc` AS `jc`,`test`.`t1`.`id` AS `id`,`test`.`t1`.`jc` AS `jc`,`jt`.`id` AS `id` from `test`.`t1` left join `test`.`t1` `t1o` on((`test`.`t1o`.`id` = `test`.`t1`.`id`)) left join json_table(`test`.`t1`.`jc`, '$' columns (`id` for ordinality)) `jt` on((`test`.`t1`.`jc` = `jt`.`id`)) where true
+SELECT * FROM t1 AS t1o RIGHT JOIN
+(t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ON t1o.id=t1i.id;
+ERROR 42S22: Unknown column 't1o.jc' in 'a table function argument'
+SELECT * FROM t1 AS t1o RIGHT JOIN
+(t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ON t1o.id=t1i.id;
+ERROR 42S02: Unknown table 't1o' in a table function argument
+WITH qn AS
+(SELECT jt.* FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1.jc=jt.id)
+SELECT * from qn;
+ERROR 42S02: Unknown table 't1' in a table function argument
+WITH qn AS
+(SELECT 1 UNION
+SELECT jt.id FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1.jc=jt.id)
+SELECT * from qn;
+ERROR 42S02: Unknown table 't1' in a table function argument
+SELECT * FROM t1 AS t1o RIGHT JOIN
+(t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ON t1o.id=t1i.id;
+ERROR 42S22: Unknown column 't1o.jc' in 'a table function argument'
+SELECT * FROM t1 AS t1o RIGHT JOIN
+(t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+(id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ON t1o.id=t1i.id;
+ERROR 42S02: Unknown table 't1o' in a table function argument
+INSERT INTO t1 VALUES(1,"1"),(2,"4"),(3,"3");
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id INT PATH '$')) as jt ON t1.id=jt.id;
+id jc id
+1 1 1
+2 4 NULL
+3 3 3
+EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+(id INT PATH '$')) as jt ON t1.id=jt.id;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 3 100.00 NULL
+1 SIMPLE jt NULL ref <auto_key0> <auto_key0> 5 test.t1.id 2 100.00 Table function: json_table; Using temporary; Using index
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`jc` AS `jc`,`jt`.`id` AS `id` from `test`.`t1` left join json_table(`test`.`t1`.`jc`, '$' columns (`id` int path '$')) `jt` on((`jt`.`id` = `test`.`t1`.`id`)) where true
+SELECT * FROM t1
+LEFT JOIN
+JSON_TABLE(t1.jc, '$' COLUMNS (id FOR ORDINALITY)) as jt ON t1.jc=jt.id
+RIGHT JOIN
+JSON_TABLE(jt.id, '$' COLUMNS (id FOR ORDINALITY)) as jt1 ON jt.id=jt1.id;
+ERROR 42S02: Unknown table 'jt' in a table function argument
+DROP TABLE t1;
+set @save_mem_se= @@internal_tmp_mem_storage_engine;
+set @@internal_tmp_mem_storage_engine=MEMORY;
+set @save_heap_size= @@max_heap_table_size;
+set @@max_heap_table_size= 16384;
+FLUSH STATUS;
+SELECT * FROM JSON_TABLE(
+'[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]',
+'$[*]' COLUMNS (
+c1 CHAR(255) PATH '$',
+c2 CHAR(255) PATH '$',
+c3 CHAR(255) PATH '$',
+c4 CHAR(255) PATH '$',
+c5 CHAR(255) PATH '$',
+c6 CHAR(255) PATH '$',
+c7 CHAR(255) PATH '$',
+c8 CHAR(255) PATH '$')) AS jt;
+c1 c2 c3 c4 c5 c6 c7 c8
+1 1 1 1 1 1 1 1
+2 2 2 2 2 2 2 2
+3 3 3 3 3 3 3 3
+4 4 4 4 4 4 4 4
+5 5 5 5 5 5 5 5
+6 6 6 6 6 6 6 6
+7 7 7 7 7 7 7 7
+8 8 8 8 8 8 8 8
+9 9 9 9 9 9 9 9
+10 10 10 10 10 10 10 10
+11 11 11 11 11 11 11 11
+12 12 12 12 12 12 12 12
+13 13 13 13 13 13 13 13
+14 14 14 14 14 14 14 14
+15 15 15 15 15 15 15 15
+16 16 16 16 16 16 16 16
+17 17 17 17 17 17 17 17
+18 18 18 18 18 18 18 18
+19 19 19 19 19 19 19 19
+20 20 20 20 20 20 20 20
+SHOW STATUS LIKE '%tmp%';
+Variable_name Value
+Created_tmp_disk_tables 1
+Created_tmp_files 0
+Created_tmp_tables 1
+set @@max_heap_table_size= @save_heap_size;
+set @@internal_tmp_mem_storage_engine= @save_mem_se;
+#
+# Bug#25504063: EXPLAIN OF JSON_TABLE QUERY USES INTERNAL TEMP TABLES
+#
+FLUSH STATUS;
+SELECT * FROM
+JSON_TABLE(
+'[{"a":"3"}]',
+'$[*]' COLUMNS (id FOR ORDINALITY)
+) AS tt;
+id
+1
+SHOW STATUS LIKE '%tmp%';
+Variable_name Value
+Created_tmp_disk_tables 0
+Created_tmp_files 0
+Created_tmp_tables 1
+#
+# Bug#25525409: ASSERTION `TABLE_LIST->TABLE' FAILED IN SQL/SQL_BASE.CC
+#
+SELECT * FROM JSON_TABLE ( ( SELECT a ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+ERROR 42S22: Unknown column 'a' in 'field list'
+SELECT * FROM JSON_TABLE ( ( SELECT 1 ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+ERROR HY000: Incorrect arguments to JSON_TABLE
+SELECT * FROM JSON_TABLE ( ( SUM(1) ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+ERROR HY000: Invalid use of group function
+#
+# Bug# #25472875: ERROR SHOULD BE THROWN FOR INCORRECT VALUES
+#
+SELECT *
+FROM JSON_TABLE('{"a":"1993-01-01"}',
+'$' COLUMNS (jp DATE PATH '$.b' DEFAULT '1000' ON EMPTY))
+AS jt;
+ERROR 22003: Value is out of range for JSON_TABLE's column 'jp'
+#
+# Bug#25532429: INVALID JSON ERROR NOT THROWN WITH EMPTY TABLES JOIN
+#
+CREATE TABLE t1(j JSON);
+SELECT * FROM t1,
+JSON_TABLE( 'dqwfjqjf' , '$[*]' COLUMNS (col5 FOR ORDINALITY) ) AS alias7;
+ERROR 22032: Invalid JSON text in argument 1 to function json_table: "Invalid value." at position 0.
+DROP TABLE t1;
+#
+# Bug#25540370: SIG 11 IN SHOW_SQL_TYPE|SQL/SQL_SHOW.CC:7063
+#
+EXPLAIN SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 CHAR(70) PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('[3.14159]', '$[*]' columns (`col18` char(70) character set utf8mb4 path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 CHAR(70) PATH '$')
+) AS alias2;
+col18
+3.14159
+# A too-short field causes truncation, an error, and triggers the ON ERROR clause
+EXPLAIN SELECT * FROM
+JSON_TABLE ('["3.14159"]',
+'$[*]' COLUMNS (col18 CHAR(6) PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('["3.14159"]', '$[*]' columns (`col18` char(6) character set utf8mb4 path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('["3.14159"]',
+'$[*]' COLUMNS (col18 CHAR(6) PATH '$')
+) AS alias2;
+col18
+NULL
+# Truncated space doesn't trigger ON ERROR
+EXPLAIN SELECT * FROM
+JSON_TABLE ('["3.14159 "]',
+'$[*]' COLUMNS (col18 CHAR(7) PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('["3.14159 "]', '$[*]' columns (`col18` char(7) character set utf8mb4 path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('["3.14159 "]',
+'$[*]' COLUMNS (col18 CHAR(7) PATH '$')
+) AS alias2;
+col18
+3.14159
+EXPLAIN SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 CHAR(255) PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('[3.14159]', '$[*]' columns (`col18` char(255) character set utf8mb4 path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 CHAR(255) PATH '$')
+) AS alias2;
+col18
+3.14159
+EXPLAIN SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 VARCHAR(70) PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('[3.14159]', '$[*]' columns (`col18` varchar(70) character set utf8mb4 path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 VARCHAR(70) PATH '$')
+) AS alias2;
+col18
+3.14159
+EXPLAIN SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 VARCHAR(255) PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('[3.14159]', '$[*]' columns (`col18` varchar(255) character set utf8mb4 path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 VARCHAR(255) PATH '$')
+) AS alias2;
+col18
+3.14159
+EXPLAIN SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 FLOAT PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('[3.14159]', '$[*]' columns (`col18` float path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 FLOAT PATH '$')
+) AS alias2;
+col18
+3.14159
+EXPLAIN SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 DOUBLE PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('[3.14159]', '$[*]' columns (`col18` double path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 DOUBLE PATH '$')
+) AS alias2;
+col18
+3.14159
+EXPLAIN SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('[3.14159]', '$[*]' columns (`col18` decimal(3,3) path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$')
+) AS alias2;
+col18
+NULL
+SELECT * FROM
+JSON_TABLE ('[3.14159]',
+'$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$' ERROR ON ERROR)
+) AS alias2;
+ERROR 22003: Value is out of range for JSON_TABLE's column 'col18'
+EXPLAIN SELECT * FROM
+JSON_TABLE ('[0.9]',
+'$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$')
+) AS alias2;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias2 NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias2`.`col18` AS `col18` from json_table('[0.9]', '$[*]' columns (`col18` decimal(3,3) path '$')) `alias2`
+SELECT * FROM
+JSON_TABLE ('[0.9]',
+'$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$')
+) AS alias2;
+col18
+0.900
+SELECT * FROM
+JSON_TABLE ('["asdf","ghjk"]',
+'$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$'
+DEFAULT "3.14159" ON ERROR)
+) AS alias2;
+col18
+3.142
+3.142
+Warnings:
+Note 1265 Data truncated for column 'col18' at row 1
+CREATE TABLE t1(jd JSON);
+INSERT INTO t1 VALUES('["asdf"]'),('["ghjk"]');
+SELECT * FROM t1,
+JSON_TABLE (jd,
+'$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$'
+DEFAULT "3.14159" ON ERROR)
+) AS alias2;
+jd col18
+["asdf"] 3.142
+["ghjk"] 3.142
+Warnings:
+Note 1265 Data truncated for column 'col18' at row 1
+DROP TABLE t1;
+#
+# Bug#25540027: SIG 11 IN FIND_FIELD_IN_TABLE | SQL/SQL_BASE.CC
+#
+CREATE TABLE t1(c1 JSON);
+UPDATE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+SET jt1.a=1;
+ERROR HY000: The target table jt1 of the UPDATE is not updatable
+DELETE JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+FROM t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+FROM t1' at line 1
+DELETE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+USING t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+USING t1' at line 1
+DROP TABLE t1;
+#
+# Bug#25547244: ASSERTION `!TABLE || (!TABLE->READ_SET || BITMAP_IS_SET(
+#
+CREATE TABLE t1(i INT);
+INSERT INTO t1 VALUES(1);
+WITH cte_query AS
+(SELECT * FROM t1, JSON_TABLE ( JSON_OBJECT('ISSKF',i) ,
+'$[*]' COLUMNS (jtcol1 INT EXISTS PATH '$[*]') ) AS alias2)
+SELECT jtcol1 AS field1 FROM cte_query;
+field1
+DROP TABLE t1;
+#
+# Bug#25540675: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED
+#
+CREATE TABLE j1(j JSON);
+INSERT INTO j1 VALUES('[1,2,3]'),('[1,2,4]');
+SELECT * FROM j1,
+JSON_TABLE ( JSON_OBJECT('key1', j) ,
+'$.*' COLUMNS (NESTED PATH '$.*' COLUMNS (col11 FOR ORDINALITY))) AS alias2;
+j col11
+[1, 2, 3] NULL
+[1, 2, 4] NULL
+DROP TABLE j1;
+#
+# Bug#25584335: SIG 11 IN TABLE_LIST::FETCH_NUMBER_OF_ROWS
+#
+CREATE TABLE t1(i INT);
+PREPARE stmt FROM "SELECT alias1.i AS field1 FROM (
+ t1 AS alias1,
+ (SELECT * FROM
+ JSON_TABLE ('[1,2,3]' ,
+ '$[*]' COLUMNS (`col_varchar` FOR ORDINALITY)) AS SQ1_alias1
+ ) AS alias2 )";
+EXECUTE stmt;
+field1
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1;
+#
+# Bug#25604048: COLUMN NAMES WITH SAME 33-CHAR PREFIX ARE EQUAL
+#
+SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS(
+column_name_is_thirty_four_or_more VARCHAR(17) PATH '$.key1',
+column_name_is_thirty_four_or_more_yes_indeed VARCHAR(17) PATH '$.key1'
+) ) AS alias1;
+column_name_is_thirty_four_or_more column_name_is_thirty_four_or_more_yes_indeed
+test test
+SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS(
+`column_name_is_thirty_four_or_more ` VARCHAR(17) PATH '$.key1'
+) ) AS alias1;
+ERROR 42000: Incorrect column name 'column_name_is_thirty_four_or_more '
+#
+# Bug#25604404: JSON_TABLE MORE RESTRICTIVE WITH IDENTIFIERS THAN
+# CREATE TABLE
+#
+SELECT * FROM JSON_TABLE( '[1, 2]', '$' COLUMNS(
+one INT PATH '$[0]', two INT PATH '$[1]'
+)) AS jt;
+one two
+1 2
+#
+# Bug#25588450: SIG 6 IN JSON_WRAPPER::SEEK|SQL/JSON_DOM.CC
+#
+CREATE TABLE t1(c VARCHAR(10)) ENGINE=MEMORY;
+INSERT INTO t1 VALUES('fiheife');
+SELECT * FROM `t1` AS alias1, JSON_TABLE ( `c` , '$[*]' COLUMNS (jtcol1 JSON
+PATH '$.*')) AS alias2 WHERE jtcol1 <= 'kjfh';
+ERROR 22032: Invalid JSON text in argument 1 to function json_table: "Invalid value." at position 1.
+DROP TABLE t1;
+#
+# Bug#25587754: ASSERTION `FIXED == 0 || BASIC_CONST_ITEM()' FAILED
+#
+PREPARE stmt FROM
+"SELECT * FROM JSON_TABLE ( '[1,2]', '$[*]'
+ COLUMNS (jtcol1 JSON PATH '$.*')) AS alias2";
+EXECUTE stmt;
+jtcol1
+NULL
+NULL
+DEALLOCATE PREPARE stmt;
+#
+# Bug#25584593: UNABLE TO USE JSON_TABLE() ON TEXT/BLOB JSON DATA
+#
+SELECT * FROM JSON_TABLE (NULL, '$.k' COLUMNS (id FOR ORDINALITY)) AS aLias;
+id
+SELECT * FROM JSON_TABLE (@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias;
+id
+SET @myjson = '{"k": 42}';
+SELECT * FROM JSON_TABLE (@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias;
+id
+1
+CREATE TABLE t1(
+txt TEXT, ty TINYTEXT, tm MEDIUMTEXT, tl LONGTEXT);
+INSERT INTO t1 values (
+'{"k": "text"}','{"k": "tinytext"}','{"k": "mediumtext"}','{"k": "longtext"}');
+SELECT alias.* FROM t1, JSON_TABLE (t1.txt,'$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+id
+text
+SELECT alias.* FROM t1, JSON_TABLE (t1.ty, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+id
+tinytext
+SELECT alias.* FROM t1, JSON_TABLE (t1.tm, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+id
+mediumtext
+SELECT alias.* FROM t1, JSON_TABLE (t1.tl, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+id
+longtext
+SELECT alias.* FROM t1, JSON_TABLE (t1.txt,'$.k' COLUMNS (id TEXT PATH '$')) AS alias;
+id
+text
+SELECT alias.* FROM t1, JSON_TABLE (t1.txt,'$.k' COLUMNS (id TINYTEXT PATH '$')) AS alias;
+id
+text
+SELECT alias.* FROM t1, JSON_TABLE (t1.txt,'$.k' COLUMNS (id MEDIUMTEXT PATH '$')) AS alias;
+id
+text
+SELECT alias.* FROM t1, JSON_TABLE (t1.txt,'$.k' COLUMNS (id LONGTEXT PATH '$')) AS alias;
+id
+text
+SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias;
+blb
+asd123
+EXPLAIN SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias`.`blb` AS `blb` from json_table('"asd123"', '$' columns (`blb` blob path '$')) `alias`
+EXPLAIN SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb TINYBLOB PATH '$')) as alias;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias`.`blb` AS `blb` from json_table('"asd123"', '$' columns (`blb` tinyblob path '$')) `alias`
+EXPLAIN SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb MEDIUMBLOB PATH '$')) as alias;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias`.`blb` AS `blb` from json_table('"asd123"', '$' columns (`blb` mediumblob path '$')) `alias`
+EXPLAIN SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb LONGBLOB PATH '$')) as alias;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias`.`blb` AS `blb` from json_table('"asd123"', '$' columns (`blb` longblob path '$')) `alias`
+EXPLAIN SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb TEXT PATH '$')) as alias;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias`.`blb` AS `blb` from json_table('"asd123"', '$' columns (`blb` text character set utf8mb4 path '$')) `alias`
+EXPLAIN SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb TINYTEXT PATH '$')) as alias;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias`.`blb` AS `blb` from json_table('"asd123"', '$' columns (`blb` tinytext character set utf8mb4 path '$')) `alias`
+EXPLAIN SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb MEDIUMTEXT PATH '$')) as alias;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias`.`blb` AS `blb` from json_table('"asd123"', '$' columns (`blb` mediumtext character set utf8mb4 path '$')) `alias`
+EXPLAIN SELECT alias.* FROM JSON_TABLE ('"asd123"', '$' COLUMNS (blb LONGTEXT PATH '$')) as alias;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE alias NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `alias`.`blb` AS `blb` from json_table('"asd123"', '$' columns (`blb` longtext character set utf8mb4 path '$')) `alias`
+SELECT * FROM
+(SELECT CAST(blb AS JSON) jf FROM
+JSON_TABLE ('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) AS jti) AS dt,
+JSON_TABLE (jf, '$' COLUMNS (blb BLOB PATH '$')) AS jto;
+ERROR HY000: Incorrect arguments to JSON_TABLE
+DROP TABLE t1;
+#
+# Bug#26500384: ASSERT FAILURE IN QUERY WITH WINDOW FUNCTION AND
+# JSON_TABLE
+#
+CREATE TABLE t (x INT);
+INSERT INTO t VALUES (1), (2), (3);
+SELECT MAX(t.x) OVER () m, jt.* FROM t,
+JSON_TABLE(JSON_ARRAY(m), '$[*]' COLUMNS (i INT PATH '$')) jt;
+ERROR 42S22: Unknown column 'm' in 'a table function argument'
+DROP TABLE t;
+#
+# Bug#26583283: ASSERTION `!THD->IS_ERROR()' FAILED IN SQL_RESOLVER.CC
+#
+EXPLAIN SELECT * FROM JSON_TABLE('null', '$' COLUMNS(AA DECIMAL PATH '$')) tt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `tt`.`AA` AS `AA` from json_table('null', '$' columns (`AA` decimal(10,0) path '$')) `tt`
+CREATE VIEW v1 AS SELECT * FROM
+JSON_TABLE ( 'null', '$' COLUMNS (c1 DECIMAL PATH '$' ) ) AS jt;
+SELECT * FROM v1;
+c1
+NULL
+EXPLAIN SELECT * FROM v1;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE jt NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `jt`.`c1` AS `c1` from json_table('null', '$' columns (`c1` decimal(10,0) path '$')) `jt`
+DROP VIEW v1;
+#
+# Bug#25822408: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED
+#
+PREPARE stmt FROM "SELECT * FROM
+ JSON_TABLE('{\"a\":1}','$' COLUMNS (c1 CHAR(20) PATH '$.b' ERROR ON EMPTY)) jt";
+EXECUTE stmt;
+ERROR 22035: Missing value for JSON_TABLE column 'c1'
+EXECUTE stmt;
+ERROR 22035: Missing value for JSON_TABLE column 'c1'
+#
+# Bug#25594571: CRASH AT ITEM::CONST_ITEM|SQL/ITEM.H
+#
+CREATE TABLE t1 (i INT);
+INSERT INTO t1 VALUES(1),(2),(3),(4),(5),(6),(7);
+PREPARE stmt FROM "SELECT * FROM t1 AS alias1 LEFT JOIN t1 AS alias2
+LEFT JOIN JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (i FOR ORDINALITY )) AS
+alias3 ON alias2 . `i` = alias3 . `i` ON alias1 . `i` = alias2 . `i`";
+EXECUTE stmt;
+i i i
+1 1 1
+2 2 2
+3 3 3
+4 4 NULL
+5 5 NULL
+6 6 NULL
+7 7 NULL
+EXECUTE stmt;
+i i i
+1 1 1
+2 2 2
+3 3 3
+4 4 NULL
+5 5 NULL
+6 6 NULL
+7 7 NULL
+DROP TABLE t1;
+#
+# Bug#26648617: ASSERTION `IS_VIEW_OR_DERIVED() &&
+# USES_MATERIALIZATION()' FAILED.
+#
+CREATE TABLE t1 (
+col_varchar_key varchar(1) DEFAULT NULL
+) ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1),(4);
+SELECT * FROM t1 WHERE col_varchar_key NOT IN (
+SELECT col_varchar_key FROM JSON_TABLE(
+'[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+(col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+col_varchar_key
+4
+EXPLAIN SELECT * FROM t1 WHERE col_varchar_key NOT IN (
+SELECT col_varchar_key FROM JSON_TABLE(
+'[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+(col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 NULL ALL NULL NULL NULL NULL 2 100.00 Using where
+2 DEPENDENT SUBQUERY innr1 NULL index_subquery <auto_key0> <auto_key0> 43 func 4 100.00 Table function: json_table; Using temporary; Using where; Using index; Full scan on NULL key
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`col_varchar_key` AS `col_varchar_key` from `test`.`t1` where <in_optimizer>(`test`.`t1`.`col_varchar_key`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`col_varchar_key`) in innr1 on <auto_key0> checking NULL where <if>(outer_field_is_not_null, ((<cache>(`test`.`t1`.`col_varchar_key`) = `innr1`.`col_varchar_key`) or (`innr1`.`col_varchar_key` is null)), true) having <if>(outer_field_is_not_null, <is_not_null_test>(`innr1`.`col_varchar_key`), true))) is false)
+SELECT * FROM t1 WHERE col_varchar_key IN (
+SELECT col_varchar_key FROM JSON_TABLE(
+'[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+(col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+col_varchar_key
+1
+EXPLAIN SELECT * FROM t1 WHERE col_varchar_key IN (
+SELECT col_varchar_key FROM JSON_TABLE(
+'[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+(col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 2 100.00 Using where
+1 SIMPLE innr1 NULL ref <auto_key0> <auto_key0> 43 test.t1.col_varchar_key 2 100.00 Table function: json_table; Using temporary; Using where; Using index; FirstMatch(t1)
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`col_varchar_key` AS `col_varchar_key` from `test`.`t1` semi join (json_table('[{"col_key": 1},{"col_key": 2}]', '$[*]' columns (`col_varchar_key` varchar(10) character set utf8mb4 path '$.col_key')) `innr1`) where (`test`.`t1`.`col_varchar_key` = `innr1`.`col_varchar_key`)
+DROP TABLE t1;
+#
+# Bug#26711551: WL8867:CONDITIONAL JUMP IN JSON_TABLE_COLUMN::CLEANUP
+#
+CREATE TABLE t(x int, y int);
+INSERT INTO t(x) VALUES (1);
+UPDATE t t1, JSON_TABLE('[2]', '$[*]' COLUMNS (x INT PATH '$')) t2
+SET t1.y = t2.x;
+SELECT * FROM t;
+x y
+1 2
+DROP TABLE t;
+#
+# Bug#26679671: SIG 11 IN JSON_BINARY::PARSE_BINARY()
+#
+CREATE TABLE t1(id INT, f1 JSON);
+INSERT INTO t1 VALUES (1, '{\"1\": 1}'), (2, '{\"1\": 2}'), (3, '{\"1\": 3}'),
+(4, '{\"1\": 4}'), (5, '{\"1\": 5}'), (6, '{\"1\": 6}');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+SELECT * FROM t1 as jj1,
+(SELECT tt2.* FROM t1 as tt2,
+JSON_TABLE (f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt;
+id f1 id f1
+1 {"1": 1} 1 {"1": 1}
+1 {"1": 1} 2 {"1": 2}
+1 {"1": 1} 3 {"1": 3}
+1 {"1": 1} 4 {"1": 4}
+1 {"1": 1} 5 {"1": 5}
+1 {"1": 1} 6 {"1": 6}
+2 {"1": 2} 1 {"1": 1}
+2 {"1": 2} 2 {"1": 2}
+2 {"1": 2} 3 {"1": 3}
+2 {"1": 2} 4 {"1": 4}
+2 {"1": 2} 5 {"1": 5}
+2 {"1": 2} 6 {"1": 6}
+3 {"1": 3} 1 {"1": 1}
+3 {"1": 3} 2 {"1": 2}
+3 {"1": 3} 3 {"1": 3}
+3 {"1": 3} 4 {"1": 4}
+3 {"1": 3} 5 {"1": 5}
+3 {"1": 3} 6 {"1": 6}
+4 {"1": 4} 1 {"1": 1}
+4 {"1": 4} 2 {"1": 2}
+4 {"1": 4} 3 {"1": 3}
+4 {"1": 4} 4 {"1": 4}
+4 {"1": 4} 5 {"1": 5}
+4 {"1": 4} 6 {"1": 6}
+5 {"1": 5} 1 {"1": 1}
+5 {"1": 5} 2 {"1": 2}
+5 {"1": 5} 3 {"1": 3}
+5 {"1": 5} 4 {"1": 4}
+5 {"1": 5} 5 {"1": 5}
+5 {"1": 5} 6 {"1": 6}
+6 {"1": 6} 1 {"1": 1}
+6 {"1": 6} 2 {"1": 2}
+6 {"1": 6} 3 {"1": 3}
+6 {"1": 6} 4 {"1": 4}
+6 {"1": 6} 5 {"1": 5}
+6 {"1": 6} 6 {"1": 6}
+EXPLAIN SELECT * FROM t1 as jj1,
+(SELECT tt2.* FROM t1 as tt2,
+JSON_TABLE (f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE jj1 NULL ALL NULL NULL NULL NULL 6 100.00 NULL
+1 SIMPLE tt2 NULL ALL NULL NULL NULL NULL 6 100.00 Using join buffer (hash join)
+1 SIMPLE tbl NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+Warnings:
+Note 1003 /* select#1 */ select `test`.`jj1`.`id` AS `id`,`test`.`jj1`.`f1` AS `f1`,`test`.`tt2`.`id` AS `id`,`test`.`tt2`.`f1` AS `f1` from `test`.`t1` `jj1` join `test`.`t1` `tt2` join json_table(`test`.`tt2`.`f1`, '$' columns (`id` for ordinality)) `tbl`
+SELECT * FROM t1 as jj1,
+(SELECT tt2.* FROM t1 as tt2,
+JSON_TABLE (f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN
+t1 AS tt3) dt ORDER BY 1,3 LIMIT 10;
+id f1 id f1
+1 {"1": 1} 1 {"1": 1}
+1 {"1": 1} 1 {"1": 1}
+1 {"1": 1} 1 {"1": 1}
+1 {"1": 1} 1 {"1": 1}
+1 {"1": 1} 1 {"1": 1}
+1 {"1": 1} 1 {"1": 1}
+1 {"1": 1} 2 {"1": 2}
+1 {"1": 1} 2 {"1": 2}
+1 {"1": 1} 2 {"1": 2}
+1 {"1": 1} 2 {"1": 2}
+EXPLAIN SELECT * FROM t1 as jj1,
+(SELECT tt2.* FROM t1 as tt2,
+JSON_TABLE (f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN
+t1 AS tt3) dt ORDER BY 1,3 LIMIT 11;
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE jj1 NULL ALL NULL NULL NULL NULL 6 100.00 Using temporary; Using filesort
+1 SIMPLE tt2 NULL ALL NULL NULL NULL NULL 6 100.00 Using join buffer (hash join)
+1 SIMPLE tbl NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary
+1 SIMPLE tt3 NULL ALL NULL NULL NULL NULL 6 100.00 Using join buffer (hash join)
+Warnings:
+Note 1003 /* select#1 */ select `test`.`jj1`.`id` AS `id`,`test`.`jj1`.`f1` AS `f1`,`test`.`tt2`.`id` AS `id`,`test`.`tt2`.`f1` AS `f1` from `test`.`t1` `jj1` join `test`.`t1` `tt2` join json_table(`test`.`tt2`.`f1`, '$' columns (`id` for ordinality)) `tbl` straight_join `test`.`t1` `tt3` order by `test`.`jj1`.`id`,`test`.`tt2`.`id` limit 11
+SELECT * FROM t1 WHERE id IN
+(SELECT id FROM t1 as tt2,
+JSON_TABLE (f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+id f1
+1 {"1": 1}
+2 {"1": 2}
+3 {"1": 3}
+4 {"1": 4}
+5 {"1": 5}
+6 {"1": 6}
+EXPLAIN SELECT * FROM t1 WHERE id IN
+(SELECT id FROM t1 as tt2,
+JSON_TABLE (f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+id select_type table partitions type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE tt2 NULL ALL NULL NULL NULL NULL 6 100.00 Start temporary
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 6 16.67 Using where; Using join buffer (hash join)
+1 SIMPLE tbl NULL ALL NULL NULL NULL NULL 2 100.00 Table function: json_table; Using temporary; End temporary
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`id` AS `id`,`test`.`t1`.`f1` AS `f1` from `test`.`t1` semi join (`test`.`t1` `tt2` join json_table(`test`.`tt2`.`f1`, '$' columns (`jf` for ordinality)) `tbl`) where (`test`.`t1`.`id` = `test`.`tt2`.`id`)
+DROP TABLE t1;
+#
+# Bug#26760811: WL#8867: MEMORY LEAK REPORTED BY ASAN AND VALGRIND
+#
+CREATE TABLE t (j JSON);
+INSERT INTO t VALUES
+('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]'),
+('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]');
+SELECT COUNT(*) FROM t, JSON_TABLE(j, '$[*]' COLUMNS (i INT PATH '$')) AS jt;
+COUNT(*)
+34
+PREPARE ps FROM
+'SELECT COUNT(*) FROM t, JSON_TABLE(j, ''$[*]'' COLUMNS (i INT PATH ''$'')) AS jt';
+EXECUTE ps;
+COUNT(*)
+34
+EXECUTE ps;
+COUNT(*)
+34
+DROP PREPARE ps;
+DROP TABLE t;
+#
+# Bug #26781759: NON-UNIQUE ALIAS ERROR NOT BEING THROWN
+#
+SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias,
+JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias;
+ERROR 42000: Not unique table/alias: 'jalias'
+#
+# Bug #26761470: WL#8867: JOIN::MAKE_JOIN_PLAN():
+# ASSERTION `SELECT_LEX->IS_RECURSIVE()' FAILED
+#
+CREATE TABLE t1 (x INT);
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (j JSON);
+INSERT INTO t2 (j) VALUES ('[1,2,3]');
+SELECT * FROM t1 RIGHT JOIN
+(SELECT o FROM t2, JSON_TABLE(j, '$[*]' COLUMNS (o FOR ORDINALITY)) AS jt) AS t3
+ON (t3.o = t1.x);
+x o
+1 1
+NULL 2
+NULL 3
+DROP TABLE t1, t2;
+#
+# Bug#27152428 JSON_TABLE + PREPARED STATEMENT + VIEW HAS PROBLEM DURING RESOLUTION
+#
+CREATE TABLE t1 (a INT, b INT);
+CREATE VIEW v2 AS SELECT * FROM t1 LIMIT 2;
+SELECT b
+FROM (SELECT * FROM v2) vq1,
+JSON_TABLE(CONCAT(vq1.b,'[{\"a\":\"3\"}]'),
+'$[*]' COLUMNS (id FOR ORDINALITY,
+jpath VARCHAR(100) PATH '$.a',
+JEXST INT EXISTS PATH '$.b')
+) AS dt;
+b
+DROP TABLE t1;
+DROP VIEW v2;
+#
+# Bug#27189940: CREATE VIEW FAILS ON JSON_TABLE() IN SCHEMA-LESS CONNECTIONS
+# BUG#27217897: JSON_TABLE() FAILS IF NO DATABASE SELECTED
+#
+CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+num
+1
+2
+3
+use test;
+SHOW CREATE VIEW test.v;
+View Create View character_set_client collation_connection
+v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `jt`.`num` AS `num` from json_table('[1,2,3]', '$[*]' columns (`num` int path '$[0]')) `jt` utf8mb4 utf8mb4_0900_ai_ci
+SELECT * FROM test.v;
+num
+1
+2
+3
+DROP VIEW test.v;
+#
+# Bug#27729112 JSON_TABLE SHOULD DECODE BASE64-ENCODED STRINGS
+#
+SELECT v
+FROM JSON_TABLE(JSON_OBJECT('foo', _binary'bar'), '$'
+ COLUMNS(v VARCHAR(255) PATH '$.foo')) tbl;
+v
+bar
+CREATE TABLE t1 (col1 VARCHAR(255) CHARACTER SET ucs2,
+col2 VARCHAR(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_as_cs);
+INSERT INTO t1 VALUES ("æ", "ハ"), ("å", "ø"), ("ø", "パ"), ("@", "バ");
+ANALYZE TABLE t1 UPDATE HISTOGRAM ON col1, col2;
+Table Op Msg_type Msg_text
+test.t1 histogram status Histogram statistics created for column 'col1'.
+test.t1 histogram status Histogram statistics created for column 'col2'.
+SELECT v value, c cumulfreq
+FROM INFORMATION_SCHEMA.COLUMN_STATISTICS,
+JSON_TABLE(histogram->'$.buckets', '$[*]'
+ COLUMNS(v VARCHAR(255) CHARACTER SET ucs2 PATH '$[0]',
+c double PATH '$[1]')) hist
+WHERE column_name = "col1";
+value cumulfreq
+@ 0.25
+å 0.5
+æ 0.75
+ø 1
+SELECT v value, c cumulfreq
+FROM INFORMATION_SCHEMA.COLUMN_STATISTICS,
+JSON_TABLE(histogram->'$.buckets', '$[*]'
+ COLUMNS(v VARCHAR(255) CHARACTER SET utf8mb4 PATH '$[0]',
+c double PATH '$[1]')) hist
+WHERE column_name = "col2";
+value cumulfreq
+ø 0.25
+ハ 0.5
+バ 0.75
+パ 1
+DROP TABLE t1;
+#
+# Bug#28254268: JSON_TABLE() FUNCTION REJECTS SELECT PERMISSIONS
+#
+CREATE DATABASE db2;
+USE db2;
+CREATE TABLE t1 (c JSON);
+INSERT INTO t1 VALUES('[1,2,3]');
+CREATE USER user1@localhost;
+GRANT SELECT ON db2.t1 TO user1@localhost;
+USE db2;
+SELECT t1.c FROM t1;
+c
+[1, 2, 3]
+SELECT jt.* FROM t1, JSON_TABLE(t1.c, '$[*]' COLUMNS (num INT PATH '$[0]'))
+AS jt;
+num
+1
+2
+3
+DROP USER user1@localhost;
+DROP DATABASE db2;
+#
+# Bug#27856835 JSON_TABLE RETURNS WRONG DATATYPE WHEN INT-VALUE IS GREATER
+# THAN (2^31-1)
+#
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775807"}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+id
+9223372036854775807
+# As we currently have no way of telling if a JSON string value is
+# signed or unsigned, this value will overflow.
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+id
+-9223372036854775808
+# Here the JSON value is a NUMERIC value, and we thus know if the value
+# is signed or unsigned.
+SELECT id FROM JSON_TABLE('[{"id":9223372036854775808}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+id
+NULL
+# If we tell the JSON table column to be unsigned, we get to store the
+# full value correctly.
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS
+(id BIGINT UNSIGNED PATH '$.id')) AS json;
+id
+9223372036854775808
+SELECT id FROM JSON_TABLE('[{"id":"2147483648"}]', '$[*]' COLUMNS
+(id INT UNSIGNED PATH '$.id')) AS json;
+id
+2147483648
+# Check that we preserve the signedness of the columns.
+USE test;
+CREATE TABLE t1 AS SELECT id, value FROM
+JSON_TABLE('[{"id":9223372036854775808, "value":9223372036854775807}]',
+'$[*]' COLUMNS (id BIGINT UNSIGNED PATH '$.id',
+value BIGINT PATH '$.value'))
+AS json;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `id` bigint unsigned DEFAULT NULL,
+ `value` bigint DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
+DROP TABLE t1;
+#
+# Bug#28255453: VIEW USING JSON_TABLE FAILS IF NO SCHEMA IS SELECTED
+#
+CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+SELECT * FROM test.v;
+num
+1
+2
+3
+DROP VIEW test.v;
+# Check that a user without access to the schema 'foo' cannot query
+# a JSON_TABLE view in that schema.
+CREATE SCHEMA foo;
+CREATE VIEW foo.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+CREATE USER foo@localhost;
+SELECT * FROM foo.v;
+ERROR 42000: SELECT command denied to user 'foo'@'localhost' for table 'v'
+DROP USER foo@localhost;
+DROP SCHEMA foo;
+# Check that a user with access to the schema 'foo' can do a SELECT with
+# a JSON_TABLE function.
+CREATE SCHEMA foo;
+CREATE USER foo@localhost;
+GRANT EXECUTE ON foo.* TO foo@localhost;
+SELECT 1 FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$.a')) AS jt;
+1
+1
+1
+1
+DROP USER foo@localhost;
+DROP SCHEMA foo;
+#
+# Bug#27923406 ERROR 1142 (42000) WHEN USING JSON_TABLE
+#
+CREATE SCHEMA my_schema;
+CREATE USER foo@localhost;
+GRANT EXECUTE ON my_schema.* TO foo@localhost;
+SELECT
+*
+FROM
+JSON_TABLE(
+'[{"x":2,"y":"8"},{"x":"3","y":"7"},{"x":"4","y":6}]',
+"$[*]" COLUMNS(
+xval VARCHAR(100) PATH "$.x",
+yval VARCHAR(100) PATH "$.y"
+ )
+) AS jt1;
+xval yval
+2 8
+3 7
+4 6
+DROP USER foo@localhost;
+DROP SCHEMA my_schema;
+#
+# Bug#28538315: JSON_TABLE() COLUMN TYPES DON'T SUPPORT COLLATE CLAUSE
+#
+CREATE TABLE t1 SELECT *
+FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10)
+CHARSET utf8mb4
+PATH '$')) AS jt1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `x` varchar(10) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
+CREATE TABLE t2 SELECT *
+FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10)
+CHARSET utf8mb4 COLLATE utf8mb4_bin
+PATH '$')) AS jt1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `x` varchar(10) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
+CREATE TABLE t3 AS SELECT *
+FROM JSON_TABLE('"a"', '$' COLUMNS (a VARCHAR(10)
+COLLATE ascii_bin
+PATH '$')) jt;
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `a` varchar(10) CHARACTER SET ascii COLLATE ascii_bin DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
+DROP TABLE t1, t2, t3;
+#
+# Bug#28643862 JSON_TABLE'S "COLUMNS" CLAUSE USES
+# GLOBAL.CHARACTER_SET_RESULTS DEFAULT CHARSET
+#
+SET @@SESSION.character_set_connection = ascii;
+CREATE TABLE t1 SELECT a.col
+FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `col` varchar(10) CHARACTER SET ascii DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
+SET @@SESSION.collation_connection = latin1_bin;
+CREATE TABLE t2 SELECT a.col
+FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `col` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
+DROP TABLE t1, t2;
+SET @@SESSION.character_set_connection = DEFAULT;
+#
+# Bug#28851656: JSON_TABLE RETURN "UNKNOWN DATABASE ''" FROM A FUNCTION
+#
+CREATE FUNCTION FN_COUNT_ROWS(X JSON)
+RETURNS INT DETERMINISTIC
+RETURN (
+SELECT COUNT(*) FROM JSON_TABLE( X, '$[*]' COLUMNS( I INT PATH '$')) der
+);
+SELECT FN_COUNT_ROWS('[1, 2]') CNT;
+CNT
+2
+SELECT FN_COUNT_ROWS('[1, 2, 3]') CNT;
+CNT
+3
+SELECT FN_COUNT_ROWS('[1, 2, 3, 4]') CNT;
+CNT
+4
+DROP FUNCTION FN_COUNT_ROWS;
+#
+# Bug#30310265: VIEWS LOSE THE CHARACTER SET OF JSON_TABLE'S
+# PATH ARGUMENTS
+#
+CREATE VIEW v1 AS
+SELECT * FROM JSON_TABLE('{"å":{"å":1}}', '$.å' COLUMNS (x INT PATH '$.å')) t;
+SELECT * FROM v1;
+x
+1
+SET NAMES latin1;
+SELECT * FROM v1;
+x
+1
+SET NAMES DEFAULT;
+DROP VIEW v1;
+CREATE VIEW v2 AS
+SELECT * FROM JSON_TABLE('{}', '$' COLUMNS (
+x VARCHAR(10) PATH '$.a' DEFAULT '"isn''t here"' ON EMPTY)
+) t;
+SHOW CREATE VIEW v2;
+View Create View character_set_client collation_connection
+v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v2` AS select `t`.`x` AS `x` from json_table('{}', '$' columns (`x` varchar(10) character set utf8mb4 path '$.a' default '"isn\'t here"' on empty)) `t` utf8mb4 utf8mb4_0900_ai_ci
+SELECT * FROM v2;
+x
+isn't here
+DROP VIEW v2;
+SELECT * FROM
+JSON_TABLE(JSON_OBJECT(),
+_utf8mb4'$' COLUMNS (NESTED PATH _utf8mb4'$.x' COLUMNS
+(y INT PATH _utf8mb4'$.y'
+ DEFAULT _utf8mb4'1' ON EMPTY
+DEFAULT _utf8mb4'2' ON ERROR))) jt;
+y
+NULL
+#
+# Bug#30382156: STORED PROCEDURE, JSON_TABLE AND "CONST" JOIN TYPE
+#
+CREATE TABLE t (id INT PRIMARY KEY, j JSON);
+INSERT INTO t VALUES (1, '{"x":1}');
+CREATE PROCEDURE p()
+SELECT * FROM t, JSON_TABLE(j, '$' COLUMNS (x INT PATH '$.x')) jt
+WHERE id = 1;
+CALL p();
+id j x
+1 {"x": 1} 1
+CALL p();
+id j x
+1 {"x": 1} 1
+CALL p();
+id j x
+1 {"x": 1} 1
+DROP PROCEDURE p;
+DROP TABLE t;
+#
+# WL#13512: Deprecate ON ERROR before ON EMPTY in JSON_TABLE syntax
+#
+SELECT * FROM
+JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ DEFAULT '"a"' ON ERROR DEFAULT '"b"' ON EMPTY)) jt;
+x
+b
+Warnings:
+Warning 1287 Specifying an ON EMPTY clause after the ON ERROR clause in a JSON_TABLE column definition is deprecated syntax and will be removed in a future release. Specify ON EMPTY before ON ERROR instead.
+SELECT * FROM
+JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ NULL ON ERROR NULL ON EMPTY)) jt;
+x
+NULL
+Warnings:
+Warning 1287 Specifying an ON EMPTY clause after the ON ERROR clause in a JSON_TABLE column definition is deprecated syntax and will be removed in a future release. Specify ON EMPTY before ON ERROR instead.
+SELECT * FROM
+JSON_TABLE('{"x":"c"}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ ERROR ON ERROR ERROR ON EMPTY)) jt;
+x
+c
+Warnings:
+Warning 1287 Specifying an ON EMPTY clause after the ON ERROR clause in a JSON_TABLE column definition is deprecated syntax and will be removed in a future release. Specify ON EMPTY before ON ERROR instead.
+#
+# Bug#30628330: INSERT WITH JSON_TABLE FAILS DUE TO WARNING INVOKED
+# WITH ON ERROR CLAUSE
+#
+CREATE TABLE t(id INT, a TINYINT, b VARCHAR(3), c DATE, d DECIMAL);
+INSERT INTO t SELECT * FROM
+JSON_TABLE(JSON_ARRAY(JSON_OBJECT('a', 1, 'b', 'abc'),
+JSON_OBJECT('a', 2, 'b', 'abcd'),
+JSON_OBJECT('a', 1000, 'b', 'xyz'),
+JSON_OBJECT('c', TIME'12:00:00', 'd', 1e308)),
+'$[*]' COLUMNS (id FOR ORDINALITY,
+a TINYINT PATH '$.a' DEFAULT '111' ON ERROR,
+b VARCHAR(3) PATH '$.b' DEFAULT '"ERR"' ON ERROR,
+c DATE PATH '$.c' DEFAULT '"2001-01-01"' ON ERROR,
+d DECIMAL PATH '$.c' DEFAULT '999' ON ERROR)
+) AS jt;
+SELECT * FROM t ORDER BY id;
+id a b c d
+1 1 abc NULL NULL
+2 2 ERR NULL NULL
+3 111 xyz NULL NULL
+4 NULL NULL 2001-01-01 999
+DROP TABLE t;
+#
+# Bug#30263373: INCORRECT OUTPUT FROM TABLE_FUNCTION_JSON::PRINT()
+#
+CREATE VIEW v AS SELECT * FROM
+JSON_TABLE('[123]', '$[*]' COLUMNS (`name with space 1` INT PATH '$',
+`name with space 2` FOR ORDINALITY)) jt;
+SELECT * FROM v;
+name with space 1 name with space 2
+123 1
+DROP VIEW v;
+CREATE VIEW v AS SELECT HEX(x), HEX(y) FROM
+JSON_TABLE('["abc"]', '$[*]' COLUMNS (x BINARY(10) PATH '$',
+y VARBINARY(10) PATH '$')) jt;
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+VIEW_DEFINITION
+select hex(`jt`.`x`) AS `HEX(x)`,hex(`jt`.`y`) AS `HEX(y)` from json_table('["abc"]', '$[*]' columns (`x` binary(10) path '$', `y` varbinary(10) path '$')) `jt`
+SELECT * FROM v;
+HEX(x) HEX(y)
+61626300000000000000 616263
+DROP VIEW v;
+CREATE VIEW v AS SELECT * FROM
+JSON_TABLE('[123]', '$[*]' COLUMNS(ti TINYINT PATH '$',
+si SMALLINT PATH '$',
+mi MEDIUMINT PATH '$',
+i INT PATH '$',
+bi BIGINT PATH '$',
+tiu TINYINT UNSIGNED PATH '$',
+siu SMALLINT UNSIGNED PATH '$',
+miu MEDIUMINT UNSIGNED PATH '$',
+iu INT UNSIGNED PATH '$',
+biu BIGINT UNSIGNED PATH '$')) jt;
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+VIEW_DEFINITION
+select `jt`.`ti` AS `ti`,`jt`.`si` AS `si`,`jt`.`mi` AS `mi`,`jt`.`i` AS `i`,`jt`.`bi` AS `bi`,`jt`.`tiu` AS `tiu`,`jt`.`siu` AS `siu`,`jt`.`miu` AS `miu`,`jt`.`iu` AS `iu`,`jt`.`biu` AS `biu` from json_table('[123]', '$[*]' columns (`ti` tinyint path '$', `si` smallint path '$', `mi` mediumint path '$', `i` int path '$', `bi` bigint path '$', `tiu` tinyint unsigned path '$', `siu` smallint unsigned path '$', `miu` mediumint unsigned path '$', `iu` int unsigned path '$', `biu` bigint unsigned path '$')) `jt`
+SELECT * FROM v;
+ti si mi i bi tiu siu miu iu biu
+123 123 123 123 123 123 123 123 123 123
+DROP VIEW v;
+CREATE VIEW v AS SELECT * FROM
+JSON_TABLE('[]', '$[*]' COLUMNS (g GEOMETRY PATH '$',
+ls LINESTRING PATH '$')) AS jt;
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+VIEW_DEFINITION
+select `jt`.`g` AS `g`,`jt`.`ls` AS `ls` from json_table('[]', '$[*]' columns (`g` geometry path '$', `ls` linestring path '$')) `jt`
+SELECT * FROM v;
+g ls
+DROP VIEW v;
+CREATE VIEW v AS SELECT * FROM
+JSON_TABLE('["abc"]', '$[*]' COLUMNS
+(x VARCHAR(10) CHARSET latin1 PATH '$',
+y VARCHAR(10) COLLATE utf8mb4_de_pb_0900_ai_ci PATH '$',
+z LONGTEXT COLLATE utf8mb4_bin PATH '$')) jt;
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+VIEW_DEFINITION
+select `jt`.`x` AS `x`,`jt`.`y` AS `y`,`jt`.`z` AS `z` from json_table('["abc"]', '$[*]' columns (`x` varchar(10) character set latin1 path '$', `y` varchar(10) character set utf8mb4 collate utf8mb4_de_pb_0900_ai_ci path '$', `z` longtext character set utf8mb4 collate utf8mb4_bin path '$')) `jt`
+SELECT x, y, z, COLLATION(x) c_x, COLLATION(y) c_y, COLLATION(z) c_z FROM v;
+x y z c_x c_y c_z
+abc abc abc latin1_swedish_ci utf8mb4_de_pb_0900_ai_ci utf8mb4_bin
+DROP VIEW v;
+#
+# Bug#31345503 JSON_TABLE SHOULD RETURN SQL NULL FOR JSON NULL
+#
+SELECT *
+FROM
+JSON_TABLE(
+'[
+ {"c1": null,
+ "c2": [{"c": "c_val", "l": [1,2]}],
+ "c3": null},
+ {"c1": true,
+ "c2": [{"c": "c_val","l": [11]}, {"c": "c_val", "l": [false]}],
+ "c3": true},
+ {"c1": false,
+ "c2": [{"c": null,"l": [true]}, {"c": "c_val", "l": [null]}],
+ "c3": false}
+ ]',
+'$[*]' COLUMNS(
+top_ord FOR ORDINALITY,
+c1path VARCHAR(10) PATH '$.c1' ERROR ON ERROR,
+NESTED PATH '$.c2[*]' COLUMNS (
+c2path VARCHAR(10) PATH '$.c' ERROR ON ERROR,
+ord FOR ORDINALITY,
+NESTED PATH '$.l[*]' COLUMNS (lpath_c VARCHAR(10) PATH '$' ERROR ON ERROR,
+lpath_i INT PATH '$' ERROR ON ERROR)
+),
+c3path INT PATH '$.c3' ERROR ON ERROR
+)
+) as jt;
+top_ord c1path c2path ord lpath_c lpath_i c3path
+1 NULL c_val 1 1 1 NULL
+1 NULL c_val 1 2 2 NULL
+2 true c_val 1 11 11 1
+2 true c_val 2 false 0 1
+3 false NULL 1 true 1 0
+3 false c_val 2 NULL NULL 0
+#
+# Bug #31327187 UBSAN: JSON_TABLE: NULL POINTER PASSED AS ARGUMENT 2,
+# WHICH IS DECLARED TO NEVER BE NULL
+#
+SELECT HEX(a) FROM JSON_TABLE(
+'[{"E":{"e":true,"~":1,"S3":"sTa"},"r":3,"":6.7},"",6.5]',
+'$'
+ COLUMNS(a BINARY(5) PATH '$[1]' NULL ON EMPTY)
+) e;
+HEX(a)
+0000000000
diff --git a/mysql-test/main/json_table_mysql.test b/mysql-test/main/json_table_mysql.test
new file mode 100644
index 00000000000..7f01709b310
--- /dev/null
+++ b/mysql-test/main/json_table_mysql.test
@@ -0,0 +1,1538 @@
+# For stable statistics
+#--source include/have_innodb_16k.inc
+--source include/have_innodb.inc
+--echo #
+--echo # WL#8867: Add JSON_TABLE table function
+--echo #
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a',
+ jexst int exists path '\$.b')
+ ) as tt;
+
+#--error ER_INVALID_DEFAULT
+select * from
+ json_table(
+ '[{"x":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a' default '[99]' on error,
+ jexst int exists path '$.b')
+ ) as tt;
+
+select * from
+ json_table(
+ '[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a'
+ default '33' on empty
+ default '66' on error,
+ jsn_path json path '\$.a' default '{"x":33}' on empty,
+ jexst int exists path '\$.b')
+ ) as tt;
+
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0.33},{"a":"asd"}]',
+ '$[*]' columns (id for ordinality,
+ jpath_i int path '$.a'
+ default '33' on empty
+ default '66' on error,
+ jpath_r real path '$.a'
+ default '33.3' on empty
+ default '77.7' on error,
+ jsn_path json path '$.a' default '{"x":33}' on empty,
+ jexst int exists path '$.b')
+ ) as tt;
+#eval $query;
+#eval explain $query;
+
+select * from
+ json_table(
+ '[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a'
+ default '33' on empty
+ default '66' on error,
+ jsn_path json path '$.a' default '{"x":33}' on empty,
+ jexst int exists path '$.b')
+ ) as tt;
+#eval $query;
+#eval explain $query;
+
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a',
+ json_path json path '$.a',
+ jexst int exists path '$.b')
+ ) as tt;
+
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a',
+ json_path json path '$.a',
+ jexst int exists path '$.b')
+ ) as tt
+ where id = 3;
+#eval $query;
+#eval explain $query;
+
+--error 4174
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a' error on empty,
+ jexst int exists path '$.b')
+ ) as tt;
+
+--error 4174
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"a":1},{"a":[0,1]}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a' error on error,
+ jexst int exists path '$.b')
+ ) as tt;
+
+--error ER_JSON_SYNTAX
+select * from
+ json_table(
+ '!#@$!@#$',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b')
+ ) as tt;
+
+--error ER_JSON_SYNTAX
+# psergey-done: CRASH1
+--disable_parsing
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ "!@#$!@#$" columns (id for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b')
+ ) as tt;
+--enable_parsing
+
+--error ER_JSON_SYNTAX
+--disable_parsing
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path "!@#$!@#$",
+ jexst int exists path '$.b')
+ ) as tt;
+--enable_parsing
+
+--error ER_DUP_FIELDNAME
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ id for ordinality)
+ ) as tt;
+
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ _id for ordinality)
+ ) as tt;
+
+select * from
+ json_table(
+ '[
+ {"a":"3", "n": { "l": 1}},
+ {"a":2, "n": { "l": 1}},
+ {"b":1, "n": { "l": 1}},
+ {"a":0, "n": { "l": 1}}
+ ]',
+ '$[*]' columns (
+ id for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b',
+ nested path '$.n' columns (
+ id_n for ordinality,
+ jpath_n varchar(50) path '$.l')
+ )
+ ) as tt;
+
+#eval $query;
+#eval explain $query;
+explain format=json
+select * from
+ json_table(
+ '[
+ {"a":"3", "n": { "l": 1}},
+ {"a":2, "n": { "l": 1}},
+ {"b":1, "n": { "l": 1}},
+ {"a":0, "n": { "l": 1}}
+ ]',
+ '$[*]' columns (
+ id for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b',
+ nested path '$.n' columns (
+ id_n for ordinality,
+ jpath_n varchar(50) path '$.l')
+ )
+ ) as tt;
+
+select * from
+ json_table(
+ '[
+ {"a":2, "n": [{ "l": 1}, {"l": 11}]},
+ {"a":1, "n": [{ "l": 2}, {"l": 22}]}
+ ]',
+ '$[*]' columns (
+ id for ordinality,
+ jpath varchar(50) path '$.a',
+ nested path '$.n[*]' columns (
+ id_n for ordinality,
+ jpath_n varchar(50) path '$.l'),
+ nested path '$.n[*]' columns (
+ id_m for ordinality,
+ jpath_m varchar(50) path '$.l')
+ )
+ ) as tt;
+
+
+select * from json_table(
+ '[
+ {"a":"3", "n": ["b","a","c"]},
+ {"a":2, "n": [1,2]},
+ {"b":1, "n": ["zzz"]},
+ {"a":0, "n": [0.1, 0.02]}
+ ]',
+ '$[*]' columns (
+ id for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b',
+ nested path '$.n[*]' columns (
+ id_n for ordinality,
+ jpath_n varchar(50) path '$')
+ )
+ ) as tt;
+
+select * from json_table(
+ '[
+ {"a":"3", "n": ["b","a","c"]},
+ {"a":2, "n": [1,2]},
+ {"b":1, "n": ["zzz"]},
+ {"a":0, "n": [0.1, 0.02]}
+ ]',
+ '$[*]' columns (
+ id for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b',
+ nested path '$.n[*]' columns (
+ id_n1 for ordinality,
+ jpath_n1 varchar(50) path '$') ,
+ nested path '$.n[*]' columns (
+ id_n2 for ordinality,
+ jpath_n2 varchar(50) path '$')
+ )
+ ) as tt;
+
+select * from json_table(
+ '[
+ {"a":"3", "n": [ {"ll":["b1","b2","b3"]}, {"ll": ["a1","a2"]},
+ {"ll":["c"]} ]},
+ {"a":2, "n": [{"ll":[1,11,111]},{"ll":[2]}]},
+ {"b":1, "n": [{"ll":["zzz"]}]},
+ {"a":0, "n": [{"ll":[0.1,0.01]}, {"ll":[0.02,0.002,0.0002]}]}
+ ]',
+ '$[*]' columns (
+ id1 for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b',
+ nested path '$.n[*]' columns (
+ id2 for ordinality,
+ nested path '$.ll[*]' columns (
+ id3 for ordinality,
+ jpath_3 varchar(50) path '$')
+ ),
+ nested path '$.n[*]' columns (
+ id4 for ordinality,
+ jpath_4 json path '$')
+ )
+ ) as tt;
+
+
+--echo ord should be 1,1,1,2, which tells that first two values of 'l' are
+--echo from the same object, and next two are from different objects
+SELECT *
+FROM JSON_TABLE(
+ '[{"a": "a_val",
+ "b": [
+ {"c": "c_val",
+ "l": [1,2]}
+ ]
+ }, {"a": "a_val",
+ "b": [
+ {"c": "c_val",
+ "l": [11]},
+ {"c": "c_val",
+ "l": [22]}
+ ]
+ }]',
+ '$[*]' COLUMNS (
+ apath VARCHAR(10) PATH '$.a',
+ NESTED PATH '$.b[*]' COLUMNS (
+ bpath VARCHAR(10) PATH '$.c',
+ ord FOR ORDINALITY,
+ NESTED PATH '$.l[*]' COLUMNS (
+ lpath varchar(10) PATH '$'
+ )
+ )
+ )) as jt;
+
+CREATE TABLE jt( i JSON );
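+# jt is left empty (no rows are inserted), so the SELECTs below are expected
+# to return no rows.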
+SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+ WHERE a=1;
+EXPLAIN SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+ WHERE a=1;
+
+SELECT * FROM (
+ SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+ WHERE a=1) AS ttt;
+EXPLAIN SELECT * FROM (
+ SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+ WHERE a=1) AS ttt;
+DROP TABLE jt;
+
+--disable_parsing
+SELECT * FROM JSON_TABLE(CAST(CAST("11:22:33" AS TIME) AS JSON),
+ '$' COLUMNS (dt DATE PATH '$')) as tt;
+SELECT * FROM JSON_TABLE(CAST(CAST("11:22:33" AS TIME) AS JSON),
+ '$' COLUMNS (dt TIME PATH '$')) as tt;
+SELECT * FROM JSON_TABLE(CAST(CAST("2001.02.03" AS DATE) AS JSON),
+ '$' COLUMNS (dt DATE PATH '$')) as tt;
+--enable_parsing
+
+CREATE VIEW v AS
+ SELECT * FROM JSON_TABLE('[1,2,3]',
+ '$[*]' COLUMNS (num INT PATH '$.a'
+ DEFAULT '123' ON EMPTY
+ DEFAULT '456' ON ERROR)) AS jt;
+SELECT * FROM v;
+SHOW CREATE VIEW v;
+DROP VIEW v;
+
+#--error ER_INVALID_JSON_VALUE_FOR_CAST
+SELECT * FROM JSON_TABLE('"asdf"',
+ '$' COLUMNS (a INT PATH '$' ERROR ON ERROR)) AS jt;
+#--error ER_WRONG_JSON_TABLE_VALUE
+--error ER_JSON_TABLE_ERROR_ON_FIELD
+SELECT * FROM
+ JSON_TABLE('[{"a":1},{"a":2}]',
+ '$' COLUMNS (a INT PATH '$[*].a' ERROR ON ERROR)) AS jt;
+# psergey-added:
+--error ER_JSON_TABLE_ERROR_ON_FIELD
+SELECT * FROM
+ JSON_TABLE('[{"a":1},{"a":2}]',
+ '$' COLUMNS (a JSON PATH '$[*].a' ERROR ON ERROR)) AS jt;
+--error ER_JSON_TABLE_ERROR_ON_FIELD
+SELECT * FROM
+ JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$' ERROR ON ERROR)) AS jt;
+SELECT * FROM
+ JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$')) AS jt;
+
+#--error ER_JT_MAX_NESTED_PATH
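+# 20 levels of NESTED PATH follow; the MySQL nesting-limit error above is
+# commented out, so the statement is expected to complete without error here.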
+SELECT * FROM
+ JSON_TABLE('{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{}}}}}}}}}}}}}}}}}}}',
+ '$' COLUMNS (i0 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i1 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i2 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i3 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i4 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i5 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i6 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i7 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i8 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i9 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i10 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i11 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i12 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i13 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i14 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i15 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i16 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i17 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i18 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i19 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i20 INT PATH '$.a'
+ )))))))))))))))))))))) jt;
+
+CREATE TABLE t1(id int, jd JSON);
+INSERT INTO t1 values (1, '[1,3,5]'),(2,'[2,4,6]');
+SELECT id, jt.* FROM t1,
+ JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt;
+SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.*
+ FROM t1,
+ JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt;
+
+EXPLAIN SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.*
+ FROM t1,
+ JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt;
+
+--sorted_result
+SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.*
+ FROM t1,
+ JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt,
+ t1 AS t2;
+
+EXPLAIN SELECT t1.id, t2.id, jt.*
+ FROM t1,
+ JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt,
+ t1 AS t2;
+
+EXPLAIN SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.*
+ FROM t1,
+ JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt,
+ t1 AS t2;
+
+SELECT * FROM t1 WHERE id IN
+ (SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS
+ (id INT PATH '$')) AS jt);
+EXPLAIN SELECT * FROM t1 WHERE id IN
+ (SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS
+ (id INT PATH '$')) AS jt);
+
+--disable_parsing
+(psergey:!)
+# JSON_TABLE referring outer scope
+SELECT * FROM t1 WHERE id IN
+ (SELECT * FROM JSON_TABLE(t1.jd, '$[*]' COLUMNS
+ (id INT PATH '$')) AS jt);
+EXPLAIN SELECT * FROM t1 WHERE id IN
+ (SELECT * FROM JSON_TABLE(t1.jd, '$[*]' COLUMNS
+ (id INT PATH '$')) AS jt);
+--enable_parsing
+
+# JSON_TABLE referring another JSON_TABLE
+--disable_parsing
+(psergey:!)
+SELECT id, jt1.*, jt2.*
+ FROM t1, JSON_TABLE(jd, '$' COLUMNS (data1 JSON PATH '$')) AS jt1,
+ JSON_TABLE(data1, '$[*]' COLUMNS (id2 INT PATH '$')) AS jt2;
+
+EXPLAIN SELECT id, jt1.*, jt2.*
+ FROM t1, JSON_TABLE(jd, '$' COLUMNS (data1 JSON PATH '$')) AS jt1,
+ JSON_TABLE(data1, '$[*]' COLUMNS (id2 INT PATH '$')) AS jt2;
+--enable_parsing
+
+DROP TABLE t1;
+
+
+SELECT * FROM JSON_TABLE('"asdf"', '$' COLUMNS(
+ tm TIME PATH '$',
+ dt DATE PATH '$',
+ i INT PATH '$',
+ f FLOAT PATH '$',
+ d DECIMAL PATH '$')) AS jt;
+
+# DEFAULT NULL is not accepted syntax.
+--error ER_PARSE_ERROR
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON EMPTY)) jt;
+--error ER_PARSE_ERROR
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON ERROR)) jt;
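+# A commented sketch of an accepted alternative: an explicit NULL ON EMPTY /
+# NULL ON ERROR clause (exercised later in this file) yields SQL NULL instead:
+# SELECT * FROM
+#   JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' NULL ON EMPTY)) jt;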
+
+# The DEFAULT value must be a string in JSON format for now; an accepted form
+# is sketched in a comment after the error cases below.
+--error 1064
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON EMPTY)) jt;
+--error 1064
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON ERROR)) jt;
+--error 1064
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x DATE
+ PATH '$.x'
+ DEFAULT DATE'2020-01-01' ON EMPTY)) jt;
+--error 1064
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x DATE
+ PATH '$.x'
+ DEFAULT DATE'2020-01-01' ON ERROR)) jt;
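+# A commented sketch of the accepted form, with the default given as a string
+# containing a JSON literal (the pattern used throughout this file):
+# SELECT * FROM
+#   JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT '0' ON EMPTY)) jt;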
+
+--echo #
+--echo # Bug#25413069: SIG11 IN CHECK_COLUMN_GRANT_IN_TABLE_REF
+--echo #
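+# The bare identifier abc in the data-source argument below does not resolve
+# to any column, hence the expected ER_BAD_FIELD_ERROR.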
+--error ER_BAD_FIELD_ERROR
+SELECT a FROM JSON_TABLE(abc, '$[*]' COLUMNS ( a int path '$.a')) AS jt;
+
+--echo #
+--echo # Bug#25420680: ASSERTION `THD->IS_ERROR()' FAILED IN SQL/SQL_SELECT.CC
+--echo #
+SELECT * FROM JSON_TABLE('{"a":"2017-11-1"}',
+ '$' COLUMNS (jpath DATE PATH '$.a')) AS jt;
+--echo #
+--echo # Bug#25413826: ASSERTION `TABLE_LIST->ALIAS' FAILED
+--echo #
+#--error ER_TF_MUST_HAVE_ALIAS
+--error ER_PARSE_ERROR
+SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]',
+ '$[*]' COLUMNS ( a int path '$.b'));
+
+--echo #
+--echo # Bug#25421464: ASSERTION `!STRCMP(TABLE_REF->TABLE_NAME, ...
+--echo #
+CREATE VIEW v1 AS
+ SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]',
+ '$[*]' COLUMNS ( a INT PATH '$.b')) AS jt;
+SELECT * FROM v1;
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+--echo #
+--echo # Bug#25427457: ASSERTION `!((*REG_FIELD)->FLAGS & 16)'
+--echo #
+SELECT * FROM JSON_TABLE('{"a":"1"}',
+ '$' COLUMNS (jpath JSON PATH '$.a',
+ o FOR ORDINALITY)) AS jt
+WHERE o = 1;
+
+--echo #
+--echo # Bug#25427982: ASSERTION `DERIVED' FAILED IN SQL/TABLE.H
+--echo #
+#--error ER_WRONG_FIELD_WITH_GROUP
+SELECT je,o FROM JSON_TABLE('{"a":"1"}',
+ '$' COLUMNS (o FOR ORDINALITY,
+ je BIGINT EXISTS PATH '$.a')) AS jt
+GROUP BY je;
+SELECT je,COUNT(o) FROM JSON_TABLE('{"a":"1"}',
+ '$' COLUMNS (o FOR ORDINALITY,
+ je BIGINT EXISTS PATH '$.a')) AS jt
+GROUP BY je;
+
+--echo #
+--echo # Bug#25413194: ASSERTION `!(WANT_PRIVILEGE & ~(GRANT->WANT_PRIVILEGE
+--echo #
+CREATE TABLE t1 (j JSON);
+SELECT * FROM t1,JSON_TABLE(t1.j, '$[*]' COLUMNS ( a int path '$.b')) AS jt;
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25460537:SIG 11 IN NEXT_FAST AT SQL/SQL_LIST.H
+--echo #
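+# The quotes inside the prepared statement text are backslash-escaped because
+# the whole statement is itself written as a double-quoted string literal.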
+PREPARE STMT FROM
+ "SELECT * FROM JSON_TABLE(
+ \'[{\"a\":\"3\"},{\"a\":2},{\"b\":1},{\"a\":0}]\',
+ \'$[*]\' COLUMNS (id
+ FOR ORDINALITY,
+ jpath VARCHAR(100) PATH \'$.a\',
+ jexst INT EXISTS PATH \'$.b\')
+ ) as tt";
+EXECUTE STMT;
+EXECUTE STMT;
+DEALLOCATE PREPARE stmt;
+
+--echo #
+--echo # Bug#25522353: SIG 11 IN JOIN::MAKE_JOIN_PLAN | SQL/SQL_OPTIMIZER.CC
+--echo #
+CREATE TABLE t1 (id INT, jc JSON);
+
+# psergey!
+#--error ER_UNKNOWN_TABLE
+SELECT * FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+# psergey!
+#--error ER_UNKNOWN_TABLE
+SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt LEFT JOIN t1 ON t1.jc=jt.id;
+
+SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt RIGHT JOIN t1 ON t1.jc=jt.id;
+
+SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id
+ LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+EXPLAIN SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id
+ LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+# The error code can be ER_UNKNOWN_TABLE or ER_BAD_FIELD_ERROR
+# (see find_field_in_tables()); both are acceptable.
+
+# psergey:
+#--error ER_BAD_FIELD_ERROR
+SELECT * FROM t1 AS t1o RIGHT JOIN
+ (t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ ON t1o.id=t1i.id;
+
+# psergey:
+#--error ER_UNKNOWN_TABLE
+SELECT * FROM t1 AS t1o RIGHT JOIN
+ (t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ ON t1o.id=t1i.id;
+
+# psergey:
+#--error ER_UNKNOWN_TABLE
+WITH qn AS
+ (SELECT jt.* FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id)
+ SELECT * from qn;
+
+#--error ER_UNKNOWN_TABLE
+WITH qn AS
+ (SELECT 1 UNION
+ SELECT jt.id FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id)
+ SELECT * from qn;
+
+#--error ER_BAD_FIELD_ERROR
+SELECT * FROM t1 AS t1o RIGHT JOIN
+ (t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ ON t1o.id=t1i.id;
+
+#--error ER_UNKNOWN_TABLE
+SELECT * FROM t1 AS t1o RIGHT JOIN
+ (t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ ON t1o.id=t1i.id;
+
+INSERT INTO t1 VALUES(1,"1"),(2,"4"),(3,"3");
+
+ANALYZE TABLE t1;
+
+SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id INT PATH '$')) as jt ON t1.id=jt.id;
+
+EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id INT PATH '$')) as jt ON t1.id=jt.id;
+
+#--error ER_UNKNOWN_TABLE
+--error ER_BAD_FIELD_ERROR
+SELECT * FROM t1
+ LEFT JOIN
+ JSON_TABLE(t1.jc, '$' COLUMNS (id FOR ORDINALITY)) as jt ON t1.jc=jt.id
+ RIGHT JOIN
+ JSON_TABLE(jt.id, '$' COLUMNS (id FOR ORDINALITY)) as jt1 ON jt.id=jt1.id;
+
+DROP TABLE t1;
+
+# Test that tmp table can overflow to disk
+#set @save_mem_se= @@internal_tmp_mem_storage_engine;
+#set @@internal_tmp_mem_storage_engine=MEMORY;
+
+#set @save_heap_size= @@max_heap_table_size;
+#set @@max_heap_table_size= 16384;
+
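+# FLUSH STATUS resets the session status counters, so that SHOW STATUS below
+# reflects only the temporary tables created by the following SELECT.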
+FLUSH STATUS;
+SELECT * FROM JSON_TABLE(
+ '[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]',
+ '$[*]' COLUMNS (
+ c1 CHAR(255) PATH '$',
+ c2 CHAR(255) PATH '$',
+ c3 CHAR(255) PATH '$',
+ c4 CHAR(255) PATH '$',
+ c5 CHAR(255) PATH '$',
+ c6 CHAR(255) PATH '$',
+ c7 CHAR(255) PATH '$',
+ c8 CHAR(255) PATH '$')) AS jt;
+SHOW STATUS LIKE '%tmp%';
+#set @@max_heap_table_size= @save_heap_size;
+#set @@internal_tmp_mem_storage_engine= @save_mem_se;
+
+--echo #
+--echo # Bug#25504063: EXPLAIN OF JSON_TABLE QUERY USES INTERNAL TEMP TABLES
+--echo #
+FLUSH STATUS;
+SELECT * FROM
+ JSON_TABLE(
+ '[{"a":"3"}]',
+ '$[*]' COLUMNS (id FOR ORDINALITY)
+ ) AS tt;
+SHOW STATUS LIKE '%tmp%';
+
+--echo #
+--echo # Bug#25525409: ASSERTION `TABLE_LIST->TABLE' FAILED IN SQL/SQL_BASE.CC
+--echo #
+--error ER_BAD_FIELD_ERROR
+SELECT * FROM JSON_TABLE( ( SELECT a ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+
+SELECT * FROM JSON_TABLE( ( SELECT 1 ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+
+# psergey: check this
+--error ER_INVALID_GROUP_FUNC_USE
+SELECT * FROM JSON_TABLE( ( SUM(1) ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+
+--echo #
+--echo # Bug#25472875: ERROR SHOULD BE THROWN FOR INCORRECT VALUES
+--echo #
+#--error ER_JT_VALUE_OUT_OF_RANGE
+SELECT *
+ FROM JSON_TABLE('{"a":"1993-01-01"}',
+ '$' COLUMNS (jp DATE PATH '$.b' DEFAULT '1000' ON EMPTY))
+ AS jt;
+
+--echo #
+--echo # Bug#25532429: INVALID JSON ERROR NOT THROWN WITH EMPTY TABLES JOIN
+--echo #
+CREATE TABLE t1(j JSON);
+#--error ER_INVALID_JSON_TEXT_IN_PARAM
+SELECT * FROM t1,
+ JSON_TABLE( 'dqwfjqjf' , '$[*]' COLUMNS (col5 FOR ORDINALITY) ) AS alias7;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Bug#25540370: SIG 11 IN SHOW_SQL_TYPE|SQL/SQL_SHOW.CC:7063
+--echo #
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '$[*]' COLUMNS (col18 CHAR(70) PATH '$')
+ ) AS alias2;
+
+--echo # Too short field causes truncation, error and triggers ON ERROR clause
+ SELECT * FROM
+ JSON_TABLE('["3.14159"]',
+ '$[*]' COLUMNS (col18 CHAR(6) PATH '$')
+ ) AS alias2;
+
+--echo #Truncated space doesn't trigger ON ERROR
+ SELECT * FROM
+ JSON_TABLE('["3.14159 "]',
+ '$[*]' COLUMNS (col18 CHAR(7) PATH '$')
+ ) AS alias2;
+SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '$[*]' COLUMNS (col18 CHAR(255) PATH '$')
+ ) AS alias2;
+
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '$[*]' COLUMNS (col18 VARCHAR(70) PATH '$')
+ ) AS alias2;
+
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '$[*]' COLUMNS (col18 VARCHAR(255) PATH '$')
+ ) AS alias2;
+
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '$[*]' COLUMNS (col18 FLOAT PATH '$')
+ ) AS alias2;
+
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '$[*]' COLUMNS (col18 DOUBLE PATH '$')
+ ) AS alias2;
+
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$')
+ ) AS alias2;
+
+#--error ER_JT_VALUE_OUT_OF_RANGE
+--error ER_JSON_TABLE_ERROR_ON_FIELD
+SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$' ERROR ON ERROR)
+ ) AS alias2;
+
+ SELECT * FROM
+ JSON_TABLE('[0.9]',
+ '$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '$')
+ ) AS alias2;
+
+SELECT * FROM
+ JSON_TABLE('["asdf","ghjk"]',
+ '$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$'
+ DEFAULT "3.14159" ON ERROR)
+ ) AS alias2;
+
+CREATE TABLE t1(jd JSON);
+INSERT INTO t1 VALUES('["asdf"]'),('["ghjk"]');
+SELECT * FROM t1,
+ JSON_TABLE(jd,
+ '$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$'
+ DEFAULT "3.14159" ON ERROR)
+ ) AS alias2;
+DROP TABLE t1;
+--echo #
+--echo # Bug#25540027: SIG 11 IN FIND_FIELD_IN_TABLE | SQL/SQL_BASE.CC
+--echo #
+CREATE TABLE t1(c1 JSON);
+--error ER_NON_UPDATABLE_TABLE
+UPDATE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+ SET jt1.a=1;
+--error ER_PARSE_ERROR
+DELETE JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+ FROM t1;
+--error ER_PARSE_ERROR
+DELETE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+ USING t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25547244: ASSERTION `!TABLE || (!TABLE->READ_SET || BITMAP_IS_SET(
+--echo #
+CREATE TABLE t1(i INT);
+INSERT INTO t1 VALUES(1);
+--disable_parsing
+# Crash #3 (not reproducible anymore?)
+WITH cte_query AS
+ (SELECT * FROM t1, JSON_TABLE( JSON_OBJECT('ISSKF',i) ,
+ '$[*]' COLUMNS (jtcol1 INT EXISTS PATH '$[*]') ) AS alias2)
+SELECT jtcol1 AS field1 FROM cte_query;
+--enable_parsing
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25540675: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED
+--echo #
+CREATE TABLE j1(j JSON);
+INSERT INTO j1 VALUES('[1,2,3]'),('[1,2,4]');
+SELECT * FROM j1,
+ JSON_TABLE( JSON_OBJECT('key1', j) ,
+ '$.*' COLUMNS (NESTED PATH '$.*' COLUMNS (col11 FOR ORDINALITY))) AS alias2;
+DROP TABLE j1;
+
+--echo #
+--echo # Bug#25584335: SIG 11 IN TABLE_LIST::FETCH_NUMBER_OF_ROWS
+--echo #
+CREATE TABLE t1(i INT);
+
+PREPARE stmt FROM "SELECT alias1.i AS field1 FROM (
+ t1 AS alias1,
+ (SELECT * FROM
+ JSON_TABLE('[1,2,3]' ,
+ '$[*]' COLUMNS (`col_varchar` FOR ORDINALITY)) AS SQ1_alias1
+ ) AS alias2 )";
+
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25604048: COLUMN NAMES WITH SAME 33-CHAR PREFIX ARE EQUAL
+--echo #
+SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS(
+ column_name_is_thirty_four_or_more VARCHAR(17) PATH '$.key1',
+ column_name_is_thirty_four_or_more_yes_indeed VARCHAR(17) PATH '$.key1'
+) ) AS alias1;
+
+#--error ER_WRONG_COLUMN_NAME
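+# The quoted column name below ends in a trailing space; the MySQL
+# ER_WRONG_COLUMN_NAME expectation is commented out, so the name is accepted.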
+SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS(
+ `column_name_is_thirty_four_or_more ` VARCHAR(17) PATH '$.key1'
+) ) AS alias1;
+
+--echo #
+--echo # Bug#25604404: JSON_TABLE MORE RESTRICTIVE WITH IDENTIFIERS THAN
+--echo # CREATE TABLE
+--echo #
+SELECT * FROM JSON_TABLE( '[1, 2]', '$' COLUMNS(
+ one INT PATH '$[0]', two INT PATH '$[1]'
+)) AS jt;
+
+--echo #
+--echo # Bug#25588450: SIG 6 IN JSON_WRAPPER::SEEK|SQL/JSON_DOM.CC
+--echo #
+CREATE TABLE t1(c VARCHAR(10)) ENGINE=MEMORY;
+INSERT INTO t1 VALUES('fiheife');
+#--error ER_INVALID_JSON_TEXT_IN_PARAM
+--error ER_JSON_SYNTAX
+SELECT * FROM `t1` AS alias1, JSON_TABLE( `c` , '$[*]' COLUMNS (jtcol1 JSON
+PATH '$.*')) AS alias2 WHERE jtcol1 <= 'kjfh';
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25587754: ASSERTION `FIXED == 0 || BASIC_CONST_ITEM()' FAILED
+--echo #
+PREPARE stmt FROM
+ "SELECT * FROM JSON_TABLE( '[1,2]', '$[*]'
+ COLUMNS (jtcol1 JSON PATH '$.*')) AS alias2";
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+--echo #
+--echo # Bug#25584593: UNABLE TO USE JSON_TABLE() ON TEXT/BLOB JSON DATA
+--echo #
+SELECT * FROM JSON_TABLE(NULL, '$.k' COLUMNS (id FOR ORDINALITY)) AS aLias;
+SELECT * FROM JSON_TABLE(@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias;
+SET @myjson = '{"k": 42}';
+SELECT * FROM JSON_TABLE(@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias;
+
+CREATE TABLE t1(
+ txt TEXT, ty TINYTEXT, tm MEDIUMTEXT, tl LONGTEXT);
+INSERT INTO t1 values (
+ '{"k": "text"}','{"k": "tinytext"}','{"k": "mediumtext"}','{"k": "longtext"}');
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.ty, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.tm, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.tl, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id TEXT PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id TINYTEXT PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id MEDIUMTEXT PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id LONGTEXT PATH '$')) AS alias;
+
+# BLOB can store data from JSON
+SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias;
+# Check that type is printed correctly
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TINYBLOB PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb MEDIUMBLOB PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb LONGBLOB PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TEXT PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TINYTEXT PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb MEDIUMTEXT PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb LONGTEXT PATH '$')) as alias;
+# But a BLOB column can't be used as the JSON_TABLE data source
+#--error ER_WRONG_ARGUMENTS
+--error ER_PARSE_ERROR
+SELECT * FROM
+ (SELECT CAST(blb AS JSON) jf FROM
+ JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) AS jti) AS dt,
+ JSON_TABLE(jf, '$' COLUMNS (blb BLOB PATH '$')) AS jto;
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#26500384: ASSERT FAILURE IN QUERY WITH WINDOW FUNCTION AND
+--echo # JSON_TABLE
+--echo #
+CREATE TABLE t (x INT);
+INSERT INTO t VALUES (1), (2), (3);
+--error ER_BAD_FIELD_ERROR
+SELECT MAX(t.x) OVER () m, jt.* FROM t,
+ JSON_TABLE(JSON_ARRAY(m), '$[*]' COLUMNS (i INT PATH '$')) jt;
+DROP TABLE t;
+
+--echo #
+--echo # Bug#26583283: ASSERTION `!THD->IS_ERROR()' FAILED IN SQL_RESOLVER.CC
+--echo #
+EXPLAIN SELECT * FROM JSON_TABLE('null', '$' COLUMNS(AA DECIMAL PATH '$')) tt;
+
+CREATE VIEW v1 AS SELECT * FROM
+ JSON_TABLE( 'null', '$' COLUMNS (c1 DECIMAL PATH '$' ) ) AS jt;
+SELECT * FROM v1;
+EXPLAIN SELECT * FROM v1;
+DROP VIEW v1;
+
+--echo #
+--echo # Bug#25822408: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED
+--echo #
+PREPARE stmt FROM "SELECT * FROM
+ JSON_TABLE('{\"a\":1}','$' COLUMNS (c1 CHAR(20) PATH '$.b' ERROR ON EMPTY)) jt";
+--error 4174
+EXECUTE stmt;
+--error 4174
+EXECUTE stmt;
+
+--echo #
+--echo # Bug#25594571: CRASH AT ITEM::CONST_ITEM|SQL/ITEM.H
+--echo #
+CREATE TABLE t1 (i INT);
+
+INSERT INTO t1 VALUES(1),(2),(3),(4),(5),(6),(7);
+
+PREPARE stmt FROM "SELECT * FROM t1 AS alias1 LEFT JOIN t1 AS alias2
+LEFT JOIN JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (i FOR ORDINALITY )) AS
+alias3 ON alias2 . `i` = alias3 . `i` ON alias1 . `i` = alias2 . `i`";
+
+EXECUTE stmt;
+EXECUTE stmt;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#26648617: ASSERTION `IS_VIEW_OR_DERIVED() &&
+--echo # USES_MATERIALIZATION()' FAILED.
+--echo #
+CREATE TABLE t1 (
+ col_varchar_key varchar(1) DEFAULT NULL
+) ENGINE=InnoDB;
+
+INSERT INTO t1 VALUES(1),(4);
+
+SELECT * FROM t1 WHERE col_varchar_key NOT IN (
+ SELECT col_varchar_key FROM JSON_TABLE(
+ '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+ (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+
+EXPLAIN SELECT * FROM t1 WHERE col_varchar_key NOT IN (
+ SELECT col_varchar_key FROM JSON_TABLE(
+ '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+ (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+
+SELECT * FROM t1 WHERE col_varchar_key IN (
+ SELECT col_varchar_key FROM JSON_TABLE(
+ '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+ (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+
+EXPLAIN SELECT * FROM t1 WHERE col_varchar_key IN (
+ SELECT col_varchar_key FROM JSON_TABLE(
+ '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+ (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#26711551: WL8867:CONDITIONAL JUMP IN JSON_TABLE_COLUMN::CLEANUP
+--echo #
+CREATE TABLE t(x int, y int);
+INSERT INTO t(x) VALUES (1);
+UPDATE t t1, JSON_TABLE('[2]', '$[*]' COLUMNS (x INT PATH '$')) t2
+SET t1.y = t2.x;
+SELECT * FROM t;
+DROP TABLE t;
+
+--echo #
+--echo # Bug#26679671: SIG 11 IN JSON_BINARY::PARSE_BINARY()
+--echo #
+CREATE TABLE t1(id INT, f1 JSON);
+INSERT INTO t1 VALUES (1, '{\"1\": 1}'), (2, '{\"1\": 2}'), (3, '{\"1\": 3}'),
+(4, '{\"1\": 4}'), (5, '{\"1\": 5}'), (6, '{\"1\": 6}');
+ANALYZE TABLE t1;
+
+ANALYZE TABLE t1;
+
+--sorted_result
+SELECT * FROM t1 as jj1,
+ (SELECT tt2.* FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt;
+EXPLAIN SELECT * FROM t1 as jj1,
+ (SELECT tt2.* FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt;
+
+# psergey:name resolution
+--error ER_NON_UNIQ_ERROR
+SELECT * FROM t1 as jj1,
+ (SELECT tt2.* FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN
+ t1 AS tt3) dt ORDER BY 1,3 LIMIT 10;
+# psergey:name resolution
+--error ER_NON_UNIQ_ERROR
+EXPLAIN SELECT * FROM t1 as jj1,
+ (SELECT tt2.* FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN
+ t1 AS tt3) dt ORDER BY 1,3 LIMIT 11;
+
+SELECT * FROM t1 WHERE id IN
+ (SELECT id FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+EXPLAIN SELECT * FROM t1 WHERE id IN
+ (SELECT id FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#26760811: WL#8867: MEMORY LEAK REPORTED BY ASAN AND VALGRIND
+--echo #
+CREATE TABLE t (j JSON);
+INSERT INTO t VALUES
+('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]'),
+('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]');
+SELECT COUNT(*) FROM t, JSON_TABLE(j, '$[*]' COLUMNS (i INT PATH '$')) AS jt;
+PREPARE ps FROM
+'SELECT COUNT(*) FROM t, JSON_TABLE(j, ''$[*]'' COLUMNS (i INT PATH ''$'')) AS jt';
+EXECUTE ps;
+EXECUTE ps;
+DROP PREPARE ps;
+DROP TABLE t;
+
+--echo #
+--echo # Bug #26781759: NON-UNIQUE ALIAS ERROR NOT BEING THROWN
+--echo #
+--error ER_NONUNIQ_TABLE
+SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias,
+JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias;
+
+--echo #
+--echo # Bug #26761470: WL#8867: JOIN::MAKE_JOIN_PLAN():
+--echo # ASSERTION `SELECT_LEX->IS_RECURSIVE()' FAILED
+--echo #
+CREATE TABLE t1 (x INT);
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (j JSON);
+INSERT INTO t2 (j) VALUES ('[1,2,3]');
+--sorted_result
+# psergey:name resolution
+--error ER_WRONG_OUTER_JOIN
+SELECT * FROM t1 RIGHT JOIN
+(SELECT o FROM t2, JSON_TABLE(j, '$[*]' COLUMNS (o FOR ORDINALITY)) AS jt) AS t3
+ON (t3.o = t1.x);
+DROP TABLE t1, t2;
+
+--echo #
+--echo # Bug#27152428 JSON_TABLE + PREPARED STATEMENT + VIEW HAS PROBLEM IN DURING RESOLUTION
+--echo #
+
+CREATE TABLE t1 (a INT, b INT);
+
+CREATE VIEW v2 AS SELECT * FROM t1 LIMIT 2;
+
+--disable_parsing
+# psergey-done: crash in name resolution:
+SELECT b
+ FROM (SELECT * FROM v2) vq1,
+ JSON_TABLE(CONCAT(vq1.b,'[{\"a\":\"3\"}]'),
+ '$[*]' COLUMNS (id FOR ORDINALITY,
+ jpath VARCHAR(100) PATH '$.a',
+ JEXST INT EXISTS PATH '$.b')
+ ) AS dt;
+--enable_parsing
+
+DROP TABLE t1;
+DROP VIEW v2;
+
+
+--echo #
+--echo # Bug#27189940: CREATE VIEW FAILS ON JSON_TABLE() IN SCHEMA-LESS CONNECTIONS
+--echo # BUG#27217897: JSON_TABLE() FAILS IF NO DATABASE SELECTED
+--echo #
+
+
+# Connect without a schema name:
+connect (conn1,localhost,root,,*NO-ONE*);
+connection conn1;
+
+CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+
+connection default;
+disconnect conn1;
+use test;
+
+SHOW CREATE VIEW test.v;
+SELECT * FROM test.v;
+DROP VIEW test.v;
+
+--echo #
+--echo # Bug#27729112 JSON_TABLE SHOULD DECODE BASE64-ENCODED STRINGS
+--echo #
+SELECT v
+FROM JSON_TABLE(JSON_OBJECT('foo', _binary'bar'), '$'
+ COLUMNS(v VARCHAR(255) PATH '$.foo')) tbl;
+
+--disable_parsing
+# not supported: the block below relies on MySQL-only histogram features
+# (ANALYZE TABLE ... UPDATE HISTOGRAM, INFORMATION_SCHEMA.COLUMN_STATISTICS)
+CREATE TABLE t1 (col1 VARCHAR(255) CHARACTER SET ucs2,
+ col2 VARCHAR(255) CHARACTER SET utf8mb4 COLLATE
+ utf8mb4_unicode_cs);
+INSERT INTO t1 VALUES ("æ", "ハ"), ("å", "ø"), ("ø", "パ"), ("@", "バ");
+
+ANALYZE TABLE t1 UPDATE HISTOGRAM ON col1, col2;
+SELECT v value, c cumulfreq
+FROM INFORMATION_SCHEMA.COLUMN_STATISTICS,
+ JSON_TABLE(histogram->'$.buckets', '$[*]'
+ COLUMNS(v VARCHAR(255) CHARACTER SET ucs2 PATH '$[0]',
+ c double PATH '$[1]')) hist
+WHERE column_name = "col1";
+SELECT v value, c cumulfreq
+FROM INFORMATION_SCHEMA.COLUMN_STATISTICS,
+ JSON_TABLE(histogram->'$.buckets', '$[*]'
+ COLUMNS(v VARCHAR(255) CHARACTER SET utf8mb4 PATH '$[0]',
+ c double PATH '$[1]')) hist
+WHERE column_name = "col2";
+DROP TABLE t1;
+
+--enable_parsing
+--echo #
+--echo # Bug#28254268: JSON_TABLE() FUNCTION REJECTS SELECT PERMISSIONS
+--echo #
+CREATE DATABASE db2;
+USE db2;
+CREATE TABLE t1 (c JSON);
+INSERT INTO t1 VALUES('[1,2,3]');
+
+CREATE USER user1@localhost;
+GRANT SELECT ON db2.t1 TO user1@localhost;
+
+connect (conn1,localhost,user1,,);
+connection conn1;
+USE db2;
+SELECT t1.c FROM t1;
+SELECT jt.* FROM t1, JSON_TABLE(t1.c, '$[*]' COLUMNS (num INT PATH '$[0]'))
+AS jt;
+
+disconnect conn1;
+connection default;
+
+DROP USER user1@localhost;
+DROP DATABASE db2;
+
+--echo #
+--echo # Bug#27856835 JSON_TABLE RETURNS WRONG DATATYPE WHEN INT-VALUE IS GRATER
+--echo # THAN (2^31-1)
+--echo #
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775807"}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+
+--echo # As we currently have no way of telling if a JSON string value is
+--echo # signed or unsigned, this value will overflow.
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+
+--echo # Here the JSON value is a NUMERIC value, and we thus know if the value
+--echo # is signed or unsigned.
+SELECT id FROM JSON_TABLE('[{"id":9223372036854775808}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+
+--echo # If we tell the JSON table column to be unsigned, we get to store the
+--echo # full value correctly.
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS
+(id BIGINT UNSIGNED PATH '$.id')) AS json;
+
+SELECT id FROM JSON_TABLE('[{"id":"2147483648"}]', '$[*]' COLUMNS
+(id INT UNSIGNED PATH '$.id')) AS json;
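+
+# A DECIMAL target column is another way to hold values above the signed
+# BIGINT range; an illustrative sketch only, left commented out:
+# SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS
+# (id DECIMAL(20,0) PATH '$.id')) AS json;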
+
+--echo # Check that we preserve the signedness of the columns.
+USE test;
+CREATE TABLE t1 AS SELECT id, value FROM
+ JSON_TABLE('[{"id":9223372036854775808, "value":9223372036854775807}]',
+ '$[*]' COLUMNS (id BIGINT UNSIGNED PATH '$.id',
+ value BIGINT PATH '$.value'))
+ AS json;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#28255453: VIEW USING JSON_TABLE FAILS IF NO SCHEMA IS SELECTED
+--echo #
+connect (conn1,localhost,root,,*NO-ONE*);
+CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+SELECT * FROM test.v;
+DROP VIEW test.v;
+
+--echo # Check that a user without access to the schema 'foo' cannot query
+--echo # a JSON_TABLE view in that schema.
+CREATE SCHEMA foo;
+CREATE VIEW foo.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+
+CREATE USER foo@localhost;
+connect (con1,localhost,foo,,);
+--error ER_TABLEACCESS_DENIED_ERROR
+SELECT * FROM foo.v;
+
+connection default;
+disconnect con1;
+DROP USER foo@localhost;
+DROP SCHEMA foo;
+
+--echo # Check that a user with access to the schema 'foo' can do a SELECT with
+--echo # a JSON_TABLE function.
+CREATE SCHEMA foo;
+CREATE USER foo@localhost;
+GRANT EXECUTE ON foo.* TO foo@localhost;
+connect (con1,localhost,foo,,foo);
+SELECT 1 FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$.a')) AS jt;
+connection default;
+disconnect con1;
+DROP USER foo@localhost;
+DROP SCHEMA foo;
+
+--echo #
+--echo # Bug#27923406 ERROR 1142 (42000) WHEN USING JSON_TABLE
+--echo #
+CREATE SCHEMA my_schema;
+
+CREATE USER foo@localhost;
+GRANT EXECUTE ON my_schema.* TO foo@localhost;
+connect (con1,localhost,foo,,my_schema);
+
+SELECT
+ *
+FROM
+ JSON_TABLE(
+ '[{"x":2,"y":"8"},{"x":"3","y":"7"},{"x":"4","y":6}]',
+ "$[*]" COLUMNS(
+ xval VARCHAR(100) PATH "$.x",
+ yval VARCHAR(100) PATH "$.y"
+ )
+ ) AS jt1;
+
+connection default;
+disconnect con1;
+DROP USER foo@localhost;
+DROP SCHEMA my_schema;
+
+--echo #
+--echo # Bug#28538315: JSON_TABLE() COLUMN TYPES DON'T SUPPORT COLLATE CLAUSE
+--echo #
+
+CREATE TABLE t1 SELECT *
+ FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10)
+ CHARSET utf8mb4
+ PATH '$')) AS jt1;
+SHOW CREATE TABLE t1;
+
+--disable_parsing
+CREATE TABLE t2 SELECT *
+ FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10)
+ CHARSET utf8mb4 COLLATE utf8mb4_bin
+ PATH '$')) AS jt1;
+SHOW CREATE TABLE t2;
+
+CREATE TABLE t3 AS SELECT *
+ FROM JSON_TABLE('"a"', '$' COLUMNS (a VARCHAR(10)
+ COLLATE ascii_bin
+ PATH '$')) jt;
+SHOW CREATE TABLE t3;
+--enable_parsing
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#28643862 JSON_TABLE'S "COLUMNS" CLAUSE USES
+--echo # GLOBAL.CHARACTER_SET_RESULTS DEFAULT CHARSET
+--echo #
+
+SET @@SESSION.character_set_connection = ascii;
+
+CREATE TABLE t1 SELECT a.col
+ FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a;
+
+SHOW CREATE TABLE t1;
+
+SET @@SESSION.collation_connection = latin1_bin;
+
+CREATE TABLE t2 SELECT a.col
+ FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a;
+
+SHOW CREATE TABLE t2;
+
+DROP TABLE t1, t2;
+
+SET @@SESSION.character_set_connection = DEFAULT;
+
+--echo #
+--echo # Bug#28851656: JSON_TABLE RETURN "UNKNOWN DATABASE ''" FROM A FUNCTION
+--echo #
+CREATE FUNCTION FN_COUNT_ROWS(X JSON)
+RETURNS INT DETERMINISTIC
+ RETURN (
+ SELECT COUNT(*) FROM JSON_TABLE( X, '$[*]' COLUMNS( I INT PATH '$')) der
+ );
+
+SELECT FN_COUNT_ROWS('[1, 2]') CNT;
+SELECT FN_COUNT_ROWS('[1, 2, 3]') CNT;
+SELECT FN_COUNT_ROWS('[1, 2, 3, 4]') CNT;
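+
+# The function can equally be applied to JSON documents stored in a table
+# column; a sketch with an illustrative table name, left commented out:
+# CREATE TABLE jdocs (j TEXT);
+# INSERT INTO jdocs VALUES ('[1, 2]'), ('[1, 2, 3]');
+# SELECT FN_COUNT_ROWS(j) CNT FROM jdocs;
+# DROP TABLE jdocs;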
+
+DROP FUNCTION FN_COUNT_ROWS;
+
+--echo #
+--echo # Bug#30310265: VIEWS LOSE THE CHARACTER SET OF JSON_TABLE'S
+--echo # PATH ARGUMENTS
+--echo #
+
+CREATE VIEW v1 AS
+ SELECT * FROM JSON_TABLE('{"å":{"å":1}}', '$.å' COLUMNS (x INT PATH '$.å')) t;
+SELECT * FROM v1;
+SET NAMES latin1;
+# Used to return zero rows.
+SELECT * FROM v1;
+SET NAMES DEFAULT;
+DROP VIEW v1;
+
+CREATE VIEW v2 AS
+ SELECT * FROM JSON_TABLE('{}', '$' COLUMNS (
+ x VARCHAR(10) PATH '$.a' DEFAULT '"isn''t here"' ON EMPTY)
+ ) t;
+# SHOW CREATE VIEW and SELECT from the view used to fail with a syntax error.
+--disable_parsing
+# psergey-done
+SHOW CREATE VIEW v2;
+
+SELECT * FROM v2;
+--enable_parsing
+DROP VIEW v2;
+
+# The string literals in JSON_TABLE didn't accept character set
+# introducers. Verify that they are accepted now.
+--disable_parsing
+# psergey: it's a bug!
+SELECT * FROM
+ JSON_TABLE(JSON_OBJECT(),
+ _utf8mb4'$' COLUMNS (NESTED PATH _utf8mb4'$.x' COLUMNS
+ (y INT PATH _utf8mb4'$.y'
+ DEFAULT _utf8mb4'1' ON EMPTY
+ DEFAULT _utf8mb4'2' ON ERROR))) jt;
+--enable_parsing
+
+--echo #
+--echo # Bug#30382156: STORED PROCEDURE, JSON_TABLE AND "CONST" JOIN TYPE
+--echo #
+CREATE TABLE t (id INT PRIMARY KEY, j JSON);
+INSERT INTO t VALUES (1, '{"x":1}');
+CREATE PROCEDURE p()
+ SELECT * FROM t, JSON_TABLE(j, '$' COLUMNS (x INT PATH '$.x')) jt
+ WHERE id = 1;
+CALL p();
+CALL p();
+CALL p();
+DROP PROCEDURE p;
+DROP TABLE t;
+
+--echo #
+--echo # WL#13512: Deprecate ON ERROR before ON EMPTY in JSON_TABLE syntax
+--echo #
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ DEFAULT '"a"' ON ERROR DEFAULT '"b"' ON EMPTY)) jt;
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ NULL ON ERROR NULL ON EMPTY)) jt;
+SELECT * FROM
+ JSON_TABLE('{"x":"c"}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ ERROR ON ERROR ERROR ON EMPTY)) jt;
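+
+# For comparison, the non-deprecated order places ON EMPTY before ON ERROR;
+# an illustrative sketch only, left commented out:
+# SELECT * FROM
+#   JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+#                                 DEFAULT '"b"' ON EMPTY DEFAULT '"a"' ON ERROR)) jt;
+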
+--echo #
+--echo # Bug#30628330: INSERT WITH JSON_TABLE FAILS DUE TO WARNING INVOKED
+--echo # WITH ON ERROR CLAUSE
+--echo #
+CREATE TABLE t(id INT, a TINYINT, b VARCHAR(3), c DATE, d DECIMAL);
+# This statement used to fail with "data too long".
+INSERT INTO t SELECT * FROM
+ JSON_TABLE(JSON_ARRAY(JSON_OBJECT('a', 1, 'b', 'abc'),
+ JSON_OBJECT('a', 2, 'b', 'abcd'),
+ JSON_OBJECT('a', 1000, 'b', 'xyz'),
+ JSON_OBJECT('c', TIME'12:00:00', 'd', 1e308)),
+ '$[*]' COLUMNS (id FOR ORDINALITY,
+ a TINYINT PATH '$.a' DEFAULT '111' ON ERROR,
+ b VARCHAR(3) PATH '$.b' DEFAULT '"ERR"' ON ERROR,
+ c DATE PATH '$.c' DEFAULT '"2001-01-01"' ON ERROR,
+ d DECIMAL PATH '$.c' DEFAULT '999' ON ERROR)
+ ) AS jt;
+SELECT * FROM t ORDER BY id;
+DROP TABLE t;
+
+--echo #
+--echo # Bug#30263373: INCORRECT OUTPUT FROM TABLE_FUNCTION_JSON::PRINT()
+--echo #
+
+CREATE VIEW v AS SELECT * FROM
+ JSON_TABLE('[123]', '$[*]' COLUMNS (`name with space 1` INT PATH '$',
+ `name with space 2` FOR ORDINALITY)) jt;
+# Used to fail with a syntax error, due to unquoted column names in
+# the view definition.
+SELECT * FROM v;
+DROP VIEW v;
+
+CREATE VIEW v AS SELECT HEX(x), HEX(y) FROM
+ JSON_TABLE('["abc"]', '$[*]' COLUMNS (x BINARY(10) PATH '$',
+ y VARBINARY(10) PATH '$')) jt;
+# Used to say CHAR(10) and VARCHAR(10) instead of BINARY(10) and VARBINARY(10).
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+ WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+SELECT * FROM v;
+DROP VIEW v;
+
+CREATE VIEW v AS SELECT * FROM
+ JSON_TABLE('[123]', '$[*]' COLUMNS(ti TINYINT PATH '$',
+ si SMALLINT PATH '$',
+ mi MEDIUMINT PATH '$',
+ i INT PATH '$',
+ bi BIGINT PATH '$',
+ tiu TINYINT UNSIGNED PATH '$',
+ siu SMALLINT UNSIGNED PATH '$',
+ miu MEDIUMINT UNSIGNED PATH '$',
+ iu INT UNSIGNED PATH '$',
+ biu BIGINT UNSIGNED PATH '$')) jt;
+# Used to lack the UNSIGNED attribute for the unsigned columns.
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+ WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+SELECT * FROM v;
+DROP VIEW v;
+
+--disable_parsing
+CREATE VIEW v AS SELECT * FROM
+ JSON_TABLE('[]', '$[*]' COLUMNS (g GEOMETRY PATH '$',
+ ls LINESTRING PATH '$')) AS jt;
+# Used to say GEOMETRY for both columns.
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+ WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+SELECT * FROM v;
+DROP VIEW v;
+--enable_parsing
+
+--disable_parsing
+CREATE VIEW v AS SELECT * FROM
+ JSON_TABLE('["abc"]', '$[*]' COLUMNS
+ (x VARCHAR(10) CHARSET latin1 PATH '$',
+ y VARCHAR(10) COLLATE utf8mb4_de_pb_0900_ai_ci PATH '$',
+ z LONGTEXT COLLATE utf8mb4_bin PATH '$')) jt;
+# Character set and collation information wasn't included.
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+ WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+# Used to return the default collation instead of the collation
+# specified in the JSON_TABLE column definitions.
+SELECT x, y, z, COLLATION(x) c_x, COLLATION(y) c_y, COLLATION(z) c_z FROM v;
+DROP VIEW v;
+--enable_parsing
+
+--echo #
+--echo # Bug#31345503 JSON_TABLE SHOULD RETURN SQL NULL FOR JSON NULL
+--echo #
+
+SELECT *
+FROM
+JSON_TABLE(
+'[
+ {"c1": null,
+ "c2": [{"c": "c_val", "l": [1,2]}],
+ "c3": null},
+ {"c1": true,
+ "c2": [{"c": "c_val","l": [11]}, {"c": "c_val", "l": [false]}],
+ "c3": true},
+ {"c1": false,
+ "c2": [{"c": null,"l": [true]}, {"c": "c_val", "l": [null]}],
+ "c3": false}
+ ]',
+ '$[*]' COLUMNS(
+ top_ord FOR ORDINALITY,
+ c1path VARCHAR(10) PATH '$.c1' ERROR ON ERROR,
+ NESTED PATH '$.c2[*]' COLUMNS (
+ c2path VARCHAR(10) PATH '$.c' ERROR ON ERROR,
+ ord FOR ORDINALITY,
+ NESTED PATH '$.l[*]' COLUMNS (lpath_c VARCHAR(10) PATH '$' ERROR ON ERROR,
+ lpath_i INT PATH '$' ERROR ON ERROR)
+ ),
+ c3path INT PATH '$.c3' ERROR ON ERROR
+ )
+) as jt;
+
+--echo #
+--echo # Bug #31327187 UBSAN: JSON_TABLE: NULL POINTER PASSED AS ARGUMENT 2,
+--echo # WHICH IS DECLARED TO NEVER BE NULL
+--echo #
+
+SELECT HEX(a) FROM JSON_TABLE(
+ '[{"E":{"e":true,"~":1,"S3":"sTa"},"r":3,"":6.7},"",6.5]',
+ '$'
+ COLUMNS(a BINARY(5) PATH '$[1]' NULL ON EMPTY)
+) e;
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
index 314e5825c08..52948a644f7 100644
--- a/mysql-test/main/opt_trace.test
+++ b/mysql-test/main/opt_trace.test
@@ -680,5 +680,20 @@ from information_schema.optimizer_trace;
set in_predicate_conversion_threshold=@tmp;
drop table t0;
+--echo #
+--echo # Test table functions.
+--echo #
+
+CREATE TABLE t1(id INT, f1 JSON);
+INSERT INTO t1 VALUES (1, '{\"1\": 1}'), (2, '{\"1\": 2}'), (3, '{\"1\": 3}'),
+ (4, '{\"1\": 4}'), (5, '{\"1\": 5}'), (6, '{\"1\": 6}');
+
+SELECT * FROM t1 WHERE id IN
+ (SELECT id FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+
+select * from information_schema.OPTIMIZER_TRACE;
+DROP TABLE t1;
+
--echo # End of 10.5 tests
set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/query_cache.result b/mysql-test/main/query_cache.result
index fc7ca726c48..7f728b20286 100644
--- a/mysql-test/main/query_cache.result
+++ b/mysql-test/main/query_cache.result
@@ -2207,6 +2207,22 @@ Variable_name Value
Qcache_queries_in_cache 0
DROP FUNCTION foo;
drop table t1;
+#
+# MDEV-22301 JSON_TABLE: Queries are not inserted into query cache.
+#
+create table t1 (a text);
+insert into t1 values ('{"a":"foo"}');
+flush status;
+SHOW STATUS LIKE 'Qcache_inserts';
+Variable_name Value
+Qcache_inserts 0
+select * from t1, json_table(t1.a, '$' columns (f varchar(20) path '$.a')) as jt;
+a f
+{"a":"foo"} foo
+SHOW STATUS LIKE 'Qcache_inserts';
+Variable_name Value
+Qcache_inserts 1
+drop table t1;
restore defaults
SET GLOBAL query_cache_type= default;
SET GLOBAL query_cache_size=@save_query_cache_size;
diff --git a/mysql-test/main/query_cache.test b/mysql-test/main/query_cache.test
index 6e113f0cdb7..6f92674812d 100644
--- a/mysql-test/main/query_cache.test
+++ b/mysql-test/main/query_cache.test
@@ -1800,6 +1800,17 @@ show status like "Qcache_queries_in_cache";
DROP FUNCTION foo;
drop table t1;
+--echo #
+--echo # MDEV-22301 JSON_TABLE: Queries are not inserted into query cache.
+--echo #
+create table t1 (a text);
+insert into t1 values ('{"a":"foo"}');
+flush status;
+SHOW STATUS LIKE 'Qcache_inserts';
+select * from t1, json_table(t1.a, '$' columns (f varchar(20) path '$.a')) as jt;
+SHOW STATUS LIKE 'Qcache_inserts';
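+# A byte-identical re-run of the SELECT above would then be served from the
+# query cache; this could be checked via Qcache_hits (illustrative sketch,
+# left commented out):
+# select * from t1, json_table(t1.a, '$' columns (f varchar(20) path '$.a')) as jt;
+# SHOW STATUS LIKE 'Qcache_hits';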
+drop table t1;
+
--echo restore defaults
SET GLOBAL query_cache_type= default;
SET GLOBAL query_cache_size=@save_query_cache_size;
diff --git a/mysql-test/suite/json/r/json_table.result b/mysql-test/suite/json/r/json_table.result
new file mode 100644
index 00000000000..762b2ce4216
--- /dev/null
+++ b/mysql-test/suite/json/r/json_table.result
@@ -0,0 +1,371 @@
+select * from json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+a
+1
+2
+select * from JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+1 11
+1 111
+2 22
+2 222
+3 NULL
+SELECT * FROM JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'), NESTED PATH '$.b[*]' COLUMNS (c INT PATH '$') ) ) jt;
+a b c
+1 11 NULL
+1 111 NULL
+1 NULL 11
+1 NULL 111
+2 22 NULL
+2 222 NULL
+2 NULL 22
+2 NULL 222
+3 NULL NULL
+create table t1 (id varchar(5), json varchar(1024));
+insert into t1 values ('j1', '[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]');
+insert into t1 values ('j2', '[{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}]');
+select id, json, a from t1, json_table(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a')) as tt;
+id json a
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 4
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 5
+select * from t1, JSON_TABLE(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+id json js_id a l_js_id b
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 1 1 11
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 1 2 111
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 2 1 22
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 2 2 222
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 1 3 1 11
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 1 3 2 111
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 2 4 1 22
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 2 4 2 222
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 5 1 22
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 5 2 222
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+ERROR 42S22: Unknown column 't1.no_field' in 'JSON_TABLE argument'
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, a INT PATH '$'))) as jt;
+ERROR 42S21: Duplicate column name 'a'
+DROP TABLE t1;
+create table t1 (item_name varchar(32), item_props varchar(1024));
+insert into t1 values ('Laptop', '{"color": "black", "price": 1000}');
+insert into t1 values ('Jeans', '{"color": "blue", "price": 50}');
+select * from t1 left join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+item_name item_props color
+Laptop {"color": "black", "price": 1000} black
+Jeans {"color": "blue", "price": 50} blue
+select * from t1 right join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+ERROR 42000: Cross dependency found in OUTER JOIN; examine your ON conditions
+DROP TABLE t1;
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+101 11
+101 111
+2 22
+2 222
+3 NULL
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+NULL 11
+NULL 111
+2 22
+2 222
+3 NULL
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+NULL 11
+NULL 111
+2 22
+2 222
+3 NULL
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+202 11
+202 111
+2 22
+2 222
+3 NULL
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3} xx YY]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+ERROR HY000: Syntax error in JSON text in argument 1 to function 'JSON_TABLE' at position 65
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' error on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+ERROR HY000: Field 'a' can't be set for JSON_TABLE 'jt'.
+select * from json_table('{"a":0}',"$" columns(a decimal(1,1) path '$.a')) foo;
+a
+0.0
+connect con1,localhost,root,,;
+select a from json_table('{"a":0}',"$" columns(a for ordinality)) foo;
+a
+1
+connection default;
+disconnect con1;
+create database db;
+use db;
+create table t (a text);
+insert into t values ('{"foo":"bar"}');
+create user u@localhost;
+grant select (a) on db.t to u@localhost;
+connect con1,localhost,u,,db;
+select a from t;
+a
+{"foo":"bar"}
+select * from t, json_table(t.a, '$' columns(f varchar(20) path '$.foo')) as jt;
+a f
+{"foo":"bar"} bar
+connection default;
+disconnect con1;
+drop user u@localhost;
+drop database db;
+use test;
+create table t1 (
+color varchar(32),
+price int
+);
+insert into t1 values ("red", 100), ("blue", 50);
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+set optimizer_switch='firstmatch=off';
+select * from
+json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100}]',
+'$[*]' columns( color varchar(100) path '$.color',
+price text path '$.price'
+ )
+) as T
+where
+T.color in (select color from t1 where t1.price=T.price);
+color price
+blue 50
+red 100
+drop table t1;
+select * from
+json_table(' [ {"color": "blue", "sizes": [1,2,3,4], "prices" : [10,20]},
+ {"color": "red", "sizes": [10,11,12,13,14], "prices" : [100,200,300]} ]',
+'$[*]' columns(
+color varchar(4) path '$.color',
+seq0 for ordinality,
+nested path '$.sizes[*]'
+ columns (seq1 for ordinality,
+size int path '$'),
+nested path '$.prices[*]'
+ columns (seq2 for ordinality,
+price int path '$')
+)
+) as T;
+color seq0 seq1 size seq2 price
+blue 1 1 1 NULL NULL
+blue 1 2 2 NULL NULL
+blue 1 3 3 NULL NULL
+blue 1 4 4 NULL NULL
+blue 1 NULL NULL 1 10
+blue 1 NULL NULL 2 20
+red 2 1 10 NULL NULL
+red 2 2 11 NULL NULL
+red 2 3 12 NULL NULL
+red 2 4 13 NULL NULL
+red 2 5 14 NULL NULL
+red 2 NULL NULL 1 100
+red 2 NULL NULL 2 200
+red 2 NULL NULL 3 300
+select * from json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100},
+ {"color": "rojo", "price": 10.0},
+ {"color": "blanco", "price": 11.0}]',
+'$[*]' columns( color varchar(100) path '$.color',
+price text path '$.price', seq for ordinality)) as T order by color desc;
+color price seq
+rojo 10.0 3
+red 100 2
+blue 50 1
+blanco 11.0 4
+create view v as select * from json_table('{"as":"b", "x":123}',"$" columns(a varchar(8) path '$.a' default '-' on empty, x int path '$.x')) x;
+select * from v;
+a x
+- 123
+show create table v;
+View Create View character_set_client collation_connection
+v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `x`.`a` AS `a`,`x`.`x` AS `x` from JSON_TABLE('{"as":"b", "x":123}', '$' COLUMNS (`a` varchar(8) PATH '$.a' DEFAULT '-' ON EMPTY, `x` int(11) PATH '$.x')) `x` latin1 latin1_swedish_ci
+drop view v;
+select * from json_table('{"as":"b", "x":123}',
+"$" columns(a varchar(8) path '$.a' default '-' on empty null on error null on empty, x int path '$.x')) x;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'null on empty, x int path '$.x')) x' at line 2
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*')) as jt;
+v
+NULL
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+v
+-
+select * from json_table('{"b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+v
+bar
+create table t1 (a varchar(100));
+insert into t1 values ('1');
+select * from t1 as T, json_table(T.a, '$[*]' columns(color varchar(100) path '$.nonexistent', seq for ordinality)) as T;
+ERROR 42000: Not unique table/alias: 'T'
+drop table t1;
+prepare s from 'select * from
+json_table(?,
+ \'$[*]\' columns( color varchar(100) path \'$.color\',
+ price text path \'$.price\',
+ seq for ordinality)) as T
+order by color desc; ';
+execute s using '[{"color": "red", "price":1}, {"color":"brown", "price":2}]';
+color price seq
+red 1 1
+brown 2 2
+deallocate prepare s;
+create view v2 as select * from json_table('[{"co\\\\lor": "blue", "price": 50}]', '$[*]' columns( color varchar(100) path '$.co\\\\lor') ) as T;
+select * from v2;
+color
+blue
+drop view v2;
+explain format=json select * from
+json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "tt",
+ "access_type": "ALL",
+ "rows": 40,
+ "filtered": 100,
+ "table_function": "json_table"
+ }
+ }
+}
+explain select * from
+json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE tt ALL NULL NULL NULL NULL 40 Table function: json_table
+create view v1 as select * from
+json_table('[{"color": "blue", "price": 50}]',
+'$[*]' columns(color text path '$.nonexistent',
+seq for ordinality)) as `ALIAS NOT QUOTED`;
+select * from v1;
+color seq
+NULL 1
+drop view v1;
+create view v1 as select * from
+json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100}]',
+'$[*]' columns(
+color text path "$.QUOTES \" HERE \"",
+color1 text path '$.QUOTES " HERE "',
+color2 text path "$.QUOTES ' HERE '",
+seq for ordinality)) as T;
+select * from v1;
+color color1 color2 seq
+NULL NULL NULL 1
+NULL NULL NULL 2
+drop view v1;
+CREATE TABLE t1 (x INT);
+INSERT INTO t1 VALUES (1), (2), (3);
+SELECT t1.x*2 m, jt.* FROM t1,
+JSON_TABLE(m, '$[*]' COLUMNS (i INT PATH '$')) jt;
+ERROR 42S22: Unknown column 'm' in 'JSON_TABLE argument'
+DROP TABLE t1;
+select * from json_table(JS3.size, '$' columns (size INT PATH '$.size')) as JS1, json_table(JS1.size, '$' columns (size INT PATH '$.size')) as JS2, json_table(JS1.size, '$' columns (size INT PATH '$.size')) as JS3 where 1;
+ERROR 42S22: Unknown column 'JS3.size' in 'JSON_TABLE argument'
+create table t1 (json varchar(100) character set utf8);
+insert into t1 values ('{"value":"АБВ"}');
+create table tj1 as
+select T.value
+from t1, json_table(t1.json, '$' columns (value varchar(32) PATH '$.value')) T;
+show create table tj1;
+Table Create Table
+tj1 CREATE TABLE `tj1` (
+ `value` varchar(32) CHARACTER SET utf8 DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+drop table tj1;
+CREATE TABLE t1(id INT, f1 JSON);
+INSERT INTO t1 VALUES
+(1, '{\"1\": 1}'),
+(2, '{\"1\": 2}'),
+(3, '{\"1\": 3}'),
+(4, '{\"1\": 4}'),
+(5, '{\"1\": 5}'),
+(6, '{\"1\": 6}');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'f1'
+test.t1 analyze status OK
+SELECT * FROM JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN t1 AS tt3;
+ERROR HY000: Incorrect usage of JSON_TABLE and argument
+SELECT * FROM t1 as jj1,
+(SELECT tt2.*
+FROM
+t1 as tt2,
+JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl
+STRAIGHT_JOIN
+t1 AS tt3
+) dt
+ORDER BY 1,3 LIMIT 10;
+ERROR HY000: Incorrect usage of JSON_TABLE and argument
+drop table t1;
+select collation(x) from
+JSON_TABLE('["abc"]', '$[*]' COLUMNS (x VARCHAR(10) CHARSET latin1 PATH '$')) tbl;
+collation(x)
+latin1_swedish_ci
+SELECT * FROM JSON_TABLE('{"x":1, "y":2}', _utf8mb4'$' COLUMNS (NESTED PATH _utf8mb4'$.x'
+ COLUMNS(y INT PATH _utf8mb4'$.y' DEFAULT _utf8mb4'1' ON EMPTY DEFAULT _utf8mb4'2' ON ERROR))) jt;
+y
+1
+select * from json_table(
+'{"name":"t-shirt", "colors": ["yellow", "blue"],"sizes": ["small", "medium", "large"]}',
+'$' columns(name varchar(32) path '$.name',
+nested path '$.colors[*]' columns (
+color varchar(32) path '$',
+nested path '$.sizes[*]' columns (
+size varchar(32) path '$'
+)))) as t;
+name color size
+t-shirt yellow NULL
+t-shirt blue NULL
+SELECT x, length(x) FROM
+JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' DEFAULT 'abcdefg' ON EMPTY)) jt;
+x length(x)
+abcdefg 7
+select * from
+json_table('[{"a":"aa"}, {"b":null}]', '$[*]'
+ columns (col1 int path '$.b' default '456' on empty)) as tt;
+col1
+456
+NULL
+select * from
+json_table('[{"a":"aa"}, {"b":true}]', '$[*]'
+ columns (col1 int path '$.b' default '456' on empty)) as tt;
+col1
+456
+1
+select * from
+json_table('[{"a":"aa"}, {"b":false}]', '$[*]'
+ columns (col1 int path '$.b' default '456' on empty)) as tt;
+col1
+456
+0
+select * from
+json_table('[{"a":"aa"}, {"b":null}]', '$[*]'
+ columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt;
+col1
+456
+NULL
+select * from
+json_table('[{"a":"aa"}, {"b":true}]', '$[*]'
+ columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt;
+col1
+456
+true
+select * from
+json_table('[{"a":"aa"}, {"b":false}]', '$[*]'
+ columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt;
+col1
+456
+false
+#
+# End of 10.5 tests
+#
diff --git a/mysql-test/suite/json/r/json_table2.result b/mysql-test/suite/json/r/json_table2.result
new file mode 100644
index 00000000000..e6d94278e06
--- /dev/null
+++ b/mysql-test/suite/json/r/json_table2.result
@@ -0,0 +1,266 @@
+select * from json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+a
+1
+2
+select * from JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+1 11
+1 111
+2 22
+2 222
+SELECT * FROM JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'), NESTED PATH '$.b[*]' COLUMNS (c INT PATH '$') ) ) jt;
+a b c
+1 11 NULL
+1 111 NULL
+1 NULL 11
+1 NULL 111
+2 22 NULL
+2 222 NULL
+2 NULL 22
+2 NULL 222
+create table t1 (id varchar(5), json varchar(1024));
+insert into t1 values ('j1', '[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]');
+insert into t1 values ('j2', '[{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}]');
+select id, json, a from t1, json_table(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a')) as tt;
+id json a
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 4
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 5
+select * from t1, JSON_TABLE(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+id json js_id a l_js_id b
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 1 1 11
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 1 1 2 111
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 2 1 22
+j1 [{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}] 2 2 2 222
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 1 3 1 11
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 1 3 2 111
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 2 4 1 22
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 2 4 2 222
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 5 1 22
+j2 [{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}] 3 5 2 222
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+ERROR 42S22: Unknown column 't1.no_field' in 'JSON_TABLE argument'
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, a INT PATH '$'))) as jt;
+ERROR 42S21: Duplicate column name 'a'
+DROP TABLE t1;
+create table t1 (item_name varchar(32), item_props varchar(1024));
+insert into t1 values ('Laptop', '{"color": "black", "price": 1000}');
+insert into t1 values ('Jeans', '{"color": "blue", "price": 50}');
+select * from t1 left join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+item_name item_props color
+Laptop {"color": "black", "price": 1000} black
+Jeans {"color": "blue", "price": 50} blue
+select * from t1 right join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+ERROR 42000: Cross dependency found in OUTER JOIN; examine your ON conditions
+DROP TABLE t1;
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+101 11
+101 111
+2 22
+2 222
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+NULL 11
+NULL 111
+2 22
+2 222
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+NULL 11
+NULL 111
+2 22
+2 222
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+a b
+202 11
+202 111
+2 22
+2 222
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3} xx YY]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+ERROR HY000: Syntax error in JSON text in argument 1 to function 'JSON_TABLE' at position 65
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' error on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+ERROR HY000: Field 'a' can't be set for JSON_TABLE 'jt'.
+select * from json_table('{"a":0}',"$" columns(a decimal(1,1) path '$.a')) foo;
+a
+0.0
+connect con1,localhost,root,,;
+select a from json_table('{"a":0}',"$" columns(a for ordinality)) foo;
+a
+1
+connection default;
+disconnect con1;
+create database db;
+use db;
+create table t (a text);
+insert into t values ('{"foo":"bar"}');
+create user u@localhost;
+grant select (a) on db.t to u@localhost;
+connect con1,localhost,u,,db;
+select a from t;
+a
+{"foo":"bar"}
+select * from t, json_table(t.a, '$' columns(f varchar(20) path '$.foo')) as jt;
+a f
+{"foo":"bar"} bar
+connection default;
+disconnect con1;
+drop user u@localhost;
+drop database db;
+use test;
+create table t1 (
+color varchar(32),
+price int
+);
+insert into t1 values ("red", 100), ("blue", 50);
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+set optimizer_switch='firstmatch=off';
+select * from
+json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100}]',
+'$[*]' columns( color varchar(100) path '$.color',
+price text path '$.price'
+ )
+) as T
+where
+T.color in (select color from t1 where t1.price=T.price);
+color price
+blue 50
+red 100
+drop table t1;
+select * from
+json_table(' [ {"color": "blue", "sizes": [1,2,3,4], "prices" : [10,20]},
+ {"color": "red", "sizes": [10,11,12,13,14], "prices" : [100,200,300]} ]',
+'$[*]' columns(
+color varchar(4) path '$.color',
+seq0 for ordinality,
+nested path '$.sizes[*]'
+ columns (seq1 for ordinality,
+size int path '$'),
+nested path '$.prices[*]'
+ columns (seq2 for ordinality,
+price int path '$')
+)
+) as T;
+color seq0 seq1 size seq2 price
+blue 1 1 1 NULL NULL
+blue 1 2 2 NULL NULL
+blue 1 3 3 NULL NULL
+blue 1 4 4 NULL NULL
+blue 1 NULL NULL 1 10
+blue 1 NULL NULL 2 20
+red 2 1 10 NULL NULL
+red 2 2 11 NULL NULL
+red 2 3 12 NULL NULL
+red 2 4 13 NULL NULL
+red 2 5 14 NULL NULL
+red 2 NULL NULL 1 100
+red 2 NULL NULL 2 200
+red 2 NULL NULL 3 300
+select * from json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100},
+ {"color": "rojo", "price": 10.0},
+ {"color": "blanco", "price": 11.0}]',
+'$[*]' columns( color varchar(100) path '$.color',
+price text path '$.price', seq for ordinality)) as T order by color desc;
+color price seq
+rojo 10.0 3
+red 100 2
+blue 50 1
+blanco 11.0 4
+create view v as select * from json_table('{"as":"b", "x":123}',"$" columns(a varchar(8) path '$.a' default '-' on empty, x int path '$.x')) x;
+select * from v;
+a x
+- 123
+show create table v;
+View Create View character_set_client collation_connection
+v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v` AS select `x`.`a` AS `a`,`x`.`x` AS `x` from JSON_TABLE('{"as":"b", "x":123}', '$' COLUMNS (`a` varchar(8) PATH '$.a' DEFAULT '-' ON EMPTY, `x` int(11) PATH '$.x')) `x` latin1 latin1_swedish_ci
+drop view v;
+select * from json_table('{"as":"b", "x":123}',
+"$" columns(a varchar(8) path '$.a' default '-' on empty null on error null on empty, x int path '$.x')) x;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'null on empty, x int path '$.x')) x' at line 2
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*')) as jt;
+v
+NULL
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+v
+-
+select * from json_table('{"b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+v
+bar
+create table t1 (a varchar(100));
+insert into t1 values ('1');
+select * from t1 as T, json_table(T.a, '$[*]' columns(color varchar(100) path '$.nonexistent', seq for ordinality)) as T;
+ERROR 42000: Not unique table/alias: 'T'
+drop table t1;
+prepare s from 'select * from
+json_table(?,
+ \'$[*]\' columns( color varchar(100) path \'$.color\',
+ price text path \'$.price\',
+ seq for ordinality)) as T
+order by color desc; ';
+execute s using '[{"color": "red", "price":1}, {"color":"brown", "price":2}]';
+color price seq
+red 1 1
+brown 2 2
+deallocate prepare s;
+create view v2 as select * from json_table('[{"co\\\\lor": "blue", "price": 50}]', '$[*]' columns( color varchar(100) path '$.co\\\\lor') ) as T;
+select * from v2;
+color
+blue
+drop view v2;
+explain format=json select * from
+json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "tt",
+ "access_type": "ALL",
+ "rows": 40,
+ "filtered": 100,
+ "table_function": "json_table"
+ }
+ }
+}
+explain select * from
+json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE tt ALL NULL NULL NULL NULL 40 Table function: json_table
+create view v1 as select * from
+json_table('[{"color": "blue", "price": 50}]',
+'$[*]' columns(color text path '$.nonexistent',
+seq for ordinality)) as `ALIAS NOT QUOTED`;
+select * from v1;
+color seq
+NULL 1
+drop view v1;
+create view v1 as select * from
+json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100}]',
+'$[*]' columns(
+color text path "$.QUOTES \" HERE \"",
+color1 text path '$.QUOTES " HERE "',
+color2 text path "$.QUOTES ' HERE '",
+seq for ordinality)) as T;
+select * from v1;
+color color1 color2 seq
+NULL NULL NULL 1
+NULL NULL NULL 2
+drop view v1;
+CREATE TABLE t1 (x INT);
+INSERT INTO t1 VALUES (1), (2), (3);
+SELECT t1.x*2 m, jt.* FROM t1,
+JSON_TABLE(m, '$[*]' COLUMNS (i INT PATH '$')) jt;
+ERROR 42S22: Unknown column 'm' in 'JSON_TABLE argument'
+DROP TABLE t1;
+#
+# End of 10.5 tests
+#
diff --git a/mysql-test/suite/json/t/json_table.test b/mysql-test/suite/json/t/json_table.test
new file mode 100644
index 00000000000..8c011038331
--- /dev/null
+++ b/mysql-test/suite/json/t/json_table.test
@@ -0,0 +1,284 @@
+select * from json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+
+select * from JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+SELECT * FROM JSON_TABLE( '[ {"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'), NESTED PATH '$.b[*]' COLUMNS (c INT PATH '$') ) ) jt;
+
+create table t1 (id varchar(5), json varchar(1024));
+insert into t1 values ('j1', '[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]');
+insert into t1 values ('j2', '[{"a": 3, "b": [11,111]}, {"a": 4, "b": [22,222]}, {"a": 5, "b": [22,222]}]');
+select id, json, a from t1, json_table(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a')) as tt;
+select * from t1, JSON_TABLE(t1.json, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+--error ER_BAD_FIELD_ERROR
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, b INT PATH '$'))) as jt;
+
+--error ER_DUP_FIELDNAME
+select * from t1, JSON_TABLE(t1.no_field, '$[*]' COLUMNS(js_id FOR ORDINALITY, a INT PATH '$.a', NESTED PATH '$.b[*]' COLUMNS (l_js_id FOR ORDINALITY, a INT PATH '$'))) as jt;
+
+DROP TABLE t1;
+
+create table t1 (item_name varchar(32), item_props varchar(1024));
+insert into t1 values ('Laptop', '{"color": "black", "price": 1000}');
+insert into t1 values ('Jeans', '{"color": "blue", "price": 50}');
+
+select * from t1 left join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+
+--error ER_WRONG_OUTER_JOIN
+select * from t1 right join json_table(t1.item_props,'$' columns( color varchar(100) path '$.color')) as T on 1;
+
+DROP TABLE t1;
+
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+select * from JSON_TABLE( '[ {"xa": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+select * from JSON_TABLE( '[ {"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+--error ER_JSON_SYNTAX
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3} xx YY]', '$[*]' COLUMNS( a INT PATH '$.a' default '202' on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+--error ER_JSON_TABLE_ERROR_ON_FIELD
+select * from JSON_TABLE( '[{"a": [1, 2], "b": [11,111]}, {"a": 2, "b": [22,222]}, {"a":3}]', '$[*]' COLUMNS( a INT PATH '$.a' error on error default '101' on empty, NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$'))) as jt;
+
+#
+# MDEV-22290 JSON_TABLE: Decimal type with M equal D causes Assertion
+# `scale <= precision' failure
+#
+select * from json_table('{"a":0}',"$" columns(a decimal(1,1) path '$.a')) foo;
+
+
+#
+# MDEV-22291 JSON_TABLE: SELECT from json_table does not work without default database
+#
+connect (con1,localhost,root,,);
+select a from json_table('{"a":0}',"$" columns(a for ordinality)) foo;
+connection default;
+disconnect con1;
+
+#
+# MDEV-22302 JSON_TABLE: Column privilege is insufficient for query with json_table
+#
+
+create database db;
+use db;
+create table t (a text);
+insert into t values ('{"foo":"bar"}');
+create user u@localhost;
+grant select (a) on db.t to u@localhost;
+
+--connect (con1,localhost,u,,db)
+select a from t;
+select * from t, json_table(t.a, '$' columns(f varchar(20) path '$.foo')) as jt;
+
+connection default;
+disconnect con1;
+
+drop user u@localhost;
+drop database db;
+
+use test;
+create table t1 (
+ color varchar(32),
+ price int
+ );
+insert into t1 values ("red", 100), ("blue", 50);
+insert into t1 select * from t1;
+insert into t1 select * from t1;
+
+set optimizer_switch='firstmatch=off';
+select * from
+ json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100}]',
+ '$[*]' columns( color varchar(100) path '$.color',
+ price text path '$.price'
+ )
+ ) as T
+ where
+ T.color in (select color from t1 where t1.price=T.price);
+
+drop table t1;
+
+select * from
+json_table(' [ {"color": "blue", "sizes": [1,2,3,4], "prices" : [10,20]},
+ {"color": "red", "sizes": [10,11,12,13,14], "prices" : [100,200,300]} ]',
+ '$[*]' columns(
+ color varchar(4) path '$.color',
+ seq0 for ordinality,
+ nested path '$.sizes[*]'
+ columns (seq1 for ordinality,
+ size int path '$'),
+ nested path '$.prices[*]'
+ columns (seq2 for ordinality,
+ price int path '$')
+ )
+ ) as T;
+
+
+select * from json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100},
+ {"color": "rojo", "price": 10.0},
+ {"color": "blanco", "price": 11.0}]',
+ '$[*]' columns( color varchar(100) path '$.color',
+ price text path '$.price', seq for ordinality)) as T order by color desc;
+
+create view v as select * from json_table('{"as":"b", "x":123}',"$" columns(a varchar(8) path '$.a' default '-' on empty, x int path '$.x')) x;
+select * from v;
+show create table v;
+drop view v;
+
+--error ER_PARSE_ERROR
+select * from json_table('{"as":"b", "x":123}',
+ "$" columns(a varchar(8) path '$.a' default '-' on empty null on error null on empty, x int path '$.x')) x;
+
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*')) as jt;
+
+select * from json_table('{"a":"foo","b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+
+select * from json_table('{"b":"bar"}', '$'
+ columns (v varchar(20) path '$.*' default '-' on error)) as jt;
+
+create table t1 (a varchar(100));
+insert into t1 values ('1');
+--error ER_NONUNIQ_TABLE
+select * from t1 as T, json_table(T.a, '$[*]' columns(color varchar(100) path '$.nonexistent', seq for ordinality)) as T;
+
+drop table t1;
+
+prepare s from 'select * from
+json_table(?,
+ \'$[*]\' columns( color varchar(100) path \'$.color\',
+ price text path \'$.price\',
+ seq for ordinality)) as T
+order by color desc; ';
+
+execute s using '[{"color": "red", "price":1}, {"color":"brown", "price":2}]';
+deallocate prepare s;
+
+create view v2 as select * from json_table('[{"co\\\\lor": "blue", "price": 50}]', '$[*]' columns( color varchar(100) path '$.co\\\\lor') ) as T;
+select * from v2;
+drop view v2;
+
+explain format=json select * from
+ json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+explain select * from
+ json_table('[{"a": 1, "b": [11,111]}, {"a": 2, "b": [22,222]}]', '$[*]' COLUMNS( a INT PATH '$.a')) as tt;
+
+create view v1 as select * from
+ json_table('[{"color": "blue", "price": 50}]',
+ '$[*]' columns(color text path '$.nonexistent',
+ seq for ordinality)) as `ALIAS NOT QUOTED`;
+select * from v1;
+drop view v1;
+create view v1 as select * from
+ json_table('[{"color": "blue", "price": 50},
+ {"color": "red", "price": 100}]',
+ '$[*]' columns(
+ color text path "$.QUOTES \" HERE \"",
+ color1 text path '$.QUOTES " HERE "',
+ color2 text path "$.QUOTES ' HERE '",
+ seq for ordinality)) as T;
+select * from v1;
+drop view v1;
+
+CREATE TABLE t1 (x INT);
+INSERT INTO t1 VALUES (1), (2), (3);
+--error ER_BAD_FIELD_ERROR
+SELECT t1.x*2 m, jt.* FROM t1,
+ JSON_TABLE(m, '$[*]' COLUMNS (i INT PATH '$')) jt;
+DROP TABLE t1;
+
+--error ER_BAD_FIELD_ERROR
+select * from json_table(JS3.size, '$' columns (size INT PATH '$.size')) as JS1, json_table(JS1.size, '$' columns (size INT PATH '$.size')) as JS2, json_table(JS1.size, '$' columns (size INT PATH '$.size')) as JS3 where 1;
+
+create table t1 (json varchar(100) character set utf8);
+insert into t1 values ('{"value":"АБВ"}');
+create table tj1 as
+select T.value
+ from t1, json_table(t1.json, '$' columns (value varchar(32) PATH '$.value')) T;
+show create table tj1;
+drop table t1;
+drop table tj1;
+
+CREATE TABLE t1(id INT, f1 JSON);
+INSERT INTO t1 VALUES
+ (1, '{\"1\": 1}'),
+ (2, '{\"1\": 2}'),
+ (3, '{\"1\": 3}'),
+ (4, '{\"1\": 4}'),
+ (5, '{\"1\": 5}'),
+ (6, '{\"1\": 6}');
+ANALYZE TABLE t1;
+
+--error ER_WRONG_USAGE
+SELECT * FROM JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN t1 AS tt3;
+--error ER_WRONG_USAGE
+SELECT * FROM t1 as jj1,
+ (SELECT tt2.*
+ FROM
+ t1 as tt2,
+ JSON_TABLE(tt3.f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl
+ STRAIGHT_JOIN
+ t1 AS tt3
+ ) dt
+ORDER BY 1,3 LIMIT 10;
+
+drop table t1;
+
+select collation(x) from
+ JSON_TABLE('["abc"]', '$[*]' COLUMNS (x VARCHAR(10) CHARSET latin1 PATH '$')) tbl;
+
+SELECT * FROM JSON_TABLE('{"x":1, "y":2}', _utf8mb4'$' COLUMNS (NESTED PATH _utf8mb4'$.x'
+ COLUMNS(y INT PATH _utf8mb4'$.y' DEFAULT _utf8mb4'1' ON EMPTY DEFAULT _utf8mb4'2' ON ERROR))) jt;
+
+select * from json_table(
+'{"name":"t-shirt", "colors": ["yellow", "blue"],"sizes": ["small", "medium", "large"]}',
+ '$' columns(name varchar(32) path '$.name',
+ nested path '$.colors[*]' columns (
+ color varchar(32) path '$',
+ nested path '$.sizes[*]' columns (
+ size varchar(32) path '$'
+)))) as t;
+
+SELECT x, length(x) FROM
+ JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x' DEFAULT 'abcdefg' ON EMPTY)) jt;
+
+# check how conversion works for JSON NULL, TRUE and FALSE
+
+select * from
+ json_table('[{"a":"aa"}, {"b":null}]', '$[*]'
+ columns (col1 int path '$.b' default '456' on empty)) as tt;
+
+select * from
+ json_table('[{"a":"aa"}, {"b":true}]', '$[*]'
+ columns (col1 int path '$.b' default '456' on empty)) as tt;
+
+select * from
+ json_table('[{"a":"aa"}, {"b":false}]', '$[*]'
+ columns (col1 int path '$.b' default '456' on empty)) as tt;
+
+select * from
+ json_table('[{"a":"aa"}, {"b":null}]', '$[*]'
+ columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt;
+
+select * from
+ json_table('[{"a":"aa"}, {"b":true}]', '$[*]'
+ columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt;
+
+select * from
+ json_table('[{"a":"aa"}, {"b":false}]', '$[*]'
+ columns (col1 varchar(100) path '$.b' default '456' on empty)) as tt;
+
+
+select * from
+ json_table( '[{"a":"asd"}, {"a":123}, {"a":[]}, {"a":{}} ]', '$[*]'
+ columns (id for ordinality,
+ intcol int path '$.a' default '1234' on empty default '5678' on error)
+ ) as tt;
+
+--echo #
+--echo # End of 10.5 tests
+--echo #
diff --git a/mysql-test/suite/json/t/json_table2.test b/mysql-test/suite/json/t/json_table2.test
new file mode 100644
index 00000000000..767542f0cbd
--- /dev/null
+++ b/mysql-test/suite/json/t/json_table2.test
@@ -0,0 +1,1563 @@
+# For stable statistics
+#--source include/have_innodb_16k.inc
+
+
+--echo #
+--echo # WL#8867: Add JSON_TABLE table function
+--echo #
+let $query=
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a',
+ jexst int exists path '\$.b')
+ ) as tt;
+eval $query;
+eval explain $query;
+#eval explain format=tree $query; # Tests printing of MaterializedTableFunctionIterator.
+
+#--error ER_INVALID_DEFAULT
+select * from
+ json_table(
+ '[{"x":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ jpath varchar(100) path '$.a' default '[99]' on error,
+ jexst int exists path '$.b')
+ ) as tt;
+
+let $query= select * from
+ json_table(
+ '[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a'
+ default '33' on empty
+ default '66' on error,
+ jsn_path json path '\$.a' default '{"x":33}' on empty,
+ jexst int exists path '\$.b')
+ ) as tt;
+eval $query;
+eval explain $query;
+
+let $query= select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0.33},{"a":"asd"}]',
+ '\$[*]' columns (id for ordinality,
+ jpath_i int path '\$.a'
+ default '33' on empty
+ default '66' on error,
+ jpath_r real path '\$.a'
+ default '33.3' on empty
+ default '77.7' on error,
+ jsn_path json path '\$.a' default '{"x":33}' on empty,
+ jexst int exists path '\$.b')
+ ) as tt;
+eval $query;
+eval explain $query;
+
+let $query= select * from
+ json_table(
+ '[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a'
+ default '33' on empty
+ default '66' on error,
+ jsn_path json path '\$.a' default '{"x":33}' on empty,
+ jexst int exists path '\$.b')
+ ) as tt;
+eval $query;
+eval explain $query;
+
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a',
+ json_path json path '\$.a',
+ jexst int exists path '\$.b')
+ ) as tt;
+
+let $query= select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a',
+ json_path json path '\$.a',
+ jexst int exists path '\$.b')
+ ) as tt
+ where id = 3;
+eval $query;
+eval explain $query;
+
+#--error ER_MISSING_JSON_TABLE_VALUE
+--error 4174
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a' error on empty,
+ jexst int exists path '\$.b')
+ ) as tt;
+
+#--error ER_WRONG_JSON_TABLE_VALUE
+--error 4174
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"a":1},{"a":[0,1]}]',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a' error on error,
+ jexst int exists path '\$.b')
+ ) as tt;
+
+#--error ER_INVALID_JSON_TEXT_IN_PARAM
+--error 4038
+select * from
+ json_table(
+ '!#@\$!@#\$',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path '\$.a',
+ jexst int exists path '\$.b')
+ ) as tt;
+
+#--error ER_INVALID_JSON_PATH
+--error 4042
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ "!@#\$!@#\$" columns (id for ordinality,
+ jpath varchar(100) path '\$.a',
+ jexst int exists path '\$.b')
+ ) as tt;
+
+#--error ER_INVALID_JSON_PATH
+--error 4042
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '\$[*]' columns (id for ordinality,
+ jpath varchar(100) path "!@#\$!@#\$",
+ jexst int exists path '\$.b')
+ ) as tt;
+
+--error ER_DUP_FIELDNAME
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ id for ordinality)
+ ) as tt;
+
+select * from
+ json_table(
+ '[{"a":"3"},{"a":2},{"b":1},{"a":0}]',
+ '$[*]' columns (id for ordinality,
+ _id for ordinality)
+ ) as tt;
+
+let $query= select * from
+ json_table(
+ '[
+ {"a":"3", "n": { "l": 1}},
+ {"a":2, "n": { "l": 1}},
+ {"b":1, "n": { "l": 1}},
+ {"a":0, "n": { "l": 1}}
+ ]',
+ '\$[*]' columns (
+ id for ordinality,
+ jpath varchar(100) path '\$.a',
+ jexst int exists path '\$.b',
+ nested path '\$.n' columns (
+ id_n for ordinality,
+ jpath_n varchar(50) path '\$.l')
+ )
+ ) as tt;
+
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+
+let $query= select * from
+ json_table(
+ '[
+ {"a":2, "n": [{ "l": 1}, {"l": 11}]},
+ {"a":1, "n": [{ "l": 2}, {"l": 22}]}
+ ]',
+ '\$[*]' columns (
+ id for ordinality,
+ jpath varchar(50) path '\$.a',
+ nested path '\$.n[*]' columns (
+ id_n for ordinality,
+ jpath_n varchar(50) path '\$.l'),
+ nested path '\$.n[*]' columns (
+ id_m for ordinality,
+ jpath_m varchar(50) path '\$.l')
+ )
+ ) as tt;
+
+eval $query;
+eval explain $query;
+
+select * from json_table(
+ '[
+ {"a":"3", "n": ["b","a","c"]},
+ {"a":2, "n": [1,2]},
+ {"b":1, "n": ["zzz"]},
+ {"a":0, "n": [0.1, 0.02]}
+ ]',
+ '$[*]' columns (
+ id for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b',
+ nested path '$.n[*]' columns (
+ id_n for ordinality,
+ jpath_n varchar(50) path '$')
+ )
+ ) as tt;
+
+select * from json_table(
+ '[
+ {"a":"3", "n": ["b","a","c"]},
+ {"a":2, "n": [1,2]},
+ {"b":1, "n": ["zzz"]},
+ {"a":0, "n": [0.1, 0.02]}
+ ]',
+ '$[*]' columns (
+ id for ordinality,
+ jpath varchar(100) path '$.a',
+ jexst int exists path '$.b',
+ nested path '$.n[*]' columns (
+ id_n1 for ordinality,
+ jpath_n1 varchar(50) path '$') ,
+ nested path '$.n[*]' columns (
+ id_n2 for ordinality,
+ jpath_n2 varchar(50) path '$')
+ )
+ ) as tt;
+
+let $query= select * from json_table(
+ '[
+ {"a":"3", "n": [ {"ll":["b1","b2","b3"]}, {"ll": ["a1","a2"]},
+ {"ll":["c"]} ]},
+ {"a":2, "n": [{"ll":[1,11,111]},{"ll":[2]}]},
+ {"b":1, "n": [{"ll":["zzz"]}]},
+ {"a":0, "n": [{"ll":[0.1,0.01]}, {"ll":[0.02,0.002,0.0002]}]}
+ ]',
+ '\$[*]' columns (
+ id1 for ordinality,
+ jpath varchar(100) path '\$.a',
+ jexst int exists path '\$.b',
+ nested path '\$.n[*]' columns (
+ id2 for ordinality,
+ nested path '\$.ll[*]' columns (
+ id3 for ordinality,
+ jpath_3 varchar(50) path '\$')
+ ),
+ nested path '\$.n[*]' columns (
+ id4 for ordinality,
+ jpath_4 json path '\$')
+ )
+ ) as tt;
+
+eval $query;
+eval explain $query;
+
+--echo ord should be 1,1,1,2, which tells that the first two values of 'l' are
+--echo from the same object, while the next two are from different objects
+let $query= SELECT *
+FROM JSON_TABLE(
+ '[{"a": "a_val",
+ "b": [
+ {"c": "c_val",
+ "l": [1,2]}
+ ]
+ }, {"a": "a_val",
+ "b": [
+ {"c": "c_val",
+ "l": [11]},
+ {"c": "c_val",
+ "l": [22]}
+ ]
+ }]',
+ '\$[*]' COLUMNS (
+ apath VARCHAR(10) PATH '\$.a',
+ NESTED PATH '\$.b[*]' COLUMNS (
+ bpath VARCHAR(10) PATH '\$.c',
+ ord FOR ORDINALITY,
+ NESTED PATH '\$.l[*]' COLUMNS (
+ lpath varchar(10) PATH '\$'
+ )
+ )
+ )) as jt;
+eval $query;
+eval explain $query;
+
+CREATE TABLE jt( i JSON );
+SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+ WHERE a=1;
+EXPLAIN SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+ WHERE a=1;
+
+SELECT * FROM (
+ SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+ WHERE a=1) AS ttt;
+EXPLAIN SELECT * FROM (
+ SELECT * FROM jt, JSON_TABLE(jt.i, '$' COLUMNS (a INT PATH '$')) AS tt
+ WHERE a=1) AS ttt;
+DROP TABLE jt;
+
+--disable_parsing
+SELECT * FROM JSON_TABLE(CAST(CAST("11:22:33" AS TIME) AS JSON),
+ '$' COLUMNS (dt DATE PATH '$')) as tt;
+SELECT * FROM JSON_TABLE(CAST(CAST("11:22:33" AS TIME) AS JSON),
+ '$' COLUMNS (dt TIME PATH '$')) as tt;
+SELECT * FROM JSON_TABLE(CAST(CAST("2001.02.03" AS DATE) AS JSON),
+ '$' COLUMNS (dt DATE PATH '$')) as tt;
+--enable_parsing
+SELECT * FROM JSON_TABLE(NULL,
+ '$' COLUMNS (dt DATE PATH '$')) as tt;
+
+CREATE VIEW v AS
+ SELECT * FROM JSON_TABLE('[1,2,3]',
+ '$[*]' COLUMNS (num INT PATH '$.a'
+ DEFAULT '123' ON EMPTY
+ DEFAULT '456' ON ERROR)) AS jt;
+SELECT * FROM v;
+SHOW CREATE VIEW v;
+DROP VIEW v;
+
+#--error ER_INVALID_JSON_VALUE_FOR_CAST
+SELECT * FROM JSON_TABLE('"asdf"',
+ '$' COLUMNS (a INT PATH '$' ERROR ON ERROR)) AS jt;
+#--error ER_WRONG_JSON_TABLE_VALUE
+--error 4174
+SELECT * FROM
+ JSON_TABLE('[{"a":1},{"a":2}]',
+ '$' COLUMNS (a INT PATH '$[*].a' ERROR ON ERROR)) AS jt;
+--error 4174
+SELECT * FROM
+ JSON_TABLE('[{"a":1},{"a":2}]',
+ '$' COLUMNS (a JSON PATH '$[*].a' ERROR ON ERROR)) AS jt;
+#--error ER_JT_VALUE_OUT_OF_RANGE
+--error 4174
+SELECT * FROM
+ JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$' ERROR ON ERROR)) AS jt;
+SELECT * FROM
+ JSON_TABLE('123.456', '$' COLUMNS (a DECIMAL(2,1) PATH '$')) AS jt;
+
+#--error ER_JT_MAX_NESTED_PATH
+SELECT * FROM
+ JSON_TABLE('{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{"a":1,"b":{}}}}}}}}}}}}}}}}}}}',
+ '$' COLUMNS (i0 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i1 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i2 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i3 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i4 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i5 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i6 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i7 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i8 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i9 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i10 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i11 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i12 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i13 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i14 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i15 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i16 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i17 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i18 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i19 INT PATH '$.a',
+ NESTED PATH '$.b' COLUMNS (i20 INT PATH '$.a'
+ )))))))))))))))))))))) jt;
+
+CREATE TABLE t1(id int, jd JSON);
+INSERT INTO t1 values (1, '[1,3,5]'),(2,'[2,4,6]');
+SELECT id, jt.* FROM t1,
+ JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt;
+SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.*
+ FROM t1,
+ JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt;
+
+EXPLAIN SELECT /*+ JOIN_ORDER(jt, t1) */ id, jt.*
+ FROM t1,
+ JSON_TABLE(jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt;
+
+--sorted_result
+SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.*
+ FROM t1,
+ JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt,
+ t1 AS t2;
+
+EXPLAIN SELECT t1.id, t2.id, jt.*
+ FROM t1,
+ JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt,
+ t1 AS t2;
+
+EXPLAIN SELECT /*+ JOIN_ORDER(t2,jt) */ t1.id, t2.id, jt.*
+ FROM t1,
+ JSON_TABLE(t1.jd, '$[*]' COLUMNS (jid FOR ORDINALITY,
+ val INT PATH '$')) AS jt,
+ t1 AS t2;
+
+SELECT * FROM t1 WHERE id IN
+ (SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS
+ (id INT PATH '$')) AS jt);
+EXPLAIN SELECT * FROM t1 WHERE id IN
+ (SELECT * FROM JSON_TABLE('[1,2]', '$[*]' COLUMNS
+ (id INT PATH '$')) AS jt);
+
+# JSON_TABLE referring outer scope
+SELECT * FROM t1 WHERE id IN
+ (SELECT * FROM JSON_TABLE(t1.jd, '$[*]' COLUMNS
+ (id INT PATH '$')) AS jt);
+
+EXPLAIN SELECT * FROM t1 WHERE id IN
+ (SELECT * FROM JSON_TABLE(t1.jd, '$[*]' COLUMNS
+ (id INT PATH '$')) AS jt);
+# JSON_TABLE referring another JSON_TABLE
+SELECT id, jt1.*, jt2.*
+ FROM t1, JSON_TABLE(jd, '$' COLUMNS (data1 JSON PATH '$')) AS jt1,
+ JSON_TABLE(data1, '$[*]' COLUMNS (id2 INT PATH '$')) AS jt2;
+
+EXPLAIN SELECT id, jt1.*, jt2.*
+ FROM t1, JSON_TABLE(jd, '$' COLUMNS (data1 JSON PATH '$')) AS jt1,
+ JSON_TABLE(data1, '$[*]' COLUMNS (id2 INT PATH '$')) AS jt2;
+
+DROP TABLE t1;
+
+SELECT * FROM JSON_TABLE('"asdf"', '$' COLUMNS(
+ tm TIME PATH '$',
+ dt DATE PATH '$',
+ i INT PATH '$',
+ f FLOAT PATH '$',
+ d DECIMAL PATH '$')) AS jt;
+
+# DEFAULT NULL is not accepted syntax.
+--error ER_PARSE_ERROR
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON EMPTY)) jt;
+--error ER_PARSE_ERROR
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON ERROR)) jt;
+
+# The DEFAULT value must be a string in JSON format for now.
+--error ER_PARSE_ERROR
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON EMPTY)) jt;
+
+--error ER_PARSE_ERROR
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT 0 ON ERROR)) jt;
+--error ER_PARSE_ERROR
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x DATE
+ PATH '$.x'
+ DEFAULT DATE'2020-01-01' ON EMPTY)) jt;
+--error ER_PARSE_ERROR
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x DATE
+ PATH '$.x'
+ DEFAULT DATE'2020-01-01' ON ERROR)) jt;
+
+--echo #
+--echo # Bug#25413069: SIG11 IN CHECK_COLUMN_GRANT_IN_TABLE_REF
+--echo #
+--error ER_BAD_FIELD_ERROR
+SELECT a FROM JSON_TABLE(abc, '$[*]' COLUMNS ( a int path '$.a')) AS jt;
+
+--echo #
+--echo # Bug#25420680: ASSERTION `THD->IS_ERROR()' FAILED IN SQL/SQL_SELECT.CC
+--echo #
+SELECT * FROM JSON_TABLE('{"a":"2017-11-1"}',
+ '$' COLUMNS (jpath DATE PATH '$.a')) AS jt;
+--echo #
+--echo # Bug#25413826: ASSERTION `TABLE_LIST->ALIAS' FAILED
+--echo #
+#--error ER_TF_MUST_HAVE_ALIAS
+--error ER_PARSE_ERROR
+SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]',
+ '$[*]' COLUMNS ( a int path '$.b'));
+
+--echo #
+--echo # Bug#25421464: ASSERTION `!STRCMP(TABLE_REF->TABLE_NAME, ...
+--echo #
+CREATE VIEW v1 AS
+ SELECT * FROM JSON_TABLE('[{"a": 1, "b": 2}]',
+ '$[*]' COLUMNS ( a INT PATH '$.b')) AS jt;
+SELECT * FROM v1;
+SHOW CREATE VIEW v1;
+DROP VIEW v1;
+
+--echo #
+--echo # Bug#25427457: ASSERTION `!((*REG_FIELD)->FLAGS & 16)'
+--echo #
+SELECT * FROM JSON_TABLE('{"a":"1"}',
+ '$' COLUMNS (jpath JSON PATH '$.a',
+ o FOR ORDINALITY)) AS jt
+WHERE o = 1;
+
+--echo #
+--echo # Bug#25427982: ASSERTION `DERIVED' FAILED IN SQL/TABLE.H
+--echo #
+#--error ER_WRONG_FIELD_WITH_GROUP
+SELECT je,o FROM JSON_TABLE('{"a":"1"}',
+ '$' COLUMNS (o FOR ORDINALITY,
+ je BIGINT EXISTS PATH '$.a')) AS jt
+GROUP BY je;
+SELECT je,COUNT(o) FROM JSON_TABLE('{"a":"1"}',
+ '$' COLUMNS (o FOR ORDINALITY,
+ je BIGINT EXISTS PATH '$.a')) AS jt
+GROUP BY je;
+
+--echo #
+--echo # Bug#25413194: ASSERTION `!(WANT_PRIVILEGE & ~(GRANT->WANT_PRIVILEGE
+--echo #
+CREATE TABLE t1 (j JSON);
+SELECT * FROM t1,JSON_TABLE(t1.j, '$[*]' COLUMNS ( a int path '$.b')) AS jt;
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25460537:SIG 11 IN NEXT_FAST AT SQL/SQL_LIST.H
+--echo #
+PREPARE STMT FROM
+ "SELECT * FROM JSON_TABLE(
+ \'[{\"a\":\"3\"},{\"a\":2},{\"b\":1},{\"a\":0}]\',
+ \'$[*]\' COLUMNS (id
+ FOR ORDINALITY,
+ jpath VARCHAR(100) PATH \'$.a\',
+ jexst INT EXISTS PATH \'$.b\')
+ ) as tt";
+EXECUTE STMT;
+EXECUTE STMT;
+DEALLOCATE PREPARE stmt;
+
+--echo #
+--echo # Bug#25522353: SIG 11 IN JOIN::MAKE_JOIN_PLAN | SQL/SQL_OPTIMIZER.CC
+--echo #
+CREATE TABLE t1 (id INT, jc JSON);
+
+#psergey: hmm --error ER_TF_FORBIDDEN_JOIN_TYPE
+SELECT * FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+# psergey: wow: --error ER_UNKNOWN_TABLE
+SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt LEFT JOIN t1 ON t1.jc=jt.id;
+
+#--error ER_UNKNOWN_TABLE psergey-todo: WHYYY?
+SELECT * FROM JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt RIGHT JOIN t1 ON t1.jc=jt.id;
+
+SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id
+ LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+EXPLAIN SELECT * FROM t1 t1o RIGHT JOIN t1 ON t1o.id=t1.id
+ LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id;
+
+#--error ER_TF_FORBIDDEN_JOIN_TYPE
+SELECT * FROM t1 AS t1o RIGHT JOIN
+ (t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ ON t1o.id=t1i.id;
+
+#--error ER_TF_FORBIDDEN_JOIN_TYPE
+SELECT * FROM t1 AS t1o RIGHT JOIN
+ (t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ ON t1o.id=t1i.id;
+
+#--error ER_TF_FORBIDDEN_JOIN_TYPE
+WITH qn AS
+ (SELECT jt.* FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id)
+ SELECT * from qn;
+
+#--error ER_TF_FORBIDDEN_JOIN_TYPE
+WITH qn AS
+ (SELECT 1 UNION
+ SELECT jt.id FROM t1 RIGHT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1.jc=jt.id)
+ SELECT * from qn;
+
+#--error ER_TF_FORBIDDEN_JOIN_TYPE
+SELECT * FROM t1 AS t1o RIGHT JOIN
+ (t1 AS t1i JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ ON t1o.id=t1i.id;
+
+#--error ER_TF_FORBIDDEN_JOIN_TYPE
+SELECT * FROM t1 AS t1o RIGHT JOIN
+ (t1 AS t1i RIGHT JOIN JSON_TABLE(t1o.jc, '$' COLUMNS
+ (id FOR ORDINALITY)) as jt ON t1i.jc=jt.id)
+ ON t1o.id=t1i.id;
+
+INSERT INTO t1 VALUES(1,"1"),(2,"4"),(3,"3");
+
+ANALYZE TABLE t1;
+
+SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id INT PATH '$')) as jt ON t1.id=jt.id;
+
+EXPLAIN SELECT * FROM t1 LEFT JOIN JSON_TABLE(t1.jc, '$' COLUMNS
+ (id INT PATH '$')) as jt ON t1.id=jt.id;
+
+#--error ER_TF_FORBIDDEN_JOIN_TYPE
+--error 1120
+SELECT * FROM t1
+ LEFT JOIN
+ JSON_TABLE(t1.jc, '$' COLUMNS (id FOR ORDINALITY)) as jt ON t1.jc=jt.id
+ RIGHT JOIN
+ JSON_TABLE(jt.id, '$' COLUMNS (id FOR ORDINALITY)) as jt1 ON jt.id=jt1.id;
+
+DROP TABLE t1;
+
+# Test that tmp table can overflow to disk
+#set @save_mem_se= @@internal_tmp_mem_storage_engine;
+#set @@internal_tmp_mem_storage_engine=MEMORY;
+
+set @save_heap_size= @@max_heap_table_size;
+set @@max_heap_table_size= 16384;
+
+FLUSH STATUS;
+SELECT * FROM JSON_TABLE(
+ '[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]',
+ '$[*]' COLUMNS (
+ c1 CHAR(255) PATH '$',
+ c2 CHAR(255) PATH '$',
+ c3 CHAR(255) PATH '$',
+ c4 CHAR(255) PATH '$',
+ c5 CHAR(255) PATH '$',
+ c6 CHAR(255) PATH '$',
+ c7 CHAR(255) PATH '$',
+ c8 CHAR(255) PATH '$')) AS jt;
+SHOW STATUS LIKE '%tmp%';
+set @@max_heap_table_size= @save_heap_size;
+#set @@internal_tmp_mem_storage_engine= @save_mem_se;
+
+--echo #
+--echo # Bug#25504063: EXPLAIN OF JSON_TABLE QUERY USES INTERNAL TEMP TABLES
+--echo #
+FLUSH STATUS;
+SELECT * FROM
+ JSON_TABLE(
+ '[{"a":"3"}]',
+ '$[*]' COLUMNS (id FOR ORDINALITY)
+ ) AS tt;
+SHOW STATUS LIKE '%tmp%';
+
+--echo #
+--echo # Bug#25525409: ASSERTION `TABLE_LIST->TABLE' FAILED IN SQL/SQL_BASE.CC
+--echo #
+#--error ER_BAD_FIELD_ERROR
+--error 1054
+SELECT * FROM JSON_TABLE( ( SELECT a ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+
+## psergey: why does it succeed?
+#--error 1064
+SELECT * FROM JSON_TABLE( ( SELECT 1 ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+
+--error 1111
+SELECT * FROM JSON_TABLE( ( SUM(1) ) , '$.*' COLUMNS (col1 FOR ORDINALITY) )
+AS alias1;
+
+--echo #
+--echo # Bug#25472875: ERROR SHOULD BE THROWN FOR INCORRECT VALUES
+--echo #
+#--error ER_JT_VALUE_OUT_OF_RANGE psergey!
+SELECT *
+ FROM JSON_TABLE('{"a":"1993-01-01"}',
+ '$' COLUMNS (jp DATE PATH '$.b' DEFAULT '1000' ON EMPTY))
+ AS jt;
+
+--echo #
+--echo # Bug#25532429: INVALID JSON ERROR NOT THROWN WITH EMPTY TABLES JOIN
+--echo #
+CREATE TABLE t1(j JSON);
+#--error ER_INVALID_JSON_TEXT_IN_PARAM psergey!
+SELECT * FROM t1,
+ JSON_TABLE( 'dqwfjqjf' , '$[*]' COLUMNS (col5 FOR ORDINALITY) ) AS alias7;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Bug#25540370: SIG 11 IN SHOW_SQL_TYPE|SQL/SQL_SHOW.CC:7063
+--echo #
+let $query=
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '\$[*]' COLUMNS (col18 CHAR(70) PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+--echo # A too-short field causes truncation and an error, which triggers the ON ERROR clause
+let $query=
+ SELECT * FROM
+ JSON_TABLE('["3.14159"]',
+ '\$[*]' COLUMNS (col18 CHAR(6) PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+--echo # Truncation of trailing spaces doesn't trigger ON ERROR
+let $query=
+ SELECT * FROM
+ JSON_TABLE('["3.14159 "]',
+ '\$[*]' COLUMNS (col18 CHAR(7) PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+let $query=
+SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '\$[*]' COLUMNS (col18 CHAR(255) PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+let $query=
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '\$[*]' COLUMNS (col18 VARCHAR(70) PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+let $query=
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '\$[*]' COLUMNS (col18 VARCHAR(255) PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+let $query=
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '\$[*]' COLUMNS (col18 FLOAT PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+let $query=
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '\$[*]' COLUMNS (col18 DOUBLE PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+let $query=
+ SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '\$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+#--error ER_JT_VALUE_OUT_OF_RANGE
+--error 4174
+SELECT * FROM
+ JSON_TABLE('[3.14159]',
+ '\$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '\$' ERROR ON ERROR)
+ ) AS alias2;
+
+let $query=
+ SELECT * FROM
+ JSON_TABLE('[0.9]',
+ '\$[*]' COLUMNS (col18 DECIMAL(3,3) PATH '\$')
+ ) AS alias2;
+eval EXPLAIN $query;
+eval $query;
+
+SELECT * FROM
+ JSON_TABLE('["asdf","ghjk"]',
+ '$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$'
+ DEFAULT "3.14159" ON ERROR)
+ ) AS alias2;
+
+CREATE TABLE t1(jd JSON);
+INSERT INTO t1 VALUES('["asdf"]'),('["ghjk"]');
+SELECT * FROM t1,
+ JSON_TABLE(jd,
+ '$[*]' COLUMNS (col18 DECIMAL(4,3) PATH '$'
+ DEFAULT "3.14159" ON ERROR)
+ ) AS alias2;
+DROP TABLE t1;
+--echo #
+--echo # Bug#25540027: SIG 11 IN FIND_FIELD_IN_TABLE | SQL/SQL_BASE.CC
+--echo #
+CREATE TABLE t1(c1 JSON);
+--error ER_NON_UPDATABLE_TABLE
+UPDATE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+ SET jt1.a=1;
+--error ER_PARSE_ERROR
+DELETE JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+ FROM t1;
+--error ER_PARSE_ERROR
+DELETE t1, JSON_TABLE(t1.c1,'$[*]' COLUMNS (a INT PATH '$.a')) AS jt1
+ USING t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25547244: ASSERTION `!TABLE || (!TABLE->READ_SET || BITMAP_IS_SET(
+--echo #
+--disable_parsing
+## psergey: CRASH!
+CREATE TABLE t1(i INT);
+INSERT INTO t1 VALUES(1);
+WITH cte_query AS
+ (SELECT * FROM t1, JSON_TABLE( JSON_OBJECT('ISSKF',i) ,
+ '$[*]' COLUMNS (jtcol1 INT EXISTS PATH '$[*]') ) AS alias2)
+SELECT jtcol1 AS field1 FROM cte_query;
+DROP TABLE t1;
+--enable_parsing
+
+--echo #
+--echo # Bug#25540675: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED
+--echo #
+CREATE TABLE j1(j JSON);
+INSERT INTO j1 VALUES('[1,2,3]'),('[1,2,4]');
+SELECT * FROM j1,
+ JSON_TABLE( JSON_OBJECT('key1', j) ,
+ '$.*' COLUMNS (NESTED PATH '$.*' COLUMNS (col11 FOR ORDINALITY))) AS alias2;
+DROP TABLE j1;
+
+--echo #
+--echo # Bug#25584335: SIG 11 IN TABLE_LIST::FETCH_NUMBER_OF_ROWS
+--echo #
+CREATE TABLE t1(i INT);
+
+PREPARE stmt FROM "SELECT alias1.i AS field1 FROM (
+ t1 AS alias1,
+ (SELECT * FROM
+ JSON_TABLE('[1,2,3]' ,
+ '$[*]' COLUMNS (`col_varchar` FOR ORDINALITY)) AS SQ1_alias1
+ ) AS alias2 )";
+
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25604048: COLUMN NAMES WITH SAME 33-CHAR PREFIX ARE EQUAL
+--echo #
+SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS(
+ column_name_is_thirty_four_or_more VARCHAR(17) PATH '$.key1',
+ column_name_is_thirty_four_or_more_yes_indeed VARCHAR(17) PATH '$.key1'
+) ) AS alias1;
+
+#--error ER_WRONG_COLUMN_NAME
+SELECT * FROM JSON_TABLE( '{"key1": "test"}' , '$' COLUMNS(
+ `column_name_is_thirty_four_or_more ` VARCHAR(17) PATH '$.key1'
+) ) AS alias1;
+
+--echo #
+--echo # Bug#25604404: JSON_TABLE MORE RESTRICTIVE WITH IDENTIFIERS THAN
+--echo # CREATE TABLE
+--echo #
+SELECT * FROM JSON_TABLE( '[1, 2]', '$' COLUMNS(
+ one INT PATH '$[0]', two INT PATH '$[1]'
+)) AS jt;
+
+--echo #
+--echo # Bug#25588450: SIG 6 IN JSON_WRAPPER::SEEK|SQL/JSON_DOM.CC
+--echo #
+CREATE TABLE t1(c VARCHAR(10)) ENGINE=MEMORY;
+INSERT INTO t1 VALUES('fiheife');
+#--error ER_INVALID_JSON_TEXT_IN_PARAM
+--error 4038
+SELECT * FROM `t1` AS alias1, JSON_TABLE( `c` , '$[*]' COLUMNS (jtcol1 JSON
+PATH '$.*')) AS alias2 WHERE jtcol1 <= 'kjfh';
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#25587754: ASSERTION `FIXED == 0 || BASIC_CONST_ITEM()' FAILED
+--echo #
+PREPARE stmt FROM
+ "SELECT * FROM JSON_TABLE( '[1,2]', '$[*]'
+ COLUMNS (jtcol1 JSON PATH '$.*')) AS alias2";
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+--echo #
+--echo # Bug#25584593: UNABLE TO USE JSON_TABLE() ON TEXT/BLOB JSON DATA
+--echo #
+SELECT * FROM JSON_TABLE(NULL, '$.k' COLUMNS (id FOR ORDINALITY)) AS aLias;
+SELECT * FROM JSON_TABLE(@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias;
+SET @myjson = '{"k": 42}';
+SELECT * FROM JSON_TABLE(@myjson, '$.k' COLUMNS (id FOR ORDINALITY)) AS alias;
+
+CREATE TABLE t1(
+ txt TEXT, ty TINYTEXT, tm MEDIUMTEXT, tl LONGTEXT);
+INSERT INTO t1 values (
+ '{"k": "text"}','{"k": "tinytext"}','{"k": "mediumtext"}','{"k": "longtext"}');
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.ty, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.tm, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.tl, '$.k' COLUMNS (id VARCHAR(10) PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id TEXT PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id TINYTEXT PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id MEDIUMTEXT PATH '$')) AS alias;
+SELECT alias.* FROM t1, JSON_TABLE(t1.txt,'$.k' COLUMNS (id LONGTEXT PATH '$')) AS alias;
+
+# BLOB can store data from JSON
+SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias;
+# Check that type is printed correctly
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb BLOB PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TINYBLOB PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb MEDIUMBLOB PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb LONGBLOB PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TEXT PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb TINYTEXT PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb MEDIUMTEXT PATH '$')) as alias;
+EXPLAIN SELECT alias.* FROM JSON_TABLE('"asd123"', '$' COLUMNS (blb LONGTEXT PATH '$')) as alias;
+# But can't be used as a data source
+#--error ER_WRONG_ARGUMENTS
+--error 4038
+SELECT * FROM
+ (SELECT blb as jf
+ FROM
+ JSON_TABLE('"asd123"',
+ '$' COLUMNS (blb BLOB PATH '$')
+ ) AS jti
+ ) AS dt,
+ JSON_TABLE(jf, '$' COLUMNS (blb BLOB PATH '$')) AS jto;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#26500384: ASSERT FAILURE IN QUERY WITH WINDOW FUNCTION AND
+--echo # JSON_TABLE
+--echo #
+--disable_parsing
+## psergey: crash!
+CREATE TABLE t (x INT);
+INSERT INTO t VALUES (1), (2), (3);
+--error ER_BAD_FIELD_ERROR
+SELECT MAX(t.x) OVER () m, jt.* FROM t,
+ JSON_TABLE(JSON_ARRAY(m), '$[*]' COLUMNS (i INT PATH '$')) jt;
+DROP TABLE t;
+--enable_parsing
+
+--echo #
+--echo # Bug#26583283: ASSERTION `!THD->IS_ERROR()' FAILED IN SQL_RESOLVER.CC
+--echo #
+EXPLAIN SELECT * FROM JSON_TABLE('null', '$' COLUMNS(AA DECIMAL PATH '$')) tt;
+
+CREATE VIEW v1 AS SELECT * FROM
+ JSON_TABLE( 'null', '$' COLUMNS (c1 DECIMAL PATH '$' ) ) AS jt;
+SELECT * FROM v1;
+EXPLAIN SELECT * FROM v1;
+DROP VIEW v1;
+
+--echo #
+--echo # Bug#25822408: ASSERTION `!COL->CHILD_JDS->PRODUCING_RECORDS' FAILED
+--echo #
+PREPARE stmt FROM "SELECT * FROM
+ JSON_TABLE('{\"a\":1}','$' COLUMNS (c1 CHAR(20) PATH '$.b' ERROR ON EMPTY)) jt";
+#--error ER_MISSING_JSON_TABLE_VALUE
+--error 4174
+EXECUTE stmt;
+#--error ER_MISSING_JSON_TABLE_VALUE
+--error 4174
+EXECUTE stmt;
+
+--echo #
+--echo # Bug#25594571: CRASH AT ITEM::CONST_ITEM|SQL/ITEM.H
+--echo #
+CREATE TABLE t1 (i INT);
+
+INSERT INTO t1 VALUES(1),(2),(3),(4),(5),(6),(7);
+
+PREPARE stmt FROM "SELECT * FROM t1 AS alias1 LEFT JOIN t1 AS alias2
+LEFT JOIN JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (i FOR ORDINALITY )) AS
+alias3 ON alias2 . `i` = alias3 . `i` ON alias1 . `i` = alias2 . `i`";
+
+EXECUTE stmt;
+EXECUTE stmt;
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#26648617: ASSERTION `IS_VIEW_OR_DERIVED() &&
+--echo # USES_MATERIALIZATION()' FAILED.
+--echo #
+CREATE TABLE t1 (
+ col_varchar_key varchar(1) DEFAULT NULL
+) ;
+
+INSERT INTO t1 VALUES(1),(4);
+
+SELECT * FROM t1 WHERE col_varchar_key NOT IN (
+ SELECT col_varchar_key FROM JSON_TABLE(
+ '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+ (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+
+EXPLAIN SELECT * FROM t1 WHERE col_varchar_key NOT IN (
+ SELECT col_varchar_key FROM JSON_TABLE(
+ '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+ (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+
+SELECT * FROM t1 WHERE col_varchar_key IN (
+ SELECT col_varchar_key FROM JSON_TABLE(
+ '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+ (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+
+EXPLAIN SELECT * FROM t1 WHERE col_varchar_key IN (
+ SELECT col_varchar_key FROM JSON_TABLE(
+ '[{"col_key": 1},{"col_key": 2}]', "$[*]" COLUMNS
+ (col_varchar_key VARCHAR(10) PATH "$.col_key")) AS innr1);
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#26711551: WL8867:CONDITIONAL JUMP IN JSON_TABLE_COLUMN::CLEANUP
+--echo #
+CREATE TABLE t(x int, y int);
+INSERT INTO t(x) VALUES (1);
+UPDATE t t1, JSON_TABLE('[2]', '$[*]' COLUMNS (x INT PATH '$')) t2
+SET t1.y = t2.x;
+SELECT * FROM t;
+DROP TABLE t;
+
+--echo #
+--echo # Bug#26679671: SIG 11 IN JSON_BINARY::PARSE_BINARY()
+--echo #
+CREATE TABLE t1(id INT, f1 JSON);
+INSERT INTO t1 VALUES (1, '{\"1\": 1}'), (2, '{\"1\": 2}'), (3, '{\"1\": 3}'),
+(4, '{\"1\": 4}'), (5, '{\"1\": 5}'), (6, '{\"1\": 6}');
+ANALYZE TABLE t1;
+
+ANALYZE TABLE t1;
+
+--sorted_result
+SELECT * FROM t1 as jj1,
+ (SELECT tt2.* FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt;
+EXPLAIN SELECT * FROM t1 as jj1,
+ (SELECT tt2.* FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl) dt;
+
+--disable_parsing
+# psergey: odd name resolution rules?
+SELECT * FROM t1 as jj1,
+ (SELECT tt2.*
+ FROM
+ t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN
+ t1 AS tt3) dt
+ ORDER BY 1,3 LIMIT 10;
+
+EXPLAIN SELECT * FROM t1 as jj1,
+ (SELECT tt2.* FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (id FOR ORDINALITY)) AS tbl STRAIGHT_JOIN
+ t1 AS tt3) dt ORDER BY 1,3 LIMIT 11;
+--enable_parsing
+
+--disable_parsing
+# psergey: crash
+SELECT * FROM t1 WHERE id IN
+ (SELECT id FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+EXPLAIN SELECT * FROM t1 WHERE id IN
+ (SELECT id FROM t1 as tt2,
+ JSON_TABLE(f1, "$" COLUMNS (jf FOR ORDINALITY)) AS tbl);
+--enable_parsing
+
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#26760811: WL#8867: MEMORY LEAK REPORTED BY ASAN AND VALGRIND
+--echo #
+CREATE TABLE t (j JSON);
+INSERT INTO t VALUES
+('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]'),
+('[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]');
+SELECT COUNT(*) FROM t, JSON_TABLE(j, '$[*]' COLUMNS (i INT PATH '$')) AS jt;
+PREPARE ps FROM
+'SELECT COUNT(*) FROM t, JSON_TABLE(j, ''$[*]'' COLUMNS (i INT PATH ''$'')) AS jt';
+EXECUTE ps;
+EXECUTE ps;
+DROP PREPARE ps;
+DROP TABLE t;
+
+--echo #
+--echo # Bug #26781759: NON-UNIQUE ALIAS ERROR NOT BEING THROWN
+--echo #
+--error ER_NONUNIQ_TABLE
+SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias,
+JSON_TABLE(NULL, '$' COLUMNS(j1 FOR ORDINALITY)) AS jalias;
+
+--disable_parsing
+# psergey: odd error !
+--echo #
+--echo # Bug #26761470: WL#8867: JOIN::MAKE_JOIN_PLAN():
+--echo # ASSERTION `SELECT_LEX->IS_RECURSIVE()' FAILED
+--echo #
+CREATE TABLE t1 (x INT);
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (j JSON);
+INSERT INTO t2 (j) VALUES ('[1,2,3]');
+--sorted_result
+SELECT * FROM
+ t1 RIGHT JOIN
+ (SELECT o
+ FROM
+ t2,
+ JSON_TABLE(j, '$[*]' COLUMNS (o FOR ORDINALITY)) AS jt
+ ) AS t3
+ ON (t3.o = t1.x);
+DROP TABLE t1, t2;
+--enable_parsing
+
+--echo #
+--echo # Bug#27152428 JSON_TABLE + PREPARED STATEMENT + VIEW HAS PROBLEM DURING RESOLUTION
+--echo #
+
+CREATE TABLE t1 (a INT, b INT);
+
+CREATE VIEW v2 AS SELECT * FROM t1 LIMIT 2;
+
+SELECT b
+ FROM (SELECT * FROM v2) vq1,
+ JSON_TABLE(CONCAT(vq1.b,'[{\"a\":\"3\"}]'),
+ '$[*]' COLUMNS (id FOR ORDINALITY,
+ jpath VARCHAR(100) PATH '$.a',
+ JEXST INT EXISTS PATH '$.b')
+ ) AS dt;
+
+DROP TABLE t1;
+DROP VIEW v2;
+
+
+--echo #
+--echo # Bug#27189940: CREATE VIEW FAILS ON JSON_TABLE() IN SCHEMA-LESS CONNECTIONS
+--echo # BUG#27217897: JSON_TABLE() FAILS IF NO DATABASE SELECTED
+--echo #
+
+
+# Connect without a schema name:
+connect (conn1,localhost,root,,*NO-ONE*);
+connection conn1;
+
+CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+
+connection default;
+disconnect conn1;
+use test;
+
+SHOW CREATE VIEW test.v;
+SELECT * FROM test.v;
+DROP VIEW test.v;
+
+--echo #
+--echo # Bug#27729112 JSON_TABLE SHOULD DECODE BASE64-ENCODED STRINGS
+--echo #
+--disable_parsing
+SELECT v
+FROM JSON_TABLE(JSON_OBJECT('foo', _binary'bar'), '$'
+ COLUMNS(v VARCHAR(255) PATH '$.foo')) tbl;
+
+CREATE TABLE t1 (col1 VARCHAR(255) CHARACTER SET ucs2,
+ col2 VARCHAR(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_as_cs);
+INSERT INTO t1 VALUES ("æ", "ハ"), ("å", "ø"), ("ø", "パ"), ("@", "バ");
+
+ANALYZE TABLE t1 UPDATE HISTOGRAM ON col1, col2;
+SELECT v value, c cumulfreq
+FROM INFORMATION_SCHEMA.COLUMN_STATISTICS,
+ JSON_TABLE(histogram->'$.buckets', '$[*]'
+ COLUMNS(v VARCHAR(255) CHARACTER SET ucs2 PATH '$[0]',
+ c double PATH '$[1]')) hist
+WHERE column_name = "col1";
+
+SELECT v value, c cumulfreq
+FROM INFORMATION_SCHEMA.COLUMN_STATISTICS,
+ JSON_TABLE(histogram->'$.buckets', '$[*]'
+ COLUMNS(v VARCHAR(255) CHARACTER SET utf8mb4 PATH '$[0]',
+ c double PATH '$[1]')) hist
+WHERE column_name = "col2";
+DROP TABLE t1;
+--enable_parsing
+
+--echo #
+--echo # Bug#28254268: JSON_TABLE() FUNCTION REJECTS SELECT PERMISSIONS
+--echo #
+CREATE DATABASE db2;
+USE db2;
+CREATE TABLE t1 (c JSON);
+INSERT INTO t1 VALUES('[1,2,3]');
+
+CREATE USER user1@localhost;
+GRANT SELECT ON db2.t1 TO user1@localhost;
+
+connect (conn1,localhost,user1,,);
+connection conn1;
+USE db2;
+SELECT t1.c FROM t1;
+SELECT jt.* FROM t1, JSON_TABLE(t1.c, '$[*]' COLUMNS (num INT PATH '$[0]'))
+AS jt;
+
+disconnect conn1;
+connection default;
+
+DROP USER user1@localhost;
+DROP DATABASE db2;
+
+--echo #
+--echo # Bug#27856835 JSON_TABLE RETURNS WRONG DATATYPE WHEN INT-VALUE IS GREATER
+--echo # THAN (2^31-1)
+--echo #
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775807"}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+
+--echo # As we currently have no way of telling if a JSON string value is
+--echo # signed or unsigned, this value will overflow.
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+
+--echo # Here the JSON value is a NUMERIC value, and we thus know if the value
+--echo # is signed or unsigned.
+SELECT id FROM JSON_TABLE('[{"id":9223372036854775808}]', '$[*]' COLUMNS
+(id BIGINT PATH '$.id')) AS json;
+
+--echo # If we tell the JSON table column to be unsigned, we get to store the
+--echo # full value correctly.
+SELECT id FROM JSON_TABLE('[{"id":"9223372036854775808"}]', '$[*]' COLUMNS
+(id BIGINT UNSIGNED PATH '$.id')) AS json;
+
+SELECT id FROM JSON_TABLE('[{"id":"2147483648"}]', '$[*]' COLUMNS
+(id INT UNSIGNED PATH '$.id')) AS json;
+
+--echo # Check that we preserve the signedness of the columns.
+USE test;
+CREATE TABLE t1 AS SELECT id, value FROM
+ JSON_TABLE('[{"id":9223372036854775808, "value":9223372036854775807}]',
+ '$[*]' COLUMNS (id BIGINT UNSIGNED PATH '$.id',
+ value BIGINT PATH '$.value'))
+ AS json;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Bug#28255453: VIEW USING JSON_TABLE FAILS IF NO SCHEMA IS SELECTED
+--echo #
+connect (conn1,localhost,root,,*NO-ONE*);
+CREATE VIEW test.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+SELECT * FROM test.v;
+DROP VIEW test.v;
+
+--echo # Check that a user without access to the schema 'foo' cannot query
+--echo # a JSON_TABLE view in that schema.
+CREATE SCHEMA foo;
+CREATE VIEW foo.v AS SELECT * FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$[0]')) AS jt;
+
+CREATE USER foo@localhost;
+connect (con1,localhost,foo,,);
+--error ER_TABLEACCESS_DENIED_ERROR
+SELECT * FROM foo.v;
+
+connection default;
+disconnect con1;
+DROP USER foo@localhost;
+DROP SCHEMA foo;
+
+--echo # Check that a user with access to the schema 'foo' can do a SELECT with
+--echo # a JSON_TABLE function.
+CREATE SCHEMA foo;
+CREATE USER foo@localhost;
+GRANT EXECUTE ON foo.* TO foo@localhost;
+connect (con1,localhost,foo,,foo);
+SELECT 1 FROM JSON_TABLE('[1,2,3]', '$[*]' COLUMNS (num INT PATH '$.a')) AS jt;
+connection default;
+disconnect con1;
+DROP USER foo@localhost;
+DROP SCHEMA foo;
+
+--echo #
+--echo # Bug#27923406 ERROR 1142 (42000) WHEN USING JSON_TABLE
+--echo #
+CREATE SCHEMA my_schema;
+
+CREATE USER foo@localhost;
+GRANT EXECUTE ON my_schema.* TO foo@localhost;
+connect (con1,localhost,foo,,my_schema);
+
+SELECT
+ *
+FROM
+ JSON_TABLE(
+ '[{"x":2,"y":"8"},{"x":"3","y":"7"},{"x":"4","y":6}]',
+ "$[*]" COLUMNS(
+ xval VARCHAR(100) PATH "$.x",
+ yval VARCHAR(100) PATH "$.y"
+ )
+ ) AS jt1;
+
+connection default;
+disconnect con1;
+DROP USER foo@localhost;
+DROP SCHEMA my_schema;
+
+--echo #
+--echo # Bug#28538315: JSON_TABLE() COLUMN TYPES DON'T SUPPORT COLLATE CLAUSE
+--echo #
+
+--disable_parsing
+# psergey: COLLATE is not supported?
+CREATE TABLE t1 SELECT *
+ FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10)
+ CHARSET utf8mb4
+ PATH '$')) AS jt1;
+SHOW CREATE TABLE t1;
+
+CREATE TABLE t2 SELECT *
+ FROM JSON_TABLE('"test"', '$' COLUMNS(x VARCHAR(10)
+ CHARSET utf8mb4 COLLATE utf8mb4_bin
+ PATH '$')) AS jt1;
+SHOW CREATE TABLE t2;
+
+CREATE TABLE t3 AS SELECT *
+ FROM JSON_TABLE('"a"', '$' COLUMNS (a VARCHAR(10)
+ COLLATE ascii_bin
+ PATH '$')) jt;
+SHOW CREATE TABLE t3;
+DROP TABLE t1, t2, t3;
+--enable_parsing
+
+
+--echo #
+--echo # Bug#28643862 JSON_TABLE'S "COLUMNS" CLAUSE USES
+--echo # GLOBAL.CHARACTER_SET_RESULTS DEFAULT CHARSET
+--echo #
+
+SET @@SESSION.character_set_connection = ascii;
+
+CREATE TABLE t1 SELECT a.col
+ FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a;
+
+SHOW CREATE TABLE t1;
+
+SET @@SESSION.collation_connection = latin1_bin;
+
+CREATE TABLE t2 SELECT a.col
+ FROM JSON_TABLE('"test"', '$' COLUMNS(col VARCHAR(10) PATH '$')) AS a;
+
+SHOW CREATE TABLE t2;
+
+DROP TABLE t1, t2;
+
+SET @@SESSION.character_set_connection = DEFAULT;
+
+--echo #
+--echo # Bug#28851656: JSON_TABLE RETURN "UNKNOWN DATABASE ''" FROM A FUNCTION
+--echo #
+CREATE FUNCTION FN_COUNT_ROWS(X JSON)
+RETURNS INT DETERMINISTIC
+ RETURN (
+ SELECT COUNT(*) FROM JSON_TABLE( X, '$[*]' COLUMNS( I INT PATH '$')) der
+ );
+
+SELECT FN_COUNT_ROWS('[1, 2]') CNT;
+SELECT FN_COUNT_ROWS('[1, 2, 3]') CNT;
+SELECT FN_COUNT_ROWS('[1, 2, 3, 4]') CNT;
+
+DROP FUNCTION FN_COUNT_ROWS;
+
+--echo #
+--echo # Bug#30310265: VIEWS LOSE THE CHARACTER SET OF JSON_TABLE'S
+--echo # PATH ARGUMENTS
+--echo #
+
+CREATE VIEW v1 AS
+ SELECT * FROM JSON_TABLE('{"å":{"å":1}}', '$.å' COLUMNS (x INT PATH '$.å')) t;
+SELECT * FROM v1;
+SET NAMES latin1;
+# Used to return zero rows.
+SELECT * FROM v1;
+SET NAMES DEFAULT;
+DROP VIEW v1;
+
+CREATE VIEW v2 AS
+ SELECT * FROM JSON_TABLE('{}', '$' COLUMNS (
+ x VARCHAR(10) PATH '$.a' DEFAULT '"isn''t here"' ON EMPTY)
+ ) t;
+# SHOW CREATE VIEW and SELECT from the view used to fail with a syntax error.
+--disable_parsing
+# psergey: print-out quoting
+SHOW CREATE VIEW v2;
+SELECT * FROM v2;
+--enable_parsing
+DROP VIEW v2;
+
+--disable_parsing
+# psergey: don't work
+# The string literals in JSON_TABLE didn't accept character set
+# introducers. Verify that they are accepted now.
+SELECT * FROM
+ JSON_TABLE(JSON_OBJECT(),
+ _utf8mb4'$' COLUMNS (NESTED PATH _utf8mb4'$.x' COLUMNS
+ (y INT PATH _utf8mb4'$.y'
+ DEFAULT _utf8mb4'1' ON EMPTY
+ DEFAULT _utf8mb4'2' ON ERROR))) jt;
+--enable_parsing
+
+--echo #
+--echo # Bug#30382156: STORED PROCEDURE, JSON_TABLE AND "CONST" JOIN TYPE
+--echo #
+CREATE TABLE t (id INT PRIMARY KEY, j JSON);
+INSERT INTO t VALUES (1, '{"x":1}');
+CREATE PROCEDURE p()
+ SELECT * FROM t, JSON_TABLE(j, '$' COLUMNS (x INT PATH '$.x')) jt
+ WHERE id = 1;
+CALL p();
+CALL p();
+CALL p();
+DROP PROCEDURE p;
+DROP TABLE t;
+
+--echo #
+--echo # WL#13512: Deprecate ON ERROR before ON EMPTY in JSON_TABLE syntax
+--echo #
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ DEFAULT '"a"' ON ERROR DEFAULT '"b"' ON EMPTY)) jt;
+SELECT * FROM
+ JSON_TABLE('{}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ NULL ON ERROR NULL ON EMPTY)) jt;
+SELECT * FROM
+ JSON_TABLE('{"x":"c"}', '$' COLUMNS (x VARCHAR(10) PATH '$.x'
+ ERROR ON ERROR ERROR ON EMPTY)) jt;
+--echo #
+--echo # Bug#30628330: INSERT WITH JSON_TABLE FAILS DUE TO WARNING INVOKED
+--echo # WITH ON ERROR CLAUSE
+--echo #
+CREATE TABLE t(id INT, a TINYINT, b VARCHAR(3), c DATE, d DECIMAL);
+# This statement used to fail with "data too long".
+INSERT INTO t SELECT * FROM
+ JSON_TABLE(JSON_ARRAY(JSON_OBJECT('a', 1, 'b', 'abc'),
+ JSON_OBJECT('a', 2, 'b', 'abcd'),
+ JSON_OBJECT('a', 1000, 'b', 'xyz'),
+ JSON_OBJECT('c', TIME'12:00:00', 'd', 1e308)),
+ '$[*]' COLUMNS (id FOR ORDINALITY,
+ a TINYINT PATH '$.a' DEFAULT '111' ON ERROR,
+ b VARCHAR(3) PATH '$.b' DEFAULT '"ERR"' ON ERROR,
+ c DATE PATH '$.c' DEFAULT '"2001-01-01"' ON ERROR,
+ d DECIMAL PATH '$.c' DEFAULT '999' ON ERROR)
+ ) AS jt;
+SELECT * FROM t ORDER BY id;
+DROP TABLE t;
+
+--echo #
+--echo # Bug#30263373: INCORRECT OUTPUT FROM TABLE_FUNCTION_JSON::PRINT()
+--echo #
+
+CREATE VIEW v AS SELECT * FROM
+ JSON_TABLE('[123]', '$[*]' COLUMNS (`name with space 1` INT PATH '$',
+ `name with space 2` FOR ORDINALITY)) jt;
+# Used to fail with a syntax error, due to unquoted column names in
+# the view definition.
+SELECT * FROM v;
+DROP VIEW v;
+
+CREATE VIEW v AS SELECT HEX(x), HEX(y) FROM
+ JSON_TABLE('["abc"]', '$[*]' COLUMNS (x BINARY(10) PATH '$',
+ y VARBINARY(10) PATH '$')) jt;
+# Used to say CHAR(10) and VARCHAR(10) instead of BINARY(10) and VARBINARY(10).
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+ WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+SELECT * FROM v;
+DROP VIEW v;
+
+CREATE VIEW v AS SELECT * FROM
+ JSON_TABLE('[123]', '$[*]' COLUMNS(ti TINYINT PATH '$',
+ si SMALLINT PATH '$',
+ mi MEDIUMINT PATH '$',
+ i INT PATH '$',
+ bi BIGINT PATH '$',
+ tiu TINYINT UNSIGNED PATH '$',
+ siu SMALLINT UNSIGNED PATH '$',
+ miu MEDIUMINT UNSIGNED PATH '$',
+ iu INT UNSIGNED PATH '$',
+ biu BIGINT UNSIGNED PATH '$')) jt;
+# Used to lack the UNSIGNED attribute for the unsigned columns.
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+ WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+SELECT * FROM v;
+DROP VIEW v;
+
+--disable_parsing
+CREATE VIEW v AS SELECT * FROM
+ JSON_TABLE('[]', '$[*]' COLUMNS (g GEOMETRY PATH '$',
+ ls LINESTRING PATH '$')) AS jt;
+# Used to say GEOMETRY for both columns.
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+ WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+SELECT * FROM v;
+DROP VIEW v;
+--enable_parsing
+
+--disable_parsing
+CREATE VIEW v AS SELECT * FROM
+ JSON_TABLE('["abc"]', '$[*]' COLUMNS
+ (x VARCHAR(10) CHARSET latin1 PATH '$',
+ y VARCHAR(10) COLLATE utf8mb4_de_pb_0900_ai_ci PATH '$',
+ z LONGTEXT COLLATE utf8mb4_bin PATH '$')) jt;
+# Character set and collation information wasn't included.
+SELECT VIEW_DEFINITION FROM INFORMATION_SCHEMA.VIEWS
+ WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME = 'v';
+# Used to return the default collation instead of the collation
+# specified in the JSON_TABLE column definitions.
+SELECT x, y, z, COLLATION(x) c_x, COLLATION(y) c_y, COLLATION(z) c_z FROM v;
+DROP VIEW v;
+--enable_parsing
+
+--echo #
+--echo # Bug#31345503 JSON_TABLE SHOULD RETURN SQL NULL FOR JSON NULL
+--echo #
+
+SELECT *
+FROM
+JSON_TABLE(
+'[
+ {"c1": null,
+ "c2": [{"c": "c_val", "l": [1,2]}],
+ "c3": null},
+ {"c1": true,
+ "c2": [{"c": "c_val","l": [11]}, {"c": "c_val", "l": [false]}],
+ "c3": true},
+ {"c1": false,
+ "c2": [{"c": null,"l": [true]}, {"c": "c_val", "l": [null]}],
+ "c3": false}
+ ]',
+ '$[*]' COLUMNS(
+ top_ord FOR ORDINALITY,
+ c1path VARCHAR(10) PATH '$.c1' ERROR ON ERROR,
+ NESTED PATH '$.c2[*]' COLUMNS (
+ c2path VARCHAR(10) PATH '$.c' ERROR ON ERROR,
+ ord FOR ORDINALITY,
+ NESTED PATH '$.l[*]' COLUMNS (lpath_c VARCHAR(10) PATH '$' ERROR ON ERROR,
+ lpath_i INT PATH '$' ERROR ON ERROR)
+ ),
+ c3path INT PATH '$.c3' ERROR ON ERROR
+ )
+) as jt;
+
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index b9cd418f295..054d3871a09 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -165,6 +165,7 @@ SET (SQL_SOURCE
rowid_filter.cc rowid_filter.h
opt_trace.cc
table_cache.cc encryption.cc temporary_tables.cc
+ table_function.cc
proxy_protocol.cc backup.cc xa.cc
${CMAKE_CURRENT_BINARY_DIR}/lex_hash.h
${CMAKE_CURRENT_BINARY_DIR}/lex_token.h
diff --git a/sql/field.cc b/sql/field.cc
index 45b3a3c703e..fd258eaa959 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1753,7 +1753,8 @@ bool Field_num::get_int(CHARSET_INFO *cs, const char *from, size_t len,
if (get_thd()->count_cuted_fields > CHECK_FIELD_EXPRESSION &&
check_int(cs, from, len, end, error))
return 1;
- return 0;
+
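+  /*
+    In expression context (count_cuted_fields == CHECK_FIELD_EXPRESSION) the
+    check_int() branch above is skipped and no warning is pushed, so report
+    a failed conversion directly to the caller.
+  */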
+ return error && get_thd()->count_cuted_fields == CHECK_FIELD_EXPRESSION;
out_of_range:
set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
diff --git a/sql/field.h b/sql/field.h
index dfc02149f9d..ac480b15324 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1611,6 +1611,7 @@ public:
virtual longlong val_time_packed(THD *thd);
virtual const TYPELIB *get_typelib() const { return NULL; }
virtual CHARSET_INFO *charset() const= 0;
+ virtual void change_charset(const DTCollation &new_cs) {}
virtual const DTCollation &dtcollation() const= 0;
virtual CHARSET_INFO *charset_for_protocol(void) const
{ return binary() ? &my_charset_bin : charset(); }
@@ -2109,6 +2110,12 @@ public:
{
return m_collation;
}
+ void change_charset(const DTCollation &new_cs) override
+ {
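+    /*
+      field_length is stored in bytes; rescale it by the mbmaxlen ratio so
+      the field keeps the same character capacity under the new collation.
+    */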
+ field_length= (field_length * new_cs.collation->mbmaxlen) /
+ m_collation.collation->mbmaxlen;
+ m_collation= new_cs;
+ }
bool binary() const override { return field_charset() == &my_charset_bin; }
uint32 max_display_length() const override { return field_length; }
uint32 character_octet_length() const override { return field_length; }
diff --git a/sql/handler.cc b/sql/handler.cc
index 228c016e082..b69cfa7e1eb 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -619,10 +619,12 @@ int ha_finalize_handlerton(st_plugin_int *plugin)
}
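+/*
+  Empty table-file extension list, exported (see the extern declaration in
+  handler.h) so that other handlertons can reuse it as their default
+  tablefile_extensions.
+*/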
+const char *hton_no_exts[]= { 0 };
+
+
int ha_initialize_handlerton(st_plugin_int *plugin)
{
handlerton *hton;
- static const char *no_exts[]= { 0 };
DBUG_ENTER("ha_initialize_handlerton");
DBUG_PRINT("plugin", ("initialize plugin: '%s'", plugin->name.str));
@@ -635,7 +637,7 @@ int ha_initialize_handlerton(st_plugin_int *plugin)
goto err_no_hton_memory;
}
- hton->tablefile_extensions= no_exts;
+ hton->tablefile_extensions= hton_no_exts;
hton->discover_table_names= hton_ext_based_table_discovery;
hton->drop_table= hton_drop_table;
diff --git a/sql/handler.h b/sql/handler.h
index 4e1e3f0413f..9fdb71abd8c 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -1710,6 +1710,8 @@ struct handlerton
};
+extern const char *hton_no_exts[];
+
static inline LEX_CSTRING *hton_name(const handlerton *hton)
{
return &(hton2plugin[hton->slot]->name);
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index f1a4ac4ef8a..6b6210d8359 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -115,10 +115,6 @@ String *Item_func_geometry_from_wkb::val_str(String *str)
}
-void report_json_error_ex(String *js, json_engine_t *je,
- const char *fname, int n_param,
- Sql_condition::enum_warning_level lv);
-
String *Item_func_geometry_from_json::val_str(String *str)
{
DBUG_ASSERT(fixed == 1);
@@ -178,7 +174,8 @@ String *Item_func_geometry_from_json::val_str(String *str)
my_error(ER_GIS_INVALID_DATA, MYF(0), "ST_GeometryFromJSON");
break;
default:
- report_json_error_ex(js, &je, func_name(), 0, Sql_condition::WARN_LEVEL_WARN);
+ report_json_error_ex(js->ptr(), &je, func_name(), 0,
+ Sql_condition::WARN_LEVEL_WARN);
return NULL;
}
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index 032ecb1bb91..d9a482e88cd 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -247,15 +247,15 @@ error:
#define report_json_error(js, je, n_param) \
- report_json_error_ex(js, je, func_name(), n_param, \
+ report_json_error_ex(js->ptr(), je, func_name(), n_param, \
Sql_condition::WARN_LEVEL_WARN)
-void report_json_error_ex(String *js, json_engine_t *je,
+void report_json_error_ex(const char *js, json_engine_t *je,
const char *fname, int n_param,
Sql_condition::enum_warning_level lv)
{
THD *thd= current_thd;
- int position= (int)((const char *) je->s.c_str - js->ptr());
+ int position= (int)((const char *) je->s.c_str - js);
uint code;
n_param++;
@@ -285,34 +285,39 @@ void report_json_error_ex(String *js, json_engine_t *je,
case JE_DEPTH:
code= ER_JSON_DEPTH;
- push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT,
- n_param, fname, position);
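+      /*
+        With WARN_LEVEL_ERROR the condition is raised as a real error
+        rather than pushed as a warning.
+      */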
+ if (lv == Sql_condition::WARN_LEVEL_ERROR)
+ my_error(code, MYF(0), JSON_DEPTH_LIMIT, n_param, fname, position);
+ else
+ push_warning_printf(thd, lv, code, ER_THD(thd, code), JSON_DEPTH_LIMIT,
+ n_param, fname, position);
return;
default:
return;
}
- push_warning_printf(thd, lv, code, ER_THD(thd, code),
- n_param, fname, position);
+ if (lv == Sql_condition::WARN_LEVEL_ERROR)
+ my_error(code, MYF(0), n_param, fname, position);
+ else
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ n_param, fname, position);
}
-
#define NO_WILDCARD_ALLOWED 1
#define SHOULD_END_WITH_ARRAY 2
#define TRIVIAL_PATH_NOT_ALLOWED 3
#define report_path_error(js, je, n_param) \
- report_path_error_ex(js, je, func_name(), n_param,\
+ report_path_error_ex(js->ptr(), je, func_name(), n_param,\
Sql_condition::WARN_LEVEL_WARN)
-static void report_path_error_ex(String *ps, json_path_t *p,
- const char *fname, int n_param,
- Sql_condition::enum_warning_level lv)
+void report_path_error_ex(const char *ps, json_path_t *p,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv)
{
THD *thd= current_thd;
- int position= (int)((const char *) p->s.c_str - ps->ptr() + 1);
+ int position= (int)((const char *) p->s.c_str - ps + 1);
uint code;
n_param++;
@@ -331,8 +336,11 @@ static void report_path_error_ex(String *ps, json_path_t *p,
case JE_DEPTH:
code= ER_JSON_PATH_DEPTH;
- push_warning_printf(thd, lv, code, ER_THD(thd, code),
- JSON_DEPTH_LIMIT, n_param, fname, position);
+ if (lv == Sql_condition::WARN_LEVEL_ERROR)
+ my_error(code, MYF(0), JSON_DEPTH_LIMIT, n_param, fname, position);
+ else
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ JSON_DEPTH_LIMIT, n_param, fname, position);
return;
case NO_WILDCARD_ALLOWED:
@@ -347,12 +355,14 @@ static void report_path_error_ex(String *ps, json_path_t *p,
default:
return;
}
- push_warning_printf(thd, lv, code, ER_THD(thd, code),
- n_param, fname, position);
+ if (lv == Sql_condition::WARN_LEVEL_ERROR)
+ my_error(code, MYF(0), n_param, fname, position);
+ else
+ push_warning_printf(thd, lv, code, ER_THD(thd, code),
+ n_param, fname, position);
}
-
/*
Checks if the path has '.*' '[*]' or '**' constructions
and sets the NO_WILDCARD_ALLOWED error if the case.
diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h
index ec6c6696001..519bad3f6f3 100644
--- a/sql/item_jsonfunc.h
+++ b/sql/item_jsonfunc.h
@@ -41,6 +41,13 @@ public:
};
+void report_path_error_ex(const char *ps, json_path_t *p,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv);
+void report_json_error_ex(const char *js, json_engine_t *je,
+ const char *fname, int n_param,
+ Sql_condition::enum_warning_level lv);
+
class Json_engine_scan: public json_engine_t
{
public:
diff --git a/sql/lex.h b/sql/lex.h
index 542356c0e43..f17d8204798 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -211,6 +211,7 @@ static SYMBOL symbols[] = {
{ "ELSE", SYM(ELSE)},
{ "ELSEIF", SYM(ELSEIF_MARIADB_SYM)},
{ "ELSIF", SYM(ELSIF_MARIADB_SYM)},
+ { "EMPTY", SYM(EMPTY_SYM)},
{ "ENABLE", SYM(ENABLE_SYM)},
{ "ENCLOSED", SYM(ENCLOSED)},
{ "END", SYM(END)},
@@ -419,6 +420,7 @@ static SYMBOL symbols[] = {
{ "NATIONAL", SYM(NATIONAL_SYM)},
{ "NATURAL", SYM(NATURAL)},
{ "NCHAR", SYM(NCHAR_SYM)},
+ { "NESTED", SYM(NESTED_SYM)},
{ "NEVER", SYM(NEVER_SYM)},
{ "NEW", SYM(NEW_SYM)},
{ "NEXT", SYM(NEXT_SYM)},
@@ -453,6 +455,7 @@ static SYMBOL symbols[] = {
{ "OPTIONALLY", SYM(OPTIONALLY)},
{ "OR", SYM(OR_SYM)},
{ "ORDER", SYM(ORDER_SYM)},
+ { "ORDINALITY", SYM(ORDINALITY_SYM)},
{ "OTHERS", SYM(OTHERS_MARIADB_SYM)},
{ "OUT", SYM(OUT_SYM)},
{ "OUTER", SYM(OUTER)},
@@ -466,6 +469,7 @@ static SYMBOL symbols[] = {
{ "PAGE_CHECKSUM", SYM(PAGE_CHECKSUM_SYM)},
{ "PARSER", SYM(PARSER_SYM)},
{ "PARSE_VCOL_EXPR", SYM(PARSE_VCOL_EXPR_SYM)},
+ { "PATH", SYM(PATH_SYM)},
{ "PERIOD", SYM(PERIOD_SYM)},
{ "PARTIAL", SYM(PARTIAL)},
{ "PARTITION", SYM(PARTITION_SYM)},
@@ -755,6 +759,7 @@ static SYMBOL sql_functions[] = {
{ "GROUP_CONCAT", SYM(GROUP_CONCAT_SYM)},
{ "JSON_ARRAYAGG", SYM(JSON_ARRAYAGG_SYM)},
{ "JSON_OBJECTAGG", SYM(JSON_OBJECTAGG_SYM)},
+ { "JSON_TABLE", SYM(JSON_TABLE_SYM)},
{ "LAG", SYM(LAG_SYM)},
{ "LEAD", SYM(LEAD_SYM)},
{ "MAX", SYM(MAX_SYM)},
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 0a87d9ccd2f..1ac38aaeb92 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -1414,6 +1414,14 @@ void get_delayed_table_estimates(TABLE *table,
double *startup_cost)
{
Item_in_subselect *item= table->pos_in_table_list->jtbm_subselect;
+ Table_function_json_table *table_function=
+ table->pos_in_table_list->table_function;
+
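+  /* A table function supplies its own row count and cost estimates. */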
+ if (table_function)
+ {
+ table_function->get_estimates(out_rows, scan_time, startup_cost);
+ return;
+ }
DBUG_ASSERT(item->engine->engine_type() ==
subselect_engine::HASH_SJ_ENGINE);
@@ -1784,6 +1792,10 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred)
tl->jtbm_subselect->fix_after_pullout(parent_lex, &dummy, true);
DBUG_ASSERT(dummy == tl->jtbm_subselect);
}
+ else if (tl->table_function)
+ {
+ tl->table_function->fix_after_pullout(tl, parent_lex, true);
+ }
SELECT_LEX *old_sl= tl->select_lex;
tl->select_lex= parent_join->select_lex;
for (TABLE_LIST *emb= tl->embedding;
diff --git a/sql/opt_trace.cc b/sql/opt_trace.cc
index ddec6d5ed2d..bf1408ee989 100644
--- a/sql/opt_trace.cc
+++ b/sql/opt_trace.cc
@@ -247,9 +247,11 @@ void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
{
/*
Anonymous derived tables (as in
- "SELECT ... FROM (SELECT ...)") don't have their grant.privilege set.
+ "SELECT ... FROM (SELECT ...)") and table functions
+ don't have their grant.privilege set.
*/
- if (!t->is_anonymous_derived_table())
+ if (!t->is_anonymous_derived_table() &&
+ !t->table_function)
{
const GRANT_INFO backup_grant_info= t->grant;
Security_context *const backup_table_sctx= t->security_ctx;
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 3f3cb7677fc..1167dbc2d23 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -7974,3 +7974,5 @@ ER_NOT_ALLOWED_IN_THIS_CONTEXT
eng "'%-.128s' is not allowed in this context"
ER_DATA_WAS_COMMITED_UNDER_ROLLBACK
eng "Engine %s does not support rollback. Changes were committed during rollback call"
+ER_JSON_TABLE_ERROR_ON_FIELD
+ eng "Field '%s' can't be set for JSON_TABLE '%s'."
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index aed0ad02e4d..b0346964eea 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -8144,6 +8144,16 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables,
if (!want_access)
continue; // ok
+ if (t_ref->table_function)
+ {
+ /*
+        A table function does not require any privilege checks.
+ */
+ t_ref->grant.privilege|= TMP_TABLE_ACLS;
+ t_ref->grant.want_privilege= NO_ACL;
+ continue;
+ }
+
if (!(~t_ref->grant.privilege & want_access) ||
t_ref->is_anonymous_derived_table() || t_ref->schema_table)
{
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 291cad89d79..db3baaeb54a 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -3684,6 +3684,14 @@ open_and_process_table(THD *thd, TABLE_LIST *tables, uint *counter, uint flags,
error= TRUE;
goto end;
}
+
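+  /*
+    A table function has no base table to open: build its table with
+    create_table_for_function() and skip the regular open path.
+  */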
+ if (tables->table_function)
+ {
+ if (!create_table_for_function(thd, tables))
+ error= TRUE;
+ goto end;
+ }
+
DBUG_PRINT("tcache", ("opening table: '%s'.'%s' item: %p",
tables->db.str, tables->table_name.str, tables));
(*counter)++;
@@ -6094,6 +6102,10 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
if (table_list->sequence)
DBUG_RETURN(0);
+ if (table_list->table_function &&
+ !table_list->table_function->ready_for_lookup())
+ DBUG_RETURN(0);
+
*actual_table= NULL;
if (table_list->field_translation)
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 3c28d089e20..71d0cd4cf85 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -7493,5 +7493,9 @@ public:
extern THD_list server_threads;
+void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps);
+void
+setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count);
+
#endif /* MYSQL_SERVER */
#endif /* SQL_CLASS_INCLUDED */
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index 353217982e2..c4d1ae6f70a 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -1635,6 +1635,9 @@ void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_t
case ET_DISTINCT:
writer->add_member("distinct").add_bool(true);
break;
+ case ET_TABLE_FUNCTION:
+ writer->add_member("table_function").add_str("json_table");
+ break;
default:
DBUG_ASSERT(0);
@@ -2028,6 +2031,9 @@ void Explain_table_access::append_tag_name(String *str, enum explain_extra_tag t
str->append(" (scanning)");
break;
}
+ case ET_TABLE_FUNCTION:
+ str->append("Table function: json_table");
+ break;
default:
str->append(extra_tag_text[tag]);
}
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index 9090416847f..6587f0e4178 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -550,6 +550,7 @@ enum explain_extra_tag
ET_CONST_ROW_NOT_FOUND,
ET_UNIQUE_ROW_NOT_FOUND,
ET_IMPOSSIBLE_ON_CONDITION,
+ ET_TABLE_FUNCTION,
ET_total
};
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 10b71781ce4..8c684daddbc 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -34,6 +34,7 @@
#include "sql_tvc.h"
#include "item.h"
#include "sql_limit.h" // Select_limit_counters
+#include "table_function.h" // Json_table_column
#include "sql_schema.h"
/* Used for flags of nesting constructs */
@@ -454,6 +455,7 @@ enum enum_drop_mode
#define TL_OPTION_IGNORE_LEAVES 4
#define TL_OPTION_ALIAS 8
#define TL_OPTION_SEQUENCE 16
+#define TL_OPTION_TABLE_FUNCTION 32
typedef List<Item> List_item;
typedef Mem_root_array<ORDER*, true> Group_list_ptrs;
@@ -1397,7 +1399,8 @@ public:
enum_mdl_type mdl_type= MDL_SHARED_READ,
List<Index_hint> *hints= 0,
List<String> *partition_names= 0,
- LEX_STRING *option= 0);
+ LEX_STRING *option= 0,
+ Table_function_json_table *tfunc= 0);
TABLE_LIST* get_table_list();
bool init_nested_join(THD *thd);
TABLE_LIST *end_nested_join(THD *thd);
@@ -3297,6 +3300,7 @@ public:
SQL_I_List<ORDER> proc_list;
SQL_I_List<TABLE_LIST> auxiliary_table_list, save_list;
Column_definition *last_field;
+ Table_function_json_table *json_table;
Item_sum *in_sum_func;
udf_func udf;
HA_CHECK_OPT check_opt; // check/repair options
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 9211625c804..1337554a86f 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -7132,6 +7132,9 @@ check_table_access(THD *thd, privilege_t requirements, TABLE_LIST *tables,
if (table_ref->is_anonymous_derived_table())
continue;
+ if (table_ref->table_function)
+ continue;
+
if (table_ref->sequence)
{
/* We want to have either SELECT or INSERT rights to sequences depending
@@ -8195,7 +8198,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
enum_mdl_type mdl_type,
List<Index_hint> *index_hints_arg,
List<String> *partition_names,
- LEX_STRING *option)
+ LEX_STRING *option,
+ Table_function_json_table *tfunc)
{
TABLE_LIST *ptr;
TABLE_LIST *UNINIT_VAR(previous_table_ref); /* The table preceding the current one. */
@@ -8220,6 +8224,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
}
if (unlikely(table->is_derived_table() == FALSE && table->db.str &&
+ !(table_options & TL_OPTION_TABLE_FUNCTION) &&
check_db_name((LEX_STRING*) &table->db)))
{
my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str);
@@ -8263,6 +8268,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->table_name= table->table;
ptr->lock_type= lock_type;
+ ptr->table_function= tfunc;
ptr->updating= MY_TEST(table_options & TL_OPTION_UPDATING);
/* TODO: remove TL_OPTION_FORCE_INDEX as it looks like it's not used */
ptr->force_index= MY_TEST(table_options & TL_OPTION_FORCE_INDEX);
@@ -8307,7 +8313,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
{
if (unlikely(!my_strcasecmp(table_alias_charset, alias_str.str,
tables->alias.str) &&
- !cmp(&ptr->db, &tables->db) && ! tables->sequence))
+ (tables->table_function || ptr->table_function ||
+ !cmp(&ptr->db, &tables->db)) &&
+ !tables->sequence))
{
my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str.str); /* purecov: tested */
DBUG_RETURN(0); /* purecov: tested */
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5422346884d..68a08738a31 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1256,6 +1256,14 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
enum_parsing_place save_place=
thd->lex->current_select->context_analysis_place;
thd->lex->current_select->context_analysis_place= SELECT_LIST;
+
+ for (TABLE_LIST *tbl= tables_list; tbl; tbl= tbl->next_local)
+ {
+ if (tbl->table_function &&
+ tbl->table_function->setup(thd, tbl))
+ DBUG_RETURN(-1);
+ }
+
if (setup_fields(thd, ref_ptrs, fields_list, MARK_COLUMNS_READ,
&all_fields, &select_lex->pre_fix, 1))
DBUG_RETURN(-1);
@@ -12664,6 +12672,10 @@ uint check_join_cache_usage(JOIN_TAB *tab,
!join->allowed_outer_join_with_cache)
goto no_join_cache;
+ if (tab->table->pos_in_table_list->table_function &&
+ !tab->table->pos_in_table_list->table_function->join_cache_allowed())
+ goto no_join_cache;
+
/*
Non-linked join buffers can't guarantee one match
*/
@@ -16408,7 +16420,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
if (table->outer_join && !table->embedding && table->table)
table->table->maybe_null= FALSE;
table->outer_join= 0;
- if (!(straight_join || table->straight))
+ if (!(straight_join || table->straight || table->table_function))
{
table->dep_tables= 0;
TABLE_LIST *embedding= table->embedding;
@@ -26678,6 +26690,9 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
!((QUICK_ROR_INTERSECT_SELECT*)cur_quick)->need_to_fetch_row)
key_read=1;
+ if (table_list->table_function)
+ eta->push_extra(ET_TABLE_FUNCTION);
+
if (info)
{
eta->push_extra(info);
@@ -27514,6 +27529,14 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
cmp_name= table_name.str;
}
}
+ else if (table_function)
+ {
+ /* A table function. */
+ (void) table_function->print(thd, this, str, query_type);
+ str->append(' ');
+ append_identifier(thd, str, &alias);
+ cmp_name= alias.str;
+ }
else
{
// A normal table
diff --git a/sql/sql_select.h b/sql/sql_select.h
index fad008ebcb9..40de4aa411f 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -2448,7 +2448,6 @@ bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options);
bool open_tmp_table(TABLE *table);
-void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps);
double prev_record_reads(const POSITION *positions, uint idx, table_map found_ref);
void fix_list_after_tbl_changes(SELECT_LEX *new_parent, List<TABLE_LIST> *tlist);
double get_tmp_table_lookup_cost(THD *thd, double row_count, uint row_size);
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index cfd43bd13ab..f5a179e8a28 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -522,7 +522,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
{
/* is this table temporary and is not view? */
if (tbl->table->s->tmp_table != NO_TMP_TABLE && !tbl->view &&
- !tbl->schema_table)
+ !tbl->schema_table && !tbl->table_function)
{
my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias.str);
res= TRUE;
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 4fb9419d2f8..9a94d932d85 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -68,6 +68,7 @@
#include "sql_sequence.h"
#include "my_base.h"
#include "sql_type_json.h"
+#include "table_function.h"
/* this is to get the bison compilation windows warnings out */
#ifdef _MSC_VER
@@ -195,6 +196,19 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
MYSQL_YYABORT; \
} while(0)
+
+#define set_collation(X) \
+ do { \
+ if (X) \
+ { \
+ if (unlikely(Lex->charset && !my_charset_same(Lex->charset,X))) \
+ my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), \
+ X->name,Lex->charset->csname)); \
+ Lex->charset= X; \
+ } \
+ } while(0)
+
+
%}
%union {
int num;
@@ -219,6 +233,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
Lex_for_loop_st for_loop;
Lex_for_loop_bounds_st for_loop_bounds;
Lex_trim_st trim;
+ Json_table_column::On_response json_on_response;
vers_history_point_t vers_history_point;
struct
{
@@ -238,6 +253,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
/* pointers */
Lex_ident_sys *ident_sys_ptr;
Create_field *create_field;
+ Json_table_column *json_table_column;
Spvar_definition *spvar_definition;
Row_definition_list *spvar_definition_list;
const Type_handler *type_handler;
@@ -506,6 +522,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> ELSEIF_MARIADB_SYM
%token <kwd> ELSE /* SQL-2003-R */
%token <kwd> ELSIF_ORACLE_SYM /* PLSQL-R */
+%token <kwd> EMPTY_SYM /* SQL-2016-R */
%token <kwd> ENCLOSED
%token <kwd> ESCAPED
%token <kwd> EXCEPT_SYM /* SQL-2003-R */
@@ -524,6 +541,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> GROUP_CONCAT_SYM
%token <rwd> JSON_ARRAYAGG_SYM
%token <rwd> JSON_OBJECTAGG_SYM
+%token <rwd> JSON_TABLE_SYM
%token <kwd> GROUP_SYM /* SQL-2003-R */
%token <kwd> HAVING /* SQL-2003-R */
%token <kwd> HOUR_MICROSECOND_SYM
@@ -581,6 +599,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> MOD_SYM /* SQL-2003-N */
%token <kwd> NATURAL /* SQL-2003-R */
%token <kwd> NEG
+%token <kwd> NESTED_SYM /* SQL-2003-N */
%token <kwd> NOT_SYM /* SQL-2003-R */
%token <kwd> NO_WRITE_TO_BINLOG
%token <kwd> NOW_SYM
@@ -592,6 +611,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> OPTIMIZE
%token <kwd> OPTIONALLY
%token <kwd> ORDER_SYM /* SQL-2003-R */
+%token <kwd> ORDINALITY_SYM /* SQL-2003-N */
%token <kwd> OR_SYM /* SQL-2003-R */
%token <kwd> OTHERS_ORACLE_SYM /* SQL-2011-N, PLSQL-R */
%token <kwd> OUTER
@@ -602,6 +622,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%token <kwd> PAGE_CHECKSUM_SYM
%token <kwd> PARSE_VCOL_EXPR_SYM
%token <kwd> PARTITION_SYM /* SQL-2003-R */
+%token <kwd> PATH_SYM /* SQL-2003-N */
%token <kwd> PERCENTILE_CONT_SYM
%token <kwd> PERCENTILE_DISC_SYM
%token <kwd> PERCENT_RANK_SYM
@@ -1297,6 +1318,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%type <lex_string_with_metadata>
TEXT_STRING
NCHAR_STRING
+ json_text_literal
%type <lex_str_ptr>
opt_table_alias_clause
@@ -1356,6 +1378,8 @@ End SQL_MODE_ORACLE_SPECIFIC */
%type <sp_handler> sp_handler
+%type <json_on_response> json_on_response
+
%type <Lex_field_type> type_with_opt_collate field_type
qualified_field_type
field_type_numeric
@@ -1363,6 +1387,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
field_type_lob
field_type_temporal
field_type_misc
+ json_table_field_type
%type <Lex_dyncol_type> opt_dyncol_type dyncol_type
numeric_dyncol_type temporal_dyncol_type string_dyncol_type
@@ -1525,7 +1550,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
table_primary_derived table_primary_derived_opt_parens
derived_table_list table_reference_list_parens
nested_table_reference_list join_table_parens
- update_table_list
+ update_table_list table_function
%type <date_time_type> date_time_type;
%type <interval> interval
@@ -1680,6 +1705,9 @@ End SQL_MODE_ORACLE_SPECIFIC */
opt_delete_gtid_domain
asrow_attribute
opt_constraint_no_id
+ json_table_columns_clause json_table_columns_list json_table_column
+ json_table_column_type json_opt_on_empty_or_error
+ json_on_error_response json_on_empty_response
%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt
%type <NONE> sp_if_then_statements sp_case_then_statements
@@ -11461,10 +11489,194 @@ table_ref:
}
;
+json_text_literal:
+ TEXT_STRING
+ {
+ Lex->json_table->m_text_literal_cs= NULL;
+ }
+ | NCHAR_STRING
+ {
+ Lex->json_table->m_text_literal_cs= national_charset_info;
+ }
+ | UNDERSCORE_CHARSET TEXT_STRING
+ {
+ Lex->json_table->m_text_literal_cs= $1;
+ $$= $2;
+ }
+ ;
+
join_table_list:
derived_table_list { MYSQL_YYABORT_UNLESS($$=$1); }
;
+json_table_columns_clause:
+ COLUMNS '(' json_table_columns_list ')'
+ {}
+ ;
+
+json_table_columns_list:
+ json_table_column
+ | json_table_columns_list ',' json_table_column
+ {}
+ ;
+
+json_table_column:
+ ident
+ {
+ LEX *lex=Lex;
+ Create_field *f= new (thd->mem_root) Create_field();
+
+ if (unlikely(check_string_char_length(&$1, 0, NAME_CHAR_LEN,
+ system_charset_info, 1)))
+ my_yyabort_error((ER_TOO_LONG_IDENT, MYF(0), $1.str));
+
+ lex->json_table->m_cur_json_table_column=
+ new (thd->mem_root) Json_table_column(f,
+ lex->json_table->m_sql_nest);
+
+ if (unlikely(!f ||
+ !lex->json_table->m_cur_json_table_column))
+ MYSQL_YYABORT;
+
+ lex->init_last_field(f, &$1, NULL);
+ }
+ json_table_column_type
+ {
+ LEX *lex=Lex;
+ if (unlikely(lex->json_table->
+ m_cur_json_table_column->m_field->check(thd)))
+ MYSQL_YYABORT;
+ lex->json_table->m_columns.push_back(
+ lex->json_table->m_cur_json_table_column, thd->mem_root);
+ }
+ | NESTED_SYM PATH_SYM json_text_literal
+ {
+ LEX *lex=Lex;
+ Json_table_nested_path *np= new (thd->mem_root)
+ Json_table_nested_path(lex->json_table->m_sql_nest);
+ np->set_path(thd, $3);
+ lex->json_table->add_nested(np);
+ }
+ json_table_columns_clause
+ {
+ LEX *lex=Lex;
+ lex->json_table->leave_nested();
+ }
+ ;
+
+json_table_column_type:
+ FOR_SYM ORDINALITY_SYM
+ {
+ Lex_field_type_st type;
+ type.set_handler_length_flags(&type_handler_slong, 0, 0);
+ Lex->last_field->set_attributes(thd, type, Lex->charset,
+ COLUMN_DEFINITION_TABLE_FIELD);
+ Lex->json_table->m_cur_json_table_column->
+ set(Json_table_column::FOR_ORDINALITY);
+ }
+ | json_table_field_type PATH_SYM json_text_literal
+ json_opt_on_empty_or_error
+ {
+ Lex->last_field->set_attributes(thd, $1, Lex->charset,
+ COLUMN_DEFINITION_TABLE_FIELD);
+ if (Lex->json_table->m_cur_json_table_column->
+ set(thd, Json_table_column::PATH, $3))
+ {
+ MYSQL_YYABORT;
+ }
+ }
+ | json_table_field_type EXISTS PATH_SYM json_text_literal
+ {
+ Lex->last_field->set_attributes(thd, $1, Lex->charset,
+ COLUMN_DEFINITION_TABLE_FIELD);
+ Lex->json_table->m_cur_json_table_column->
+ set(thd, Json_table_column::EXISTS_PATH, $4);
+ }
+ ;
+
+json_table_field_type:
+ field_type_numeric
+ | field_type_temporal
+ | field_type_string opt_collate
+ {
+ set_collation($2);
+ }
+ | field_type_lob opt_collate
+ {
+ set_collation($2);
+ }
+ ;
+
+json_opt_on_empty_or_error:
+ /* none */
+ {}
+ | json_on_error_response
+ | json_on_error_response json_on_empty_response
+ | json_on_empty_response
+ | json_on_empty_response json_on_error_response
+ ;
+
+json_on_response:
+ ERROR_SYM
+ {
+ $$.m_response= Json_table_column::RESPONSE_ERROR;
+ }
+ | NULL_SYM
+ {
+ $$.m_response= Json_table_column::RESPONSE_NULL;
+ }
+ | DEFAULT json_text_literal
+ {
+ $$.m_response= Json_table_column::RESPONSE_DEFAULT;
+ $$.m_default= $2;
+ Lex->json_table->m_cur_json_table_column->m_defaults_cs=
+ thd->variables.collation_connection;
+ }
+ ;
+
+json_on_error_response:
+ json_on_response ON ERROR_SYM
+ {
+ Lex->json_table->m_cur_json_table_column->m_on_error= $1;
+ }
+ ;
+
+json_on_empty_response:
+ json_on_response ON EMPTY_SYM
+ {
+ Lex->json_table->m_cur_json_table_column->m_on_empty= $1;
+ }
+ ;
+
+table_function:
+ JSON_TABLE_SYM '(' expr ','
+ {
+ Table_function_json_table *jt=
+ new (thd->mem_root) Table_function_json_table($3);
+ if (unlikely(!jt))
+ MYSQL_YYABORT;
+ Lex->json_table= jt;
+ jt->m_sql_nest= &jt->m_nested_path;
+ }
+ json_text_literal json_table_columns_clause ')' opt_as ident_table_alias
+ {
+ SELECT_LEX *sel= Select;
+ if (unlikely(Lex->json_table->m_nested_path.set_path(thd, $6)))
+ MYSQL_YYABORT;
+ sel->table_join_options= 0;
+ if (!($$= Select->add_table_to_list(thd,
+ new (thd->mem_root) Table_ident(thd, &empty_clex_str,
+ &$10, TRUE),
+ NULL,
+ Select->get_table_join_options() |
+ TL_OPTION_TABLE_FUNCTION,
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type,
+ 0,0,0, Lex->json_table)))
+ MYSQL_YYABORT;
+ }
+ ;
+
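For orientation, here is a hedged usage sketch of the syntax accepted by the grammar rules above (the JSON document, column names and alias are invented for this illustration and are not taken from the patch's test files). It exercises FOR ORDINALITY, PATH with a DEFAULT ... ON EMPTY response, EXISTS PATH and NESTED PATH:

select jt.*
from json_table(
  '[{"name":"Laptop","price":800,"tags":["new","sale"]},{"name":"Mouse"}]',
  '$[*]' columns (
    rid for ordinality,
    name varchar(32) path '$.name',
    price int path '$.price' default '0' on empty,
    has_tags int exists path '$.tags',
    nested path '$.tags[*]' columns (tag varchar(16) path '$')
  )
) as jt;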
/*
The ODBC escape syntax for Outer Join is: '{' OJ join_table '}'
The parser does not define OJ as a token, any ident is accepted
@@ -11672,6 +11884,7 @@ table_factor:
$$= $1;
}
| table_reference_list_parens { $$= $1; }
+ | table_function { $$= $1; }
;
table_primary_ident_opt_parens:
@@ -15697,6 +15910,7 @@ keyword_sp_var_and_label:
| MYSQL_SYM
| MYSQL_ERRNO_SYM
| NAME_SYM
+ | NESTED_SYM
| NEVER_SYM
| NEXT_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2
| NEXTVAL_SYM
@@ -15715,7 +15929,7 @@ keyword_sp_var_and_label:
| OLD_PASSWORD_SYM
| ONE_SYM
| ONLINE_SYM
- | ONLY_SYM
+ | ORDINALITY_SYM
| OVERLAPS_SYM
| PACKAGE_MARIADB_SYM
| PACK_KEYS_SYM
@@ -15911,6 +16125,7 @@ reserved_keyword_udt_not_param_type:
| ELSE
| ELSEIF_MARIADB_SYM
| ELSIF_ORACLE_SYM
+ | EMPTY_SYM
| ENCLOSED
| ESCAPED
| EXCEPT_SYM
@@ -15992,6 +16207,7 @@ reserved_keyword_udt_not_param_type:
| PAGE_CHECKSUM_SYM
| PARSE_VCOL_EXPR_SYM
| PARTITION_SYM
+ | PATH_SYM
| PERCENT_RANK_SYM
| PERCENTILE_CONT_SYM
| PERCENTILE_DISC_SYM
diff --git a/sql/table.cc b/sql/table.cc
index 6cd2b1690cf..cc22be2ba7f 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -6722,6 +6722,8 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
LEX_CSTRING *name)
{
bool save_wrapper= thd->lex->first_select_lex()->no_wrap_view_item;
+ bool *wrapper_to_set= thd->lex->current_select ?
+ &thd->lex->current_select->no_wrap_view_item : &save_wrapper;
Item *field= *field_ref;
DBUG_ENTER("create_view_field");
@@ -6737,17 +6739,17 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
}
DBUG_ASSERT(field);
- thd->lex->current_select->no_wrap_view_item= TRUE;
+ *wrapper_to_set= TRUE;
if (!field->is_fixed())
{
if (field->fix_fields(thd, field_ref))
{
- thd->lex->current_select->no_wrap_view_item= save_wrapper;
+ *wrapper_to_set= save_wrapper;
DBUG_RETURN(0);
}
field= *field_ref;
}
- thd->lex->current_select->no_wrap_view_item= save_wrapper;
+ *wrapper_to_set= save_wrapper;
if (save_wrapper)
{
DBUG_RETURN(field);
@@ -6884,7 +6886,7 @@ const char *Field_iterator_table_ref::get_table_name()
DBUG_ASSERT(!strcmp(table_ref->table_name.str,
table_ref->table->s->table_name.str) ||
- table_ref->schema_table);
+ table_ref->schema_table || table_ref->table_function);
return table_ref->table_name.str;
}
@@ -6903,7 +6905,8 @@ const char *Field_iterator_table_ref::get_db_name()
*/
DBUG_ASSERT(!cmp(&table_ref->db, &table_ref->table->s->db) ||
(table_ref->schema_table &&
- is_infoschema_db(&table_ref->table->s->db)));
+ is_infoschema_db(&table_ref->table->s->db)) ||
+ table_ref->table_function);
return table_ref->db.str;
}
@@ -8096,7 +8099,8 @@ bool TABLE::is_filled_at_execution()
*/
return MY_TEST(!pos_in_table_list ||
pos_in_table_list->jtbm_subselect ||
- pos_in_table_list->is_active_sjm());
+ pos_in_table_list->is_active_sjm() ||
+ pos_in_table_list->table_function);
}
diff --git a/sql/table.h b/sql/table.h
index 83e8b69b5e3..9bc8a06bc83 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -63,6 +63,7 @@ class Range_rowid_filter_cost_info;
class derived_handler;
class Pushdown_derived;
struct Name_resolution_context;
+class Table_function_json_table;
/*
Used to identify NESTED_JOIN structures within a join (applicable only to
@@ -2203,6 +2204,7 @@ struct TABLE_LIST
const char *option; /* Used by cache index */
Item *on_expr; /* Used with outer join */
Name_resolution_context *on_context; /* For ON expressions */
+ Table_function_json_table *table_function; /* If it's the table function. */
Item *sj_on_expr;
/*
@@ -2590,7 +2592,7 @@ struct TABLE_LIST
void cleanup_items();
bool placeholder()
{
- return derived || view || schema_table || !table;
+ return derived || view || schema_table || !table || table_function;
}
void print(THD *thd, table_map eliminated_tables, String *str,
enum_query_type query_type);
diff --git a/sql/table_function.cc b/sql/table_function.cc
new file mode 100644
index 00000000000..c12f5a86f1e
--- /dev/null
+++ b/sql/table_function.cc
@@ -0,0 +1,1249 @@
+/*
+ Copyright (c) 2020, MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
+*/
+
+#include "mariadb.h"
+#include "sql_priv.h"
+#include "sql_class.h" /* TMP_TABLE_PARAM */
+#include "table.h"
+#include "item_jsonfunc.h"
+#include "table_function.h"
+#include "sql_show.h"
+
+
+class table_function_handlerton
+{
+public:
+ handlerton m_hton;
+ table_function_handlerton()
+ {
+ bzero(&m_hton, sizeof(m_hton));
+ m_hton.tablefile_extensions= hton_no_exts;
+ m_hton.slot= HA_SLOT_UNDEF;
+ }
+};
+
+
+static table_function_handlerton table_function_hton;
+
+
+/*
+ A table that produces output rows for JSON_TABLE().
+*/
+
+class ha_json_table: public handler
+{
+protected:
+ Table_function_json_table *m_jt;
+ String m_tmps;
+ String *m_js;
+ uchar *m_cur_pos;
+public:
+ ha_json_table(TABLE_SHARE *share_arg, Table_function_json_table *jt):
+ handler(&table_function_hton.m_hton, share_arg), m_jt(jt)
+ {
+ /*
+ set the mark_trx_read_write_done to avoid the
+ handler::mark_trx_read_write_internal() call.
+ It relies on &ha_thd()->ha_data[ht->slot].ha_info[0] to be set.
+ But we don't set the ha_data for the ha_json_table, and
+ that call makes no sense for ha_json_table.
+ */
+ mark_trx_read_write_done= 1;
+ ref_length= (jt->m_depth+1)*(4+4) + jt->m_depth * 1;
+ }
+ ~ha_json_table() {}
+ handler *clone(const char *name, MEM_ROOT *mem_root) { return NULL; }
+ const char *index_type(uint inx) { return "NONE"; }
+ /* Rows also use a fixed-size format */
+ enum row_type get_row_type() const { return ROW_TYPE_FIXED; }
+ ulonglong table_flags() const
+ {
+ return (HA_FAST_KEY_READ | /*HA_NO_BLOBS |*/ HA_NULL_IN_KEY |
+ HA_CAN_SQL_HANDLER |
+ HA_REC_NOT_IN_SEQ | HA_NO_TRANSACTIONS |
+ HA_HAS_RECORDS | HA_CAN_HASH_KEYS);
+ }
+ ulong index_flags(uint inx, uint part, bool all_parts) const
+ {
+ return HA_ONLY_WHOLE_INDEX | HA_KEY_SCAN_NOT_ROR;
+ }
+ uint max_supported_keys() const { return 1; }
+ uint max_supported_key_part_length() const { return MAX_KEY_LENGTH; }
+
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void) { return 0; }
+ int rnd_init(bool scan);
+ int rnd_next(uchar *buf);
+ int rnd_pos(uchar * buf, uchar *pos);
+ void position(const uchar *record);
+ int can_continue_handler_scan() { return 1; }
+ int info(uint);
+ int extra(enum ha_extra_function operation);
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+ { return NULL; }
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info)
+ { return 1; }
+private:
+ void update_key_stats();
+};
+
+
+/*
+ Helper class that creates the temporary table that
+ represents the table function in the query.
+*/
+
+class Create_json_table: public Data_type_statistics
+{
+ // The following members are initialized only in start()
+ Field **m_default_field;
+ uchar *m_bitmaps;
+ // The following members are initialized in ctor
+ uint m_temp_pool_slot;
+ uint m_null_count;
+public:
+ Create_json_table(const TMP_TABLE_PARAM *param,
+ bool save_sum_fields)
+ :m_temp_pool_slot(MY_BIT_NONE),
+ m_null_count(0)
+ { }
+
+ void add_field(TABLE *table, Field *field, uint fieldnr, bool force_not_null_cols);
+
+ TABLE *start(THD *thd,
+ TMP_TABLE_PARAM *param,
+ Table_function_json_table *jt,
+ const LEX_CSTRING *table_alias);
+
+ bool add_json_table_fields(THD *thd, TABLE *table,
+ Table_function_json_table *jt);
+ bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
+ Table_function_json_table *jt);
+};
+
+
+/*
+ @brief
+ Start scanning the JSON document in [str ... end]
+
+ @detail
+ Note: non-root nested paths are set to scan one JSON node (that is, a
+ "subdocument")
+*/
+
+void Json_table_nested_path::scan_start(CHARSET_INFO *i_cs,
+ const uchar *str, const uchar *end)
+{
+ json_get_path_start(&m_engine, i_cs, str, end, &m_cur_path);
+ m_cur_nested= 0;
+ m_n_cur_nested= 0;
+ m_null= false;
+ m_ordinality_counter= 0;
+}
+
+
+/*
+ @brief
+ Find the next JSON element that matches the search path.
+*/
+
+int Json_table_nested_path::scan_next()
+{
+ bool no_records_found= false;
+ if (m_cur_nested)
+ {
+ for (;;)
+ {
+ if (m_cur_nested->scan_next() == 0)
+ return 0;
+ m_n_cur_nested++;
+ if (!(m_cur_nested= m_cur_nested->m_next_nested))
+ break;
+handle_new_nested:
+ m_cur_nested->scan_start(m_engine.s.cs, m_engine.value_begin,
+ m_engine.s.str_end);
+ }
+ if (no_records_found)
+ return 0;
+ }
+
+ DBUG_ASSERT(!m_cur_nested);
+
+ while (!json_get_path_next(&m_engine, &m_cur_path))
+ {
+ if (json_path_compare(&m_path, &m_cur_path, m_engine.value_type))
+ continue;
+ /* path found. */
+ ++m_ordinality_counter;
+
+ if (!m_nested)
+ return 0;
+
+ m_cur_nested= m_nested;
+ m_n_cur_nested= 0;
+ no_records_found= true;
+ goto handle_new_nested;
+ }
+
+ m_null= true;
+ return 1;
+}
+
+
+/*
+ Stores the current position in the following format:
+ [0..3] - position in the JSON string
+ [4..7] - ORDINALITY counter value
+ and, if there are nested paths:
+ [8] - index of the current NESTED PATH
+ [9...] - position within that nested path
+*/
+void Json_table_nested_path::get_current_position(
+ const uchar *j_start, uchar *pos) const
+{
+ long j_pos= (long) (m_engine.s.c_str - j_start);
+ int4store(pos, j_pos);
+ int4store(pos+4, m_ordinality_counter);
+ if (m_cur_nested)
+ {
+ pos[8]= (uchar) m_n_cur_nested;
+ m_cur_nested->get_current_position(m_engine.s.c_str, pos + 9);
+ }
+}
+
+
+/*
+ Sets the object's json parser to the specified position
+ and restores the m_ordinality_counter.
+*/
+void Json_table_nested_path::set_position(const uchar *j_start,
+ const uchar *j_end, const uchar *pos)
+{
+ const uchar *s_pos= (const uchar *) j_start + sint4korr(pos);
+ m_null= FALSE;
+ scan_start(m_engine.s.cs, j_start, j_end);
+
+ while (m_engine.s.c_str < s_pos)
+ {
+ if (json_get_path_next(&m_engine, &m_cur_path))
+ {
+ DBUG_ASSERT(FALSE); /* should never get here. */
+ }
+ }
+ DBUG_ASSERT(m_engine.s.c_str == s_pos);
+
+ if (m_nested)
+ {
+ unsigned char n_cur_nest= pos[8];
+ m_n_cur_nested= n_cur_nest;
+ for (Json_table_nested_path *np= m_nested; np; np= np->m_next_nested)
+ {
+ np->m_null= TRUE;
+ if (n_cur_nest-- == 0)
+ m_cur_nested= np;
+ }
+
+ m_cur_nested->set_position(j_start, j_end, pos+9);
+ }
+ m_ordinality_counter= sint4korr(pos+4);
+}
+
+
+int ha_json_table::open(const char *name, int mode, uint test_if_locked)
+{
+ m_cur_pos= (uchar*) alloc_root(&table->mem_root, ALIGN_SIZE(ref_length));
+ return 0;
+}
+
+
+int ha_json_table::extra(enum ha_extra_function operation)
+{
+ return 0;
+}
+
+
+int ha_json_table::rnd_init(bool scan)
+{
+ Json_table_nested_path &p= m_jt->m_nested_path;
+ DBUG_ENTER("ha_json_table::rnd_init");
+
+ if ((m_js= m_jt->m_json->val_str(&m_tmps)))
+ {
+ p.scan_start(m_js->charset(),
+ (const uchar *) m_js->ptr(), (const uchar *) m_js->end());
+ }
+
+ DBUG_RETURN(0);
+}
+
+
+static int store_json_in_field(Field *f, const json_engine_t *je)
+{
+ switch (je->value_type)
+ {
+ case JSON_VALUE_NULL:
+ f->set_null();
+ return 0;
+
+ case JSON_VALUE_TRUE:
+ case JSON_VALUE_FALSE:
+ {
+ Item_result rt= f->result_type();
+ if (rt == INT_RESULT || rt == DECIMAL_RESULT || rt == REAL_RESULT)
+ return f->store(je->value_type == JSON_VALUE_TRUE, false);
+ break;
+ }
+ default:
+ break;
+ };
+
+ return f->store((const char *) je->value, (uint32) je->value_len, je->s.cs);
+}
+
+
+int ha_json_table::rnd_next(uchar *buf)
+{
+ Field **f= table->field;
+ Json_table_column *jc;
+ enum_check_fields cf_orig;
+
+ if (!m_js)
+ return HA_ERR_END_OF_FILE;
+
+ m_jt->m_nested_path.get_current_position((uchar *) m_js->ptr(), m_cur_pos);
+ if (m_jt->m_nested_path.scan_next())
+ {
+ if (m_jt->m_nested_path.m_engine.s.error)
+ {
+ report_json_error_ex(m_js->ptr(), &m_jt->m_nested_path.m_engine,
+ "JSON_TABLE", 0, Sql_condition::WARN_LEVEL_ERROR);
+
+ /*
+ We have already reported an error, so return an
+ error code that doesn't produce any extra
+ messages.
+ */
+ return HA_ERR_TABLE_IN_FK_CHECK;
+ }
+ return HA_ERR_END_OF_FILE;
+ }
+
+ cf_orig= table->in_use->count_cuted_fields;
+ table->in_use->count_cuted_fields= CHECK_FIELD_EXPRESSION;
+ /*
+ Get the values for each field of the table
+ */
+ List_iterator_fast<Json_table_column> jc_i(m_jt->m_columns);
+ my_ptrdiff_t ptrdiff= buf - table->record[0];
+ while ((jc= jc_i++))
+ {
+ if (!bitmap_is_set(table->read_set, (*f)->field_index))
+ goto cont_loop;
+
+ if (ptrdiff)
+ (*f)->move_field_offset(ptrdiff);
+ if (jc->m_nest->m_null)
+ {
+ (*f)->set_null();
+ }
+ else
+ {
+ (*f)->set_notnull();
+ switch (jc->m_column_type)
+ {
+ case Json_table_column::FOR_ORDINALITY:
+ (*f)->store(jc->m_nest->m_ordinality_counter, TRUE);
+ break;
+ case Json_table_column::PATH:
+ case Json_table_column::EXISTS_PATH:
+ {
+ json_engine_t je;
+ json_engine_t &nest_je= jc->m_nest->m_engine;
+ json_path_step_t *cur_step;
+ uint array_counters[JSON_DEPTH_LIMIT];
+ int not_found;
+
+ json_scan_start(&je, nest_je.s.cs,
+ nest_je.value_begin, nest_je.s.str_end);
+
+ cur_step= jc->m_path.steps;
+ not_found= json_find_path(&je, &jc->m_path, &cur_step, array_counters) ||
+ json_read_value(&je);
+
+ if (jc->m_column_type == Json_table_column::EXISTS_PATH)
+ {
+ (*f)->store(!not_found);
+ }
+ else /*PATH*/
+ {
+ if (not_found)
+ jc->m_on_empty.respond(jc, *f);
+ else
+ {
+ if (!json_value_scalar(&je) ||
+ store_json_in_field(*f, &je))
+ jc->m_on_error.respond(jc, *f);
+ else
+ {
+ /*
+ If the path contains wildcards, check if there are
+ more matches for it in json and report an error if so.
+ */
+ if (jc->m_path.types_used &
+ (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD) &&
+ (json_scan_next(&je) ||
+ !json_find_path(&je, &jc->m_path, &cur_step,
+ array_counters)))
+ jc->m_on_error.respond(jc, *f);
+ }
+
+ }
+ }
+ break;
+ }
+ };
+ }
+ if (ptrdiff)
+ (*f)->move_field_offset(-ptrdiff);
+cont_loop:
+ f++;
+ }
+ table->in_use->count_cuted_fields= cf_orig;
+ return 0;
+}
+
+
+int ha_json_table::rnd_pos(uchar * buf, uchar *pos)
+{
+ m_jt->m_nested_path.set_position((const uchar *) m_js->ptr(),
+ (const uchar *) m_js->end(), pos);
+ return rnd_next(buf);
+}
+
+
+void ha_json_table::position(const uchar *record)
+{
+ memcpy(ref, m_cur_pos, ref_length);
+}
+
+
+int ha_json_table::info(uint)
+{
+ /*
+ We don't want 0 or 1 in stats.records.
+ Though this value shouldn't matter, as the optimizer
+ is supposed to use Table_function_json_table::get_estimates
+ to obtain this data.
+ */
+ stats.records= 4;
+ return 0;
+}
+
+
+void Create_json_table::add_field(TABLE *table, Field *field,
+ uint fieldnr, bool force_not_null_cols)
+{
+ DBUG_ASSERT(!field->field_name.str ||
+ strlen(field->field_name.str) == field->field_name.length);
+
+ if (force_not_null_cols)
+ {
+ field->flags|= NOT_NULL_FLAG;
+ field->null_ptr= NULL;
+ }
+
+ if (!(field->flags & NOT_NULL_FLAG))
+ m_null_count++;
+
+ table->s->reclength+= field->pack_length();
+
+ // Assign it here, before update_data_type_statistics() changes m_blob_count
+ if (field->flags & BLOB_FLAG)
+ table->s->blob_field[m_blob_count]= fieldnr;
+
+ table->field[fieldnr]= field;
+ field->field_index= fieldnr;
+
+ field->update_data_type_statistics(this);
+}
+
+
+/**
+ Create a json table according to a field list.
+
+ @param thd thread handle
+ @param param a description used as input to create the table
+ @param jt the json_table specification
+ @param table_alias alias
+*/
+
+TABLE *Create_json_table::start(THD *thd,
+ TMP_TABLE_PARAM *param,
+ Table_function_json_table *jt,
+ const LEX_CSTRING *table_alias)
+{
+ MEM_ROOT *mem_root_save, own_root;
+ TABLE *table;
+ TABLE_SHARE *share;
+ uint copy_func_count= param->func_count;
+ char *tmpname,path[FN_REFLEN];
+ Field **reg_field;
+ uint *blob_field;
+ DBUG_ENTER("Create_json_table::start");
+ DBUG_PRINT("enter",
+ ("table_alias: '%s' ", table_alias->str));
+
+ if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
+ m_temp_pool_slot = bitmap_lock_set_next(&temp_pool);
+
+ if (m_temp_pool_slot != MY_BIT_NONE) // we got a slot
+ sprintf(path, "%s-%lx-%i", tmp_file_prefix,
+ current_pid, m_temp_pool_slot);
+ else
+ {
+ /* if we run out of slots or we are not using tempool */
+ sprintf(path, "%s-%lx-%lx-%x", tmp_file_prefix,current_pid,
+ (ulong) thd->thread_id, thd->tmp_table++);
+ }
+
+ /*
+ No need to change table name to lower case.
+ */
+ fn_format(path, path, mysql_tmpdir, "",
+ MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+
+ const uint field_count= param->field_count;
+ DBUG_ASSERT(field_count);
+
+ init_sql_alloc(key_memory_TABLE, &own_root,
+ TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
+
+ if (!multi_alloc_root(&own_root,
+ &table, sizeof(*table),
+ &share, sizeof(*share),
+ &reg_field, sizeof(Field*) * (field_count+1),
+ &m_default_field, sizeof(Field*) * (field_count),
+ &blob_field, sizeof(uint)*(field_count+1),
+ &param->items_to_copy,
+ sizeof(param->items_to_copy[0])*(copy_func_count+1),
+ &param->keyinfo, sizeof(*param->keyinfo),
+ &param->start_recinfo,
+ sizeof(*param->recinfo)*(field_count*2+4),
+ &tmpname, (uint) strlen(path)+1,
+ &m_bitmaps, bitmap_buffer_size(field_count)*6,
+ NullS))
+ {
+ DBUG_RETURN(NULL); /* purecov: inspected */
+ }
+ strmov(tmpname, path);
+ /* make table according to fields */
+
+ bzero((char*) table,sizeof(*table));
+ bzero((char*) reg_field, sizeof(Field*) * (field_count+1));
+ bzero((char*) m_default_field, sizeof(Field*) * (field_count));
+
+ table->mem_root= own_root;
+ mem_root_save= thd->mem_root;
+ thd->mem_root= &table->mem_root;
+
+ table->field=reg_field;
+ table->alias.set(table_alias->str, table_alias->length, table_alias_charset);
+
+ table->reginfo.lock_type=TL_WRITE; /* Will be updated */
+ table->map=1;
+ table->temp_pool_slot= m_temp_pool_slot;
+ table->copy_blobs= 1;
+ table->in_use= thd;
+ table->no_rows_with_nulls= param->force_not_null_cols;
+
+ table->s= share;
+ init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
+ share->blob_field= blob_field;
+ share->table_charset= param->table_charset;
+ share->primary_key= MAX_KEY; // Indicate no primary key
+ share->not_usable_by_query_cache= FALSE;
+ if (param->schema_table)
+ share->db= INFORMATION_SCHEMA_NAME;
+
+ param->using_outer_summary_function= 0;
+
+ share->db_plugin= NULL;
+ if (!(table->file= new (&table->mem_root) ha_json_table(share, jt)))
+ DBUG_RETURN(NULL);
+
+ table->file->init();
+
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(table);
+}
+
+
+bool Create_json_table::finalize(THD *thd, TABLE *table,
+ TMP_TABLE_PARAM *param,
+ Table_function_json_table *jt)
+{
+ DBUG_ENTER("Create_json_table::finalize");
+ DBUG_ASSERT(table);
+
+ uint null_pack_length;
+ bool use_packed_rows= false;
+ uchar *pos;
+ uchar *null_flags;
+ TMP_ENGINE_COLUMNDEF *recinfo;
+ TABLE_SHARE *share= table->s;
+
+ MEM_ROOT *mem_root_save= thd->mem_root;
+ thd->mem_root= &table->mem_root;
+
+ DBUG_ASSERT(param->field_count >= share->fields);
+ DBUG_ASSERT(param->field_count >= share->blob_fields);
+
+ if (table->file->set_ha_share_ref(&share->ha_share))
+ {
+ delete table->file;
+ goto err;
+ }
+
+ if (share->blob_fields == 0)
+ m_null_count++;
+
+ null_pack_length= (m_null_count + m_uneven_bit_length + 7) / 8;
+ share->reclength+= null_pack_length;
+ if (!share->reclength)
+ share->reclength= 1; // Dummy select
+
+ {
+ uint alloc_length= ALIGN_SIZE(share->reclength + MI_UNIQUE_HASH_LENGTH+1);
+ share->rec_buff_length= alloc_length;
+ if (!(table->record[0]= (uchar*)
+ alloc_root(&table->mem_root, alloc_length*3)))
+ goto err;
+ table->record[1]= table->record[0]+alloc_length;
+ share->default_values= table->record[1]+alloc_length;
+ }
+
+ setup_tmp_table_column_bitmaps(table, m_bitmaps, table->s->fields);
+
+ recinfo=param->start_recinfo;
+ null_flags=(uchar*) table->record[0];
+ pos=table->record[0]+ null_pack_length;
+ if (null_pack_length)
+ {
+ bzero((uchar*) recinfo,sizeof(*recinfo));
+ recinfo->type=FIELD_NORMAL;
+ recinfo->length=null_pack_length;
+ recinfo++;
+ bfill(null_flags,null_pack_length,255); // Set null fields
+
+ table->null_flags= (uchar*) table->record[0];
+ share->null_fields= m_null_count;
+ share->null_bytes= share->null_bytes_for_compare= null_pack_length;
+ }
+ m_null_count= (share->blob_fields == 0) ? 1 : 0;
+ for (uint i= 0; i < share->fields; i++, recinfo++)
+ {
+ Field *field= table->field[i];
+ uint length;
+ bzero((uchar*) recinfo,sizeof(*recinfo));
+
+ if (!(field->flags & NOT_NULL_FLAG))
+ {
+ recinfo->null_bit= (uint8)1 << (m_null_count & 7);
+ recinfo->null_pos= m_null_count/8;
+ field->move_field(pos, null_flags + m_null_count/8,
+ (uint8)1 << (m_null_count & 7));
+ m_null_count++;
+ }
+ else
+ field->move_field(pos,(uchar*) 0,0);
+ if (field->type() == MYSQL_TYPE_BIT)
+ {
+ /* We have to reserve place for extra bits among null bits */
+ ((Field_bit*) field)->set_bit_ptr(null_flags + m_null_count / 8,
+ m_null_count & 7);
+ m_null_count+= (field->field_length & 7);
+ }
+ field->reset();
+
+ /*
+ Test if there is a default field value. The test for ->ptr is to skip
+ 'offset' fields generated by initialize_tables
+ */
+ if (m_default_field[i] && m_default_field[i]->ptr)
+ {
+ /*
+ default_field[i] is set only in the cases when 'field' can
+ inherit the default value that is defined for the field referred
+ by the Item_field object from which 'field' has been created.
+ */
+ const Field *orig_field= m_default_field[i];
+ /* Get the value from default_values */
+ if (orig_field->is_null_in_record(orig_field->table->s->default_values))
+ field->set_null();
+ else
+ {
+ field->set_notnull();
+ memcpy(field->ptr,
+ orig_field->ptr_in_record(orig_field->table->s->default_values),
+ field->pack_length_in_rec());
+ }
+ }
+
+ length=field->pack_length();
+ pos+= length;
+
+ /* Make entry for create table */
+ recinfo->length=length;
+ recinfo->type= field->tmp_engine_column_type(use_packed_rows);
+
+ // fix table name in field entry
+ field->set_table_name(&table->alias);
+ }
+
+ param->recinfo= recinfo; // Pointer to after last field
+ store_record(table,s->default_values); // Make empty default record
+
+ share->max_rows= ~(ha_rows) 0;
+ param->end_write_records= HA_POS_ERROR;
+
+ share->db_record_offset= 1;
+
+ if (unlikely(table->file->ha_open(table, table->s->path.str, O_RDWR,
+ HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
+ goto err;
+
+ table->db_stat= HA_OPEN_KEYFILE;
+ table->set_created();
+
+ thd->mem_root= mem_root_save;
+
+ DBUG_RETURN(false);
+
+err:
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(true);
+}
+
+
+/*
+ @brief
+ Read the JSON_TABLE's field definitions from @jt and add the fields to
+ table @table.
+*/
+
+bool Create_json_table::add_json_table_fields(THD *thd, TABLE *table,
+ Table_function_json_table *jt)
+{
+ TABLE_SHARE *share= table->s;
+ Json_table_column *jc;
+ uint fieldnr= 0;
+ MEM_ROOT *mem_root_save= thd->mem_root;
+ List_iterator_fast<Json_table_column> jc_i(jt->m_columns);
+
+ DBUG_ENTER("add_json_table_fields");
+
+ thd->mem_root= &table->mem_root;
+
+ while ((jc= jc_i++))
+ {
+ Create_field *sql_f= jc->m_field;
+ List_iterator_fast<Json_table_column> it2(jt->m_columns);
+ Json_table_column *jc2;
+ if (!(jc->m_explicit_cs= sql_f->charset))
+ sql_f->charset= thd->variables.collation_server;
+
+ if (sql_f->prepare_stage1(thd, thd->mem_root, table->file,
+ table->file->ha_table_flags()))
+ goto err_exit;
+
+ while ((jc2= it2++) != jc)
+ {
+ if (lex_string_cmp(system_charset_info,
+ &sql_f->field_name, &jc2->m_field->field_name) == 0)
+ {
+ my_error(ER_DUP_FIELDNAME, MYF(0), sql_f->field_name.str);
+ goto err_exit;
+ }
+ }
+ it2.rewind();
+ }
+
+ jc_i.rewind();
+
+ while ((jc= jc_i++))
+ {
+ Create_field *sql_f= jc->m_field;
+ Record_addr addr(!(sql_f->flags & NOT_NULL_FLAG));
+ Bit_addr bit(addr.null());
+
+ sql_f->prepare_stage2(table->file, table->file->ha_table_flags());
+
+ if (!sql_f->charset)
+ sql_f->charset= &my_charset_utf8mb4_bin;
+
+ Field *f= sql_f->type_handler()->make_table_field_from_def(share,
+ thd->mem_root, &sql_f->field_name, addr, bit, sql_f, sql_f->flags);
+ if (!f)
+ goto err_exit;
+ f->init(table);
+ add_field(table, f, fieldnr++, FALSE);
+ }
+
+ share->fields= fieldnr;
+ share->blob_fields= m_blob_count;
+ table->field[fieldnr]= 0; // End marker
+ share->blob_field[m_blob_count]= 0; // End marker
+ share->column_bitmap_size= bitmap_buffer_size(share->fields);
+
+ thd->mem_root= mem_root_save;
+
+ DBUG_RETURN(FALSE);
+err_exit:
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(TRUE);
+}
+
+
+/*
+ @brief
+ Given a TABLE_LIST representing JSON_TABLE(...) syntax, create a temporary
+ table for it.
+
+ @detail
+ The temporary table will have:
+ - fields whose names/datatypes are specified in JSON_TABLE(...) syntax
+ - a ha_json_table as the storage engine.
+
+ The uses of the temporary table are:
+ - name resolution: the query may have references to the columns of
+ JSON_TABLE(...). A TABLE object allows them to be resolved.
+ - query execution: ha_json_table will produce JSON_TABLE's rows.
+*/
+
+TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table)
+{
+ TMP_TABLE_PARAM tp;
+ TABLE *table;
+ uint field_count= sql_table->table_function->m_columns.elements+1;
+
+ DBUG_ENTER("create_table_for_function");
+
+ tp.init();
+ tp.table_charset= system_charset_info;
+ tp.field_count= field_count;
+ {
+ Create_json_table maker(&tp, false);
+
+ if (!(table= maker.start(thd, &tp,
+ sql_table->table_function, &sql_table->alias)) ||
+ maker.add_json_table_fields(thd, table, sql_table->table_function) ||
+ maker.finalize(thd, table, &tp, sql_table->table_function))
+ {
+ if (table)
+ free_tmp_table(thd, table);
+ DBUG_RETURN(NULL);
+ }
+ }
+ sql_table->schema_table_name.length= 0;
+
+ my_bitmap_map* bitmaps=
+ (my_bitmap_map*) thd->alloc(bitmap_buffer_size(field_count));
+ my_bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
+ FALSE);
+ table->read_set= &table->def_read_set;
+ bitmap_clear_all(table->read_set);
+ table->alias_name_used= true;
+ table->next= thd->derived_tables;
+ thd->derived_tables= table;
+ table->s->tmp_table= INTERNAL_TMP_TABLE;
+ table->grant.privilege= SELECT_ACL;
+
+ sql_table->table= table;
+
+ DBUG_RETURN(table);
+}
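A minimal illustration of what the temporary table created above provides (the JSON document and column names are invented): the columns declared inside JSON_TABLE(...) resolve like columns of an ordinary table, both in the select list and in the WHERE clause, because name resolution runs against this TABLE object while ha_json_table later produces the rows:

select jt.city, jt.population
from json_table('[{"city":"Oslo","population":700000},{"city":"Hamar"}]',
                '$[*]' columns (city varchar(32) path '$.city',
                                population int path '$.population')) as jt
where jt.population > 100000;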
+
+
+int Json_table_column::set(THD *thd, enum_type ctype, const LEX_CSTRING &path)
+{
+ set(ctype);
+ if (json_path_setup(&m_path, thd->variables.collation_connection,
+ (const uchar *) path.str, (const uchar *)(path.str + path.length)))
+ {
+ report_path_error_ex(path.str, &m_path, "JSON_TABLE", 1,
+ Sql_condition::WARN_LEVEL_ERROR);
+ return 1;
+ }
+
+ /*
+ This is done so the ::print function can just print the path string.
+ Can be removed if we redo that function to print the path using its
+ actual content. Not sure though if we should.
+ */
+ m_path.s.c_str= (const uchar *) path.str;
+ return 0;
+}
+
+
+static int print_path(String *str, const json_path_t *p)
+{
+ return str->append('\'') ||
+ str->append_for_single_quote((const char *) p->s.c_str,
+ p->s.str_end - p->s.c_str) ||
+ str->append('\'');
+}
+
+
+/*
+ Print the string representation of the Json_table_column.
+
+ @param thd - the thread
+ @param f - the remaining array of Field-s from the table,
+ positioned at the Field that corresponds to this Json_table_column
+ @param str - the string where to print
+*/
+int Json_table_column::print(THD *thd, Field **f, String *str)
+{
+ StringBuffer<MAX_FIELD_WIDTH> column_type(str->charset());
+
+ if (append_identifier(thd, str, &m_field->field_name) ||
+ str->append(' '))
+ return 1;
+
+ switch (m_column_type)
+ {
+ case FOR_ORDINALITY:
+ if (str->append("FOR ORDINALITY"))
+ return 1;
+ break;
+ case EXISTS_PATH:
+ case PATH:
+ (*f)->sql_type(column_type);
+
+ if (str->append(column_type) ||
+ str->append(m_column_type == PATH ? " PATH " : " EXISTS ") ||
+ print_path(str, &m_path))
+ return 1;
+ break;
+ };
+
+ if (m_on_empty.print("EMPTY", str) ||
+ m_on_error.print("ERROR", str))
+ return 1;
+
+ return 0;
+}
+
+
+int Json_table_nested_path::set_path(THD *thd, const LEX_CSTRING &path)
+{
+ if (json_path_setup(&m_path, thd->variables.collation_connection,
+ (const uchar *) path.str, (const uchar *)(path.str + path.length)))
+ {
+ report_path_error_ex(path.str, &m_path, "JSON_TABLE", 1,
+ Sql_condition::WARN_LEVEL_ERROR);
+ return 1;
+ }
+
+ /*
+ This is done so the ::print function can just print the path string.
+ Can be removed if we redo that function to print the path using its
+ actual content. Not sure though if we should.
+ */
+ m_path.s.c_str= (const uchar *) path.str;
+ return 0;
+}
+
+
+/*
+ @brief
+ Perform the action of this response on field @f (emit an error, or set @f
+ to NULL, or set it to default value).
+*/
+
+void Json_table_column::On_response::respond(Json_table_column *jc, Field *f)
+{
+ switch (m_response)
+ {
+ case Json_table_column::RESPONSE_NOT_SPECIFIED:
+ case Json_table_column::RESPONSE_NULL:
+ f->set_null();
+ break;
+ case Json_table_column::RESPONSE_ERROR:
+ f->set_null();
+ my_error(ER_JSON_TABLE_ERROR_ON_FIELD, MYF(0),
+ f->field_name.str, f->table->alias.ptr());
+ break;
+ case Json_table_column::RESPONSE_DEFAULT:
+ f->set_notnull();
+ f->store(m_default.str,
+ m_default.length, jc->m_defaults_cs);
+ break;
+ }
+}
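A hedged sketch of the responses handled above (the JSON document and column names are invented). In the first statement the missing '$.price' yields SQL NULL for p1 (NULL ON EMPTY, which is also the behaviour when no response is specified) and the stored literal 0 for p2 (DEFAULT '0' ON EMPTY); the second statement is expected to fail with ER_JSON_TABLE_ERROR_ON_FIELD because of ERROR ON EMPTY:

select * from json_table('[{"name":"pen"}]', '$[*]'
  columns (p1 int path '$.price' null on empty,
           p2 int path '$.price' default '0' on empty)) as jt;

select * from json_table('[{"name":"pen"}]', '$[*]'
  columns (p3 int path '$.price' error on empty)) as jt;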
+
+
+int Json_table_column::On_response::print(const char *name, String *str) const
+{
+ const char *resp;
+ const LEX_CSTRING *ds= NULL;
+ if (m_response == Json_table_column::RESPONSE_NOT_SPECIFIED)
+ return 0;
+
+ switch (m_response)
+ {
+ case Json_table_column::RESPONSE_NULL:
+ resp= "NULL";
+ break;
+ case Json_table_column::RESPONSE_ERROR:
+ resp= "ERROR";
+ break;
+ case Json_table_column::RESPONSE_DEFAULT:
+ {
+ resp= "DEFAULT";
+ ds= &m_default;
+ break;
+ }
+ default:
+ DBUG_ASSERT(FALSE); /* should never happen. */
+ }
+
+ return
+ (str->append(' ') || str->append(resp) ||
+ (ds && (str->append(" '") ||
+ str->append_for_single_quote(ds->str, ds->length) ||
+ str->append('\''))) ||
+ str->append(" ON ") ||
+ str->append(name));
+}
+
+
+void Table_function_json_table::add_nested(Json_table_nested_path *np)
+{
+ *m_sql_nest->m_nested_hook= np;
+ m_sql_nest->m_nested_hook= &np->m_next_nested;
+ m_sql_nest= np;
+ if (++m_cur_depth > m_depth)
+ m_depth= m_cur_depth;
+}
+
+
+void Table_function_json_table::leave_nested()
+{
+ m_sql_nest= m_sql_nest->m_parent;
+ --m_cur_depth;
+}
+
+
+/*
+ @brief
+ Perform name-resolution phase tasks
+
+ @detail
+ - The only argument that needs resolution is the JSON text
+ - Then, we need to set dependencies: if JSON_TABLE refers to another
+ table's column, e.g.
+
+ JSON_TABLE (t1.col ... ) AS t2
+
+ then it can be computed only after table t1.
+ - The dependencies must not form a loop.
+*/
+
+int Table_function_json_table::setup(THD *thd, TABLE_LIST *sql_table)
+{
+ TABLE *t= sql_table->table;
+
+ thd->where= "JSON_TABLE argument";
+ {
+ bool save_is_item_list_lookup;
+ bool res;
+ save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup;
+ thd->lex->current_select->is_item_list_lookup= 0;
+ res= m_json->fix_fields_if_needed(thd, &m_json);
+ thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup;
+ if (res)
+ return TRUE;
+ }
+
+ {
+ List_iterator_fast<Json_table_column> jc_i(m_columns);
+ for (uint i= 0; t->field[i]; i++)
+ {
+ Json_table_column *jc= jc_i++;
+ t->field[i]->change_charset(
+ jc->m_explicit_cs ? jc->m_explicit_cs : m_json->collation);
+ }
+ }
+
+ m_dep_tables= m_json->used_tables();
+
+ if (m_dep_tables)
+ {
+ sql_table->dep_tables|= m_dep_tables;
+ t->no_cache= TRUE;
+ if (unlikely(sql_table->dep_tables & sql_table->get_map()))
+ {
+ /* Table itself is used in the argument. */
+ my_error(ER_WRONG_USAGE, MYF(0), "JSON_TABLE", "argument");
+ return TRUE;
+ }
+
+ if (sql_table->join_list)
+ {
+ List_iterator<TABLE_LIST> it(*sql_table->join_list);
+ TABLE_LIST *tbl;
+ bool dep_met= false;
+
+ while ((tbl= it++) && tbl != sql_table)
+ {
+ if (!dep_met && (m_dep_tables & tbl->get_map()))
+ dep_met= true;
+
+ if (tbl->straight && dep_met)
+ {
+ /* STRAIGHT_JOIN-ed table is used in the argument. */
+ my_error(ER_WRONG_USAGE, MYF(0), "JSON_TABLE", "argument");
+ return TRUE;
+ }
+ }
+ }
+ }
+
+ m_setup_done= true;
+ return FALSE;
+}
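A hedged example of the dependency bookkeeping performed above (table and column names are invented). Because the JSON argument refers to t1.doc, setup() records t1 in m_dep_tables, so a row of t1 must be read before jt can be scanned; referencing jt's own columns inside the argument would instead raise the ER_WRONG_USAGE error above:

create table t1 (id int, doc longtext);
insert into t1 values (1, '[{"v":10},{"v":20}]');
select t1.id, jt.v
from t1,
     json_table(t1.doc, '$[*]' columns (v int path '$.v')) as jt;
drop table t1;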
+
+void Table_function_json_table::get_estimates(ha_rows *out_rows,
+ double *scan_time, double *startup_cost)
+{
+ *out_rows= 40;
+ *scan_time= 0.0;
+ *startup_cost= 0.0;
+}
+
+
+/*
+ Print the string representation of the Json_table_nested_path object,
+ which is the COLUMNS(...) part of the JSON_TABLE definition.
+
+ @param thd - the thread
+ @param f - the remaining part of the array of Field* objects
+ taken from the TABLE.
+ It's needed as Json_table_column objects
+ don't have links to the related Field-s.
+ @param str - the string where to print
+ @param it - the remaining part of the Json_table_column list
+ @param last_column - the last column taken from the list.
+*/
+
+int Json_table_nested_path::print(THD *thd, Field ***f, String *str,
+ List_iterator_fast<Json_table_column> &it,
+ Json_table_column **last_column)
+{
+ Json_table_nested_path *c_path= this;
+ Json_table_nested_path *c_nested= m_nested;
+ Json_table_column *jc= *last_column;
+ bool first_column= TRUE;
+
+ if (str->append("COLUMNS ("))
+ return 1;
+
+ do
+ {
+ if (first_column)
+ first_column= FALSE;
+ else if (str->append(", "))
+ return 1;
+
+ if (jc->m_nest == c_path)
+ {
+ if (jc->print(thd, *f, str))
+ return 1;
+ if (!(jc= it++))
+ goto exit_ok;
+ ++(*f);
+ }
+ else if (jc->m_nest == c_nested)
+ {
+ if (str->append("NESTED PATH ") ||
+ print_path(str, &jc->m_nest->m_path) ||
+ c_nested->print(thd, f, str, it, &jc))
+ return 1;
+ c_nested= c_nested->m_next_nested;
+ }
+ else
+ break;
+ } while(jc);
+
+exit_ok:
+ if (str->append(")"))
+ return 1;
+
+ *last_column= jc;
+ return 0;
+}
+
+
+/*
+ Print the SQL definition of the JSON_TABLE.
+ Used mostly as a part of the CREATE VIEW statement.
+
+ @param thd - the thread
+ @param sql_table - the corresponding TABLE_LIST object
+ @param str - the string where to print
+ @param query_type - the query type
+*/
+int Table_function_json_table::print(THD *thd, TABLE_LIST *sql_table,
+ String *str, enum_query_type query_type)
+{
+ List_iterator_fast<Json_table_column> jc_i(m_columns);
+ Json_table_column *jc= jc_i++;
+ Field **f_list= sql_table->table->field;
+
+ DBUG_ENTER("Table_function_json_table::print");
+
+ if (str->append("JSON_TABLE("))
+ DBUG_RETURN(TRUE);
+
+ m_json->print(str, query_type);
+
+ if (str->append(", ") ||
+ print_path(str, &m_nested_path.m_path) ||
+ str->append(' ') ||
+ m_nested_path.print(thd, &f_list, str, jc_i, &jc) ||
+ str->append(')'))
+ DBUG_RETURN(TRUE);
+
+ DBUG_RETURN(0);
+}
+
+
+void Table_function_json_table::fix_after_pullout(TABLE_LIST *sql_table,
+ st_select_lex *new_parent, bool merge)
+{
+ if (m_dep_tables)
+ sql_table->dep_tables&= ~m_dep_tables;
+ m_json->fix_after_pullout(new_parent, &m_json, merge);
+ m_dep_tables= m_json->used_tables();
+
+ if (m_dep_tables)
+ sql_table->dep_tables|= m_dep_tables;
+}
+
+
diff --git a/sql/table_function.h b/sql/table_function.h
new file mode 100644
index 00000000000..9ddb03f27fb
--- /dev/null
+++ b/sql/table_function.h
@@ -0,0 +1,234 @@
+#ifndef TABLE_FUNCTION_INCLUDED
+#define TABLE_FUNCTION_INCLUDED
+
+/* Copyright (c) 2020, MariaDB Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
+
+
+#include <json_lib.h>
+
+class Json_table_column;
+
+/*
+ The Json_table_nested_path represents the 'current nesting' level
+ for a set of JSON_TABLE columns.
+ Each column (Json_table_column instance) is linked with the corresponding
+ 'nested path' object and gets its piece of JSON to parse during the
+ computation phase.
+ The root 'nested_path' is always present as a part of Table_function_json_table,
+ and other 'nested_paths' are created and linked into a tree structure when a new
+ 'NESTED PATH' is met. Nested 'nested_paths' are linked with 'm_nested'; same-level
+ 'nested_paths' are linked with 'm_next_nested'.
+ So for instance
+ JSON_TABLE( '...', '$[*]'
+ COLUMNS( a INT PATH '$.a' ,
+ NESTED PATH '$.b[*]' COLUMNS (b INT PATH '$',
+ NESTED PATH '$.c[*]' COLUMNS(x INT PATH '$')),
+ NESTED PATH '$.n[*]' COLUMNS (z INT PATH '$'))
+ results in 4 'nested_path' created:
+ root nested_b nested_c nested_n
+ m_path '$[*]' '$.b[*]' '$.c[*]' '$.n[*]'
+ m_nested &nested_b &nested_c NULL NULL
+ m_next_nested NULL &nested_n NULL NULL
+
+and 4 columns created:
+ a b x z
+ m_nest &root &nested_b &nested_c &nested_n
+*/
+
+
+class Json_table_nested_path : public Sql_alloc
+{
+public:
+ bool m_null; // TRUE <=> produce SQL NULL.
+
+ json_path_t m_path;
+ json_engine_t m_engine;
+ json_path_t m_cur_path;
+
+ /* Counts the rows produced. The value is stored in FOR ORDINALITY columns. */
+ longlong m_ordinality_counter;
+
+ /* the Json_table_nested_path that nests this. */
+ Json_table_nested_path *m_parent;
+
+ /* The head of the list of nested NESTED PATH statements. */
+ Json_table_nested_path *m_nested;
+
+ /* Items of the above list are linked through this pointer. */
+ Json_table_nested_path *m_next_nested;
+
+ /*
+ The pointer to the 'm_next_nested' member of the
+ last item of the above list, so we can add a new item to
+ the list by doing *m_nested_hook= new_item_ptr.
+ */
+ Json_table_nested_path **m_nested_hook;
+
+ /*
+ The NESTED PATH that is currently scanned in rnd_next.
+ */
+ Json_table_nested_path *m_cur_nested;
+ /*
+ The order of the above m_cur_nested in the list of the NESTED PATH.
+ Used only to build the reference in position()/rnd_pos().
+ */
+ int m_n_cur_nested;
+
+ Json_table_nested_path(Json_table_nested_path *parent_nest):
+ m_parent(parent_nest), m_nested(0), m_next_nested(0),
+ m_nested_hook(&m_nested) {}
+ int set_path(THD *thd, const LEX_CSTRING &path);
+ void scan_start(CHARSET_INFO *i_cs, const uchar *str, const uchar *end);
+ int scan_next();
+ int print(THD *thd, Field ***f, String *str,
+ List_iterator_fast<Json_table_column> &it,
+ Json_table_column **last_column);
+ void get_current_position(const uchar *j_start, uchar *pos) const;
+ void set_position(const uchar *j_start, const uchar *j_end, const uchar *pos);
+};
+
+
+class Json_table_column : public Sql_alloc
+{
+public:
+ enum enum_type
+ {
+ FOR_ORDINALITY,
+ PATH,
+ EXISTS_PATH
+ };
+
+ enum enum_on_type
+ {
+ ON_EMPTY,
+ ON_ERROR
+ };
+
+ enum enum_on_response
+ {
+ RESPONSE_NOT_SPECIFIED,
+ RESPONSE_ERROR,
+ RESPONSE_NULL,
+ RESPONSE_DEFAULT
+ };
+
+ struct On_response
+ {
+ public:
+ Json_table_column::enum_on_response m_response;
+ LEX_CSTRING m_default;
+ void respond(Json_table_column *jc, Field *f);
+ int print(const char *name, String *str) const;
+ bool specified() const { return m_response != RESPONSE_NOT_SPECIFIED; }
+ };
+
+ enum_type m_column_type;
+ json_path_t m_path;
+ On_response m_on_error;
+ On_response m_on_empty;
+ Create_field *m_field;
+ Json_table_nested_path *m_nest;
+ CHARSET_INFO *m_explicit_cs;
+ CHARSET_INFO *m_defaults_cs;
+
+ void set(enum_type ctype)
+ {
+ m_column_type= ctype;
+ }
+ int set(THD *thd, enum_type ctype, const LEX_CSTRING &path);
+ Json_table_column(Create_field *f, Json_table_nested_path *nest) :
+ m_field(f), m_nest(nest)
+ {
+ m_on_error.m_response= RESPONSE_NOT_SPECIFIED;
+ m_on_empty.m_response= RESPONSE_NOT_SPECIFIED;
+ }
+ int print(THD *tnd, Field **f, String *str);
+};
+
+
+/*
+ This class represents a table function: a function
+ that returns a table as its result and so is supposed to appear
+ in the FROM list of a SELECT statement.
+ At the moment JSON_TABLE is the only such function,
+ so the class is named after it, but it should be refactored
+ into the root of a class hierarchy if more such functions are added.
+
+ When the parser finds a table function in the FROM list, it
+ creates an instance of Table_function_json_table and stores it
+ in TABLE_LIST::table_function.
+ Then the ha_json_table instance is created based on it in
+ create_table_for_function().
+*/
+class Table_function_json_table : public Sql_alloc
+{
+protected:
+ bool m_setup_done;
+public:
+ Item *m_json; /* The JSON value to be parsed. */
+
+ /* The COLUMNS(...) part representation. */
+ Json_table_nested_path m_nested_path;
+ /* The list of table column definitions. */
+ List<Json_table_column> m_columns;
+
+ /*
+ The JSON argument can refer to other tables.
+ We have to mark those tables as dependencies; the
+ mask of the dependent tables is calculated in ::setup().
+ */
+ table_map m_dep_tables;
+
+ /*
+ The nesting depth of NESTED PATH clauses.
+ Needed to calculate the reference length.
+ m_cur_depth is used in the parser.
+ */
+ uint m_depth, m_cur_depth;
+
+ /* used in parser. */
+ Json_table_column *m_cur_json_table_column;
+ CHARSET_INFO *m_text_literal_cs;
+
+ Table_function_json_table(Item *json): m_setup_done(false),
+ m_json(json), m_nested_path(0), m_depth(0), m_cur_depth(0) {}
+
+ /*
+ Used in sql_yacc.yy.
+ Represents the current NESTED PATH level being parsed.
+ */
+ Json_table_nested_path *m_sql_nest;
+ void add_nested(Json_table_nested_path *np);
+ void leave_nested();
+
+ int setup(THD *thd, TABLE_LIST *sql_table);
+ /* TRUE if the table is ready to be used in Item_field::fix_fields */
+ bool ready_for_lookup() const { return m_setup_done; }
+ bool join_cache_allowed() const { return !m_dep_tables; }
+ void get_estimates(ha_rows *out_rows,
+ double *scan_time, double *startup_cost);
+ int print(THD *thd, TABLE_LIST *sql_table,
+ String *str, enum_query_type query_type);
+
+ void fix_after_pullout(TABLE_LIST *sql_table,
+ st_select_lex *new_parent, bool merge);
+};
+
+
+TABLE *create_table_for_function(THD *thd, TABLE_LIST *sql_table);
+
+#endif /* TABLE_FUNCTION_INCLUDED */
+