| author | Sergei Petrunia <psergey@askmonty.org> | 2014-08-09 01:52:54 +0400 |
|---|---|---|
| committer | Sergei Petrunia <psergey@askmonty.org> | 2014-08-09 01:52:54 +0400 |
| commit | 83f0ddc6294ea8d4e424a540a043bf88ee4a8c8d (patch) | |
| tree | 04280aba1544710aac2a6971a8d86a41c11cbecc | |
| parent | d87ffeb49133aa459e134f09924cd7b7b5013632 (diff) | |
| parent | 5cfd3270ec79238b27765af3062ae7d97f6f06d0 (diff) | |
| download | mariadb-git-83f0ddc6294ea8d4e424a540a043bf88ee4a8c8d.tar.gz | |
Merge 10.1 (with ANALYZE) and 10.1-explain-json
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | libmysqld/CMakeLists.txt | 1 |
| -rw-r--r-- | sql/CMakeLists.txt | 1 |
| -rw-r--r-- | sql/lex.h | 1 |
| -rw-r--r-- | sql/my_json_writer.cc | 129 |
| -rw-r--r-- | sql/my_json_writer.h | 43 |
| -rw-r--r-- | sql/sql_class.cc | 12 |
| -rw-r--r-- | sql/sql_class.h | 1 |
| -rw-r--r-- | sql/sql_explain.cc | 410 |
| -rw-r--r-- | sql/sql_explain.h | 35 |
| -rw-r--r-- | sql/sql_lex.cc | 1 |
| -rw-r--r-- | sql/sql_lex.h | 1 |
| -rw-r--r-- | sql/sql_parse.cc | 42 |
| -rw-r--r-- | sql/sql_select.cc | 30 |
| -rw-r--r-- | sql/sql_select.h | 1 |
| -rw-r--r-- | sql/sql_yacc.yy | 39 |
15 files changed, 643 insertions, 104 deletions
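
The bulk of the new code below is sql/my_json_writer.h and sql/my_json_writer.cc: a small streaming JSON writer that the EXPLAIN FORMAT=JSON code drives member by member and that collects its output in a MySQL String. The following is a minimal usage sketch (not part of the patch) of the API those files introduce, mirroring the calls that Explain_select::print_explain_json and Explain_table_access::print_explain_json make in the diff; it assumes the server build environment:

```cpp
/*
  Usage sketch (not part of the patch): drive the Json_writer API added in
  sql/my_json_writer.h the way the new print_explain_json code does.
  As in sql/my_json_writer.cc, my_global.h / sql_string.h are assumed to be
  included before my_json_writer.h.
*/
#include "my_json_writer.h"

static void write_sample_plan(Json_writer *w)
{
  w->start_object();                              // {
  w->add_member("query_block").start_object();    //   "query_block": {
  w->add_member("select_id").add_ll(1);           //     "select_id": 1,
  w->add_member("table").start_object();          //     "table": {
  w->add_member("table_name").add_str("t1");      //       "table_name": "t1",
  w->add_member("access_type").add_str("ALL");    //       "access_type": "ALL"
  w->end_object();                                //     }
  w->end_object();                                //   }
  w->end_object();                                // }
  /* w->output (a MySQL String) now holds the indented JSON text. */
}
```

The EXPLAIN code then ships writer.output back to the client as a single text column named EXPLAIN (see THD::make_explain_json_field_list in the sql_class.cc hunk below).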
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt index 0920be53baf..57a8fd830df 100644 --- a/libmysqld/CMakeLists.txt +++ b/libmysqld/CMakeLists.txt @@ -100,6 +100,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc ../sql/rpl_reporting.cc ../sql/sql_expression_cache.cc ../sql/my_apc.cc ../sql/my_apc.h + ../sql/my_json_writer.cc ../sql/my_json_writer.h ../sql/rpl_gtid.cc ../sql/sql_explain.cc ../sql/sql_explain.h ../sql/compat56.cc diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 1eb8dc7cdd6..3e9010a76d8 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -90,6 +90,7 @@ SET (SQL_SOURCE threadpool_common.cc ../sql-common/mysql_async.c my_apc.cc my_apc.h + my_json_writer.cc my_json_writer.h rpl_gtid.cc rpl_parallel.cc table_cache.cc ${CMAKE_CURRENT_BINARY_DIR}/sql_builtin.cc diff --git a/sql/lex.h b/sql/lex.h index 10a52160cf0..d66160ffec0 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -242,6 +242,7 @@ static SYMBOL symbols[] = { { "FOR", SYM(FOR_SYM)}, { "FORCE", SYM(FORCE_SYM)}, { "FOREIGN", SYM(FOREIGN)}, + { "FORMAT", SYM(FORMAT_SYM)}, { "FOUND", SYM(FOUND_SYM)}, { "FROM", SYM(FROM)}, { "FULL", SYM(FULL)}, diff --git a/sql/my_json_writer.cc b/sql/my_json_writer.cc new file mode 100644 index 00000000000..206597da59e --- /dev/null +++ b/sql/my_json_writer.cc @@ -0,0 +1,129 @@ +/* Todo: SkySQL copyrights */ + +#include <my_global.h> +#include "sql_priv.h" +#include "sql_string.h" + +#include "my_json_writer.h" + +void Json_writer::append_indent() +{ + if (!document_start) + output.append('\n'); + for (int i=0; i< indent_level; i++) + output.append(' '); +} + +void Json_writer::start_object() +{ + if (!element_started) + start_element(); + + output.append("{"); + indent_level+=INDENT_SIZE; + first_child=true; + element_started= false; + document_start= false; +} + +void Json_writer::start_array() +{ + if (!element_started) + start_element(); + + output.append("["); + indent_level+=INDENT_SIZE; + first_child=true; + element_started= false; + document_start= false; +} + + +void Json_writer::end_object() +{ + indent_level-=INDENT_SIZE; + if (!first_child) + append_indent(); + output.append("}"); +} + + +void Json_writer::end_array() +{ + indent_level-=INDENT_SIZE; + if (!first_child) + append_indent(); + output.append("]"); +} + + +Json_writer& Json_writer::add_member(const char *name) +{ + // assert that we are in an object + DBUG_ASSERT(!element_started); + start_element(); + + output.append('"'); + output.append(name); + output.append("\": "); + return *this; +} + + +void Json_writer::start_element() +{ + element_started= true; + + if (first_child) + first_child= false; + else + output.append(','); + + append_indent(); +} + +void Json_writer::add_ll(longlong val) +{ + if (!element_started) + start_element(); + + char buf[64]; + my_snprintf(buf, sizeof(buf), "%ld", val); + output.append(buf); + element_started= false; +} + + +void Json_writer::add_double(double val) +{ + if (!element_started) + start_element(); + + char buf[64]; + my_snprintf(buf, sizeof(buf), "%lf", val); + output.append(buf); + element_started= false; +} + + +void Json_writer::add_str(const char *str) +{ + if (!element_started) + start_element(); + + output.append('"'); + output.append(str); + output.append('"'); + element_started= false; +} + +void Json_writer::add_bool(bool val) +{ + add_str(val? 
"true" : "false"); +} + +void Json_writer::add_str(const String &str) +{ + add_str(str.ptr()); +} + diff --git a/sql/my_json_writer.h b/sql/my_json_writer.h new file mode 100644 index 00000000000..403d7e0688c --- /dev/null +++ b/sql/my_json_writer.h @@ -0,0 +1,43 @@ +/* Todo: SkySQL copyrights */ + +class Json_writer +{ +public: + /* Add a member. We must be in an object. */ + Json_writer& add_member(const char *name); + + /* Add atomic values */ + void add_ll(longlong val); + void add_str(const char* val); + void add_str(const String &str); + void add_double(double val); + void add_bool(bool val); + + /* Start a child object */ + void start_object(); + void start_array(); + + void end_object(); + void end_array(); + + Json_writer() : + indent_level(0), document_start(true), element_started(false), + first_child(true) + {} +private: + // stack of (name, bool is_object_or_array) elements. + int indent_level; + enum { INDENT_SIZE = 2 }; + + bool document_start; + bool element_started; + bool first_child; + + void append_indent(); + void start_element(); + + //const char *new_member_name; +public: + String output; +}; + diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 8d6ddc0bb08..3d1476715e5 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2271,7 +2271,10 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length) int THD::send_explain_fields(select_result *result) { List<Item> field_list; - make_explain_field_list(field_list); + if (lex->explain_json) + make_explain_json_field_list(field_list); + else + make_explain_field_list(field_list); result->prepare(field_list, NULL); return (result->send_result_set_metadata(field_list, Protocol::SEND_NUM_ROWS | @@ -2279,6 +2282,13 @@ int THD::send_explain_fields(select_result *result) } +void THD::make_explain_json_field_list(List<Item> &field_list) +{ + Item *item= new Item_empty_string("EXPLAIN", 78, system_charset_info); + field_list.push_back(item); +} + + /* Populate the provided field_list with EXPLAIN output columns. this->lex->describe has the EXPLAIN flags diff --git a/sql/sql_class.h b/sql/sql_class.h index f3537086132..c0636349b89 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3060,6 +3060,7 @@ public: CHANGED_TABLE_LIST * changed_table_dup(const char *key, long key_length); int send_explain_fields(select_result *result); void make_explain_field_list(List<Item> &field_list); + void make_explain_json_field_list(List<Item> &field_list); /** Clear the current error, if any. 
We do not clear is_fatal_error or is_fatal_sub_stmt_error since we diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 9df4fd965a5..496e113162a 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -20,7 +20,7 @@ #include "sql_priv.h" #include "sql_select.h" - +#include "my_json_writer.h" Explain_query::Explain_query(THD *thd_arg) : upd_del_plan(NULL), insert_plan(NULL), thd(thd_arg), apc_enabled(false) @@ -139,8 +139,13 @@ int Explain_query::send_explain(THD *thd) thd->send_explain_fields(result)) return 1; - int res; - if ((res= print_explain(result, lex->describe, lex->analyze_stmt))) + int res= 0; + if (thd->lex->explain_json) + print_explain_json(result, thd->lex->analyze_stmt); + else + res= print_explain(result, lex->describe, thd->lex->analyze_stmt); + + if (res) result->abort_result_set(); else result->send_eof(); @@ -177,6 +182,40 @@ int Explain_query::print_explain(select_result_sink *output, } +void Explain_query::print_explain_json(select_result_sink *output, bool is_analyze) +{ + Json_writer writer; + writer.start_object(); + + if (upd_del_plan) + { + //upd_del_plan->print_explain(this, output, explain_flags, is_analyze); + DBUG_ASSERT(0); + } + else if (insert_plan) + { + //insert_plan->print_explain(this, output, explain_flags, is_analyze); + DBUG_ASSERT(0); + } + else + { + /* Start printing from node with id=1 */ + Explain_node *node= get_node(1); + if (!node) + return; /* No query plan */ + node->print_explain_json(this, &writer, is_analyze); + } + + writer.end_object(); + + const CHARSET_INFO *cs= system_charset_info; + List<Item> item_list; + String *buf= &writer.output; + item_list.push_back(new Item_string(buf->ptr(), buf->length(), cs)); + output->send_data(item_list); +} + + bool print_explain_query(LEX *lex, THD *thd, String *str) { return lex->explain->print_explain_str(thd, str, false); @@ -214,12 +253,59 @@ static void push_string(List<Item> *item_list, String *str) system_charset_info)); } +static void push_string_list(List<Item> *item_list, List<char> &lines, + String *buf) +{ + List_iterator_fast<char> it(lines); + char *line; + bool first= true; + while ((line= it++)) + { + if (first) + first= false; + else + buf->append(','); + + buf->append(line); + } + push_string(item_list, buf); +} + + +uint Explain_union::make_union_table_name(char *buf) +{ + uint childno= 0; + uint len= 6, lastop= 0; + memcpy(buf, STRING_WITH_LEN("<union")); + + for (; childno < union_members.elements() && len + lastop + 5 < NAME_LEN; + childno++) + { + len+= lastop; + lastop= my_snprintf(buf + len, NAME_LEN - len, + "%u,", union_members.at(childno)); + } + + if (childno < union_members.elements() || len + lastop >= NAME_LEN) + { + memcpy(buf + len, STRING_WITH_LEN("...>") + 1); + len+= 4; + } + else + { + len+= lastop; + buf[len - 1]= '>'; // change ',' to '>' + } + return len; +} + int Explain_union::print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze) { + const CHARSET_INFO *cs= system_charset_info; char table_name_buffer[SAFE_NAME_LEN]; /* print all UNION children, in order */ @@ -240,32 +326,8 @@ int Explain_union::print_explain(Explain_query *query, push_str(&item_list, fake_select_type); /* `table` column: something like "<union1,2>" */ - { - uint childno= 0; - uint len= 6, lastop= 0; - memcpy(table_name_buffer, STRING_WITH_LEN("<union")); - - for (; childno < union_members.elements() && len + lastop + 5 < NAME_LEN; - childno++) - { - len+= lastop; - lastop= my_snprintf(table_name_buffer + len, NAME_LEN - len, - 
"%u,", union_members.at(childno)); - } - - if (childno < union_members.elements() || len + lastop >= NAME_LEN) - { - memcpy(table_name_buffer + len, STRING_WITH_LEN("...>") + 1); - len+= 4; - } - else - { - len+= lastop; - table_name_buffer[len - 1]= '>'; // change ',' to '>' - } - const CHARSET_INFO *cs= system_charset_info; - item_list.push_back(new Item_string(table_name_buffer, len, cs)); - } + uint len= make_union_table_name(table_name_buffer); + item_list.push_back(new Item_string(table_name_buffer, len, cs)); /* `partitions` column */ if (explain_flags & DESCRIBE_PARTITIONS) @@ -311,7 +373,6 @@ int Explain_union::print_explain(Explain_query *query, { extra_buf.append(STRING_WITH_LEN("Using filesort")); } - const CHARSET_INFO *cs= system_charset_info; item_list.push_back(new Item_string(extra_buf.ptr(), extra_buf.length(), cs)); //output->unit.offset_limit_cnt= 0; @@ -326,6 +387,36 @@ int Explain_union::print_explain(Explain_query *query, } +void Explain_union::print_explain_json(Explain_query *query, + Json_writer *writer, bool is_analyze) +{ + char table_name_buffer[SAFE_NAME_LEN]; + + writer->add_member("query_block").start_object(); + writer->add_member("union_result").start_object(); + // using_temporary_table + make_union_table_name(table_name_buffer); + writer->add_member("table_name").add_str(table_name_buffer); + writer->add_member("access_type").add_str("ALL"); // not very useful + writer->add_member("query_specifications").start_array(); + + for (int i= 0; i < (int) union_members.elements(); i++) + { + writer->start_object(); + writer->add_member("dependent").add_str("TODO"); + writer->add_member("cacheable").add_str("TODO"); + Explain_select *sel= query->get_select(union_members.at(i)); + sel->print_explain_json(query, writer, is_analyze); + writer->end_object(); + } + writer->end_array(); + + //TODO: print_explain_for_children + + writer->end_object(); +} + + /* Print EXPLAINs for all children nodes (i.e. for subqueries) */ @@ -421,21 +512,112 @@ int Explain_select::print_explain(Explain_query *query, } +void Explain_select::print_explain_json(Explain_query *query, + Json_writer *writer, bool is_analyze) +{ + writer->add_member("query_block").start_object(); + writer->add_member("select_id").add_ll(1); + if (message) + { + writer->add_member("table").start_object(); + writer->add_member("message").add_str(message); + writer->end_object(); + } + else + { + for (uint i=0; i< n_join_tabs; i++) + { + // psergey-todo: Need to honor SJM nests... 
+ join_tabs[i]->print_explain_json(writer, is_analyze); + } + } + writer->end_object(); +} + + void Explain_table_access::push_extra(enum explain_extra_tag extra_tag) { extra_tags.append(extra_tag); } +void Explain_table_access::fill_key_str(String *key_str) +{ + const CHARSET_INFO *cs= system_charset_info; + bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT || + type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE); + const char *hash_key_prefix= "#hash#"; + + if (key.get_key_name()) + { + if (is_hj) + key_str->append(hash_key_prefix, strlen(hash_key_prefix), cs); + + key_str->append(key.get_key_name()); + + if (is_hj && type != JT_HASH) + key_str->append(':'); + } + + if (quick_info) + { + StringBuffer<64> buf2; + quick_info->print_key(&buf2); + key_str->append(buf2); + } + if (type == JT_HASH_NEXT) + key_str->append(hash_next_key.get_key_name()); +} + + +void Explain_table_access::fill_key_len_str(String *key_len_str) +{ + bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT || + type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE); + if (key.get_key_len() != (uint)-1) + { + char buf[64]; + size_t length; + length= longlong10_to_str(key.get_key_len(), buf, 10) - buf; + key_len_str->append(buf, length); + if (is_hj && type != JT_HASH) + key_len_str->append(':'); + } + + if (quick_info) + { + StringBuffer<64> buf2; + quick_info->print_key_len(&buf2); + key_len_str->append(buf2); + } + + if (type == JT_HASH_NEXT) + { + char buf[64]; + size_t length; + length= longlong10_to_str(hash_next_key.get_key_len(), buf, 10) - buf; + key_len_str->append(buf, length); + } +} + + +double Explain_table_access::get_r_filtered() +{ + //psergey-todo: modify this to produce separate filtered% for both parts of + //WHERE. + double r_filtered= tracker.get_filtered_after_where(); + if (bka_type.is_using_jbuf()) + r_filtered *= jbuf_tracker.get_filtered_after_where(); + return r_filtered; +} + + int Explain_table_access::print_explain(select_result_sink *output, uint8 explain_flags, bool is_analyze, uint select_id, const char *select_type, bool using_temporary, bool using_filesort) { const CHARSET_INFO *cs= system_charset_info; - const char *hash_key_prefix= "#hash#"; - bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT || - type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE); List<Item> item_list; Item *item_null= new Item_null(); @@ -470,32 +652,15 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai push_str(&item_list, join_type_str[type]); /* `possible_keys` column */ - if (possible_keys_str.length() > 0) - push_string(&item_list, &possible_keys_str); - else + StringBuffer<64> possible_keys_buf; + if (possible_keys.is_empty()) item_list.push_back(item_null); + else + push_string_list(&item_list, possible_keys, &possible_keys_buf); /* `key` */ StringBuffer<64> key_str; - if (key.get_key_name()) - { - if (is_hj) - key_str.append(hash_key_prefix, strlen(hash_key_prefix), cs); - - key_str.append(key.get_key_name()); - - if (is_hj && type != JT_HASH) - key_str.append(':'); - } - - if (quick_info) - { - StringBuffer<64> buf2; - quick_info->print_key(&buf2); - key_str.append(buf2); - } - if (type == JT_HASH_NEXT) - key_str.append(hash_next_key.get_key_name()); + fill_key_str(&key_str); if (key_str.length() > 0) push_string(&item_list, &key_str); @@ -504,31 +669,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai /* `key_len` */ StringBuffer<64> key_len_str; - - if (key.get_key_len() != (uint)-1) - { - char buf[64]; - size_t length; - length= 
longlong10_to_str(key.get_key_len(), buf, 10) - buf; - key_len_str.append(buf, length); - if (is_hj && type != JT_HASH) - key_len_str.append(':'); - } - - if (quick_info) - { - StringBuffer<64> buf2; - quick_info->print_key_len(&buf2); - key_len_str.append(buf2); - } - - if (type == JT_HASH_NEXT) - { - char buf[64]; - size_t length; - length= longlong10_to_str(hash_next_key.get_key_len(), buf, 10) - buf; - key_len_str.append(buf, length); - } + fill_key_len_str(&key_len_str); if (key_len_str.length() > 0) push_string(&item_list, &key_len_str); @@ -631,6 +772,115 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai } +static void write_item(Json_writer *writer, Item *item) +{ + char item_buf[256]; + String str(item_buf, sizeof(item_buf), &my_charset_bin); + str.length(0); + item->print(&str ,QT_ORDINARY); + writer->add_str(str.c_ptr_safe()); +} + + +void Explain_table_access::tag_to_json(Json_writer *writer, enum explain_extra_tag tag) +{ + switch (tag) + { + case ET_OPEN_FULL_TABLE: + writer->add_member("open_full_table").add_bool(true); + break; + case ET_SCANNED_0_DATABASES: + writer->add_member("scanned_databases").add_ll(0); + break; + case ET_SCANNED_1_DATABASE: + writer->add_member("scanned_databases").add_ll(1); + break; + case ET_SCANNED_ALL_DATABASES: + writer->add_member("scanned_databases").add_str("all"); + break; + case ET_SKIP_OPEN_TABLE: + writer->add_member("skip_open_table").add_bool(true); + break; + case ET_OPEN_FRM_ONLY: + writer->add_member("open_frm_only").add_bool(true); + break; + case ET_USING_INDEX_CONDITION: + writer->add_member("index_condition"); + write_item(writer, pushed_index_cond); + break; + case ET_USING_WHERE: + writer->add_member("attached_condition"); + write_item(writer, where_cond); + break; + case ET_USING_INDEX: + writer->add_member("using_index").add_bool(true); + break; + case ET_USING: + // index merge: case ET_USING + break; + default: + DBUG_ASSERT(0); + } +} + + +void Explain_table_access::print_explain_json(Json_writer *writer, + bool is_analyze) +{ + writer->add_member("table").start_object(); + + writer->add_member("table_name").add_str(table_name); + // partitions + writer->add_member("access_type").add_str(join_type_str[type]); + if (!possible_keys.is_empty()) + { + List_iterator_fast<char> it(possible_keys); + const char *name; + writer->add_member("possible_keys").start_array(); + while ((name= it++)) + writer->add_str(name); + writer->end_array(); + } + /* `key` */ + StringBuffer<64> key_str; + fill_key_str(&key_str); + if (key_str.length()) + writer->add_member("key").add_str(key_str); + + /* `used_key_parts` */ + writer->add_member("used_key_parts").add_str("TODO"); + + StringBuffer<64> key_len_str; + fill_key_len_str(&key_len_str); + if (key_len_str.length()) + writer->add_member("key_length").add_str(key_len_str); + + if (rows_set) + writer->add_member("rows").add_ll(rows); + + /* `r_rows` */ + if (is_analyze && tracker.has_scans()) + { + ha_rows avg_rows= tracker.get_avg_rows(); + writer->add_member("r_rows").add_ll(avg_rows); + } + + if (filtered_set) + writer->add_member("filtered").add_double(filtered); + + /* `r_filtered` */ + if (is_analyze) + writer->add_member("r_filtered").add_double(get_r_filtered()); + + for (int i=0; i < (int)extra_tags.elements(); i++) + { + tag_to_json(writer, extra_tags.at(i)); + } + + writer->end_object(); +} + + /* Elements in this array match members of enum Extra_tag, defined in sql_explain.h diff --git a/sql/sql_explain.h b/sql/sql_explain.h index 
a2b4ea282b7..8708a74e661 100644 --- a/sql/sql_explain.h +++ b/sql/sql_explain.h @@ -67,6 +67,7 @@ const int FAKE_SELECT_LEX_ID= (int)UINT_MAX; class Explain_query; +class Json_writer; /* A node can be either a SELECT, or a UNION. */ @@ -97,7 +98,9 @@ public: virtual int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze)=0; - + virtual void print_explain_json(Explain_query *query, Json_writer *writer, + bool is_analyze)= 0; + int print_explain_for_children(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); virtual ~Explain_node(){} @@ -177,6 +180,8 @@ public: int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); + void print_explain_json(Explain_query *query, Json_writer *writer, + bool is_analyze); Table_access_tracker *get_using_temporary_read_tracker() { @@ -222,6 +227,8 @@ public: } int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); + void print_explain_json(Explain_query *query, Json_writer *writer, + bool is_analyze); const char *fake_select_type; bool using_filesort; @@ -235,6 +242,8 @@ public: return &tmptable_read_tracker; } private: + uint make_union_table_name(char *buf); + Table_access_tracker fake_select_lex_tracker; /* This one is for reading after ORDER BY */ Table_access_tracker tmptable_read_tracker; @@ -310,6 +319,8 @@ public: /* Return tabular EXPLAIN output as a text string */ bool print_explain_str(THD *thd, String *out_str, bool is_analyze); + void print_explain_json(select_result_sink *output, bool is_analyze); + /* If true, at least part of EXPLAIN can be printed */ bool have_query_plan() { return insert_plan || upd_del_plan|| get_node(1) != NULL; } @@ -479,7 +490,8 @@ public: bool used_partitions_set; /* Empty string means "NULL" will be printed */ - StringBuffer<32> possible_keys_str; + List<char> possible_keys; + //StringBuffer<32> possible_keys_str; /* Index use: key name and length. @@ -528,11 +540,19 @@ public: EXPLAIN_BKA_TYPE bka_type; StringBuffer<32> firstmatch_table_name; + + /* + Note: lifespan of WHERE condition is less than lifespan of this object. + THe below is valid if tags include "ET_USING_WHERE". 
+ */ + Item *where_cond; + Item *pushed_index_cond; int print_explain(select_result_sink *output, uint8 explain_flags, bool is_analyze, uint select_id, const char *select_type, bool using_temporary, bool using_filesort); + void print_explain_json(Json_writer *writer, bool is_analyze); /* ANALYZE members*/ Table_access_tracker tracker; @@ -540,6 +560,10 @@ public: private: void append_tag_name(String *str, enum explain_extra_tag tag); + void fill_key_str(String *key_str); + void fill_key_len_str(String *key_len_str); + double get_r_filtered(); + void tag_to_json(Json_writer *writer, enum explain_extra_tag tag); }; @@ -584,6 +608,8 @@ public: virtual int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); + virtual void print_explain_json(Explain_query *query, Json_writer *writer, bool is_analyze) + { /* EXPLAIN_JSON_NOT_IMPL */} }; @@ -604,6 +630,9 @@ public: int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); + void print_explain_json(Explain_query *query, Json_writer *writer, + bool is_analyze) + { /* EXPLAIN_JSON_NOT_IMPL */} }; @@ -625,6 +654,8 @@ public: virtual int print_explain(Explain_query *query, select_result_sink *output, uint8 explain_flags, bool is_analyze); + virtual void print_explain_json(Explain_query *query, Json_writer *writer, bool is_analyze) + { /* EXPLAIN_JSON_NOT_IMPL */} }; diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index cd9f9238f71..1e095627245 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -484,6 +484,7 @@ void lex_start(THD *thd) lex->select_lex.group_list_ptrs->clear(); lex->describe= 0; lex->analyze_stmt= 0; + lex->explain_json= false; lex->subqueries= FALSE; lex->context_analysis_only= 0; lex->derived_tables= 0; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 70a793541af..5e5f36172b6 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -2461,6 +2461,7 @@ struct LEX: public Query_tables_list uint table_count; uint8 describe; bool analyze_stmt; /* TRUE<=> this is "ANALYZE $stmt" */ + bool explain_json; /* A flag that indicates what kinds of derived tables are present in the query (0 if no derived tables, otherwise a combination of flags diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 019fd55e3d8..ab2ef71d22b 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -97,6 +97,8 @@ #include "log_slow.h" #include "sql_bootstrap.h" +#include "my_json_writer.h" + #define FLAGSTR(V,F) ((V)&(F)?#F" ":"") #ifdef WITH_ARIA_STORAGE_ENGINE @@ -5267,19 +5269,39 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) top-level LIMIT */ result->reset_offset_limit(); - lex->explain->print_explain(result, lex->describe, lex->analyze_stmt); - if (lex->describe & DESCRIBE_EXTENDED) + if (thd->lex->explain_json) { - char buff[1024]; - String str(buff,(uint32) sizeof(buff), system_charset_info); - str.length(0); /* - The warnings system requires input in utf8, @see - mysqld_show_warnings(). 
+ Json_writer writer; + writer.start_object(); + thd->lex->explain->print_explain_json(&writer, thd->lex->analyze_stmt); + writer.end_object(); + + const CHARSET_INFO *cs= system_charset_info; + List<Item> item_list; + String *buf= &writer.output; + item_list.push_back(new Item_string(buf->ptr(), buf->length(), cs)); + result->send_data(item_list); */ - lex->unit.print(&str, QT_TO_SYSTEM_CHARSET); - push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, - ER_YES, str.c_ptr_safe()); + thd->lex->explain->print_explain_json(result, thd->lex->analyze_stmt); + } + else + { + lex->explain->print_explain(result, thd->lex->describe, + thd->lex->analyze_stmt); + if (lex->describe & DESCRIBE_EXTENDED) + { + char buff[1024]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + /* + The warnings system requires input in utf8, @see + mysqld_show_warnings(). + */ + lex->unit.print(&str, QT_TO_SYSTEM_CHARSET); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_YES, str.c_ptr_safe()); + } } } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 05c88a5f534..f772c53ebb9 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -22967,7 +22967,6 @@ void JOIN::clear() /* Print an EXPLAIN line with all NULLs and given message in the 'Extra' column - TODO: is_analyze */ int print_explain_message_line(select_result_sink *result, @@ -23263,20 +23262,24 @@ void explain_append_mrr_info(QUICK_RANGE_SELECT *quick, String *res) /////////////////////////////////////////////////////////////////////////////// // TODO: join with make_possible_keys_line ? -void append_possible_keys(String *str, TABLE *table, key_map possible_keys) +int append_possible_keys(MEM_ROOT *alloc, List<char> &list, TABLE *table, + key_map possible_keys) { uint j; for (j=0 ; j < table->s->keys ; j++) { if (possible_keys.is_set(j)) { - if (str->length()) - str->append(','); - str->append(table->key_info[j].name, - strlen(table->key_info[j].name), - system_charset_info); + const char *key_name= table->key_info[j].name; + size_t len= strlen(key_name); + char *cp; + if (!(cp = (char*)alloc_root(alloc, len))) + return 1; + memcpy(cp, key_name, len+1); + list.push_back(cp); } } + return 0; } // TODO: this function is only applicable for the first non-const optimization @@ -23417,7 +23420,11 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab eta->type= tab_type; /* Build "possible_keys" value */ - append_possible_keys(&eta->possible_keys_str, table, tab->keys); + // psergey-todo: why does this use thd MEM_ROOT??? Doesn't this + // break ANALYZE ? thd->mem_root will be freed, and after that we will + // attempt to print the query plan? + append_possible_keys(thd->mem_root, eta->possible_keys, table, tab->keys); + // psergey-todo: ^ check for error return code /* Build "key", "key_len", and "ref" */ if (tab_type == JT_NEXT) @@ -23575,7 +23582,10 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab if (keyno != MAX_KEY && keyno == table->file->pushed_idx_cond_keyno && table->file->pushed_idx_cond) + { eta->push_extra(ET_USING_INDEX_CONDITION); + eta->pushed_index_cond= table->file->pushed_idx_cond; + } else if (tab->cache_idx_cond) eta->push_extra(ET_USING_INDEX_CONDITION_BKA); @@ -23616,7 +23626,11 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab */ } else + { + eta->where_cond= tab->select->cond? 
tab->select->cond: + tab->cache_select->cond; eta->push_extra(ET_USING_WHERE); + } } } if (table_list /* SJM bushes don't have table_list */ && diff --git a/sql/sql_select.h b/sql/sql_select.h index 63fd6a6d99f..41ee2cb51d2 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1912,4 +1912,5 @@ ulong check_selectivity(THD *thd, TABLE *table, List<COND_STATISTIC> *conds); + #endif /* SQL_SELECT_INCLUDED */ diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index a261d611aa6..eb0d359f608 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1178,6 +1178,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token FORCE_SYM %token FOREIGN /* SQL-2003-R */ %token FOR_SYM /* SQL-2003-R */ +%token FORMAT_SYM %token FOUND_SYM /* SQL-2003-R */ %token FROM %token FULL /* SQL-2003-R */ @@ -1847,6 +1848,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); subselect_end select_var_list select_var_list_init help field_length opt_field_length opt_extended_describe shutdown + opt_format_json prepare prepare_src execute deallocate statement sp_suid sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa @@ -9757,6 +9759,18 @@ function_call_conflict: if ($$ == NULL) MYSQL_YYABORT; } + | FORMAT_SYM '(' expr ',' expr ')' + { + $$= new (thd->mem_root) Item_func_format($3, $5); + if ($$ == NULL) + MYSQL_YYABORT; + } + | FORMAT_SYM '(' expr ',' expr ',' expr ')' + { + $$= new (thd->mem_root) Item_func_format($3, $5, $7); + if ($$ == NULL) + MYSQL_YYABORT; + } | LAST_VALUE '(' expr_list ')' { $$= new (thd->mem_root) Item_func_last_value(* $3); @@ -12769,16 +12783,34 @@ describe_command: ; analyze_stmt_command: - ANALYZE_SYM explainable_command + ANALYZE_SYM opt_format_json explainable_command { Lex->analyze_stmt= true; } ; opt_extended_describe: - /* empty */ {} - | EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; } + EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; } | PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; } + | opt_format_json {} + ; + +opt_format_json: + /* empty */ {} + | FORMAT_SYM EQ ident_or_text + { + if (!my_strcasecmp(system_charset_info, $3.str, "JSON")) + Lex->explain_json= true; + else if (!my_strcasecmp(system_charset_info, $3.str, "TRADITIONAL")) + { + DBUG_ASSERT(Lex->explain_json==false); + } + else + { + my_error(ER_UNKNOWN_EXPLAIN_FORMAT, MYF(0), $3.str); + MYSQL_YYABORT; + } + } ; opt_describe_column: @@ -14064,6 +14096,7 @@ keyword: | EXAMINED_SYM {} | EXECUTE_SYM {} | FLUSH_SYM {} + | FORMAT_SYM {} | GET_SYM {} | HANDLER_SYM {} | HELP_SYM {} |
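
The writer in sql/my_json_writer.cc needs no element stack: it gets by with an indent level plus three flags (document_start, element_started, first_child), and start_element() lazily emits the separating comma and indentation just before each member or value. The sketch below is a hypothetical, self-contained analogue of that bookkeeping, using std::string instead of the server's String; it illustrates the technique only and is not the patch's class (TinyJsonWriter is an invented name).

```cpp
#include <cstdio>
#include <string>

/* Standalone analogue of the patch's Json_writer bookkeeping (illustrative
   only; not part of MariaDB). */
class TinyJsonWriter
{
  std::string out;
  int indent= 0;
  bool doc_start= true;        // suppress the newline before the first token
  bool elem_started= false;    // a member name was written, its value is pending
  bool first_child= true;      // no comma before the first element of a scope
  enum { INDENT_SIZE= 2 };

  void newline_and_indent()
  {
    if (!doc_start)
      out+= '\n';
    out.append(indent, ' ');
  }
  void start_element()         // lazily emit the separating comma + indentation
  {
    elem_started= true;
    if (first_child)
      first_child= false;
    else
      out+= ',';
    newline_and_indent();
  }
public:
  TinyJsonWriter& member(const char *name)   // valid only inside an object
  {
    start_element();
    out+= '"'; out+= name; out+= "\": ";
    return *this;
  }
  void start_object()
  {
    if (!elem_started)
      start_element();
    out+= '{';
    indent+= INDENT_SIZE;
    first_child= true; elem_started= false; doc_start= false;
  }
  void end_object()
  {
    indent-= INDENT_SIZE;
    if (!first_child)
      newline_and_indent();
    out+= '}';
    first_child= false;        // the closed object is an element of its parent
  }
  void value_str(const char *s)
  {
    if (!elem_started)
      start_element();
    out+= '"'; out+= s; out+= '"';
    elem_started= false;
  }
  const std::string& str() const { return out; }
};

int main()
{
  TinyJsonWriter w;
  w.start_object();
  w.member("query_block").start_object();
  w.member("select_id").value_str("1");
  w.end_object();
  w.end_object();
  std::puts(w.str().c_str());
  return 0;
}
```

Compiled on its own, this prints a two-level object with the same two-space indentation that INDENT_SIZE selects in the patch; the real class differs mainly in writing into a String and in DBUG_ASSERTing that add_member() is not called while another element is still half-written.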