Diffstat (limited to 'sql/sql_analyze_stmt.cc')
-rw-r--r--   sql/sql_analyze_stmt.cc   93
1 file changed, 87 insertions(+), 6 deletions(-)
diff --git a/sql/sql_analyze_stmt.cc b/sql/sql_analyze_stmt.cc
index 4874c33a544..5e6774bd539 100644
--- a/sql/sql_analyze_stmt.cc
+++ b/sql/sql_analyze_stmt.cc
@@ -23,32 +23,41 @@
#include "sql_select.h"
#include "my_json_writer.h"
-void Filesort_tracker::print_json(Json_writer *writer)
+void Filesort_tracker::print_json_members(Json_writer *writer)
{
const char *varied_str= "(varied across executions)";
- writer->add_member("r_loops").add_ll(r_loops);
+ writer->add_member("r_loops").add_ll(get_r_loops());
+ if (get_r_loops() && time_tracker.timed)
+ {
+ writer->add_member("r_total_time_ms").
+ add_double(time_tracker.get_time_ms());
+ }
if (r_limit != HA_POS_ERROR)
{
writer->add_member("r_limit");
if (r_limit == 0)
writer->add_str(varied_str);
else
- writer->add_ll(rint(r_limit/r_loops));
+ writer->add_ll(rint(r_limit/get_r_loops()));
}
writer->add_member("r_used_priority_queue");
- if (r_used_pq == r_loops)
+ if (r_used_pq == get_r_loops())
writer->add_bool(true);
else if (r_used_pq == 0)
writer->add_bool(false);
else
writer->add_str(varied_str);
- writer->add_member("r_output_rows").add_ll(rint(r_output_rows / r_loops));
+ writer->add_member("r_output_rows").add_ll(rint(r_output_rows /
+ get_r_loops()));
if (sort_passes)
- writer->add_member("r_sort_passes").add_ll(rint(sort_passes / r_loops));
+ {
+ writer->add_member("r_sort_passes").add_ll(rint(sort_passes /
+ get_r_loops()));
+ }
if (sort_buffer_size != 0)
{
@@ -60,3 +69,75 @@ void Filesort_tracker::print_json(Json_writer *writer)
}
}
+
+/*
+ Report that we are doing a filesort.
+ @return
+ Tracker object to be used with filesort
+*/
+
+Filesort_tracker *Sort_and_group_tracker::report_sorting(THD *thd)
+{
+ DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS);
+
+ if (total_actions)
+ {
+ /* This is not the first execution. Check if the steps match. */
+ if (qep_actions[cur_action] != EXPL_ACTION_FILESORT)
+ {
+ varied_executions= true;
+ cur_action++;
+ if (!dummy_fsort_tracker)
+ dummy_fsort_tracker= new (thd->mem_root) Filesort_tracker(is_analyze);
+ return dummy_fsort_tracker;
+ }
+ return qep_actions_data[cur_action++].filesort_tracker;
+ }
+
+ Filesort_tracker *fs_tracker= new(thd->mem_root)Filesort_tracker(is_analyze);
+ qep_actions_data[cur_action].filesort_tracker= fs_tracker;
+ qep_actions[cur_action++]= EXPL_ACTION_FILESORT;
+
+ return fs_tracker;
+}
+
+
+void Sort_and_group_tracker::report_tmp_table(TABLE *tbl)
+{
+ DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS);
+ if (total_actions)
+ {
+ /* This is not the first execution. Check if the steps match. */
+ // todo: should also check that the tmp table kinds are the same.
+ if (qep_actions[cur_action] != EXPL_ACTION_TEMPTABLE)
+ varied_executions= true;
+ }
+
+ if (!varied_executions)
+ {
+ qep_actions[cur_action]= EXPL_ACTION_TEMPTABLE;
+ // qep_actions_data[cur_action]= ....
+ }
+
+ cur_action++;
+}
+
+
+void Sort_and_group_tracker::report_duplicate_removal()
+{
+ DBUG_ASSERT(cur_action < MAX_QEP_ACTIONS);
+ if (total_actions)
+ {
+ /* This is not the first execution. Check if the steps match. */
+ if (qep_actions[cur_action] != EXPL_ACTION_REMOVE_DUPS)
+ varied_executions= true;
+ }
+
+ if (!varied_executions)
+ {
+ qep_actions[cur_action]= EXPL_ACTION_REMOVE_DUPS;
+ }
+
+ cur_action++;
+}
+
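The reworked Filesort_tracker::print_json_members() divides the accumulated counters by get_r_loops(), so ANALYZE reports per-invocation averages rather than totals, and only emits r_total_time_ms when at least one loop ran and timing was enabled. A minimal standalone sketch of that averaging arithmetic follows; the counter values are made up for illustration and do not come from the patch:

#include <cmath>
#include <cstdio>

int main()
{
  unsigned long long r_loops= 4;   /* filesort was invoked 4 times          */
  double r_output_rows= 400.0;     /* rows produced, summed over all loops  */
  double sort_passes= 6.0;         /* merge passes, summed over all loops   */

  /* ANALYZE prints the per-loop average, rounded with rint() */
  printf("r_output_rows: %lld\n", (long long) rint(r_output_rows / r_loops));
  printf("r_sort_passes: %lld\n", (long long) rint(sort_passes / r_loops));
  return 0;
}

With these sample totals the sketch prints r_output_rows: 100 and r_sort_passes: 2.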
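The new Sort_and_group_tracker::report_*() methods all follow one pattern: the first execution records the sequence of QEP actions, and later executions only compare against that recording, setting varied_executions when the plans differ. The following self-contained model is a simplified illustration of that pattern only; the Tracker struct, action names and main() below are hypothetical and are not the MariaDB classes:

#include <cassert>
#include <cstdio>

enum qep_action { ACTION_FILESORT, ACTION_TEMPTABLE, ACTION_REMOVE_DUPS };
static const int MAX_ACTIONS= 5;

struct Tracker
{
  qep_action actions[MAX_ACTIONS];
  int  cur_action= 0;
  int  total_actions= 0;             /* 0 until the first execution ends */
  bool varied_executions= false;

  void report(qep_action act)
  {
    assert(cur_action < MAX_ACTIONS);
    if (total_actions)
    {
      /* Not the first execution: compare against the recorded sequence */
      if (actions[cur_action] != act)
        varied_executions= true;
    }
    else
      actions[cur_action]= act;      /* First execution: record the step */
    cur_action++;
  }

  void finish_execution()
  {
    if (!total_actions)
      total_actions= cur_action;
    cur_action= 0;
  }
};

int main()
{
  Tracker t;
  t.report(ACTION_FILESORT);         /* first execution: filesort ...        */
  t.report(ACTION_TEMPTABLE);        /* ... then a temporary table           */
  t.finish_execution();

  t.report(ACTION_TEMPTABLE);        /* second execution starts differently  */
  t.finish_execution();

  printf("varied_executions: %d\n", t.varied_executions);  /* prints 1 */
  return 0;
}

In the patch itself, report_sorting() additionally hands out a dummy Filesort_tracker once executions have diverged, so callers always get a tracker object even when the recorded plan no longer matches.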