author    | Sergei Golubchik <serg@mariadb.org> | 2022-05-18 10:34:38 +0200
committer | Sergei Golubchik <serg@mariadb.org> | 2022-05-18 10:34:38 +0200
commit    | 99a433ed1cc2cebad93d6ece2b65691f2f49d3ea (patch)
tree      | e7fc16ab8fe9b4b9ff486d1adbc27ed84c72b923 /sql
parent    | 0a1d9d0681fda7595c0d08038357b56cf3bea9eb (diff)
parent    | b2187662bcba12b66667bc0531727453b3b8a666 (diff)
download  | mariadb-git-99a433ed1cc2cebad93d6ece2b65691f2f49d3ea.tar.gz
Merge branch '10.6' into 10.7 (tag: mariadb-10.7.4)
Diffstat (limited to 'sql')
-rw-r--r-- | sql/gcalc_tools.cc         |   2
-rw-r--r-- | sql/gcalc_tools.h          |  16
-rw-r--r-- | sql/ha_partition.h         |   1
-rw-r--r-- | sql/handler.h              |  13
-rw-r--r-- | sql/item.h                 |   4
-rw-r--r-- | sql/item_sum.cc            |   6
-rw-r--r-- | sql/mysql_install_db.cc    |   2
-rw-r--r-- | sql/mysqld.cc              |   2
-rw-r--r-- | sql/partition_info.cc      |   2
-rw-r--r-- | sql/slave.cc               |  34
-rw-r--r-- | sql/sql_class.h            |   4
-rw-r--r-- | sql/sql_lex.cc             |   2
-rw-r--r-- | sql/sql_lex.h              |  12
-rw-r--r-- | sql/sql_parse.cc           |  18
-rw-r--r-- | sql/sql_select.cc          | 173
-rw-r--r-- | sql/sql_select.h           |   2
-rw-r--r-- | sql/sql_yacc.yy            |   1
-rw-r--r-- | sql/wsrep_trans_observer.h |   4
18 files changed, 165 insertions, 133 deletions
diff --git a/sql/gcalc_tools.cc b/sql/gcalc_tools.cc
index 307f063fb43..25c80a7a796 100644
--- a/sql/gcalc_tools.cc
+++ b/sql/gcalc_tools.cc
@@ -132,7 +132,7 @@ int Gcalc_function::count_internal(const char *cur_func, uint set_type,
   int mask= (c_op & op_not) ? 1:0;
   uint n_ops= c_op & ~(op_any | op_not | v_mask);
   uint n_shape= c_op & ~(op_any | op_not | v_mask); /* same as n_ops */
-  value v_state= (value) (c_op & v_mask);
+  op_type v_state= (op_type) (c_op & v_mask);
   int result= 0;
   const char *sav_cur_func= cur_func;
diff --git a/sql/gcalc_tools.h b/sql/gcalc_tools.h
index e625b355d95..bb1f473e180 100644
--- a/sql/gcalc_tools.h
+++ b/sql/gcalc_tools.h
@@ -52,17 +52,15 @@ private:
   int count_internal(const char *cur_func, uint set_type, const char **end);
 
 public:
-  enum value
-  {
-    v_empty= 0x0000000,
-    v_find_t= 0x1000000,
-    v_find_f= 0x2000000,
-    v_t_found= 0x3000000,
-    v_f_found= 0x4000000,
-    v_mask= 0x7000000
-  };
   enum op_type
   {
+    v_empty= 0x00000000,
+    v_find_t= 0x01000000,
+    v_find_f= 0x02000000,
+    v_t_found= 0x03000000,
+    v_f_found= 0x04000000,
+    v_mask= 0x07000000,
+
     op_not= 0x80000000,
     op_shape= 0x00000000,
     op_union= 0x10000000,
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 29763447e6e..a9010e76389 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -1616,7 +1616,6 @@ public:
     for (; part_id < part_id_end; ++part_id)
     {
       handler *file= m_file[part_id];
-      DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id));
       file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_OPEN);
       part_recs+= file->stats.records;
     }
diff --git a/sql/handler.h b/sql/handler.h
index e8960d095e5..ed24f160385 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -965,6 +965,7 @@ typedef struct xid_t XID;
 */
 typedef uint Binlog_file_id;
 const Binlog_file_id MAX_binlog_id= UINT_MAX;
+const my_off_t MAX_off_t = (~(my_off_t) 0);
 /*
   Compound binlog-id and byte offset of transaction's first event
   in a sequence (e.g the recovery sequence) of binlog files.
@@ -979,14 +980,22 @@ struct xid_recovery_member
   my_xid xid;
   uint in_engine_prepare;  // number of engines that have xid prepared
   bool decided_to_commit;
-  Binlog_offset binlog_coord; // semisync recovery binlog offset
+  /*
+    Semisync recovery binlog offset. It's initialized with the maximum
+    unreachable offset. The max value will remain for any transaction
+    not found in binlog to yield its rollback decision as it's guaranteed
+    to be within a truncated tail part of the binlog.
+  */
+  Binlog_offset binlog_coord;
   XID *full_xid;           // needed by wsrep or past it recovery
   decltype(::server_id) server_id;    // server id of orginal server
 
   xid_recovery_member(my_xid xid_arg, uint prepare_arg, bool decided_arg,
                       XID *full_xid_arg, decltype(::server_id) server_id_arg)
     : xid(xid_arg), in_engine_prepare(prepare_arg),
-      decided_to_commit(decided_arg), full_xid(full_xid_arg) , server_id(server_id_arg) {};
+      decided_to_commit(decided_arg),
+      binlog_coord(Binlog_offset(MAX_binlog_id, MAX_off_t)),
+      full_xid(full_xid_arg), server_id(server_id_arg) {};
 };
 
 /* for recover() handlerton call */
diff --git a/sql/item.h b/sql/item.h
index f6c123e79d4..c3251259b43 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -6648,9 +6648,9 @@ public:
   bool get_date(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate) override;
   bool val_native(THD *thd, Native *to) override;
   bool val_native_result(THD *thd, Native *to) override;
-  longlong val_datetime_packed(THD *thd)
+  longlong val_datetime_packed(THD *thd) override
   { return Item::val_datetime_packed(thd); }
-  longlong val_time_packed(THD *thd)
+  longlong val_time_packed(THD *thd) override
   { return Item::val_time_packed(thd); }
 
   /* Result variants */
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index ca757b6b94a..d33202d0a42 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -4255,9 +4255,9 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
   result.set_charset(collation.collation);
   result_field= 0;
   null_value= 1;
-  max_length= (uint32)MY_MIN(thd->variables.group_concat_max_len
-                             / collation.collation->mbminlen
-                             * collation.collation->mbmaxlen, UINT_MAX32);
+  max_length= (uint32) MY_MIN((ulonglong) thd->variables.group_concat_max_len
+                              / collation.collation->mbminlen
+                              * collation.collation->mbmaxlen, UINT_MAX32);
 
   uint32 offset;
   if (separator->needs_conversion(separator->length(), separator->charset(),
diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc
index 713c6ac4c3f..1e79e6444ff 100644
--- a/sql/mysql_install_db.cc
+++ b/sql/mysql_install_db.cc
@@ -333,7 +333,7 @@ static char *init_bootstrap_command_line(char *cmdline, size_t size)
     " %s"
     " --bootstrap"
     " --datadir=."
-    " --loose-innodb-buffer-pool-size=10M"
+    " --loose-innodb-buffer-pool-size=20M"
     "\""
     , mysqld_path, opt_verbose_bootstrap ? "--console" : "");
   return cmdline;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index f444fb19db7..ea1bfa1aaf3 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -5374,7 +5374,9 @@ static int init_server_components()
   if (ha_recover(0))
     unireg_abort(1);
 
+#ifndef EMBEDDED_LIBRARY
   start_handle_manager();
+#endif
   if (opt_bin_log)
   {
     int error;
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 294834b84f0..6f3f6ec211a 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -891,8 +891,6 @@ void partition_info::vers_check_limit(THD *thd)
   uint32 part_id= vers_info->hist_part->id * sub_factor;
   const uint32 part_id_end= part_id + sub_factor;
   DBUG_ASSERT(part_id_end <= num_parts * sub_factor);
-  for (; part_id < part_id_end; ++part_id)
-    bitmap_set_bit(&read_partitions, part_id);
 
   ha_partition *hp= (ha_partition*)(table->file);
   ha_rows hist_rows= hp->part_records(vers_info->hist_part);
diff --git a/sql/slave.cc b/sql/slave.cc
index a8aa349b6cc..f9532752d3f 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -7154,8 +7154,9 @@ dbug_gtid_accept:
       mi->using_gtid != Master_info::USE_GTID_NO &&
       mi->events_queued_since_last_gtid > 0 &&
       ( (mi->last_queued_gtid_standalone &&
-         !Log_event::is_part_of_group((Log_event_type)(uchar)
-                                      buf[EVENT_TYPE_OFFSET])) ||
+         (LOG_EVENT_IS_QUERY((Log_event_type)(uchar)
+                             buf[EVENT_TYPE_OFFSET]) ||
+          (uchar)buf[EVENT_TYPE_OFFSET] == INCIDENT_EVENT)) ||
        (!mi->last_queued_gtid_standalone &&
         ((uchar)buf[EVENT_TYPE_OFFSET] == XID_EVENT ||
          (uchar)buf[EVENT_TYPE_OFFSET] == XA_PREPARE_LOG_EVENT ||
@@ -7189,33 +7190,8 @@ dbug_gtid_accept:
       mi->gtid_current_pos.update(&mi->last_queued_gtid);
       mi->events_queued_since_last_gtid= 0;
 
-      if (unlikely(gtid_skip_enqueue))
-      {
-        error= ER_SLAVE_RELAY_LOG_WRITE_FAILURE;
-        sql_print_error("Recieved a group closing %s event "
-                        "at %llu position in the group while there are "
-                        "still %llu events to skip upon reconnecting; "
-                        "the last seen GTID is %u-%u-%llu",
-                        Log_event::get_type_str((Log_event_type) (uchar)
-                                                buf[EVENT_TYPE_OFFSET]),
-                        (mi->events_queued_since_last_gtid -
-                         mi->gtid_reconnect_event_skip_count),
-                        mi->events_queued_since_last_gtid,
-                        mi->last_queued_gtid);
-        goto err;
-      }
-      else
-      {
-        /*
-          The whole of the current event group is queued. So in case of
-          reconnect we can start from after the current GTID.
-        */
-        mi->gtid_current_pos.update(&mi->last_queued_gtid);
-        mi->events_queued_since_last_gtid= 0;
-
-        /* Reset the domain_id_filter flag. */
-        mi->domain_id_filter.reset_filter();
-      }
+      /* Reset the domain_id_filter flag. */
+      mi->domain_id_filter.reset_filter();
     }
 
 skip_relay_logging:
diff --git a/sql/sql_class.h b/sql/sql_class.h
index f8985d2093d..a6f51d4b28f 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -5595,8 +5595,8 @@ my_eof(THD *thd)
 
 inline date_conv_mode_t sql_mode_for_dates(THD *thd)
 {
-  static_assert((date_conv_mode_t::KNOWN_MODES &
-                 time_round_mode_t::KNOWN_MODES) == 0,
+  static_assert((ulonglong(date_conv_mode_t::KNOWN_MODES) &
+                 ulonglong(time_round_mode_t::KNOWN_MODES)) == 0,
                 "date_conv_mode_t and time_round_mode_t must use different "
                 "bit values");
   static_assert(MODE_NO_ZERO_DATE == date_mode_t::NO_ZERO_DATE &&
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 1a3bfa232e8..2fb98f5aa88 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -7800,7 +7800,7 @@ bool LEX::maybe_start_compound_statement(THD *thd)
     if (!make_sp_head(thd, NULL, &sp_handler_procedure, DEFAULT_AGGREGATE))
       return true;
     sphead->set_suid(SP_IS_NOT_SUID);
-    sphead->set_body_start(thd, thd->m_parser_state->m_lip.get_cpp_ptr());
+    sphead->set_body_start(thd, thd->m_parser_state->m_lip.get_cpp_tok_start());
   }
   return false;
 }
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 4625e60703b..3233234fd39 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -2047,8 +2047,7 @@ public:
     @retval nonzero if the statement is a row injection
   */
   inline bool is_stmt_row_injection() const {
-    return binlog_stmt_flags &
-      (1U << (BINLOG_STMT_UNSAFE_COUNT + BINLOG_STMT_TYPE_ROW_INJECTION));
+    return binlog_stmt_flags & (1U << BINLOG_STMT_TYPE_ROW_INJECTION);
   }
 
   /**
@@ -2058,8 +2057,7 @@ public:
   */
   inline void set_stmt_row_injection() {
     DBUG_ENTER("set_stmt_row_injection");
-    binlog_stmt_flags|=
-      (1U << (BINLOG_STMT_UNSAFE_COUNT + BINLOG_STMT_TYPE_ROW_INJECTION));
+    binlog_stmt_flags|= (1U << BINLOG_STMT_TYPE_ROW_INJECTION);
     DBUG_VOID_RETURN;
   }
 
@@ -2335,7 +2333,7 @@ private:
       The statement is a row injection (i.e., either a BINLOG statement or
       a row event executed by the slave SQL thread).
     */
-    BINLOG_STMT_TYPE_ROW_INJECTION = 0,
+    BINLOG_STMT_TYPE_ROW_INJECTION = BINLOG_STMT_UNSAFE_COUNT,
 
     /** The last element of this enumeration type. */
     BINLOG_STMT_TYPE_COUNT
@@ -2349,8 +2347,8 @@ private:
     - The low BINLOG_STMT_UNSAFE_COUNT bits indicate the types of
       unsafeness that the current statement has.
 
-    - The next BINLOG_STMT_TYPE_COUNT bits indicate if the statement
-      is of some special type.
+    - The next BINLOG_STMT_TYPE_COUNT-BINLOG_STMT_TYPE_COUNT bits indicate if
+      the statement is of some special type.
 
     This must be a member of LEX, not of THD: each stored procedure
     needs to remember its unsafeness state between calls and each
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index cdafc3f720e..f8f61c66df0 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1505,22 +1505,6 @@ static bool deny_updates_if_read_only_option(THD *thd, TABLE_LIST *all_tables)
 }
 
 #ifdef WITH_WSREP
-static my_bool wsrep_read_only_option(THD *thd, TABLE_LIST *all_tables)
-{
-  int opt_readonly_saved = opt_readonly;
-  privilege_t flag_saved= thd->security_ctx->master_access & PRIV_IGNORE_READ_ONLY;
-
-  opt_readonly = 0;
-  thd->security_ctx->master_access &= ~PRIV_IGNORE_READ_ONLY;
-
-  my_bool ret = !deny_updates_if_read_only_option(thd, all_tables);
-
-  opt_readonly = opt_readonly_saved;
-  thd->security_ctx->master_access |= flag_saved;
-
-  return ret;
-}
-
 static void wsrep_copy_query(THD *thd)
 {
   thd->wsrep_retry_command = thd->get_command();
@@ -7848,7 +7832,7 @@ static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
   {
     bool is_autocommit=
       !thd->in_multi_stmt_transaction_mode()                  &&
-      wsrep_read_only_option(thd, thd->lex->query_tables);
+      !thd->wsrep_applier;
     bool retry_autocommit;
     do
     {
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 4cc23c694d7..52deb20284c 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -111,12 +111,19 @@ static void optimize_straight_join(JOIN *join, table_map join_tables);
 static bool greedy_search(JOIN *join, table_map remaining_tables,
                           uint depth, uint prune_level,
                           uint use_cond_selectivity);
-static bool best_extension_by_limited_search(JOIN *join,
-                                             table_map remaining_tables,
-                                             uint idx, double record_count,
-                                             double read_time, uint depth,
-                                             uint prune_level,
-                                             uint use_cond_selectivity);
+enum enum_best_search {
+  SEARCH_ABORT= -2,
+  SEARCH_ERROR= -1,
+  SEARCH_OK= 0,
+  SEARCH_FOUND_EDGE=1
+};
+static enum_best_search
+best_extension_by_limited_search(JOIN *join,
+                                 table_map remaining_tables,
+                                 uint idx, double record_count,
+                                 double read_time, uint depth,
+                                 uint prune_level,
+                                 uint use_cond_selectivity);
 static uint determine_search_depth(JOIN* join);
 C_MODE_START
 static int join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2);
@@ -384,6 +391,7 @@ POSITION::POSITION()
   range_rowid_filter_info= 0;
   ref_depend_map= dups_producing_tables= 0;
   inner_tables_handled_with_other_sjs= 0;
+  type= JT_UNKNOWN;
   dups_weedout_picker.set_empty();
   firstmatch_picker.set_empty();
   loosescan_picker.set_empty();
@@ -8445,6 +8453,7 @@ best_access_path(JOIN *join,
   pos->records_read= records;
   pos->read_time= best;
   pos->key= best_key;
+  pos->type= best_type;
   pos->table= s;
   pos->ref_depend_map= best_ref_depends_map;
   pos->loosescan_picker.loosescan_key= MAX_KEY;
@@ -9100,9 +9109,12 @@ greedy_search(JOIN *join,
   do {
     /* Find the extension of the current QEP with the lowest cost */
     join->best_read= DBL_MAX;
-    if (best_extension_by_limited_search(join, remaining_tables, idx, record_count,
-                                         read_time, search_depth, prune_level,
-                                         use_cond_selectivity))
+    if ((int) best_extension_by_limited_search(join, remaining_tables, idx,
+                                               record_count,
+                                               read_time, search_depth,
+                                               prune_level,
+                                               use_cond_selectivity) <
+        (int) SEARCH_OK)
       DBUG_RETURN(TRUE);
     /*
       'best_read < DBL_MAX' means that optimizer managed to find
@@ -9738,6 +9750,28 @@ exit:
 }
 
 
+/*
+  Check if the table is an EQ_REF or similar table and there is no cost
+  to gain by moveing it to a later stage.
+  We call such a table a edge table (or hanging leaf) as it will read at
+  most one row and will not add to the number of row combinations in the join.
+*/
+
+static inline enum_best_search
+check_if_edge_table(POSITION *pos,
+                    double pushdown_cond_selectivity)
+{
+
+  if ((pos->type == JT_EQ_REF ||
+       (pos->type == JT_REF &&
+        pos->records_read == 1 &&
+        !pos->range_rowid_filter_info)) &&
+      pushdown_cond_selectivity >= 0.999)
+    return SEARCH_FOUND_EDGE;
+  return SEARCH_OK;
+}
+
+
 /**
   Find a good, possibly optimal, query execution plan (QEP) by a
   possibly exhaustive search.
@@ -9852,12 +9886,17 @@ exit:
                            pushed to a table should be taken into account
 
   @retval
-    FALSE ok
+    enum_best_search::SEARCH_OK          All fine
   @retval
-    TRUE  Fatal error
+    enum_best_search::SEARCH_FOUND_EDGE  All remaning tables are edge tables
+  @retval
+    enum_best_search::SEARCH_ABORT       Killed by user
+  @retval
+    enum_best_search::SEARCH_ERROR       Fatal error
 */
 
-static bool
+
+static enum_best_search
 best_extension_by_limited_search(JOIN *join,
                                  table_map remaining_tables,
                                  uint idx,
@@ -9867,9 +9906,17 @@ best_extension_by_limited_search(JOIN *join,
                                  uint prune_level,
                                  uint use_cond_selectivity)
 {
-  DBUG_ENTER("best_extension_by_limited_search");
-
   THD *thd= join->thd;
+  /*
+     'join' is a partial plan with lower cost than the best plan so far,
+     so continue expanding it further with the tables in 'remaining_tables'.
+  */
+  JOIN_TAB *s;
+  double best_record_count= DBL_MAX;
+  double best_read_time=    DBL_MAX;
+  bool disable_jbuf= join->thd->variables.join_cache_level == 0;
+  enum_best_search best_res;
+  DBUG_ENTER("best_extension_by_limited_search");
 
   DBUG_EXECUTE_IF("show_explain_probe_best_ext_lim_search",
                   if (dbug_user_var_equals_int(thd,
                  );
 
   if (unlikely(thd->check_killed()))  // Abort
-    DBUG_RETURN(TRUE);
-
-  DBUG_EXECUTE("opt", print_plan(join, idx, read_time, record_count, idx,
-                                 "SOFAR:"););
-
-  /*
-     'join' is a partial plan with lower cost than the best plan so far,
-     so continue expanding it further with the tables in 'remaining_tables'.
-  */
-  JOIN_TAB *s;
-  double best_record_count= DBL_MAX;
-  double best_read_time=    DBL_MAX;
-  bool disable_jbuf= join->thd->variables.join_cache_level == 0;
+    DBUG_RETURN(SEARCH_ABORT);
 
   DBUG_EXECUTE("opt", print_plan(join, idx, record_count, read_time, read_time,
                                  "part_plan"););
@@ -9907,15 +9942,18 @@ best_extension_by_limited_search(JOIN *join,
   for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
   {
     table_map real_table_bit= s->table->map;
-    if ((remaining_tables & real_table_bit) &&
-        (allowed_tables & real_table_bit) &&
+    DBUG_ASSERT(remaining_tables & real_table_bit);
+
+    if ((allowed_tables & real_table_bit) &&
         !(remaining_tables & s->dependent) &&
-        (!idx || !check_interleaving_with_nj(s)))
+        !check_interleaving_with_nj(s))
     {
       double current_record_count, current_read_time;
+      double partial_join_cardinality;
       POSITION *position= join->positions + idx;
-
+      POSITION loose_scan_pos;
       Json_writer_object trace_one_table(thd);
+
       if (unlikely(thd->trace_started()))
       {
         trace_plan_prefix(join, idx, remaining_tables);
       }
 
       /* Find the best access method from 's' to the current partial plan */
-      POSITION loose_scan_pos;
       best_access_path(join, s, remaining_tables, join->positions, idx,
                        disable_jbuf, record_count, position, &loose_scan_pos);
@@ -9998,32 +10035,51 @@ best_extension_by_limited_search(JOIN *join,
       double pushdown_cond_selectivity= 1.0;
       if (use_cond_selectivity > 1)
         pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
-                                                          remaining_tables &
+                                                          remaining_tables &
                                                           ~real_table_bit);
       join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
 
-      if (unlikely(thd->trace_started()) && pushdown_cond_selectivity < 1.0)
-        trace_one_table.add("selectivity", pushdown_cond_selectivity);
+      partial_join_cardinality= (current_record_count *
+                                 pushdown_cond_selectivity);
 
-      double partial_join_cardinality= current_record_count *
-                                        pushdown_cond_selectivity;
-      if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
-      { /* Recursively expand the current partial plan */
+      if (unlikely(thd->trace_started()))
+      {
+        if (pushdown_cond_selectivity < 1.0)
+        {
+          trace_one_table.add("selectivity", pushdown_cond_selectivity);
+          trace_one_table.add("estimated_join_cardinality",
+                              partial_join_cardinality);
+        }
+      }
+
+      if ((search_depth > 1) && (remaining_tables & ~real_table_bit) &
+          allowed_tables)
+      {
+        /* Recursively expand the current partial plan */
         swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
         Json_writer_array trace_rest(thd, "rest_of_plan");
-        if (best_extension_by_limited_search(join,
-                                             remaining_tables & ~real_table_bit,
-                                             idx + 1,
-                                             partial_join_cardinality,
-                                             current_read_time,
-                                             search_depth - 1,
-                                             prune_level,
-                                             use_cond_selectivity))
-          DBUG_RETURN(TRUE);
+        best_res=
+          best_extension_by_limited_search(join,
+                                           remaining_tables &
+                                           ~real_table_bit,
+                                           idx + 1,
+                                           partial_join_cardinality,
+                                           current_read_time,
+                                           search_depth - 1,
+                                           prune_level,
+                                           use_cond_selectivity);
+        if ((int) best_res < (int) SEARCH_OK)
+          DBUG_RETURN(best_res);                // Abort
         swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
+        if (best_res == SEARCH_FOUND_EDGE &&
+            check_if_edge_table(join->positions+ idx,
+                                pushdown_cond_selectivity) !=
+            SEARCH_FOUND_EDGE)
+          best_res= SEARCH_OK;
       }
       else
-      { /*
+      {
+        /*
           'join' is either the best partial QEP with 'search_depth' relations,
           or the best complete QEP so far, whichever is smaller.
        */
@@ -10032,15 +10088,13 @@ best_extension_by_limited_search(JOIN *join,
             join->positions[join->const_tables].table->table)
         {
           /*
-             We may have to make a temp table, note that this is only a
-             heuristic since we cannot know for sure at this point.
-             Hence it may be wrong.
+            We may have to make a temp table, note that this is only a
+            heuristic since we cannot know for sure at this point.
+            Hence it may be wrong.
           */
           trace_one_table.add("cost_for_sorting", current_record_count);
           current_read_time= COST_ADD(current_read_time, current_record_count);
         }
-        trace_one_table.add("estimated_join_cardinality",
-                            partial_join_cardinality);
         if (current_read_time < join->best_read)
         {
           memcpy((uchar*) join->best_positions, (uchar*) join->positions,
@@ -10053,12 +10107,19 @@ best_extension_by_limited_search(JOIN *join,
                                        read_time,
                                        current_read_time,
                                        "full_plan"););
+        best_res= check_if_edge_table(join->positions + idx,
+                                      pushdown_cond_selectivity);
       }
       restore_prev_nj_state(s);
       restore_prev_sj_state(remaining_tables, s, idx);
+      if (best_res == SEARCH_FOUND_EDGE)
+      {
+        trace_one_table.add("pruned_by_hanging_leaf", true);
+        DBUG_RETURN(best_res);
+      }
     }
   }
-  DBUG_RETURN(FALSE);
+  DBUG_RETURN(SEARCH_OK);
 }
@@ -17210,7 +17271,6 @@ static uint reset_nj_counters(JOIN *join, List<TABLE_LIST> *join_list)
 
 static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
 {
-  TABLE_LIST *next_emb= next_tab->table->pos_in_table_list->embedding;
   JOIN *join= next_tab->join;
 
   if (join->cur_embedding_map & ~next_tab->embedding_map)
@@ -17222,6 +17282,7 @@ static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
     return TRUE;
   }
 
+  TABLE_LIST *next_emb= next_tab->table->pos_in_table_list->embedding;
   /*
     Do update counters for "pairs of brackets" that we've left (marked as
     X,Y,Z in the above picture)
diff --git a/sql/sql_select.h b/sql/sql_select.h
index e854792b073..4a2929207a5 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -989,6 +989,8 @@ public:
   */
   enum sj_strategy_enum sj_strategy;
 
+  /* Type of join (EQ_REF, REF etc) */
+  enum join_type type;
   /*
     Valid only after fix_semijoin_strategies_for_picked_join_order() call:
     if sj_strategy!=SJ_OPT_NONE, this is the number of subsequent tables that
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index f4f6aca59b8..93a0163b073 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -2361,6 +2361,7 @@ create:
             lex->create_info.default_table_charset= NULL;
             lex->name= null_clex_str;
             lex->create_last_non_select_table= lex->last_table();
+            lex->inc_select_stack_outer_barrier();
           }
           create_body
           {
diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h
index 70759e381a5..f99d33094df 100644
--- a/sql/wsrep_trans_observer.h
+++ b/sql/wsrep_trans_observer.h
@@ -232,6 +232,10 @@ static inline int wsrep_before_prepare(THD* thd, bool all)
   WSREP_DEBUG("wsrep_before_prepare: %d", wsrep_is_real(thd, all));
   int ret= 0;
   DBUG_ASSERT(wsrep_run_commit_hook(thd, all));
+  if ((ret= thd->wsrep_parallel_slave_wait_for_prior_commit()))
+  {
+    DBUG_RETURN(ret);
+  }
   if ((ret= thd->wsrep_cs().before_prepare()) == 0)
   {
     DBUG_ASSERT(!thd->wsrep_trx().ws_meta().gtid().is_undefined());