From 42802ad66c49b6de11b37c7ea4e4658ccc5a94aa Mon Sep 17 00:00:00 2001 From: Brandon Nesterenko Date: Wed, 14 Sep 2022 15:08:12 -0600 Subject: MDEV-25616 XA PREPARE event group is not binlogged when.. the only query of the XA transaction is against a non-transactional table and errors out: XA BEGIN 'x'; --error ER_DUP_ENTRY INSERT INTO t1 VALUES (1),(1); XA END 'x'; XA PREPARE 'x'; The binlogging pattern is correctly started, as expected, with the errored-out Query event or its ROW format events, but there is no empty XA_prepare_log_event group. The following XA COMMIT 'x'; therefore should not be logged either, but it is. The bug is fixed by properly maintaining a read-write binlog hton property and using it to enforce correct binlogging decisions. Specifically, in the case from the bug description, XA COMMIT is no longer binlogged in either situation: when it is issued in the same connection, or externally after a disconnect. The same continues to apply to an empty XA transaction that does not change any data in any of the transactional engines involved. --- sql/log.cc | 52 ++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 6 deletions(-) (limited to 'sql') diff --git a/sql/log.cc b/sql/log.cc index ec212825787..d15e7b5dd88 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2003,25 +2003,56 @@ static int binlog_xa_recover_dummy(handlerton *hton __attribute__((unused)), static int binlog_commit_by_xid(handlerton *hton, XID *xid) { + int rc= 0; THD *thd= current_thd; + /* the asserted state can't be reachable with xa commit */ + DBUG_ASSERT(!thd->get_stmt_da()->is_error() || + thd->get_stmt_da()->sql_errno() != ER_XA_RBROLLBACK); + /* + This is a recovered user xa transaction commit. + Create a "temporary" binlog transaction to write the commit record + into binlog. + */ + THD_TRANS trans; + trans.ha_list= NULL; + + thd->ha_data[hton->slot].ha_info[1].register_ha(&trans, hton); + thd->ha_data[binlog_hton->slot].ha_info[1].set_trx_read_write(); (void) thd->binlog_setup_trx_data(); DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_COMMIT); - return binlog_commit(hton, thd, TRUE); + rc= binlog_commit(hton, thd, TRUE); + thd->ha_data[binlog_hton->slot].ha_info[1].reset(); + + return rc; } static int binlog_rollback_by_xid(handlerton *hton, XID *xid) { + int rc= 0; THD *thd= current_thd; + if (thd->get_stmt_da()->is_error() && + thd->get_stmt_da()->sql_errno() == ER_XA_RBROLLBACK) + return rc; + + THD_TRANS trans; + trans.ha_list= NULL; + + thd->ha_data[hton->slot].ha_info[1].register_ha(&trans, hton); + thd->ha_data[hton->slot].ha_info[1].set_trx_read_write(); (void) thd->binlog_setup_trx_data(); DBUG_ASSERT(thd->lex->sql_command == SQLCOM_XA_ROLLBACK || (thd->transaction->xid_state.get_state_code() == XA_ROLLBACK_ONLY)); - return binlog_rollback(hton, thd, TRUE); + + rc= binlog_rollback(hton, thd, TRUE); + thd->ha_data[hton->slot].ha_info[1].reset(); + + return rc; } @@ -2159,10 +2190,16 @@ static int binlog_commit(handlerton *hton, THD *thd, bool all) } if (cache_mngr->trx_cache.empty() && - thd->transaction->xid_state.get_state_code() != XA_PREPARED) + (thd->transaction->xid_state.get_state_code() != XA_PREPARED || + !(thd->ha_data[binlog_hton->slot].ha_info[1].is_started() && + thd->ha_data[binlog_hton->slot].ha_info[1].is_trx_read_write()))) { /* - we're here because cache_log was flushed in MYSQL_BIN_LOG::log_xid() + This is an empty transaction commit (both the regular and xa), + or such transaction xa-prepare or + either one's statement having no effect on the transactional cache + as any prior to it. 
+ The empty xa-prepare sinks in only when binlog is read-only. */ cache_mngr->reset(false, true); THD_STAGE_INFO(thd, org_stage); @@ -2247,10 +2284,12 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) } if (!cache_mngr->trx_cache.has_incident() && cache_mngr->trx_cache.empty() && - thd->transaction->xid_state.get_state_code() != XA_PREPARED) + (thd->transaction->xid_state.get_state_code() != XA_PREPARED || + !(thd->ha_data[binlog_hton->slot].ha_info[1].is_started() && + thd->ha_data[binlog_hton->slot].ha_info[1].is_trx_read_write()))) { /* - we're here because cache_log was flushed in MYSQL_BIN_LOG::log_xid() + The same comments apply as in the binlog commit method's branch. */ cache_mngr->reset(false, true); thd->reset_binlog_for_next_statement(); @@ -10290,6 +10329,7 @@ int TC_LOG_BINLOG::unlog_xa_prepare(THD *thd, bool all) /* an empty XA-prepare event group is logged */ rc= write_empty_xa_prepare(thd, cache_mngr); // normally gains need_unlog trans_register_ha(thd, true, binlog_hton, 0); // do it for future commmit + thd->ha_data[binlog_hton->slot].ha_info[1].set_trx_read_write(); } if (rw_count == 0 || !cache_mngr->need_unlog) return rc; -- cgit v1.2.1 From 8c5d323326d9d527e9a5e08c69eb6085953eb130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 25 Oct 2022 07:33:35 +0300 Subject: Additional fixes * galera_many_rows : reduce the time used * wsrep_thd.cc : remove incorrect assertion * disabled.def : disable failing test cases --- sql/wsrep_thd.h | 1 - 1 file changed, 1 deletion(-) (limited to 'sql') diff --git a/sql/wsrep_thd.h b/sql/wsrep_thd.h index 31f95510c5e..e9add662e3f 100644 --- a/sql/wsrep_thd.h +++ b/sql/wsrep_thd.h @@ -184,7 +184,6 @@ void wsrep_reset_threadvars(THD *); static inline void wsrep_override_error(THD *thd, uint error, const char *format= 0, ...) { - DBUG_ASSERT(error != ER_ERROR_DURING_COMMIT); Diagnostics_area *da= thd->get_stmt_da(); if (da->is_ok() || da->is_eof() || -- cgit v1.2.1 From 32158be720b85a3ae0e0eeebe1277c36f86dca38 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Fri, 21 Oct 2022 19:50:07 +0200 Subject: MDEV-29811 server advertises ssl even if it's unusable. Abort startup, if SSL setup fails. 
Also, for the server, always check that the certificate matches the private key (even if ssl_cert is not set, OpenSSL will try to use a default one) --- sql/mysqld.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'sql') diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 8c70a0d3145..5d58d42faf9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5037,10 +5037,9 @@ static void init_ssl() DBUG_PRINT("info",("ssl_acceptor_fd: %p", ssl_acceptor_fd)); if (!ssl_acceptor_fd) { - sql_print_warning("Failed to setup SSL"); - sql_print_warning("SSL error: %s", sslGetErrString(error)); - opt_use_ssl = 0; - have_ssl= SHOW_OPTION_DISABLED; + sql_print_error("Failed to setup SSL"); + sql_print_error("SSL error: %s", sslGetErrString(error)); + unireg_abort(1); } if (global_system_variables.log_warnings > 0) { -- cgit v1.2.1 From f1bbc1cd19d0d81fee5433efcb570a8845172241 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 25 Oct 2022 11:53:39 +0400 Subject: MDEV-28545 MyISAM reorganize partition corrupt older table format The ALTER-related code cannot do both of the following at the same time: - modify partitions - change column data types Explicit changing of a column data type together with a partition change is prohibited by the parser, so this is not allowed and returns a syntax error: ALTER TABLE t MODIFY ts BIGINT, DROP PARTITION p1; This fix additionally disables implicit data type upgrade (e.g. from "MariaDB 5.3 TIME" to "MySQL 5.6 TIME", or the other way around according to the current mysql56_temporal_format) in case of an ALTER modifying partitions, e.g.: ALTER TABLE t DROP PARTITION p1; In such commands, now only the partition change happens, while the data types stay unchanged. One can additionally run: ALTER TABLE t FORCE; either before or after the ALTER modifying partitions to upgrade data types according to mysql56_temporal_format. --- sql/field.cc | 2 -- sql/field.h | 7 +++++++ sql/sql_insert.cc | 10 ++++++++++ sql/sql_table.cc | 12 ++++++++++++ 4 files changed, 29 insertions(+), 2 deletions(-) (limited to 'sql') diff --git a/sql/field.cc b/sql/field.cc index 853b0c62f14..bf27780e776 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -11020,8 +11020,6 @@ Column_definition::Column_definition(THD *thd, Field *old_field, type_handler()->Column_definition_reuse_fix_attributes(thd, this, old_field); - type_handler()->Column_definition_implicit_upgrade(this); - /* Copy the default (constant/function) from the column object orig_field, if supplied. We do this if all these conditions are met: diff --git a/sql/field.h b/sql/field.h index 7534a506edc..9d40caf0932 100644 --- a/sql/field.h +++ b/sql/field.h @@ -5203,6 +5203,13 @@ public: bool vers_check_timestamp(const Lex_table_name &table_name) const; bool vers_check_bigint(const Lex_table_name &table_name) const; + + static void upgrade_data_types(List &list) + { + List_iterator it(list); + while (Create_field *f= it++) + f->type_handler()->Column_definition_implicit_upgrade(f); + } }; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index aeb39871025..76fd6385041 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -4346,6 +4346,16 @@ TABLE *select_create::create_table_from_items(THD *thd, List *items, alter_info->create_list.push_back(cr_field, thd->mem_root); } + /* + Item*::type_handler() always returns pointers to + type_handler_{time2|datetime2|timestamp2} no matter what + the current mysql56_temporal_format says. + Let's convert them according to mysql56_temporal_format. 
+ QQ: This perhaps should eventually be fixed to have Item*::type_handler() + respect mysql56_temporal_format, and remove the upgrade from here. + */ + Create_field::upgrade_data_types(alter_info->create_list); + if (create_info->check_fields(thd, alter_info, create_table->table_name, create_table->db, diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 6aae927800e..efa5c06dd2a 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -10068,6 +10068,18 @@ do_continue:; set_table_default_charset(thd, create_info, alter_ctx.db); + /* + The ALTER related code cannot alter partitions and change column data types + at the same time. So in case of partition change statements like: + ALTER TABLE t1 DROP PARTITION p1; + we skip implicit data type upgrade (such as "MariaDB 5.3 TIME" to + "MySQL 5.6 TIME" or vice versa according to mysql56_temporal_format). + Note, one can run a separate "ALTER TABLE t1 FORCE;" statement + before or after the partition change ALTER statement to upgrade data types. + */ + if (IF_PARTITIONING(!fast_alter_partition, 1)) + Create_field::upgrade_data_types(alter_info->create_list); + if (create_info->check_fields(thd, alter_info, table_list->table_name, table_list->db) || create_info->fix_period_fields(thd, alter_info)) -- cgit v1.2.1 From 72e79eaaf3e4619bbaf900f6710ffb6a00ff95bf Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 25 Oct 2022 20:24:11 +0200 Subject: cleanup: put casts in a separate statement remove useless if() --- sql/item_geofunc.cc | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) (limited to 'sql') diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index f2cc2e61b96..b40105aaf36 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -2601,51 +2601,53 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, switch (g2->get_class_info()->m_type_id) { case Geometry::wkb_point: - // Optimization for point-point case + { + Gis_point *g2p= static_cast(g2); + // Optimization for point-point case if (g1->get_class_info()->m_type_id == Geometry::wkb_point) { - res= static_cast(g2)->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &error); } else { // Optimization for single point in Multipoint if (g1->get_data_size() == len) { - res= static_cast(g2)->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &error); } else { // There are multipoints in g1 // g1 is MultiPoint and calculate MP.sphericaldistance from g2 Point if (g1->get_data_size() != GET_SIZE_ERROR) - static_cast(g2)->spherical_distance_multipoints( - (Gis_multi_point *)g1, r, &res, &error); + g2p->spherical_distance_multipoints(g1, r, &res, &error); } } break; + } case Geometry::wkb_multipoint: // Optimization for point-point case if (g1->get_class_info()->m_type_id == Geometry::wkb_point) { + Gis_point *g1p= static_cast(g1); // Optimization for single point in Multipoint g2 if (g2->get_data_size() == len) { - res= static_cast(g1)->calculate_haversine(g2, r, &error); + res= g1p->calculate_haversine(g2, r, &error); } else { if (g2->get_data_size() != GET_SIZE_ERROR) // g1 is a point (casted to multi_point) and g2 multipoint - static_cast(g1)->spherical_distance_multipoints( - (Gis_multi_point *)g2, r, &res, &error); + g1p->spherical_distance_multipoints(g2, r, &res, &error); } } else { + Gis_multi_point *g1mp= static_cast(g1); // Multipoints in g1 and g2 - no optimization - static_cast(g1)->spherical_distance_multipoints( - (Gis_multi_point *)g2, r, &res, &error); + 
g1mp->spherical_distance_multipoints(g2, r, &res, &error); } break; @@ -2654,16 +2656,12 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, break; } - if (res < 0) - goto handle_error; - - handle_error: - if (error > 0) - my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), - "Longitude should be [-180,180]", "ST_Distance_Sphere"); - else if(error < 0) - my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), - "Latitude should be [-90,90]", "ST_Distance_Sphere"); + if (error > 0) + my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), + "Longitude should be [-180,180]", "ST_Distance_Sphere"); + else if(error < 0) + my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), + "Latitude should be [-90,90]", "ST_Distance_Sphere"); return res; } -- cgit v1.2.1 From 58cd0bd59ef011be54f162237f2ff017c3148e7b Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 17 Oct 2022 16:44:10 -0700 Subject: MDEV-28846 Poor performance when rowid filter contains no elements When a range rowid filter was used with an index ref access the cost of accessing the index entries for the records rejected by the filter was not taken into account. For a ref access by an index with big average number of records per key this led to poor execution plans if selectivity of the used filter was high. The patch resolves this problem. It also introduces a minor optimization that skips look-ups into a filter that turns out to be empty. With this patch the output of ANALYZE stmt reports the number of look-ups into used rowid filters. The patch also back-ports from 10.5 the code that properly sets the field TABLE::file::table for opened temporary tables. The test cases that were supposed to use rowid filters have been adjusted in order to use similar execution plans after this fix. Approved by Oleksandr Byelkin --- sql/handler.h | 6 ++++++ sql/item_func.cc | 2 +- sql/rowid_filter.h | 11 +++++++++++ sql/sql_analyze_stmt.h | 5 ++++- sql/sql_explain.cc | 1 + sql/sql_insert.cc | 2 +- sql/sql_select.cc | 53 ++++++++++++++++++++++++++++++++++++++++++++++---- 7 files changed, 73 insertions(+), 7 deletions(-) (limited to 'sql') diff --git a/sql/handler.h b/sql/handler.h index cd999f30bc0..aa68c30480e 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3156,6 +3156,11 @@ public: DBUG_ASSERT(m_lock_type == F_UNLCK); DBUG_ASSERT(inited == NONE); } + /* To check if table has been properely opened */ + bool is_open() + { + return ref != 0; + } virtual handler *clone(const char *name, MEM_ROOT *mem_root); /** This is called after create to allow us to set up cached variables */ void init() @@ -4804,6 +4809,7 @@ public: ha_share= arg_ha_share; return false; } + void set_table(TABLE* table_arg) { table= table_arg; } int get_lock_type() const { return m_lock_type; } public: /* XXX to be removed, see ha_partition::partition_ht() */ diff --git a/sql/item_func.cc b/sql/item_func.cc index f4596803c2d..9c29280970b 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -6019,7 +6019,7 @@ bool Item_func_match::init_search(THD *thd, bool no_order) { DBUG_ENTER("Item_func_match::init_search"); - if (!table->file->get_table()) // the handler isn't opened yet + if (!table->file->is_open()) DBUG_RETURN(0); /* Check if init_search() has been called before */ diff --git a/sql/rowid_filter.h b/sql/rowid_filter.h index 467b6884ca6..b76b8b1e635 100644 --- a/sql/rowid_filter.h +++ b/sql/rowid_filter.h @@ -192,6 +192,9 @@ public: */ virtual bool check(void *ctxt, char *elem) = 0; + /* True if the container does not contain any element */ + virtual bool is_empty() = 0; + virtual 
~Rowid_filter_container() {} }; @@ -231,6 +234,8 @@ public: virtual ~Rowid_filter() {} + bool is_empty() { return container->is_empty(); } + Rowid_filter_container *get_container() { return container; } void set_tracker(Rowid_filter_tracker *track_arg) { tracker= track_arg; } @@ -268,6 +273,8 @@ public: bool check(char *elem) { + if (container->is_empty()) + return false; bool was_checked= container->check(table, elem); tracker->increment_checked_elements_count(was_checked); return was_checked; @@ -339,6 +346,8 @@ public: my_qsort2(array->front(), array->elements()/elem_size, elem_size, (qsort2_cmp) cmp, cmp_arg); } + + bool is_empty() { return elements() == 0; } }; @@ -368,6 +377,8 @@ public: bool add(void *ctxt, char *elem) { return refpos_container.add(elem); } bool check(void *ctxt, char *elem); + + bool is_empty() { return refpos_container.is_empty(); } }; /** diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h index eec52822ae5..40876d178e0 100644 --- a/sql/sql_analyze_stmt.h +++ b/sql/sql_analyze_stmt.h @@ -355,11 +355,14 @@ public: uint get_container_elements() { return container_elements; } + uint get_container_lookups() { return n_checks; } + double get_r_selectivity_pct() { - return (double)n_positive_checks/(double)n_checks; + return n_checks ? (double)n_positive_checks/(double)n_checks : 0; } size_t get_container_buff_size() { return container_buff_size; } + }; diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 1681da63ac1..70e300997f9 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -1676,6 +1676,7 @@ void Explain_rowid_filter::print_explain_json(Explain_query *query, if (is_analyze) { writer->add_member("r_rows").add_double(tracker->get_container_elements()); + writer->add_member("r_lookups").add_ll(tracker->get_container_lookups()); writer->add_member("r_selectivity_pct"). add_double(tracker->get_r_selectivity_pct() * 100.0); writer->add_member("r_buffer_size"). diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 76fd6385041..ca3de361865 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -4171,7 +4171,7 @@ void select_insert::abort_result_set() table will be assigned with view table structure, but that table will not be opened really (it is dummy to check fields types & Co). 
*/ - if (table && table->file->get_table()) + if (table && table->file->is_open()) { bool changed, transactional_table; /* diff --git a/sql/sql_select.cc b/sql/sql_select.cc index a91b4571b21..5ec88e5259c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -7400,6 +7400,7 @@ best_access_path(JOIN *join, table_map best_ref_depends_map= 0; Range_rowid_filter_cost_info *best_filter= 0; double tmp; + double keyread_tmp= 0; ha_rows rec; bool best_uses_jbuf= FALSE; MY_BITMAP *eq_join_set= &s->table->eq_join_set; @@ -7666,11 +7667,16 @@ best_access_path(JOIN *join, tmp= records; set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); if (table->covering_keys.is_set(key)) - tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); + keyread_tmp= + tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); else + { + keyread_tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); tmp= table->file->read_time(key, 1, (ha_rows) MY_MIN(tmp,s->worst_seeks)); + } tmp= COST_MULT(tmp, record_count); + keyread_tmp= COST_MULT(keyread_tmp, record_count); } } else @@ -7847,11 +7853,16 @@ best_access_path(JOIN *join, /* Limit the number of matched rows */ set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); if (table->covering_keys.is_set(key)) - tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); + keyread_tmp= + tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); else + { + keyread_tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); tmp= table->file->read_time(key, 1, (ha_rows) MY_MIN(tmp,s->worst_seeks)); + } tmp= COST_MULT(tmp, record_count); + keyread_tmp= COST_MULT(keyread_tmp, record_count); } else { @@ -7870,7 +7881,35 @@ best_access_path(JOIN *join, (found_part & 1)) // start_key->key can be used for index access { double rows= record_count * records; - double access_cost_factor= MY_MIN(tmp / rows, 1.0); + + /* + If we use filter F with selectivity s the the cost of fetching data + by key using this filter will be + cost_of_fetching_1_row * rows * s + + cost_of_fetching_1_key_tuple * rows * (1 - s) + + cost_of_1_lookup_into_filter * rows + Without using any filter the cost would be just + cost_of_fetching_1_row * rows + + So the gain in access cost per row will be + cost_of_fetching_1_row * (1 - s) - + cost_of_fetching_1_key_tuple * (1 - s) - + cost_of_1_lookup_into_filter + = + (cost_of_fetching_1_row - cost_of_fetching_1_key_tuple) * (1 - s) + - cost_of_1_lookup_into_filter + + Here we have: + cost_of_fetching_1_row = tmp/rows + cost_of_fetching_1_key_tuple = keyread_tmp/rows + + Note that access_cost_factor may be greater than 1.0. In this case + we still can expect a gain of using rowid filter due to smaller number + of checks for conditions pushed to the joined table. 
+ */ + double rows_access_cost= MY_MIN(rows, s->worst_seeks); + double access_cost_factor= MY_MIN((rows_access_cost - keyread_tmp) / + rows, 1.0); filter= table->best_range_rowid_filter_for_partial_join(start_key->key, rows, access_cost_factor); @@ -8029,8 +8068,11 @@ best_access_path(JOIN *join, if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE) { double rows= record_count * s->found_records; - double access_cost_factor= MY_MIN(tmp / rows, 1.0); uint key_no= s->quick->index; + + /* See the comment concerning using rowid filter for with ref access */ + keyread_tmp= s->table->quick_index_only_costs[key_no]; + double access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); filter= s->table->best_range_rowid_filter_for_partial_join(key_no, rows, access_cost_factor); @@ -18810,6 +18852,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List &fields, delete table->file; goto err; } + table->file->set_table(table); if (!using_unique_constraint) reclength+= group_null_items; // null flag is stored separately @@ -20651,6 +20694,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) DBUG_RETURN(NESTED_LOOP_ERROR); join_tab->build_range_rowid_filter_if_needed(); + if (join_tab->rowid_filter && join_tab->rowid_filter->is_empty()) + rc= NESTED_LOOP_NO_MORE_ROWS; join->return_tab= join_tab; -- cgit v1.2.1 From e910dff81ebaa84d0028705d20a40abe8f779afd Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 25 Oct 2022 21:21:19 +0200 Subject: MDEV-26161 crash in Gis_point::calculate_haversine return an error on invalid gis data --- sql/item_geofunc.cc | 20 +++++++++++--------- sql/spatial.cc | 9 ++++++--- 2 files changed, 17 insertions(+), 12 deletions(-) (limited to 'sql') diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index b40105aaf36..87c736ab94b 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -2596,7 +2596,7 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, double res= 0.0; // Length for the single point (25 Bytes) uint32 len= SRID_SIZE + POINT_DATA_SIZE + WKB_HEADER_SIZE; - int error= 0; + int err_hv= 0, err_sph= 0; switch (g2->get_class_info()->m_type_id) { @@ -2606,21 +2606,21 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, // Optimization for point-point case if (g1->get_class_info()->m_type_id == Geometry::wkb_point) { - res= g2p->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &err_hv); } else { // Optimization for single point in Multipoint if (g1->get_data_size() == len) { - res= g2p->calculate_haversine(g1, r, &error); + res= g2p->calculate_haversine(g1, r, &err_hv); } else { // There are multipoints in g1 // g1 is MultiPoint and calculate MP.sphericaldistance from g2 Point if (g1->get_data_size() != GET_SIZE_ERROR) - g2p->spherical_distance_multipoints(g1, r, &res, &error); + err_sph= g2p->spherical_distance_multipoints(g1, r, &res, &err_hv); } } break; @@ -2634,20 +2634,20 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, // Optimization for single point in Multipoint g2 if (g2->get_data_size() == len) { - res= g1p->calculate_haversine(g2, r, &error); + res= g1p->calculate_haversine(g2, r, &err_hv); } else { if (g2->get_data_size() != GET_SIZE_ERROR) // g1 is a point (casted to multi_point) and g2 multipoint - g1p->spherical_distance_multipoints(g2, r, &res, &error); + err_sph= g1p->spherical_distance_multipoints(g2, r, &res, &err_hv); } } else { Gis_multi_point *g1mp= static_cast(g1); // Multipoints in g1 and g2 - no 
optimization - g1mp->spherical_distance_multipoints(g2, r, &res, &error); + err_sph= g1mp->spherical_distance_multipoints(g2, r, &res, &err_hv); } break; @@ -2656,12 +2656,14 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, break; } - if (error > 0) + if (err_hv > 0) my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), "Longitude should be [-180,180]", "ST_Distance_Sphere"); - else if(error < 0) + else if(err_hv < 0) my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), "Latitude should be [-90,90]", "ST_Distance_Sphere"); + else if (err_sph) + my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); return res; } diff --git a/sql/spatial.cc b/sql/spatial.cc index 53e8c4c8bdd..6e044a67161 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -1151,7 +1151,8 @@ int Gis_point::spherical_distance_multipoints(Geometry *g, const double r, POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); - DBUG_ASSERT(temp); + if (!temp) + return 1; temp_res= this->calculate_haversine(temp, r, err); if (res > temp_res) res= temp_res; @@ -2335,7 +2336,8 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); - DBUG_ASSERT(temp); + if (!temp) + return 1; // Optimization for single Multipoint if (num_of_points2 == 1) { @@ -2354,7 +2356,8 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, POINT_DATA_SIZE*(j-1), POINT_DATA_SIZE); s2[len-1]= '\0'; temp2= Geometry::construct(&buff_temp2, s2, len); - DBUG_ASSERT(temp2); + if (!temp2) + return 1; temp_res= static_cast(temp)->calculate_haversine(temp2, r, err); if (res > temp_res) res= temp_res; -- cgit v1.2.1 From 77951dd7102381385093209a1f2597d28e39900a Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Tue, 25 Oct 2022 23:48:54 +0400 Subject: MDEV-26161 crash in Gis_point::calculate_haversine More checks for bad geometry data added. 
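For context, the code paths touched here are the ones behind ST_Distance_Sphere() on POINT and MULTIPOINT arguments. A minimal illustrative sketch of such calls (well-formed geometries only; the crashing cases from the report involve malformed geometry values, which are deliberately not reproduced here):
SELECT ST_Distance_Sphere(POINT(0, 0), POINT(0, 1));
SELECT ST_Distance_Sphere(ST_GeomFromText('MULTIPOINT(0 0, 10 10)'), ST_GeomFromText('MULTIPOINT(5 5, 20 20)'));
With the added checks, a point that cannot be constructed from the supplied data now makes the function fail with ER_CANT_CREATE_GEOMETRY_OBJECT instead of hitting a debug assertion or reading past the end of the value.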
--- sql/item_geofunc.cc | 4 ++-- sql/spatial.cc | 32 +++++++++++++++++++++----------- sql/spatial.h | 2 ++ 3 files changed, 25 insertions(+), 13 deletions(-) (limited to 'sql') diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 87c736ab94b..6e65366f2e0 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -2656,13 +2656,13 @@ double Item_func_sphere_distance::spherical_distance_points(Geometry *g1, break; } - if (err_hv > 0) + if (err_hv == 1) my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), "Longitude should be [-180,180]", "ST_Distance_Sphere"); else if(err_hv < 0) my_error(ER_STD_OUT_OF_RANGE_ERROR, MYF(0), "Latitude should be [-90,90]", "ST_Distance_Sphere"); - else if (err_sph) + else if (err_sph || err_hv == 2) my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); return res; } diff --git a/sql/spatial.cc b/sql/spatial.cc index 6e044a67161..9a30d346a1c 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -1071,10 +1071,9 @@ double Gis_point::calculate_haversine(const Geometry *g, point_temp[point_size-1]= '\0'; Geometry_buffer gbuff; Geometry *gg= Geometry::construct(&gbuff, point_temp, point_size-1); - DBUG_ASSERT(gg); - if (static_cast(gg)->get_xy_radian(&x2r, &y2r)) + if (!gg || static_cast(gg)->get_xy_radian(&x2r, &y2r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } } @@ -1082,15 +1081,16 @@ double Gis_point::calculate_haversine(const Geometry *g, { if (static_cast(g)->get_xy_radian(&x2r, &y2r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } } if (this->get_xy_radian(&x1r, &y1r)) { - DBUG_ASSERT(0); + *error= 2; return -1; } + // // Check boundary conditions: longitude[-180,180] if (!((x2r >= -M_PI && x2r <= M_PI) && (x1r >= -M_PI && x1r <= M_PI))) { @@ -1143,12 +1143,16 @@ int Gis_point::spherical_distance_multipoints(Geometry *g, const double r, { Geometry_buffer buff_temp; Geometry *temp; + const char *pt_ptr= g->get_data_ptr()+ + 4+WKB_HEADER_SIZE*i + POINT_DATA_SIZE*(i-1); // First 4 bytes are handled already, make sure to create a Point memset(s + 4, Geometry::wkb_point, 1); + if (g->no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; + memcpy(s + 5, g->get_data_ptr() + 5, 4); - memcpy(s + 4 + WKB_HEADER_SIZE, g->get_data_ptr() + 4 + WKB_HEADER_SIZE*i +\ - POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); + memcpy(s + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); if (!temp) @@ -2329,11 +2333,14 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, Geometry *temp; double temp_res= 0.0; char s[len]; + const char *pt_ptr= get_data_ptr()+ + 4+WKB_HEADER_SIZE*i + POINT_DATA_SIZE*(i-1); // First 4 bytes are handled already, make sure to create a Point memset(s + 4, Geometry::wkb_point, 1); + if (no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; memcpy(s + 5, this->get_data_ptr() + 5, 4); - memcpy(s + 4 + WKB_HEADER_SIZE, this->get_data_ptr() + 4 + WKB_HEADER_SIZE*i +\ - POINT_DATA_SIZE*(i-1), POINT_DATA_SIZE); + memcpy(s + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s[len-1]= '\0'; temp= Geometry::construct(&buff_temp, s, len); if (!temp) @@ -2349,11 +2356,14 @@ int Gis_multi_point::spherical_distance_multipoints(Geometry *g, const double r, Geometry_buffer buff_temp2; Geometry *temp2; char s2[len]; + const char *pt_ptr= g->get_data_ptr()+ + 4+WKB_HEADER_SIZE*j + POINT_DATA_SIZE*(j-1); // First 4 bytes are handled already, make sure to create a Point memset(s2 + 4, Geometry::wkb_point, 1); + if (g->no_data(pt_ptr, POINT_DATA_SIZE)) + return 1; memcpy(s2 + 5, g->get_data_ptr() + 5, 4); - memcpy(s2 
+ 4 + WKB_HEADER_SIZE, g->get_data_ptr() + 4 + WKB_HEADER_SIZE*j +\ - POINT_DATA_SIZE*(j-1), POINT_DATA_SIZE); + memcpy(s2 + 4 + WKB_HEADER_SIZE, pt_ptr, POINT_DATA_SIZE); s2[len-1]= '\0'; temp2= Geometry::construct(&buff_temp2, s2, len); if (!temp2) diff --git a/sql/spatial.h b/sql/spatial.h index 0c00482c09b..1a69b32bb1c 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -354,6 +354,7 @@ protected: const char *get_mbr_for_points(MBR *mbr, const char *data, uint offset) const; +public: /** Check if there're enough data remaining as requested @@ -384,6 +385,7 @@ protected: (expected_points > ((m_data_end - data) / (POINT_DATA_SIZE + extra_point_space)))); } +protected: const char *m_data; const char *m_data_end; }; -- cgit v1.2.1 From 278fbe61d847337712c0f802cc8e0db85bf58bd7 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 26 Oct 2022 10:14:34 +0200 Subject: Add skipped changes to oracle mode parser. --- sql/sql_yacc_ora.yy | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'sql') diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index bcb6c982da3..ec8e4f4c946 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -5954,10 +5954,11 @@ opt_part_option: opt_versioning_rotation: /* empty */ {} - | INTERVAL_SYM expr interval opt_versioning_interval_start + | { Lex->expr_allows_subselect= false; } + INTERVAL_SYM expr interval opt_versioning_interval_start { partition_info *part_info= Lex->part_info; - if (unlikely(part_info->vers_set_interval(thd, $2, $3, $4))) + if (unlikely(part_info->vers_set_interval(thd, $3, $4, $5))) MYSQL_YYABORT; } | LIMIT ulonglong_num @@ -12866,11 +12867,16 @@ order_clause: */ DBUG_ASSERT(sel->master_unit()->fake_select_lex); lex->current_select= sel->master_unit()->fake_select_lex; + lex->push_context(&sel->master_unit()->fake_select_lex->context, thd->mem_root); } } order_list { - + if (Lex->current_select == + Lex->current_select->master_unit()->fake_select_lex) + { + Lex->pop_context(); + } } ; -- cgit v1.2.1 From 1a3859fff09986a8ffc7b1b466ef565ce2b0bf42 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 1 Nov 2022 13:22:34 +0100 Subject: MDEV-29924 Assertion `(((nr) % (1LL << 24)) % (int) log_10_int[6 - dec]) == 0' failed in my_time_packed_to_binary on SELECT when using TIME field when assigning the cached item to the Item_cache for the first time make sure to use Item_cache::setup(), not Item_cache::store(). Because the former copies the metadata (and allocates memory, in case of Item_cache_row), and Item_cache::decimal must be set for comparisons to work correctly. --- sql/sql_class.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_class.cc b/sql/sql_class.cc index be96fdda0f0..10bee9e7aae 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -3753,8 +3753,10 @@ int select_max_min_finder_subselect::send_data(List &items) { cache= val_item->get_cache(thd); set_op(val_item->type_handler()); + cache->setup(thd, val_item); } - cache->store(val_item); + else + cache->store(val_item); it->store(0, cache); } it->assigned(1); -- cgit v1.2.1 From 3303748fd13399ba39ce4d646153d086c5a09445 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 2 Nov 2022 12:49:24 +0100 Subject: MDEV-29926: ASAN heap-use-after-free in Explain_query::~Explain_query Make sure that EXPLAIN object allocated on runtime arena. 
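For illustration only: the new comment added to JOIN::prepare (see the hunk below) describes a constant subquery such as (SELECT 'x') being prepared while the top-level statement is still under the statement arena. A hypothetical statement of that shape (not the exact test case from the MDEV report) would be:
PREPARE stmt FROM 'EXPLAIN SELECT (SELECT ''x'') AS c';
EXECUTE stmt;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
The change below creates the Explain_query object in JOIN::prepare, except during the CONTEXT_ANALYSIS_ONLY_PREPARE phase, instead of in JOIN::optimize, so that it is allocated on the runtime arena and is not freed while still referenced.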
--- sql/sql_select.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'sql') diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 70c0a80ba2a..0b330528452 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1078,6 +1078,15 @@ JOIN::prepare(TABLE_LIST *tables_init, // simple check that we got usable conds dbug_print_item(conds); + /* + It is hack which force creating EXPLAIN object always on runt-time arena + (because very top JOIN::prepare executes always with runtime arena, but + constant subquery like (SELECT 'x') can be called with statement arena + during prepare phase of top SELECT). + */ + if (!(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_PREPARE)) + create_explain_query_if_not_exists(thd->lex, thd->mem_root); + if (select_lex->handle_derived(thd->lex, DT_PREPARE)) DBUG_RETURN(-1); @@ -1521,7 +1530,6 @@ bool JOIN::build_explain() int JOIN::optimize() { int res= 0; - create_explain_query_if_not_exists(thd->lex, thd->mem_root); join_optimization_state init_state= optimization_state; if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE) res= optimize_stage2(); -- cgit v1.2.1