diff options
author | Sergei Golubchik <sergii@pisem.net> | 2013-07-21 16:39:19 +0200 |
---|---|---|
committer | Sergei Golubchik <sergii@pisem.net> | 2013-07-21 16:39:19 +0200 |
commit | b7b5f6f1ab49948b0e15b762266d4640b3d6b7fb (patch) | |
tree | 7c302c2025184dbd053aa6135f0ff28c8ce6f359 /sql | |
parent | 5f6380adde2dac3f32b40339b9b702c0135eb7d6 (diff) | |
parent | c1d6a2d7e194225ccc19a68ea5d0f368632620d0 (diff) | |
download | mariadb-git-b7b5f6f1ab49948b0e15b762266d4640b3d6b7fb.tar.gz |
10.0-monty merge
includes:
* remove some remnants of "Bug#14521864: MYSQL 5.1 TO 5.5 BUGS PARTITIONING"
* introduce LOCK_share, now LOCK_ha_data is strictly for engines
* rea_create_table() always creates .par file (even in "frm-only" mode)
* fix a 5.6 bug, temp file leak on dummy ALTER TABLE
Diffstat (limited to 'sql')
171 files changed, 18649 insertions, 10406 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index dc6ba20811d..b43474224ee 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -59,7 +59,8 @@ SET (SQL_SOURCE sp_rcontext.cc spatial.cc sql_acl.cc sql_analyse.cc sql_base.cc sql_cache.cc sql_class.cc sql_client.cc sql_crypt.cc sql_crypt.h sql_cursor.cc sql_db.cc sql_delete.cc sql_derived.cc sql_do.cc - sql_error.cc sql_handler.cc sql_help.cc sql_insert.cc sql_lex.cc + sql_error.cc sql_handler.cc sql_get_diagnostics.cc + sql_help.cc sql_insert.cc sql_lex.cc sql_list.cc sql_load.cc sql_manager.cc sql_parse.cc sql_bootstrap.cc sql_bootstrap.h sql_partition.cc sql_plugin.cc sql_prepare.cc sql_rename.cc @@ -107,7 +108,7 @@ ADD_LIBRARY(sql STATIC ${SQL_SOURCE}) ADD_DEPENDENCIES(sql GenServerSource) DTRACE_INSTRUMENT(sql) TARGET_LINK_LIBRARIES(sql ${MYSQLD_STATIC_PLUGIN_LIBS} - mysys dbug strings vio regex + mysys mysys_ssl dbug strings vio regex ${LIBWRAP} ${LIBCRYPT} ${LIBDL} ${SSL_LIBRARIES}) @@ -142,7 +143,7 @@ IF(NOT WITHOUT_DYNAMIC_PLUGINS) # incremental appears to crash from time to time,if used with /DEF option SET_TARGET_PROPERTIES(mysqld PROPERTIES LINK_FLAGS "${mysqld_link_flags} /DEF:mysqld.def /INCREMENTAL:NO") - FOREACH (CORELIB sql mysys dbug strings) + FOREACH (CORELIB sql mysys mysys_ssl dbug strings) GET_TARGET_PROPERTY(LOC ${CORELIB} LOCATION) FILE(TO_NATIVE_PATH ${LOC} LOC) SET (LIB_LOCATIONS ${LIB_LOCATIONS} ${LOC}) @@ -172,7 +173,7 @@ ENDIF() # On Solaris, some extra effort is required in order to get dtrace probes # from static libraries DTRACE_INSTRUMENT_STATIC_LIBS(mysqld - "sql;mysys;${MYSQLD_STATIC_PLUGIN_LIBS}") + "sql;mysys;mysys_ssl;${MYSQLD_STATIC_PLUGIN_LIBS}") SET(WITH_MYSQLD_LDFLAGS "" CACHE STRING "Additional linker flags for mysqld") @@ -225,7 +226,7 @@ ADD_CUSTOM_COMMAND( MYSQL_ADD_EXECUTABLE(mysql_tzinfo_to_sql tztime.cc COMPONENT Server) SET_TARGET_PROPERTIES(mysql_tzinfo_to_sql PROPERTIES COMPILE_FLAGS "-DTZINFO2SQL") -TARGET_LINK_LIBRARIES(mysql_tzinfo_to_sql mysys) 
+TARGET_LINK_LIBRARIES(mysql_tzinfo_to_sql mysys mysys_ssl) ADD_CUSTOM_TARGET( GenServerSource diff --git a/sql/create_options.cc b/sql/create_options.cc index f12120bd0a1..d956d01aa66 100644 --- a/sql/create_options.cc +++ b/sql/create_options.cc @@ -87,7 +87,7 @@ static bool report_wrong_value(THD *thd, const char *name, const char *val, return 1; } - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_BAD_OPTION_VALUE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_BAD_OPTION_VALUE, ER(ER_BAD_OPTION_VALUE), val, name); return 0; } @@ -110,7 +110,7 @@ static bool report_unknown_option(THD *thd, engine_option_value *val, DBUG_RETURN(TRUE); } - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_OPTION, ER(ER_UNKNOWN_OPTION), val->name.str); DBUG_RETURN(FALSE); } diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index 25f028e5451..750f770552e 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2009, 2010, Oracle and/or its affiliates. +/* Copyright (c) 2009, 2011, Oracle and/or its affiliates. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -10,8 +10,8 @@ GNU General Public License for more details. 
You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + along with this program; if not, write to the Free Software Foundation, + 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ /* see include/mysql/service_debug_sync.h for debug sync documentation */ @@ -38,7 +38,7 @@ */ struct st_debug_sync_action { - ulong activation_count; /* max(hit_limit, execute) */ + ulong activation_count; /* MY_MAX(hit_limit, execute) */ ulong hit_limit; /* hits before kill query */ ulong execute; /* executes before self-clear */ ulong timeout; /* wait_for timeout */ @@ -82,8 +82,6 @@ struct st_debug_sync_globals }; static st_debug_sync_globals debug_sync_global; /* All globals in one object */ -extern uint opt_debug_sync_timeout; - /** Callbacks from C files. */ @@ -112,14 +110,11 @@ static void init_debug_sync_psi_keys(void) const char* category= "sql"; int count; - if (PSI_server == NULL) - return; - count= array_elements(all_debug_sync_mutexes); - PSI_server->register_mutex(category, all_debug_sync_mutexes, count); + mysql_mutex_register(category, all_debug_sync_mutexes, count); count= array_elements(all_debug_sync_conds); - PSI_server->register_cond(category, all_debug_sync_conds, count); + mysql_cond_register(category, all_debug_sync_conds, count); } #endif /* HAVE_PSI_INTERFACE */ @@ -741,7 +736,7 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action) DBUG_ASSERT(action); DBUG_ASSERT(ds_control); - action->activation_count= max(action->hit_limit, action->execute); + action->activation_count= MY_MAX(action->hit_limit, action->execute); if (!action->activation_count) { debug_sync_remove_action(ds_control, action); @@ -783,7 +778,7 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action) point decremented it to 0. 
In this case the following happened: - an error message was reported with my_error() and - - the statement was killed with thd->killed= KILL_QUERY. + - the statement was killed with thd->killed= THD::KILL_QUERY. If a statement reports an error, it must not call send_ok(). The calling functions will not call send_ok(), if we return TRUE @@ -985,7 +980,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str) DBUG_ENTER("debug_sync_eval_action"); DBUG_ASSERT(thd); DBUG_ASSERT(action_str); - DBUG_PRINT("debug_sync", ("action_str='%s'", action_str)); + DBUG_PRINT("debug_sync", ("action_str: '%s'", action_str)); /* Get debug sync point name. Or a special command. @@ -1450,8 +1445,13 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action) sig_wait, sig_glob, error));}); if (error == ETIMEDOUT || error == ETIME) { - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + // We should not make the statement fail, even if in strict mode. + const bool save_abort_on_warning= thd->abort_on_warning; + thd->abort_on_warning= false; + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_DEBUG_SYNC_TIMEOUT, ER(ER_DEBUG_SYNC_TIMEOUT)); + thd->abort_on_warning= save_abort_on_warning; + DBUG_EXECUTE_IF("debug_sync_abort_on_timeout", DBUG_ABORT();); break; } error= 0; @@ -1521,9 +1521,10 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action) static void debug_sync(THD *thd, const char *sync_point_name, size_t name_len) { if (!thd) - thd= current_thd; - if (!thd) - return; + { + if (!(thd= current_thd)) + return; + } st_debug_sync_control *ds_control= thd->debug_sync_control; st_debug_sync_action *action; diff --git a/sql/debug_sync.h b/sql/debug_sync.h index 4d29d6e7508..bf1b3167dbc 100644 --- a/sql/debug_sync.h +++ b/sql/debug_sync.h @@ -32,6 +32,9 @@ class THD; #if defined(ENABLED_DEBUG_SYNC) +/* Command line option --debug-sync-timeout. See mysqld.cc. 
*/ +extern MYSQL_PLUGIN_IMPORT uint opt_debug_sync_timeout; + /* Default WAIT_FOR timeout if command line option is given without argument. */ #define DEBUG_SYNC_DEFAULT_WAIT_TIMEOUT 300 diff --git a/sql/derror.cc b/sql/derror.cc index 665427f45bc..74e8209496b 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -76,7 +76,7 @@ bool init_errmessage(void) &errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1) && !errmsgs) { - free(errmsgs); + my_free(errmsgs); if (org_errmsgs) { @@ -99,7 +99,7 @@ bool init_errmessage(void) } } else - free(org_errmsgs); // Free old language + my_free(org_errmsgs); // Free old language /* Register messages for use with my_error(). */ if (my_error_register(get_server_errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST)) @@ -146,8 +146,8 @@ bool read_texts(const char *file_name, const char *language, const char ***point, uint error_messages) { register uint i; - uint count,funktpos,textcount; - size_t length; + uint count,funktpos; + size_t offset, length; File file; char name[FN_REFLEN]; char lang_path[FN_REFLEN]; @@ -186,9 +186,8 @@ bool read_texts(const char *file_name, const char *language, goto err; funktpos=2; if (head[0] != (uchar) 254 || head[1] != (uchar) 254 || - head[2] != 2 || head[3] != 2) + head[2] != 2 || head[3] != 3) goto err; /* purecov: inspected */ - textcount=head[4]; error_message_charset_info= system_charset_info; length=uint4korr(head+6); count=uint2korr(head+10); @@ -203,7 +202,7 @@ Error message file '%s' had only %d error messages, but it should contain at lea } if (!(*point= (const char**) - my_malloc((size_t) (max(length,count*2)+count*sizeof(char*)),MYF(0)))) + my_malloc((size_t) (MY_MAX(length,count*2)+count*sizeof(char*)),MYF(0)))) { funktpos=3; /* purecov: inspected */ goto err; /* purecov: inspected */ @@ -212,18 +211,15 @@ Error message file '%s' had only %d error messages, but it should contain at lea if (mysql_file_read(file, buff, (size_t) count*2, MYF(MY_NABP))) goto err; - for (i=0, pos= buff ; i< count ; i++) + for (i=0, 
offset=0, pos= buff ; i< count ; i++) { - (*point)[i]= (char*) buff+uint2korr(pos); + (*point)[i]= (char*) buff+offset; + offset+= uint2korr(pos); pos+=2; } if (mysql_file_read(file, buff, length, MYF(MY_NABP))) goto err; - for (i=1 ; i < textcount ; i++) - { - point[i]= *point +uint2korr(head+10+i+i); - } (void) mysql_file_close(file, MYF(0)); i= check_error_mesg(file_name, *point); diff --git a/sql/discover.cc b/sql/discover.cc index cc0dece031a..9351cf034ab 100644 --- a/sql/discover.cc +++ b/sql/discover.cc @@ -70,7 +70,7 @@ int readfrm(const char *name, const uchar **frmdata, size_t *len) error= 2; if (mysql_file_fstat(file, &state, MYF(0))) goto err; - read_len= (size_t)min(FRM_MAX_SIZE, state.st_size); // safety + read_len= (size_t)MY_MIN(FRM_MAX_SIZE, state.st_size); // safety // Read whole frm file error= 3; diff --git a/sql/discover.h b/sql/discover.h index fbf94891c74..e1508107235 100644 --- a/sql/discover.h +++ b/sql/discover.h @@ -26,6 +26,14 @@ int readfrm(const char *name, const uchar **data, size_t *length); int writefrm(const char *path, const char *db, const char *table, bool tmp_table, const uchar *frmdata, size_t len); +/* a helper to delete an frm file, given a path w/o .frm extension */ +inline void deletefrm(const char *path) +{ + char frm_name[FN_REFLEN]; + strxmov(frm_name, path, reg_ext, NullS); + mysql_file_delete(key_file_frm, frm_name, MYF(0)); +} + int ext_table_discovery_simple(MY_DIR *dirp, handlerton::discovered_list *result); #endif diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 2e31d20d54e..e236319d757 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -608,7 +608,7 @@ Event_timed::load_from_row(THD *thd, TABLE *table) table, &creation_ctx)) { push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_EVENT_INVALID_CREATION_CTX, ER(ER_EVENT_INVALID_CREATION_CTX), (const char *) dbname.str, diff --git a/sql/event_db_repository.cc 
b/sql/event_db_repository.cc index 5c77456d907..34658ab51ac 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -429,7 +429,7 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, key_info= event_table->key_info; - if (key_info->key_parts == 0 || + if (key_info->user_defined_key_parts == 0 || key_info->key_part[0].field != event_table->field[ET_FIELD_DB]) { /* Corrupted table: no index or index on a wrong column */ @@ -687,7 +687,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, if (create_if_not) { *event_already_exists= true; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_EVENT_ALREADY_EXISTS, ER(ER_EVENT_ALREADY_EXISTS), parse_data->name.str); ret= 0; @@ -912,7 +912,7 @@ Event_db_repository::drop_event(THD *thd, LEX_STRING db, LEX_STRING name, goto end; } - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), "Event", name.str); ret= 0; @@ -958,7 +958,7 @@ Event_db_repository::find_named_event(LEX_STRING db, LEX_STRING name, if (db.length > table->field[ET_FIELD_DB]->field_length || name.length > table->field[ET_FIELD_NAME]->field_length || table->s->keys == 0 || - table->key_info[0].key_parts != 2 || + table->key_info[0].user_defined_key_parts != 2 || table->key_info[0].key_part[0].fieldnr != ET_FIELD_DB+1 || table->key_info[0].key_part[1].fieldnr != ET_FIELD_NAME+1) DBUG_RETURN(TRUE); diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc index 4316a9f1fb8..7647419aff9 100644 --- a/sql/event_parse_data.cc +++ b/sql/event_parse_data.cc @@ -126,7 +126,7 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc) { switch (thd->lex->sql_command) { case SQLCOM_CREATE_EVENT: - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 
ER_EVENT_CANNOT_CREATE_IN_THE_PAST, ER(ER_EVENT_CANNOT_CREATE_IN_THE_PAST)); break; @@ -143,7 +143,7 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc) { status= Event_parse_data::DISABLED; status_changed= true; - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_EVENT_EXEC_TIME_IN_THE_PAST, ER(ER_EVENT_EXEC_TIME_IN_THE_PAST)); } diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc index 6317af1eac3..f75a8abc835 100644 --- a/sql/event_scheduler.cc +++ b/sql/event_scheduler.cc @@ -75,9 +75,9 @@ struct scheduler_param { void Event_worker_thread::print_warnings(THD *thd, Event_job_data *et) { - MYSQL_ERROR *err; + const Sql_condition *err; DBUG_ENTER("evex_print_warnings"); - if (thd->warning_info->is_empty()) + if (thd->get_stmt_da()->is_warning_info_empty()) DBUG_VOID_RETURN; char msg_buf[10 * STRING_BUFFER_USUAL_SIZE]; @@ -93,7 +93,8 @@ Event_worker_thread::print_warnings(THD *thd, Event_job_data *et) prefix.append(et->name.str, et->name.length, system_charset_info); prefix.append("] ", 2); - List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list()); + Diagnostics_area::Sql_condition_iterator it= + thd->get_stmt_da()->sql_conditions(); while ((err= it++)) { String err_msg(msg_buf, sizeof(msg_buf), system_charset_info); diff --git a/sql/events.cc b/sql/events.cc index b9c51b77f05..acf842dea44 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -808,7 +808,16 @@ Events::init(bool opt_noacl_or_bootstrap) */ thd->thread_stack= (char*) &thd; thd->store_globals(); - + /* + Set current time for the thread that handles events. + Current time is stored in data member start_time of THD class. + Subsequently, this value is used to check whether event was expired + when make loading events from storage. 
Check for event expiration time + is done at Event_queue_element::compute_next_execution_time() where + event's status set to Event_parse_data::DISABLED and dropped flag set + to true if event was expired. + */ + thd->set_time(); /* We will need Event_db_repository anyway, even if the scheduler is disabled - to perform events DDL. @@ -1098,8 +1107,7 @@ Events::load_events_from_db(THD *thd) while (!(read_record_info.read_record(&read_record_info))) { Event_queue_element *et; - bool created; - bool drop_on_completion; + bool created, dropped; if (!(et= new Event_queue_element)) goto end; @@ -1114,10 +1122,13 @@ Events::load_events_from_db(THD *thd) delete et; goto end; } - drop_on_completion= (et->on_completion == - Event_parse_data::ON_COMPLETION_DROP); - + /** + Since the Event_queue_element object could be deleted inside + Event_queue::create_event we should save the value of dropped flag + into the temporary variable. + */ + dropped= et->dropped; if (event_queue->create_event(thd, et, &created)) { /* Out of memory */ @@ -1126,7 +1137,7 @@ Events::load_events_from_db(THD *thd) } if (created) count++; - else if (drop_on_completion) + else if (dropped) { /* If not created, a stale event - drop if immediately if diff --git a/sql/field.cc b/sql/field.cc index 1cdf2ffd313..9b374c2770d 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -68,7 +68,7 @@ const char field_separator=','; #define LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE 128 #define DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE 128 #define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \ -((ulong) ((1LL << min(arg, 4) * 8) - 1)) + ((ulong) ((1LL << MY_MIN(arg, 4) * 8) - 1)) #define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table || (!table->read_set || bitmap_is_set(table->read_set, field_index))) #define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED DBUG_ASSERT(is_stat_field || !table || (!table->write_set || bitmap_is_set(table->write_set, field_index) || bitmap_is_set(table->vcol_set, field_index))) @@ -1073,13 +1073,13 @@ 
static void push_numerical_conversion_warning(THD* thd, const char* str, const char* field_name="UNKNOWN", ulong row_num=0) { - char buf[max(max(DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE, + char buf[MY_MAX(MY_MAX(DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE, LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE), DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE)]; String tmp(buf, sizeof(buf), cs); tmp.copy(str, length, cs); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, error, ER(error), typestr, tmp.c_ptr(), field_name, row_num); } @@ -1158,7 +1158,7 @@ double Field::pos_in_interval_val_real(Field *min, Field *max) d= max->val_real() - min->val_real(); if (d <= 0) return 1.0; - return min(n/d, 1.0); + return MY_MIN(n/d, 1.0); } @@ -1233,7 +1233,7 @@ double Field::pos_in_interval_val_str(Field *min, Field *max, uint data_offset) d= maxp - minp; if (d <= 0) return 1.0; - return min(n/d, 1.0); + return MY_MIN(n/d, 1.0); } @@ -1298,17 +1298,18 @@ int Field_num::check_int(CHARSET_INFO *cs, const char *str, int length, if (str == int_end || error == MY_ERRNO_EDOM) { ErrConvString err(str, length, cs); - push_warning_printf(get_thd(), MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(get_thd(), Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), "integer", err.ptr(), field_name, - (ulong) get_thd()->warning_info->current_row_for_warning()); + (ulong) table->in_use->get_stmt_da()-> + current_row_for_warning()); return 1; } /* Test if we have garbage at the end of the given string. 
*/ if (test_if_important_data(cs, int_end, str + length)) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); return 2; } return 0; @@ -1377,7 +1378,7 @@ bool Field_num::get_int(CHARSET_INFO *cs, const char *from, uint len, return 0; out_of_range: - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } @@ -1398,12 +1399,12 @@ int Field::warn_if_overflow(int op_result) { if (op_result == E_DEC_OVERFLOW) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } if (op_result == E_DEC_TRUNCATED) { - set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); /* We return 0 here as this is not a critical issue */ } return 0; @@ -1729,7 +1730,7 @@ longlong Field::convert_decimal2longlong(const my_decimal *val, { if (val->sign()) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); i= 0; *err= 1; } @@ -1869,7 +1870,7 @@ uint Field::fill_cache_field(CACHE_FIELD *copy) if (flags & BLOB_FLAG) { copy->type= CACHE_BLOB; - copy->length-= table->s->blob_ptr_size; + copy->length-= portable_sizeof_char_ptr; return copy->length; } else if (!zero_pack() && @@ -2037,7 +2038,7 @@ void Field_decimal::overflow(bool negative) uint len=field_length; uchar *to=ptr, filler= '9'; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); if (negative) { if (!unsigned_flag) @@ -2145,7 +2146,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs) from++; if (from == end) { - 
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); is_cuted_fields_incr=1; } else if (*from == '+' || *from == '-') // Found some sign ? @@ -2221,7 +2222,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs) for (;from != end && my_isspace(&my_charset_bin, *from); from++) ; if (from != end) // If still something left, warn { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); is_cuted_fields_incr=1; } } @@ -2258,7 +2259,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs) tmp_uint=tmp_dec+(uint)(int_digits_end-int_digits_from); else if (expo_sign_char == '-') { - tmp_uint=min(exponent,(uint)(int_digits_end-int_digits_from)); + tmp_uint=MY_MIN(exponent,(uint)(int_digits_end-int_digits_from)); frac_digits_added_zeros=exponent-tmp_uint; int_digits_end -= tmp_uint; frac_digits_head_end=int_digits_end+tmp_uint; @@ -2266,7 +2267,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs) } else // (expo_sign_char=='+') { - tmp_uint=min(exponent,(uint)(frac_digits_end-frac_digits_from)); + tmp_uint=MY_MIN(exponent,(uint)(frac_digits_end-frac_digits_from)); int_digits_added_zeros=exponent-tmp_uint; int_digits_tail_from=frac_digits_from; frac_digits_from=frac_digits_from+tmp_uint; @@ -2399,7 +2400,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs) if (tmp_char != '0') // Losing a non zero digit ? 
{ if (!is_cuted_fields_incr) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); return 0; } @@ -2422,7 +2423,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs) This is a note, not a warning, as we don't want to abort when we cut decimals in strict mode */ - set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); } return 0; } @@ -2685,7 +2686,7 @@ Field *Field_new_decimal::create_from_item (Item *item) { signed int overflow; - dec= min(dec, DECIMAL_MAX_SCALE); + dec= MY_MIN(dec, DECIMAL_MAX_SCALE); /* If the value still overflows the field with the corrected dec, @@ -2701,7 +2702,7 @@ Field *Field_new_decimal::create_from_item (Item *item) overflow= required_length - len; if (overflow > 0) - dec= max(0, dec - overflow); // too long, discard fract + dec= MY_MAX(0, dec - overflow); // too long, discard fract else /* Corrected value fits. 
*/ len= required_length; @@ -2772,7 +2773,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value) if (unsigned_flag && decimal_value->sign()) { DBUG_PRINT("info", ("unsigned overflow")); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; decimal_value= &decimal_zero; } @@ -2816,32 +2817,32 @@ int Field_new_decimal::store(const char *from, uint length, thd->abort_on_warning) { ErrConvString errmsg(from, length, &my_charset_bin); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), "decimal", errmsg.ptr(), field_name, - (ulong) thd->warning_info->current_row_for_warning()); - + static_cast<ulong>(thd->get_stmt_da()-> + current_row_for_warning())); DBUG_RETURN(err); } switch (err) { case E_DEC_TRUNCATED: - set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); break; case E_DEC_OVERFLOW: - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); set_value_on_overflow(&decimal_value, decimal_value.sign()); break; case E_DEC_BAD_NUM: { ErrConvString errmsg(from, length, &my_charset_bin); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), "decimal", errmsg.ptr(), field_name, - (ulong) thd->warning_info-> - current_row_for_warning()); + static_cast<ulong>(thd->get_stmt_da()-> + current_row_for_warning())); my_decimal_set_zero(&decimal_value); break; } @@ -3148,13 +3149,13 @@ int Field_tiny::store(double nr) if (nr < 0.0) { *ptr=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, 
ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > 255.0) { *ptr= (uchar) 255; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3165,13 +3166,13 @@ int Field_tiny::store(double nr) if (nr < -128.0) { *ptr= (uchar) -128; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > 127.0) { *ptr=127; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3191,13 +3192,13 @@ int Field_tiny::store(longlong nr, bool unsigned_val) if (nr < 0 && !unsigned_val) { *ptr= 0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if ((ulonglong) nr > (ulonglong) 255) { *ptr= (char) 255; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3210,13 +3211,13 @@ int Field_tiny::store(longlong nr, bool unsigned_val) if (nr < -128) { *ptr= (char) -128; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > 127) { *ptr=127; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3250,7 +3251,7 @@ String *Field_tiny::val_str(String *val_buffer, ASSERT_COLUMN_MARKED_FOR_READ; CHARSET_INFO *cs= &my_charset_numeric; uint length; - uint mlength=max(field_length+1,5*cs->mbmaxlen); + uint 
mlength=MY_MAX(field_length+1,5*cs->mbmaxlen); val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); @@ -3327,13 +3328,13 @@ int Field_short::store(double nr) if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) UINT_MAX16) { res=(int16) UINT_MAX16; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3344,13 +3345,13 @@ int Field_short::store(double nr) if (nr < (double) INT_MIN16) { res=INT_MIN16; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) INT_MAX16) { res=INT_MAX16; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3372,13 +3373,13 @@ int Field_short::store(longlong nr, bool unsigned_val) if (nr < 0L && !unsigned_val) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if ((ulonglong) nr > (ulonglong) UINT_MAX16) { res=(int16) UINT_MAX16; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3392,13 +3393,13 @@ int Field_short::store(longlong nr, bool unsigned_val) if (nr < INT_MIN16) { res=INT_MIN16; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (longlong) INT_MAX16) { res=INT_MAX16; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, 
ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3432,7 +3433,7 @@ String *Field_short::val_str(String *val_buffer, ASSERT_COLUMN_MARKED_FOR_READ; CHARSET_INFO *cs= &my_charset_numeric; uint length; - uint mlength=max(field_length+1,7*cs->mbmaxlen); + uint mlength=MY_MAX(field_length+1,7*cs->mbmaxlen); val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); short j; @@ -3515,14 +3516,14 @@ int Field_medium::store(double nr) if (nr < 0) { int3store(ptr,0); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr >= (double) (long) (1L << 24)) { uint32 tmp=(uint32) (1L << 24)-1L; int3store(ptr,tmp); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3534,14 +3535,14 @@ int Field_medium::store(double nr) { long tmp=(long) INT_MIN24; int3store(ptr,tmp); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) INT_MAX24) { long tmp=(long) INT_MAX24; int3store(ptr,tmp); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3561,14 +3562,14 @@ int Field_medium::store(longlong nr, bool unsigned_val) if (nr < 0 && !unsigned_val) { int3store(ptr,0); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if ((ulonglong) nr >= (ulonglong) (long) (1L << 24)) { long tmp= (long) (1L << 24)-1L; int3store(ptr,tmp); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + 
set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3583,14 +3584,14 @@ int Field_medium::store(longlong nr, bool unsigned_val) { long tmp= (long) INT_MIN24; int3store(ptr,tmp); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (longlong) INT_MAX24) { long tmp=(long) INT_MAX24; int3store(ptr,tmp); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3622,7 +3623,7 @@ String *Field_medium::val_str(String *val_buffer, ASSERT_COLUMN_MARKED_FOR_READ; CHARSET_INFO *cs= &my_charset_numeric; uint length; - uint mlength=max(field_length+1,10*cs->mbmaxlen); + uint mlength=MY_MAX(field_length+1,10*cs->mbmaxlen); val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr); @@ -3712,7 +3713,7 @@ int Field_long::store(double nr) else if (nr > (double) UINT_MAX32) { res= UINT_MAX32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -3734,7 +3735,7 @@ int Field_long::store(double nr) res=(int32) (longlong) nr; } if (error) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); int4store(ptr,res); return error; @@ -3780,7 +3781,7 @@ int Field_long::store(longlong nr, bool unsigned_val) res=(int32) nr; } if (error) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); int4store(ptr,res); return error; @@ -3811,7 +3812,7 @@ String *Field_long::val_str(String *val_buffer, ASSERT_COLUMN_MARKED_FOR_READ; CHARSET_INFO *cs= 
&my_charset_numeric; uint length; - uint mlength=max(field_length+1,12*cs->mbmaxlen); + uint mlength=MY_MAX(field_length+1,12*cs->mbmaxlen); val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); int32 j; @@ -3879,7 +3880,7 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) tmp= cs->cset->strntoull10rnd(cs,from,len,unsigned_flag,&end,&error); if (error == MY_ERRNO_ERANGE) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (get_thd()->count_cuted_fields && @@ -3901,7 +3902,7 @@ int Field_longlong::store(double nr) res= double_to_longlong(nr, unsigned_flag, &error); if (error) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); int8store(ptr,res); return error; @@ -3922,7 +3923,7 @@ int Field_longlong::store(longlong nr, bool unsigned_val) if (unsigned_flag != unsigned_val) { nr= unsigned_flag ? (ulonglong) 0 : (ulonglong) LONGLONG_MAX; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } } @@ -3961,7 +3962,7 @@ String *Field_longlong::val_str(String *val_buffer, { CHARSET_INFO *cs= &my_charset_numeric; uint length; - uint mlength=max(field_length+1,22*cs->mbmaxlen); + uint mlength=MY_MAX(field_length+1,22*cs->mbmaxlen); val_buffer->alloc(mlength); char *to=(char*) val_buffer->ptr(); longlong j; @@ -4036,7 +4037,7 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs) if (error || (!len || ((uint) (end-from) != len && get_thd()->count_cuted_fields))) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + set_warning(Sql_condition::WARN_LEVEL_WARN, (error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1); error= error ? 
1 : 2; } @@ -4053,7 +4054,7 @@ int Field_float::store(double nr) unsigned_flag, FLT_MAX); if (error) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); if (error < 0) // Wrong double value { error= 1; @@ -4224,7 +4225,7 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs) if (error || (!len || ((uint) (end-from) != len && get_thd()->count_cuted_fields))) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + set_warning(Sql_condition::WARN_LEVEL_WARN, (error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1); error= error ? 1 : 2; } @@ -4241,7 +4242,7 @@ int Field_double::store(double nr) unsigned_flag, DBL_MAX); if (error) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); if (error < 0) // Wrong double value { error= 1; @@ -4406,7 +4407,7 @@ longlong Field_double::val_int(void) if (error) { ErrConvDouble err(j); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER", err.ptr()); @@ -4602,7 +4603,7 @@ int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time, if (was_cut || !have_smth_to_conv) { error= 1; - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, str, MYSQL_TIMESTAMP_DATETIME, 1); } /* Only convert a correct date (not a zero date) */ @@ -4614,7 +4615,7 @@ int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time, conversion_error= ER_WARN_DATA_OUT_OF_RANGE; if (conversion_error) { - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, conversion_error, + set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, conversion_error, str, MYSQL_TIMESTAMP_DATETIME, !error); error= 
1; } @@ -5084,7 +5085,7 @@ uint Field_temporal::is_equal(Create_field *new_field) } -void Field_temporal::set_warnings(MYSQL_ERROR::enum_warning_level trunc_level, +void Field_temporal::set_warnings(Sql_condition::enum_warning_level trunc_level, const ErrConv *str, int was_cut, timestamp_type ts_type) { @@ -5102,7 +5103,7 @@ void Field_temporal::set_warnings(MYSQL_ERROR::enum_warning_level trunc_level, set_datetime_warning(trunc_level, WARN_DATA_TRUNCATED, str, mysql_type_to_time_type(type()), 1); if (was_cut & MYSQL_TIME_WARN_OUT_OF_RANGE) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, + set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, str, mysql_type_to_time_type(type()), 1); } @@ -5122,7 +5123,7 @@ int Field_temporal_with_date::store_TIME_with_warning(MYSQL_TIME *ltime, int was_cut, int have_smth_to_conv) { - MYSQL_ERROR::enum_warning_level trunc_level= MYSQL_ERROR::WARN_LEVEL_WARN; + Sql_condition::enum_warning_level trunc_level= Sql_condition::WARN_LEVEL_WARN; int ret= 2; ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; @@ -5141,7 +5142,7 @@ int Field_temporal_with_date::store_TIME_with_warning(MYSQL_TIME *ltime, mysql_type_to_time_type(type()) == MYSQL_TIMESTAMP_DATE && (ltime->hour || ltime->minute || ltime->second || ltime->second_part)) { - trunc_level= MYSQL_ERROR::WARN_LEVEL_NOTE; + trunc_level= Sql_condition::WARN_LEVEL_NOTE; was_cut|= MYSQL_TIME_WARN_TRUNCATED; ret= 3; } @@ -5237,7 +5238,7 @@ int Field_time::store_TIME_with_warning(MYSQL_TIME *ltime, int was_cut, int have_smth_to_conv) { - MYSQL_ERROR::enum_warning_level trunc_level= MYSQL_ERROR::WARN_LEVEL_WARN; + Sql_condition::enum_warning_level trunc_level= Sql_condition::WARN_LEVEL_WARN; int ret= 2; ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED; @@ -5252,7 +5253,7 @@ int Field_time::store_TIME_with_warning(MYSQL_TIME *ltime, (ltime->year || ltime->month)) { ltime->year= ltime->month= ltime->day= 0; - trunc_level= MYSQL_ERROR::WARN_LEVEL_NOTE; 
+ trunc_level= Sql_condition::WARN_LEVEL_NOTE; was_cut|= MYSQL_TIME_WARN_TRUNCATED; ret= 3; } @@ -5373,10 +5374,10 @@ bool Field_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate) (fuzzydate & TIME_NO_ZERO_IN_DATE)) { THD *thd= get_thd(); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, ER(ER_WARN_DATA_OUT_OF_RANGE), field_name, - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); return 1; } long tmp=(long) sint3korr(ptr); @@ -5563,7 +5564,7 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) error == MY_ERRNO_ERANGE) { *ptr=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } if (get_thd()->count_cuted_fields && @@ -5606,7 +5607,7 @@ int Field_year::store(longlong nr, bool unsigned_val) if (nr < 0 || (nr >= 100 && nr <= 1900) || nr > 2155) { *ptr= 0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } if (nr != 0 || field_length != 4) // 0000 -> 0; 00 -> 2000 @@ -5627,7 +5628,7 @@ int Field_year::store_time_dec(MYSQL_TIME *ltime, uint dec) if (Field_year::store(ltime->year, 0)) return 1; - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, &str, ltime->time_type, 1); return 0; } @@ -6209,11 +6210,11 @@ check_string_copy_error(Field_str *field, convert_to_printable(tmp, sizeof(tmp), pos, (end - pos), cs, 6); push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), "string", tmp, field->field_name, - thd->warning_info->current_row_for_warning()); + 
thd->get_stmt_da()->current_row_for_warning()); return TRUE; } @@ -6248,14 +6249,14 @@ Field_longstr::report_if_important_data(const char *pstr, const char *end, if (test_if_important_data(field_charset, pstr, end)) { if (thd->abort_on_warning) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1); else - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); return 2; } else if (count_spaces) { /* If we lost only spaces then produce a NOTE, not a WARNING */ - set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); return 2; } } @@ -6321,9 +6322,9 @@ int Field_str::store(double nr) if (error) { if (get_thd()->abort_on_warning) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1); else - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } return store(buff, length, &my_charset_numeric); } @@ -6385,7 +6386,7 @@ double Field_string::val_real(void) (char*) ptr + field_length)))) { ErrConvString err((char*) ptr, field_length, cs); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "DOUBLE", err.ptr()); @@ -6409,7 +6410,7 @@ longlong Field_string::val_int(void) (char*) ptr + field_length)))) { ErrConvString err((char*) ptr, field_length, cs); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER", err.ptr()); @@ -6445,7 +6446,7 @@ my_decimal *Field_string::val_decimal(my_decimal 
*decimal_value) if (!get_thd()->no_errors && err) { ErrConvString errmsg((char*) ptr, field_length, charset()); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "DECIMAL", errmsg.ptr()); @@ -6541,7 +6542,7 @@ void Field_string::sql_type(String &res) const uchar *Field_string::pack(uchar *to, const uchar *from, uint max_length) { - uint length= min(field_length,max_length); + uint length= MY_MIN(field_length,max_length); uint local_char_length= max_length/field_charset->mbmaxlen; DBUG_PRINT("debug", ("Packing field '%s' - length: %u ", field_name, length)); @@ -7288,7 +7289,7 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs) from= tmpstr.ptr(); } - new_length= min(max_data_length(), field_charset->mbmaxlen * length); + new_length= MY_MIN(max_data_length(), field_charset->mbmaxlen * length); if (value.alloc(new_length)) goto oom_error; @@ -7448,7 +7449,7 @@ int Field_blob::cmp_binary(const uchar *a_ptr, const uchar *b_ptr, b_length=get_length(b_ptr); if (b_length > max_length) b_length=max_length; - diff=memcmp(a,b,min(a_length,b_length)); + diff=memcmp(a,b,MY_MIN(a_length,b_length)); return diff ? diff : (int) (a_length - b_length); } @@ -7626,7 +7627,7 @@ uchar *Field_blob::pack(uchar *to, const uchar *from, uint max_length) length given is smaller than the actual length of the blob, we just store the initial bytes of the blob. */ - store_length(to, packlength, min(length, max_length)); + store_length(to, packlength, MY_MIN(length, max_length)); /* Store the actual blob data, which will occupy 'length' bytes. 
@@ -7788,7 +7789,7 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs) ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), MYF(0), Geometry::ci_collection[geom_type]->m_name.str, Geometry::ci_collection[wkb_type]->m_name.str, field_name, - (ulong) table->in_use->warning_info->current_row_for_warning()); + (ulong) table->in_use->get_stmt_da()->current_row_for_warning()); goto err_exit; } @@ -7871,13 +7872,13 @@ int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs) if (err || end != from+length || tmp > typelib->count) { tmp=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } if (!get_thd()->count_cuted_fields) err= 0; } else - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } store_type((ulonglong) tmp); return err; @@ -7896,7 +7897,7 @@ int Field_enum::store(longlong nr, bool unsigned_val) int error= 0; if ((ulonglong) nr > typelib->count || nr == 0) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); if (nr != 0 || get_thd()->count_cuted_fields) { nr= 0; @@ -8050,11 +8051,11 @@ int Field_set::store(const char *from,uint length,CHARSET_INFO *cs) tmp > (ulonglong) (((longlong) 1 << typelib->count) - (longlong) 1)) { tmp=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } } else if (got_warning) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); store_type(tmp); return err; } @@ -8074,7 +8075,7 @@ int Field_set::store(longlong nr, bool unsigned_val) if ((ulonglong) nr > max_nr) { nr&= max_nr; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + 
set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); error=1; } store_type((ulonglong) nr); @@ -8428,9 +8429,9 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs) set_rec_bits((1 << bit_len) - 1, bit_ptr, bit_ofs, bit_len); memset(ptr, 0xff, bytes_in_rec); if (get_thd()->really_abort_on_warning()) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1); else - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } /* delta is >= -1 here */ @@ -8518,7 +8519,7 @@ String *Field_bit::val_str(String *val_buffer, { ASSERT_COLUMN_MARKED_FOR_READ; char buff[sizeof(longlong)]; - uint length= min(pack_length(), sizeof(longlong)); + uint length= MY_MIN(pack_length(), sizeof(longlong)); ulonglong bits= val_int(); mi_int8store(buff,bits); @@ -8606,7 +8607,7 @@ uint Field_bit::get_key_image(uchar *buff, uint length, imagetype type_arg) *buff++= bits; length--; } - uint data_length = min(length, bytes_in_rec); + uint data_length = MY_MIN(length, bytes_in_rec); memcpy(buff, ptr, data_length); return data_length + 1; } @@ -8730,7 +8731,7 @@ Field_bit::pack(uchar *to, const uchar *from, uint max_length) uchar bits= get_rec_bits(bit_ptr + (from - ptr), bit_ofs, bit_len); *to++= bits; } - length= min(bytes_in_rec, max_length - (bit_len > 0)); + length= MY_MIN(bytes_in_rec, max_length - (bit_len > 0)); memcpy(to, from, length); return to + length; } @@ -8865,9 +8866,9 @@ int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs) if (bits) *ptr&= ((1 << bits) - 1); /* set first uchar */ if (get_thd()->really_abort_on_warning()) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1); + set_warning(Sql_condition::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1); else - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + 
set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } bzero(ptr, delta); @@ -9270,7 +9271,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, /* Otherwise a default of '' is just a warning. */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_BLOB_CANT_HAVE_DEFAULT, ER(ER_BLOB_CANT_HAVE_DEFAULT), fld_name); @@ -9762,11 +9763,6 @@ Create_field::Create_field(Field *old_field,Field *orig_field) option_list= old_field->option_list; option_struct= old_field->option_struct; - /* Fix if the original table had 4 byte pointer blobs */ - if (flags & BLOB_FLAG) - pack_length= (pack_length- old_field->table->s->blob_ptr_size + - portable_sizeof_char_ptr); - switch (sql_type) { case MYSQL_TYPE_BLOB: switch (pack_length - portable_sizeof_char_ptr) { @@ -9801,7 +9797,7 @@ Create_field::Create_field(Field *old_field,Field *orig_field) { char buff[sizeof("YEAR()") + MY_INT64_NUM_DECIMAL_DIGITS + 1]; my_snprintf(buff, sizeof(buff), "YEAR(%lu)", length); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_NOTE, ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), buff, "YEAR(4)"); @@ -9929,11 +9925,11 @@ uint32 Field_blob::max_display_length() *****************************************************************************/ /** - Produce warning or note about data saved into field. +* Produce warning or note about data saved into field. 
@param level - level of message (Note/Warning/Error) @param code - error code of message to be produced - @param cuted_increment - whenever we should increase cut fields count or not + @param cut_increment - whenever we should increase cut fields count @note This function won't produce warning and increase cut fields counter @@ -9941,11 +9937,16 @@ uint32 Field_blob::max_display_length() if count_cuted_fields == CHECK_FIELD_IGNORE then we ignore notes. This allows us to avoid notes in optimisation, like convert_constant_item(). + + @retval + 1 if count_cuted_fields == CHECK_FIELD_IGNORE and error level is not NOTE + @retval + 0 otherwise */ -void -Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code, - int cuted_increment) +bool +Field::set_warning(Sql_condition::enum_warning_level level, uint code, + int cut_increment) const { /* If this field was created only for type conversion purposes it @@ -9954,10 +9955,12 @@ Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code, THD *thd= table ? 
table->in_use : current_thd; if (thd->count_cuted_fields) { - thd->cuted_fields+= cuted_increment; + thd->cuted_fields+= cut_increment; push_warning_printf(thd, level, code, ER(code), field_name, - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); + return 0; } + return level >= Sql_condition::WARN_LEVEL_WARN; } @@ -9979,12 +9982,12 @@ Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code, */ -void Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level, +void Field::set_datetime_warning(Sql_condition::enum_warning_level level, uint code, const ErrConv *str, timestamp_type ts_type, int cuted_increment) { THD *thd= get_thd(); - if (thd->really_abort_on_warning() && level >= MYSQL_ERROR::WARN_LEVEL_WARN) + if (thd->really_abort_on_warning() && level >= Sql_condition::WARN_LEVEL_WARN) make_truncated_value_warning(thd, level, str, ts_type, field_name); else set_warning(level, code, cuted_increment); diff --git a/sql/field.h b/sql/field.h index feef0cbef08..40be4f7776a 100644 --- a/sql/field.h +++ b/sql/field.h @@ -29,7 +29,7 @@ #include "table.h" /* TABLE */ #include "sql_string.h" /* String */ #include "my_decimal.h" /* my_decimal */ -#include "sql_error.h" /* MYSQL_ERROR */ +#include "sql_error.h" /* Sql_condition */ #include "compat56.h" class Send_field; @@ -569,32 +569,53 @@ public: */ virtual void sql_type(String &str) const =0; virtual uint size_of() const =0; // For new field - inline bool is_null(my_ptrdiff_t row_offset= 0) - { return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : table->null_row; } - inline bool is_real_null(my_ptrdiff_t row_offset= 0) + inline bool is_null(my_ptrdiff_t row_offset= 0) const + { + /* + The table may have been marked as containing only NULL values + for all fields if it is a NULL-complemented row of an OUTER JOIN + or if the query is an implicitly grouped query (has aggregate + functions but no GROUP BY clause) with no qualifying rows. 
If + this is the case (in which TABLE::null_row is true), the field + is considered to be NULL. + Note that if a table->null_row is set then also all null_bits are + set for the row. + + Otherwise, if the field is NULLable, it has a valid null_ptr + pointer, and its NULLity is recorded in the "null_bit" bit of + null_ptr[row_offset]. + */ + return (table->null_row ? TRUE : + null_ptr ? test(null_ptr[row_offset] & null_bit) : 0); + } + inline bool is_real_null(my_ptrdiff_t row_offset= 0) const { return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : 0; } - inline bool is_null_in_record(const uchar *record) + inline bool is_null_in_record(const uchar *record) const { if (!null_ptr) return 0; return test(record[(uint) (null_ptr -table->record[0])] & null_bit); } - inline bool is_null_in_record_with_offset(my_ptrdiff_t col_offset) - { - if (!null_ptr) - return 0; - return test(null_ptr[col_offset] & null_bit); - } inline void set_null(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]|= null_bit; } inline void set_notnull(my_ptrdiff_t row_offset= 0) { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } - inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } - /** - Signals that this field is NULL-able. - */ - inline bool real_maybe_null(void) { return null_ptr != 0; } + inline bool maybe_null(void) const + { return null_ptr != 0 || table->maybe_null; } + + /* @return true if this field is NULL-able, false otherwise. */ + inline bool real_maybe_null(void) const { return null_ptr != 0; } + uint null_offset(const uchar *record) const + { return (uint) (null_ptr - record); } + + uint null_offset() const + { return null_offset(table->record[0]); } + void set_null_ptr(uchar *p_null_ptr, uint p_null_bit) + { + null_ptr= p_null_ptr; + null_bit= p_null_bit; + } inline THD *get_thd() { return table ? 
table->in_use : current_thd; } @@ -762,9 +783,9 @@ public: virtual uint repertoire(void) const { return MY_REPERTOIRE_UNICODE30; } virtual void set_derivation(enum Derivation derivation_arg) { } virtual int set_time() { return 1; } - void set_warning(MYSQL_ERROR::enum_warning_level, unsigned int code, - int cuted_increment); - void set_datetime_warning(MYSQL_ERROR::enum_warning_level, uint code, + bool set_warning(Sql_condition::enum_warning_level, unsigned int code, + int cuted_increment) const; + void set_datetime_warning(Sql_condition::enum_warning_level, uint code, const ErrConv *str, timestamp_type ts_type, int cuted_increment); inline bool check_overflow(int op_result) @@ -809,6 +830,30 @@ public: return GEOM_GEOMETRY; } + ha_storage_media field_storage_type() const + { + return (ha_storage_media) + ((flags >> FIELD_FLAGS_STORAGE_MEDIA) & 3); + } + + void set_storage_type(ha_storage_media storage_type_arg) + { + DBUG_ASSERT(field_storage_type() == HA_SM_DEFAULT); + flags |= (storage_type_arg << FIELD_FLAGS_STORAGE_MEDIA); + } + + column_format_type column_format() const + { + return (column_format_type) + ((flags >> FIELD_FLAGS_COLUMN_FORMAT) & 3); + } + + void set_column_format(column_format_type column_format_arg) + { + DBUG_ASSERT(column_format() == COLUMN_FORMAT_TYPE_DEFAULT); + flags |= (column_format_arg << FIELD_FLAGS_COLUMN_FORMAT); + } + key_map get_possible_keys(); /* Hash value */ @@ -1458,7 +1503,7 @@ public: return (Field::eq_def(field) && decimals() == field->decimals()); } my_decimal *val_decimal(my_decimal*); - void set_warnings(MYSQL_ERROR::enum_warning_level trunc_level, + void set_warnings(Sql_condition::enum_warning_level trunc_level, const ErrConv *str, int was_cut, timestamp_type ts_type); double pos_in_interval(Field *min, Field *max) { @@ -2274,6 +2319,7 @@ public: Field_blob(uint32 packlength_arg) :Field_longstr((uchar*) 0, 0, (uchar*) "", 0, NONE, "temp", system_charset_info), packlength(packlength_arg) {} + /* Note that the default 
copy constructor is used, in clone() */ enum_field_types type() const { return MYSQL_TYPE_BLOB;} enum ha_base_keytype key_type() const { return binary() ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; } @@ -2298,7 +2344,7 @@ public: uint32 key_length() const { return 0; } void sort_string(uchar *buff,uint length); uint32 pack_length() const - { return (uint32) (packlength+table->s->blob_ptr_size); } + { return (uint32) (packlength + portable_sizeof_char_ptr); } /** Return the packed length without the pointer size added. @@ -2764,12 +2810,23 @@ public: { return (flags & (BINCMP_FLAG | BINARY_FLAG)) != 0; } + + ha_storage_media field_storage_type() const + { + return (ha_storage_media) + ((flags >> FIELD_FLAGS_STORAGE_MEDIA) & 3); + } + + column_format_type column_format() const + { + return (column_format_type) + ((flags >> FIELD_FLAGS_COLUMN_FORMAT) & 3); + } + uint virtual_col_expr_maxlen() { return 255 - FRM_VCOL_HEADER_SIZE(interval != NULL); } -private: - const String empty_set_string; }; diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 6c3fcc0d355..5e16166531d 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -133,7 +133,7 @@ set_field_to_null(Field *field) field->reset(); switch (field->table->in_use->count_cuted_fields) { case CHECK_FIELD_WARN: - field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); /* fall through */ case CHECK_FIELD_IGNORE: return 0; @@ -202,7 +202,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) } switch (field->table->in_use->count_cuted_fields) { case CHECK_FIELD_WARN: - field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_BAD_NULL_ERROR, 1); + field->set_warning(Sql_condition::WARN_LEVEL_WARN, ER_BAD_NULL_ERROR, 1); /* fall through */ case CHECK_FIELD_IGNORE: return 0; @@ -272,7 +272,7 @@ static void do_copy_nullable_row_to_notnull(Copy_field *copy) if (*copy->null_row || (copy->from_null_ptr && 
(*copy->from_null_ptr & copy->from_bit))) { - copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); copy->to_field->reset(); } @@ -288,7 +288,7 @@ static void do_copy_not_null(Copy_field *copy) { if (*copy->from_null_ptr & copy->from_bit) { - copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); copy->to_field->reset(); } @@ -440,7 +440,7 @@ static void do_cut_string(Copy_field *copy) (char*) copy->from_ptr + copy->from_length, MY_SEQ_SPACES) < copy->from_length - copy->to_length) { - copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } } @@ -471,7 +471,7 @@ static void do_cut_string_complex(Copy_field *copy) (char*) from_end, MY_SEQ_SPACES) < (copy->from_length - copy_length)) { - copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } @@ -510,7 +510,7 @@ static void do_varstring1(Copy_field *copy) length=copy->to_length - 1; if (copy->from_field->table->in_use->count_cuted_fields && copy->to_field) - copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } *(uchar*) copy->to_ptr= (uchar) length; @@ -531,7 +531,7 @@ static void do_varstring1_mb(Copy_field *copy) if (length < from_length) { if (current_thd->count_cuted_fields) - copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } *copy->to_ptr= (uchar) length; @@ -547,7 +547,7 @@ static void do_varstring2(Copy_field *copy) length=copy->to_length-HA_KEY_BLOB_LENGTH; if (copy->from_field->table->in_use->count_cuted_fields && copy->to_field) - 
copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } int2store(copy->to_ptr,length); @@ -569,7 +569,7 @@ static void do_varstring2_mb(Copy_field *copy) if (length < from_length) { if (current_thd->count_cuted_fields) - copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } int2store(copy->to_ptr, length); @@ -712,8 +712,8 @@ Copy_field::get_copy_func(Field *to,Field *from) if (from_length != to_length) { // Correct pointer to point at char pointer - to_ptr+= to_length - to->table->s->blob_ptr_size; - from_ptr+= from_length- from->table->s->blob_ptr_size; + to_ptr+= to_length - portable_sizeof_char_ptr; + from_ptr+= from_length - portable_sizeof_char_ptr; return do_copy_blob; } } @@ -829,7 +829,7 @@ Copy_field::get_copy_func(Field *to,Field *from) int field_conv(Field *to,Field *from) { if (to->real_type() == from->real_type() && - !(to->type() == MYSQL_TYPE_BLOB && to->table->copy_blobs)) + !(to->flags & BLOB_FLAG && to->table->copy_blobs)) { if (to->pack_length() == from->pack_length() && !(to->flags & UNSIGNED_FLAG && !(from->flags & UNSIGNED_FLAG)) && @@ -858,7 +858,7 @@ int field_conv(Field *to,Field *from) return 0; } } - if (to->type() == MYSQL_TYPE_BLOB) + if (to->flags & BLOB_FLAG) { // Be sure the value is stored Field_blob *blob=(Field_blob*) to; from->val_str(&blob->value); diff --git a/sql/filesort.cc b/sql/filesort.cc index f5a85036faa..7cb2306eb7c 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -245,12 +245,12 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, { DBUG_PRINT("info", ("filesort PQ is not applicable")); - size_t min_sort_memory= max(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2); + size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2); set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2); 
while (memory_available >= min_sort_memory) { ulonglong keys= memory_available / (param.rec_length + sizeof(char*)); - param.max_keys_per_buffer= (uint) min(num_rows, keys); + param.max_keys_per_buffer= (uint) MY_MIN(num_rows, keys); if (table_sort.get_sort_keys()) { // If we have already allocated a buffer, it better have same size! @@ -391,7 +391,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, MYF(0), ER_THD(thd, ER_FILSORT_ABORT), kill_errno ? ER(kill_errno) : - thd->killed == ABORT_QUERY ? "" : thd->stmt_da->message()); + thd->killed == ABORT_QUERY ? "" : + thd->get_stmt_da()->message()); if (global_system_variables.log_warnings > 1) { @@ -1371,7 +1372,7 @@ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, register uint count; uint length; - if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count))) + if ((count=(uint) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count))) { if (mysql_file_pread(fromfile->file, (uchar*) buffpek->base, (length= rec_length*count), @@ -1696,7 +1697,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file, != -1 && error != 0); end: - lastbuff->count= min(org_max_rows-max_rows, param->max_rows); + lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows); lastbuff->file_pos= to_start_filepos; err: delete_queue(&queue); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 6fc30fa4fa0..9524a0366d3 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -379,11 +379,11 @@ static int ndb_to_mysql_error(const NdbError *ndberr) - Used by replication to see if the error was temporary */ if (ndberr->status == NdbError::TemporaryError) - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG), ndberr->code, ndberr->message, "NDB"); else - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, 
Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), ndberr->code, ndberr->message, "NDB"); return error; @@ -650,7 +650,7 @@ static void set_ndb_err(THD *thd, const NdbError &err) { char buf[FN_REFLEN]; ndb_error_string(thd_ndb->m_error_code, buf, sizeof(buf)); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), thd_ndb->m_error_code, buf, "NDB"); } @@ -930,7 +930,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u", (long) blob_ptr, blob_len)); - DBUG_DUMP("value", blob_ptr, min(blob_len, 26)); + DBUG_DUMP("value", blob_ptr, MY_MIN(blob_len, 26)); if (set_blob_value) *set_blob_value= TRUE; @@ -1245,8 +1245,8 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, } KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->key_parts; - DBUG_ASSERT(key_info->key_parts == sz); + KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; + DBUG_ASSERT(key_info->user_defined_key_parts == sz); for (unsigned i= 0; key_part != end; key_part++, i++) { const char *field_name= key_part->field->field_name; @@ -1576,7 +1576,7 @@ NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_key(uint inx, bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info) { KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->key_parts; + KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; DBUG_ENTER("ha_ndbcluster::check_index_fields_not_null"); for (; key_part != end; key_part++) @@ -1733,7 +1733,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const uchar *key) { KEY* key_info= table->key_info + table_share->primary_key; KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->key_parts; + KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; DBUG_ENTER("set_primary_key"); for (; 
key_part != end; key_part++) @@ -1755,7 +1755,7 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const uchar *re { KEY* key_info= table->key_info + table_share->primary_key; KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->key_parts; + KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; DBUG_ENTER("set_primary_key_from_record"); for (; key_part != end; key_part++) @@ -1772,7 +1772,7 @@ bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno) { KEY* key_info= table->key_info + keyno; KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->key_parts; + KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; uint i; DBUG_ENTER("check_index_fields_in_write_set"); @@ -1793,7 +1793,7 @@ int ha_ndbcluster::set_index_key_from_record(NdbOperation *op, { KEY* key_info= table->key_info + keyno; KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->key_parts; + KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; uint i; DBUG_ENTER("set_index_key_from_record"); @@ -1815,7 +1815,7 @@ ha_ndbcluster::set_index_key(NdbOperation *op, DBUG_ENTER("ha_ndbcluster::set_index_key"); uint i; KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->key_parts; + KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; for (i= 0; key_part != end; key_part++, i++) { @@ -2083,7 +2083,7 @@ check_null_in_record(const KEY* key_info, const uchar *record) { KEY_PART_INFO *curr_part, *end_part; curr_part= key_info->key_part; - end_part= curr_part + key_info->key_parts; + end_part= curr_part + key_info->user_defined_key_parts; while (curr_part != end_part) { @@ -2177,7 +2177,7 @@ int ha_ndbcluster::peek_indexed_rows(const uchar *record, NdbIndexOperation *iop; const NDBINDEX *unique_index = m_index[i].unique_index; key_part= key_info->key_part; - end= key_part + key_info->key_parts; + end= key_part + 
key_info->user_defined_key_parts; if (!(iop= trans->getNdbIndexOperation(unique_index, m_table)) || iop->readTuple(lm) != 0) ERR_RETURN(trans->getNdbError()); @@ -2405,7 +2405,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, uint range_no) { const KEY *const key_info= table->key_info + inx; - const uint key_parts= key_info->key_parts; + const uint key_parts= key_info->user_defined_key_parts; uint key_tot_len[2]; uint tot_len; uint i, j; @@ -3206,7 +3206,7 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data) undo_res= write_row((uchar *)old_data); if (undo_res) push_warning(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, undo_res, "NDB failed undoing delete at primary key update"); m_primary_key_update= FALSE; @@ -3708,7 +3708,7 @@ check_null_in_key(const KEY* key_info, const uchar *key, uint key_len) KEY_PART_INFO *curr_part, *end_part; const uchar* end_ptr= key + key_len; curr_part= key_info->key_part; - end_part= curr_part + key_info->key_parts; + end_part= curr_part + key_info->user_defined_key_parts; for (; curr_part != end_part && key < end_ptr; curr_part++) { @@ -4079,7 +4079,7 @@ void ha_ndbcluster::position(const uchar *record) key_length= ref_length; key_info= table->key_info + table_share->primary_key; key_part= key_info->key_part; - end= key_part + key_info->key_parts; + end= key_part + key_info->user_defined_key_parts; buff= ref; for (; key_part != end; key_part++) @@ -5416,7 +5416,7 @@ int ha_ndbcluster::create(const char *name, { if (create_info->storage_media == HA_SM_MEMORY) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, ER(ER_ILLEGAL_HA_CREATE_OPTION), ndbcluster_hton_name, @@ -5471,7 +5471,7 @@ int ha_ndbcluster::create(const char *name, case ROW_TYPE_FIXED: if (field_type_forces_var_part(field->type())) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + 
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, ER(ER_ILLEGAL_HA_CREATE_OPTION), ndbcluster_hton_name, @@ -5500,7 +5500,7 @@ int ha_ndbcluster::create(const char *name, for (i= 0, key_info= form->key_info; i < form->s->keys; i++, key_info++) { KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *end= key_part + key_info->key_parts; + KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts; for (; key_part != end; key_part++) tab.getColumn(key_part->fieldnr-1)->setStorageType( NdbDictionary::Column::StorageTypeMemory); @@ -5802,7 +5802,7 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info, case UNIQUE_INDEX: if (check_index_fields_not_null(key_info)) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_NULL_COLUMN_IN_INDEX, "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan"); } @@ -5811,7 +5811,7 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info, case ORDERED_INDEX: if (key_info->algorithm == HA_KEY_ALG_HASH) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, ER(ER_ILLEGAL_HA_CREATE_OPTION), ndbcluster_hton_name, @@ -5860,7 +5860,7 @@ int ha_ndbcluster::create_ndb_index(const char *name, Ndb *ndb= get_ndb(); NdbDictionary::Dictionary *dict= ndb->getDictionary(); KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *end= key_part + key_info->key_parts; + KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts; DBUG_ENTER("ha_ndbcluster::create_index"); DBUG_PRINT("enter", ("name: %s ", name)); @@ -7284,7 +7284,7 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, file_name->str)); if (ndb_create_table_from_engine(thd, db, file_name->str)) { - push_warning_printf(current_thd, 
MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TABLE_EXISTS_ERROR, "Discover of table %s.%s failed", db, file_name->str); @@ -7310,7 +7310,7 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, file_name->length); DBUG_ASSERT(record); my_hash_delete(&ndb_tables, record); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TABLE_EXISTS_ERROR, "Local table %s.%s shadows ndb table", db, file_name->str); @@ -8114,23 +8114,33 @@ uint8 ha_ndbcluster::table_cache_type() } -uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, +/** + Retrieve the commit count for the table object. + + @param thd Thread context. + @param norm_name Normalized path to the table. + @param[out] commit_count Commit count for the table. + + @return 0 on success. + @return 1 if an error occured. +*/ + +uint ndb_get_commitcount(THD *thd, char *norm_name, Uint64 *commit_count) { - char name[FN_REFLEN + 1]; + char dbname[NAME_LEN + 1]; NDB_SHARE *share; DBUG_ENTER("ndb_get_commitcount"); - build_table_filename(name, sizeof(name) - 1, - dbname, tabname, "", 0); - DBUG_PRINT("enter", ("name: %s", name)); - mysql_mutex_lock(&ndbcluster_mutex); + DBUG_PRINT("enter", ("name: %s", norm_name)); + pthread_mutex_lock(&ndbcluster_mutex); if (!(share=(NDB_SHARE*) my_hash_search(&ndbcluster_open_tables, - (uchar*) name, - strlen(name)))) + (const uchar*) norm_name, + strlen(norm_name)))) { - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", name)); + pthread_mutex_unlock(&ndbcluster_mutex); + DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", + norm_name)); DBUG_RETURN(1); } /* ndb_share reference temporary, free below */ @@ -8162,6 +8172,8 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, Ndb *ndb; if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(1); + + 
ha_ndbcluster::set_dbname(norm_name, dbname); if (ndb->setDatabaseName(dbname)) { ERR_RETURN(ndb->getNdbError()); @@ -8171,7 +8183,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, struct Ndb_statistics stat; { - Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname); + char tblname[NAME_LEN + 1]; + ha_ndbcluster::set_tabname(norm_name, tblname); + Ndb_table_guard ndbtab_g(ndb->getDictionary(), tblname); if (ndbtab_g.get_table() == 0 || ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat)) { @@ -8221,10 +8235,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, @param thd thread handle - @param full_name concatenation of database name, - the null character '\\0', and the table name - @param full_name_len length of the full name, - i.e. len(dbname) + len(tablename) + 1 + @param full_name normalized path to the table in the canonical + format. + @param full_name_len length of the normalized path to the table. @param engine_data parameter retrieved when query was first inserted into the cache. If the value of engine_data is changed, all queries for this table should be invalidated. 
@@ -8243,11 +8256,15 @@ ndbcluster_cache_retrieval_allowed(THD *thd, ulonglong *engine_data) { Uint64 commit_count; - char *dbname= full_name; - char *tabname= dbname+strlen(dbname)+1; + char dbname[NAME_LEN + 1]; + char tabname[NAME_LEN + 1]; #ifndef DBUG_OFF char buff[22], buff2[22]; #endif + + ha_ndbcluster::set_dbname(full_name, dbname); + ha_ndbcluster::set_tabname(full_name, tabname); + DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); DBUG_PRINT("enter", ("dbname: %s, tabname: %s", dbname, tabname)); @@ -8257,7 +8274,7 @@ ndbcluster_cache_retrieval_allowed(THD *thd, DBUG_RETURN(FALSE); } - if (ndb_get_commitcount(thd, dbname, tabname, &commit_count)) + if (ndb_get_commitcount(thd, full_name, &commit_count)) { *engine_data= 0; /* invalidate */ DBUG_PRINT("exit", ("No, could not retrieve commit_count")); @@ -8292,10 +8309,9 @@ ndbcluster_cache_retrieval_allowed(THD *thd, the cached query is reused. @param thd thread handle - @param full_name concatenation of database name, - the null character '\\0', and the table name - @param full_name_len length of the full name, - i.e. len(dbname) + len(tablename) + 1 + @param full_name normalized path to the table in the + canonical format. + @param full_name_len length of the normalized path to the table. 
@param engine_callback function to be called before using cache on this table @param[out] engine_data commit_count for this table @@ -8325,7 +8341,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd, DBUG_RETURN(FALSE); } - if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count)) + if (ndb_get_commitcount(thd, full_name, &commit_count)) { *engine_data= 0; DBUG_PRINT("exit", ("Error, could not get commitcount")); @@ -9827,11 +9843,11 @@ char* ha_ndbcluster::get_tablespace_name(THD *thd, char* name, uint name_len) } err: if (ndberr.status == NdbError::TemporaryError) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG), ndberr.code, ndberr.message, "NDB"); else - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), ndberr.code, ndberr.message, "NDB"); return 0; @@ -9957,7 +9973,7 @@ int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *create_info) if (adjusted_frag_count(no_fragments, no_nodes, reported_frags)) { push_warning(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, "Ndb might have problems storing the max amount of rows specified"); } return (int)reported_frags; @@ -10146,7 +10162,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, { if (!current_thd->variables.new_mode) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, ER(ER_ILLEGAL_HA_CREATE_OPTION), ndbcluster_hton_name, diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 1544678de38..61dac31e52a 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -290,13 +290,13 @@ static void run_query(THD *thd, char *buf, char *end, Thd_ndb 
*thd_ndb= get_thd_ndb(thd); for (i= 0; no_print_error[i]; i++) if ((thd_ndb->m_error_code == no_print_error[i]) || - (thd->stmt_da->sql_errno() == (unsigned) no_print_error[i])) + (thd->get_stmt_da()->sql_errno() == (unsigned) no_print_error[i])) break; if (!no_print_error[i]) sql_print_error("NDB: %s: error %s %d(ndb: %d) %d %d", buf, - thd->stmt_da->message(), - thd->stmt_da->sql_errno(), + thd->get_stmt_da()->message(), + thd->get_stmt_da()->sql_errno(), thd_ndb->m_error_code, (int) thd->is_error(), thd->is_slave_error); } @@ -310,7 +310,7 @@ static void run_query(THD *thd, char *buf, char *end, is called from ndbcluster_reset_logs(), which is called from mysql_flush(). */ - thd->stmt_da->reset_diagnostics_area(); + thd->get_stmt_da()->reset_diagnostics_area(); thd->variables.option_bits= save_thd_options; thd->set_query(save_thd_query, save_thd_query_length); @@ -984,8 +984,8 @@ static void print_could_not_discover_error(THD *thd, "my_errno: %d", schema->db, schema->name, schema->query, schema->node_id, my_errno); - List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list()); - MYSQL_ERROR *err; + List_iterator_fast<Sql_condition> it(thd->warning_info->warn_list()); + Sql_condition *err; while ((err= it++)) sql_print_warning("NDB Binlog: (%d)%s", err->get_sql_errno(), err->get_message_text()); @@ -1230,7 +1230,7 @@ ndbcluster_update_slock(THD *thd, char buf[1024]; my_snprintf(buf, sizeof(buf), "Could not release lock on '%s.%s'", db, table_name); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), ndb_error->code, ndb_error->message, buf); } @@ -1559,7 +1559,7 @@ err: } end: if (ndb_error) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), ndb_error->code, ndb_error->message, @@ -2349,8 +2349,8 @@ static int open_ndb_binlog_index(THD *thd, TABLE 
**ndb_binlog_index) sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed"); else sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'", - thd->stmt_da->sql_errno(), - thd->stmt_da->message()); + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->proc_info= save_proc_info; return -1; } @@ -2406,9 +2406,9 @@ int ndb_add_ndb_binlog_index(THD *thd, void *_row) } add_ndb_binlog_index_err: - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); close_thread_tables(thd); thd->mdl_context.release_transactional_locks(); ndb_binlog_index= 0; @@ -2730,7 +2730,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, "with BLOB attribute and no PK is not supported", share->key); if (push_warning) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, ER(ER_ILLEGAL_HA_CREATE_OPTION), ndbcluster_hton_name, @@ -2774,7 +2774,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, failed, print a warning */ if (push_warning > 1) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, dict->getNdbError().message, "NDB"); @@ -2802,7 +2802,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, dict->dropEvent(my_event.getName())) { if (push_warning > 1) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, dict->getNdbError().message, "NDB"); @@ -2821,7 +2821,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, if (dict->createEvent(my_event)) { if (push_warning > 1) - push_warning_printf(thd, 
MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, dict->getNdbError().message, "NDB"); @@ -2834,7 +2834,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, DBUG_RETURN(-1); } #ifdef NDB_BINLOG_EXTRA_WARNINGS - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), 0, "NDB Binlog: Removed trailing event", "NDB"); @@ -2945,7 +2945,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, { sql_print_error("NDB Binlog: Creating NdbEventOperation failed for" " %s",event_name); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), ndb->getNdbError().code, ndb->getNdbError().message, @@ -2994,7 +2994,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, sql_print_error("NDB Binlog: Creating NdbEventOperation" " blob field %u handles failed (code=%d) for %s", j, op->getNdbError().code, event_name); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), op->getNdbError().code, op->getNdbError().message, @@ -3033,7 +3033,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, retries= 0; if (retries == 0) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), op->getNdbError().code, op->getNdbError().message, "NDB"); @@ -3101,7 +3101,7 @@ ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name, if (dict->getNdbError().code != 4710) { /* drop event failed for some reason, issue a warning */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), 
dict->getNdbError().code, dict->getNdbError().message, "NDB"); @@ -4277,9 +4277,9 @@ err: sql_print_information("Stopping Cluster Binlog"); DBUG_PRINT("info",("Shutting down cluster binlog thread")); thd->proc_info= "Shutting down"; - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); close_thread_tables(thd); thd->mdl_context.release_transactional_locks(); mysql_mutex_lock(&injector_mutex); diff --git a/sql/ha_ndbcluster_cond.cc b/sql/ha_ndbcluster_cond.cc index f8b2ed8429a..22a7dbe55f7 100644 --- a/sql/ha_ndbcluster_cond.cc +++ b/sql/ha_ndbcluster_cond.cc @@ -1375,7 +1375,7 @@ ha_ndbcluster_cond::generate_scan_filter(NdbScanOperation *op) { // err.message has static storage DBUG_PRINT("info", ("%s", err.message)); - push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, err.code, err.message); ret=0; } @@ -1431,7 +1431,7 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanOperation *op, uchar *buf) { KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->key_parts; + KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; NdbScanFilter filter(op, true); // abort on too large int res; DBUG_ENTER("generate_scan_filter_from_key"); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 09ab6d48eba..a5acd5759aa 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1,5 +1,6 @@ /* Copyright (c) 2005, 2013, Oracle and/or its affiliates. + Copyright (c) 2009, 2013, Monty Program Ab & SkySQL Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -36,10 +37,6 @@ in the execution of queries. This functionality will grow with later versions of MySQL. 
- You can enable it in your buld by doing the following during your build - process: - ./configure --with-partition - The partition is setup to use table locks. It implements an partition "SHARE" that is inserted into a hash by table name. You can use this to store information of state that any partition handler object will be able to see @@ -49,10 +46,6 @@ if this file. */ -#ifdef __GNUC__ -#pragma implementation // gcc: Class implementation -#endif - #include "sql_priv.h" #include "sql_parse.h" // append_file_to_dir #include "create_options.h" @@ -62,12 +55,25 @@ #include "sql_table.h" // tablename_to_filename #include "key.h" #include "sql_plugin.h" -#include "table.h" /* HA_DATA_PARTITION */ #include "sql_show.h" // append_identifier #include "sql_admin.h" // SQL_ADMIN_MSG_TEXT_SIZE #include "debug_sync.h" +/* First 4 bytes in the .par file is the number of 32-bit words in the file */ +#define PAR_WORD_SIZE 4 +/* offset to the .par file checksum */ +#define PAR_CHECKSUM_OFFSET 4 +/* offset to the total number of partitions */ +#define PAR_NUM_PARTS_OFFSET 8 +/* offset to the engines array */ +#define PAR_ENGINES_OFFSET 12 +#define PARTITION_ENABLED_TABLE_FLAGS (HA_FILE_BASED | HA_REC_NOT_IN_SEQ) +#define PARTITION_DISABLED_TABLE_FLAGS (HA_CAN_GEOMETRY | \ + HA_CAN_FULLTEXT | \ + HA_DUPLICATE_POS | \ + HA_CAN_SQL_HANDLER | \ + HA_CAN_INSERT_DELAYED) static const char *ha_par_ext= ".par"; /**************************************************************************** @@ -92,6 +98,24 @@ static const char *ha_partition_ext[]= }; +#ifdef HAVE_PSI_INTERFACE +PSI_mutex_key key_partition_auto_inc_mutex; + +static PSI_mutex_info all_partition_mutexes[]= +{ + { &key_partition_auto_inc_mutex, "Partition_share::auto_inc_mutex", 0} +}; + +static void init_partition_psi_keys(void) +{ + const char* category= "partition"; + int count; + + count= array_elements(all_partition_mutexes); + mysql_mutex_register(category, all_partition_mutexes, count); +} +#endif /* HAVE_PSI_INTERFACE 
*/ + static int partition_initialize(void *p) { @@ -108,9 +132,44 @@ static int partition_initialize(void *p) HTON_TEMPORARY_NOT_SUPPORTED; partition_hton->tablefile_extensions= ha_partition_ext; +#ifdef HAVE_PSI_INTERFACE + init_partition_psi_keys(); +#endif return 0; } + +/** + Initialize and allocate space for partitions shares. + + @param num_parts Number of partitions to allocate storage for. + + @return Operation status. + @retval true Failure (out of memory). + @retval false Success. +*/ + +bool Partition_share::init(uint num_parts) +{ + DBUG_ENTER("Partition_share::init"); + mysql_mutex_init(key_partition_auto_inc_mutex, + &auto_inc_mutex, + MY_MUTEX_INIT_FAST); + auto_inc_initialized= false; + partition_name_hash_initialized= false; + next_auto_inc_val= 0; + partitions_share_refs= new Parts_share_refs; + if (!partitions_share_refs) + DBUG_RETURN(true); + if (partitions_share_refs->init(num_parts)) + { + delete partitions_share_refs; + DBUG_RETURN(true); + } + DBUG_RETURN(false); +} + + /* Create new partition handler @@ -165,7 +224,7 @@ static uint alter_table_flags(uint flags __attribute__((unused))) HA_FAST_CHANGE_PARTITION); } -const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF; +const uint32 ha_partition::NO_CURRENT_PART_ID= NOT_A_PARTITION_ID; /* Constructor method @@ -238,6 +297,8 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share, m_is_sub_partitioned= m_part_info->is_sub_partitioned(); m_is_clone_of= clone_arg; m_clone_mem_root= clone_mem_root_arg; + part_share= clone_arg->part_share; + m_tot_parts= clone_arg->m_tot_parts; DBUG_VOID_RETURN; } @@ -268,7 +329,6 @@ void ha_partition::init_handler_variables() m_added_file= NULL; m_tot_parts= 0; m_pkey_is_clustered= 0; - m_lock_type= F_UNLCK; m_part_spec.start_part= NO_CURRENT_PART_ID; m_scan_value= 2; m_ref_length= 0; @@ -289,7 +349,6 @@ void ha_partition::init_handler_variables() m_rec_length= 0; m_last_part= 0; m_rec0= 0; - m_err_rec= NULL; m_curr_key_info[0]= NULL; 
m_curr_key_info[1]= NULL; m_part_func_monotonicity_info= NON_MONOTONIC; @@ -304,6 +363,8 @@ void ha_partition::init_handler_variables() m_is_sub_partitioned= 0; m_is_clone_of= NULL; m_clone_mem_root= NULL; + part_share= NULL; + m_new_partitions_share_refs.empty(); m_part_ids_sorted_by_num_of_records= NULL; #ifdef DONT_HAVE_TO_BE_INITALIZED @@ -313,6 +374,13 @@ void ha_partition::init_handler_variables() } +const char *ha_partition::table_type() const +{ + // we can do this since we only support a single engine type + return m_file[0]->table_type(); +} + + /* Destructor method @@ -326,6 +394,8 @@ void ha_partition::init_handler_variables() ha_partition::~ha_partition() { DBUG_ENTER("ha_partition::~ha_partition()"); + if (m_new_partitions_share_refs.elements) + m_new_partitions_share_refs.delete_elements(); if (m_file != NULL) { uint i; @@ -476,7 +546,7 @@ int ha_partition::delete_table(const char *name) { DBUG_ENTER("ha_partition::delete_table"); - DBUG_RETURN(del_ren_cre_table(name, NULL, NULL, NULL)); + DBUG_RETURN(del_ren_table(name, NULL)); } @@ -506,7 +576,7 @@ int ha_partition::rename_table(const char *from, const char *to) { DBUG_ENTER("ha_partition::rename_table"); - DBUG_RETURN(del_ren_cre_table(from, to, NULL, NULL)); + DBUG_RETURN(del_ren_table(from, to)); } @@ -595,24 +665,86 @@ int ha_partition::create_partitioning_metadata(const char *path, int ha_partition::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { - char t_name[FN_REFLEN]; + int error; + char name_buff[FN_REFLEN], name_lc_buff[FN_REFLEN]; + char *name_buffer_ptr; + const char *path; + uint i; + List_iterator_fast <partition_element> part_it(m_part_info->partitions); + partition_element *part_elem; + handler **file, **abort_file; DBUG_ENTER("ha_partition::create"); - if (create_info->used_fields & HA_CREATE_USED_CONNECTION) + DBUG_ASSERT(*fn_rext((char*)name) == '\0'); + + /* Not allowed to create temporary partitioned tables */ + if (create_info && create_info->options 
& HA_LEX_CREATE_TMP_TABLE) { - my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), - "CONNECTION not valid for partition"); - DBUG_RETURN(1); + my_error(ER_PARTITION_NO_TEMPORARY, MYF(0)); + DBUG_RETURN(TRUE); } - strmov(t_name, name); - DBUG_ASSERT(*fn_rext((char*)name) == '\0'); - if (del_ren_cre_table(t_name, NULL, table_arg, create_info)) + if (get_from_handler_file(name, ha_thd()->mem_root, false)) + DBUG_RETURN(TRUE); + DBUG_ASSERT(m_file_buffer); + DBUG_PRINT("enter", ("name: (%s)", name)); + name_buffer_ptr= m_name_buffer_ptr; + file= m_file; + /* + Since ha_partition has HA_FILE_BASED, it must alter underlying table names + if they do not have HA_FILE_BASED and lower_case_table_names == 2. + See Bug#37402, for Mac OS X. + The appended #P#<partname>[#SP#<subpartname>] will remain in current case. + Using the first partitions handler, since mixing handlers is not allowed. + */ + path= get_canonical_filename(*file, name, name_lc_buff); + for (i= 0; i < m_part_info->num_parts; i++) { - handler::delete_table(t_name); - DBUG_RETURN(1); + part_elem= part_it++; + if (m_is_sub_partitioned) + { + uint j; + List_iterator_fast <partition_element> sub_it(part_elem->subpartitions); + for (j= 0; j < m_part_info->num_subparts; j++) + { + part_elem= sub_it++; + create_partition_name(name_buff, path, name_buffer_ptr, + NORMAL_PART_NAME, FALSE); + if ((error= set_up_table_before_create(table_arg, name_buff, + create_info, part_elem)) || + ((error= (*file)->ha_create(name_buff, table_arg, create_info)))) + goto create_error; + + name_buffer_ptr= strend(name_buffer_ptr) + 1; + file++; + } + } + else + { + create_partition_name(name_buff, path, name_buffer_ptr, + NORMAL_PART_NAME, FALSE); + if ((error= set_up_table_before_create(table_arg, name_buff, + create_info, part_elem)) || + ((error= (*file)->ha_create(name_buff, table_arg, create_info)))) + goto create_error; + + name_buffer_ptr= strend(name_buffer_ptr) + 1; + file++; + } } DBUG_RETURN(0); + +create_error: + 
name_buffer_ptr= m_name_buffer_ptr; + for (abort_file= file, file= m_file; file < abort_file; file++) + { + create_partition_name(name_buff, path, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + (void) (*file)->ha_delete_table((const char*) name_buff); + name_buffer_ptr= strend(name_buffer_ptr) + 1; + } + handler::delete_table(name); + DBUG_RETURN(error); } @@ -993,7 +1125,8 @@ int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt) { DBUG_ENTER("ha_partition::repair"); - DBUG_RETURN(handle_opt_partitions(thd, check_opt, REPAIR_PARTS)); + int res= handle_opt_partitions(thd, check_opt, REPAIR_PARTS); + DBUG_RETURN(res); } /** @@ -1049,11 +1182,10 @@ int ha_partition::preload_keys(THD *thd, HA_CHECK_OPT *check_opt) 0 Success */ -int ha_partition::handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, - uint part_id, uint flag) +static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, + handler *file, uint flag) { int error; - handler *file= m_file[part_id]; DBUG_ENTER("handle_opt_part"); DBUG_PRINT("enter", ("flag = %u", flag)); @@ -1062,27 +1194,9 @@ int ha_partition::handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, else if (flag == ANALYZE_PARTS) error= file->ha_analyze(thd, check_opt); else if (flag == CHECK_PARTS) - { error= file->ha_check(thd, check_opt); - if (!error || - error == HA_ADMIN_ALREADY_DONE || - error == HA_ADMIN_NOT_IMPLEMENTED) - { - if (check_opt->flags & (T_MEDIUM | T_EXTEND)) - error= check_misplaced_rows(part_id, false); - } - } else if (flag == REPAIR_PARTS) - { error= file->ha_repair(thd, check_opt); - if (!error || - error == HA_ADMIN_ALREADY_DONE || - error == HA_ADMIN_NOT_IMPLEMENTED) - { - if (check_opt->flags & (T_MEDIUM | T_EXTEND)) - error= check_misplaced_rows(part_id, true); - } - } else if (flag == ASSIGN_KEYCACHE_PARTS) error= file->assign_to_keycache(thd, check_opt); else if (flag == PRELOAD_KEYS_PARTS) @@ -1187,7 +1301,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, when ALTER TABLE <CMD> 
PARTITION ... it should only do named partitions, otherwise all partitions */ - if (!(thd->lex->alter_info.flags & ALTER_ADMIN_PARTITION) || + if (!(thd->lex->alter_info.flags & Alter_info::ALTER_ADMIN_PARTITION) || part_elem->part_state == PART_ADMIN) { if (m_is_sub_partitioned) @@ -1201,7 +1315,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, part= i * num_subparts + j; DBUG_PRINT("info", ("Optimize subpartition %u (%s)", part, sub_elem->partition_name)); - if ((error= handle_opt_part(thd, check_opt, part, flag))) + if ((error= handle_opt_part(thd, check_opt, m_file[part], flag))) { /* print a line which partition the error belongs to */ if (error != HA_ADMIN_NOT_IMPLEMENTED && @@ -1227,7 +1341,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, { DBUG_PRINT("info", ("Optimize partition %u (%s)", i, part_elem->partition_name)); - if ((error= handle_opt_part(thd, check_opt, i, flag))) + if ((error= handle_opt_part(thd, check_opt, m_file[i], flag))) { /* print a line which partition the error belongs to */ if (error != HA_ADMIN_NOT_IMPLEMENTED && @@ -1261,6 +1375,8 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, @retval TRUE Error/Not supported @retval FALSE Success + + @note Called if open_table_from_share fails and ::is_crashed(). */ bool ha_partition::check_and_repair(THD *thd) @@ -1340,9 +1456,25 @@ int ha_partition::prepare_new_partition(TABLE *tbl, int error; DBUG_ENTER("prepare_new_partition"); - if ((error= set_up_table_before_create(tbl, part_name, create_info, - 0, p_elem))) + /* + This call to set_up_table_before_create() is done for an alter table. + So this may be the second time around for this partition_element, + depending on how many partitions and subpartitions there were before, + and how many there are now. + The first time, on the CREATE, data_file_name and index_file_name + came from the parser. They did not have the file name attached to + the end. 
But if this partition is less than the total number of + previous partitions, it's data_file_name has the filename attached. + So we need to take the partition filename off if it exists. + That file name may be different from part_name, which will be + attached in append_file_to_dir(). + */ + truncate_partition_filename(p_elem->data_file_name); + truncate_partition_filename(p_elem->index_file_name); + + if ((error= set_up_table_before_create(tbl, part_name, create_info, p_elem))) goto error_create; + tbl->s->connect_string = p_elem->connect_string; if ((error= file->ha_create(part_name, tbl, create_info))) { @@ -1358,7 +1490,8 @@ int ha_partition::prepare_new_partition(TABLE *tbl, goto error_create; } DBUG_PRINT("info", ("partition %s created", part_name)); - if ((error= file->ha_open(tbl, part_name, m_mode, m_open_test_lock))) + if ((error= file->ha_open(tbl, part_name, m_mode, + m_open_test_lock | HA_OPEN_NO_PSI_CALL))) goto error_open; DBUG_PRINT("info", ("partition %s opened", part_name)); /* @@ -1519,7 +1652,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, (m_reorged_parts + 1)))) { mem_alloc_error(sizeof(handler*)*(m_reorged_parts+1)); - DBUG_RETURN(ER_OUTOFMEMORY); + DBUG_RETURN(HA_ERR_OUT_OF_MEM); } /* @@ -1551,7 +1684,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, (2*(num_remain_partitions + 1))))) { mem_alloc_error(sizeof(handler*)*2*(num_remain_partitions+1)); - DBUG_RETURN(ER_OUTOFMEMORY); + DBUG_RETURN(HA_ERR_OUT_OF_MEM); } m_added_file= &new_file_array[num_remain_partitions + 1]; @@ -1621,15 +1754,33 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, part_elem->part_state == PART_TO_BE_ADDED) { uint j= 0; + Parts_share_refs *p_share_refs; + /* + The Handler_shares for each partition's handler can be allocated + within this handler, since there will not be any more instances of the + new partitions, until the table is reopened after the ALTER succeeded. 
+ */ + p_share_refs= new Parts_share_refs; + if (!p_share_refs) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + if (p_share_refs->init(num_subparts)) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + if (m_new_partitions_share_refs.push_back(p_share_refs)) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); do { - if (!(new_file_array[part_count++]= + handler **new_file= &new_file_array[part_count++]; + if (!(*new_file= get_new_handler(table->s, thd->mem_root, part_elem->engine_type))) { mem_alloc_error(sizeof(handler)); - DBUG_RETURN(ER_OUTOFMEMORY); + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + if ((*new_file)->set_ha_share_ref(&p_share_refs->ha_shares[j])) + { + DBUG_RETURN(HA_ERR_OUT_OF_MEM); } } while (++j < num_subparts); if (part_elem->part_state == PART_CHANGED) @@ -1785,7 +1936,7 @@ int ha_partition::copy_partitions(ulonglong * const copied, late_extra_cache(reorg_part); if ((result= file->ha_rnd_init_with_error(1))) - goto error; + goto init_error; while (TRUE) { if ((result= file->ha_rnd_next(m_rec0))) @@ -1830,10 +1981,10 @@ int ha_partition::copy_partitions(ulonglong * const copied, DBUG_RETURN(FALSE); error: m_reorged_file[reorg_part]->ha_rnd_end(); +init_error: DBUG_RETURN(result); } - /* Update create info as part of ALTER TABLE @@ -1845,11 +1996,16 @@ error: NONE DESCRIPTION - Method empty so far + Forward this handler call to the storage engine foreach + partition handler. The data_file_name for each partition may + need to be reset if the tablespace was moved. Use a dummy + HA_CREATE_INFO structure and transfer necessary data. */ void ha_partition::update_create_info(HA_CREATE_INFO *create_info) { + DBUG_ENTER("ha_partition::update_create_info"); + /* Fix for bug#38751, some engines needs info-calls in ALTER. Archive need this since it flushes in ::info. 
@@ -1863,13 +2019,130 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info) if (!(create_info->used_fields & HA_CREATE_USED_AUTO)) create_info->auto_increment_value= stats.auto_increment_value; + /* + DATA DIRECTORY and INDEX DIRECTORY are never applied to the whole + partitioned table, only its parts. + */ + my_bool from_alter = (create_info->data_file_name == (const char*) -1); create_info->data_file_name= create_info->index_file_name = NULL; + create_info->connect_string.str= NULL; create_info->connect_string.length= 0; - return; + + /* + We do not need to update the individual partition DATA DIRECTORY settings + since they can be changed by ALTER TABLE ... REORGANIZE PARTITIONS. + */ + if (from_alter) + DBUG_VOID_RETURN; + + /* + send Handler::update_create_info() to the storage engine for each + partition that currently has a handler object. Using a dummy + HA_CREATE_INFO structure to collect DATA and INDEX DIRECTORYs. + */ + + List_iterator<partition_element> part_it(m_part_info->partitions); + partition_element *part_elem, *sub_elem; + uint num_subparts= m_part_info->num_subparts; + uint num_parts = num_subparts ? m_file_tot_parts / num_subparts + : m_file_tot_parts; + HA_CREATE_INFO dummy_info; + memset(&dummy_info, 0, sizeof(dummy_info)); + + /* + Since update_create_info() can be called from mysql_prepare_alter_table() + when not all handlers are set up, we look for that condition first. + If all handlers are not available, do not call update_create_info for any. 
+ */ + uint i, j, part; + for (i= 0; i < num_parts; i++) + { + part_elem= part_it++; + if (!part_elem) + DBUG_VOID_RETURN; + if (m_is_sub_partitioned) + { + List_iterator<partition_element> subpart_it(part_elem->subpartitions); + for (j= 0; j < num_subparts; j++) + { + sub_elem= subpart_it++; + if (!sub_elem) + DBUG_VOID_RETURN; + part= i * num_subparts + j; + if (part >= m_file_tot_parts || !m_file[part]) + DBUG_VOID_RETURN; + } + } + else + { + if (!m_file[i]) + DBUG_VOID_RETURN; + } + } + part_it.rewind(); + + for (i= 0; i < num_parts; i++) + { + part_elem= part_it++; + DBUG_ASSERT(part_elem); + if (m_is_sub_partitioned) + { + List_iterator<partition_element> subpart_it(part_elem->subpartitions); + for (j= 0; j < num_subparts; j++) + { + sub_elem= subpart_it++; + DBUG_ASSERT(sub_elem); + part= i * num_subparts + j; + DBUG_ASSERT(part < m_file_tot_parts && m_file[part]); + if (ha_legacy_type(m_file[part]->ht) == DB_TYPE_INNODB) + { + dummy_info.data_file_name= dummy_info.index_file_name = NULL; + m_file[part]->update_create_info(&dummy_info); + + if (dummy_info.data_file_name || sub_elem->data_file_name) + { + sub_elem->data_file_name = (char*) dummy_info.data_file_name; + } + if (dummy_info.index_file_name || sub_elem->index_file_name) + { + sub_elem->index_file_name = (char*) dummy_info.index_file_name; + } + } + } + } + else + { + DBUG_ASSERT(m_file[i]); + if (ha_legacy_type(m_file[i]->ht) == DB_TYPE_INNODB) + { + dummy_info.data_file_name= dummy_info.index_file_name= NULL; + m_file[i]->update_create_info(&dummy_info); + if (dummy_info.data_file_name || part_elem->data_file_name) + { + part_elem->data_file_name = (char*) dummy_info.data_file_name; + } + if (dummy_info.index_file_name || part_elem->index_file_name) + { + part_elem->index_file_name = (char*) dummy_info.index_file_name; + } + } + } + } + DBUG_VOID_RETURN; } +/** + Change the internal TABLE_SHARE pointer + + @param table_arg TABLE object + @param share New share to use + + @note Is used in error 
handling in ha_delete_table. + All handlers should exist (lock_partitions should not be used) +*/ + void ha_partition::change_table_ptr(TABLE *table_arg, TABLE_SHARE *share) { handler **file_array; @@ -1920,34 +2193,25 @@ char *ha_partition::update_table_comment(const char *comment) } +/** + Handle delete and rename table -/* - Handle delete, rename and create table - - SYNOPSIS - del_ren_cre_table() - from Full path of old table - to Full path of new table - table_arg Table object - create_info Create info + @param from Full path of old table + @param to Full path of new table - RETURN VALUE - >0 Error - 0 Success + @return Operation status + @retval >0 Error + @retval 0 Success - DESCRIPTION - Common routine to handle delete_table and rename_table. - The routine uses the partition handler file to get the - names of the partition instances. Both these routines - are called after creating the handler without table - object and thus the file is needed to discover the - names of the partitions and the underlying storage engines. + @note Common routine to handle delete_table and rename_table. + The routine uses the partition handler file to get the + names of the partition instances. Both these routines + are called after creating the handler without table + object and thus the file is needed to discover the + names of the partitions and the underlying storage engines. 
*/ -uint ha_partition::del_ren_cre_table(const char *from, - const char *to, - TABLE *table_arg, - HA_CREATE_INFO *create_info) +uint ha_partition::del_ren_table(const char *from, const char *to) { int save_error= 0; int error; @@ -1958,14 +2222,7 @@ uint ha_partition::del_ren_cre_table(const char *from, const char *to_path= NULL; uint i; handler **file, **abort_file; - DBUG_ENTER("del_ren_cre_table()"); - - /* Not allowed to create temporary partitioned tables */ - if (create_info && create_info->options & HA_LEX_CREATE_TMP_TABLE) - { - my_error(ER_PARTITION_NO_TEMPORARY, MYF(0)); - DBUG_RETURN(TRUE); - } + DBUG_ENTER("ha_partition::del_ren_table"); if (get_from_handler_file(from, ha_thd()->mem_root, false)) DBUG_RETURN(TRUE); @@ -1973,7 +2230,7 @@ uint ha_partition::del_ren_cre_table(const char *from, DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to ? to : "(nil)")); name_buffer_ptr= m_name_buffer_ptr; file= m_file; - if (to == NULL && table_arg == NULL) + if (to == NULL) { /* Delete table, start by delete the .par file. 
If error, break, otherwise @@ -1999,23 +2256,16 @@ uint ha_partition::del_ren_cre_table(const char *from, NORMAL_PART_NAME, FALSE); if (to != NULL) - { // Rename branch + { // Rename branch create_partition_name(to_buff, to_path, name_buffer_ptr, NORMAL_PART_NAME, FALSE); error= (*file)->ha_rename_table(from_buff, to_buff); if (error) goto rename_error; } - else if (table_arg == NULL) // delete branch - error= (*file)->ha_delete_table(from_buff); - else + else // delete branch { - if ((error= set_up_table_before_create(table_arg, from_buff, - create_info, i, NULL)) || - parse_engine_table_options(ha_thd(), (*file)->ht, - (*file)->table_share) || - ((error= (*file)->ha_create(from_buff, table_arg, create_info)))) - goto create_error; + error= (*file)->ha_delete_table(from_buff); } name_buffer_ptr= strend(name_buffer_ptr) + 1; if (error) @@ -2032,16 +2282,6 @@ uint ha_partition::del_ren_cre_table(const char *from, } } DBUG_RETURN(save_error); -create_error: - name_buffer_ptr= m_name_buffer_ptr; - for (abort_file= file, file= m_file; file < abort_file; file++) - { - create_partition_name(from_buff, from_path, name_buffer_ptr, NORMAL_PART_NAME, - FALSE); - (void) (*file)->ha_delete_table((const char*) from_buff); - name_buffer_ptr= strend(name_buffer_ptr) + 1; - } - DBUG_RETURN(error); rename_error: name_buffer_ptr= m_name_buffer_ptr; for (abort_file= file, file= m_file; file < abort_file; file++) @@ -2058,47 +2298,6 @@ rename_error: DBUG_RETURN(error); } -/* - Find partition based on partition id - - SYNOPSIS - find_partition_element() - part_id Partition id of partition looked for - - RETURN VALUE - >0 Reference to partition_element - 0 Partition not found -*/ - -partition_element *ha_partition::find_partition_element(uint part_id) -{ - uint i; - uint curr_part_id= 0; - List_iterator_fast <partition_element> part_it(m_part_info->partitions); - - for (i= 0; i < m_part_info->num_parts; i++) - { - partition_element *part_elem; - part_elem= part_it++; - if 
(m_is_sub_partitioned) - { - uint j; - List_iterator_fast <partition_element> sub_it(part_elem->subpartitions); - for (j= 0; j < m_part_info->num_subparts; j++) - { - part_elem= sub_it++; - if (part_id == curr_part_id++) - return part_elem; - } - } - else if (part_id == curr_part_id++) - return part_elem; - } - DBUG_ASSERT(0); - my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR)); - return NULL; -} - uint ha_partition::count_query_cache_dependant_tables(uint8 *tables_type) { DBUG_ENTER("ha_partition::count_query_cache_dependant_tables"); @@ -2115,26 +2314,27 @@ uint ha_partition::count_query_cache_dependant_tables(uint8 *tables_type) DBUG_RETURN(type == HA_CACHE_TBL_ASKTRANSACT ? m_tot_parts : 0); } -my_bool ha_partition::reg_query_cache_dependant_table(THD *thd, - char *key, uint key_len, - uint8 type, - Query_cache *cache, - Query_cache_block_table **block_table, - handler *file, - uint *n) +my_bool ha_partition:: +reg_query_cache_dependant_table(THD *thd, + char *engine_key, uint engine_key_len, + char *cache_key, uint cache_key_len, + uint8 type, + Query_cache *cache, + Query_cache_block_table **block_table, + handler *file, + uint *n) { DBUG_ENTER("ha_partition::reg_query_cache_dependant_table"); qc_engine_callback engine_callback; ulonglong engine_data; /* ask undelying engine */ - if (!file->register_query_cache_table(thd, key, - key_len, + if (!file->register_query_cache_table(thd, engine_key, + engine_key_len, &engine_callback, &engine_data)) { - DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", - key, - key + table_share->db.length + 1)); + DBUG_PRINT("qcache", ("Handler does not allow caching for %.*s", + engine_key_len, engine_key)); /* As this can change from call to call, don't reset set thd->lex->safe_to_cache_query @@ -2143,9 +2343,11 @@ my_bool ha_partition::reg_query_cache_dependant_table(THD *thd, DBUG_RETURN(TRUE); } (++(*block_table))->n= ++(*n); - if (!cache->insert_table(key_len, - key, (*block_table), + if 
(!cache->insert_table(cache_key_len, + cache_key, (*block_table), table_share->db.length, + (uint8) (cache_key_len - + table_share->table_cache_key.length), type, engine_callback, engine_data, FALSE)) @@ -2154,19 +2356,19 @@ my_bool ha_partition::reg_query_cache_dependant_table(THD *thd, } -my_bool ha_partition::register_query_cache_dependant_tables(THD *thd, - Query_cache *cache, - Query_cache_block_table **block_table, - uint *n) +my_bool ha_partition:: +register_query_cache_dependant_tables(THD *thd, + Query_cache *cache, + Query_cache_block_table **block_table, + uint *n) { - char *name; - uint prefix_length= table_share->table_cache_key.length + 3; + char *engine_key_end, *query_cache_key_end; + uint i; uint num_parts= m_part_info->num_parts; uint num_subparts= m_part_info->num_subparts; - uint i= 0; + int diff_length; List_iterator<partition_element> part_it(m_part_info->partitions); - char key[FN_REFLEN]; - + char engine_key[FN_REFLEN], query_cache_key[FN_REFLEN]; DBUG_ENTER("ha_partition::register_query_cache_dependant_tables"); /* see ha_partition::count_query_cache_dependant_tables */ @@ -2174,36 +2376,51 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd, DBUG_RETURN(FALSE); // nothing to register /* prepare static part of the key */ - memmove(key, table_share->table_cache_key.str, - table_share->table_cache_key.length); + memcpy(engine_key, table_share->normalized_path.str, + table_share->normalized_path.length); + memcpy(query_cache_key, table_share->table_cache_key.str, + table_share->table_cache_key.length); - name= key + table_share->table_cache_key.length - 1; - name[0]= name[2]= '#'; - name[1]= 'P'; - name+= 3; + diff_length= ((int) table_share->table_cache_key.length - + (int) table_share->normalized_path.length -1); + engine_key_end= engine_key + table_share->normalized_path.length; + query_cache_key_end= query_cache_key + table_share->table_cache_key.length -1; + + engine_key_end[0]= engine_key_end[2]= 
query_cache_key_end[0]= + query_cache_key_end[2]= '#'; + query_cache_key_end[1]= engine_key_end[1]= 'P'; + engine_key_end+= 3; + query_cache_key_end+= 3; + + i= 0; do { partition_element *part_elem= part_it++; - uint part_len= strmov(name, part_elem->partition_name) - name; + char *engine_pos= strmov(engine_key_end, part_elem->partition_name); if (m_is_sub_partitioned) { List_iterator<partition_element> subpart_it(part_elem->subpartitions); partition_element *sub_elem; - char *sname= name + part_len; uint j= 0, part; - sname[0]= sname[3]= '#'; - sname[1]= 'S'; - sname[2]= 'P'; - sname += 4; + engine_pos[0]= engine_pos[3]= '#'; + engine_pos[1]= 'S'; + engine_pos[2]= 'P'; + engine_pos += 4; do { + char *end; + uint length; sub_elem= subpart_it++; part= i * num_subparts + j; - uint spart_len= strmov(sname, sub_elem->partition_name) - name + 1; - if (reg_query_cache_dependant_table(thd, key, - prefix_length + part_len + 4 + - spart_len, + /* we store the end \0 as part of the key */ + end= strmov(engine_pos, sub_elem->partition_name); + length= end - engine_key; + /* Copy the suffix also to query cache key */ + memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end)); + if (reg_query_cache_dependant_table(thd, engine_key, length, + query_cache_key, + length + diff_length, m_file[part]->table_cache_type(), cache, block_table, m_file[part], @@ -2213,8 +2430,13 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd, } else { - if (reg_query_cache_dependant_table(thd, key, - prefix_length + part_len + 1, + char *end= engine_pos+1; // copy end \0 + uint length= end - engine_key; + /* Copy the suffix also to query cache key */ + memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end)); + if (reg_query_cache_dependant_table(thd, engine_key, length, + query_cache_key, + length + diff_length, m_file[i]->table_cache_type(), cache, block_table, m_file[i], @@ -2227,31 +2449,28 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD 
*thd, } -/* - Set up table share object before calling create on underlying handler - - SYNOPSIS - set_up_table_before_create() - table Table object - info Create info - part_id Partition id of partition to set-up +/** + Set up table share object before calling create on underlying handler - RETURN VALUE - TRUE Error - FALSE Success + @param table Table object + @param info Create info + @param part_elem[in,out] Pointer to used partition_element, searched if NULL - DESCRIPTION - Set up - 1) Comment on partition - 2) MAX_ROWS, MIN_ROWS on partition - 3) Index file name on partition - 4) Data file name on partition + @return status + @retval TRUE Error + @retval FALSE Success + + @details + Set up + 1) Comment on partition + 2) MAX_ROWS, MIN_ROWS on partition + 3) Index file name on partition + 4) Data file name on partition */ int ha_partition::set_up_table_before_create(TABLE *tbl, const char *partition_name_with_path, HA_CREATE_INFO *info, - uint part_id, partition_element *part_elem) { int error= 0; @@ -2259,12 +2478,10 @@ int ha_partition::set_up_table_before_create(TABLE *tbl, THD *thd= ha_thd(); DBUG_ENTER("set_up_table_before_create"); + DBUG_ASSERT(part_elem); + if (!part_elem) - { - part_elem= find_partition_element(part_id); - if (!part_elem) - DBUG_RETURN(1); // Fatal error - } + DBUG_RETURN(1); tbl->s->max_rows= part_elem->part_max_rows; tbl->s->min_rows= part_elem->part_min_rows; partition_name= strrchr(partition_name_with_path, FN_LIBCHAR); @@ -2399,10 +2616,8 @@ bool ha_partition::create_handler_file(const char *name) /* 4 static words (tot words, checksum, tot partitions, name length) */ tot_len_words= 4 + tot_partition_words + tot_name_words; tot_len_byte= PAR_WORD_SIZE * tot_len_words; - file_buffer= (uchar *) my_alloca(tot_len_byte); - if (!file_buffer) + if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL)))) DBUG_RETURN(TRUE); - bzero(file_buffer, tot_len_byte); engine_array= (file_buffer + PAR_ENGINES_OFFSET); name_buffer_ptr= 
(char*) (engine_array + tot_partition_words * PAR_WORD_SIZE + PAR_WORD_SIZE); @@ -2483,7 +2698,7 @@ bool ha_partition::create_handler_file(const char *name) } else result= TRUE; - my_afree((char*) file_buffer); + my_free(file_buffer); DBUG_RETURN(result); } @@ -2527,8 +2742,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) for (i= 0; i < m_tot_parts; i++) { handlerton *hton= plugin_data(m_engine_array[i], handlerton*); - if (!(m_file[i]= get_new_handler(table_share, mem_root, - hton))) + if (!(m_file[i]= get_new_handler(table_share, mem_root, hton))) DBUG_RETURN(TRUE); DBUG_PRINT("info", ("engine_type: %u", hton->db_type)); } @@ -2635,9 +2849,10 @@ error_end: bool ha_partition::read_par_file(const char *name) { - char buff[FN_REFLEN], *tot_name_len_offset; + char buff[FN_REFLEN]; + uchar *tot_name_len_offset; File file; - char *file_buffer; + uchar *file_buffer; uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum; DBUG_ENTER("ha_partition::read_par_file"); DBUG_PRINT("enter", ("table name: '%s'", name)); @@ -2656,9 +2871,9 @@ bool ha_partition::read_par_file(const char *name) len_bytes= PAR_WORD_SIZE * len_words; if (mysql_file_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) goto err1; - if (!(file_buffer= (char*) alloc_root(&m_mem_root, len_bytes))) + if (!(file_buffer= (uchar*) alloc_root(&m_mem_root, len_bytes))) goto err1; - if (mysql_file_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP))) + if (mysql_file_read(file, file_buffer, len_bytes, MYF(MY_NABP))) goto err2; chksum= 0; @@ -2681,7 +2896,7 @@ bool ha_partition::read_par_file(const char *name) if (len_words != (tot_partition_words + tot_name_words + 4)) goto err2; m_file_buffer= file_buffer; // Will be freed in clear_handler_file() - m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE; + m_name_buffer_ptr= (char*) (tot_name_len_offset + PAR_WORD_SIZE); if (!(m_connect_string= (LEX_STRING*) alloc_root(&m_mem_root, m_tot_parts * sizeof(LEX_STRING)))) @@ 
-2731,7 +2946,8 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) { uint i; uchar *buff; - handlerton **engine_array; + handlerton **engine_array, *first_engine; + enum legacy_db_type db_type, first_db_type; DBUG_ASSERT(!m_file); DBUG_ENTER("ha_partition::setup_engine_array"); @@ -2740,22 +2956,36 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) DBUG_RETURN(true); buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); - for (i= 0; i < m_tot_parts; i++) - { - engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), - (enum legacy_db_type) - *(buff + i)); - if (!engine_array[i]) - goto err; - } + first_db_type= (enum legacy_db_type) buff[0]; + first_engine= ha_resolve_by_legacy_type(ha_thd(), first_db_type); + if (!first_engine) + goto err; + if (!(m_engine_array= (plugin_ref*) alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref)))) goto err; for (i= 0; i < m_tot_parts; i++) - m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); + { + db_type= (enum legacy_db_type) buff[i]; + if (db_type != first_db_type) + { + DBUG_PRINT("error", ("partition %u engine %d is not same as " + "first partition %d", i, db_type, + (int) first_db_type)); + DBUG_ASSERT(0); + clear_handler_file(); + goto err; + } + m_engine_array[i]= ha_lock_engine(NULL, first_engine); + if (!m_engine_array[i]) + { + clear_handler_file(); + goto err; + } + } - my_afree(engine_array); + my_afree((void*) engine_array); if (create_handlers(mem_root)) { @@ -2766,7 +2996,7 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) DBUG_RETURN(false); err: - my_afree(engine_array); + my_afree((void*) engine_array); DBUG_RETURN(true); } @@ -2809,19 +3039,298 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, MODULE open/close object ****************************************************************************/ +/** + Get the partition name. 
+ + @param part Struct containing name and length + @param[out] length Length of the name + + @return Partition name +*/ + +static uchar *get_part_name(PART_NAME_DEF *part, size_t *length, + my_bool not_used __attribute__((unused))) +{ + *length= part->length; + return part->partition_name; +} + + +/** + Insert a partition name in the partition_name_hash. + + @param name Name of partition + @param part_id Partition id (number) + @param is_subpart Set if the name belongs to a subpartition + + @return Operation status + @retval true Failure + @retval false Sucess +*/ + +bool ha_partition::insert_partition_name_in_hash(const char *name, uint part_id, + bool is_subpart) +{ + PART_NAME_DEF *part_def; + uchar *part_name; + uint part_name_length; + DBUG_ENTER("ha_partition::insert_partition_name_in_hash"); + /* + Calculate and store the length here, to avoid doing it when + searching the hash. + */ + part_name_length= strlen(name); + /* + Must use memory that lives as long as table_share. + Freed in the Partition_share destructor. + Since we use my_multi_malloc, then my_free(part_def) will also free + part_name, as a part of my_hash_free. + */ + if (!my_multi_malloc(MY_WME, + &part_def, sizeof(PART_NAME_DEF), + &part_name, part_name_length + 1, + NULL)) + DBUG_RETURN(true); + memcpy(part_name, name, part_name_length + 1); + part_def->partition_name= part_name; + part_def->length= part_name_length; + part_def->part_id= part_id; + part_def->is_subpart= is_subpart; + if (my_hash_insert(&part_share->partition_name_hash, (uchar *) part_def)) + { + my_free(part_def); + DBUG_RETURN(true); + } + DBUG_RETURN(false); +} + + +/** + Populate the partition_name_hash in part_share. +*/ + +bool ha_partition::populate_partition_name_hash() +{ + List_iterator<partition_element> part_it(m_part_info->partitions); + uint num_parts= m_part_info->num_parts; + uint num_subparts= m_is_sub_partitioned ? 
m_part_info->num_subparts : 1; + uint tot_names; + uint i= 0; + DBUG_ASSERT(part_share); + + DBUG_ENTER("ha_partition::populate_partition_name_hash"); + + /* + partition_name_hash is only set once and never changed + -> OK to check without locking. + */ + + if (part_share->partition_name_hash_initialized) + DBUG_RETURN(false); + lock_shared_ha_data(); + if (part_share->partition_name_hash_initialized) + { + unlock_shared_ha_data(); + DBUG_RETURN(false); + } + tot_names= m_is_sub_partitioned ? m_tot_parts + num_parts : num_parts; + if (my_hash_init(&part_share->partition_name_hash, + system_charset_info, tot_names, 0, 0, + (my_hash_get_key) get_part_name, + my_free, HASH_UNIQUE)) + { + unlock_shared_ha_data(); + DBUG_RETURN(TRUE); + } + + do + { + partition_element *part_elem= part_it++; + DBUG_ASSERT(part_elem->part_state == PART_NORMAL); + if (part_elem->part_state == PART_NORMAL) + { + if (insert_partition_name_in_hash(part_elem->partition_name, + i * num_subparts, false)) + goto err; + if (m_is_sub_partitioned) + { + List_iterator<partition_element> + subpart_it(part_elem->subpartitions); + partition_element *sub_elem; + uint j= 0; + do + { + sub_elem= subpart_it++; + if (insert_partition_name_in_hash(sub_elem->partition_name, + i * num_subparts + j, true)) + goto err; + + } while (++j < num_subparts); + } + } + } while (++i < num_parts); + + part_share->partition_name_hash_initialized= true; + unlock_shared_ha_data(); + + DBUG_RETURN(FALSE); +err: + my_hash_free(&part_share->partition_name_hash); + unlock_shared_ha_data(); + + DBUG_RETURN(TRUE); +} + + +/** + Set Handler_share pointer and allocate Handler_share pointers + for each partition and set those. + + @param ha_share_arg Where to store/retrieve the Partitioning_share pointer + to be shared by all instances of the same table. 
+ + @return Operation status + @retval true Failure + @retval false Sucess +*/ + +bool ha_partition::set_ha_share_ref(Handler_share **ha_share_arg) +{ + Handler_share **ha_shares; + uint i; + DBUG_ENTER("ha_partition::set_ha_share_ref"); + + DBUG_ASSERT(!part_share); + DBUG_ASSERT(table_share); + DBUG_ASSERT(!m_is_clone_of); + DBUG_ASSERT(m_tot_parts); + if (handler::set_ha_share_ref(ha_share_arg)) + DBUG_RETURN(true); + if (!(part_share= get_share())) + DBUG_RETURN(true); + DBUG_ASSERT(part_share->partitions_share_refs); + DBUG_ASSERT(part_share->partitions_share_refs->num_parts >= m_tot_parts); + ha_shares= part_share->partitions_share_refs->ha_shares; + for (i= 0; i < m_tot_parts; i++) + { + if (m_file[i]->set_ha_share_ref(&ha_shares[i])) + DBUG_RETURN(true); + } + DBUG_RETURN(false); +} + + +/** + Get the PARTITION_SHARE for the table. + + @return Operation status + @retval true Error + @retval false Success + + @note Gets or initializes the Partition_share object used by partitioning. + The Partition_share is used for handling the auto_increment etc. +*/ + +Partition_share *ha_partition::get_share() +{ + Partition_share *tmp_share; + DBUG_ENTER("ha_partition::get_share"); + DBUG_ASSERT(table_share); + + lock_shared_ha_data(); + if (!(tmp_share= static_cast<Partition_share*>(get_ha_share_ptr()))) + { + tmp_share= new Partition_share; + if (!tmp_share) + goto err; + if (tmp_share->init(m_tot_parts)) + { + delete tmp_share; + tmp_share= NULL; + goto err; + } + set_ha_share_ptr(static_cast<Handler_share*>(tmp_share)); + } +err: + unlock_shared_ha_data(); + DBUG_RETURN(tmp_share); +} + + + +/** + Helper function for freeing all internal bitmaps. 
+*/ + +void ha_partition::free_partition_bitmaps() +{ + /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ + bitmap_free(&m_bulk_insert_started); + bitmap_free(&m_locked_partitions); + bitmap_free(&m_partitions_to_reset); + bitmap_free(&m_key_not_found_partitions); +} + /** - A destructor for partition-specific TABLE_SHARE data. + Helper function for initializing all internal bitmaps. */ -void ha_data_partition_destroy(HA_DATA_PARTITION* ha_part_data) +bool ha_partition::init_partition_bitmaps() { - if (ha_part_data) + DBUG_ENTER("ha_partition::init_partition_bitmaps"); + /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ + if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) + DBUG_RETURN(true); + bitmap_clear_all(&m_bulk_insert_started); + + /* Initialize the bitmap we use to keep track of locked partitions */ + if (bitmap_init(&m_locked_partitions, NULL, m_tot_parts, FALSE)) + { + bitmap_free(&m_bulk_insert_started); + DBUG_RETURN(true); + } + bitmap_clear_all(&m_locked_partitions); + + /* + Initialize the bitmap we use to keep track of partitions which may have + something to reset in ha_reset(). + */ + if (bitmap_init(&m_partitions_to_reset, NULL, m_tot_parts, FALSE)) + { + bitmap_free(&m_bulk_insert_started); + bitmap_free(&m_locked_partitions); + DBUG_RETURN(true); + } + bitmap_clear_all(&m_partitions_to_reset); + + /* + Initialize the bitmap we use to keep track of partitions which returned + HA_ERR_KEY_NOT_FOUND from index_read_map. 
+ */ + if (bitmap_init(&m_key_not_found_partitions, NULL, m_tot_parts, FALSE)) + { + bitmap_free(&m_bulk_insert_started); + bitmap_free(&m_locked_partitions); + bitmap_free(&m_partitions_to_reset); + DBUG_RETURN(true); + } + bitmap_clear_all(&m_key_not_found_partitions); + m_key_not_found= false; + /* Initialize the bitmap for read/lock_partitions */ + if (!m_is_clone_of) { - mysql_mutex_destroy(&ha_part_data->LOCK_auto_inc); + DBUG_ASSERT(!m_clone_mem_root); + if (m_part_info->set_partition_bitmaps(NULL)) + { + free_partition_bitmaps(); + DBUG_RETURN(true); + } } + DBUG_RETURN(false); } + /* Open handler object @@ -2851,7 +3360,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) int error= HA_ERR_INITIALIZATION; handler **file; char name_buff[FN_REFLEN]; - bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE); ulonglong check_table_flags; DBUG_ENTER("ha_partition::open"); @@ -2863,6 +3371,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of))) DBUG_RETURN(error); name_buffer_ptr= m_name_buffer_ptr; + if (populate_partition_name_hash()) + { + DBUG_RETURN(HA_ERR_INITIALIZATION); + } m_start_key.length= 0; m_rec0= table->record[0]; m_rec_length= table_share->stored_rec_length; @@ -2877,32 +3389,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_part_ids_sorted_by_num_of_records[i]= i; } - /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ - if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) + if (init_partition_bitmaps()) DBUG_RETURN(error); - bitmap_clear_all(&m_bulk_insert_started); - /* - Initialize the bitmap we use to keep track of partitions which returned - HA_ERR_KEY_NOT_FOUND from index_read_map. 
- */ - if (bitmap_init(&m_key_not_found_partitions, NULL, m_tot_parts, FALSE)) - { - bitmap_free(&m_bulk_insert_started); - DBUG_RETURN(error); - } - bitmap_clear_all(&m_key_not_found_partitions); - m_key_not_found= false; - /* Initialize the bitmap we use to determine what partitions are used */ - if (!m_is_clone_of) - { - DBUG_ASSERT(!m_clone_mem_root); - if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) - { - bitmap_free(&m_bulk_insert_started); - DBUG_RETURN(error); - } - bitmap_set_all(&(m_part_info->used_partitions)); - } + + DBUG_ASSERT(m_part_info); if (m_is_clone_of) { @@ -2911,7 +3401,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) /* Allocate an array of handler pointers for the partitions handlers. */ alloc_len= (m_tot_parts + 1) * sizeof(handler*); if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len))) + { + error= HA_ERR_INITIALIZATION; goto err_alloc; + } memset(m_file, 0, alloc_len); /* Populate them by cloning the original partitions. This also opens them. @@ -2922,6 +3415,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, FALSE); + /* ::clone() will also set ha_share from the original. 
*/ if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root))) { error= HA_ERR_INITIALIZATION; @@ -2939,10 +3433,13 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, FALSE); table->s->connect_string = m_connect_string[(uint)(file-m_file)]; - if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked))) + if ((error= (*file)->ha_open(table, name_buff, mode, + test_if_locked | HA_OPEN_NO_PSI_CALL))) goto err_handler; bzero(&table->s->connect_string, sizeof(LEX_STRING)); - m_num_locks+= (*file)->lock_count(); + if (m_file == file) + m_num_locks= (*file)->lock_count(); + DBUG_ASSERT(m_num_locks == (*file)->lock_count()); name_buffer_ptr+= strlen(name_buffer_ptr) + 1; } while (*(++file)); } @@ -2965,7 +3462,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) (PARTITION_ENABLED_TABLE_FLAGS))) { error= HA_ERR_INITIALIZATION; - /* set file to last handler, so all of them is closed */ + /* set file to last handler, so all of them are closed */ file = &m_file[m_tot_parts - 1]; goto err_handler; } @@ -2986,34 +3483,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) clear_handler_file(); /* - Use table_share->ha_part_data to share auto_increment_value among - all handlers for the same table. 
- */ - if (is_not_tmp_table) - mysql_mutex_lock(&table_share->LOCK_ha_data); - if (!table_share->ha_part_data) - { - /* currently only needed for auto_increment */ - table_share->ha_part_data= (HA_DATA_PARTITION*) - alloc_root(&table_share->mem_root, - sizeof(HA_DATA_PARTITION)); - if (!table_share->ha_part_data) - { - if (is_not_tmp_table) - mysql_mutex_unlock(&table_share->LOCK_ha_data); - goto err_handler; - } - DBUG_PRINT("info", ("table_share->ha_part_data 0x%p", - table_share->ha_part_data)); - bzero(table_share->ha_part_data, sizeof(HA_DATA_PARTITION)); - table_share->ha_part_data_destroy= ha_data_partition_destroy; - mysql_mutex_init(key_PARTITION_LOCK_auto_inc, - &table_share->ha_part_data->LOCK_auto_inc, - MY_MUTEX_INIT_FAST); - } - if (is_not_tmp_table) - mysql_mutex_unlock(&table_share->LOCK_ha_data); - /* Some handlers update statistics as part of the open call. This will in some cases corrupt the statistics of the partition handler and thus to ensure we have correct statistics we call info from open after @@ -3033,15 +3502,19 @@ err_handler: while (file-- != m_file) (*file)->ha_close(); err_alloc: - bitmap_free(&m_bulk_insert_started); - bitmap_free(&m_key_not_found_partitions); - if (!m_is_clone_of) - bitmap_free(&(m_part_info->used_partitions)); + free_partition_bitmaps(); DBUG_RETURN(error); } +/* + Disabled since it is not possible to prune yet. + without pruning, it need to rebind/unbind every partition in every + statement which uses a table from the table cache. Will also use + as many PSI_tables as there are partitions. 
+*/ +#ifdef HAVE_M_PSI_PER_PARTITION void ha_partition::unbind_psi() { uint i; @@ -3069,6 +3542,7 @@ void ha_partition::rebind_psi() } DBUG_VOID_RETURN; } +#endif /* HAVE_M_PSI_PER_PARTITION */ /** @@ -3094,22 +3568,35 @@ handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root) DBUG_ENTER("ha_partition::clone"); new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info, this, mem_root); + if (!new_handler) + DBUG_RETURN(NULL); + + /* + We will not clone each partition's handler here, it will be done in + ha_partition::open() for clones. Also set_ha_share_ref is not needed + here, since 1) ha_share is copied in the constructor used above + 2) each partition's cloned handler will set it from its original. + */ + /* Allocate new_handler->ref here because otherwise ha_open will allocate it on this->table->mem_root and we will not be able to reclaim that memory when the clone handler object is destroyed. */ - if (new_handler && - !(new_handler->ref= (uchar*) alloc_root(mem_root, + if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(m_ref_length)*2))) - new_handler= NULL; + goto err; - if (new_handler && - new_handler->ha_open(table, name, - table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) - new_handler= NULL; + if (new_handler->ha_open(table, name, + table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_NO_PSI_CALL)) + goto err; DBUG_RETURN((handler*) new_handler); + +err: + delete new_handler; + DBUG_RETURN(NULL); } @@ -3139,10 +3626,8 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); destroy_record_priority_queue(); - bitmap_free(&m_bulk_insert_started); - bitmap_free(&m_key_not_found_partitions); - if (!m_is_clone_of) - bitmap_free(&(m_part_info->used_partitions)); + free_partition_bitmaps(); + DBUG_ASSERT(m_part_info); file= m_file; repeat: @@ -3204,41 +3689,64 @@ repeat: int ha_partition::external_lock(THD *thd, int lock_type) { - bool first= TRUE; uint error; - handler **file; + uint i, first_used_partition; + 
MY_BITMAP *used_partitions; DBUG_ENTER("ha_partition::external_lock"); DBUG_ASSERT(!auto_increment_lock && !auto_increment_safe_stmt_log_lock); - file= m_file; - m_lock_type= lock_type; -repeat: - do + if (lock_type == F_UNLCK) + used_partitions= &m_locked_partitions; + else + used_partitions= &(m_part_info->lock_partitions); + + first_used_partition= bitmap_get_first_set(used_partitions); + + for (i= first_used_partition; + i < m_tot_parts; + i= bitmap_get_next_set(used_partitions, i)) { - DBUG_PRINT("info", ("external_lock(thd, %d) iteration %d", - lock_type, (int) (file - m_file))); - if ((error= (*file)->ha_external_lock(thd, lock_type))) + DBUG_PRINT("info", ("external_lock(thd, %d) part %d", lock_type, i)); + if ((error= m_file[i]->ha_external_lock(thd, lock_type))) { - if (F_UNLCK != lock_type) + if (lock_type != F_UNLCK) goto err_handler; } - } while (*(++file)); + DBUG_PRINT("info", ("external_lock part %u lock %d", i, lock_type)); + if (lock_type != F_UNLCK) + bitmap_set_bit(&m_locked_partitions, i); + } + if (lock_type == F_UNLCK) + { + bitmap_clear_all(used_partitions); + } + else + { + /* Add touched partitions to be included in reset(). 
*/ + bitmap_union(&m_partitions_to_reset, used_partitions); + } - if (first && m_added_file && m_added_file[0]) + if (m_added_file && m_added_file[0]) { + handler **file= m_added_file; DBUG_ASSERT(lock_type == F_UNLCK); - file= m_added_file; - first= FALSE; - goto repeat; + do + { + (void) (*file)->ha_external_lock(thd, lock_type); + } while (*(++file)); } DBUG_RETURN(0); err_handler: - while (file-- != m_file) + uint j; + for (j= first_used_partition; + j < i; + j= bitmap_get_next_set(&m_locked_partitions, j)) { - (*file)->ha_external_lock(thd, F_UNLCK); + (void) m_file[j]->ha_external_lock(thd, F_UNLCK); } + bitmap_clear_all(&m_locked_partitions); DBUG_RETURN(error); } @@ -3293,14 +3801,30 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { - handler **file; + uint i; DBUG_ENTER("ha_partition::store_lock"); - file= m_file; - do + DBUG_ASSERT(thd == current_thd); + + /* + This can be called from get_lock_data() in mysql_lock_abort_for_thread(), + even when thd != table->in_use. In that case don't use partition pruning, + but use all partitions instead to avoid using another threads structures. 
+ */ + if (thd != table->in_use) { - DBUG_PRINT("info", ("store lock %d iteration", (int) (file - m_file))); - to= (*file)->store_lock(thd, to, lock_type); - } while (*(++file)); + for (i= 0; i < m_tot_parts; i++) + to= m_file[i]->store_lock(thd, to, lock_type); + } + else + { + for (i= bitmap_get_first_set(&(m_part_info->lock_partitions)); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->lock_partitions, i)) + { + DBUG_PRINT("info", ("store lock %d iteration", i)); + to= m_file[i]->store_lock(thd, to, lock_type); + } + } DBUG_RETURN(to); } @@ -3324,40 +3848,57 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd, int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type) { int error= 0; - handler **file; + uint i; + /* Assert that read_partitions is included in lock_partitions */ + DBUG_ASSERT(bitmap_is_subset(&m_part_info->read_partitions, + &m_part_info->lock_partitions)); + /* + m_locked_partitions is set in previous external_lock/LOCK TABLES. + Current statement's lock requests must not include any partitions + not previously locked. + */ + DBUG_ASSERT(bitmap_is_subset(&m_part_info->lock_partitions, + &m_locked_partitions)); DBUG_ENTER("ha_partition::start_stmt"); - file= m_file; - do + for (i= bitmap_get_first_set(&(m_part_info->lock_partitions)); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->lock_partitions, i)) { - if ((error= (*file)->start_stmt(thd, lock_type))) + if ((error= m_file[i]->start_stmt(thd, lock_type))) break; - } while (*(++file)); + /* Add partition to be called in reset(). */ + bitmap_set_bit(&m_partitions_to_reset, i); + } DBUG_RETURN(error); } -/* +/** Get number of lock objects returned in store_lock - SYNOPSIS - lock_count() + @returns Number of locks returned in call to store_lock - RETURN VALUE - Number of locks returned in call to store_lock - - DESCRIPTION + @desc Returns the number of store locks needed in call to store lock. 
- We return number of partitions since we call store_lock on each - underlying handler. Assists the above functions in allocating + We return number of partitions we will lock multiplied with number of + locks needed by each partition. Assists the above functions in allocating sufficient space for lock structures. */ uint ha_partition::lock_count() const { DBUG_ENTER("ha_partition::lock_count"); - DBUG_PRINT("info", ("m_num_locks %d", m_num_locks)); - DBUG_RETURN(m_num_locks); + /* + The caller want to know the upper bound, to allocate enough memory. + There is no performance lost if we simply return maximum number locks + needed, only some minor over allocation of memory in get_lock_data(). + + Also notice that this may be called for another thread != table->in_use, + when mysql_lock_abort_for_thread() is called. So this is more safe, then + using number of partitions after pruning. + */ + DBUG_RETURN(m_tot_parts * m_num_locks); } @@ -3409,7 +3950,7 @@ bool ha_partition::was_semi_consistent_read() { DBUG_ENTER("ha_partition::was_semi_consistent_read"); DBUG_ASSERT(m_last_part < m_tot_parts && - bitmap_is_set(&(m_part_info->used_partitions), m_last_part)); + bitmap_is_set(&(m_part_info->read_partitions), m_last_part)); DBUG_RETURN(m_file[m_last_part]->was_semi_consistent_read()); } @@ -3434,13 +3975,16 @@ bool ha_partition::was_semi_consistent_read() */ void ha_partition::try_semi_consistent_read(bool yes) { - handler **file; + uint i; DBUG_ENTER("ha_partition::try_semi_consistent_read"); - for (file= m_file; *file; file++) + i= bitmap_get_first_set(&(m_part_info->read_partitions)); + DBUG_ASSERT(i != MY_BIT_NONE); + for (; + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file))) - (*file)->try_semi_consistent_read(yes); + m_file[i]->try_semi_consistent_read(yes); } DBUG_VOID_RETURN; } @@ -3495,11 +4039,8 @@ int ha_partition::write_row(uchar * buf) bool 
have_auto_increment= table->next_number_field && buf == table->record[0]; my_bitmap_map *old_map; THD *thd= ha_thd(); - ulonglong saved_sql_mode= thd->variables.sql_mode; + sql_mode_t saved_sql_mode= thd->variables.sql_mode; bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null; -#ifdef NOT_NEEDED - uchar *rec0= m_rec0; -#endif DBUG_ENTER("ha_partition::write_row"); DBUG_ASSERT(buf == m_rec0); @@ -3509,7 +4050,7 @@ int ha_partition::write_row(uchar * buf) */ if (have_auto_increment) { - if (!table_share->ha_part_data->auto_inc_initialized && + if (!part_share->auto_inc_initialized && !table_share->next_number_keypart) { /* @@ -3546,26 +4087,20 @@ int ha_partition::write_row(uchar * buf) } old_map= dbug_tmp_use_all_columns(table, table->read_set); -#ifdef NOT_NEEDED - if (likely(buf == rec0)) -#endif - error= m_part_info->get_partition_id(m_part_info, &part_id, - &func_value); -#ifdef NOT_NEEDED - else - { - set_field_ptr(m_part_field_array, buf, rec0); - error= m_part_info->get_partition_id(m_part_info, &part_id, - &func_value); - set_field_ptr(m_part_field_array, rec0, buf); - } -#endif + error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value); dbug_tmp_restore_column_map(table->read_set, old_map); if (unlikely(error)) { m_part_info->err_value= func_value; goto exit; } + if (!bitmap_is_set(&(m_part_info->lock_partitions), part_id)) + { + DBUG_PRINT("info", ("Write to non-locked partition %u (func_value: %ld)", + part_id, (long) func_value)); + error= HA_ERR_NOT_IN_LOCK_PARTITIONS; + goto exit; + } m_last_part= part_id; DBUG_PRINT("info", ("Insert in partition %d", part_id)); start_part_bulk_insert(thd, part_id); @@ -3603,7 +4138,7 @@ exit: Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. 
new_data is always record[0] - old_data is always record[1] + old_data is normally record[1] but may be anything */ int ha_partition::update_row(const uchar *old_data, uchar *new_data) @@ -3613,8 +4148,10 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data) int error= 0; longlong func_value; DBUG_ENTER("ha_partition::update_row"); - m_err_rec= NULL; + // Need to read partition-related columns, to locate the row's partition: + DBUG_ASSERT(bitmap_is_subset(&m_part_info->full_part_field_set, + table->read_set)); if ((error= get_parts_for_update(old_data, new_data, table->record[0], m_part_info, &old_part_id, &new_part_id, &func_value))) @@ -3622,26 +4159,12 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data) m_part_info->err_value= func_value; goto exit; } - /* - The protocol for updating a row is: - 1) position the handler (cursor) on the row to be updated, - either through the last read row (rnd or index) or by rnd_pos. - 2) call update_row with both old and new full records as arguments. - - This means that m_last_part should already be set to actual partition - where the row was read from. And if that is not the same as the - calculated part_id we found a misplaced row, we return an error to - notify the user that something is broken in the row distribution - between partitions! Since we don't check all rows on read, we return an - error instead of correcting m_last_part, to make the user aware of the - problem! 
- */ - if (old_part_id != m_last_part) + DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id)); + if (!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id)) { - m_err_rec= old_data; - DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); + error= HA_ERR_NOT_IN_LOCK_PARTITIONS; + goto exit; } - m_last_part= new_part_id; start_part_bulk_insert(thd, new_part_id); if (new_part_id == old_part_id) @@ -3690,7 +4213,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data) exit: /* if updating an auto_increment column, update - table_share->ha_part_data->next_auto_inc_val if needed. + part_share->next_auto_inc_val if needed. (not to be used if auto_increment on secondary field in a multi-column index) mysql_update does not set table->next_number_field, so we use @@ -3703,7 +4226,7 @@ exit: bitmap_is_set(table->write_set, table->found_next_number_field->field_index)) { - if (!table_share->ha_part_data->auto_inc_initialized) + if (!part_share->auto_inc_initialized) info(HA_STATUS_AUTO); set_auto_increment_if_higher(table->found_next_number_field); } @@ -3745,34 +4268,19 @@ int ha_partition::delete_row(const uchar *buf) int error; THD *thd= ha_thd(); DBUG_ENTER("ha_partition::delete_row"); - m_err_rec= NULL; + DBUG_ASSERT(bitmap_is_subset(&m_part_info->full_part_field_set, + table->read_set)); if ((error= get_part_for_delete(buf, m_rec0, m_part_info, &part_id))) { DBUG_RETURN(error); } - /* - The protocol for deleting a row is: - 1) position the handler (cursor) on the row to be deleted, - either through the last read row (rnd or index) or by rnd_pos. - 2) call delete_row with the full record as argument. - - This means that m_last_part should already be set to actual partition - where the row was read from. And if that is not the same as the - calculated part_id we found a misplaced row, we return an error to - notify the user that something is broken in the row distribution - between partitions! 
Since we don't check all rows on read, we return an - error instead of forwarding the delete to the correct (m_last_part) - partition! - TODO: change the assert in InnoDB into an error instead and make this one - an assert instead and remove the get_part_for_delete()! - */ - if (part_id != m_last_part) - { - m_err_rec= buf; - DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); - } - + m_last_part= part_id; + /* Should never call delete_row on a partition which is not read */ + DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id)); + DBUG_ASSERT(bitmap_is_set(&(m_part_info->lock_partitions), part_id)); + if (!bitmap_is_set(&(m_part_info->lock_partitions), part_id)) + DBUG_RETURN(HA_ERR_NOT_IN_LOCK_PARTITIONS); tmp_disable_binlog(thd); error= m_file[part_id]->ha_delete_row(buf); reenable_binlog(thd); @@ -3798,22 +4306,24 @@ int ha_partition::delete_row(const uchar *buf) Called from item_sum.cc by Item_func_group_concat::clear(), Item_sum_count_distinct::clear(), and Item_func_group_concat::clear(). Called from sql_delete.cc by mysql_delete(). - Called from sql_select.cc by JOIN::reinit(). + Called from sql_select.cc by JOIN::reset(). Called from sql_union.cc by st_select_lex_unit::exec(). */ int ha_partition::delete_all_rows() { int error; - handler **file; + uint i; DBUG_ENTER("ha_partition::delete_all_rows"); - file= m_file; - do + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if ((error= (*file)->ha_delete_all_rows())) + /* Can be pruned, like DELETE FROM t PARTITION (pX) */ + if ((error= m_file[i]->ha_delete_all_rows())) DBUG_RETURN(error); - } while (*(++file)); + } DBUG_RETURN(0); } @@ -3836,8 +4346,8 @@ int ha_partition::truncate() it so that it will be initialized again at the next use. 
*/ lock_auto_increment(); - table_share->ha_part_data->next_auto_inc_val= 0; - table_share->ha_part_data->auto_inc_initialized= FALSE; + part_share->next_auto_inc_val= 0; + part_share->auto_inc_initialized= false; unlock_auto_increment(); file= m_file; @@ -3878,8 +4388,8 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt) it so that it will be initialized again at the next use. */ lock_auto_increment(); - table_share->ha_part_data->next_auto_inc_val= 0; - table_share->ha_part_data->auto_inc_initialized= FALSE; + part_share->next_auto_inc_val= 0; + part_share->auto_inc_initialized= FALSE; unlock_auto_increment(); *binlog_stmt= true; @@ -3925,7 +4435,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt) SYNOPSIS start_bulk_insert() rows Number of rows to insert - flags Flags to control index creation + flags Flags to control index creation RETURN VALUE NONE @@ -3955,6 +4465,7 @@ void ha_partition::start_part_bulk_insert(THD *thd, uint part_id) if (!bitmap_is_set(&m_bulk_insert_started, part_id) && bitmap_is_set(&m_bulk_insert_started, m_tot_parts)) { + DBUG_ASSERT(bitmap_is_set(&(m_part_info->lock_partitions), part_id)); old_buffer_size= thd->variables.read_buff_size; /* Update read_buffer_size for this partition */ thd->variables.read_buff_size= estimate_read_buffer_size(old_buffer_size); @@ -4062,11 +4573,12 @@ int ha_partition::end_bulk_insert() if (!bitmap_is_set(&m_bulk_insert_started, m_tot_parts)) DBUG_RETURN(error); - for (i= 0; i < m_tot_parts; i++) + for (i= bitmap_get_first_set(&m_bulk_insert_started); + i < m_tot_parts; + i= bitmap_get_next_set(&m_bulk_insert_started, i)) { int tmp; - if (bitmap_is_set(&m_bulk_insert_started, i) && - (tmp= m_file[i]->ha_end_bulk_insert())) + if ((tmp= m_file[i]->ha_end_bulk_insert())) error= tmp; } bitmap_clear_all(&m_bulk_insert_started); @@ -4114,7 +4626,7 @@ int ha_partition::rnd_init(bool scan) For operations that may need to change data, we may need to 
extend read_set. */ - if (m_lock_type == F_WRLCK) + if (get_lock_type() == F_WRLCK) { /* If write_set contains any of the fields used in partition and @@ -4139,9 +4651,9 @@ int ha_partition::rnd_init(bool scan) } /* Now we see what the index of our first important partition is */ - DBUG_PRINT("info", ("m_part_info->used_partitions: 0x%lx", - (long) m_part_info->used_partitions.bitmap)); - part_id= bitmap_get_first_set(&(m_part_info->used_partitions)); + DBUG_PRINT("info", ("m_part_info->read_partitions: 0x%lx", + (long) m_part_info->read_partitions.bitmap)); + part_id= bitmap_get_first_set(&(m_part_info->read_partitions)); DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id)); if (MY_BIT_NONE == part_id) @@ -4168,13 +4680,12 @@ int ha_partition::rnd_init(bool scan) } else { - for (i= part_id; i < m_tot_parts; i++) + for (i= part_id; + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (bitmap_is_set(&(m_part_info->used_partitions), i)) - { - if ((error= m_file[i]->ha_rnd_init(scan))) - goto err; - } + if ((error= m_file[i]->ha_rnd_init(scan))) + goto err; } } m_scan_value= scan; @@ -4184,10 +4695,12 @@ int ha_partition::rnd_init(bool scan) DBUG_RETURN(0); err: - while ((int)--i >= (int)part_id) + /* Call rnd_end for all previously inited partitions. 
*/ + for (; + part_id < i; + part_id= bitmap_get_next_set(&m_part_info->read_partitions, part_id)) { - if (bitmap_is_set(&(m_part_info->used_partitions), i)) - m_file[i]->ha_rnd_end(); + m_file[part_id]->ha_rnd_end(); } err1: m_scan_value= 2; @@ -4209,7 +4722,6 @@ err1: int ha_partition::rnd_end() { - handler **file; DBUG_ENTER("ha_partition::rnd_end"); switch (m_scan_value) { case 2: // Error @@ -4222,12 +4734,13 @@ int ha_partition::rnd_end() } break; case 0: - file= m_file; - do + uint i; + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file))) - (*file)->ha_rnd_end(); - } while (*(++file)); + m_file[i]->ha_rnd_end(); + } break; } m_scan_value= 2; @@ -4290,7 +4803,7 @@ int ha_partition::rnd_next(uchar *buf) } /* - if we get here, then the current partition rnd_next returned failure + if we get here, then the current partition ha_rnd_next returned failure */ if (result == HA_ERR_RECORD_DELETED) continue; // Probably MyISAM @@ -4305,9 +4818,7 @@ int ha_partition::rnd_next(uchar *buf) break; /* Shift to next partition */ - while (++part_id < m_tot_parts && - !bitmap_is_set(&(m_part_info->used_partitions), part_id)) - ; + part_id= bitmap_get_next_set(&m_part_info->read_partitions, part_id); if (part_id >= m_tot_parts) { result= HA_ERR_END_OF_FILE; @@ -4359,6 +4870,7 @@ void ha_partition::position(const uchar *record) { handler *file= m_file[m_last_part]; uint pad_length; + DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), m_last_part)); DBUG_ENTER("ha_partition::position"); file->position(record); @@ -4372,14 +4884,6 @@ void ha_partition::position(const uchar *record) } -void ha_partition::column_bitmaps_signal() -{ - handler::column_bitmaps_signal(); - /* Must read all partition fields to make position() call possible */ - bitmap_union(table->read_set, &m_part_info->full_part_field_set); -} - - /* 
Read row using position @@ -4411,6 +4915,7 @@ int ha_partition::rnd_pos(uchar * buf, uchar *pos) part_id= uint2korr((const uchar *) pos); DBUG_ASSERT(part_id < m_tot_parts); file= m_file[part_id]; + DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id)); m_last_part= part_id; DBUG_RETURN(file->rnd_pos(buf, (pos + PARTITION_BYTES_IN_POS))); } @@ -4480,7 +4985,7 @@ bool ha_partition::init_record_priority_queue() if (!m_ordered_rec_buffer) { uint alloc_len; - uint used_parts= bitmap_bits_set(&m_part_info->used_partitions); + uint used_parts= bitmap_bits_set(&m_part_info->read_partitions); /* Allocate record buffer for each used partition. */ alloc_len= used_parts * (m_rec_length + PARTITION_BYTES_IN_POS); /* Allocate a key for temporary use when setting up the scan. */ @@ -4497,16 +5002,15 @@ bool ha_partition::init_record_priority_queue() setting up the scan. */ char *ptr= (char*) m_ordered_rec_buffer; - uint16 i= 0; - do + uint i; + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (bitmap_is_set(&m_part_info->used_partitions, i)) - { - DBUG_PRINT("info", ("init rec-buf for part %u", i)); - int2store(ptr, i); - ptr+= m_rec_length + PARTITION_BYTES_IN_POS; - } - } while (++i < m_tot_parts); + DBUG_PRINT("info", ("init rec-buf for part %u", i)); + int2store(ptr, i); + ptr+= m_rec_length + PARTITION_BYTES_IN_POS; + } m_start_key.key= (const uchar*)ptr; /* Initialize priority queue, initialized to reading forward. 
*/ if (init_queue(&m_queue, used_parts, (uint) PARTITION_BYTES_IN_POS, @@ -4558,7 +5062,7 @@ void ha_partition::destroy_record_priority_queue() int ha_partition::index_init(uint inx, bool sorted) { int error= 0; - handler **file; + uint i; DBUG_ENTER("ha_partition::index_init"); DBUG_PRINT("info", ("inx %u sorted %u", inx, sorted)); @@ -4591,7 +5095,7 @@ int ha_partition::index_init(uint inx, bool sorted) calculate the partition id to place updated and deleted records. But this is required for operations that may need to change data only. */ - if (m_lock_type == F_WRLCK) + if (get_lock_type() == F_WRLCK) bitmap_union(table->read_set, &m_part_info->full_part_field_set); if (sorted) { @@ -4607,25 +5111,39 @@ int ha_partition::index_init(uint inx, bool sorted) TODO: handle COUNT(*) queries via unordered scan. */ - uint i; KEY **key_info= m_curr_key_info; do { - for (i= 0; i < (*key_info)->key_parts; i++) + for (i= 0; i < (*key_info)->user_defined_key_parts; i++) bitmap_set_bit(table->read_set, (*key_info)->key_part[i].field->field_index); } while (*(++key_info)); } - file= m_file; - do + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file))) - if ((error= (*file)->ha_index_init(inx, sorted))) - { - DBUG_ASSERT(0); // Should never happen - break; - } - } while (*(++file)); + if ((error= m_file[i]->ha_index_init(inx, sorted))) + goto err; + + DBUG_EXECUTE_IF("ha_partition_fail_index_init", { + i++; + error= HA_ERR_NO_PARTITION_FOUND; + goto err; + }); + } +err: + if (error) + { + /* End the previously initialized indexes. 
*/ + uint j; + for (j= bitmap_get_first_set(&m_part_info->read_partitions); + j < i; + j= bitmap_get_next_set(&m_part_info->read_partitions, j)) + { + (void) m_file[j]->ha_index_end(); + } + } DBUG_RETURN(error); } @@ -4648,19 +5166,19 @@ int ha_partition::index_init(uint inx, bool sorted) int ha_partition::index_end() { int error= 0; - handler **file; + uint i; DBUG_ENTER("ha_partition::index_end"); active_index= MAX_KEY; m_part_spec.start_part= NO_CURRENT_PART_ID; - file= m_file; - do + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { int tmp; - if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file))) - if ((tmp= (*file)->ha_index_end())) - error= tmp; - } while (*(++file)); + if ((tmp= m_file[i]->ha_index_end())) + error= tmp; + } destroy_record_priority_queue(); DBUG_RETURN(error); } @@ -4906,17 +5424,20 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index, or no matching partitions (start_part > end_part) */ DBUG_ASSERT(m_part_spec.start_part >= m_part_spec.end_part); - - for (part= m_part_spec.start_part; part <= m_part_spec.end_part; part++) + /* The start part is must be marked as used. 
*/ + DBUG_ASSERT(m_part_spec.start_part > m_part_spec.end_part || + bitmap_is_set(&(m_part_info->read_partitions), + m_part_spec.start_part)); + + for (part= m_part_spec.start_part; + part <= m_part_spec.end_part; + part= bitmap_get_next_set(&m_part_info->read_partitions, part)) { - if (bitmap_is_set(&(m_part_info->used_partitions), part)) - { - error= m_file[part]->index_read_idx_map(buf, index, key, - keypart_map, find_flag); - if (error != HA_ERR_KEY_NOT_FOUND && - error != HA_ERR_END_OF_FILE) - break; - } + error= m_file[part]->ha_index_read_idx_map(buf, index, key, + keypart_map, find_flag); + if (error != HA_ERR_KEY_NOT_FOUND && + error != HA_ERR_END_OF_FILE) + break; } if (part <= m_part_spec.end_part) m_last_part= part; @@ -5058,15 +5579,7 @@ int ha_partition::read_range_first(const key_range *start_key, m_ordered= sorted; eq_range= eq_range_arg; - end_range= 0; - if (end_key) - { - end_range= &save_end_range; - save_end_range= *end_key; - key_compare_result_on_equal= - ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 : - (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0); - } + set_end_range(end_key); range_key_part= m_curr_key_info[0]->key_part; if (start_key) @@ -5168,7 +5681,7 @@ int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag) Verify this, also bitmap must have at least one bit set otherwise the result from this table is the empty set. 
*/ - uint start_part= bitmap_get_first_set(&(m_part_info->used_partitions)); + uint start_part= bitmap_get_first_set(&(m_part_info->read_partitions)); if (start_part == MY_BIT_NONE) { DBUG_PRINT("info", ("scan with no partition to scan")); @@ -5285,18 +5798,21 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same) int ha_partition::handle_unordered_scan_next_partition(uchar * buf) { - uint i; + uint i= m_part_spec.start_part; int saved_error= HA_ERR_END_OF_FILE; DBUG_ENTER("ha_partition::handle_unordered_scan_next_partition"); - for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++) + if (i) + i= bitmap_get_next_set(&m_part_info->read_partitions, i - 1); + else + i= bitmap_get_first_set(&m_part_info->read_partitions); + + for (; + i <= m_part_spec.end_part; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { int error; - handler *file; - - if (!(bitmap_is_set(&(m_part_info->used_partitions), i))) - continue; - file= m_file[i]; + handler *file= m_file[i]; m_part_spec.start_part= i; switch (m_index_scan_type) { case partition_read_range: @@ -5395,6 +5911,8 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) } m_top_entry= NO_CURRENT_PART_ID; queue_remove_all(&m_queue); + DBUG_ASSERT(bitmap_is_set(&m_part_info->read_partitions, + m_part_spec.start_part)); /* Position part_rec_buf_ptr to point to the first used partition >= @@ -5402,18 +5920,18 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) but is before start_part. These partitions has allocated record buffers but is dynamically pruned, so those buffers must be skipped. 
*/ - uint first_used_part= bitmap_get_first_set(&m_part_info->used_partitions); - for (; first_used_part < m_part_spec.start_part; first_used_part++) + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_part_spec.start_part; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (bitmap_is_set(&(m_part_info->used_partitions), first_used_part)) - part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS; + part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS; } DBUG_PRINT("info", ("m_part_spec.start_part %u first_used_part %u", - m_part_spec.start_part, first_used_part)); - for (i= first_used_part; i <= m_part_spec.end_part; i++) + m_part_spec.start_part, i)); + for (/* continue from above */ ; + i <= m_part_spec.end_part; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (!(bitmap_is_set(&(m_part_info->used_partitions), i))) - continue; DBUG_PRINT("info", ("reading from part %u (scan_type: %u)", i, m_index_scan_type)); DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr)); @@ -5421,12 +5939,6 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order) int error; handler *file= m_file[i]; - /* - Reset null bits (to avoid valgrind warnings) and to give a default - value for not read null fields. - */ - bfill(rec_buf_ptr, table->s->null_bytes, 255); - switch (m_index_scan_type) { case partition_index_read: error= file->ha_index_read_map(rec_buf_ptr, @@ -5542,11 +6054,10 @@ int ha_partition::handle_ordered_index_scan_key_not_found() Loop over all used partitions to get the correct offset into m_ordered_rec_buffer. 
*/ - for (i= 0; i < m_tot_parts; i++) + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (!bitmap_is_set(&m_part_info->used_partitions, i)) - continue; - if (bitmap_is_set(&m_key_not_found_partitions, i)) { /* @@ -5554,7 +6065,7 @@ int ha_partition::handle_ordered_index_scan_key_not_found() in index_read_map. */ curr_rec_buf= part_buf + PARTITION_BYTES_IN_POS; - error= m_file[i]->index_next(curr_rec_buf); + error= m_file[i]->ha_index_next(curr_rec_buf); /* HA_ERR_KEY_NOT_FOUND is not allowed from index_next! */ DBUG_ASSERT(error != HA_ERR_KEY_NOT_FOUND); if (!error) @@ -5809,27 +6320,36 @@ int ha_partition::info(uint flag) uint extra_var_flag= flag & HA_STATUS_VARIABLE_EXTRA; DBUG_ENTER("ha_partition::info"); +#ifndef DBUG_OFF + if (bitmap_is_set_all(&(m_part_info->read_partitions))) + DBUG_PRINT("info", ("All partitions are used")); +#endif /* DBUG_OFF */ if (flag & HA_STATUS_AUTO) { bool auto_inc_is_first_in_idx= (table_share->next_number_keypart == 0); DBUG_PRINT("info", ("HA_STATUS_AUTO")); if (!table->found_next_number_field) stats.auto_increment_value= 0; - else if (table_share->ha_part_data->auto_inc_initialized) + else if (part_share->auto_inc_initialized) { lock_auto_increment(); - stats.auto_increment_value= table_share->ha_part_data->next_auto_inc_val; + stats.auto_increment_value= part_share->next_auto_inc_val; unlock_auto_increment(); } else { lock_auto_increment(); /* to avoid two concurrent initializations, check again when locked */ - if (table_share->ha_part_data->auto_inc_initialized) - stats.auto_increment_value= - table_share->ha_part_data->next_auto_inc_val; + if (part_share->auto_inc_initialized) + stats.auto_increment_value= part_share->next_auto_inc_val; else { + /* + The auto-inc mutex in the table_share is locked, so we do not need + to have the handlers locked. 
+ HA_STATUS_NO_LOCK is not checked, since we cannot skip locking + the mutex, because it is initialized. + */ handler *file, **file_array; ulonglong auto_increment_value= 0; file_array= m_file; @@ -5847,11 +6367,11 @@ int ha_partition::info(uint flag) stats.auto_increment_value= auto_increment_value; if (auto_inc_is_first_in_idx) { - set_if_bigger(table_share->ha_part_data->next_auto_inc_val, + set_if_bigger(part_share->next_auto_inc_val, auto_increment_value); - table_share->ha_part_data->auto_inc_initialized= TRUE; + part_share->auto_inc_initialized= true; DBUG_PRINT("info", ("initializing next_auto_inc_val to %lu", - (ulong) table_share->ha_part_data->next_auto_inc_val)); + (ulong) part_share->next_auto_inc_val)); } } unlock_auto_increment(); @@ -5859,6 +6379,7 @@ int ha_partition::info(uint flag) } if (flag & HA_STATUS_VARIABLE) { + uint i; DBUG_PRINT("info", ("HA_STATUS_VARIABLE")); /* Calculates statistical variables @@ -5879,29 +6400,27 @@ int ha_partition::info(uint flag) check_time: Time of last check (only applicable to MyISAM) We report last time of all underlying handlers */ - handler *file, **file_array; + handler *file; stats.records= 0; stats.deleted= 0; stats.data_file_length= 0; stats.index_file_length= 0; stats.check_time= 0; stats.delete_length= 0; - file_array= m_file; - do + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - if (bitmap_is_set(&(m_part_info->used_partitions), (file_array - m_file))) - { - file= *file_array; - file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag); - stats.records+= file->stats.records; - stats.deleted+= file->stats.deleted; - stats.data_file_length+= file->stats.data_file_length; - stats.index_file_length+= file->stats.index_file_length; - stats.delete_length+= file->stats.delete_length; - if (file->stats.check_time > stats.check_time) - stats.check_time= file->stats.check_time; - } - } while (*(++file_array)); + 
file= m_file[i]; + file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag); + stats.records+= file->stats.records; + stats.deleted+= file->stats.deleted; + stats.data_file_length+= file->stats.data_file_length; + stats.index_file_length+= file->stats.index_file_length; + stats.delete_length+= file->stats.delete_length; + if (file->stats.check_time > stats.check_time) + stats.check_time= file->stats.check_time; + } if (stats.records && stats.records < 2 && !(m_file[0]->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT)) stats.records= 2; @@ -5972,7 +6491,7 @@ int ha_partition::info(uint flag) file= *file_array; /* Get variables if not already done */ if (!(flag & HA_STATUS_VARIABLE) || - !bitmap_is_set(&(m_part_info->used_partitions), + !bitmap_is_set(&(m_part_info->read_partitions), (file_array - m_file))) file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag); if (file->stats.records > max_records) @@ -6039,6 +6558,7 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info, uint part_id) { handler *file= m_file[part_id]; + DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id)); file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE | HA_STATUS_VARIABLE_EXTRA | HA_STATUS_NO_LOCK); @@ -6062,7 +6582,6 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info, General function to prepare handler for certain behavior. @param[in] operation operation to execute - operation Operation type for extra call @return status @retval 0 success @@ -6125,6 +6644,10 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info, ensure disk based tables are flushed at end of query execution. Currently is never used. + HA_EXTRA_FORCE_REOPEN: + Only used by MyISAM and Archive, called when altering table, + closing tables to enforce a reopen of the table files. 
+ 2) Operations used by some non-MyISAM handlers ---------------------------------------------- HA_EXTRA_KEYREAD_PRESERVE_FIELDS: @@ -6249,6 +6772,9 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info, HA_EXTRA_PREPARE_FOR_RENAME: Informs the handler we are about to attempt a rename of the table. + For handlers that have share open files (MyISAM key-file and + Archive writer) they must close the files before rename is possible + on Windows. HA_EXTRA_READCHECK: HA_EXTRA_NO_READCHECK: @@ -6269,10 +6795,6 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info, HA_EXTRA_NO_READCHECK=5 No readcheck on update HA_EXTRA_READCHECK=6 Use readcheck (def) - HA_EXTRA_FORCE_REOPEN: - Only used by MyISAM, called when altering table, closing tables to - enforce a reopen of the table files. - 4) Operations only used by temporary tables for query processing ---------------------------------------------------------------- HA_EXTRA_RESET_STATE: @@ -6381,6 +6903,10 @@ int ha_partition::extra(enum ha_extra_function operation) case HA_EXTRA_FLUSH: case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE: DBUG_RETURN(loop_extra(operation)); + case HA_EXTRA_PREPARE_FOR_RENAME: + case HA_EXTRA_FORCE_REOPEN: + DBUG_RETURN(loop_extra_alter(operation)); + break; /* Category 2), used by non-MyISAM handlers */ case HA_EXTRA_IGNORE_DUP_KEY: @@ -6393,9 +6919,6 @@ int ha_partition::extra(enum ha_extra_function operation) } /* Category 3), used by MyISAM handlers */ - case HA_EXTRA_PREPARE_FOR_RENAME: - DBUG_RETURN(prepare_for_rename()); - break; case HA_EXTRA_PREPARE_FOR_UPDATE: /* Needs to be run on the first partition in the range now, and @@ -6412,7 +6935,6 @@ int ha_partition::extra(enum ha_extra_function operation) break; case HA_EXTRA_NORMAL: case HA_EXTRA_QUICK: - case HA_EXTRA_FORCE_REOPEN: case HA_EXTRA_PREPARE_FOR_DROP: case HA_EXTRA_FLUSH_CACHE: { @@ -6517,33 +7039,34 @@ int ha_partition::extra(enum ha_extra_function operation) } -/* +/** Special extra call to 
reset extra parameters - SYNOPSIS - reset() - - RETURN VALUE - >0 Error code - 0 Success + @return Operation status. + @retval >0 Error code + @retval 0 Success - DESCRIPTION - Called at end of each statement to reset buffers + @note Called at end of each statement to reset buffers. + To avoid excessive calls, the m_partitions_to_reset bitmap keep records + of which partitions that have been used in extra(), external_lock() or + start_stmt() and is needed to be called. */ int ha_partition::reset(void) { - int result= 0, tmp; - handler **file; + int result= 0; + int tmp; + uint i; DBUG_ENTER("ha_partition::reset"); - if (m_part_info) - bitmap_set_all(&m_part_info->used_partitions); - file= m_file; - do + + for (i= bitmap_get_first_set(&m_partitions_to_reset); + i < m_tot_parts; + i= bitmap_get_next_set(&m_partitions_to_reset, i)) { - if ((tmp= (*file)->ha_reset())) + if ((tmp= m_file[i]->ha_reset())) result= tmp; - } while (*(++file)); + } + bitmap_clear_all(&m_partitions_to_reset); DBUG_RETURN(result); } @@ -6590,41 +7113,48 @@ void ha_partition::prepare_extra_cache(uint cachesize) m_extra_cache_size= cachesize; if (m_part_spec.start_part != NO_CURRENT_PART_ID) { + DBUG_ASSERT(bitmap_is_set(&m_partitions_to_reset, + m_part_spec.start_part)); + bitmap_set_bit(&m_partitions_to_reset, m_part_spec.start_part); late_extra_cache(m_part_spec.start_part); } DBUG_VOID_RETURN; } -/* - Prepares our new and reorged handlers for rename or delete +/** + Prepares our new and reorged handlers for rename or delete. 
- SYNOPSIS - prepare_for_delete() + @param operation Operation to forward - RETURN VALUE - >0 Error code - 0 Success + @return Operation status + @retval 0 Success + @retval !0 Error */ -int ha_partition::prepare_for_rename() +int ha_partition::loop_extra_alter(enum ha_extra_function operation) { int result= 0, tmp; handler **file; - DBUG_ENTER("ha_partition::prepare_for_rename()"); - + DBUG_ENTER("ha_partition::loop_extra_alter()"); + DBUG_ASSERT(operation == HA_EXTRA_PREPARE_FOR_RENAME || + operation == HA_EXTRA_FORCE_REOPEN); + if (m_new_file != NULL) { for (file= m_new_file; *file; file++) - if ((tmp= (*file)->extra(HA_EXTRA_PREPARE_FOR_RENAME))) - result= tmp; + if ((tmp= (*file)->extra(operation))) + result= tmp; + } + if (m_reorged_file != NULL) + { for (file= m_reorged_file; *file; file++) - if ((tmp= (*file)->extra(HA_EXTRA_PREPARE_FOR_RENAME))) - result= tmp; - DBUG_RETURN(result); + if ((tmp= (*file)->extra(operation))) + result= tmp; } - - DBUG_RETURN(loop_extra(HA_EXTRA_PREPARE_FOR_RENAME)); + if ((tmp= loop_extra(operation))) + result= tmp; + DBUG_RETURN(result); } /* @@ -6642,20 +7172,18 @@ int ha_partition::prepare_for_rename() int ha_partition::loop_extra(enum ha_extra_function operation) { int result= 0, tmp; - handler **file; - bool is_select; + uint i; DBUG_ENTER("ha_partition::loop_extra()"); - is_select= (thd_sql_command(ha_thd()) == SQLCOM_SELECT); - for (file= m_file; *file; file++) + for (i= bitmap_get_first_set(&m_part_info->lock_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->lock_partitions, i)) { - if (!is_select || - bitmap_is_set(&(m_part_info->used_partitions), file - m_file)) - { - if ((tmp= (*file)->extra(operation))) - result= tmp; - } + if ((tmp= m_file[i]->extra(operation))) + result= tmp; } + /* Add all used partitions to be called in reset(). 
*/ + bitmap_union(&m_partitions_to_reset, &m_part_info->lock_partitions); DBUG_RETURN(result); } @@ -6729,20 +7257,18 @@ void ha_partition::late_extra_no_cache(uint partition_id) MODULE optimiser support ****************************************************************************/ -/* - Get keys to use for scanning +/** + Get keys to use for scanning. - SYNOPSIS - keys_to_use_for_scanning() + @return key_map of keys usable for scanning - RETURN VALUE - key_map of keys usable for scanning + @note No need to use read_partitions here, since it does not depend on + which partitions is used, only which storage engine used. */ const key_map *ha_partition::keys_to_use_for_scanning() { DBUG_ENTER("ha_partition::keys_to_use_for_scanning"); - DBUG_RETURN(m_file[0]->keys_to_use_for_scanning()); } @@ -6756,7 +7282,7 @@ ha_rows ha_partition::min_rows_for_estimate() uint i, max_used_partitions, tot_used_partitions; DBUG_ENTER("ha_partition::min_rows_for_estimate"); - tot_used_partitions= bitmap_bits_set(&m_part_info->used_partitions); + tot_used_partitions= bitmap_bits_set(&m_part_info->read_partitions); /* All partitions might have been left as unused during partition pruning @@ -6819,7 +7345,7 @@ uint ha_partition::get_biggest_used_partition(uint *part_index) while ((*part_index) < m_tot_parts) { part_id= m_part_ids_sorted_by_num_of_records[(*part_index)++]; - if (bitmap_is_set(&m_part_info->used_partitions, part_id)) + if (bitmap_is_set(&m_part_info->read_partitions, part_id)) return part_id; } return NO_CURRENT_PART_ID; @@ -6839,12 +7365,13 @@ uint ha_partition::get_biggest_used_partition(uint *part_index) double ha_partition::scan_time() { double scan_time= 0; - handler **file; + uint i; DBUG_ENTER("ha_partition::scan_time"); - for (file= m_file; *file; file++) - if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file))) - scan_time+= (*file)->scan_time(); + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= 
bitmap_get_next_set(&m_part_info->read_partitions, i)) + scan_time+= m_file[i]->scan_time(); DBUG_RETURN(scan_time); } @@ -6930,7 +7457,7 @@ ha_rows ha_partition::estimate_rows_upper_bound() do { - if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file))) + if (bitmap_is_set(&(m_part_info->read_partitions), (file - m_file))) { rows= (*file)->estimate_rows_upper_bound(); if (rows == HA_POS_ERROR) @@ -6972,27 +7499,25 @@ double ha_partition::read_time(uint index, uint ranges, ha_rows rows) /** Number of rows in table. see handler.h - SYNOPSIS - records() - RETURN VALUE - Number of total rows in a partitioned table. + @return Number of records in the table (after pruning!) */ ha_rows ha_partition::records() { ha_rows rows, tot_rows= 0; - handler **file; + uint i; DBUG_ENTER("ha_partition::records"); - file= m_file; - do + for (i= bitmap_get_first_set(&m_part_info->read_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->read_partitions, i)) { - rows= (*file)->records(); + rows= m_file[i]->records(); if (rows == HA_POS_ERROR) DBUG_RETURN(HA_POS_ERROR); tot_rows+= rows; - } while (*(++file)); + } DBUG_RETURN(tot_rows); } @@ -7043,82 +7568,77 @@ uint8 ha_partition::table_cache_type() } +/** + Calculate hash value for KEY partitioning using an array of fields. + + @param field_array An array of the fields in KEY partitioning + + @return hash_value calculated + + @note Uses the hash function on the character set of the field. + Integer and floating point fields use the binary character set by default. 
+*/ + +uint32 ha_partition::calculate_key_hash_value(Field **field_array) +{ + ulong nr1= 1; + ulong nr2= 4; + + do + { + Field *field= *field_array; + field->hash(&nr1, &nr2); + } while (*(++field_array)); + return (uint32) nr1; +} + + /**************************************************************************** MODULE print messages ****************************************************************************/ const char *ha_partition::index_type(uint inx) { + uint first_used_partition; DBUG_ENTER("ha_partition::index_type"); - DBUG_RETURN(m_file[0]->index_type(inx)); -} - - -enum row_type ha_partition::get_row_type() const -{ - handler **file; - enum row_type type= (*m_file)->get_row_type(); + first_used_partition= bitmap_get_first_set(&(m_part_info->read_partitions)); - for (file= m_file, file++; *file; file++) + if (first_used_partition == MY_BIT_NONE) { - enum row_type part_type= (*file)->get_row_type(); - if (part_type != type) - return ROW_TYPE_NOT_USED; + DBUG_ASSERT(0); // How can this happen? + DBUG_RETURN(handler::index_type(inx)); } - return type; + DBUG_RETURN(m_file[first_used_partition]->index_type(inx)); } -void ha_partition::append_row_to_str(String &str) +enum row_type ha_partition::get_row_type() const { - Field **field_ptr; - const uchar *rec; - bool is_rec0= !m_err_rec || m_err_rec == table->record[0]; - if (is_rec0) - rec= table->record[0]; - else - rec= m_err_rec; - // If PK, use full PK instead of full part field array! 
- if (table->s->primary_key != MAX_KEY) - { - KEY *key= table->key_info + table->s->primary_key; - KEY_PART_INFO *key_part= key->key_part; - KEY_PART_INFO *key_part_end= key_part + key->key_parts; - if (!is_rec0) - set_key_field_ptr(key, rec, table->record[0]); - for (; key_part != key_part_end; key_part++) - { - Field *field= key_part->field; - str.append(" "); - str.append(field->field_name); - str.append(":"); - field_unpack(&str, field, rec, 0, false); - } - if (!is_rec0) - set_key_field_ptr(key, table->record[0], rec); - } - else + uint i; + enum row_type type; + DBUG_ENTER("ha_partition::get_row_type"); + + i= bitmap_get_first_set(&m_part_info->read_partitions); + DBUG_ASSERT(i < m_tot_parts); + if (i >= m_tot_parts) + DBUG_RETURN(ROW_TYPE_NOT_USED); + + type= m_file[i]->get_row_type(); + DBUG_PRINT("info", ("partition %u, row_type: %d", i, type)); + + for (i= bitmap_get_next_set(&m_part_info->lock_partitions, i); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->lock_partitions, i)) { - if (!is_rec0) - set_field_ptr(m_part_info->full_part_field_array, rec, - table->record[0]); - /* No primary key, use full partition field array. */ - for (field_ptr= m_part_info->full_part_field_array; - *field_ptr; - field_ptr++) - { - Field *field= *field_ptr; - str.append(" "); - str.append(field->field_name); - str.append(":"); - field_unpack(&str, field, rec, 0, false); - } - if (!is_rec0) - set_field_ptr(m_part_info->full_part_field_array, table->record[0], - rec); + enum row_type part_type= m_file[i]->get_row_type(); + DBUG_PRINT("info", ("partition %u, row_type: %d", i, type)); + if (part_type != type) + DBUG_RETURN(ROW_TYPE_NOT_USED); } + + DBUG_RETURN(type); } @@ -7130,72 +7650,24 @@ void ha_partition::print_error(int error, myf errflag) /* Should probably look for my own errors first */ DBUG_PRINT("enter", ("error: %d", error)); - if (error == HA_ERR_NO_PARTITION_FOUND) + if ((error == HA_ERR_NO_PARTITION_FOUND) && + ! 
(thd->lex->alter_info.flags & Alter_info::ALTER_TRUNCATE_PARTITION)) + m_part_info->print_no_partition_found(table); + else { - switch(thd_sql_command(thd)) + /* In case m_file has not been initialized, like in bug#42438 */ + if (m_file) { - case SQLCOM_DELETE: - case SQLCOM_DELETE_MULTI: - case SQLCOM_UPDATE: - case SQLCOM_UPDATE_MULTI: - if (m_err_rec) + if (m_last_part >= m_tot_parts) { - uint max_length; - char buf[MAX_KEY_LENGTH]; - const char *msg= "Found a row in wrong partition ("; - String str(buf,sizeof(buf),system_charset_info); - uint32 part_id; - /* Should only happen on DELETE or UPDATE! */ - str.length(0); - str.append_ulonglong(m_last_part); - str.append(" != "); - if (!get_part_for_delete(m_err_rec, m_rec0, m_part_info, &part_id)) - { - str.append_ulonglong(part_id); - } - str.append(")"); - append_row_to_str(str); - /* Log this error, so the DBA can notice it and fix it! */ - sql_print_error("Table '%-192s' corrupted: %s%s\n" - "Please CHECK and REPAIR the table!", - table->s->table_name.str, msg, str.c_ptr_safe()); - - max_length= (MYSQL_ERRMSG_SIZE- - (uint) strlen(msg)); - if (str.length() >= max_length) - { - str.length(max_length-4); - str.append(STRING_WITH_LEN("...")); - } - my_printf_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, "%s%s", MYF(0), - msg, str.c_ptr_safe()); - m_err_rec= NULL; - DBUG_VOID_RETURN; - } - default: - { - if (!(thd->lex->alter_info.flags & ALTER_TRUNCATE_PARTITION)) - { - m_part_info->print_no_partition_found(table); - DBUG_VOID_RETURN; - } + DBUG_ASSERT(0); + m_last_part= 0; } - /* fall through to generic error handling. 
*/ + m_file[m_last_part]->print_error(error, errflag); } + else + handler::print_error(error, errflag); } - - /* In case m_file has not been initialized, like in bug#42438 */ - if (m_file) - { - if (m_last_part >= m_tot_parts) - { - DBUG_ASSERT(0); - m_last_part= 0; - } - m_file[m_last_part]->print_error(error, errflag); - } - else - handler::print_error(error, errflag); DBUG_VOID_RETURN; } @@ -7215,49 +7687,48 @@ bool ha_partition::get_error_message(int error, String *buf) /**************************************************************************** - MODULE handler characteristics + MODULE in-place ALTER ****************************************************************************/ /** + Get table flags. +*/ + +handler::Table_flags ha_partition::table_flags() const +{ + uint first_used_partition= 0; + DBUG_ENTER("ha_partition::table_flags"); + if (m_handler_status < handler_initialized || + m_handler_status >= handler_closed) + DBUG_RETURN(PARTITION_ENABLED_TABLE_FLAGS); + + if (get_lock_type() != F_UNLCK) + { + /* + The flags are cached after external_lock, and may depend on isolation + level. So we should use a locked partition to get the correct flags. + */ + first_used_partition= bitmap_get_first_set(&m_part_info->lock_partitions); + if (first_used_partition == MY_BIT_NONE) + first_used_partition= 0; + } + DBUG_RETURN((m_file[first_used_partition]->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS)); +} + + +/** alter_table_flags must be on handler/table level, not on hton level due to the ha_partition hton does not know what the underlying hton is. 
*/ uint ha_partition::alter_table_flags(uint flags) { - uint flags_to_return, flags_to_check; + uint flags_to_return; DBUG_ENTER("ha_partition::alter_table_flags"); flags_to_return= ht->alter_table_flags(flags); - flags_to_return|= m_file[0]->alter_table_flags(flags); + flags_to_return|= m_file[0]->alter_table_flags(flags); - /* - If one partition fails we must be able to revert the change for the other, - already altered, partitions. So both ADD and DROP can only be supported in - pairs. - */ - flags_to_check= HA_INPLACE_ADD_INDEX_NO_READ_WRITE; - flags_to_check|= HA_INPLACE_DROP_INDEX_NO_READ_WRITE; - if ((flags_to_return & flags_to_check) != flags_to_check) - flags_to_return&= ~flags_to_check; - flags_to_check= HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE; - flags_to_check|= HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE; - if ((flags_to_return & flags_to_check) != flags_to_check) - flags_to_return&= ~flags_to_check; - flags_to_check= HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE; - flags_to_check|= HA_INPLACE_DROP_PK_INDEX_NO_READ_WRITE; - if ((flags_to_return & flags_to_check) != flags_to_check) - flags_to_return&= ~flags_to_check; - flags_to_check= HA_INPLACE_ADD_INDEX_NO_WRITE; - flags_to_check|= HA_INPLACE_DROP_INDEX_NO_WRITE; - if ((flags_to_return & flags_to_check) != flags_to_check) - flags_to_return&= ~flags_to_check; - flags_to_check= HA_INPLACE_ADD_UNIQUE_INDEX_NO_WRITE; - flags_to_check|= HA_INPLACE_DROP_UNIQUE_INDEX_NO_WRITE; - if ((flags_to_return & flags_to_check) != flags_to_check) - flags_to_return&= ~flags_to_check; - flags_to_check= HA_INPLACE_ADD_PK_INDEX_NO_WRITE; - flags_to_check|= HA_INPLACE_DROP_PK_INDEX_NO_WRITE; - if ((flags_to_return & flags_to_check) != flags_to_check) - flags_to_return&= ~flags_to_check; DBUG_RETURN(flags_to_return); } @@ -7286,228 +7757,297 @@ bool ha_partition::check_if_incompatible_data(HA_CREATE_INFO *create_info, /** - Helper class for [final_]add_index, see handler.h + Support of in-place alter table. 
*/ -class ha_partition_add_index : public handler_add_index +/** + Helper class for in-place alter, see handler.h +*/ + +class ha_partition_inplace_ctx : public inplace_alter_handler_ctx { public: - handler_add_index **add_array; - ha_partition_add_index(TABLE* table_arg, KEY* key_info_arg, - uint num_of_keys_arg) - : handler_add_index(table_arg, key_info_arg, num_of_keys_arg) - {} - ~ha_partition_add_index() {} -}; - + inplace_alter_handler_ctx **handler_ctx_array; + bool rollback_done; +private: + uint m_tot_parts; -/** - Support of in-place add/drop index +public: + ha_partition_inplace_ctx(THD *thd, uint tot_parts) + : inplace_alter_handler_ctx(), + handler_ctx_array(NULL), + rollback_done(false), + m_tot_parts(tot_parts) + {} - @param table_arg Table to add index to - @param key_info Struct over the new keys to add - @param num_of_keys Number of keys to add - @param[out] add Data to be submitted with final_add_index + ~ha_partition_inplace_ctx() + { + if (handler_ctx_array) + { + for (uint index= 0; index < m_tot_parts; index++) + delete handler_ctx_array[index]; + } + } +}; - @return Operation status - @retval 0 Success - @retval != 0 Failure (error code returned, and all operations rollbacked) -*/ -int ha_partition::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys, - handler_add_index **add) +enum_alter_inplace_result +ha_partition::check_if_supported_inplace_alter(TABLE *altered_table, + Alter_inplace_info *ha_alter_info) { - uint i; - int ret= 0; +#ifdef PARTITION_SUPPORTS_INPLACE_ALTER + uint index= 0; + enum_alter_inplace_result result= HA_ALTER_INPLACE_NO_LOCK; + ha_partition_inplace_ctx *part_inplace_ctx; THD *thd= ha_thd(); - ha_partition_add_index *part_add_index; +#else + enum_alter_inplace_result result= HA_ALTER_INPLACE_NOT_SUPPORTED; +#endif - DBUG_ENTER("ha_partition::add_index"); - /* - There has already been a check in fix_partition_func in mysql_alter_table - before this call, which checks for unique/primary key violations of the - 
partitioning function. So no need for extra check here. - */ - + DBUG_ENTER("ha_partition::check_if_supported_inplace_alter"); + +#ifndef PARTITION_SUPPORTS_INPLACE_ALTER /* - This will be freed at the end of the statement. - And destroyed at final_add_index. (Sql_alloc does not free in delete). + Due to bug#14760210 partitions can be out-of-sync in case + commit_inplace_alter_table fails after the first partition. + + Until we can either commit all partitions at the same time or + have an atomic recover on failure/crash we don't support any + inplace alter. + + TODO: investigate what happens when indexes are out-of-sync + between partitions. If safe and possible to recover from, + then we could allow ADD/DROP INDEX. */ - part_add_index= new (thd->mem_root) - ha_partition_add_index(table_arg, key_info, num_of_keys); - if (!part_add_index) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - part_add_index->add_array= (handler_add_index **) - thd->alloc(sizeof(void *) * m_tot_parts); - if (!part_add_index->add_array) - { - delete part_add_index; - DBUG_RETURN(HA_ERR_OUT_OF_MEM); + DBUG_RETURN(result); +#else + part_inplace_ctx= + new (thd->mem_root) ha_partition_inplace_ctx(thd, m_tot_parts); + if (!part_inplace_ctx) + DBUG_RETURN(HA_ALTER_ERROR); + + part_inplace_ctx->handler_ctx_array= (inplace_alter_handler_ctx **) + thd->alloc(sizeof(inplace_alter_handler_ctx *) * m_tot_parts); + if (!part_inplace_ctx->handler_ctx_array) + DBUG_RETURN(HA_ALTER_ERROR); + + for (index= 0; index < m_tot_parts; index++) + part_inplace_ctx->handler_ctx_array[index]= NULL; + + for (index= 0; index < m_tot_parts; index++) + { + enum_alter_inplace_result p_result= + m_file[index]->check_if_supported_inplace_alter(altered_table, + ha_alter_info); + part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx; + + if (p_result < result) + result= p_result; + if (result == HA_ALTER_ERROR) + break; } + ha_alter_info->handler_ctx= part_inplace_ctx; - for (i= 0; i < m_tot_parts; i++) - { - if ((ret= 
m_file[i]->add_index(table_arg, key_info, num_of_keys, - &part_add_index->add_array[i]))) - goto err; - } - *add= part_add_index; - DBUG_RETURN(ret); -err: - /* Rollback all prepared partitions. i - 1 .. 0 */ - while (i) - { - i--; - (void) m_file[i]->final_add_index(part_add_index->add_array[i], false); - } - delete part_add_index; - DBUG_RETURN(ret); + DBUG_RETURN(result); +#endif } -/** - Second phase of in-place add index. +bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info) +{ + uint index= 0; + bool error= false; + ha_partition_inplace_ctx *part_inplace_ctx; - @param add Info from add_index - @param commit Should we commit or rollback the add_index operation + DBUG_ENTER("ha_partition::prepare_inplace_alter_table"); - @return Operation status - @retval 0 Success - @retval != 0 Failure (error code returned) + part_inplace_ctx= + static_cast<class ha_partition_inplace_ctx*>(ha_alter_info->handler_ctx); - @note If commit is false, index changes are rolled back by dropping the - added indexes. 
If commit is true, nothing is done as the indexes - were already made active in ::add_index() -*/ + for (index= 0; index < m_tot_parts && !error; index++) + { + ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[index]; + if (m_file[index]->ha_prepare_inplace_alter_table(altered_table, + ha_alter_info)) + error= true; + part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx; + } + ha_alter_info->handler_ctx= part_inplace_ctx; -int ha_partition::final_add_index(handler_add_index *add, bool commit) + DBUG_RETURN(error); +} + + +bool ha_partition::inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info) { - ha_partition_add_index *part_add_index; - uint i; - int ret= 0; + uint index= 0; + bool error= false; + ha_partition_inplace_ctx *part_inplace_ctx; - DBUG_ENTER("ha_partition::final_add_index"); - - if (!add) + DBUG_ENTER("ha_partition::inplace_alter_table"); + + part_inplace_ctx= + static_cast<class ha_partition_inplace_ctx*>(ha_alter_info->handler_ctx); + + for (index= 0; index < m_tot_parts && !error; index++) { - DBUG_ASSERT(!commit); - DBUG_RETURN(0); + ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[index]; + if (m_file[index]->ha_inplace_alter_table(altered_table, + ha_alter_info)) + error= true; + part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx; } - part_add_index= static_cast<class ha_partition_add_index*>(add); + ha_alter_info->handler_ctx= part_inplace_ctx; - for (i= 0; i < m_tot_parts; i++) + DBUG_RETURN(error); +} + + +/* + Note that this function will try rollback failed ADD INDEX by + executing DROP INDEX for the indexes that were committed (if any) + before the error occured. This means that the underlying storage + engine must be able to drop index in-place with X-lock held. 
+ (As X-lock will be held here if new indexes are to be committed) +*/ +bool ha_partition::commit_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info, + bool commit) +{ + uint index= 0; + ha_partition_inplace_ctx *part_inplace_ctx; + + DBUG_ENTER("ha_partition::commit_inplace_alter_table"); + + part_inplace_ctx= + static_cast<class ha_partition_inplace_ctx*>(ha_alter_info->handler_ctx); + + if (!commit && part_inplace_ctx->rollback_done) + DBUG_RETURN(false); // We have already rolled back changes. + + for (index= 0; index < m_tot_parts; index++) { - if ((ret= m_file[i]->final_add_index(part_add_index->add_array[i], commit))) + ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[index]; + if (m_file[index]->ha_commit_inplace_alter_table(altered_table, + ha_alter_info, commit)) + { + part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx; goto err; + } + part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx; DBUG_EXECUTE_IF("ha_partition_fail_final_add_index", { - /* Simulate a failure by rollback the second partition */ + /* Simulate failure by rollback of the second partition */ if (m_tot_parts > 1) { - i++; - m_file[i]->final_add_index(part_add_index->add_array[i], false); - /* Set an error that is specific to ha_partition. */ - ret= HA_ERR_NO_PARTITION_FOUND; + index++; + ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[index]; + m_file[index]->ha_commit_inplace_alter_table(altered_table, + ha_alter_info, false); + part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx; goto err; } }); } - delete part_add_index; - DBUG_RETURN(ret); -err: - uint j; - uint *key_numbers= NULL; - KEY *old_key_info= NULL; - uint num_of_keys= 0; - int error; - - /* How could this happen? 
Needed to create a covering test case :) */ - DBUG_ASSERT(ret == HA_ERR_NO_PARTITION_FOUND); + ha_alter_info->handler_ctx= part_inplace_ctx; - if (i > 0) - { - num_of_keys= part_add_index->num_of_keys; - key_numbers= (uint*) ha_thd()->alloc(sizeof(uint) * num_of_keys); - if (!key_numbers) + DBUG_RETURN(false); + +err: + ha_alter_info->handler_ctx= part_inplace_ctx; + /* + Reverting committed changes is (for now) only possible for ADD INDEX + For other changes we will just try to rollback changes. + */ + if (index > 0 && + ha_alter_info->handler_flags & (Alter_inplace_info::ADD_INDEX | + Alter_inplace_info::ADD_UNIQUE_INDEX | + Alter_inplace_info::ADD_PK_INDEX)) + { + Alter_inplace_info drop_info(ha_alter_info->create_info, + ha_alter_info->alter_info, + NULL, 0, + ha_alter_info->modified_part_info, + ha_alter_info->ignore); + + if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_INDEX) + drop_info.handler_flags|= Alter_inplace_info::DROP_INDEX; + if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_UNIQUE_INDEX) + drop_info.handler_flags|= Alter_inplace_info::DROP_UNIQUE_INDEX; + if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_PK_INDEX) + drop_info.handler_flags|= Alter_inplace_info::DROP_PK_INDEX; + drop_info.index_drop_count= ha_alter_info->index_add_count; + drop_info.index_drop_buffer= + (KEY**) ha_thd()->alloc(sizeof(KEY*) * drop_info.index_drop_count); + if (!drop_info.index_drop_buffer) { sql_print_error("Failed with error handling of adding index:\n" "committing index failed, and when trying to revert " "already committed partitions we failed allocating\n" "memory for the index for table '%s'", table_share->table_name.str); - DBUG_RETURN(HA_ERR_OUT_OF_MEM); + DBUG_RETURN(true); } - old_key_info= table->key_info; - /* - Use the newly added key_info as table->key_info to remove them. - Note that this requires the subhandlers to use name lookup of the - index. 
They must use given table->key_info[key_number], they cannot - use their local view of the keys, since table->key_info only include - the indexes to be removed here. - */ - for (j= 0; j < num_of_keys; j++) - key_numbers[j]= j; - table->key_info= part_add_index->key_info; - } + for (uint i= 0; i < drop_info.index_drop_count; i++) + drop_info.index_drop_buffer[i]= + &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]]; - for (j= 0; j < m_tot_parts; j++) - { - if (j < i) + // Drop index for each partition where we already committed new index. + for (uint i= 0; i < index; i++) { - /* Remove the newly added index */ - error= m_file[j]->prepare_drop_index(table, key_numbers, num_of_keys); - if (error || m_file[j]->final_drop_index(table)) - { + bool error= m_file[i]->ha_prepare_inplace_alter_table(altered_table, + &drop_info); + error|= m_file[i]->ha_inplace_alter_table(altered_table, &drop_info); + error|= m_file[i]->ha_commit_inplace_alter_table(altered_table, + &drop_info, true); + if (error) sql_print_error("Failed with error handling of adding index:\n" "committing index failed, and when trying to revert " "already committed partitions we failed removing\n" "the index for table '%s' partition nr %d", - table_share->table_name.str, j); - } + table_share->table_name.str, i); } - else if (j > i) + + // Rollback uncommitted changes. + for (uint i= index+1; i < m_tot_parts; i++) { - /* Rollback non finished partitions */ - if (m_file[j]->final_add_index(part_add_index->add_array[j], false)) + ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[i]; + if (m_file[i]->ha_commit_inplace_alter_table(altered_table, + ha_alter_info, false)) { /* How could this happen? 
*/ sql_print_error("Failed with error handling of adding index:\n" "Rollback of add_index failed for table\n" "'%s' partition nr %d", - table_share->table_name.str, j); + table_share->table_name.str, i); } + part_inplace_ctx->handler_ctx_array[i]= ha_alter_info->handler_ctx; } + + // We have now reverted/rolled back changes. Set flag to prevent + // it from being done again. + part_inplace_ctx->rollback_done= true; + + print_error(HA_ERR_NO_PARTITION_FOUND, MYF(0)); } - if (i > 0) - table->key_info= old_key_info; - delete part_add_index; - DBUG_RETURN(ret); -} -int ha_partition::prepare_drop_index(TABLE *table_arg, uint *key_num, - uint num_of_keys) -{ - handler **file; - int ret= 0; + ha_alter_info->handler_ctx= part_inplace_ctx; - /* - DROP INDEX does not affect partitioning. - */ - for (file= m_file; *file; file++) - if ((ret= (*file)->prepare_drop_index(table_arg, key_num, num_of_keys))) - break; - return ret; + DBUG_RETURN(true); } -int ha_partition::final_drop_index(TABLE *table_arg) +void ha_partition::notify_table_changed() { handler **file; - int ret= HA_ERR_WRONG_COMMAND; + + DBUG_ENTER("ha_partition::notify_table_changed"); for (file= m_file; *file; file++) - if ((ret= (*file)->final_drop_index(table_arg))) - break; - return ret; + (*file)->ha_notify_table_changed(); + + DBUG_VOID_RETURN; } @@ -7647,8 +8187,8 @@ int ha_partition::reset_auto_increment(ulonglong value) int res; DBUG_ENTER("ha_partition::reset_auto_increment"); lock_auto_increment(); - table_share->ha_part_data->auto_inc_initialized= FALSE; - table_share->ha_part_data->next_auto_inc_val= 0; + part_share->auto_inc_initialized= false; + part_share->next_auto_inc_val= 0; do { if ((res= (*file)->ha_reset_auto_increment(value)) != 0) @@ -7662,7 +8202,7 @@ int ha_partition::reset_auto_increment(ulonglong value) /** This method is called by update_auto_increment which in turn is called by the individual handlers as part of write_row. 
We use the - table_share->ha_part_data->next_auto_inc_val, or search all + part_share->next_auto_inc_val, or search all partitions for the highest auto_increment_value if not initialized or if auto_increment field is a secondary part of a key, we must search every partition when holding a mutex to be sure of correctness. @@ -7718,9 +8258,9 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment, /* This is initialized in the beginning of the first write_row call. */ - DBUG_ASSERT(table_share->ha_part_data->auto_inc_initialized); + DBUG_ASSERT(part_share->auto_inc_initialized); /* - Get a lock for handling the auto_increment in table_share->ha_part_data + Get a lock for handling the auto_increment in part_share for avoiding two concurrent statements getting the same number. */ @@ -7747,9 +8287,8 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment, } /* this gets corrected (for offset/increment) in update_auto_increment */ - *first_value= table_share->ha_part_data->next_auto_inc_val; - table_share->ha_part_data->next_auto_inc_val+= - nb_desired_values * increment; + *first_value= part_share->next_auto_inc_val; + part_share->next_auto_inc_val+= nb_desired_values * increment; unlock_auto_increment(); DBUG_PRINT("info", ("*first_value: %lu", (ulong) *first_value)); @@ -7764,14 +8303,19 @@ void ha_partition::release_auto_increment() if (table->s->next_number_keypart) { - for (uint i= 0; i < m_tot_parts; i++) + uint i; + for (i= bitmap_get_first_set(&m_part_info->lock_partitions); + i < m_tot_parts; + i= bitmap_get_next_set(&m_part_info->lock_partitions, i)) + { m_file[i]->ha_release_auto_increment(); + } } else if (next_insert_id) { ulonglong next_auto_inc_val; lock_auto_increment(); - next_auto_inc_val= table_share->ha_part_data->next_auto_inc_val; + next_auto_inc_val= part_share->next_auto_inc_val; /* If the current auto_increment values is lower than the reserved value, and the reserved value was reserved by this 
thread, @@ -7786,10 +8330,10 @@ void ha_partition::release_auto_increment() with SET INSERT_ID, i.e. forced/non generated values. */ if (thd->auto_inc_intervals_forced.maximum() < next_insert_id) - table_share->ha_part_data->next_auto_inc_val= next_insert_id; + part_share->next_auto_inc_val= next_insert_id; } - DBUG_PRINT("info", ("table_share->ha_part_data->next_auto_inc_val: %lu", - (ulong) table_share->ha_part_data->next_auto_inc_val)); + DBUG_PRINT("info", ("part_share->next_auto_inc_val: %lu", + (ulong) part_share->next_auto_inc_val)); /* Unlock the multi row statement lock taken in get_auto_increment */ if (auto_increment_safe_stmt_log_lock) @@ -7813,6 +8357,27 @@ void ha_partition::init_table_handle_for_HANDLER() } +/** + Return the checksum of the table (all partitions) +*/ + +uint ha_partition::checksum() const +{ + ha_checksum sum= 0; + + DBUG_ENTER("ha_partition::checksum"); + if ((table_flags() & (HA_HAS_OLD_CHECKSUM | HA_HAS_NEW_CHECKSUM))) + { + handler **file= m_file; + do + { + sum+= (*file)->checksum(); + } while (*(++file)); + } + DBUG_RETURN(sum); +} + + /**************************************************************************** MODULE enable/disable indexes ****************************************************************************/ @@ -7832,6 +8397,7 @@ int ha_partition::disable_indexes(uint mode) handler **file; int error= 0; + DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions))); for (file= m_file; *file; file++) { if ((error= (*file)->ha_disable_indexes(mode))) @@ -7856,6 +8422,7 @@ int ha_partition::enable_indexes(uint mode) handler **file; int error= 0; + DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions))); for (file= m_file; *file; file++) { if ((error= (*file)->ha_enable_indexes(mode))) @@ -7880,6 +8447,7 @@ int ha_partition::indexes_are_disabled(void) handler **file; int error= 0; + DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions))); for (file= m_file; *file; file++) { if ((error= 
(*file)->indexes_are_disabled())) @@ -7889,288 +8457,6 @@ int ha_partition::indexes_are_disabled(void) } -/** - Check/fix misplaced rows. - - @param read_part_id Partition to check/fix. - @param repair If true, move misplaced rows to correct partition. - - @return Operation status. - @retval 0 Success - @retval != 0 Error -*/ - -int ha_partition::check_misplaced_rows(uint read_part_id, bool repair) -{ - int result= 0; - uint32 correct_part_id; - longlong func_value; - longlong num_misplaced_rows= 0; - - DBUG_ENTER("ha_partition::check_misplaced_rows"); - - DBUG_ASSERT(m_file); - - if (repair) - { - /* We must read the full row, if we need to move it! */ - bitmap_set_all(table->read_set); - bitmap_set_all(table->write_set); - } - else - { - /* Only need to read the partitioning fields. */ - bitmap_union(table->read_set, &m_part_info->full_part_field_set); - } - - if ((result= m_file[read_part_id]->ha_rnd_init(1))) - DBUG_RETURN(result); - - while (true) - { - if ((result= m_file[read_part_id]->rnd_next(m_rec0))) - { - if (result == HA_ERR_RECORD_DELETED) - continue; - if (result != HA_ERR_END_OF_FILE) - break; - - if (num_misplaced_rows > 0) - { - print_admin_msg(ha_thd(), "warning", table_share->db.str, table->alias, - opt_op_name[REPAIR_PARTS], - "Moved %lld misplaced rows", - num_misplaced_rows); - } - /* End-of-file reached, all rows are now OK, reset result and break. */ - result= 0; - break; - } - - result= m_part_info->get_partition_id(m_part_info, &correct_part_id, - &func_value); - if (result) - break; - - if (correct_part_id != read_part_id) - { - num_misplaced_rows++; - if (!repair) - { - /* Check. */ - print_admin_msg(ha_thd(), "error", table_share->db.str, table->alias, - opt_op_name[CHECK_PARTS], - "Found a misplaced row"); - /* Break on first misplaced row! 
*/ - result= HA_ADMIN_NEEDS_UPGRADE; - break; - } - else - { - DBUG_PRINT("info", ("Moving row from partition %d to %d", - read_part_id, correct_part_id)); - - /* - Insert row into correct partition. Notice that there are no commit - for every N row, so the repair will be one large transaction! - */ - if ((result= m_file[correct_part_id]->ha_write_row(m_rec0))) - { - /* - We have failed to insert a row, it might have been a duplicate! - */ - char buf[MAX_KEY_LENGTH]; - String str(buf,sizeof(buf),system_charset_info); - str.length(0); - if (result == HA_ERR_FOUND_DUPP_KEY) - { - str.append("Duplicate key found, " - "please update or delete the record:\n"); - result= HA_ADMIN_CORRUPT; - } - m_err_rec= NULL; - append_row_to_str(str); - - /* - If the engine supports transactions, the failure will be - rollbacked. - */ - if (!m_file[correct_part_id]->has_transactions()) - { - /* Log this error, so the DBA can notice it and fix it! */ - sql_print_error("Table '%-192s' failed to move/insert a row" - " from part %d into part %d:\n%s", - table->s->table_name.str, - read_part_id, - correct_part_id, - str.c_ptr_safe()); - } - print_admin_msg(ha_thd(), "error", table_share->db.str, table->alias, - opt_op_name[REPAIR_PARTS], - "Failed to move/insert a row" - " from part %d into part %d:\n%s", - read_part_id, - correct_part_id, - str.c_ptr_safe()); - break; - } - - /* Delete row from wrong partition. */ - if ((result= m_file[read_part_id]->ha_delete_row(m_rec0))) - { - if (m_file[correct_part_id]->has_transactions()) - break; - /* - We have introduced a duplicate, since we failed to remove it - from the wrong partition. - */ - char buf[MAX_KEY_LENGTH]; - String str(buf,sizeof(buf),system_charset_info); - str.length(0); - m_err_rec= NULL; - append_row_to_str(str); - - /* Log this error, so the DBA can notice it and fix it! */ - sql_print_error("Table '%-192s': Delete from part %d failed with" - " error %d. 
But it was already inserted into" - " part %d, when moving the misplaced row!" - "\nPlease manually fix the duplicate row:\n%s", - table->s->table_name.str, - read_part_id, - result, - correct_part_id, - str.c_ptr_safe()); - break; - } - } - } - } - - int tmp_result= m_file[read_part_id]->ha_rnd_end(); - DBUG_RETURN(result ? result : tmp_result); -} - - -#define KEY_PARTITIONING_CHANGED_STR \ - "KEY () partitioning changed, please run:\nALTER TABLE %s.%s %s" - -int ha_partition::check_for_upgrade(HA_CHECK_OPT *check_opt) -{ - int error= HA_ADMIN_NEEDS_CHECK; - DBUG_ENTER("ha_partition::check_for_upgrade"); - - /* - This is called even without FOR UPGRADE, - if the .frm version is lower than the current version. - In that case return that it needs checking! - */ - if (!(check_opt->sql_flags & TT_FOR_UPGRADE)) - DBUG_RETURN(error); - - /* - Partitions will be checked for during their ha_check! - - Check if KEY (sub)partitioning was used and any field's hash calculation - differs from 5.1, see bug#14521864. 
- */ - if (table->s->mysql_version < 50503 && // 5.1 table (<5.5.3) - ((m_part_info->part_type == HASH_PARTITION && // KEY partitioned - m_part_info->list_of_part_fields) || - (m_is_sub_partitioned && // KEY subpartitioned - m_part_info->list_of_subpart_fields))) - { - Field **field; - if (m_is_sub_partitioned) - { - field= m_part_info->subpart_field_array; - } - else - { - field= m_part_info->part_field_array; - } - for (; *field; field++) - { - switch ((*field)->real_type()) { - case MYSQL_TYPE_TINY: - case MYSQL_TYPE_SHORT: - case MYSQL_TYPE_LONG: - case MYSQL_TYPE_FLOAT: - case MYSQL_TYPE_DOUBLE: - case MYSQL_TYPE_NEWDECIMAL: - case MYSQL_TYPE_TIMESTAMP: - case MYSQL_TYPE_LONGLONG: - case MYSQL_TYPE_INT24: - case MYSQL_TYPE_TIME: - case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_YEAR: - case MYSQL_TYPE_NEWDATE: - case MYSQL_TYPE_ENUM: - case MYSQL_TYPE_SET: - { - THD *thd= ha_thd(); - char *part_buf; - String db_name, table_name; - uint part_buf_len; - bool skip_generation= false; - partition_info::enum_key_algorithm old_algorithm; - old_algorithm= m_part_info->key_algorithm; - error= HA_ADMIN_FAILED; - append_identifier(ha_thd(), &db_name, table_share->db.str, - table_share->db.length); - append_identifier(ha_thd(), &table_name, table_share->table_name.str, - table_share->table_name.length); - if (m_part_info->key_algorithm != partition_info::KEY_ALGORITHM_NONE) - { - /* - Only possible when someone tampered with .frm files, - like during tests :) - */ - skip_generation= true; - } - m_part_info->key_algorithm= partition_info::KEY_ALGORITHM_51; - if (skip_generation || - !(part_buf= generate_partition_syntax(m_part_info, - &part_buf_len, - true, - true, - NULL, - NULL, - NULL)) || - /* Also check that the length is smaller than the output field! 
*/ - (part_buf_len + db_name.length() + table_name.length()) >= - (SQL_ADMIN_MSG_TEXT_SIZE - - (strlen(KEY_PARTITIONING_CHANGED_STR) - 3))) - { - print_admin_msg(thd, "error", table_share->db.str, table->alias, - opt_op_name[CHECK_PARTS], - KEY_PARTITIONING_CHANGED_STR, - db_name.c_ptr_safe(), table_name.c_ptr_safe(), - "<old partition clause>, but add ALGORITHM = 1" - " between 'KEY' and '(' to change the metadata" - " without the need of a full table rebuild."); - } - else - { - print_admin_msg(thd, "error", table_share->db.str, table->alias, - opt_op_name[CHECK_PARTS], - KEY_PARTITIONING_CHANGED_STR, - db_name.c_ptr_safe(), table_name.c_ptr_safe(), - part_buf); - } - m_part_info->key_algorithm= old_algorithm; - DBUG_RETURN(error); - } - default: - /* Not affected! */ - ; - } - } - } - - DBUG_RETURN(error); -} - - struct st_mysql_storage_engine partition_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; diff --git a/sql/ha_partition.h b/sql/ha_partition.h index fd1056d7b3f..fc1f1a600d0 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -2,24 +2,21 @@ #define HA_PARTITION_INCLUDED /* - Copyright (c) 2005, 2013, Oracle and/or its affiliates. + Copyright (c) 2005, 2012, Oracle and/or its affiliates. + Copyright (c) 2009, 2013, Monty Program Ab & SkySQL Ab. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
+ This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifdef __GNUC__ -#pragma interface /* gcc class implementation */ -#endif + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "sql_partition.h" /* part_id_range, partition_element */ #include "queues.h" /* QUEUE */ @@ -27,27 +24,98 @@ enum partition_keywords { PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR, - PKW_COLUMNS, PKW_ALGORITHM + PKW_COLUMNS }; + #define PARTITION_BYTES_IN_POS 2 -#define PARTITION_ENABLED_TABLE_FLAGS (HA_FILE_BASED | \ - HA_REC_NOT_IN_SEQ | \ - HA_CAN_REPAIR) -#define PARTITION_DISABLED_TABLE_FLAGS (HA_CAN_GEOMETRY | \ - HA_CAN_FULLTEXT | \ - HA_DUPLICATE_POS | \ - HA_CAN_SQL_HANDLER | \ - HA_CAN_INSERT_DELAYED) - -/* First 4 bytes in the .par file is the number of 32-bit words in the file */ -#define PAR_WORD_SIZE 4 -/* offset to the .par file checksum */ -#define PAR_CHECKSUM_OFFSET 4 -/* offset to the total number of partitions */ -#define PAR_NUM_PARTS_OFFSET 8 -/* offset to the engines array */ -#define PAR_ENGINES_OFFSET 12 + + +/** Struct used for partition_name_hash */ +typedef struct st_part_name_def +{ + uchar *partition_name; + uint length; + uint32 part_id; + my_bool is_subpart; +} PART_NAME_DEF; + +/** class where to save partitions Handler_share's */ +class Parts_share_refs +{ +public: + uint num_parts; /**< Size of ha_share array */ + Handler_share **ha_shares; /**< Storage for each part */ + 
Parts_share_refs() + { + num_parts= 0; + ha_shares= NULL; + } + ~Parts_share_refs() + { + uint i; + for (i= 0; i < num_parts; i++) + if (ha_shares[i]) + delete ha_shares[i]; + if (ha_shares) + delete [] ha_shares; + } + bool init(uint arg_num_parts) + { + DBUG_ASSERT(!num_parts && !ha_shares); + num_parts= arg_num_parts; + /* Allocate an array of Handler_share pointers */ + ha_shares= new Handler_share *[num_parts]; + if (!ha_shares) + { + num_parts= 0; + return true; + } + memset(ha_shares, 0, sizeof(Handler_share*) * num_parts); + return false; + } +}; + + +/** + Partition specific Handler_share. +*/ +class Partition_share : public Handler_share +{ +public: + bool auto_inc_initialized; + mysql_mutex_t auto_inc_mutex; /**< protecting auto_inc val */ + ulonglong next_auto_inc_val; /**< first non reserved value */ + /** + Hash of partition names. Initialized in the first ha_partition::open() + for the table_share. After that it is read-only, i.e. no locking required. + */ + bool partition_name_hash_initialized; + HASH partition_name_hash; + /** Storage for each partitions Handler_share */ + Parts_share_refs *partitions_share_refs; + Partition_share() {} + ~Partition_share() + { + DBUG_ENTER("Partition_share::~Partition_share"); + mysql_mutex_destroy(&auto_inc_mutex); + if (partition_name_hash_initialized) + my_hash_free(&partition_name_hash); + if (partitions_share_refs) + delete partitions_share_refs; + DBUG_VOID_RETURN; + } + bool init(uint num_parts); + void lock_auto_inc() + { + mysql_mutex_lock(&auto_inc_mutex); + } + void unlock_auto_inc() + { + mysql_mutex_unlock(&auto_inc_mutex); + } +}; + class ha_partition :public handler { @@ -58,13 +126,14 @@ private: partition_index_first= 1, partition_index_first_unordered= 2, partition_index_last= 3, - partition_read_range = 4, - partition_no_index_scan= 5 + partition_index_read_last= 4, + partition_read_range = 5, + partition_no_index_scan= 6 }; /* Data for the partition handler */ int m_mode; // Open mode uint 
m_open_test_lock; // Open test_if_locked - char *m_file_buffer; // Content of the .par file + uchar *m_file_buffer; // Content of the .par file char *m_name_buffer_ptr; // Pointer to first partition name MEM_ROOT m_mem_root; plugin_ref *m_engine_array; // Array of types of the handlers @@ -86,7 +155,6 @@ private: */ KEY *m_curr_key_info[3]; // Current index uchar *m_rec0; // table->record[0] - const uchar *m_err_rec; // record which gave error QUEUE m_queue; // Prio queue used by sorted read /* Since the partition handler is a handler on top of other handlers, it @@ -108,8 +176,6 @@ private: uint m_tot_parts; // Total number of partitions; uint m_num_locks; // For engines like ha_blackhole, which needs no locks uint m_last_part; // Last file that we update,write,read - int m_lock_type; // Remembers type of last - // external_lock part_id_range m_part_spec; // Which parts to scan uint m_scan_value; // Value passed in rnd_init // call @@ -179,16 +245,25 @@ private: ha_rows m_bulk_inserted_rows; /** used for prediction of start_bulk_insert rows */ enum_monotonicity_info m_part_func_monotonicity_info; + /** keep track of locked partitions */ + MY_BITMAP m_locked_partitions; + /** Stores shared auto_increment etc. */ + Partition_share *part_share; + /** Temporary storage for new partitions Handler_shares during ALTER */ + List<Parts_share_refs> m_new_partitions_share_refs; /** Sorted array of partition ids in descending order of number of rows. */ uint32 *m_part_ids_sorted_by_num_of_records; /* Compare function for my_qsort2, for reversed order. */ static int compare_number_of_records(ha_partition *me, const uint32 *a, const uint32 *b); + /** keep track of partitions to call ha_reset */ + MY_BITMAP m_partitions_to_reset; /** partitions that returned HA_ERR_KEY_NOT_FOUND. 
*/ MY_BITMAP m_key_not_found_partitions; bool m_key_not_found; public: + Partition_share *get_part_share() { return part_share; } handler *clone(const char *name, MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) { @@ -263,18 +338,16 @@ public: virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info, uint table_changes); private: - int prepare_for_rename(); int copy_partitions(ulonglong * const copied, ulonglong * const deleted); void cleanup_new_partition(uint part_count); int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info, handler *file, const char *part_name, partition_element *p_elem); /* - delete_table, rename_table and create uses very similar logic which + delete_table and rename_table uses very similar logic which is packed into this routine. */ - uint del_ren_cre_table(const char *from, const char *to, - TABLE *table_arg, HA_CREATE_INFO *create_info); + uint del_ren_table(const char *from, const char *to); /* One method to create the table_name.par file containing the names of the underlying partitions, their engine and the number of partitions. @@ -291,9 +364,16 @@ private: int set_up_table_before_create(TABLE *table_arg, const char *partition_name_with_path, HA_CREATE_INFO *info, - uint part_id, partition_element *p_elem); partition_element *find_partition_element(uint part_id); + bool insert_partition_name_in_hash(const char *name, uint part_id, + bool is_subpart); + bool populate_partition_name_hash(); + Partition_share *get_share(); + bool set_ha_share_ref(Handler_share **ha_share); + void fix_data_dir(char* path); + bool init_partition_bitmaps(); + void free_partition_bitmaps(); public: @@ -311,8 +391,6 @@ public: If the object was opened it will also be closed before being deleted. 
*/ virtual int open(const char *name, int mode, uint test_if_locked); - virtual void unbind_psi(); - virtual void rebind_psi(); virtual int close(void); /* @@ -355,6 +433,18 @@ public: virtual void try_semi_consistent_read(bool); /* + NOTE: due to performance and resource issues with many partitions, + we only use the m_psi on the ha_partition handler, excluding all + partitions m_psi. + */ +#ifdef HAVE_M_PSI_PER_PARTITION + /* + Bind the table/handler thread to track table i/o. + */ + virtual void unbind_psi(); + virtual void rebind_psi(); +#endif + /* ------------------------------------------------------------------------- MODULE change record ------------------------------------------------------------------------- @@ -399,10 +489,13 @@ public: virtual bool is_fatal_error(int error, uint flags) { if (!handler::is_fatal_error(error, flags) || - error == HA_ERR_NO_PARTITION_FOUND) + error == HA_ERR_NO_PARTITION_FOUND || + error == HA_ERR_NOT_IN_LOCK_PARTITIONS) return FALSE; return TRUE; } + + /* ------------------------------------------------------------------------- MODULE full table scan @@ -527,7 +620,6 @@ private: int handle_ordered_next(uchar * buf, bool next_same); int handle_ordered_prev(uchar * buf); void return_top_record(uchar * buf); - void column_bitmaps_signal(); public: /* ------------------------------------------------------------------------- @@ -553,13 +645,17 @@ public: private: my_bool reg_query_cache_dependant_table(THD *thd, - char *key, uint key_len, uint8 type, + char *engine_key, + uint engine_key_len, + char *query_key, uint query_key_len, + uint8 type, Query_cache *cache, Query_cache_block_table **block_table, handler *file, uint *n); static const uint NO_CURRENT_PART_ID; int loop_extra(enum ha_extra_function operation); + int loop_extra_alter(enum ha_extra_function operations); void late_extra_cache(uint partition_id); void late_extra_no_cache(uint partition_id); void prepare_extra_cache(uint cachesize); @@ -628,6 +724,9 @@ public: 
virtual uint8 table_cache_type(); virtual ha_rows records(); + /* Calculate hash value for PARTITION BY KEY tables. */ + static uint32 calculate_key_hash_value(Field **field_array); + /* ------------------------------------------------------------------------- MODULE print messages @@ -643,6 +742,9 @@ public: */ virtual const char *index_type(uint inx); + /* The name of the table type that will be used for display purposes */ + virtual const char *table_type() const; + /* The name of the row type used for the underlying tables. */ virtual enum row_type get_row_type() const; @@ -804,17 +906,7 @@ public: HA_CAN_INSERT_DELAYED, HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is disabled until further investigated. */ - virtual Table_flags table_flags() const - { - DBUG_ENTER("ha_partition::table_flags"); - if (m_handler_status < handler_initialized || - m_handler_status >= handler_closed) - DBUG_RETURN(PARTITION_ENABLED_TABLE_FLAGS); - - DBUG_RETURN((m_file[0]->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS)); - } + virtual Table_flags table_flags() const; /* This is a bitmap of flags that says how the storage engine @@ -955,16 +1047,15 @@ private: /* lock already taken */ if (auto_increment_safe_stmt_log_lock) return; - DBUG_ASSERT(table_share->ha_part_data && !auto_increment_lock); + DBUG_ASSERT(!auto_increment_lock); if(table_share->tmp_table == NO_TMP_TABLE) { auto_increment_lock= TRUE; - mysql_mutex_lock(&table_share->ha_part_data->LOCK_auto_inc); + part_share->lock_auto_inc(); } } virtual void unlock_auto_increment() { - DBUG_ASSERT(table_share->ha_part_data); /* If auto_increment_safe_stmt_log_lock is true, we have to keep the lock. 
It will be set to false and thus unlocked at the end of the statement by @@ -972,7 +1063,7 @@ private: */ if(auto_increment_lock && !auto_increment_safe_stmt_log_lock) { - mysql_mutex_unlock(&table_share->ha_part_data->LOCK_auto_inc); + part_share->unlock_auto_inc(); auto_increment_lock= FALSE; } } @@ -981,10 +1072,10 @@ private: ulonglong nr= (((Field_num*) field)->unsigned_flag || field->val_int() > 0) ? field->val_int() : 0; lock_auto_increment(); - DBUG_ASSERT(table_share->ha_part_data->auto_inc_initialized == TRUE); + DBUG_ASSERT(part_share->auto_inc_initialized); /* must check when the mutex is taken */ - if (nr >= table_share->ha_part_data->next_auto_inc_val) - table_share->ha_part_data->next_auto_inc_val= nr + 1; + if (nr >= part_share->next_auto_inc_val) + part_share->next_auto_inc_val= nr + 1; unlock_auto_increment(); } @@ -1050,18 +1141,23 @@ public: /* ------------------------------------------------------------------------- - MODULE on-line ALTER TABLE + MODULE in-place ALTER TABLE ------------------------------------------------------------------------- These methods are in the handler interface. 
(used by innodb-plugin) - They are used for on-line/fast alter table add/drop index: + They are used for in-place alter table: ------------------------------------------------------------------------- */ - virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys, - handler_add_index **add); - virtual int final_add_index(handler_add_index *add, bool commit); - virtual int prepare_drop_index(TABLE *table_arg, uint *key_num, - uint num_of_keys); - virtual int final_drop_index(TABLE *table_arg); + virtual enum_alter_inplace_result + check_if_supported_inplace_alter(TABLE *altered_table, + Alter_inplace_info *ha_alter_info); + virtual bool prepare_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info); + virtual bool inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info); + virtual bool commit_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info, + bool commit); + virtual void notify_table_changed(); /* ------------------------------------------------------------------------- @@ -1092,18 +1188,9 @@ public: virtual bool check_and_repair(THD *thd); virtual bool auto_repair(int error) const; virtual bool is_crashed() const; - virtual int check_for_upgrade(HA_CHECK_OPT *check_opt); private: int handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, uint flags); - int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, uint part_id, - uint flag); - /** - Check if the rows are placed in the correct partition. If the given - argument is true, then move the rows to the correct partition. 
- */ - int check_misplaced_rows(uint read_part_id, bool repair); - void append_row_to_str(String &str); public: /* ------------------------------------------------------------------------- @@ -1115,8 +1202,8 @@ public: virtual int restore(THD* thd, HA_CHECK_OPT *check_opt); virtual int dump(THD* thd, int fd = -1); virtual int net_read_dump(NET* net); - virtual uint checksum() const; */ + virtual uint checksum() const; /* Enabled keycache for performance reasons, WL#4571 */ virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt); virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt); diff --git a/sql/handler.cc b/sql/handler.cc index 726b663341b..685bb6e6c30 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -62,7 +62,7 @@ static handlerton *installed_htons[128]; #define BITMAP_STACKBUF_SIZE (128/8) KEY_CREATE_INFO default_key_create_info= - { HA_KEY_ALG_UNDEF, 0, {NullS, 0}, {NullS, 0} }; +{ HA_KEY_ALG_UNDEF, 0, {NullS, 0}, {NullS, 0}, true }; /* number of entries in handlertons[] */ ulong total_ha= 0; @@ -99,6 +99,7 @@ uint known_extensions_id= 0; static int commit_one_phase_2(THD *thd, bool all, THD_TRANS *trans, bool is_real_trans); + static plugin_ref ha_default_plugin(THD *thd) { if (thd->variables.table_plugin) @@ -1142,10 +1143,11 @@ int ha_prepare(THD *thd) } else { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_GET_ERRNO, ER(ER_GET_ERRNO), HA_ERR_WRONG_COMMAND, ha_resolve_storage_engine_name(ht)); + } } } @@ -1251,7 +1253,7 @@ int ha_commit_trans(THD *thd, bool all) /* Just a random warning to test warnings pushed during autocommit. 
*/ DBUG_EXECUTE_IF("warn_during_ha_commit_trans", - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARNING_NOT_COMPLETE_ROLLBACK, ER(ER_WARNING_NOT_COMPLETE_ROLLBACK));); @@ -1571,7 +1573,7 @@ int ha_rollback_trans(THD *thd, bool all) trans->no_2pc=0; if (is_real_trans && thd->transaction_rollback_request && thd->transaction.xid_state.xa_state != XA_NOTR) - thd->transaction.xid_state.rm_error= thd->stmt_da->sql_errno(); + thd->transaction.xid_state.rm_error= thd->get_stmt_da()->sql_errno(); } /* Always cleanup. Even if nht==0. There may be savepoints. */ if (is_real_trans) @@ -1594,7 +1596,7 @@ int ha_rollback_trans(THD *thd, bool all) */ if (is_real_trans && thd->transaction.all.modified_non_trans_table && !thd->slave_thread && thd->killed < KILL_CONNECTION) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARNING_NOT_COMPLETE_ROLLBACK, ER(ER_WARNING_NOT_COMPLETE_ROLLBACK)); (void) RUN_HOOK(transaction, after_rollback, (thd, FALSE)); @@ -2087,7 +2089,7 @@ int ha_start_consistent_snapshot(THD *thd) exist: */ if (warn) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, "This MySQL server does not support any " "consistent-read capable storage engine"); return 0; @@ -2183,9 +2185,9 @@ public: virtual bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); char buff[MYSQL_ERRMSG_SIZE]; }; @@ -2195,9 +2197,9 @@ Ha_delete_table_error_handler:: handle_condition(THD *, uint, const char*, - MYSQL_ERROR::enum_warning_level, + Sql_condition::enum_warning_level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; /* Grab the error message */ @@ -2262,7 +2264,7 @@ int 
ha_delete_table(THD *thd, handlerton *table_type, const char *path, XXX: should we convert *all* errors to warnings here? What if the error is fatal? */ - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, error, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, error, ha_delete_table_error_handler.buff); } delete file; @@ -2276,8 +2278,11 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, handler *handler::clone(const char *name, MEM_ROOT *mem_root) { handler *new_handler= get_new_handler(table->s, mem_root, ht); - if (! new_handler) + + if (!new_handler) return NULL; + if (new_handler->set_ha_share_ref(ha_share)) + goto err; /* Allocate handler->ref here because otherwise ha_open will allocate it @@ -2287,7 +2292,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root) if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2))) - return NULL; + goto err; /* TODO: Implement a more efficient way to have more than one index open for @@ -2298,9 +2303,13 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root) */ if (new_handler->ha_open(table, name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) - return NULL; + goto err; return new_handler; + +err: + delete new_handler; + return NULL; } @@ -2377,6 +2386,8 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode, table= table_arg; DBUG_ASSERT(table->s == table_share); + DBUG_ASSERT(m_lock_type == F_UNLCK); + DBUG_PRINT("info", ("old m_lock_type: %d F_UNLCK %d", m_lock_type, F_UNLCK)); DBUG_ASSERT(alloc_root_inited(&table->mem_root)); if ((error=open(name,mode,test_if_locked))) @@ -2397,7 +2408,15 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode, { DBUG_ASSERT(m_psi == NULL); DBUG_ASSERT(table_share != NULL); - m_psi= PSI_CALL_open_table(ha_table_share_psi(), this); + /* + Do not call this for partitions handlers, since it may take too much + resources. + So only use the m_psi on table level, not for individual partitions. 
+ */ + if (!(test_if_locked & HA_OPEN_NO_PSI_CALL)) + { + m_psi= PSI_CALL_open_table(ha_table_share_psi(), this); + } if (table->s->db_options_in_use & HA_OPTION_READ_ONLY_DATA) table->db_stat|=HA_READ_ONLY; @@ -2431,12 +2450,18 @@ int handler::ha_close(void) PSI_CALL_close_table(m_psi); m_psi= NULL; /* instrumentation handle, invalid after close_table() */ + DBUG_ASSERT(m_lock_type == F_UNLCK); + DBUG_ASSERT(inited == NONE); DBUG_RETURN(close()); } int handler::ha_rnd_next(uchar *buf) { int result; + DBUG_ENTER("handler::ha_rnd_next"); + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); + DBUG_ASSERT(inited == RND); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, MAX_KEY, 0, { result= rnd_next(buf); }) @@ -2451,12 +2476,17 @@ int handler::ha_rnd_next(uchar *buf) increment_statistics(&SSV::ha_read_rnd_next_count); table->status=result ? STATUS_NOT_FOUND: 0; - return result; + DBUG_RETURN(result); } int handler::ha_rnd_pos(uchar *buf, uchar *pos) { int result; + DBUG_ENTER("handler::ha_rnd_pos"); + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); + /* TODO: Find out how to solve ha_rnd_pos when finding duplicate update. */ + /* DBUG_ASSERT(inited == RND); */ MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, MAX_KEY, 0, { result= rnd_pos(buf, pos); }) @@ -2464,7 +2494,7 @@ int handler::ha_rnd_pos(uchar *buf, uchar *pos) if (!result) update_rows_read(); table->status=result ? 
STATUS_NOT_FOUND: 0; - return result; + DBUG_RETURN(result); } int handler::ha_index_read_map(uchar *buf, const uchar *key, @@ -2472,6 +2502,9 @@ int handler::ha_index_read_map(uchar *buf, const uchar *key, enum ha_rkey_function find_flag) { int result; + DBUG_ENTER("handler::ha_index_read_map"); + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); DBUG_ASSERT(inited==INDEX); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, @@ -2480,7 +2513,7 @@ int handler::ha_index_read_map(uchar *buf, const uchar *key, if (!result) update_index_statistics(); table->status=result ? STATUS_NOT_FOUND: 0; - return result; + DBUG_RETURN(result); } /* @@ -2495,6 +2528,8 @@ int handler::ha_index_read_idx_map(uchar *buf, uint index, const uchar *key, { int result; DBUG_ASSERT(inited==NONE); + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); DBUG_ASSERT(end_range == NULL); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, index, 0, { result= index_read_idx_map(buf, index, key, keypart_map, find_flag); }) @@ -2511,6 +2546,9 @@ int handler::ha_index_read_idx_map(uchar *buf, uint index, const uchar *key, int handler::ha_index_next(uchar * buf) { int result; + DBUG_ENTER("handler::ha_index_next"); + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); DBUG_ASSERT(inited==INDEX); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, @@ -2519,12 +2557,15 @@ int handler::ha_index_next(uchar * buf) if (!result) update_index_statistics(); table->status=result ? 
STATUS_NOT_FOUND: 0; - return result; + DBUG_RETURN(result); } int handler::ha_index_prev(uchar * buf) { int result; + DBUG_ENTER("handler::ha_index_prev"); + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); DBUG_ASSERT(inited==INDEX); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, @@ -2533,12 +2574,14 @@ int handler::ha_index_prev(uchar * buf) if (!result) update_index_statistics(); table->status=result ? STATUS_NOT_FOUND: 0; - return result; + DBUG_RETURN(result); } int handler::ha_index_first(uchar * buf) { int result; + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); DBUG_ASSERT(inited==INDEX); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, @@ -2553,6 +2596,8 @@ int handler::ha_index_first(uchar * buf) int handler::ha_index_last(uchar * buf) { int result; + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); DBUG_ASSERT(inited==INDEX); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, @@ -2567,6 +2612,8 @@ int handler::ha_index_last(uchar * buf) int handler::ha_index_next_same(uchar *buf, const uchar *key, uint keylen) { int result; + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); DBUG_ASSERT(inited==INDEX); MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0, @@ -3090,6 +3137,9 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment, void handler::ha_release_auto_increment() { DBUG_ENTER("ha_release_auto_increment"); + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK || + (!next_insert_id && !insert_id_for_cur_row)); release_auto_increment(); insert_id_for_cur_row= 0; auto_inc_interval_for_cur_row.replace(0, 0, 0); @@ -3107,13 +3157,25 @@ void handler::ha_release_auto_increment() } -void handler::print_keydup_error(uint key_nr, const char *msg, myf errflag) +/** + Construct and emit duplicate key error message using 
information + from table's record buffer. + + @param table TABLE object which record buffer should be used as + source for column values. + @param key Key description. + @param msg Error message template to which key value should be + added. + @param errflag Flags for my_error() call. +*/ + +void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag) { /* Write the duplicated key in the error message */ - char key[MAX_KEY_LENGTH]; - String str(key,sizeof(key),system_charset_info); + char key_buff[MAX_KEY_LENGTH]; + String str(key_buff,sizeof(key_buff),system_charset_info); - if (key_nr == MAX_KEY) + if (key == NULL) { /* Key is unknown */ str.copy("", 0, system_charset_info); @@ -3122,18 +3184,29 @@ void handler::print_keydup_error(uint key_nr, const char *msg, myf errflag) else { /* Table is opened and defined at this point */ - key_unpack(&str,table,(uint) key_nr); + key_unpack(&str,table, key); uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(msg); if (str.length() >= max_length) { str.length(max_length-4); str.append(STRING_WITH_LEN("...")); } - my_printf_error(ER_DUP_ENTRY, msg, - errflag, str.c_ptr_safe(), table->key_info[key_nr].name); + my_printf_error(ER_DUP_ENTRY, msg, errflag, str.c_ptr_safe(), key->name); } } +/** + Construct and emit duplicate key error message using information + from table's record buffer. + + @sa print_keydup_error(table, key, msg, errflag). +*/ + +void print_keydup_error(TABLE *table, KEY *key, myf errflag) +{ + print_keydup_error(table, key, ER(ER_DUP_ENTRY_WITH_KEY_NAME), errflag); +} + /** Print error that we got from handler function. @@ -3198,7 +3271,9 @@ void handler::print_error(int error, myf errflag) uint key_nr=get_dup_key(error); if ((int) key_nr >= 0) { - print_keydup_error(key_nr, ER(ER_DUP_ENTRY_WITH_KEY_NAME), errflag); + print_keydup_error(table, + key_nr == MAX_KEY ? 
NULL : &table->key_info[key_nr], + errflag); DBUG_VOID_RETURN; } } @@ -3210,9 +3285,12 @@ void handler::print_error(int error, myf errflag) char rec_buf[MAX_KEY_LENGTH]; String rec(rec_buf, sizeof(rec_buf), system_charset_info); /* Table is opened and defined at this point */ - key_unpack(&rec, table, 0 /* just print the subset of fields that are - part of the first index, printing the whole - row from there is not easy */); + + /* + Just print the subset of fields that are part of the first index, + printing the whole row from there is not easy. + */ + key_unpack(&rec, table, &table->key_info[0]); char child_table_name[NAME_LEN + 1]; char child_key_name[NAME_LEN + 1]; @@ -3349,7 +3427,7 @@ void handler::print_error(int error, myf errflag) case HA_ERR_AUTOINC_ERANGE: textno= error; my_error(textno, errflag, table->next_number_field->field_name, - table->in_use->warning_info->current_row_for_warning()); + table->in_use->get_stmt_da()->current_row_for_warning()); DBUG_VOID_RETURN; break; case HA_ERR_TOO_MANY_CONCURRENT_TRXS: @@ -3358,6 +3436,9 @@ void handler::print_error(int error, myf errflag) case HA_ERR_INDEX_COL_TOO_LONG: textno= ER_INDEX_COLUMN_TOO_LONG; break; + case HA_ERR_NOT_IN_LOCK_PARTITIONS: + textno=ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET; + break; case HA_ERR_INDEX_CORRUPT: textno= ER_INDEX_CORRUPT; break; @@ -3455,7 +3536,7 @@ int handler::check_collation_compatibility() for (; key < key_end; key++) { KEY_PART_INFO *key_part= key->key_part; - KEY_PART_INFO *key_part_end= key_part + key->key_parts; + KEY_PART_INFO *key_part_end= key_part + key->user_defined_key_parts; for (; key_part < key_part_end; key_part++) { if (!key_part->fieldnr) @@ -3496,7 +3577,7 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt) for (; keyinfo < keyend; keyinfo++) { keypart= keyinfo->key_part; - keypartend= keypart + keyinfo->key_parts; + keypartend= keypart + keyinfo->user_defined_key_parts; for (; keypart < keypartend; keypart++) { if (!keypart->fieldnr) @@ 
-3588,6 +3669,8 @@ err: */ uint handler::get_dup_key(int error) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); DBUG_ENTER("handler::get_dup_key"); table->file->errkey = (uint) -1; if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOREIGN_DUPLICATE_KEY || @@ -3698,6 +3781,8 @@ void handler::drop_table(const char *name) int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt) { int error; + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); if ((table->s->mysql_version >= MYSQL_VERSION_ID) && (check_opt->sql_flags & TT_FOR_UPGRADE)) @@ -3787,6 +3872,8 @@ int handler::ha_bulk_update_row(const uchar *old_data, uchar *new_data, uint *dup_key_found) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); mark_trx_read_write(); return bulk_update_row(old_data, new_data, dup_key_found); @@ -3802,6 +3889,8 @@ handler::ha_bulk_update_row(const uchar *old_data, uchar *new_data, int handler::ha_delete_all_rows() { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); mark_trx_read_write(); return delete_all_rows(); @@ -3817,6 +3906,8 @@ handler::ha_delete_all_rows() int handler::ha_truncate() { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); mark_trx_read_write(); return truncate(); @@ -3832,6 +3923,8 @@ handler::ha_truncate() int handler::ha_reset_auto_increment(ulonglong value) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); mark_trx_read_write(); return reset_auto_increment(value); @@ -3847,6 +3940,8 @@ handler::ha_reset_auto_increment(ulonglong value) int handler::ha_optimize(THD* thd, HA_CHECK_OPT* check_opt) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); mark_trx_read_write(); return optimize(thd, check_opt); @@ -3862,6 +3957,8 @@ handler::ha_optimize(THD* thd, HA_CHECK_OPT* check_opt) int handler::ha_analyze(THD* thd, HA_CHECK_OPT* 
check_opt) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); mark_trx_read_write(); return analyze(thd, check_opt); @@ -3877,6 +3974,8 @@ handler::ha_analyze(THD* thd, HA_CHECK_OPT* check_opt) bool handler::ha_check_and_repair(THD *thd) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_UNLCK); mark_trx_read_write(); return check_and_repair(thd); @@ -3892,6 +3991,8 @@ handler::ha_check_and_repair(THD *thd) int handler::ha_disable_indexes(uint mode) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); mark_trx_read_write(); return disable_indexes(mode); @@ -3907,6 +4008,8 @@ handler::ha_disable_indexes(uint mode) int handler::ha_enable_indexes(uint mode) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); mark_trx_read_write(); return enable_indexes(mode); @@ -3922,26 +4025,116 @@ handler::ha_enable_indexes(uint mode) int handler::ha_discard_or_import_tablespace(my_bool discard) { + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); mark_trx_read_write(); return discard_or_import_tablespace(discard); } -/** - Prepare for alter: public interface. +bool handler::ha_prepare_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info) +{ + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); + mark_trx_read_write(); + + return prepare_inplace_alter_table(altered_table, ha_alter_info); +} - Called to prepare an *online* ALTER. - @sa handler::prepare_for_alter() +bool handler::ha_commit_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info, + bool commit) +{ + /* + At this point we should have an exclusive metadata lock on the table. + The exception is if we're about to roll back changes (commit= false). + In this case, we might be rolling back after a failed lock upgrade, + so we could be holding the same lock level as for inplace_alter_table(). 
+ */ + DBUG_ASSERT(ha_thd()->mdl_context.is_lock_owner(MDL_key::TABLE, + table->s->db.str, + table->s->table_name.str, + MDL_EXCLUSIVE) || + !commit); + + return commit_inplace_alter_table(altered_table, ha_alter_info, commit); +} + + +/* + Default implementation to support in-place alter table + and old online add/drop index API */ -void -handler::ha_prepare_for_alter() +enum_alter_inplace_result +handler::check_if_supported_inplace_alter(TABLE *altered_table, + Alter_inplace_info *ha_alter_info) { - mark_trx_read_write(); + DBUG_ENTER("check_if_supported_alter"); + + HA_CREATE_INFO *create_info= ha_alter_info->create_info; + + Alter_inplace_info::HA_ALTER_FLAGS inplace_offline_operations= + Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH | + Alter_inplace_info::ALTER_COLUMN_NAME | + Alter_inplace_info::ALTER_COLUMN_DEFAULT | + Alter_inplace_info::CHANGE_CREATE_OPTION | + Alter_inplace_info::ALTER_RENAME; + + /* Is there at least one operation that requires copy algorithm? */ + if (ha_alter_info->handler_flags & ~inplace_offline_operations) + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + + /* + ALTER TABLE tbl_name CONVERT TO CHARACTER SET .. and + ALTER TABLE table_name DEFAULT CHARSET = .. most likely + change column charsets and so not supported in-place through + old API. + + Changing of PACK_KEYS, MAX_ROWS and ROW_FORMAT options were + not supported as in-place operations in old API either. + */ + if (create_info->used_fields & (HA_CREATE_USED_CHARSET | + HA_CREATE_USED_DEFAULT_CHARSET | + HA_CREATE_USED_PACK_KEYS | + HA_CREATE_USED_MAX_ROWS) || + (table->s->row_type != create_info->row_type)) + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); + + uint table_changes= (ha_alter_info->handler_flags & + Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH) ? 
+ IS_EQUAL_PACK_LENGTH : IS_EQUAL_YES; + if (table->file->check_if_incompatible_data(create_info, table_changes) + == COMPATIBLE_DATA_YES) + DBUG_RETURN(HA_ALTER_INPLACE_EXCLUSIVE_LOCK); - prepare_for_alter(); + DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED); +} + + +/* + Default implementation to support in-place alter table + and old online add/drop index API +*/ + +void handler::notify_table_changed() +{ + ha_create_partitioning_metadata(table->s->path.str, NULL, CHF_INDEX_FLAG); +} + + +void Alter_inplace_info::report_unsupported_error(const char *not_supported, + const char *try_instead) +{ + if (unsupported_reason == NULL) + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0), + not_supported, try_instead); + else + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), + not_supported, unsupported_reason, try_instead); } @@ -3954,6 +4147,7 @@ handler::ha_prepare_for_alter() int handler::ha_rename_table(const char *from, const char *to) { + DBUG_ASSERT(m_lock_type == F_UNLCK); mark_trx_read_write(); return rename_table(from, to); @@ -3986,6 +4180,7 @@ handler::ha_delete_table(const char *name) void handler::ha_drop_table(const char *name) { + DBUG_ASSERT(m_lock_type == F_UNLCK); mark_trx_read_write(); return drop_table(name); @@ -4001,6 +4196,7 @@ handler::ha_drop_table(const char *name) int handler::ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info) { + DBUG_ASSERT(m_lock_type == F_UNLCK); mark_trx_read_write(); int error= create(name, form, info); if (!error && @@ -4020,6 +4216,13 @@ int handler::ha_create_partitioning_metadata(const char *name, const char *old_name, int action_flag) { + /* + Normally this is done when unlocked, but in fast_alter_partition_table, + it is done on an already locked handler when preparing to alter/rename + partitions. 
+ */ + DBUG_ASSERT(m_lock_type == F_UNLCK || + (!old_name && strcmp(name, table_share->path.str))); mark_trx_read_write(); return create_partitioning_metadata(name, old_name, action_flag); @@ -4039,7 +4242,13 @@ handler::ha_change_partitions(HA_CREATE_INFO *create_info, ulonglong * const deleted, const uchar *pack_frm_data, size_t pack_frm_len) -{ +{ /* + Must have at least RDLCK or be a TMP table. Read lock is needed to read + from current partitions and write lock will be taken on new partitions. + */ + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type != F_UNLCK); + mark_trx_read_write(); return change_partitions(create_info, path, copied, deleted, @@ -4056,6 +4265,8 @@ handler::ha_change_partitions(HA_CREATE_INFO *create_info, int handler::ha_drop_partitions(const char *path) { + DBUG_ASSERT(!table->db_stat); + mark_trx_read_write(); return drop_partitions(path); @@ -4071,6 +4282,8 @@ handler::ha_drop_partitions(const char *path) int handler::ha_rename_partitions(const char *path) { + DBUG_ASSERT(!table->db_stat); + mark_trx_read_write(); return rename_partitions(path); @@ -4131,7 +4344,7 @@ int handler::index_next_same(uchar *buf, const uchar *key, uint keylen) table->record[0]= buf; key_info= table->key_info + active_index; key_part= key_info->key_part; - key_part_end= key_part + key_info->key_parts; + key_part_end= key_part + key_info->user_defined_key_parts; for (; key_part < key_part_end; key_part++) { DBUG_ASSERT(key_part->field); @@ -4587,9 +4800,9 @@ public: bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; if (sql_errno == ER_NO_SUCH_TABLE || @@ -4600,7 +4813,7 @@ public: return TRUE; } - if (level == MYSQL_ERROR::WARN_LEVEL_ERROR) + if (level == Sql_condition::WARN_LEVEL_ERROR) m_unhandled_errors++; return FALSE; } @@ -4762,10 +4975,10 @@ 
void Discovered_table_list::remove_duplicates() { LEX_STRING **src= tables->front(); LEX_STRING **dst= src; - while (++dst < tables->back()) + while (++dst <= tables->back()) { LEX_STRING *s= *src, *d= *dst; - DBUG_ASSERT(strncmp(s->str, d->str, min(s->length, d->length)) <= 0); + DBUG_ASSERT(strncmp(s->str, d->str, MY_MIN(s->length, d->length)) <= 0); if ((s->length != d->length || strncmp(s->str, d->str, d->length))) { src++; @@ -4773,7 +4986,7 @@ void Discovered_table_list::remove_duplicates() *src= *dst; } } - tables->set_elements(src - tables->front() + 1); + tables->elements(src - tables->front() + 1); } struct st_discover_names_args @@ -5005,14 +5218,7 @@ int handler::read_range_first(const key_range *start_key, DBUG_ENTER("handler::read_range_first"); eq_range= eq_range_arg; - end_range= 0; - if (end_key) - { - end_range= &save_end_range; - save_end_range= *end_key; - key_compare_result_on_equal= ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 : - (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0); - } + set_end_range(end_key); range_key_part= table->key_info[active_index].key_part; if (!start_key) // Read first record @@ -5088,12 +5294,26 @@ int handler::read_range_next() } +void handler::set_end_range(const key_range *end_key) +{ + end_range= 0; + if (end_key) + { + end_range= &save_end_range; + save_end_range= *end_key; + key_compare_result_on_equal= + ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 : + (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0); + } +} + + /** Compare if found key (in row) is over max-value. @param range range to compare to row. May be 0 for no range - @seealso + @see also key.cc::key_cmp() @return @@ -5323,6 +5543,7 @@ static bool check_table_binlog_row_based(THD *thd, TABLE *table) if (table->s->cached_row_logging_check == -1) { int const check(table->s->tmp_table == NO_TMP_TABLE && + ! 
table->no_replicate && binlog_filter->db_ok(table->s->db.str)); table->s->cached_row_logging_check= check; } @@ -5430,8 +5651,6 @@ static int binlog_log_row(TABLE* table, const uchar *after_record, Log_func *log_func) { - if (table->no_replicate) - return 0; bool error= 0; THD *const thd= table->in_use; @@ -5486,6 +5705,12 @@ int handler::ha_external_lock(THD *thd, int lock_type) taken a table lock), ha_release_auto_increment() was too. */ DBUG_ASSERT(next_insert_id == 0); + /* Consecutive calls for lock without unlocking in between is not allowed */ + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + ((lock_type != F_UNLCK && m_lock_type == F_UNLCK) || + lock_type == F_UNLCK)); + /* SQL HANDLER call locks/unlock while scanning (RND/INDEX). */ + DBUG_ASSERT(inited == NONE || table->open_by_handler); if (MYSQL_HANDLER_RDLOCK_START_ENABLED() || MYSQL_HANDLER_WRLOCK_START_ENABLED() || @@ -5519,6 +5744,7 @@ int handler::ha_external_lock(THD *thd, int lock_type) if (error == 0) { + m_lock_type= lock_type; cached_table_flags= table_flags(); if (table_share->tmp_table == NO_TMP_TABLE) mysql_audit_external_lock(thd, table_share, lock_type); @@ -5575,6 +5801,8 @@ int handler::ha_write_row(uchar *buf) { int error; Log_func *log_func= Write_rows_log_event::binlog_row_logging_function; + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); DBUG_ENTER("handler::ha_write_row"); DEBUG_SYNC_C("ha_write_row_start"); DBUG_EXECUTE_IF("inject_error_ha_write_row", @@ -5603,6 +5831,8 @@ int handler::ha_update_row(const uchar *old_data, uchar *new_data) { int error; Log_func *log_func= Update_rows_log_event::binlog_row_logging_function; + DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); /* Some storage engines require that the new record is in record[0] @@ -5638,6 +5868,8 @@ int handler::ha_delete_row(const uchar *buf) buf == table->record[1]); DBUG_EXECUTE_IF("inject_error_ha_delete_row", return HA_ERR_INTERNAL_ERROR; ); 
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || + m_lock_type == F_WRLCK); MYSQL_DELETE_ROW_START(table_share->db.str, table_share->table_name.str); mark_trx_read_write(); @@ -5668,6 +5900,77 @@ void handler::use_hidden_primary_key() } +/** + Get an initialized ha_share. + + @return Initialized ha_share + @retval NULL ha_share is not yet initialized. + @retval != NULL previous initialized ha_share. + + @note + If not a temp table, then LOCK_ha_data must be held. +*/ + +Handler_share *handler::get_ha_share_ptr() +{ + DBUG_ENTER("handler::get_ha_share_ptr"); + DBUG_ASSERT(ha_share && table_share); + +#ifndef DBUG_OFF + if (table_share->tmp_table == NO_TMP_TABLE) + mysql_mutex_assert_owner(&table_share->LOCK_ha_data); +#endif + + DBUG_RETURN(*ha_share); +} + + +/** + Set ha_share to be used by all instances of the same table/partition. + + @param ha_share Handler_share to be shared. + + @note + If not a temp table, then LOCK_ha_data must be held. +*/ + +void handler::set_ha_share_ptr(Handler_share *arg_ha_share) +{ + DBUG_ENTER("handler::set_ha_share_ptr"); + DBUG_ASSERT(ha_share); +#ifndef DBUG_OFF + if (table_share->tmp_table == NO_TMP_TABLE) + mysql_mutex_assert_owner(&table_share->LOCK_ha_data); +#endif + + *ha_share= arg_ha_share; + DBUG_VOID_RETURN; +} + + +/** + Take a lock for protecting shared handler data. +*/ + +void handler::lock_shared_ha_data() +{ + DBUG_ASSERT(table_share); + if (table_share->tmp_table == NO_TMP_TABLE) + mysql_mutex_lock(&table_share->LOCK_ha_data); +} + + +/** + Release lock for protecting ha_share. 
+*/ + +void handler::unlock_shared_ha_data() +{ + DBUG_ASSERT(table_share); + if (table_share->tmp_table == NO_TMP_TABLE) + mysql_mutex_unlock(&table_share->LOCK_ha_data); +} + /** @brief Dummy function which accept information about log files which is not need by handlers diff --git a/sql/handler.h b/sql/handler.h index 9f8290ee176..478317e881d 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -42,6 +42,8 @@ #error MAX_KEY is too large. Values up to 128 are supported. #endif +class Alter_info; + // the following is for checking tables #define HA_ADMIN_ALREADY_DONE 1 @@ -59,6 +61,22 @@ #define HA_ADMIN_NEEDS_ALTER -11 #define HA_ADMIN_NEEDS_CHECK -12 +/** + Return values for check_if_supported_inplace_alter(). + + @see check_if_supported_inplace_alter() for description of + the individual values. +*/ +enum enum_alter_inplace_result { + HA_ALTER_ERROR, + HA_ALTER_INPLACE_NOT_SUPPORTED, + HA_ALTER_INPLACE_EXCLUSIVE_LOCK, + HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE, + HA_ALTER_INPLACE_SHARED_LOCK, + HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE, + HA_ALTER_INPLACE_NO_LOCK +}; + /* Bits in table_flags() to show what database can do */ #define HA_NO_TRANSACTIONS (1ULL << 0) /* Doesn't support transactions */ @@ -99,8 +117,8 @@ #define HA_CAN_INSERT_DELAYED (1ULL << 14) /* If we get the primary key columns for free when we do an index read - It also implies that we have to retrive the primary key when using - position() and rnd_pos(). + (usually, it also implies that HA_PRIMARY_KEY_REQUIRED_FOR_POSITION + flag is set). */ #define HA_PRIMARY_KEY_IN_READ_INDEX (1ULL << 15) /* @@ -190,6 +208,64 @@ */ #define HA_MUST_USE_TABLE_CONDITION_PUSHDOWN (1ULL << 42) +/** + The handler supports read before write removal optimization + + Read before write removal may be used for storage engines which support + write without previous read of the row to be updated. Handler returning + this flag must implement start_read_removal() and end_read_removal(). 
+ The handler may return "fake" rows constructed from the key of the row + asked for. This is used to optimize UPDATE and DELETE by reducing the + numer of roundtrips between handler and storage engine. + + Example: + UPDATE a=1 WHERE pk IN (<keys>) + + mysql_update() + { + if (<conditions for starting read removal>) + start_read_removal() + -> handler returns true if read removal supported for this table/query + + while(read_record("pk=<key>")) + -> handler returns fake row with column "pk" set to <key> + + ha_update_row() + -> handler sends write "a=1" for row with "pk=<key>" + + end_read_removal() + -> handler returns the number of rows actually written + } + + @note This optimization in combination with batching may be used to + remove even more roundtrips. +*/ +#define HA_READ_BEFORE_WRITE_REMOVAL (1LL << 43) + +/* + Engine supports extended fulltext API + */ +#define HA_CAN_FULLTEXT_EXT (1LL << 44) + +/* + Storage engine doesn't synchronize result set with expected table contents. + Used by replication slave to check if it is possible to retrieve rows from + the table when deciding whether to do a full table scan, index scan or hash + scan while applying a row event. + */ +#define HA_READ_OUT_OF_SYNC (1LL << 45) + +/* + Storage engine supports table export using the + FLUSH TABLE <table_list> FOR EXPORT statement. + */ +#define HA_CAN_EXPORT (1LL << 46) + +/* + The handler don't want accesses to this table to + be const-table optimized +*/ +#define HA_BLOCK_CONST_TABLE (1LL << 47) /* Set of all binlog flags. Currently only contain the capabilities flags. @@ -384,9 +460,15 @@ enum legacy_db_type enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED, ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED, ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT, - /** Unused. Reserved for future versions. 
*/ ROW_TYPE_PAGE }; +/* Specifies data storage format for individual columns */ +enum column_format_type { + COLUMN_FORMAT_TYPE_DEFAULT= 0, /* Not specified (use engine default) */ + COLUMN_FORMAT_TYPE_FIXED= 1, /* FIXED format */ + COLUMN_FORMAT_TYPE_DYNAMIC= 2 /* DYNAMIC format */ +}; + enum enum_binlog_func { BFN_RESET_LOGS= 1, BFN_RESET_SLAVE= 2, @@ -431,6 +513,45 @@ enum enum_binlog_command { /* The following two are used by Maria engine: */ #define HA_CREATE_USED_TRANSACTIONAL (1L << 20) #define HA_CREATE_USED_PAGE_CHECKSUM (1L << 21) +/** This is set whenever STATS_PERSISTENT=0|1|default has been +specified in CREATE/ALTER TABLE. See also HA_OPTION_STATS_PERSISTENT in +include/my_base.h. It is possible to distinguish whether +STATS_PERSISTENT=default has been specified or no STATS_PERSISTENT= is +given at all. */ +#define HA_CREATE_USED_STATS_PERSISTENT (1L << 22) +/** + This is set whenever STATS_AUTO_RECALC=0|1|default has been + specified in CREATE/ALTER TABLE. See enum_stats_auto_recalc. + It is possible to distinguish whether STATS_AUTO_RECALC=default + has been specified or no STATS_AUTO_RECALC= is given at all. +*/ +#define HA_CREATE_USED_STATS_AUTO_RECALC (1L << 23) +/** + This is set whenever STATS_SAMPLE_PAGES=N|default has been + specified in CREATE/ALTER TABLE. It is possible to distinguish whether + STATS_SAMPLE_PAGES=default has been specified or no STATS_SAMPLE_PAGES= is + given at all. +*/ +#define HA_CREATE_USED_STATS_SAMPLE_PAGES (1L << 24) + + +/* + This is master database for most of system tables. However there + can be other databases which can hold system tables. Respective + storage engines define their own system database names. +*/ +extern const char *mysqld_system_database; + +/* + Structure to hold list of system_database.system_table. + This is used at both mysqld and storage engine layer. 
+*/ +struct st_system_tablename +{ + const char *db; + const char *tablename; +}; + typedef ulonglong my_xid; // this line is the same as in log_event.h #define MYSQL_XID_PREFIX "MySQLXid" @@ -1244,6 +1365,7 @@ static inline sys_var *find_hton_sysvar(handlerton *hton, st_mysql_sys_var *var) #define HTON_ALTER_NOT_SUPPORTED (1 << 1) //Engine does not support alter #define HTON_CAN_RECREATE (1 << 2) //Delete all is used for truncate #define HTON_HIDDEN (1 << 3) //Engine does not appear in lists +#define HTON_FLUSH_AFTER_RENAME (1 << 4) #define HTON_NOT_USER_SELECTABLE (1 << 5) #define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported #define HTON_SUPPORT_LOG_TABLES (1 << 7) //Engine supports log tables @@ -1305,6 +1427,22 @@ struct THD_TRANS void reset() { no_2pc= FALSE; modified_non_trans_table= FALSE; } bool is_empty() const { return ha_list == NULL; } THD_TRANS() {} /* Remove gcc warning */ + + unsigned int m_unsafe_rollback_flags; + /* + Define the type of statemens which cannot be rolled back safely. + Each type occupies one bit in m_unsafe_rollback_flags. 
+ */ + static unsigned int const MODIFIED_NON_TRANS_TABLE= 0x01; + static unsigned int const CREATED_TEMP_TABLE= 0x02; + static unsigned int const DROPPED_TEMP_TABLE= 0x04; + + void mark_created_temp_table() + { + DBUG_PRINT("debug", ("mark_created_temp_table")); + m_unsafe_rollback_flags|= CREATED_TEMP_TABLE; + } + }; @@ -1428,10 +1566,13 @@ struct st_table_log_memory_entry; class partition_info; struct st_partition_iter; -#define NOT_A_PARTITION_ID ((uint32)-1) enum ha_choice { HA_CHOICE_UNDEF, HA_CHOICE_NO, HA_CHOICE_YES }; +enum enum_stats_auto_recalc { HA_STATS_AUTO_RECALC_DEFAULT= 0, + HA_STATS_AUTO_RECALC_ON, + HA_STATS_AUTO_RECALC_OFF }; + struct HA_CREATE_INFO { CHARSET_INFO *table_charset, *default_table_charset; @@ -1447,6 +1588,9 @@ struct HA_CREATE_INFO ulong avg_row_length; ulong used_fields; ulong key_block_size; + uint stats_sample_pages; /* number of pages to sample during + stats estimation, if used, otherwise 0. */ + enum_stats_auto_recalc stats_auto_recalc; SQL_I_List<TABLE_LIST> merge_list; handlerton *db_type; /** @@ -1478,12 +1622,309 @@ struct HA_CREATE_INFO }; +/** + In-place alter handler context. + + This is a superclass intended to be subclassed by individual handlers + in order to store handler unique context between in-place alter API calls. + + The handler is responsible for creating the object. This can be done + as early as during check_if_supported_inplace_alter(). + + The SQL layer is responsible for destroying the object. + The class extends Sql_alloc so the memory will be mem root allocated. + + @see Alter_inplace_info +*/ + +class inplace_alter_handler_ctx : public Sql_alloc +{ +public: + inplace_alter_handler_ctx() {} + + virtual ~inplace_alter_handler_ctx() {} +}; + + +/** + Class describing changes to be done by ALTER TABLE. + Instance of this class is passed to storage engine in order + to determine if this ALTER TABLE can be done using in-place + algorithm. 
It is also used for executing the ALTER TABLE + using in-place algorithm. +*/ + +class Alter_inplace_info +{ +public: + /** + Bits to show in detail what operations the storage engine is + to execute. + + All these operations are supported as in-place operations by the + SQL layer. This means that operations that by their nature must + be performed by copying the table to a temporary table, will not + have their own flags here (e.g. ALTER TABLE FORCE, ALTER TABLE + ENGINE). + + We generally try to specify handler flags only if there are real + changes. But in cases when it is cumbersome to determine if some + attribute has really changed we might choose to set flag + pessimistically, for example, relying on parser output only. + */ + typedef ulong HA_ALTER_FLAGS; + + // Add non-unique, non-primary index + static const HA_ALTER_FLAGS ADD_INDEX = 1L << 0; + + // Drop non-unique, non-primary index + static const HA_ALTER_FLAGS DROP_INDEX = 1L << 1; + + // Add unique, non-primary index + static const HA_ALTER_FLAGS ADD_UNIQUE_INDEX = 1L << 2; + + // Drop unique, non-primary index + static const HA_ALTER_FLAGS DROP_UNIQUE_INDEX = 1L << 3; + + // Add primary index + static const HA_ALTER_FLAGS ADD_PK_INDEX = 1L << 4; + + // Drop primary index + static const HA_ALTER_FLAGS DROP_PK_INDEX = 1L << 5; + + // Add column + static const HA_ALTER_FLAGS ADD_COLUMN = 1L << 6; + + // Drop column + static const HA_ALTER_FLAGS DROP_COLUMN = 1L << 7; + + // Rename column + static const HA_ALTER_FLAGS ALTER_COLUMN_NAME = 1L << 8; + + // Change column datatype + static const HA_ALTER_FLAGS ALTER_COLUMN_TYPE = 1L << 9; + + /** + Change column datatype in such way that new type has compatible + packed representation with old type, so it is theoretically + possible to perform change by only updating data dictionary + without changing table rows. 
+ */ + static const HA_ALTER_FLAGS ALTER_COLUMN_EQUAL_PACK_LENGTH = 1L << 10; + + // Reorder column + static const HA_ALTER_FLAGS ALTER_COLUMN_ORDER = 1L << 11; + + // Change column from NOT NULL to NULL + static const HA_ALTER_FLAGS ALTER_COLUMN_NULLABLE = 1L << 12; + + // Change column from NULL to NOT NULL + static const HA_ALTER_FLAGS ALTER_COLUMN_NOT_NULLABLE = 1L << 13; + + // Set or remove default column value + static const HA_ALTER_FLAGS ALTER_COLUMN_DEFAULT = 1L << 14; + + // Add foreign key + static const HA_ALTER_FLAGS ADD_FOREIGN_KEY = 1L << 15; + + // Drop foreign key + static const HA_ALTER_FLAGS DROP_FOREIGN_KEY = 1L << 16; + + // table_options changed, see HA_CREATE_INFO::used_fields for details. + static const HA_ALTER_FLAGS CHANGE_CREATE_OPTION = 1L << 17; + + // Table is renamed + static const HA_ALTER_FLAGS ALTER_RENAME = 1L << 18; + + // Change the storage type of column + static const HA_ALTER_FLAGS ALTER_COLUMN_STORAGE_TYPE = 1L << 19; + + // Change the column format of column + static const HA_ALTER_FLAGS ALTER_COLUMN_COLUMN_FORMAT = 1L << 20; + + // Add partition + static const HA_ALTER_FLAGS ADD_PARTITION = 1L << 21; + + // Drop partition + static const HA_ALTER_FLAGS DROP_PARTITION = 1L << 22; + + // Changing partition options + static const HA_ALTER_FLAGS ALTER_PARTITION = 1L << 23; + + // Coalesce partition + static const HA_ALTER_FLAGS COALESCE_PARTITION = 1L << 24; + + // Reorganize partition ... into + static const HA_ALTER_FLAGS REORGANIZE_PARTITION = 1L << 25; + + // Reorganize partition + static const HA_ALTER_FLAGS ALTER_TABLE_REORG = 1L << 26; + + // Remove partitioning + static const HA_ALTER_FLAGS ALTER_REMOVE_PARTITIONING = 1L << 27; + + // Partition operation with ALL keyword + static const HA_ALTER_FLAGS ALTER_ALL_PARTITION = 1L << 28; + + // Partition operation with ALL keyword + static const HA_ALTER_FLAGS ALTER_COLUMN_VCOL = 1L << 29; + + /** + Create options (like MAX_ROWS) for the new version of table. 
+ + @note The referenced instance of HA_CREATE_INFO object was already + used to create new .FRM file for table being altered. So it + has been processed by mysql_prepare_create_table() already. + For example, this means that it has HA_OPTION_PACK_RECORD + flag in HA_CREATE_INFO::table_options member correctly set. + */ + HA_CREATE_INFO *create_info; + + /** + Alter options, fields and keys for the new version of table. + + @note The referenced instance of Alter_info object was already + used to create new .FRM file for table being altered. So it + has been processed by mysql_prepare_create_table() already. + In particular, this means that in Create_field objects for + fields which were present in some form in the old version + of table, Create_field::field member points to corresponding + Field instance for old version of table. + */ + Alter_info *alter_info; + + /** + Array of KEYs for new version of table - including KEYs to be added. + + @note Currently this array is produced as result of + mysql_prepare_create_table() call. + This means that it follows different convention for + KEY_PART_INFO::fieldnr values than objects in TABLE::key_info + array. + + @todo This is mainly due to the fact that we need to keep compatibility + with removed handler::add_index() call. We plan to switch to + TABLE::key_info numbering later. + + KEYs are sorted - see sort_keys(). + */ + KEY *key_info_buffer; + + /** Size of key_info_buffer array. */ + uint key_count; + + /** Size of index_drop_buffer array. */ + uint index_drop_count; + + /** + Array of pointers to KEYs to be dropped belonging to the TABLE instance + for the old version of the table. + */ + KEY **index_drop_buffer; + + /** Size of index_add_buffer array. */ + uint index_add_count; + + /** + Array of indexes into key_info_buffer for KEYs to be added, + sorted in increasing order. + */ + uint *index_add_buffer; + + /** + Context information to allow handlers to keep context between in-place + alter API calls. 
+ + @see inplace_alter_handler_ctx for information about object lifecycle. + */ + inplace_alter_handler_ctx *handler_ctx; + + /** + Flags describing in detail which operations the storage engine is to execute. + */ + HA_ALTER_FLAGS handler_flags; + + /** + Partition_info taking into account the partition changes to be performed. + Contains all partitions which are present in the old version of the table + with partitions to be dropped or changed marked as such + all partitions + to be added in the new version of table marked as such. + */ + partition_info *modified_part_info; + + /** true for ALTER IGNORE TABLE ... */ + const bool ignore; + + /** true for online operation (LOCK=NONE) */ + bool online; + + /** + Can be set by handler to describe why a given operation cannot be done + in-place (HA_ALTER_INPLACE_NOT_SUPPORTED) or why it cannot be done + online (HA_ALTER_INPLACE_NO_LOCK or + HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) + If set, it will be used with ER_ALTER_OPERATION_NOT_SUPPORTED_REASON if + results from handler::check_if_supported_inplace_alter() doesn't match + requirements set by user. If not set, the more generic + ER_ALTER_OPERATION_NOT_SUPPORTED will be used. + + Please set to a properly localized string, for example using + my_get_err_msg(), so that the error message as a whole is localized. 
+ */ + const char *unsupported_reason; + + Alter_inplace_info(HA_CREATE_INFO *create_info_arg, + Alter_info *alter_info_arg, + KEY *key_info_arg, uint key_count_arg, + partition_info *modified_part_info_arg, + bool ignore_arg) + : create_info(create_info_arg), + alter_info(alter_info_arg), + key_info_buffer(key_info_arg), + key_count(key_count_arg), + index_drop_count(0), + index_drop_buffer(NULL), + index_add_count(0), + index_add_buffer(NULL), + handler_ctx(NULL), + handler_flags(0), + modified_part_info(modified_part_info_arg), + ignore(ignore_arg), + online(false), + unsupported_reason(NULL) + {} + + ~Alter_inplace_info() + { + delete handler_ctx; + } + + /** + Used after check_if_supported_inplace_alter() to report + error if the result does not match the LOCK/ALGORITHM + requirements set by the user. + + @param not_supported Part of statement that was not supported. + @param try_instead Suggestion as to what the user should + replace not_supported with. + */ + void report_unsupported_error(const char *not_supported, + const char *try_instead); +}; + + typedef struct st_key_create_information { enum ha_key_alg algorithm; ulong block_size; LEX_STRING parser_name; LEX_STRING comment; + /** + A flag to determine if we will check for duplicate indexes. + This typically means that the key information was specified + directly by the user (set by the parser). + */ + bool check_for_duplicate_indexes; } KEY_CREATE_INFO; @@ -1885,34 +2326,61 @@ uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map); #define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1) -/** - Index creation context. - Created by handler::add_index() and destroyed by handler::final_add_index(). - And finally freed at the end of the statement. - (Sql_alloc does not free in delete). 
-*/ - -class handler_add_index : public Sql_alloc +/** Base class to be used by handlers different shares */ +class Handler_share { public: - /* Table where the indexes are added */ - TABLE* const table; - /* Indexes being created */ - KEY* const key_info; - /* Size of key_info[] */ - const uint num_of_keys; - handler_add_index(TABLE *table_arg, KEY *key_info_arg, uint num_of_keys_arg) - : table (table_arg), key_info (key_info_arg), num_of_keys (num_of_keys_arg) - {} - virtual ~handler_add_index() {} + Handler_share() {} + virtual ~Handler_share() {} }; -class Query_cache; -struct Query_cache_block_table; + /** The handler class is the interface for dynamically loadable storage engines. Do not add ifdefs and take care when adding or changing virtual functions to avoid vtable confusion + + Functions in this class accept and return table columns data. Two data + representation formats are used: + 1. TableRecordFormat - Used to pass [partial] table records to/from + storage engine + + 2. KeyTupleFormat - used to pass index search tuples (aka "keys") to + storage engine. See opt_range.cc for description of this format. + + TableRecordFormat + ================= + [Warning: this description is work in progress and may be incomplete] + The table record is stored in a fixed-size buffer: + + record: null_bytes, column1_data, column2_data, ... + + The offsets of the parts of the buffer are also fixed: every column has + an offset to its column{i}_data, and if it is nullable it also has its own + bit in null_bytes. + + The record buffer only includes data about columns that are marked in the + relevant column set (table->read_set and/or table->write_set, depending on + the situation). 
+ <not-sure>It could be that it is required that null bits of non-present + columns are set to 1</not-sure> + + VARIOUS EXCEPTIONS AND SPECIAL CASES + + If the table has no nullable columns, then null_bytes is still + present, its length is one byte <not-sure> which must be set to 0xFF + at all times. </not-sure> + + If the table has columns of type BIT, then certain bits from those columns + may be stored in null_bytes as well. Grep around for Field_bit for + details. + + For blob columns (see Field_blob), the record buffer stores length of the + data, following by memory pointer to the blob data. The pointer is owned + by the storage engine and is valid until the next operation. + + If a blob column has NULL value, then its length and blob data pointer + must be set to 0. */ class handler :public Sql_alloc @@ -1965,7 +2433,6 @@ public: uint ref_length; FT_INFO *ft_handler; enum {NONE=0, INDEX, RND} inited; - bool locked; bool implicit_emptied; /* Can be !=0 only if HEAP */ bool mark_trx_done; const COND *pushed_cond; @@ -2026,6 +2493,21 @@ public: virtual void unbind_psi(); virtual void rebind_psi(); +private: + /** + The lock type set by when calling::ha_external_lock(). This is + propagated down to the storage engine. The reason for also storing + it here, is that when doing MRR we need to create/clone a second handler + object. This cloned handler object needs to know about the lock_type used. + */ + int m_lock_type; + /** + Pointer where to store/retrieve the Handler_share pointer. + For non partitioned handlers this is &TABLE_SHARE::ha_share. 
+ */ + Handler_share **ha_share; + +public: handler(handlerton *ht_arg, TABLE_SHARE *share_arg) :table_share(share_arg), table(0), estimation_rows_to_insert(0), ht(ht_arg), @@ -2033,18 +2515,21 @@ public: in_range_check_pushed_down(FALSE), ref_length(sizeof(my_off_t)), ft_handler(0), inited(NONE), - locked(FALSE), implicit_emptied(0), mark_trx_done(FALSE), + implicit_emptied(0), mark_trx_done(FALSE), pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0), pushed_idx_cond(NULL), pushed_idx_cond_keyno(MAX_KEY), auto_inc_intervals_count(0), - m_psi(NULL) + m_psi(NULL), m_lock_type(F_UNLCK), ha_share(NULL) { + DBUG_PRINT("info", + ("handler created F_UNLCK %d F_RDLCK %d F_WRLCK %d", + F_UNLCK, F_RDLCK, F_WRLCK)); reset_statistics(); } virtual ~handler(void) { - DBUG_ASSERT(locked == FALSE); + DBUG_ASSERT(m_lock_type == F_UNLCK); DBUG_ASSERT(inited == NONE); } virtual handler *clone(const char *name, MEM_ROOT *mem_root); @@ -2159,7 +2644,6 @@ public: int ha_disable_indexes(uint mode); int ha_enable_indexes(uint mode); int ha_discard_or_import_tablespace(my_bool discard); - void ha_prepare_for_alter(); int ha_rename_table(const char *from, const char *to); int ha_delete_table(const char *name); void ha_drop_table(const char *name); @@ -2180,7 +2664,6 @@ public: void adjust_next_insert_id_after_explicit_value(ulonglong nr); int update_auto_increment(); - void print_keydup_error(uint key_nr, const char *msg, myf errflag); virtual void print_error(int error, myf errflag); virtual bool get_error_message(int error, String *buf); uint get_dup_key(int error); @@ -2443,6 +2926,7 @@ public: const key_range *end_key, bool eq_range, bool sorted); virtual int read_range_next(); + void set_end_range(const key_range *end_key); int compare_key(key_range *range); int compare_key2(key_range *range); virtual int ft_init() { return HA_ERR_WRONG_COMMAND; } @@ -2460,6 +2944,7 @@ private: */ virtual int rnd_pos_by_record(uchar *record) { + DBUG_ASSERT(table_flags() & 
HA_PRIMARY_KEY_REQUIRED_FOR_POSITION); position(record); return rnd_pos(record, ref); } @@ -2575,10 +3060,15 @@ public: { return FALSE; } virtual char* get_foreign_key_create_info() { return(NULL);} /* gets foreign key create string from InnoDB */ - virtual char* get_tablespace_name(THD *thd, char *name, uint name_len) - { return(NULL);} /* gets tablespace name from handler */ - /** used in ALTER TABLE; 1 if changing storage engine is allowed */ - virtual bool can_switch_engines() { return 1; } + /** + Used in ALTER TABLE to check if changing storage engine is allowed. + + @note Called without holding thr_lock.c lock. + + @retval true Changing storage engine is allowed. + @retval false Changing storage engine not allowed. + */ + virtual bool can_switch_engines() { return true; } virtual int can_continue_handler_scan() { return 0; } /** Get the list of foreign keys in this table. @@ -2629,52 +3119,16 @@ public: virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0; -/** - First phase of in-place add index. - Handlers are supposed to create new indexes here but not make them - visible. - - @param table_arg Table to add index to - @param key_info Information about new indexes - @param num_of_key Number of new indexes - @param add[out] Context of handler specific information needed - for final_add_index(). - - @note This function can be called with less than exclusive metadata - lock depending on which flags are listed in alter_table_flags. -*/ - virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys, - handler_add_index **add) - { return (HA_ERR_WRONG_COMMAND); } - -/** - Second and last phase of in-place add index. - Commit or rollback pending new indexes. - - @param add Context of handler specific information from add_index(). - @param commit If true, commit. If false, rollback index changes. - - @note This function is called with exclusive metadata lock. 
-*/ - virtual int final_add_index(handler_add_index *add, bool commit) - { return (HA_ERR_WRONG_COMMAND); } - - virtual int prepare_drop_index(TABLE *table_arg, uint *key_num, - uint num_of_keys) - { return (HA_ERR_WRONG_COMMAND); } - virtual int final_drop_index(TABLE *table_arg) - { return (HA_ERR_WRONG_COMMAND); } - uint max_record_length() const - { return min(HA_MAX_REC_LENGTH, max_supported_record_length()); } + { return MY_MIN(HA_MAX_REC_LENGTH, max_supported_record_length()); } uint max_keys() const - { return min(MAX_KEY, max_supported_keys()); } + { return MY_MIN(MAX_KEY, max_supported_keys()); } uint max_key_parts() const - { return min(MAX_REF_PARTS, max_supported_key_parts()); } + { return MY_MIN(MAX_REF_PARTS, max_supported_key_parts()); } uint max_key_length() const - { return min(MAX_KEY_LENGTH, max_supported_key_length()); } + { return MY_MIN(MAX_KEY_LENGTH, max_supported_key_length()); } uint max_key_part_length() const - { return min(MAX_KEY_LENGTH, max_supported_key_part_length()); } + { return MY_MIN(MAX_KEY_LENGTH, max_supported_key_part_length()); } virtual uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } virtual uint max_supported_keys() const { return 0; } @@ -2898,10 +3352,266 @@ public: pushed_idx_cond_keyno= MAX_KEY; in_range_check_pushed_down= false; } + /** + Part of old, deprecated in-place ALTER API. + */ virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info, uint table_changes) { return COMPATIBLE_DATA_NO; } + /* On-line/in-place ALTER TABLE interface. */ + + /* + Here is an outline of on-line/in-place ALTER TABLE execution through + this interface. + + Phase 1 : Initialization + ======================== + During this phase we determine which algorithm should be used + for execution of ALTER TABLE and what level concurrency it will + require. + + *) This phase starts by opening the table and preparing description + of the new version of the table. 
+ *) Then we check if it is impossible even in theory to carry out + this ALTER TABLE using the in-place algorithm. For example, because + we need to change storage engine or the user has explicitly requested + usage of the "copy" algorithm. + *) If in-place ALTER TABLE is theoretically possible, we continue + by compiling differences between old and new versions of the table + in the form of HA_ALTER_FLAGS bitmap. We also build a few + auxiliary structures describing requested changes and store + all these data in the Alter_inplace_info object. + *) Then the handler::check_if_supported_inplace_alter() method is called + in order to find if the storage engine can carry out changes requested + by this ALTER TABLE using the in-place algorithm. To determine this, + the engine can rely on data in HA_ALTER_FLAGS/Alter_inplace_info + passed to it as well as on its own checks. If the in-place algorithm + can be used for this ALTER TABLE, the level of required concurrency for + its execution is also returned. + If any errors occur during the handler call, ALTER TABLE is aborted + and no further handler functions are called. + *) Locking requirements of the in-place algorithm are compared to any + concurrency requirements specified by user. If there is a conflict + between them, we either switch to the copy algorithm or emit an error. + + Phase 2 : Execution + =================== + + In this phase the operations are executed. + + *) As the first step, we acquire a lock corresponding to the concurrency + level which was returned by handler::check_if_supported_inplace_alter() + and requested by the user. This lock is held for most of the + duration of in-place ALTER (if HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE + or HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE were returned we acquire an + exclusive lock for duration of the next step only). 
+ *) After that we call handler::ha_prepare_inplace_alter_table() to give the + storage engine a chance to update its internal structures with a higher + lock level than the one that will be used for the main step of algorithm. + After that we downgrade the lock if it is necessary. + *) After that, the main step of this phase and algorithm is executed. + We call the handler::ha_inplace_alter_table() method, which carries out the + changes requested by ALTER TABLE but does not makes them visible to other + connections yet. + *) We ensure that no other connection uses the table by upgrading our + lock on it to exclusive. + *) a) If the previous step succeeds, handler::ha_commit_inplace_alter_table() is + called to allow the storage engine to do any final updates to its structures, + to make all earlier changes durable and visible to other connections. + b) If we have failed to upgrade lock or any errors have occured during the + handler functions calls (including commit), we call + handler::ha_commit_inplace_alter_table() + to rollback all changes which were done during previous steps. + + Phase 3 : Final + =============== + + In this phase we: + + *) Update SQL-layer data-dictionary by installing .FRM file for the new version + of the table. + *) Inform the storage engine about this change by calling the + handler::ha_notify_table_changed() method. + *) Destroy the Alter_inplace_info and handler_ctx objects. + + */ + + /** + Check if a storage engine supports a particular alter table in-place + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. + + @retval HA_ALTER_ERROR Unexpected error. + @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported, must use copy. + @retval HA_ALTER_INPLACE_EXCLUSIVE_LOCK Supported, but requires X lock. 
+ @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE + Supported, but requires SNW lock + during main phase. Prepare phase + requires X lock. + @retval HA_ALTER_INPLACE_SHARED_LOCK Supported, but requires SNW lock. + @retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE + Supported, concurrent reads/writes + allowed. However, prepare phase + requires X lock. + @retval HA_ALTER_INPLACE_NO_LOCK Supported, concurrent + reads/writes allowed. + + @note The default implementation uses the old in-place ALTER API + to determine if the storage engine supports in-place ALTER or not. + + @note Called without holding thr_lock.c lock. + */ + virtual enum_alter_inplace_result + check_if_supported_inplace_alter(TABLE *altered_table, + Alter_inplace_info *ha_alter_info); + + + /** + Public functions wrapping the actual handler call. + @see prepare_inplace_alter_table() + */ + bool ha_prepare_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info); + + + /** + Public function wrapping the actual handler call. + @see inplace_alter_table() + */ + bool ha_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info) + { + return inplace_alter_table(altered_table, ha_alter_info); + } + + + /** + Public function wrapping the actual handler call. + Allows us to enforce asserts regardless of handler implementation. + @see commit_inplace_alter_table() + */ + bool ha_commit_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info, + bool commit); + + + /** + Public function wrapping the actual handler call. + @see notify_table_changed() + */ + void ha_notify_table_changed() + { + notify_table_changed(); + } + + +protected: + /** + Allows the storage engine to update internal structures with concurrent + writes blocked. 
If check_if_supported_inplace_alter() returns + HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE or + HA_ALTER_INPLACE_SHARED_AFTER_PREPARE, this function is called with + exclusive lock otherwise the same level of locking as for + inplace_alter_table() will be used. + + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function reports error, commit_inplace_alter_table() + will be called with commit= false. + + @note For partitioning, failing to prepare one partition, means that + commit_inplace_alter_table() will be called to roll back changes for + all partitions. This means that commit_inplace_alter_table() might be + called without prepare_inplace_alter_table() having been called first + for a given partition. + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. + + @retval true Error + @retval false Success + */ + virtual bool prepare_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info) + { return false; } + + + /** + Alter the table structure in-place with operations specified using HA_ALTER_FLAGS + and Alter_inplace_info. The level of concurrency allowed during this + operation depends on the return value from check_if_supported_inplace_alter(). + + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function reports error, commit_inplace_alter_table() + will be called with commit= false. + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. 
+ + @retval true Error + @retval false Success + */ + virtual bool inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info) + { return false; } + + + /** + Commit or rollback the changes made during prepare_inplace_alter_table() + and inplace_alter_table() inside the storage engine. + Note that in case of rollback the allowed level of concurrency during + this operation will be the same as for inplace_alter_table() and thus + might be higher than during prepare_inplace_alter_table(). (For example, + concurrent writes were blocked during prepare, but might not be during + rollback). + + @note Storage engines are responsible for reporting any errors by + calling my_error()/print_error() + + @note If this function with commit= true reports error, it will be called + again with commit= false. + + @note In case of partitioning, this function might be called for rollback + without prepare_inplace_alter_table() having been called first. + @see prepare_inplace_alter_table(). + + @param altered_table TABLE object for new version of table. + @param ha_alter_info Structure describing changes to be done + by ALTER TABLE and holding data used + during in-place alter. + @param commit True => Commit, False => Rollback. + + @retval true Error + @retval false Success + */ + virtual bool commit_inplace_alter_table(TABLE *altered_table, + Alter_inplace_info *ha_alter_info, + bool commit) + { return false; } + + + /** + Notify the storage engine that the table structure (.FRM) has been updated. + + @note No errors are allowed during notify_table_changed(). + */ + virtual void notify_table_changed(); + +public: + /* End of On-line/in-place ALTER TABLE interface. 
*/ + + /** use_hidden_primary_key() is called in case of an update/delete when (table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined @@ -2944,28 +3654,6 @@ protected: */ PSI_table_share *ha_table_share_psi() const; - inline void psi_open() - { - DBUG_ASSERT(m_psi == NULL); - DBUG_ASSERT(table_share != NULL); - if (PSI_server) - { - PSI_table_share *share_psi= ha_table_share_psi(); - if (share_psi) - m_psi= PSI_CALL_open_table(share_psi, this); - } - } - - inline void psi_close() - { - if (PSI_server && m_psi) - { - PSI_CALL_close_table(m_psi); - m_psi= NULL; /* instrumentation handle, invalid after close_table() */ - } - DBUG_ASSERT(m_psi == NULL); - } - /** Default rename_table() and delete_table() rename/delete files with a given name and extensions from bas_ext(). @@ -3015,6 +3703,14 @@ private: return HA_ERR_WRONG_COMMAND; } + /** + Update a single row. + + Note: If HA_ERR_FOUND_DUPP_KEY is returned, the handler must read + all columns of the row so MySQL can create an error message. If + the columns required for the error message are not read, the error + message will contain garbage. + */ virtual int update_row(const uchar *old_data __attribute__((unused)), uchar *new_data __attribute__((unused))) { @@ -3078,9 +3774,12 @@ private: } virtual void start_bulk_insert(ha_rows rows, uint flags) {} virtual int end_bulk_insert() { return 0; } +protected: virtual int index_read(uchar * buf, const uchar * key, uint key_len, enum ha_rkey_function find_flag) { return HA_ERR_WRONG_COMMAND; } + friend class ha_partition; +public: /** This method is similar to update_row, however the handler doesn't need to execute the updates at this point in time. 
The handler can be certain @@ -3165,7 +3864,16 @@ private: { return HA_ERR_WRONG_COMMAND; } virtual int rename_partitions(const char *path) { return HA_ERR_WRONG_COMMAND; } - friend class ha_partition; + virtual bool set_ha_share_ref(Handler_share **arg_ha_share) + { + DBUG_ASSERT(!ha_share); + DBUG_ASSERT(arg_ha_share); + if (ha_share || !arg_ha_share) + return true; + ha_share= arg_ha_share; + return false; + } + int get_lock_type() const { return m_lock_type; } public: /* XXX to be removed, see ha_partition::partition_ht() */ virtual handlerton *partition_ht() const @@ -3174,6 +3882,11 @@ public: inline int ha_update_tmp_row(const uchar * old_data, uchar * new_data); friend enum icp_result handler_index_cond_check(void* h_arg); +protected: + Handler_share *get_ha_share_ptr(); + void set_ha_share_ptr(Handler_share *arg_ha_share); + void lock_shared_ha_data(); + void unlock_shared_ha_data(); }; #include "multi_range_read.h" @@ -3340,4 +4053,7 @@ inline const char *table_case_name(HA_CREATE_INFO *info, const char *name) { return ((lower_case_table_names == 2 && info->alias) ? info->alias : name); } + +void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag); +void print_keydup_error(TABLE *table, KEY *key, myf errflag); #endif diff --git a/sql/hash_filo.h b/sql/hash_filo.h index b6068348d1d..abba4824c9e 100644 --- a/sql/hash_filo.h +++ b/sql/hash_filo.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -47,8 +47,11 @@ private: class hash_filo { - const uint size, key_offset, key_length; +private: + const uint key_offset, key_length; const my_hash_get_key get_key; + /** Size of this hash table. 
*/ + uint m_size; my_hash_free_key free_element; bool init; CHARSET_INFO *hash_charset; @@ -61,9 +64,12 @@ public: hash_filo(uint size_arg, uint key_offset_arg , uint key_length_arg, my_hash_get_key get_key_arg, my_hash_free_key free_element_arg, CHARSET_INFO *hash_charset_arg) - :size(size_arg), key_offset(key_offset_arg), key_length(key_length_arg), - get_key(get_key_arg), free_element(free_element_arg),init(0), - hash_charset(hash_charset_arg) + :key_offset(key_offset_arg), key_length(key_length_arg), + get_key(get_key_arg), m_size(size_arg), + free_element(free_element_arg),init(0), + hash_charset(hash_charset_arg), + first_link(NULL), + last_link(NULL) { bzero((char*) &cache,sizeof(cache)); } @@ -86,32 +92,61 @@ public: } if (!locked) mysql_mutex_lock(&lock); + first_link= NULL; + last_link= NULL; (void) my_hash_free(&cache); - (void) my_hash_init(&cache,hash_charset,size,key_offset, + (void) my_hash_init(&cache,hash_charset,m_size,key_offset, key_length, get_key, free_element,0); if (!locked) mysql_mutex_unlock(&lock); - first_link=last_link=0; + } + + hash_filo_element *first() + { + mysql_mutex_assert_owner(&lock); + return first_link; + } + + hash_filo_element *last() + { + mysql_mutex_assert_owner(&lock); + return last_link; } hash_filo_element *search(uchar* key, size_t length) { + mysql_mutex_assert_owner(&lock); + hash_filo_element *entry=(hash_filo_element*) my_hash_search(&cache,(uchar*) key,length); if (entry) { // Found; link it first + DBUG_ASSERT(first_link != NULL); + DBUG_ASSERT(last_link != NULL); if (entry != first_link) { // Relink used-chain if (entry == last_link) - last_link=entry->prev_used; + { + last_link= last_link->prev_used; + /* + The list must have at least 2 elements, + otherwise entry would be equal to first_link. 
+ */ + DBUG_ASSERT(last_link != NULL); + last_link->next_used= NULL; + } else { + DBUG_ASSERT(entry->next_used != NULL); + DBUG_ASSERT(entry->prev_used != NULL); entry->next_used->prev_used = entry->prev_used; entry->prev_used->next_used = entry->next_used; } - if ((entry->next_used= first_link)) - first_link->prev_used=entry; - first_link=entry; + entry->prev_used= NULL; + entry->next_used= first_link; + + first_link->prev_used= entry; + first_link=entry; } } return entry; @@ -119,10 +154,20 @@ public: bool add(hash_filo_element *entry) { - if (cache.records == size) + if (!m_size) return 1; + if (cache.records == m_size) { hash_filo_element *tmp=last_link; - last_link=last_link->prev_used; + last_link= last_link->prev_used; + if (last_link != NULL) + { + last_link->next_used= NULL; + } + else + { + /* Pathological case, m_size == 1 */ + first_link= NULL; + } my_hash_delete(&cache,(uchar*) tmp); } if (my_hash_insert(&cache,(uchar*) entry)) @@ -131,13 +176,27 @@ public: (*free_element)(entry); // This should never happen return 1; } - if ((entry->next_used=first_link)) - first_link->prev_used=entry; + entry->prev_used= NULL; + entry->next_used= first_link; + if (first_link != NULL) + first_link->prev_used= entry; else - last_link=entry; - first_link=entry; + last_link= entry; + first_link= entry; + return 0; } + + uint size() + { return m_size; } + + void resize(uint new_size) + { + mysql_mutex_lock(&lock); + m_size= new_size; + clear(true); + mysql_mutex_unlock(&lock); + } }; #endif diff --git a/sql/hostname.cc b/sql/hostname.cc index 3540dd8c8ab..6c3c70aa7ea 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -24,7 +24,6 @@ Hostnames are checked with reverse name lookup and checked that they doesn't resemble an IP address. */ - #include "sql_priv.h" #include "hostname.h" #include "my_global.h" @@ -50,54 +49,101 @@ extern "C" { // Because of SCO 3.2V4.2 } #endif -/* - HOST_ENTRY_KEY_SIZE -- size of IP address string in the hash cache. 
-*/ - -#define HOST_ENTRY_KEY_SIZE INET6_ADDRSTRLEN - -/** - An entry in the hostname hash table cache. - - Host name cache does two things: - - caches host names to save DNS look ups; - - counts connect errors from IP. - - Host name can be NULL (that means DNS look up failed), but connect errors - still are counted. -*/ - -class Host_entry :public hash_filo_element +Host_errors::Host_errors() +: m_connect(0), + m_host_blocked(0), + m_nameinfo_transient(0), + m_nameinfo_permanent(0), + m_format(0), + m_addrinfo_transient(0), + m_addrinfo_permanent(0), + m_FCrDNS(0), + m_host_acl(0), + m_no_auth_plugin(0), + m_auth_plugin(0), + m_handshake(0), + m_proxy_user(0), + m_proxy_user_acl(0), + m_authentication(0), + m_ssl(0), + m_max_user_connection(0), + m_max_user_connection_per_hour(0), + m_default_database(0), + m_init_connect(0), + m_local(0) +{} + +Host_errors::~Host_errors() +{} + +void Host_errors::reset() { -public: - /** - Client IP address. This is the key used with the hash table. - - The client IP address is always expressed in IPv6, even when the - network IPv6 stack is not present. - - This IP address is never used to connect to a socket. - */ - char ip_key[HOST_ENTRY_KEY_SIZE]; - - /** - Number of errors during handshake phase from the IP address. - */ - uint connect_errors; + m_connect= 0; + m_host_blocked= 0; + m_nameinfo_transient= 0; + m_nameinfo_permanent= 0; + m_format= 0; + m_addrinfo_transient= 0; + m_addrinfo_permanent= 0; + m_FCrDNS= 0; + m_host_acl= 0; + m_no_auth_plugin= 0; + m_auth_plugin= 0; + m_handshake= 0; + m_proxy_user= 0; + m_proxy_user_acl= 0; + m_authentication= 0; + m_ssl= 0; + m_max_user_connection= 0; + m_max_user_connection_per_hour= 0; + m_default_database= 0; + m_init_connect= 0; + m_local= 0; +} - /** - One of the host names for the IP address. May be NULL. 
- */ - const char *hostname; -}; +void Host_errors::aggregate(const Host_errors *errors) +{ + m_connect+= errors->m_connect; + m_host_blocked+= errors->m_host_blocked; + m_nameinfo_transient+= errors->m_nameinfo_transient; + m_nameinfo_permanent+= errors->m_nameinfo_permanent; + m_format+= errors->m_format; + m_addrinfo_transient+= errors->m_addrinfo_transient; + m_addrinfo_permanent+= errors->m_addrinfo_permanent; + m_FCrDNS+= errors->m_FCrDNS; + m_host_acl+= errors->m_host_acl; + m_no_auth_plugin+= errors->m_no_auth_plugin; + m_auth_plugin+= errors->m_auth_plugin; + m_handshake+= errors->m_handshake; + m_proxy_user+= errors->m_proxy_user; + m_proxy_user_acl+= errors->m_proxy_user_acl; + m_authentication+= errors->m_authentication; + m_ssl+= errors->m_ssl; + m_max_user_connection+= errors->m_max_user_connection; + m_max_user_connection_per_hour+= errors->m_max_user_connection_per_hour; + m_default_database+= errors->m_default_database; + m_init_connect+= errors->m_init_connect; + m_local+= errors->m_local; +} static hash_filo *hostname_cache; +ulong host_cache_size; void hostname_cache_refresh() { hostname_cache->clear(); } +uint hostname_cache_size() +{ + return hostname_cache->size(); +} + +void hostname_cache_resize(uint size) +{ + hostname_cache->resize(size); +} + bool hostname_cache_init() { Host_entry tmp; @@ -120,6 +166,16 @@ void hostname_cache_free() hostname_cache= NULL; } +void hostname_cache_lock() +{ + mysql_mutex_lock(&hostname_cache->lock); +} + +void hostname_cache_unlock() +{ + mysql_mutex_unlock(&hostname_cache->lock); +} + static void prepare_hostname_cache_key(const char *ip_string, char *ip_key) { @@ -130,69 +186,119 @@ static void prepare_hostname_cache_key(const char *ip_string, memcpy(ip_key, ip_string, ip_string_length); } +Host_entry *hostname_cache_first() +{ return (Host_entry *) hostname_cache->first(); } + static inline Host_entry *hostname_cache_search(const char *ip_key) { return (Host_entry *) hostname_cache->search((uchar *) 
ip_key, 0); } -static bool add_hostname_impl(const char *ip_key, const char *hostname) +static void add_hostname_impl(const char *ip_key, const char *hostname, + bool validated, Host_errors *errors, + ulonglong now) { - if (hostname_cache_search(ip_key)) - return FALSE; - - size_t hostname_size= hostname ? strlen(hostname) + 1 : 0; - - Host_entry *entry= (Host_entry *) malloc(sizeof (Host_entry) + hostname_size); - - if (!entry) - return TRUE; - - char *hostname_copy; + Host_entry *entry; + bool need_add= false; - memcpy(&entry->ip_key, ip_key, HOST_ENTRY_KEY_SIZE); + entry= hostname_cache_search(ip_key); - if (hostname_size) + if (likely(entry == NULL)) { - hostname_copy= (char *) (entry + 1); - memcpy(hostname_copy, hostname, hostname_size); - - DBUG_PRINT("info", ("Adding '%s' -> '%s' to the hostname cache...'", - (const char *) ip_key, - (const char *) hostname_copy)); + entry= (Host_entry *) malloc(sizeof (Host_entry)); + if (entry == NULL) + return; + + need_add= true; + memcpy(&entry->ip_key, ip_key, HOST_ENTRY_KEY_SIZE); + entry->m_errors.reset(); + entry->m_hostname_length= 0; + entry->m_host_validated= false; + entry->m_first_seen= now; + entry->m_last_seen= now; + entry->m_first_error_seen= 0; + entry->m_last_error_seen= 0; } else { - hostname_copy= NULL; + entry->m_last_seen= now; + } - DBUG_PRINT("info", ("Adding '%s' -> NULL to the hostname cache...'", - (const char *) ip_key)); + if (validated) + { + if (hostname != NULL) + { + uint len= strlen(hostname); + if (len > sizeof(entry->m_hostname) - 1) + len= sizeof(entry->m_hostname) - 1; + memcpy(entry->m_hostname, hostname, len); + entry->m_hostname[len]= '\0'; + entry->m_hostname_length= len; + + DBUG_PRINT("info", + ("Adding/Updating '%s' -> '%s' (validated) to the hostname cache...'", + (const char *) ip_key, + (const char *) entry->m_hostname)); + } + else + { + entry->m_hostname_length= 0; + DBUG_PRINT("info", + ("Adding/Updating '%s' -> NULL (validated) to the hostname cache...'", + (const char 
*) ip_key)); + } + entry->m_host_validated= true; + /* + New errors that are considered 'blocking', + that will eventually cause the IP to be black listed and blocked. + */ + errors->sum_connect_errors(); + } + else + { + entry->m_hostname_length= 0; + entry->m_host_validated= false; + /* Do not count new blocking errors during DNS failures. */ + errors->clear_connect_errors(); + DBUG_PRINT("info", + ("Adding/Updating '%s' -> NULL (not validated) to the hostname cache...'", + (const char *) ip_key)); } - entry->hostname= hostname_copy; - entry->connect_errors= 0; + if (errors->has_error()) + entry->set_error_timestamps(now); + + entry->m_errors.aggregate(errors); - return hostname_cache->add(entry); + if (need_add) + hostname_cache->add(entry); + + return; } -static bool add_hostname(const char *ip_key, const char *hostname) +static void add_hostname(const char *ip_key, const char *hostname, + bool validated, Host_errors *errors) { if (specialflag & SPECIAL_NO_HOST_CACHE) - return FALSE; + return; + + ulonglong now= my_hrtime().val; mysql_mutex_lock(&hostname_cache->lock); - bool err_status= add_hostname_impl(ip_key, hostname); + add_hostname_impl(ip_key, hostname, validated, errors, now); mysql_mutex_unlock(&hostname_cache->lock); - return err_status; + return; } -void inc_host_errors(const char *ip_string) +void inc_host_errors(const char *ip_string, Host_errors *errors) { if (!ip_string) return; + ulonglong now= my_hrtime().val; char ip_key[HOST_ENTRY_KEY_SIZE]; prepare_hostname_cache_key(ip_string, ip_key); @@ -201,13 +307,20 @@ void inc_host_errors(const char *ip_string) Host_entry *entry= hostname_cache_search(ip_key); if (entry) - entry->connect_errors++; + { + if (entry->m_host_validated) + errors->sum_connect_errors(); + else + errors->clear_connect_errors(); + + entry->m_errors.aggregate(errors); + entry->set_error_timestamps(now); + } mysql_mutex_unlock(&hostname_cache->lock); } - -void reset_host_errors(const char *ip_string) +void 
reset_host_connect_errors(const char *ip_string) { if (!ip_string) return; @@ -220,12 +333,11 @@ void reset_host_errors(const char *ip_string) Host_entry *entry= hostname_cache_search(ip_key); if (entry) - entry->connect_errors= 0; + entry->m_errors.clear_connect_errors(); mysql_mutex_unlock(&hostname_cache->lock); } - static inline bool is_ip_loopback(const struct sockaddr *ip) { switch (ip->sa_family) { @@ -277,6 +389,7 @@ static inline bool is_hostname_valid(const char *hostname) - returns host name if IP-address is validated; - set value to out-variable connect_errors -- this variable represents the number of connection errors from the specified IP-address. + - update the host_cache statistics NOTE: connect_errors are counted (are supported) only for the clients where IP-address can be resolved and FCrDNS check is passed. @@ -287,37 +400,43 @@ static inline bool is_hostname_valid(const char *hostname) @param [out] connect_errors @return Error status - @retval FALSE Success - @retval TRUE Error + @retval 0 Success + @retval RC_BLOCKED_HOST The host is blocked. The function does not set/report MySQL server error in case of failure. It's caller's responsibility to handle failures of this function properly. */ -bool ip_to_hostname(struct sockaddr_storage *ip_storage, - const char *ip_string, - char **hostname, uint *connect_errors) +int ip_to_hostname(struct sockaddr_storage *ip_storage, + const char *ip_string, + char **hostname, + uint *connect_errors) { const struct sockaddr *ip= (const sockaddr *) ip_storage; int err_code; bool err_status; + Host_errors errors; DBUG_ENTER("ip_to_hostname"); DBUG_PRINT("info", ("IP address: '%s'; family: %d.", (const char *) ip_string, (int) ip->sa_family)); + /* Default output values, for most cases. */ + *hostname= NULL; + *connect_errors= 0; + /* Check if we have loopback address (127.0.0.1 or ::1). 
*/ if (is_ip_loopback(ip)) { DBUG_PRINT("info", ("Loopback address detected.")); - *connect_errors= 0; /* Do not count connect errors from localhost. */ + /* Do not count connect errors from localhost. */ *hostname= (char *) my_localhost; - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } /* Prepare host name cache key. */ @@ -329,27 +448,45 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, if (!(specialflag & SPECIAL_NO_HOST_CACHE)) { + ulonglong now= my_hrtime().val; + mysql_mutex_lock(&hostname_cache->lock); Host_entry *entry= hostname_cache_search(ip_key); if (entry) { - *connect_errors= entry->connect_errors; - *hostname= NULL; + entry->m_last_seen= now; + + if (entry->m_errors.m_connect > max_connect_errors) + { + entry->m_errors.m_host_blocked++; + entry->set_error_timestamps(now); + *connect_errors= entry->m_errors.m_connect; + mysql_mutex_unlock(&hostname_cache->lock); + DBUG_RETURN(RC_BLOCKED_HOST); + } - if (entry->hostname) - *hostname= my_strdup(entry->hostname, MYF(0)); + /* + If there is an IP -> HOSTNAME association in the cache, + but for a hostname that was not validated, + do not return that hostname: perform the network validation again. + */ + if (entry->m_host_validated) + { + if (entry->m_hostname_length) + *hostname= my_strdup(entry->m_hostname, MYF(0)); - DBUG_PRINT("info",("IP (%s) has been found in the cache. " - "Hostname: '%s'; connect_errors: %d", - (const char *) ip_key, - (const char *) (*hostname? *hostname : "null"), - (int) *connect_errors)); + DBUG_PRINT("info",("IP (%s) has been found in the cache. " + "Hostname: '%s'", + (const char *) ip_key, + (const char *) (*hostname? 
*hostname : "null") + )); - mysql_mutex_unlock(&hostname_cache->lock); + mysql_mutex_unlock(&hostname_cache->lock); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); + } } mysql_mutex_unlock(&hostname_cache->lock); @@ -367,13 +504,60 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, err_code= vio_getnameinfo(ip, hostname_buffer, NI_MAXHOST, NULL, 0, NI_NAMEREQD); - /* BEGIN : DEBUG */ - DBUG_EXECUTE_IF("addr_fake_ipv4", + /* + =========================================================================== + DEBUG code only (begin) + Simulate various output from vio_getnameinfo(). + =========================================================================== + */ + + DBUG_EXECUTE_IF("getnameinfo_error_noname", + { + strcpy(hostname_buffer, "<garbage>"); + err_code= EAI_NONAME; + } + ); + + DBUG_EXECUTE_IF("getnameinfo_error_again", + { + strcpy(hostname_buffer, "<garbage>"); + err_code= EAI_AGAIN; + } + ); + + DBUG_EXECUTE_IF("getnameinfo_fake_ipv4", { strcpy(hostname_buffer, "santa.claus.ipv4.example.com"); err_code= 0; - };); - /* END : DEBUG */ + } + ); + + DBUG_EXECUTE_IF("getnameinfo_fake_ipv6", + { + strcpy(hostname_buffer, "santa.claus.ipv6.example.com"); + err_code= 0; + } + ); + + DBUG_EXECUTE_IF("getnameinfo_format_ipv4", + { + strcpy(hostname_buffer, "12.12.12.12"); + err_code= 0; + } + ); + + DBUG_EXECUTE_IF("getnameinfo_format_ipv6", + { + strcpy(hostname_buffer, "12:DEAD:BEEF:0"); + err_code= 0; + } + ); + + /* + =========================================================================== + DEBUG code only (end) + =========================================================================== + */ if (err_code) { @@ -387,23 +571,29 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, (const char *) ip_key, (const char *) gai_strerror(err_code)); + bool validated; if (vio_is_no_name_error(err_code)) { /* The no-name error means that there is no reverse address mapping for the IP address. A host name can not be resolved. 
- + */ + errors.m_nameinfo_permanent= 1; + validated= true; + } + else + { + /* If it is not the no-name error, we should not cache the hostname (or rather its absence), because the failure might be transient. + Only the ip error statistics are cached. */ - - add_hostname(ip_key, NULL); - - *hostname= NULL; - *connect_errors= 0; /* New IP added to the cache. */ + errors.m_nameinfo_transient= 1; + validated= false; } + add_hostname(ip_key, NULL, validated, &errors); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } DBUG_PRINT("info", ("IP '%s' resolved to '%s'.", @@ -439,24 +629,21 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, (const char *) ip_key, (const char *) hostname_buffer); - err_status= add_hostname(ip_key, NULL); - - *hostname= NULL; - *connect_errors= 0; /* New IP added to the cache. */ + errors.m_format= 1; + add_hostname(ip_key, hostname_buffer, false, &errors); - DBUG_RETURN(err_status); + DBUG_RETURN(false); } - /* - To avoid crashing the server in DBUG_EXECUTE_IF, - Define a variable which depicts state of addr_info_list. - */ - bool free_addr_info_list= false; - /* Get IP-addresses for the resolved host name (FCrDNS technique). */ struct addrinfo hints; struct addrinfo *addr_info_list; + /* + Makes fault injection with DBUG_EXECUTE_IF easier. + Invoking free_addr_info(NULL) crashes on some platforms. + */ + bool free_addr_info_list= false; memset(&hints, 0, sizeof (struct addrinfo)); hints.ai_flags= AI_PASSIVE; @@ -470,8 +657,72 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, if (err_code == 0) free_addr_info_list= true; - /* BEGIN : DEBUG */ - DBUG_EXECUTE_IF("addr_fake_ipv4", + /* + =========================================================================== + DEBUG code only (begin) + Simulate various output from getaddrinfo(). 
+ =========================================================================== + */ + DBUG_EXECUTE_IF("getaddrinfo_error_noname", + { + if (free_addr_info_list) + freeaddrinfo(addr_info_list); + + addr_info_list= NULL; + err_code= EAI_NONAME; + free_addr_info_list= false; + } + ); + + DBUG_EXECUTE_IF("getaddrinfo_error_again", + { + if (free_addr_info_list) + freeaddrinfo(addr_info_list); + + addr_info_list= NULL; + err_code= EAI_AGAIN; + free_addr_info_list= false; + } + ); + + DBUG_EXECUTE_IF("getaddrinfo_fake_bad_ipv4", + { + if (free_addr_info_list) + freeaddrinfo(addr_info_list); + + struct sockaddr_in *debug_addr; + /* + Not thread safe, which is ok. + Only one connection at a time is tested with + fault injection. + */ + static struct sockaddr_in debug_sock_addr[2]; + static struct addrinfo debug_addr_info[2]; + /* Simulating ipv4 192.0.2.126 */ + debug_addr= & debug_sock_addr[0]; + debug_addr->sin_family= AF_INET; + debug_addr->sin_addr.s_addr= inet_addr("192.0.2.126"); + + /* Simulating ipv4 192.0.2.127 */ + debug_addr= & debug_sock_addr[1]; + debug_addr->sin_family= AF_INET; + debug_addr->sin_addr.s_addr= inet_addr("192.0.2.127"); + + debug_addr_info[0].ai_addr= (struct sockaddr*) & debug_sock_addr[0]; + debug_addr_info[0].ai_addrlen= sizeof (struct sockaddr_in); + debug_addr_info[0].ai_next= & debug_addr_info[1]; + + debug_addr_info[1].ai_addr= (struct sockaddr*) & debug_sock_addr[1]; + debug_addr_info[1].ai_addrlen= sizeof (struct sockaddr_in); + debug_addr_info[1].ai_next= NULL; + + addr_info_list= & debug_addr_info[0]; + err_code= 0; + free_addr_info_list= false; + } + ); + + DBUG_EXECUTE_IF("getaddrinfo_fake_good_ipv4", { if (free_addr_info_list) freeaddrinfo(addr_info_list); @@ -500,30 +751,186 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, addr_info_list= & debug_addr_info[0]; err_code= 0; free_addr_info_list= false; - };); + } + ); - /* END : DEBUG */ +#ifdef HAVE_IPV6 + DBUG_EXECUTE_IF("getaddrinfo_fake_bad_ipv6", + { + if 
(free_addr_info_list) + freeaddrinfo(addr_info_list); - if (err_code == EAI_NONAME) - { - /* - Don't cache responses when the DNS server is down, as otherwise - transient DNS failure may leave any number of clients (those - that attempted to connect during the outage) unable to connect - indefinitely. - */ + struct sockaddr_in6 *debug_addr; + struct in6_addr *ip6; + /* + Not thread safe, which is ok. + Only one connection at a time is tested with + fault injection. + */ + static struct sockaddr_in6 debug_sock_addr[2]; + static struct addrinfo debug_addr_info[2]; + /* Simulating ipv6 2001:DB8::6:7E */ + debug_addr= & debug_sock_addr[0]; + debug_addr->sin6_family= AF_INET6; + ip6= & debug_addr->sin6_addr; + /* inet_pton not available on Windows XP. */ + ip6->s6_addr[ 0] = 0x20; + ip6->s6_addr[ 1] = 0x01; + ip6->s6_addr[ 2] = 0x0d; + ip6->s6_addr[ 3] = 0xb8; + ip6->s6_addr[ 4] = 0x00; + ip6->s6_addr[ 5] = 0x00; + ip6->s6_addr[ 6] = 0x00; + ip6->s6_addr[ 7] = 0x00; + ip6->s6_addr[ 8] = 0x00; + ip6->s6_addr[ 9] = 0x00; + ip6->s6_addr[10] = 0x00; + ip6->s6_addr[11] = 0x00; + ip6->s6_addr[12] = 0x00; + ip6->s6_addr[13] = 0x06; + ip6->s6_addr[14] = 0x00; + ip6->s6_addr[15] = 0x7e; + + /* Simulating ipv6 2001:DB8::6:7F */ + debug_addr= & debug_sock_addr[1]; + debug_addr->sin6_family= AF_INET6; + ip6= & debug_addr->sin6_addr; + ip6->s6_addr[ 0] = 0x20; + ip6->s6_addr[ 1] = 0x01; + ip6->s6_addr[ 2] = 0x0d; + ip6->s6_addr[ 3] = 0xb8; + ip6->s6_addr[ 4] = 0x00; + ip6->s6_addr[ 5] = 0x00; + ip6->s6_addr[ 6] = 0x00; + ip6->s6_addr[ 7] = 0x00; + ip6->s6_addr[ 8] = 0x00; + ip6->s6_addr[ 9] = 0x00; + ip6->s6_addr[10] = 0x00; + ip6->s6_addr[11] = 0x00; + ip6->s6_addr[12] = 0x00; + ip6->s6_addr[13] = 0x06; + ip6->s6_addr[14] = 0x00; + ip6->s6_addr[15] = 0x7f; + + debug_addr_info[0].ai_addr= (struct sockaddr*) & debug_sock_addr[0]; + debug_addr_info[0].ai_addrlen= sizeof (struct sockaddr_in6); + debug_addr_info[0].ai_next= & debug_addr_info[1]; - err_status= add_hostname(ip_key, NULL); 
+ debug_addr_info[1].ai_addr= (struct sockaddr*) & debug_sock_addr[1]; + debug_addr_info[1].ai_addrlen= sizeof (struct sockaddr_in6); + debug_addr_info[1].ai_next= NULL; - *hostname= NULL; - *connect_errors= 0; /* New IP added to the cache. */ + addr_info_list= & debug_addr_info[0]; + err_code= 0; + free_addr_info_list= false; + } + ); - DBUG_RETURN(err_status); - } - else if (err_code) + DBUG_EXECUTE_IF("getaddrinfo_fake_good_ipv6", + { + if (free_addr_info_list) + freeaddrinfo(addr_info_list); + + struct sockaddr_in6 *debug_addr; + struct in6_addr *ip6; + /* + Not thread safe, which is ok. + Only one connection at a time is tested with + fault injection. + */ + static struct sockaddr_in6 debug_sock_addr[2]; + static struct addrinfo debug_addr_info[2]; + /* Simulating ipv6 2001:DB8::6:7 */ + debug_addr= & debug_sock_addr[0]; + debug_addr->sin6_family= AF_INET6; + ip6= & debug_addr->sin6_addr; + ip6->s6_addr[ 0] = 0x20; + ip6->s6_addr[ 1] = 0x01; + ip6->s6_addr[ 2] = 0x0d; + ip6->s6_addr[ 3] = 0xb8; + ip6->s6_addr[ 4] = 0x00; + ip6->s6_addr[ 5] = 0x00; + ip6->s6_addr[ 6] = 0x00; + ip6->s6_addr[ 7] = 0x00; + ip6->s6_addr[ 8] = 0x00; + ip6->s6_addr[ 9] = 0x00; + ip6->s6_addr[10] = 0x00; + ip6->s6_addr[11] = 0x00; + ip6->s6_addr[12] = 0x00; + ip6->s6_addr[13] = 0x06; + ip6->s6_addr[14] = 0x00; + ip6->s6_addr[15] = 0x07; + + /* Simulating ipv6 2001:DB8::6:6 */ + debug_addr= & debug_sock_addr[1]; + debug_addr->sin6_family= AF_INET6; + ip6= & debug_addr->sin6_addr; + ip6->s6_addr[ 0] = 0x20; + ip6->s6_addr[ 1] = 0x01; + ip6->s6_addr[ 2] = 0x0d; + ip6->s6_addr[ 3] = 0xb8; + ip6->s6_addr[ 4] = 0x00; + ip6->s6_addr[ 5] = 0x00; + ip6->s6_addr[ 6] = 0x00; + ip6->s6_addr[ 7] = 0x00; + ip6->s6_addr[ 8] = 0x00; + ip6->s6_addr[ 9] = 0x00; + ip6->s6_addr[10] = 0x00; + ip6->s6_addr[11] = 0x00; + ip6->s6_addr[12] = 0x00; + ip6->s6_addr[13] = 0x06; + ip6->s6_addr[14] = 0x00; + ip6->s6_addr[15] = 0x06; + + debug_addr_info[0].ai_addr= (struct sockaddr*) & debug_sock_addr[0]; + 
debug_addr_info[0].ai_addrlen= sizeof (struct sockaddr_in6); + debug_addr_info[0].ai_next= & debug_addr_info[1]; + + debug_addr_info[1].ai_addr= (struct sockaddr*) & debug_sock_addr[1]; + debug_addr_info[1].ai_addrlen= sizeof (struct sockaddr_in6); + debug_addr_info[1].ai_next= NULL; + + addr_info_list= & debug_addr_info[0]; + err_code= 0; + free_addr_info_list= false; + } + ); +#endif /* HAVE_IPV6 */ + + /* + =========================================================================== + DEBUG code only (end) + =========================================================================== + */ + + if (err_code != 0) { - DBUG_PRINT("error", ("getaddrinfo() failed with error code %d.", err_code)); - DBUG_RETURN(TRUE); + sql_print_warning("Host name '%s' could not be resolved: %s", + (const char *) hostname_buffer, + (const char *) gai_strerror(err_code)); + + bool validated; + + if (err_code == EAI_NONAME) + { + errors.m_addrinfo_permanent= 1; + validated= true; + } + else + { + /* + Don't cache responses when the DNS server is down, as otherwise + transient DNS failure may leave any number of clients (those + that attempted to connect during the outage) unable to connect + indefinitely. + Only cache error statistics. + */ + errors.m_addrinfo_transient= 1; + validated= false; + } + add_hostname(ip_key, NULL, validated, &errors); + + DBUG_RETURN(false); } /* Check that getaddrinfo() returned the used IP (FCrDNS technique). */ @@ -545,7 +952,7 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, DBUG_PRINT("info", (" - '%s'", (const char *) ip_buffer)); - if (strcmp(ip_key, ip_buffer) == 0) + if (strcasecmp(ip_key, ip_buffer) == 0) { /* Copy host name string to be stored in the cache. 
*/ @@ -557,7 +964,7 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, if (free_addr_info_list) freeaddrinfo(addr_info_list); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } break; @@ -568,9 +975,11 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, if (!*hostname) { - sql_print_information("Hostname '%s' does not resolve to '%s'.", - (const char *) hostname_buffer, - (const char *) ip_key); + errors.m_FCrDNS= 1; + + sql_print_warning("Hostname '%s' does not resolve to '%s'.", + (const char *) hostname_buffer, + (const char *) ip_key); sql_print_information("Hostname '%s' has the following IP addresses:", (const char *) hostname_buffer); @@ -584,30 +993,16 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage, ip_buffer, sizeof (ip_buffer)); DBUG_ASSERT(!err_status); - sql_print_information(" - %s\n", (const char *) ip_buffer); + sql_print_information(" - %s", (const char *) ip_buffer); } } - /* Free the result of getaddrinfo(). */ + /* Add an entry for the IP to the cache. */ + add_hostname(ip_key, *hostname, true, &errors); + /* Free the result of getaddrinfo(). */ if (free_addr_info_list) freeaddrinfo(addr_info_list); - /* Add an entry for the IP to the cache. */ - - if (*hostname) - { - err_status= add_hostname(ip_key, *hostname); - *connect_errors= 0; - } - else - { - DBUG_PRINT("error",("Couldn't verify hostname with getaddrinfo().")); - - err_status= add_hostname(ip_key, NULL); - *hostname= NULL; - *connect_errors= 0; - } - - DBUG_RETURN(err_status); + DBUG_RETURN(false); } diff --git a/sql/hostname.h b/sql/hostname.h index 6e9535c2947..81a1d0de88d 100644 --- a/sql/hostname.h +++ b/sql/hostname.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -17,14 +17,168 @@ #define HOSTNAME_INCLUDED #include "my_global.h" /* uint */ +#include "my_net.h" +#include "hash_filo.h" -bool ip_to_hostname(struct sockaddr_storage *ip_storage, - const char *ip_string, - char **hostname, uint *connect_errors); -void inc_host_errors(const char *ip_string); -void reset_host_errors(const char *ip_string); +struct Host_errors +{ +public: + Host_errors(); + ~Host_errors(); + + void reset(); + void aggregate(const Host_errors *errors); + + /** Number of connect errors. */ + ulong m_connect; + + /** Number of host blocked errors. */ + ulong m_host_blocked; + /** Number of transient errors from getnameinfo(). */ + ulong m_nameinfo_transient; + /** Number of permanent errors from getnameinfo(). */ + ulong m_nameinfo_permanent; + /** Number of errors from is_hostname_valid(). */ + ulong m_format; + /** Number of transient errors from getaddrinfo(). */ + ulong m_addrinfo_transient; + /** Number of permanent errors from getaddrinfo(). */ + ulong m_addrinfo_permanent; + /** Number of errors from Forward-Confirmed reverse DNS checks. */ + ulong m_FCrDNS; + /** Number of errors from host grants. */ + ulong m_host_acl; + /** Number of errors from missing auth plugin. */ + ulong m_no_auth_plugin; + /** Number of errors from auth plugin. */ + ulong m_auth_plugin; + /** Number of errors from authentication plugins. */ + ulong m_handshake; + /** Number of errors from proxy user. */ + ulong m_proxy_user; + /** Number of errors from proxy user acl. */ + ulong m_proxy_user_acl; + /** Number of errors from authentication. */ + ulong m_authentication; + /** Number of errors from ssl. */ + ulong m_ssl; + /** Number of errors from max user connection. */ + ulong m_max_user_connection; + /** Number of errors from max user connection per hour. 
*/ + ulong m_max_user_connection_per_hour; + /** Number of errors from the default database. */ + ulong m_default_database; + /** Number of errors from init_connect. */ + ulong m_init_connect; + /** Number of errors from the server itself. */ + ulong m_local; + + bool has_error() const + { + return ((m_host_blocked != 0) + || (m_nameinfo_transient != 0) + || (m_nameinfo_permanent != 0) + || (m_format != 0) + || (m_addrinfo_transient != 0) + || (m_addrinfo_permanent != 0) + || (m_FCrDNS != 0) + || (m_host_acl != 0) + || (m_no_auth_plugin != 0) + || (m_auth_plugin != 0) + || (m_handshake != 0) + || (m_proxy_user != 0) + || (m_proxy_user_acl != 0) + || (m_authentication != 0) + || (m_ssl != 0) + || (m_max_user_connection != 0) + || (m_max_user_connection_per_hour != 0) + || (m_default_database != 0) + || (m_init_connect != 0) + || (m_local != 0)); + } + + void sum_connect_errors() + { + /* Current (historical) behavior: */ + m_connect= m_handshake; + } + + void clear_connect_errors() + { + m_connect= 0; + } +}; + +/** Size of IP address string in the hash cache. */ +#define HOST_ENTRY_KEY_SIZE INET6_ADDRSTRLEN + +/** + An entry in the hostname hash table cache. + + Host name cache does two things: + - caches host names to save DNS look ups; + - counts errors from IP. + + Host name can be empty (that means DNS look up failed), + but errors still are counted. +*/ +class Host_entry : public hash_filo_element +{ +public: + Host_entry *next() + { return (Host_entry*) hash_filo_element::next(); } + + /** + Client IP address. This is the key used with the hash table. + + The client IP address is always expressed in IPv6, even when the + network IPv6 stack is not present. + + This IP address is never used to connect to a socket. + */ + char ip_key[HOST_ENTRY_KEY_SIZE]; + + /** + One of the host names for the IP address. May be a zero length string. + */ + char m_hostname[HOSTNAME_LENGTH + 1]; + /** Length in bytes of @c m_hostname. 
*/ + uint m_hostname_length; + /** The hostname is validated and used for authorization. */ + bool m_host_validated; + ulonglong m_first_seen; + ulonglong m_last_seen; + ulonglong m_first_error_seen; + ulonglong m_last_error_seen; + /** Error statistics. */ + Host_errors m_errors; + + void set_error_timestamps(ulonglong now) + { + if (m_first_error_seen == 0) + m_first_error_seen= now; + m_last_error_seen= now; + } +}; + +/** The size of the host_cache. */ +extern ulong host_cache_size; + +#define RC_OK 0 +#define RC_BLOCKED_HOST 1 +int ip_to_hostname(struct sockaddr_storage *ip_storage, + const char *ip_string, + char **hostname, uint *connect_errors); + +void inc_host_errors(const char *ip_string, Host_errors *errors); +void reset_host_connect_errors(const char *ip_string); bool hostname_cache_init(); void hostname_cache_free(); void hostname_cache_refresh(void); +uint hostname_cache_size(); +void hostname_cache_resize(uint size); +void hostname_cache_lock(); +void hostname_cache_unlock(); +Host_entry *hostname_cache_first(); #endif /* HOSTNAME_INCLUDED */ diff --git a/sql/innodb_priv.h b/sql/innodb_priv.h index 33ba7b0f5b3..82d74236ff9 100644 --- a/sql/innodb_priv.h +++ b/sql/innodb_priv.h @@ -26,11 +26,9 @@ int get_quote_char_for_identifier(THD *thd, const char *name, uint length); bool schema_table_store_record(THD *thd, TABLE *table); void localtime_to_TIME(MYSQL_TIME *to, struct tm *from); bool check_global_access(THD *thd, ulong want_access, bool no_errors=false); -uint strconvert(CHARSET_INFO *from_cs, const char *from, +uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length, CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors); void sql_print_error(const char *format, ...); - - #endif /* INNODB_PRIV_INCLUDED */ diff --git a/sql/item.cc b/sql/item.cc index 6ce93f501fe..ac920004b80 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -105,7 +105,7 @@ void Hybrid_type_traits_decimal::fix_length_and_dec(Item *item, Item *arg) const { 
item->decimals= arg->decimals; - item->max_length= min(arg->max_length + DECIMAL_LONGLONG_DIGITS, + item->max_length= MY_MIN(arg->max_length + DECIMAL_LONGLONG_DIGITS, DECIMAL_MAX_STR_LENGTH); } @@ -297,7 +297,7 @@ String *Item::val_string_from_decimal(String *str) String *Item::val_string_from_date(String *str) { MYSQL_TIME ltime; - if (get_date(<ime, 0) || + if (get_date(<ime, sql_mode_for_dates()) || str->alloc(MAX_DATE_STRING_REP_LENGTH)) { null_value= 1; @@ -341,7 +341,7 @@ my_decimal *Item::val_decimal_from_string(my_decimal *decimal_value) decimal_value) & E_DEC_BAD_NUM) { ErrConvString err(res); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "DECIMAL", err.ptr()); @@ -354,7 +354,7 @@ my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); MYSQL_TIME ltime; - if (get_date(<ime, 0)) + if (get_date(<ime, sql_mode_for_dates())) { my_decimal_set_zero(decimal_value); null_value= 1; // set NULL, stop processing @@ -551,9 +551,9 @@ uint Item::decimal_precision() const uint prec= my_decimal_length_to_precision(max_char_length(), decimals, unsigned_flag); - return min(prec, DECIMAL_MAX_PRECISION); + return MY_MIN(prec, DECIMAL_MAX_PRECISION); } - return min(max_char_length(), DECIMAL_MAX_PRECISION); + return MY_MIN(max_char_length(), DECIMAL_MAX_PRECISION); } @@ -1005,14 +1005,14 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) { char buff[SAFE_NAME_LEN]; strmake(buff, str_start, - min(sizeof(buff)-1, length + (int) (str-str_start))); + MY_MIN(sizeof(buff)-1, length + (int) (str-str_start))); if (length == 0) - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_NAME_BECOMES_EMPTY, ER(ER_NAME_BECOMES_EMPTY), buff); else - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + 
push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_REMOVED_SPACES, ER(ER_REMOVED_SPACES), buff); } @@ -1026,7 +1026,7 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) name_length= res_length; } else - name= sql_strmake(str, (name_length= min(length,MAX_ALIAS_NAME))); + name= sql_strmake(str, (name_length= MY_MIN(length,MAX_ALIAS_NAME))); } @@ -1155,11 +1155,26 @@ Item *Item_static_float_func::safe_charset_converter(CHARSET_INFO *tocs) Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs) { + return charset_converter(tocs, true); +} + + +/** + Convert a string item into the requested character set. + + @param tocs Character set to to convert the string to. + @param lossless Whether data loss is acceptable. + + @return A new item representing the converted string. +*/ +Item *Item_string::charset_converter(CHARSET_INFO *tocs, bool lossless) +{ Item_string *conv; uint conv_errors; char *ptr; String tmp, cstr, *ostr= val_str(&tmp); cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); + conv_errors= lossless && conv_errors; if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(), cstr.charset(), collation.derivation))) @@ -1180,7 +1195,6 @@ Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs) return conv; } - Item *Item_param::safe_charset_converter(CHARSET_INFO *tocs) { if (const_item()) @@ -2998,7 +3012,7 @@ double_from_string_with_check(CHARSET_INFO *cs, const char *cptr, We can use err.ptr() here as ErrConvString is guranteed to put an end \0 here. 
*/ - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "DOUBLE", err.ptr()); @@ -3035,7 +3049,7 @@ longlong_from_string_with_check(CHARSET_INFO *cs, const char *cptr, (end != end_of_num && !check_if_only_end_space(cs, end_of_num, end)))) { ErrConvString err(cptr, end - cptr, cs); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER", err.ptr()); @@ -3244,7 +3258,7 @@ void Item_param::set_time(MYSQL_TIME *tm, timestamp_type time_type, if (check_datetime_range(&value.time)) { ErrConvTime str(&value.time); - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, &str, time_type, 0); set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR); } @@ -4323,7 +4337,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, return TRUE; if (thd->lex->describe & DESCRIBE_EXTENDED) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_WARN_FIELD_RESOLVED, ER(ER_WARN_FIELD_RESOLVED), db_name, (db_name[0] ? "." : ""), table_name, (table_name [0] ? "." 
: ""), @@ -4571,7 +4585,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) !((*group_by_ref)->eq(*select_ref, 0))) { ambiguous_fields= TRUE; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), ref->full_name(), current_thd->where); @@ -5553,7 +5567,7 @@ String *Item::check_well_formed_result(String *str, bool send_error) { str->length(wlen); } - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_INVALID_CHARACTER_STRING, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_INVALID_CHARACTER_STRING, ER(ER_INVALID_CHARACTER_STRING), cs->csname, hexbuf); } return str; @@ -5689,10 +5703,6 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length) field= new Field_double((uchar*) 0, max_length, null_ptr, 0, Field::NONE, name, decimals, 0, unsigned_flag); break; - case MYSQL_TYPE_NULL: - field= new Field_null((uchar*) 0, max_length, Field::NONE, - name, &my_charset_bin); - break; case MYSQL_TYPE_INT24: field= new Field_medium((uchar*) 0, max_length, null_ptr, 0, Field::NONE, name, 0, unsigned_flag); @@ -5723,6 +5733,7 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length) /* This case should never be chosen */ DBUG_ASSERT(0); /* If something goes awfully wrong, it's better to get a string than die */ + case MYSQL_TYPE_NULL: case MYSQL_TYPE_STRING: if (fixed_length && !too_big_for_varchar()) { @@ -6176,7 +6187,7 @@ longlong Item_hex_hybrid::val_int() // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); char *end=(char*) str_value.ptr()+str_value.length(), - *ptr=end-min(str_value.length(),sizeof(longlong)); + *ptr=end-MY_MIN(str_value.length(),sizeof(longlong)); ulonglong value=0; for (; ptr != end ; ptr++) @@ -6212,7 +6223,7 @@ int Item_hex_hybrid::save_in_field(Field *field, bool no_conversions) warn: if 
(!field->store((longlong) nr, TRUE)) - field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, + field->set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } @@ -6220,7 +6231,7 @@ warn: void Item_hex_hybrid::print(String *str, enum_query_type query_type) { - uint32 len= min(str_value.length(), sizeof(longlong)); + uint32 len= MY_MIN(str_value.length(), sizeof(longlong)); const char *ptr= str_value.ptr() + str_value.length() - len; str->append("0x"); str->append_hex(ptr, len); @@ -8225,7 +8236,7 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions) { TABLE_LIST *view= cached_table->top_table(); push_warning_printf(field_arg->table->in_use, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_NO_DEFAULT_FOR_VIEW_FIELD, ER(ER_NO_DEFAULT_FOR_VIEW_FIELD), view->view_db.str, @@ -8234,7 +8245,7 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions) else { push_warning_printf(field_arg->table->in_use, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_NO_DEFAULT_FOR_FIELD, ER(ER_NO_DEFAULT_FOR_FIELD), field_arg->field_name); @@ -9386,14 +9397,14 @@ bool Item_type_holder::join_types(THD *thd, Item *item) /* fix variable decimals which always is NOT_FIXED_DEC */ if (Field::result_merge_type(fld_type) == INT_RESULT) item_decimals= 0; - decimals= max(decimals, item_decimals); + decimals= MY_MAX(decimals, item_decimals); } if (Field::result_merge_type(fld_type) == DECIMAL_RESULT) { - decimals= min(max(decimals, item->decimals), DECIMAL_MAX_SCALE); + decimals= MY_MIN(MY_MAX(decimals, item->decimals), DECIMAL_MAX_SCALE); int item_int_part= item->decimal_int_part(); - int item_prec = max(prev_decimal_int_part, item_int_part) + decimals; - int precision= min(item_prec, DECIMAL_MAX_PRECISION); + int item_prec = MY_MAX(prev_decimal_int_part, item_int_part) + decimals; + int precision= MY_MIN(item_prec, DECIMAL_MAX_PRECISION); unsigned_flag&= 
item->unsigned_flag; max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, @@ -9424,7 +9435,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item) */ if (collation.collation != &my_charset_bin) { - max_length= max(old_max_chars * collation.collation->mbmaxlen, + max_length= MY_MAX(old_max_chars * collation.collation->mbmaxlen, display_length(item) / item->collation.collation->mbmaxlen * collation.collation->mbmaxlen); @@ -9446,7 +9457,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item) { int delta1= max_length_orig - decimals_orig; int delta2= item->max_length - item->decimals; - max_length= max(delta1, delta2) + decimals; + max_length= MY_MAX(delta1, delta2) + decimals; if (fld_type == MYSQL_TYPE_FLOAT && max_length > FLT_DIG + 2) { max_length= MAX_FLOAT_STR_LENGTH; @@ -9464,7 +9475,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item) break; } default: - max_length= max(max_length, display_length(item)); + max_length= MY_MAX(max_length, display_length(item)); }; maybe_null|= item->maybe_null; get_full_info(item); diff --git a/sql/item.h b/sql/item.h index 5514231e4fd..fb2948a9149 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1060,7 +1060,7 @@ public: Item **ref, bool skip_registered); virtual bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); bool get_time(MYSQL_TIME *ltime) - { return get_date(ltime, TIME_TIME_ONLY); } + { return get_date(ltime, TIME_TIME_ONLY | TIME_INVALID_DATES); } bool get_seconds(ulonglong *sec, ulong *sec_part); virtual bool get_date_result(MYSQL_TIME *ltime, ulonglong fuzzydate) { return get_date(ltime,fuzzydate); } @@ -1396,7 +1396,7 @@ public: virtual void bring_value() {} Field *tmp_table_field_from_field_type(TABLE *table, bool fixed_length); - virtual Item_field *filed_for_view_update() { return 0; } + virtual Item_field *field_for_view_update() { return 0; } virtual Item *neg_transformer(THD *thd) { return NULL; } virtual Item *update_value_transformer(uchar *select_arg) { return 
this; } @@ -2125,7 +2125,7 @@ public: bool set_no_const_sub(uchar *arg); Item *replace_equal_field(uchar *arg); inline uint32 max_disp_length() { return field->max_display_length(); } - Item_field *filed_for_view_update() { return this; } + Item_field *field_for_view_update() { return this; } Item *safe_charset_converter(CHARSET_INFO *tocs); int fix_outer_field(THD *thd, Field **field, Item **reference); virtual Item *update_value_transformer(uchar *select_arg); @@ -2616,6 +2616,7 @@ public: str_value.length(), collation.collation); } Item *safe_charset_converter(CHARSET_INFO *tocs); + Item *charset_converter(CHARSET_INFO *tocs, bool lossless); inline void append(char *str, uint length) { str_value.append(str, length); @@ -3140,8 +3141,8 @@ public: } virtual void print(String *str, enum_query_type query_type); void cleanup(); - Item_field *filed_for_view_update() - { return (*ref)->filed_for_view_update(); } + Item_field *field_for_view_update() + { return (*ref)->field_for_view_update(); } virtual Ref_Type ref_type() { return REF; } // Row emulation: forwarding of ROW-related calls to ref @@ -3362,8 +3363,8 @@ public: } bool enumerate_field_refs_processor(uchar *arg) { return orig_item->enumerate_field_refs_processor(arg); } - Item_field *filed_for_view_update() - { return orig_item->filed_for_view_update(); } + Item_field *field_for_view_update() + { return orig_item->field_for_view_update(); } /* Row emulation: forwarding of ROW-related calls to orig_item */ uint cols() diff --git a/sql/item_buff.cc b/sql/item_buff.cc index ce396736d6f..a08ae8d8403 100644 --- a/sql/item_buff.cc +++ b/sql/item_buff.cc @@ -71,7 +71,7 @@ Cached_item::~Cached_item() {} Cached_item_str::Cached_item_str(THD *thd, Item *arg) :item(arg), - value_max_length(min(arg->max_length, thd->variables.max_sort_length)), + value_max_length(MY_MIN(arg->max_length, thd->variables.max_sort_length)), value(value_max_length) {} @@ -81,7 +81,7 @@ bool Cached_item_str::cmp(void) bool tmp; if 
((res=item->val_str(&tmp_value))) - res->length(min(res->length(), value_max_length)); + res->length(MY_MIN(res->length(), value_max_length)); if (null_value != item->null_value) { if ((null_value= item->null_value)) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 5dbd7f8b152..33b94ece45d 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -679,7 +679,7 @@ int Arg_comparator::set_compare_func(Item_result_field *item, Item_result type) { if ((*a)->decimals < NOT_FIXED_DEC && (*b)->decimals < NOT_FIXED_DEC) { - precision= 5 / log_10[max((*a)->decimals, (*b)->decimals) + 1]; + precision= 5 / log_10[MY_MAX((*a)->decimals, (*b)->decimals) + 1]; if (func == &Arg_comparator::compare_real) func= &Arg_comparator::compare_real_fixed; else if (func == &Arg_comparator::compare_e_real) @@ -746,7 +746,7 @@ bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type, } if (status.warnings > 0) - make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN, &err, warn_type, warn_name); return value; @@ -1020,7 +1020,7 @@ int Arg_comparator::compare_binary_string() owner->null_value= 0; uint res1_length= res1->length(); uint res2_length= res2->length(); - int cmp= memcmp(res1->ptr(), res2->ptr(), min(res1_length,res2_length)); + int cmp= memcmp(res1->ptr(), res2->ptr(), MY_MIN(res1_length,res2_length)); return cmp ? 
cmp : (int) (res1_length - res2_length); } } @@ -2418,7 +2418,7 @@ Item_func_ifnull::fix_length_and_dec() uint32 char_length; agg_result_type(&hybrid_type, args, 2); maybe_null=args[1]->maybe_null; - decimals= max(args[0]->decimals, args[1]->decimals); + decimals= MY_MAX(args[0]->decimals, args[1]->decimals); unsigned_flag= args[0]->unsigned_flag && args[1]->unsigned_flag; if (hybrid_type == DECIMAL_RESULT || hybrid_type == INT_RESULT) @@ -2429,10 +2429,10 @@ Item_func_ifnull::fix_length_and_dec() int len1= args[1]->max_char_length() - args[1]->decimals - (args[1]->unsigned_flag ? 0 : 1); - char_length= max(len0, len1) + decimals + (unsigned_flag ? 0 : 1); + char_length= MY_MAX(len0, len1) + decimals + (unsigned_flag ? 0 : 1); } else - char_length= max(args[0]->max_char_length(), args[1]->max_char_length()); + char_length= MY_MAX(args[0]->max_char_length(), args[1]->max_char_length()); switch (hybrid_type) { case STRING_RESULT: @@ -2459,9 +2459,9 @@ uint Item_func_ifnull::decimal_precision() const { int arg0_int_part= args[0]->decimal_int_part(); int arg1_int_part= args[1]->decimal_int_part(); - int max_int_part= max(arg0_int_part, arg1_int_part); + int max_int_part= MY_MAX(arg0_int_part, arg1_int_part); int precision= max_int_part + decimals; - return min(precision, DECIMAL_MAX_PRECISION); + return MY_MIN(precision, DECIMAL_MAX_PRECISION); } @@ -2638,7 +2638,7 @@ Item_func_if::fix_length_and_dec() agg_result_type(&cached_result_type, args + 1, 2); maybe_null= args[1]->maybe_null || args[2]->maybe_null; - decimals= max(args[1]->decimals, args[2]->decimals); + decimals= MY_MAX(args[1]->decimals, args[2]->decimals); unsigned_flag=args[1]->unsigned_flag && args[2]->unsigned_flag; if (cached_result_type == STRING_RESULT) @@ -2662,10 +2662,10 @@ Item_func_if::fix_length_and_dec() int len2= args[2]->max_length - args[2]->decimals - (args[2]->unsigned_flag ? 0 : 1); - char_length= max(len1, len2) + decimals + (unsigned_flag ? 
0 : 1); + char_length= MY_MAX(len1, len2) + decimals + (unsigned_flag ? 0 : 1); } else - char_length= max(args[1]->max_char_length(), args[2]->max_char_length()); + char_length= MY_MAX(args[1]->max_char_length(), args[2]->max_char_length()); fix_char_length(char_length); } @@ -2674,8 +2674,8 @@ uint Item_func_if::decimal_precision() const { int arg1_prec= args[1]->decimal_int_part(); int arg2_prec= args[2]->decimal_int_part(); - int precision=max(arg1_prec,arg2_prec) + decimals; - return min(precision, DECIMAL_MAX_PRECISION); + int precision=MY_MAX(arg1_prec,arg2_prec) + decimals; + return MY_MIN(precision, DECIMAL_MAX_PRECISION); } @@ -2976,7 +2976,7 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref) void Item_func_case::agg_str_lengths(Item* arg) { - fix_char_length(max(max_char_length(), arg->max_char_length())); + fix_char_length(MY_MAX(max_char_length(), arg->max_char_length())); set_if_bigger(decimals, arg->decimals); unsigned_flag= unsigned_flag && arg->unsigned_flag; } @@ -3176,7 +3176,7 @@ uint Item_func_case::decimal_precision() const if (else_expr_num != -1) set_if_bigger(max_int_part, args[else_expr_num]->decimal_int_part()); - return min(max_int_part + decimals, DECIMAL_MAX_PRECISION); + return MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION); } @@ -5169,7 +5169,7 @@ void Item_func_like::turboBM_compute_suffixes(int *suff) else { if (i < g) - g = i; // g = min(i, g) + g = i; // g = MY_MIN(i, g) f = i; while (g >= 0 && pattern[g] == pattern[g + plm1 - f]) g--; @@ -5188,7 +5188,7 @@ void Item_func_like::turboBM_compute_suffixes(int *suff) else { if (i < g) - g = i; // g = min(i, g) + g = i; // g = MY_MIN(i, g) f = i; while (g >= 0 && likeconv(cs, pattern[g]) == likeconv(cs, pattern[g + plm1 - f])) @@ -5309,14 +5309,14 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const register const int v = plm1 - i; turboShift = u - v; bcShift = bmBc[(uint) (uchar) text[i + j]] - plm1 + i; - shift = max(turboShift, bcShift); - 
shift = max(shift, bmGs[i]); + shift = MY_MAX(turboShift, bcShift); + shift = MY_MAX(shift, bmGs[i]); if (shift == bmGs[i]) - u = min(pattern_len - shift, v); + u = MY_MIN(pattern_len - shift, v); else { if (turboShift < bcShift) - shift = max(shift, u + 1); + shift = MY_MAX(shift, u + 1); u = 0; } j+= shift; @@ -5340,14 +5340,14 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const register const int v = plm1 - i; turboShift = u - v; bcShift = bmBc[(uint) likeconv(cs, text[i + j])] - plm1 + i; - shift = max(turboShift, bcShift); - shift = max(shift, bmGs[i]); + shift = MY_MAX(turboShift, bcShift); + shift = MY_MAX(shift, bmGs[i]); if (shift == bmGs[i]) - u = min(pattern_len - shift, v); + u = MY_MIN(pattern_len - shift, v); else { if (turboShift < bcShift) - shift = max(shift, u + 1); + shift = MY_MAX(shift, u + 1); u = 0; } j+= shift; diff --git a/sql/item_create.cc b/sql/item_create.cc index ba1ce2b0d3b..962ea73f320 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -56,7 +56,7 @@ static void wrong_precision_error(uint errcode, Item *a, char buff[1024]; String buf(buff, sizeof(buff), system_charset_info); - my_error(errcode, MYF(0), (uint) min(number, UINT_MAX32), + my_error(errcode, MYF(0), (uint) MY_MIN(number, UINT_MAX32), item_name(a, &buf), maximum); } @@ -2080,19 +2080,6 @@ protected: }; -class Create_func_row_count : public Create_func_arg0 -{ -public: - virtual Item *create_builder(THD *thd); - - static Create_func_row_count s_singleton; - -protected: - Create_func_row_count() {} - virtual ~Create_func_row_count() {} -}; - - class Create_func_rpad : public Create_func_arg3 { public: @@ -4838,18 +4825,6 @@ Create_func_round::create_native(THD *thd, LEX_STRING name, } -Create_func_row_count Create_func_row_count::s_singleton; - -Item* -Create_func_row_count::create_builder(THD *thd) -{ - DBUG_ENTER("Create_func_row_count::create"); - thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); - 
thd->lex->safe_to_cache_query= 0; - DBUG_RETURN(new (thd->mem_root) Item_func_row_count()); -} - - Create_func_rpad Create_func_rpad::s_singleton; Item* @@ -5520,7 +5495,6 @@ static Native_func_registry func_array[] = { { C_STRING_WITH_LEN("RELEASE_LOCK") }, BUILDER(Create_func_release_lock)}, { { C_STRING_WITH_LEN("REVERSE") }, BUILDER(Create_func_reverse)}, { { C_STRING_WITH_LEN("ROUND") }, BUILDER(Create_func_round)}, - { { C_STRING_WITH_LEN("ROW_COUNT") }, BUILDER(Create_func_row_count)}, { { C_STRING_WITH_LEN("RPAD") }, BUILDER(Create_func_rpad)}, { { C_STRING_WITH_LEN("RTRIM") }, BUILDER(Create_func_rtrim)}, { { C_STRING_WITH_LEN("SEC_TO_TIME") }, BUILDER(Create_func_sec_to_time)}, diff --git a/sql/item_func.cc b/sql/item_func.cc index 7ac42ebdbad..69b53871f9f 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -634,7 +634,7 @@ void Item_func::count_decimal_length() set_if_bigger(max_int_part, args[i]->decimal_int_part()); set_if_smaller(unsigned_flag, args[i]->unsigned_flag); } - int precision= min(max_int_part + decimals, DECIMAL_MAX_PRECISION); + int precision= MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION); fix_char_length(my_decimal_precision_to_length_no_truncation(precision, decimals, unsigned_flag)); @@ -694,7 +694,7 @@ void Item_func::signal_divide_by_null() { THD *thd= current_thd; if (thd->variables.sql_mode & MODE_ERROR_FOR_DIVISION_BY_ZERO) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_DIVISION_BY_ZERO, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_DIVISION_BY_ZERO, ER(ER_DIVISION_BY_ZERO)); null_value= 1; } @@ -1028,7 +1028,7 @@ longlong Item_func_signed::val_int_from_str(int *error) if (*error > 0 || end != start+ length) { ErrConvString err(res); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER", err.ptr()); @@ -1065,7 +1065,7 @@ longlong Item_func_signed::val_int() 
return value; err: - push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR, + push_warning(current_thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR, "Cast to signed converted positive out-of-range integer to " "it's negative complement"); return value; @@ -1121,7 +1121,7 @@ longlong Item_func_unsigned::val_int() return value; err: - push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR, + push_warning(current_thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR, "Cast to unsigned converted negative integer to it's " "positive complement"); return value; @@ -1189,7 +1189,7 @@ my_decimal *Item_decimal_typecast::val_decimal(my_decimal *dec) return dec; err: - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, ER(ER_WARN_DATA_OUT_OF_RANGE), name, 1L); @@ -1231,7 +1231,7 @@ double Item_double_typecast::val_real() if ((error= truncate_double(&tmp, max_length, decimals, 0, DBL_MAX))) { push_warning_printf(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, ER(ER_WARN_DATA_OUT_OF_RANGE), name, 1); @@ -1369,10 +1369,10 @@ my_decimal *Item_func_plus::decimal_op(my_decimal *decimal_value) */ void Item_func_additive_op::result_precision() { - decimals= max(args[0]->decimals, args[1]->decimals); + decimals= MY_MAX(args[0]->decimals, args[1]->decimals); int arg1_int= args[0]->decimal_precision() - args[0]->decimals; int arg2_int= args[1]->decimal_precision() - args[1]->decimals; - int precision= max(arg1_int, arg2_int) + 1 + decimals; + int precision= MY_MAX(arg1_int, arg2_int) + 1 + decimals; /* Integer operations keep unsigned_flag if one of arguments is unsigned */ if (result_type() == INT_RESULT) @@ -1610,9 +1610,9 @@ void Item_func_mul::result_precision() unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; else unsigned_flag= args[0]->unsigned_flag & 
args[1]->unsigned_flag; - decimals= min(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE); + decimals= MY_MIN(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE); uint est_prec = args[0]->decimal_precision() + args[1]->decimal_precision(); - uint precision= min(est_prec, DECIMAL_MAX_PRECISION); + uint precision= MY_MIN(est_prec, DECIMAL_MAX_PRECISION); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, unsigned_flag); } @@ -1664,7 +1664,7 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value) void Item_func_div::result_precision() { - uint precision=min(args[0]->decimal_precision() + + uint precision=MY_MIN(args[0]->decimal_precision() + args[1]->decimals + prec_increment, DECIMAL_MAX_PRECISION); @@ -1673,7 +1673,7 @@ void Item_func_div::result_precision() unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; else unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag; - decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); + decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, unsigned_flag); } @@ -1687,7 +1687,7 @@ void Item_func_div::fix_length_and_dec() switch (hybrid_type) { case REAL_RESULT: { - decimals=max(args[0]->decimals,args[1]->decimals)+prec_increment; + decimals=MY_MAX(args[0]->decimals,args[1]->decimals)+prec_increment; set_if_smaller(decimals, NOT_FIXED_DEC); uint tmp=float_length(decimals); if (decimals == NOT_FIXED_DEC) @@ -1877,8 +1877,8 @@ my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value) void Item_func_mod::result_precision() { - decimals= max(args[0]->decimals, args[1]->decimals); - max_length= max(args[0]->max_length, args[1]->max_length); + decimals= MY_MAX(args[0]->decimals, args[1]->decimals); + max_length= MY_MAX(args[0]->max_length, args[1]->max_length); } @@ -2433,7 +2433,7 @@ void Item_func_round::fix_length_and_dec() if 
(args[0]->decimals == NOT_FIXED_DEC) { - decimals= min(decimals_to_set, NOT_FIXED_DEC); + decimals= MY_MIN(decimals_to_set, NOT_FIXED_DEC); max_length= float_length(decimals); hybrid_type= REAL_RESULT; return; @@ -2443,7 +2443,7 @@ void Item_func_round::fix_length_and_dec() case REAL_RESULT: case STRING_RESULT: hybrid_type= REAL_RESULT; - decimals= min(decimals_to_set, NOT_FIXED_DEC); + decimals= MY_MIN(decimals_to_set, NOT_FIXED_DEC); max_length= float_length(decimals); break; case INT_RESULT: @@ -2460,13 +2460,13 @@ void Item_func_round::fix_length_and_dec() case DECIMAL_RESULT: { hybrid_type= DECIMAL_RESULT; - decimals_to_set= min(DECIMAL_MAX_SCALE, decimals_to_set); + decimals_to_set= MY_MIN(DECIMAL_MAX_SCALE, decimals_to_set); int decimals_delta= args[0]->decimals - decimals_to_set; int precision= args[0]->decimal_precision(); int length_increase= ((decimals_delta <= 0) || truncate) ? 0:1; precision-= decimals_delta - length_increase; - decimals= min(decimals_to_set, DECIMAL_MAX_SCALE); + decimals= MY_MIN(decimals_to_set, DECIMAL_MAX_SCALE); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, unsigned_flag); @@ -2577,7 +2577,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value) my_decimal val, *value= args[0]->val_decimal(&val); longlong dec= args[1]->val_int(); if (dec >= 0 || args[1]->unsigned_flag) - dec= min((ulonglong) dec, decimals); + dec= MY_MIN((ulonglong) dec, decimals); else if (dec < INT_MIN) dec= INT_MIN; @@ -3443,7 +3443,7 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func, free_udf(u_d); DBUG_RETURN(TRUE); } - func->max_length=min(initid.max_length,MAX_BLOB_WIDTH); + func->max_length=MY_MIN(initid.max_length,MAX_BLOB_WIDTH); func->maybe_null=initid.maybe_null; const_item_cache=initid.const_item; /* @@ -3452,7 +3452,7 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func, */ if (!const_item_cache && !used_tables_cache) used_tables_cache= RAND_TABLE_BIT; - 
func->decimals=min(initid.decimals,NOT_FIXED_DEC); + func->decimals=MY_MIN(initid.decimals,NOT_FIXED_DEC); } initialized=1; if (error) @@ -3792,7 +3792,7 @@ longlong Item_master_pos_wait::val_int() connection_name= thd->variables.default_master_connection; if (!(mi= master_info_index->get_master_info(&connection_name, - MYSQL_ERROR::WARN_LEVEL_WARN))) + Sql_condition::WARN_LEVEL_WARN))) goto err; if ((event_count = mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2) { @@ -3991,18 +3991,18 @@ public: bool handle_condition(THD * /* thd */, uint sql_errno, const char * /* sqlstate */, - MYSQL_ERROR::enum_warning_level /* level */, + Sql_condition::enum_warning_level /* level */, const char *message, - MYSQL_ERROR ** /* cond_hdl */); + Sql_condition ** /* cond_hdl */); }; bool Lock_wait_timeout_handler:: handle_condition(THD * /* thd */, uint sql_errno, const char * /* sqlstate */, - MYSQL_ERROR::enum_warning_level /* level */, + Sql_condition::enum_warning_level /* level */, const char *message, - MYSQL_ERROR ** /* cond_hdl */) + Sql_condition ** /* cond_hdl */) { if (sql_errno == ER_LOCK_WAIT_TIMEOUT) { @@ -4268,7 +4268,7 @@ longlong Item_func_benchmark::val_int() { char buff[22]; llstr(((longlong) loop_count), buff); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE), "count", buff, "benchmark"); } @@ -6118,7 +6118,7 @@ bool Item_func_match::fix_index() for (keynr=0 ; keynr < fts ; keynr++) { KEY *ft_key=&table->key_info[ft_to_key[keynr]]; - uint key_parts=ft_key->key_parts; + uint key_parts=ft_key->user_defined_key_parts; for (uint part=0 ; part < key_parts ; part++) { @@ -6150,7 +6150,7 @@ bool Item_func_match::fix_index() { // partial keys doesn't work if (max_cnt < arg_count-1 || - max_cnt < table->key_info[ft_to_key[keynr]].key_parts) + max_cnt < table->key_info[ft_to_key[keynr]].user_defined_key_parts) continue; 
key=ft_to_key[keynr]; diff --git a/sql/item_func.h b/sql/item_func.h index bbe70724f79..71225c71639 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -561,8 +561,8 @@ public: longlong val_int_from_str(int *error); void fix_length_and_dec() { - fix_char_length(min(args[0]->max_char_length(), - MY_INT64_NUM_DECIMAL_DIGITS)); + fix_char_length(MY_MIN(args[0]->max_char_length(), + MY_INT64_NUM_DECIMAL_DIGITS)); } virtual void print(String *str, enum_query_type query_type); uint decimal_precision() const { return args[0]->decimal_precision(); } diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 0a7f18e6546..b36375a6e40 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -500,7 +500,7 @@ String *Item_func_spatial_collection::val_str(String *str) } if (str->length() > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 09518bb4bd5..854a99bea02 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -106,23 +106,6 @@ String *Item_str_func::val_str_from_val_str_ascii(String *str, String *str2) } - -/* - Convert an array of bytes to a hexadecimal representation. - - Used to generate a hexadecimal representation of a message digest. 
-*/ -static void array_to_hex(char *to, const unsigned char *str, uint len) -{ - const unsigned char *str_end= str + len; - for (; str != str_end; ++str) - { - *to++= _dig_vec_lower[((uchar) *str) >> 4]; - *to++= _dig_vec_lower[((uchar) *str) & 0x0F]; - } -} - - bool Item_str_func::fix_fields(THD *thd, Item **ref) { bool res= Item_func::fix_fields(thd, ref); @@ -221,17 +204,11 @@ String *Item_func_sha::val_str_ascii(String *str) String * sptr= args[0]->val_str(str); if (sptr) /* If we got value different from NULL */ { - SHA1_CONTEXT context; /* Context used to generate SHA1 hash */ /* Temporary buffer to store 160bit digest */ uint8 digest[SHA1_HASH_SIZE]; - mysql_sha1_reset(&context); /* We do not have to check for error here */ - /* No need to check error as the only case would be too long message */ - mysql_sha1_input(&context, - (const uchar *) sptr->ptr(), sptr->length()); - + compute_sha1_hash(digest, (const char *) sptr->ptr(), sptr->length()); /* Ensure that memory is free and we got result */ - if (!( str->alloc(SHA1_HASH_SIZE*2) || - (mysql_sha1_result(&context,digest)))) + if (!str->alloc(SHA1_HASH_SIZE*2)) { array_to_hex((char *) str->ptr(), digest, SHA1_HASH_SIZE); str->set_charset(&my_charset_numeric); @@ -309,9 +286,9 @@ String *Item_func_sha2::val_str_ascii(String *str) default: if (!args[1]->const_item()) push_warning_printf(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WRONG_PARAMETERS_TO_NATIVE_FCT, - ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2"); + Sql_condition::WARN_LEVEL_WARN, + ER_WRONG_PARAMETERS_TO_NATIVE_FCT, + ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2"); null_value= TRUE; return NULL; } @@ -333,7 +310,7 @@ String *Item_func_sha2::val_str_ascii(String *str) #else push_warning_printf(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED), "sha2", "--with-ssl"); @@ -371,7 +348,7 @@ void Item_func_sha2::fix_length_and_dec() #endif default: 
push_warning_printf(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_WRONG_PARAMETERS_TO_NATIVE_FCT, ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2"); } @@ -390,7 +367,7 @@ void Item_func_sha2::fix_length_and_dec() DERIVATION_COERCIBLE); #else push_warning_printf(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED), "sha2", "--with-ssl"); @@ -592,7 +569,7 @@ String *Item_func_concat::val_str(String *str) if (res->length()+res2->length() > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); @@ -665,7 +642,7 @@ String *Item_func_concat::val_str(String *str) } else { - uint new_len = max(tmp_value.alloced_length() * 2, concat_len); + uint new_len = MY_MAX(tmp_value.alloced_length() * 2, concat_len); if (tmp_value.realloc(new_len)) goto null; @@ -798,11 +775,11 @@ String *Item_func_des_encrypt::val_str(String *str) return &tmp_value; error: - push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN, code, ER(code), "des_encrypt"); #else - push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN, ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED), "des_encrypt", "--with-ssl"); #endif /* defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) */ @@ -876,12 +853,12 @@ String *Item_func_des_decrypt::val_str(String *str) return &tmp_value; error: - push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN, code, ER(code), "des_decrypt"); wrong_key: #else - push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN, + 
push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN, ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED), "des_decrypt", "--with-ssl"); #endif /* defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) */ @@ -932,7 +909,7 @@ String *Item_func_concat_ws::val_str(String *str) if (res->length() + sep_str->length() + res2->length() > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); @@ -1014,7 +991,7 @@ String *Item_func_concat_ws::val_str(String *str) } else { - uint new_len = max(tmp_value.alloced_length() * 2, concat_len); + uint new_len = MY_MAX(tmp_value.alloced_length() * 2, concat_len); if (tmp_value.realloc(new_len)) goto null; @@ -1191,7 +1168,7 @@ redo: if (res->length()-from_length + to_length > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), @@ -1220,7 +1197,7 @@ skip: if (res->length()-from_length + to_length > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); @@ -1307,7 +1284,7 @@ String *Item_func_insert::val_str(String *str) if ((ulonglong) (res->length() - length + res2->length()) > (ulonglong) current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, 
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); @@ -1506,7 +1483,7 @@ String *Item_func_substr::val_str(String *str) length= res->charpos((int) length, (uint32) start); tmp_length= res->length() - start; - length= min(length, tmp_length); + length= MY_MIN(length, tmp_length); if (!start && (longlong) res->length() == length) return res; @@ -1529,7 +1506,7 @@ void Item_func_substr::fix_length_and_dec() else if (start < 0) max_length= ((uint)(-start) > max_length) ? 0 : (uint)(-start); else - max_length-= min((uint)(start - 1), max_length); + max_length-= MY_MIN((uint)(start - 1), max_length); } if (arg_count == 3 && args[2]->const_item()) { @@ -1911,28 +1888,133 @@ void Item_func_trim::print(String *str, enum_query_type query_type) /* Item_func_password */ +/** + Helper function for calculating a new password. Used in + Item_func_password::fix_length_and_dec() for const parameters and in + Item_func_password::val_str_ascii() for non-const parameters. + @param str The plain text password which should be digested + @param buffer a pointer to the buffer where the digest will be stored. + + @note The buffer must be of at least CRYPT_MAX_PASSWORD_SIZE size. + + @return Size of the password. 
+*/ + +static int calculate_password(String *str, char *buffer) +{ + DBUG_ASSERT(str); + if (str->length() == 0) // PASSWORD('') returns '' + return 0; + + int buffer_len= 0; + THD *thd= current_thd; + int old_passwords= 0; + if (thd) + old_passwords= thd->variables.old_passwords; + +#if defined(HAVE_OPENSSL) + if (old_passwords == 2) + { + my_make_scrambled_password(buffer, str->ptr(), + str->length()); + buffer_len= (int) strlen(buffer) + 1; + } + else +#endif + if (old_passwords == 0) + { + my_make_scrambled_password_sha1(buffer, str->ptr(), + str->length()); + buffer_len= SCRAMBLED_PASSWORD_CHAR_LENGTH; + } + else + if (old_passwords == 1) + { + my_make_scrambled_password_323(buffer, str->ptr(), + str->length()); + buffer_len= SCRAMBLED_PASSWORD_CHAR_LENGTH_323; + } + return buffer_len; +} + +/* Item_func_password */ +void Item_func_password::fix_length_and_dec() +{ + maybe_null= false; // PASSWORD() never returns NULL + + if (args[0]->const_item()) + { + String str; + String *res= args[0]->val_str(&str); + if (!args[0]->null_value) + { + m_hashed_password_buffer_len= + calculate_password(res, m_hashed_password_buffer); + fix_length_and_charset(m_hashed_password_buffer_len, default_charset()); + m_recalculate_password= false; + return; + } + } + + m_recalculate_password= true; + fix_length_and_charset(CRYPT_MAX_PASSWORD_SIZE, default_charset()); +} + String *Item_func_password::val_str_ascii(String *str) { DBUG_ASSERT(fixed == 1); - String *res= args[0]->val_str(str); - if ((null_value=args[0]->null_value)) - return 0; - if (res->length() == 0) + + String *res= args[0]->val_str(str); + + if (args[0]->null_value) + res= make_empty_result(); + + /* we treat NULLs as equal to empty string when calling the plugin */ + check_password_policy(res); + + null_value= 0; + if (args[0]->null_value) // PASSWORD(NULL) returns '' + return res; + + if (m_recalculate_password) + m_hashed_password_buffer_len= calculate_password(res, + m_hashed_password_buffer); + + if 
(m_hashed_password_buffer_len == 0) return make_empty_result(); - my_make_scrambled_password(tmp_value, res->ptr(), res->length()); - str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, &my_charset_latin1); + + str->set(m_hashed_password_buffer, m_hashed_password_buffer_len, + default_charset()); + return str; } -char *Item_func_password::alloc(THD *thd, const char *password, - size_t pass_len) +char *Item_func_password:: + create_password_hash_buffer(THD *thd, const char *password, size_t pass_len) { - char *buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1); - if (buff) + String *password_str= new (thd->mem_root)String(password, thd->variables. + character_set_client); + check_password_policy(password_str); + + char *buff= NULL; + if (thd->variables.old_passwords == 0) + { + /* Allocate memory for the password scramble and one extra byte for \0 */ + buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH + 1); + my_make_scrambled_password_sha1(buff, password, pass_len); + } +#if defined(HAVE_OPENSSL) + else + { + /* Allocate memory for the password scramble and one extra byte for \0 */ + buff= (char *) thd->alloc(CRYPT_MAX_PASSWORD_SIZE + 1); my_make_scrambled_password(buff, password, pass_len); + } +#endif return buff; } + /* Item_func_old_password */ String *Item_func_old_password::val_str_ascii(String *str) @@ -2223,7 +2305,7 @@ String *Item_func_soundex::val_str(String *str) if ((null_value= args[0]->null_value)) return 0; /* purecov: inspected */ - if (tmp_value.alloc(max(res->length(), 4 * cs->mbminlen))) + if (tmp_value.alloc(MY_MAX(res->length(), 4 * cs->mbminlen))) return str; /* purecov: inspected */ char *to= (char *) tmp_value.ptr(); char *to_end= to + tmp_value.alloced_length(); @@ -2333,7 +2415,7 @@ MY_LOCALE *Item_func_format::get_locale(Item *item) if (!locale_name || !(lc= my_locale_by_name(locale_name->c_ptr_safe()))) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, 
Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_LOCALE, ER(ER_UNKNOWN_LOCALE), locale_name ? locale_name->c_ptr_safe() : "NULL"); @@ -2723,7 +2805,7 @@ String *Item_func_repeat::val_str(String *str) // Safe length check if (length > current_thd->variables.max_allowed_packet / (uint) count) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); @@ -2856,7 +2938,7 @@ String *Item_func_rpad::val_str(String *str) byte_count= count * collation.collation->mbmaxlen; if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); @@ -2964,7 +3046,7 @@ String *Item_func_lpad::val_str(String *str) if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); @@ -3342,7 +3424,7 @@ String *Item_load_file::val_str(String *str) } if (stat_info.st_size > (long) current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), current_thd->variables.max_allowed_packet); @@ -3428,12 +3510,12 @@ String* Item_func_export_set::val_str(String* str) const ulong max_allowed_packet= current_thd->variables.max_allowed_packet; const uint num_separators= 
num_set_values > 0 ? num_set_values - 1 : 0; const ulonglong max_total_length= - num_set_values * max(yes->length(), no->length()) + + num_set_values * MY_MAX(yes->length(), no->length()) + num_separators * sep->length(); if (unlikely(max_total_length > max_allowed_packet)) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(), max_allowed_packet); @@ -3457,11 +3539,11 @@ String* Item_func_export_set::val_str(String* str) void Item_func_export_set::fix_length_and_dec() { - uint32 length= max(args[1]->max_char_length(), args[2]->max_char_length()); + uint32 length= MY_MAX(args[1]->max_char_length(), args[2]->max_char_length()); uint32 sep_length= (arg_count > 3 ? args[3]->max_char_length() : 1); if (agg_arg_charsets_for_string_result(collation, - args + 1, min(4, arg_count) - 1)) + args + 1, MY_MIN(4, arg_count) - 1)) return; fix_char_length(length * 64 + sep_length * 63); } @@ -3681,7 +3763,7 @@ longlong Item_func_uncompressed_length::val_int() */ if (res->length() <= 4) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_ZLIB_Z_DATA_ERROR, ER(ER_ZLIB_Z_DATA_ERROR)); null_value= 1; @@ -3758,7 +3840,7 @@ String *Item_func_compress::val_str(String *str) res->length())) != Z_OK) { code= err==Z_MEM_ERROR ? 
ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_BUF_ERROR; - push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,code,ER(code)); + push_warning(current_thd,Sql_condition::WARN_LEVEL_WARN,code,ER(code)); null_value= 1; return 0; } @@ -3796,7 +3878,7 @@ String *Item_func_uncompress::val_str(String *str) /* If length is less than 4 bytes, data is corrupt */ if (res->length() <= 4) { - push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN, ER_ZLIB_Z_DATA_ERROR, ER(ER_ZLIB_Z_DATA_ERROR)); goto err; @@ -3806,7 +3888,7 @@ String *Item_func_uncompress::val_str(String *str) new_size= uint4korr(res->ptr()) & 0x3FFFFFFF; if (new_size > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN, ER_TOO_BIG_FOR_UNCOMPRESS, ER(ER_TOO_BIG_FOR_UNCOMPRESS), static_cast<int>(current_thd->variables. @@ -3825,7 +3907,7 @@ String *Item_func_uncompress::val_str(String *str) code= ((err == Z_BUF_ERROR) ? ER_ZLIB_Z_BUF_ERROR : ((err == Z_MEM_ERROR) ? 
ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_DATA_ERROR)); - push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,code,ER(code)); + push_warning(current_thd,Sql_condition::WARN_LEVEL_WARN,code,ER(code)); err: null_value= 1; @@ -4100,10 +4182,8 @@ bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg) } break; case DYN_COL_DATETIME: - args[valpos]->get_date(&vals[i].x.time_value, 0); - break; case DYN_COL_DATE: - args[valpos]->get_date(&vals[i].x.time_value, 0); + args[valpos]->get_date(&vals[i].x.time_value, sql_mode_for_dates()); break; case DYN_COL_TIME: args[valpos]->get_time(&vals[i].x.time_value); @@ -4517,7 +4597,7 @@ longlong Item_dyncol_get::val_int() { char buff[30]; sprintf(buff, "%lg", val.x.double_value); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_DATA_OVERFLOW, ER(ER_DATA_OVERFLOW), buff, @@ -4535,9 +4615,9 @@ longlong Item_dyncol_get::val_int() if (end != org_end || error > 0) { char buff[80]; - strmake(buff, val.x.string.value.str, min(sizeof(buff)-1, + strmake(buff, val.x.string.value.str, MY_MIN(sizeof(buff)-1, val.x.string.value.length)); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_BAD_DATA, ER(ER_BAD_DATA), buff, @@ -4599,9 +4679,9 @@ double Item_dyncol_get::val_real() error) { char buff[80]; - strmake(buff, val.x.string.value.str, min(sizeof(buff)-1, + strmake(buff, val.x.string.value.str, MY_MIN(sizeof(buff)-1, val.x.string.value.length)); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_BAD_DATA, ER(ER_BAD_DATA), buff, "DOUBLE"); @@ -4655,11 +4735,11 @@ my_decimal *Item_dyncol_get::val_decimal(my_decimal *decimal_value) rc= str2my_decimal(0, val.x.string.value.str, val.x.string.value.length, val.x.string.charset, decimal_value); char buff[80]; - strmake(buff, 
val.x.string.value.str, min(sizeof(buff)-1, + strmake(buff, val.x.string.value.str, MY_MIN(sizeof(buff)-1, val.x.string.value.length)); if (rc != E_DEC_OK) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_BAD_DATA, ER(ER_BAD_DATA), buff, "DECIMAL"); diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 84d91a879ff..9b380108542 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -21,6 +21,8 @@ /* This file defines all string functions */ +#include "crypt_genhash_impl.h" + #ifdef USE_PRAGMA_INTERFACE #pragma interface /* gcc class implementation */ #endif @@ -329,16 +331,21 @@ public: class Item_func_password :public Item_str_ascii_func { - char tmp_value[SCRAMBLED_PASSWORD_CHAR_LENGTH+1]; + char m_hashed_password_buffer[CRYPT_MAX_PASSWORD_SIZE + 1]; + unsigned int m_hashed_password_buffer_len; + bool m_recalculate_password; public: - Item_func_password(Item *a) :Item_str_ascii_func(a) {} - String *val_str_ascii(String *str); - void fix_length_and_dec() + Item_func_password(Item *a) :Item_str_ascii_func(a) { - fix_length_and_charset(SCRAMBLED_PASSWORD_CHAR_LENGTH, default_charset()); + m_hashed_password_buffer_len= 0; + m_recalculate_password= false; } + String *val_str_ascii(String *str); + void fix_length_and_dec(); const char *func_name() const { return "password"; } static char *alloc(THD *thd, const char *password, size_t pass_len); + static char *create_password_hash_buffer(THD *thd, const char *password, + size_t pass_len); }; @@ -823,7 +830,7 @@ public: collation.set(args[0]->collation); ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + 2 * collation.collation->mbmaxlen; - max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH); + max_length= (uint32) MY_MIN(max_result_length, MAX_BLOB_WIDTH); } }; @@ -917,10 +924,10 @@ public: const char *func_name() const { return "collate"; } enum Functype functype() const { return COLLATE_FUNC; } 
virtual void print(String *str, enum_query_type query_type); - Item_field *filed_for_view_update() + Item_field *field_for_view_update() { /* this function is transparent for view updating */ - return args[0]->filed_for_view_update(); + return args[0]->field_for_view_update(); } }; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 2128de391e0..87fa8147411 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1089,7 +1089,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join) { char warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SELECT_REDUCED, warn_buff); } substitution= select_lex->item_list.head(); @@ -1758,7 +1758,7 @@ Item_in_subselect::single_value_transformer(JOIN *join) { char warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SELECT_REDUCED, warn_buff); } DBUG_RETURN(false); @@ -4230,7 +4230,7 @@ void subselect_uniquesubquery_engine::print(String *str) { KEY *key_info= tab->table->key_info + tab->ref.key; str->append(STRING_WITH_LEN("<primary_index_lookup>(")); - for (uint i= 0; i < key_info->key_parts; i++) + for (uint i= 0; i < key_info->user_defined_key_parts; i++) tab->ref.items[i]->print(str); str->append(STRING_WITH_LEN(" in ")); str->append(tab->table->s->table_name.str, tab->table->s->table_name.length); @@ -4788,7 +4788,8 @@ bool subselect_hash_sj_engine::init(List<Item> *tmp_columns, uint subquery_id) DBUG_ASSERT( tmp_table->s->uniques || tmp_table->key_info->key_length >= tmp_table->file->max_key_length() || - tmp_table->key_info->key_parts > tmp_table->file->max_key_parts()); + tmp_table->key_info->user_defined_key_parts > + tmp_table->file->max_key_parts()); free_tmp_table(thd, tmp_table); tmp_table= 
NULL; delete result; @@ -4802,7 +4803,7 @@ bool subselect_hash_sj_engine::init(List<Item> *tmp_columns, uint subquery_id) */ DBUG_ASSERT(tmp_table->s->keys == 1 && ((Item_in_subselect *) item)->left_expr->cols() == - tmp_table->key_info->key_parts); + tmp_table->key_info->user_defined_key_parts); if (make_semi_join_conds() || /* A unique_engine is used both for complete and partial matching. */ diff --git a/sql/item_sum.cc b/sql/item_sum.cc index bed9499834a..b3be7339849 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -36,7 +36,7 @@ ulonglong Item_sum::ram_limitation(THD *thd) { - return min(thd->variables.tmp_table_size, + return MY_MIN(thd->variables.tmp_table_size, thd->variables.max_heap_table_size); } @@ -1629,18 +1629,18 @@ void Item_sum_avg::fix_length_and_dec() if (hybrid_type == DECIMAL_RESULT) { int precision= args[0]->decimal_precision() + prec_increment; - decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); + decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, unsigned_flag); - f_precision= min(precision+DECIMAL_LONGLONG_DIGITS, DECIMAL_MAX_PRECISION); + f_precision= MY_MIN(precision+DECIMAL_LONGLONG_DIGITS, DECIMAL_MAX_PRECISION); f_scale= args[0]->decimals; dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale); } else { - decimals= min(args[0]->decimals + prec_increment, NOT_FIXED_DEC); - max_length= min(args[0]->max_length + prec_increment, float_length(decimals)); + decimals= MY_MIN(args[0]->decimals + prec_increment, NOT_FIXED_DEC); + max_length= MY_MIN(args[0]->max_length + prec_increment, float_length(decimals)); } } @@ -1836,13 +1836,13 @@ void Item_sum_variance::fix_length_and_dec() switch (args[0]->result_type()) { case REAL_RESULT: case STRING_RESULT: - decimals= min(args[0]->decimals + 4, NOT_FIXED_DEC); + decimals= MY_MIN(args[0]->decimals + 4, NOT_FIXED_DEC); break; case INT_RESULT: case DECIMAL_RESULT: { int 
precision= args[0]->decimal_precision()*2 + prec_increment; - decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); + decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); max_length= my_decimal_precision_to_length_no_truncation(precision, decimals, unsigned_flag); @@ -3127,7 +3127,7 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)), &well_formed_error); result->length(old_length + add_length); item->warning_for_row= TRUE; - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_CUT_VALUE_GROUP_CONCAT, ER(ER_CUT_VALUE_GROUP_CONCAT), item->row_count); @@ -3555,7 +3555,7 @@ bool Item_func_group_concat::setup(THD *thd) syntax of this function). If there is no ORDER BY clause, we don't create this tree. */ - init_tree(tree, (uint) min(thd->variables.max_heap_table_size, + init_tree(tree, (uint) MY_MIN(thd->variables.max_heap_table_size, thd->variables.sortbuff_size/16), 0, tree_key_length, group_concat_key_cmp_with_order, NULL, (void*) this, diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 788da1a5713..a4b5a18de35 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -146,14 +146,14 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, switch (*++ptr) { /* Year */ case 'Y': - tmp= (char*) val + min(4, val_len); + tmp= (char*) val + MY_MIN(4, val_len); l_time->year= (int) my_strtoll10(val, &tmp, &error); if ((int) (tmp-val) <= 2) l_time->year= year_2000_handling(l_time->year); val= tmp; break; case 'y': - tmp= (char*) val + min(2, val_len); + tmp= (char*) val + MY_MIN(2, val_len); l_time->year= (int) my_strtoll10(val, &tmp, &error); val= tmp; l_time->year= year_2000_handling(l_time->year); @@ -162,7 +162,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, /* Month */ case 'm': case 'c': - tmp= (char*) val + min(2, val_len); + tmp= (char*) val + MY_MIN(2, val_len); l_time->month= (int) 
my_strtoll10(val, &tmp, &error); val= tmp; break; @@ -179,15 +179,15 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, /* Day */ case 'd': case 'e': - tmp= (char*) val + min(2, val_len); + tmp= (char*) val + MY_MIN(2, val_len); l_time->day= (int) my_strtoll10(val, &tmp, &error); val= tmp; break; case 'D': - tmp= (char*) val + min(2, val_len); + tmp= (char*) val + MY_MIN(2, val_len); l_time->day= (int) my_strtoll10(val, &tmp, &error); /* Skip 'st, 'nd, 'th .. */ - val= tmp + min((int) (val_end-tmp), 2); + val= tmp + MY_MIN((int) (val_end-tmp), 2); break; /* Hour */ @@ -198,14 +198,14 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, /* fall through */ case 'k': case 'H': - tmp= (char*) val + min(2, val_len); + tmp= (char*) val + MY_MIN(2, val_len); l_time->hour= (int) my_strtoll10(val, &tmp, &error); val= tmp; break; /* Minute */ case 'i': - tmp= (char*) val + min(2, val_len); + tmp= (char*) val + MY_MIN(2, val_len); l_time->minute= (int) my_strtoll10(val, &tmp, &error); val= tmp; break; @@ -213,7 +213,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, /* Second */ case 's': case 'S': - tmp= (char*) val + min(2, val_len); + tmp= (char*) val + MY_MIN(2, val_len); l_time->second= (int) my_strtoll10(val, &tmp, &error); val= tmp; break; @@ -265,7 +265,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, val= tmp; break; case 'j': - tmp= (char*) val + min(val_len, 3); + tmp= (char*) val + MY_MIN(val_len, 3); yearday= (int) my_strtoll10(val, &tmp, &error); val= tmp; break; @@ -277,7 +277,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, case 'u': sunday_first_n_first_week_non_iso= (*ptr=='U' || *ptr== 'V'); strict_week_number= (*ptr=='V' || *ptr=='v'); - tmp= (char*) val + min(val_len, 2); + tmp= (char*) val + MY_MIN(val_len, 2); if ((week_number= (int) my_strtoll10(val, &tmp, &error)) < 0 || (strict_week_number && !week_number) || week_number > 53) @@ -289,7 +289,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, 
case 'X': case 'x': strict_week_number_year_type= (*ptr=='X'); - tmp= (char*) val + min(4, val_len); + tmp= (char*) val + MY_MIN(4, val_len); strict_week_number_year= (int) my_strtoll10(val, &tmp, &error); val= tmp; break; @@ -425,7 +425,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, { if (!my_isspace(&my_charset_latin1,*val)) { - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, val_begin, length, cached_timestamp_type, NullS); break; @@ -437,8 +437,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, err: { char buff[128]; - strmake(buff, val_begin, min(length, sizeof(buff)-1)); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + strmake(buff, val_begin, MY_MIN(length, sizeof(buff)-1)); + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE), date_time_type, buff, "str_to_date"); } @@ -1714,7 +1714,7 @@ overflow: ltime->hour= TIME_MAX_HOUR+1; check_time_range(ltime, decimals, &unused); - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, err->ptr(), err->length(), MYSQL_TIMESTAMP_TIME, NullS); return 0; @@ -1744,7 +1744,7 @@ void Item_func_date_format::fix_length_and_dec() else { fixed_length=0; - max_length=min(arg1->max_length, MAX_BLOB_WIDTH) * 10 * + max_length=MY_MIN(arg1->max_length, MAX_BLOB_WIDTH) * 10 * collation.collation->mbmaxlen; set_if_smaller(max_length,MAX_BLOB_WIDTH); } @@ -2268,7 +2268,7 @@ String *Item_char_typecast::val_str(String *str) if (cast_length != ~0U && cast_length > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), cast_cs == &my_charset_bin ? 
@@ -2326,7 +2326,7 @@ String *Item_char_typecast::val_str(String *str) res= &str_value; } ErrConvString err(res); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), char_type, err.ptr()); @@ -2348,7 +2348,7 @@ String *Item_char_typecast::val_str(String *str) if (res->length() > current_thd->variables.max_allowed_packet) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), cast_cs == &my_charset_bin ? @@ -2429,6 +2429,7 @@ bool Item_time_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) bool Item_date_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) { + fuzzy_date |= sql_mode_for_dates(); if (get_arg0_date(ltime, fuzzy_date & ~TIME_TIME_ONLY)) return 1; @@ -2441,6 +2442,7 @@ bool Item_date_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) bool Item_datetime_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) { + fuzzy_date |= sql_mode_for_dates(); if (get_arg0_date(ltime, fuzzy_date & ~TIME_TIME_ONLY)) return 1; @@ -2456,7 +2458,7 @@ bool Item_datetime_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) if (ltime->neg) { ErrConvTime str(ltime); - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, &str, MYSQL_TIMESTAMP_DATETIME, 0); return (null_value= 1); } @@ -2513,7 +2515,7 @@ err: void Item_func_add_time::fix_length_and_dec() { enum_field_types arg0_field_type; - decimals= max(args[0]->decimals, args[1]->decimals); + decimals= MY_MAX(args[0]->decimals, args[1]->decimals); /* The field type for the result of an Item_func_add_time function is defined @@ -2610,7 +2612,7 @@ bool Item_func_add_time::get_date(MYSQL_TIME *ltime, ulonglong 
fuzzy_date) check_time_range(ltime, decimals, &was_cut); if (was_cut) - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, &str, MYSQL_TIMESTAMP_TIME, NullS); return (null_value= 0); @@ -2698,7 +2700,7 @@ bool Item_func_timediff::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) check_time_range(ltime, decimals, &was_cut); if (was_cut) - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, &str, MYSQL_TIMESTAMP_TIME, NullS); return (null_value= 0); } @@ -2750,7 +2752,7 @@ bool Item_func_maketime::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) char buf[28]; char *ptr= longlong10_to_str(hour, buf, args[0]->unsigned_flag ? 10 : -10); int len = (int)(ptr - buf) + sprintf(ptr, ":%02u:%02u", (uint)minute, (uint)second); - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, buf, len, MYSQL_TIMESTAMP_TIME, NullS); } @@ -3109,7 +3111,7 @@ bool Item_func_str_to_date::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date) date_time_format.format.length= format->length(); if (extract_date_time(&date_time_format, val->ptr(), val->length(), ltime, cached_timestamp_type, 0, "datetime", - fuzzy_date)) + fuzzy_date | sql_mode_for_dates())) return (null_value=1); if (cached_timestamp_type == MYSQL_TIMESTAMP_TIME && ltime->day) { diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 8a8419c7bd8..11e84cfc1cd 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -115,7 +115,7 @@ public: { int *input_version= (int*)int_arg; /* This function was introduced in 5.5 */ - int output_version= max(*input_version, 50500); + int output_version= MY_MAX(*input_version, 50500); *input_version= output_version; return 0; } @@ -933,7 +933,7 @@ public: const char *func_name() const { return "timediff"; } 
void fix_length_and_dec() { - decimals= max(args[0]->decimals, args[1]->decimals); + decimals= MY_MAX(args[0]->decimals, args[1]->decimals); Item_timefunc::fix_length_and_dec(); } bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date); diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 723429f107a..1aab6b45c74 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -1037,7 +1037,7 @@ static char simpletok[128]= /* ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _ - ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~ € + ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~ \200 */ 0,1,0,0,1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,1,1,0, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0, @@ -2501,12 +2501,12 @@ my_xpath_parse_VariableReference(MY_XPATH *xpath) xpath->item= new Item_func_get_user_var(name); else { - sp_variable_t *spv; + sp_variable *spv; sp_pcontext *spc; LEX *lex; if ((lex= current_thd->lex) && (spc= lex->spcont) && - (spv= spc->find_variable(&name))) + (spv= spc->find_variable(name, false))) { Item_splocal *splocal= new Item_splocal(name, spv->offset, spv->type, 0); #ifndef DBUG_OFF @@ -2815,7 +2815,7 @@ String *Item_xml_str_func::parse_xml(String *raw_xml, String *parsed_xml_buf) my_xml_error_lineno(&p) + 1, (ulong) my_xml_error_pos(&p) + 1, my_xml_error_string(&p)); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_VALUE, ER(ER_WRONG_VALUE), "XML", buf); } diff --git a/sql/key.cc b/sql/key.cc index 0d3db2d5bf5..97388f43ebc 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -78,7 +78,7 @@ int find_ref_key(KEY *key, uint key_count, uchar *record, Field *field, KEY_PART_INFO *key_part; *key_length=0; for (j=0, key_part=key_info->key_part ; - j < key_info->key_parts ; + j < key_info->user_defined_key_parts ; j++, key_part++) { if 
(key_part->offset == fieldpos) @@ -132,7 +132,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, Don't copy data for null values The -1 below is to subtract the null byte which is already handled */ - length= min(key_length, (uint) key_part->store_length-1); + length= MY_MIN(key_length, (uint) key_part->store_length-1); if (with_zerofill) bzero((char*) to_key, length); continue; @@ -142,7 +142,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, key_part->key_part_flag & HA_VAR_LENGTH_PART) { key_length-= HA_KEY_BLOB_LENGTH; - length= min(key_length, key_part->length); + length= MY_MIN(key_length, key_part->length); uint bytes= key_part->field->get_key_image(to_key, length, Field::itRAW); if (with_zerofill && bytes < length) bzero((char*) to_key + bytes, length - bytes); @@ -150,7 +150,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, } else { - length= min(key_length, key_part->length); + length= MY_MIN(key_length, key_part->length); Field *field= key_part->field; CHARSET_INFO *cs= field->charset(); uint bytes= field->get_key_image(to_key, length, Field::itRAW); @@ -202,7 +202,7 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info, Don't copy data for null bytes The -1 below is to subtract the null byte which is already handled */ - length= min(key_length, (uint) key_part->store_length-1); + length= MY_MIN(key_length, (uint) key_part->store_length-1); continue; } } @@ -244,7 +244,7 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info, my_ptrdiff_t ptrdiff= to_record - field->table->record[0]; field->move_field_offset(ptrdiff); key_length-= HA_KEY_BLOB_LENGTH; - length= min(key_length, key_part->length); + length= MY_MIN(key_length, key_part->length); old_map= dbug_tmp_use_all_columns(field->table, field->table->write_set); field->set_key_image(from_key, length); dbug_tmp_restore_column_map(field->table->write_set, old_map); @@ -253,7 +253,7 @@ void key_restore(uchar *to_record, 
uchar *from_key, KEY *key_info, } else { - length= min(key_length, key_part->length); + length= MY_MIN(key_length, key_part->length); /* skip the byte with 'uneven' bits, if used */ memcpy(to_record + key_part->offset, from_key + used_uneven_bits , (size_t) length - used_uneven_bits); @@ -311,7 +311,7 @@ bool key_cmp_if_same(TABLE *table,const uchar *key,uint idx,uint key_length) return 1; continue; } - length= min((uint) (key_end-key), store_length); + length= MY_MIN((uint) (key_end-key), store_length); if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ FIELDFLAG_PACK))) { @@ -389,7 +389,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length, tmp.length(charpos); } if (max_length < field->pack_length()) - tmp.length(min(tmp.length(),max_length)); + tmp.length(MY_MIN(tmp.length(),max_length)); ErrConvString err(&tmp); to->append(err.ptr()); } @@ -413,15 +413,15 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length, idx Key number */ -void key_unpack(String *to,TABLE *table,uint idx) +void key_unpack(String *to,TABLE *table, KEY *key) { KEY_PART_INFO *key_part,*key_part_end; my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); DBUG_ENTER("key_unpack"); to->length(0); - for (key_part=table->key_info[idx].key_part,key_part_end=key_part+ - table->key_info[idx].key_parts ; + for (key_part=key->key_part,key_part_end=key_part+ + key->user_defined_key_parts ; key_part < key_part_end; key_part++) { @@ -431,8 +431,8 @@ void key_unpack(String *to,TABLE *table,uint idx) { if (table->record[0][key_part->null_offset] & key_part->null_bit) { - to->append(STRING_WITH_LEN("NULL")); - continue; + to->append(STRING_WITH_LEN("NULL")); + continue; } } field_unpack(to, key_part->field, table->record[0], key_part->length, @@ -574,7 +574,7 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec) /* loop over all given keys */ do { - key_parts= key_info->key_parts; + key_parts= 
key_info->user_defined_key_parts; key_part= key_info->key_part; key_part_num= 0; @@ -586,8 +586,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec) if (key_part->null_bit) { /* The key_part can contain NULL values */ - bool first_is_null= field->is_null_in_record_with_offset(first_diff); - bool sec_is_null= field->is_null_in_record_with_offset(sec_diff); + bool first_is_null= field->is_real_null(first_diff); + bool sec_is_null= field->is_real_null(sec_diff); /* NULL is smaller then everything so if first is NULL and the other not then we know that we should return -1 and for the opposite diff --git a/sql/key.h b/sql/key.h index 0eeda58cd17..de2b00a4773 100644 --- a/sql/key.h +++ b/sql/key.h @@ -32,7 +32,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, uint key_length, void key_restore(uchar *to_record, uchar *from_key, KEY *key_info, uint key_length); bool key_cmp_if_same(TABLE *form,const uchar *key,uint index,uint key_length); -void key_unpack(String *to,TABLE *form,uint index); +void key_unpack(String *to, TABLE *form, KEY *key); void field_unpack(String *to, Field *field, const uchar *rec, uint max_length, bool prefix_key); bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields); diff --git a/sql/lex.h b/sql/lex.h index 7edb1456e09..c5229beb653 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -151,6 +151,7 @@ static SYMBOL symbols[] = { { "CREATE", SYM(CREATE)}, { "CROSS", SYM(CROSS)}, { "CUBE", SYM(CUBE_SYM)}, + { "CURRENT", SYM(CURRENT_SYM)}, { "CURRENT_DATE", SYM(CURDATE)}, { "CURRENT_POS", SYM(CURRENT_POS_SYM)}, { "CURRENT_TIME", SYM(CURTIME)}, @@ -182,6 +183,7 @@ static SYMBOL symbols[] = { { "DESCRIBE", SYM(DESCRIBE)}, { "DES_KEY_FILE", SYM(DES_KEY_FILE)}, { "DETERMINISTIC", SYM(DETERMINISTIC_SYM)}, + { "DIAGNOSTICS", SYM(DIAGNOSTICS_SYM)}, { "DIRECTORY", SYM(DIRECTORY_SYM)}, { "DISABLE", SYM(DISABLE_SYM)}, { "DISCARD", SYM(DISCARD)}, @@ -214,6 +216,7 @@ static SYMBOL symbols[] = { { "EVENTS", SYM(EVENTS_SYM)}, { 
"EVERY", SYM(EVERY_SYM)}, { "EXAMINED", SYM(EXAMINED_SYM)}, + { "EXCHANGE", SYM(EXCHANGE_SYM)}, { "EXECUTE", SYM(EXECUTE_SYM)}, { "EXISTS", SYM(EXISTS)}, { "EXIT", SYM(EXIT_SYM)}, @@ -246,6 +249,7 @@ static SYMBOL symbols[] = { { "GEOMETRY", SYM(GEOMETRY_SYM)}, { "GEOMETRYCOLLECTION",SYM(GEOMETRYCOLLECTION)}, { "GET_FORMAT", SYM(GET_FORMAT)}, + { "GET", SYM(GET_SYM)}, { "GLOBAL", SYM(GLOBAL_SYM)}, { "GRANT", SYM(GRANT)}, { "GRANTS", SYM(GRANTS)}, @@ -398,6 +402,7 @@ static SYMBOL symbols[] = { { "NOT", SYM(NOT_SYM)}, { "NO_WRITE_TO_BINLOG", SYM(NO_WRITE_TO_BINLOG)}, { "NULL", SYM(NULL_SYM)}, + { "NUMBER", SYM(NUMBER_SYM)}, { "NUMERIC", SYM(NUMERIC_SYM)}, { "NVARCHAR", SYM(NVARCHAR_SYM)}, { "OFFSET", SYM(OFFSET_SYM)}, @@ -484,6 +489,7 @@ static SYMBOL symbols[] = { { "RESTORE", SYM(RESTORE_SYM)}, { "RESTRICT", SYM(RESTRICT)}, { "RESUME", SYM(RESUME_SYM)}, + { "RETURNED_SQLSTATE",SYM(RETURNED_SQLSTATE_SYM)}, { "RETURN", SYM(RETURN_SYM)}, { "RETURNS", SYM(RETURNS_SYM)}, { "REVOKE", SYM(REVOKE)}, @@ -493,6 +499,7 @@ static SYMBOL symbols[] = { { "ROLLUP", SYM(ROLLUP_SYM)}, { "ROUTINE", SYM(ROUTINE_SYM)}, { "ROW", SYM(ROW_SYM)}, + { "ROW_COUNT", SYM(ROW_COUNT_SYM)}, { "ROWS", SYM(ROWS_SYM)}, { "ROW_FORMAT", SYM(ROW_FORMAT_SYM)}, { "RTREE", SYM(RTREE_SYM)}, @@ -555,6 +562,9 @@ static SYMBOL symbols[] = { { "START", SYM(START_SYM)}, { "STARTING", SYM(STARTING)}, { "STARTS", SYM(STARTS_SYM)}, + { "STATS_AUTO_RECALC",SYM(STATS_AUTO_RECALC_SYM)}, + { "STATS_PERSISTENT", SYM(STATS_PERSISTENT_SYM)}, + { "STATS_SAMPLE_PAGES",SYM(STATS_SAMPLE_PAGES_SYM)}, { "STATUS", SYM(STATUS_SYM)}, { "STOP", SYM(STOP_SYM)}, { "STORAGE", SYM(STORAGE_SYM)}, diff --git a/sql/lock.cc b/sql/lock.cc index 67c8b240c6f..c3f6da02ca1 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -312,8 +312,8 @@ bool mysql_lock_tables(THD *thd, MYSQL_LOCK *sql_lock, uint flags) thd_proc_info(thd, "Table lock"); /* Copy the lock data array. thr_multi_lock() reorders its contents. 
*/ - memcpy(sql_lock->locks + sql_lock->lock_count, sql_lock->locks, - sql_lock->lock_count * sizeof(*sql_lock->locks)); + memmove(sql_lock->locks + sql_lock->lock_count, sql_lock->locks, + sql_lock->lock_count * sizeof(*sql_lock->locks)); /* Lock on the copied half of the lock data array. */ rc= thr_lock_errno_to_mysql[(int) thr_multi_lock(sql_lock->locks + sql_lock->lock_count, @@ -692,7 +692,7 @@ static int unlock_external(THD *thd, TABLE **table,uint count) MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags) { - uint i,tables,lock_count; + uint i,lock_count,table_count; MYSQL_LOCK *sql_lock; THR_LOCK_DATA **locks, **locks_buf; TABLE **to, **table_buf; @@ -701,16 +701,15 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags) DBUG_ASSERT((flags == GET_LOCK_UNLOCK) || (flags == GET_LOCK_STORE_LOCKS)); DBUG_PRINT("info", ("count %d", count)); - for (i=tables=lock_count=0 ; i < count ; i++) + for (i=lock_count=table_count=0 ; i < count ; i++) { TABLE *t= table_ptr[i]; - if (t->s->tmp_table != NON_TRANSACTIONAL_TMP_TABLE && t->s->tmp_table != INTERNAL_TMP_TABLE) { - tables+= t->file->lock_count(); - lock_count++; + lock_count+= t->file->lock_count(); + table_count++; } } @@ -722,13 +721,13 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags) */ if (!(sql_lock= (MYSQL_LOCK*) my_malloc(sizeof(*sql_lock) + - sizeof(THR_LOCK_DATA*) * tables * 2 + - sizeof(table_ptr) * lock_count, + sizeof(THR_LOCK_DATA*) * lock_count * 2 + + sizeof(table_ptr) * table_count, MYF(0)))) DBUG_RETURN(0); locks= locks_buf= sql_lock->locks= (THR_LOCK_DATA**) (sql_lock + 1); - to= table_buf= sql_lock->table= (TABLE**) (locks + tables * 2); - sql_lock->table_count=lock_count; + to= table_buf= sql_lock->table= (TABLE**) (locks + lock_count * 2); + sql_lock->table_count= table_count; for (i=0 ; i < count ; i++) { @@ -764,7 +763,7 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags) } } /* 
- We do not use 'tables', because there are cases where store_lock() + We do not use 'lock_count', because there are cases where store_lock() returns less locks than lock_count() claimed. This can happen when a FLUSH TABLES tries to abort locks from a MERGE table of another thread. When that thread has just opened the table, but not yet @@ -778,6 +777,7 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags) And in the FLUSH case, the memory is released quickly anyway. */ sql_lock->lock_count= locks - locks_buf; + DBUG_ASSERT(sql_lock->lock_count <= lock_count); DBUG_PRINT("info", ("sql_lock->table_count %d sql_lock->lock_count %d", sql_lock->table_count, sql_lock->lock_count)); DBUG_RETURN(sql_lock); diff --git a/sql/log.cc b/sql/log.cc index 1a3b651f76f..1295dc087fd 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -171,9 +171,9 @@ public: virtual bool handle_condition(THD *thd, uint sql_errno, const char* sql_state, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); const char *message() const { return m_message; } }; @@ -181,9 +181,9 @@ bool Silence_log_table_errors::handle_condition(THD *, uint, const char*, - MYSQL_ERROR::enum_warning_level, + Sql_condition::enum_warning_level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; strmake_buf(m_message, msg); @@ -783,8 +783,8 @@ bool Log_to_csv_event_handler:: Open_tables_backup open_tables_backup; CHARSET_INFO *client_cs= thd->variables.character_set_client; bool save_time_zone_used; - long query_time= (long) min(query_utime/1000000, TIME_MAX_VALUE_SECONDS); - long lock_time= (long) min(lock_utime/1000000, TIME_MAX_VALUE_SECONDS); + long query_time= (long) MY_MIN(query_utime/1000000, TIME_MAX_VALUE_SECONDS); + long lock_time= (long) MY_MIN(lock_utime/1000000, TIME_MAX_VALUE_SECONDS); long query_time_micro= (long) (query_utime % 1000000); long 
lock_time_micro= (long) (lock_utime % 1000000); @@ -897,6 +897,9 @@ bool Log_to_csv_event_handler:: if (table->field[10]->store(sql_text, sql_text_len, client_cs) < 0) goto err; + if (table->field[11]->store((longlong) thd->thread_id, TRUE)) + goto err; + /* log table entries are not replicated */ if (table->file->ha_write_row(table->record[0])) goto err; @@ -2059,7 +2062,7 @@ bool MYSQL_BIN_LOG::check_write_error(THD *thd) if (!thd->is_error()) DBUG_RETURN(checked); - switch (thd->stmt_da->sql_errno()) + switch (thd->get_stmt_da()->sql_errno()) { case ER_TRANS_CACHE_FULL: case ER_STMT_CACHE_FULL: @@ -2927,7 +2930,7 @@ const char *MYSQL_LOG::generate_name(const char *log_name, { char *p= fn_ext(log_name); uint length= (uint) (p - log_name); - strmake(buff, log_name, min(length, FN_REFLEN-1)); + strmake(buff, log_name, MY_MIN(length, FN_REFLEN-1)); return (const char*)buff; } return log_name; @@ -3825,7 +3828,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log) { if (my_errno == ENOENT) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE), linfo.log_file_name); sql_print_information("Failed to delete file '%s'", @@ -3835,7 +3838,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log) } else { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_BINLOG_PURGE_FATAL_ERR, "a problem with deleting %s; " "consider examining correspondence " @@ -3861,7 +3864,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log) { if (my_errno == ENOENT) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE), index_file_name); sql_print_information("Failed to delete file '%s'", @@ -3871,7 +3874,7 @@ bool 
MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log) } else { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_BINLOG_PURGE_FATAL_ERR, "a problem with deleting %s; " "consider examining correspondence " @@ -4311,7 +4314,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space, */ if (thd) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE), log_info.log_file_name); } @@ -4326,7 +4329,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space, */ if (thd) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_BINLOG_PURGE_FATAL_ERR, "a problem with getting info on being purged %s; " "consider examining correspondence " @@ -4354,7 +4357,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space, { if (thd) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_BINLOG_PURGE_FATAL_ERR, "a problem with deleting %s and " "reading the binlog index file", @@ -4390,7 +4393,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space, { if (thd) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE), log_info.log_file_name); } @@ -4402,7 +4405,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space, { if (thd) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_BINLOG_PURGE_FATAL_ERR, "a problem with deleting %s; " "consider examining correspondence " @@ -4492,7 +4495,7 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time) */ if (thd) { - 
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_BINLOG_PURGE_FATAL_ERR, "a problem with getting info on being purged %s; " "consider examining correspondence " @@ -5390,7 +5393,7 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone, mysql_mutex_lock(&LOCK_rpl_gtid_state); err= rpl_global_gtid_binlog_state.update(>id, opt_gtid_strict_mode); mysql_mutex_unlock(&LOCK_rpl_gtid_state); - if (err && thd->stmt_da->sql_errno()==ER_GTID_STRICT_OUT_OF_ORDER) + if (err && thd->get_stmt_da()->sql_errno()==ER_GTID_STRICT_OUT_OF_ORDER) errno= ER_GTID_STRICT_OUT_OF_ORDER; } else @@ -6372,9 +6375,9 @@ int query_error_code(THD *thd, bool not_killed) if (not_killed || (killed_mask_hard(thd->killed) == KILL_BAD_DATA)) { - error= thd->is_error() ? thd->stmt_da->sql_errno() : 0; + error= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0; - /* thd->stmt_da->sql_errno() might be ER_SERVER_SHUTDOWN or + /* thd->get_get_stmt_da()->sql_errno() might be ER_SERVER_SHUTDOWN or ER_QUERY_INTERRUPTED, So here we need to make sure that error is not set to these errors when specified not_killed by the caller. 
@@ -7307,7 +7310,7 @@ static void print_buffer_to_nt_eventlog(enum loglevel level, char *buff, DBUG_ENTER("print_buffer_to_nt_eventlog"); /* Add ending CR/LF's to string, overwrite last chars if necessary */ - strmov(buffptr+min(length, buffLen-5), "\r\n\r\n"); + strmov(buffptr+MY_MIN(length, buffLen-5), "\r\n\r\n"); setup_windows_event_source(); if ((event= RegisterEventSource(NULL,"MySQL"))) @@ -8546,7 +8549,8 @@ binlog_background_thread(void *arg __attribute__((unused))) sql_print_warning("Failed to load slave replication state from table " "%s.%s: %u: %s", "mysql", rpl_gtid_slave_state_table_name.str, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); #endif mysql_mutex_lock(&mysql_bin_log.LOCK_binlog_background_thread); diff --git a/sql/log_event.cc b/sql/log_event.cc index 4a92414c548..01a5dd7f4e3 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -16,19 +16,12 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ -#ifdef MYSQL_CLIENT - #include "sql_priv.h" -#else - -#ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation -#endif - +#ifndef MYSQL_CLIENT +#include "my_global.h" // REQUIRED by log_event.h > m_string.h > my_bitmap.h #include "sql_priv.h" #include "unireg.h" -#include "my_global.h" // REQUIRED by log_event.h > m_string.h > my_bitmap.h #include "log_event.h" #include "sql_base.h" // close_thread_tables #include "sql_cache.h" // QUERY_CACHE_FLAGS_SIZE @@ -56,6 +49,7 @@ #include <my_bitmap.h> #include "rpl_utility.h" +#define my_b_write_string(A, B) my_b_write((A), (B), (uint) (sizeof(B) - 1)) /** BINLOG_CHECKSUM variable. 
@@ -217,8 +211,9 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, char buff[MAX_SLAVE_ERRMSG], *slider; const char *buff_end= buff + sizeof(buff); uint len; - List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list()); - MYSQL_ERROR *err; + Diagnostics_area::Sql_condition_iterator it= + thd->get_stmt_da()->sql_conditions(); + const Sql_condition *err; buff[0]= 0; for (err= it++, slider= buff; err && slider < buff_end - 1; @@ -230,7 +225,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, } if (ha_error != 0) - rli->report(level, thd->is_error() ? thd->stmt_da->sql_errno() : 0, + rli->report(level, thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0, "Could not execute %s event on table %s.%s;" "%s handler error %s; " "the event's master log %s, end_log_pos %lu", @@ -238,7 +233,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, buff, handler_error == NULL ? "<unknown>" : handler_error, log_name, pos); else - rli->report(level, thd->is_error() ? thd->stmt_da->sql_errno() : 0, + rli->report(level, thd->is_error() ? 
thd->get_stmt_da()->sql_errno() : 0, "Could not execute %s event on table %s.%s;" "%s the event's master log %s, end_log_pos %lu", type, table->s->db.str, table->s->table_name.str, @@ -336,24 +331,24 @@ uint debug_not_change_ts_if_art_event= 1; // bug#29309 simulation static void pretty_print_str(IO_CACHE* cache, const char* str, int len) { const char* end = str + len; - my_b_printf(cache, "\'"); + my_b_write_byte(cache, '\''); while (str < end) { char c; switch ((c=*str++)) { - case '\n': my_b_printf(cache, "\\n"); break; - case '\r': my_b_printf(cache, "\\r"); break; - case '\\': my_b_printf(cache, "\\\\"); break; - case '\b': my_b_printf(cache, "\\b"); break; - case '\t': my_b_printf(cache, "\\t"); break; - case '\'': my_b_printf(cache, "\\'"); break; - case 0 : my_b_printf(cache, "\\0"); break; + case '\n': my_b_write(cache, "\\n", 2); break; + case '\r': my_b_write(cache, "\\r", 2); break; + case '\\': my_b_write(cache, "\\\\", 2); break; + case '\b': my_b_write(cache, "\\b", 2); break; + case '\t': my_b_write(cache, "\\t", 2); break; + case '\'': my_b_write(cache, "\\'", 2); break; + case 0 : my_b_write(cache, "\\0", 2); break; default: - my_b_printf(cache, "%c", c); + my_b_write_byte(cache, c); break; } } - my_b_printf(cache, "\'"); + my_b_write_byte(cache, '\''); } #endif /* MYSQL_CLIENT */ @@ -442,13 +437,13 @@ inline int ignored_error_code(int err_code) */ int convert_handler_error(int error, THD* thd, TABLE *table) { - uint actual_error= (thd->is_error() ? thd->stmt_da->sql_errno() : + uint actual_error= (thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0); if (actual_error == 0) { table->file->print_error(error, MYF(0)); - actual_error= (thd->is_error() ? thd->stmt_da->sql_errno() : + actual_error= (thd->is_error() ? 
thd->get_stmt_da()->sql_errno() : ER_UNKNOWN_ERROR); if (actual_error == ER_UNKNOWN_ERROR) if (global_system_variables.log_warnings) @@ -554,9 +549,8 @@ static char *load_data_tmp_prefix(char *name, /* Add marker that this is a multi-master-file */ *name++='-'; /* Convert connection_name to a safe filename */ - buf_length= strconvert(system_charset_info, connection_name->str, - &my_charset_filename, name, FN_REFLEN, - &errors); + buf_length= strconvert(system_charset_info, connection_name->str, FN_REFLEN, + &my_charset_filename, name, FN_REFLEN, &errors); name+= buf_length; *name++= '-'; } @@ -756,7 +750,7 @@ static void print_set_option(IO_CACHE* file, uint32 bits_changed, if (bits_changed & option) { if (*need_comma) - my_b_printf(file,", "); + my_b_write(file, ", ", 2); my_b_printf(file,"%s=%d", name, test(flags & option)); *need_comma= 1; } @@ -1411,7 +1405,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, of 13 bytes, whereas LOG_EVENT_MINIMAL_HEADER_LEN is 19 bytes (it's "minimal" over the set {MySQL >=4.0}). 
*/ - uint header_size= min(description_event->common_header_len, + uint header_size= MY_MIN(description_event->common_header_len, LOG_EVENT_MINIMAL_HEADER_LEN); LOCK_MUTEX; @@ -1768,7 +1762,7 @@ void Log_event::print_header(IO_CACHE* file, my_off_t hexdump_from= print_event_info->hexdump_from; DBUG_ENTER("Log_event::print_header"); - my_b_printf(file, "#"); + my_b_write_byte(file, '#'); print_timestamp(file); my_b_printf(file, " server id %lu end_log_pos %s ", (ulong) server_id, llstr(log_pos,llbuff)); @@ -1788,7 +1782,7 @@ void Log_event::print_header(IO_CACHE* file, /* mysqlbinlog --hexdump */ if (print_event_info->hexdump_from) { - my_b_printf(file, "\n"); + my_b_write_byte(file, '\n'); uchar *ptr= (uchar*)temp_buf; my_off_t size= uint4korr(ptr + EVENT_LEN_OFFSET) - LOG_EVENT_MINIMAL_HEADER_LEN; @@ -1889,11 +1883,11 @@ static void my_b_write_quoted(IO_CACHE *file, const uchar *ptr, uint length) { const uchar *s; - my_b_printf(file, "'"); + my_b_write_byte(file, '\''); for (s= ptr; length > 0 ; s++, length--) { if (*s > 0x1F) - my_b_write(file, s, 1); + my_b_write_byte(file, *s); else if (*s == '\'') my_b_write(file, "\\'", 2); else if (*s == '\\') @@ -1905,7 +1899,7 @@ my_b_write_quoted(IO_CACHE *file, const uchar *ptr, uint length) my_b_write(file, hex, len); } } - my_b_printf(file, "'"); + my_b_write_byte(file, '\''); } @@ -1920,13 +1914,13 @@ static void my_b_write_bit(IO_CACHE *file, const uchar *ptr, uint nbits) { uint bitnum, nbits8= ((nbits + 7) / 8) * 8, skip_bits= nbits8 - nbits; - my_b_printf(file, "b'"); + my_b_write(file, "b'", 2); for (bitnum= skip_bits ; bitnum < nbits8; bitnum++) { int is_set= (ptr[(bitnum) / 8] >> (7 - bitnum % 8)) & 0x01; - my_b_write(file, (const uchar*) (is_set ? "1" : "0"), 1); + my_b_write_byte(file, (is_set ? 
'1' : '0')); } - my_b_printf(file, "'"); + my_b_write_byte(file, '\''); } @@ -2021,7 +2015,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, int32 si= sint4korr(ptr); uint32 ui= uint4korr(ptr); my_b_write_sint32_and_uint32(file, si, ui); - my_snprintf(typestr, typestr_length, "INT"); + strmake(typestr, "INT", typestr_length); return 4; } @@ -2029,7 +2023,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, { my_b_write_sint32_and_uint32(file, (int) (signed char) *ptr, (uint) (unsigned char) *ptr); - my_snprintf(typestr, typestr_length, "TINYINT"); + strmake(typestr, "TINYINT", typestr_length); return 1; } @@ -2038,7 +2032,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, int32 si= (int32) sint2korr(ptr); uint32 ui= (uint32) uint2korr(ptr); my_b_write_sint32_and_uint32(file, si, ui); - my_snprintf(typestr, typestr_length, "SHORTINT"); + strmake(typestr, "SHORTINT", typestr_length); return 2; } @@ -2047,23 +2041,24 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, int32 si= sint3korr(ptr); uint32 ui= uint3korr(ptr); my_b_write_sint32_and_uint32(file, si, ui); - my_snprintf(typestr, typestr_length, "MEDIUMINT"); + strmake(typestr, "MEDIUMINT", typestr_length); return 3; } case MYSQL_TYPE_LONGLONG: { char tmp[64]; + size_t length; longlong si= sint8korr(ptr); - longlong10_to_str(si, tmp, -10); - my_b_printf(file, "%s", tmp); + length= (longlong10_to_str(si, tmp, -10) - tmp); + my_b_write(file, tmp, length); if (si < 0) { ulonglong ui= uint8korr(ptr); longlong10_to_str((longlong) ui, tmp, 10); my_b_printf(file, " (%s)", tmp); } - my_snprintf(typestr, typestr_length, "LONGINT"); + strmake(typestr, "LONGINT", typestr_length); return 8; } @@ -2072,6 +2067,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, uint precision= meta >> 8; uint decimals= meta & 0xFF; uint bin_size= my_decimal_get_binary_size(precision, decimals); + uint length; my_decimal dec; binary2my_decimal(E_DEC_FATAL_ERROR, (uchar*) ptr, &dec, precision, 
decimals); @@ -2083,7 +2079,8 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, for (i=0; i < end; i++) pos+= sprintf(pos, "%09d.", dec.buf[i]); pos+= sprintf(pos, "%09d", dec.buf[i]); - my_b_printf(file, "%s", buff); + length= (uint) (pos - buff); + my_b_write(file, buff, length); my_snprintf(typestr, typestr_length, "DECIMAL(%d,%d)", precision, decimals); return bin_size; @@ -2096,7 +2093,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, char tmp[320]; sprintf(tmp, "%-20g", (double) fl); my_b_printf(file, "%s", tmp); /* my_snprintf doesn't support %-20g */ - my_snprintf(typestr, typestr_length, "FLOAT"); + strmake(typestr, "FLOAT", typestr_length); return 4; } @@ -2105,8 +2102,8 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, double dbl; float8get(dbl, ptr); char tmp[320]; - sprintf(tmp, "%-.20g", dbl); /* my_snprintf doesn't support %-20g */ - my_b_printf(file, "%s", tmp); + sprintf(tmp, "%-.20g", dbl); /* strmake doesn't support %-20g */ + my_b_printf(file, tmp, "%s"); strcpy(typestr, "DOUBLE"); return 8; } @@ -2125,7 +2122,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, { uint32 i32= uint4korr(ptr); my_b_printf(file, "%d", i32); - my_snprintf(typestr, typestr_length, "TIMESTAMP"); + strmake(typestr, "TIMESTAMP", typestr_length); return 4; } @@ -2150,7 +2147,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, my_b_printf(file, "%04d-%02d-%02d %02d:%02d:%02d", (int) (d / 10000), (int) (d % 10000) / 100, (int) (d % 100), (int) (t / 10000), (int) (t % 10000) / 100, (int) t % 100); - my_snprintf(typestr, typestr_length, "DATETIME"); + strmake(typestr, "DATETIME", typestr_length); return 8; } @@ -2173,7 +2170,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, const char *sign= tmp < 0 ? 
"-" : ""; my_b_printf(file, "'%s%02d:%02d:%02d'", sign, i32 / 10000, (i32 % 10000) / 100, i32 % 100, i32); - my_snprintf(typestr, typestr_length, "TIME"); + strmake(typestr, "TIME", typestr_length); return 3; } @@ -2212,7 +2209,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, *pos--= (char) ('0'+part%10); part/=10; *pos= (char) ('0'+part); my_b_printf(file , "'%s'", buf); - my_snprintf(typestr, typestr_length, "DATE"); + strmake(typestr, "DATE", typestr_length); return 3; } @@ -2220,8 +2217,9 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, { uint i32= uint3korr(ptr); my_b_printf(file , "'%04d:%02d:%02d'", - (i32 / (16L * 32L)), (i32 / 32L % 16L), (i32 % 32L)); - my_snprintf(typestr, typestr_length, "DATE"); + (int)(i32 / (16L * 32L)), (int)(i32 / 32L % 16L), + (int)(i32 % 32L)); + strmake(typestr, "DATE", typestr_length); return 3; } @@ -2229,7 +2227,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, { uint32 i32= *ptr; my_b_printf(file, "%04d", i32+ 1900); - my_snprintf(typestr, typestr_length, "YEAR"); + strmake(typestr, "YEAR", typestr_length); return 1; } @@ -2237,13 +2235,13 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, switch (meta & 0xFF) { case 1: my_b_printf(file, "%d", (int) *ptr); - my_snprintf(typestr, typestr_length, "ENUM(1 byte)"); + strmake(typestr, "ENUM(1 byte)", typestr_length); return 1; case 2: { int32 i32= uint2korr(ptr); my_b_printf(file, "%d", i32); - my_snprintf(typestr, typestr_length, "ENUM(2 bytes)"); + strmake(typestr, "ENUM(2 bytes)", typestr_length); return 2; } default: @@ -2262,22 +2260,22 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr, case 1: length= *ptr; my_b_write_quoted(file, ptr + 1, length); - my_snprintf(typestr, typestr_length, "TINYBLOB/TINYTEXT"); + strmake(typestr, "TINYBLOB/TINYTEXT", typestr_length); return length + 1; case 2: length= uint2korr(ptr); my_b_write_quoted(file, ptr + 2, length); - my_snprintf(typestr, typestr_length, "BLOB/TEXT"); + strmake(typestr, 
"BLOB/TEXT", typestr_length); return length + 2; case 3: length= uint3korr(ptr); my_b_write_quoted(file, ptr + 3, length); - my_snprintf(typestr, typestr_length, "MEDIUMBLOB/MEDIUMTEXT"); + strmake(typestr, "MEDIUMBLOB/MEDIUMTEXT", typestr_length); return length + 3; case 4: length= uint4korr(ptr); my_b_write_quoted(file, ptr + 4, length); - my_snprintf(typestr, typestr_length, "LONGBLOB/LONGTEXT"); + strmake(typestr, "LONGBLOB/LONGTEXT", typestr_length); return length + 4; default: my_b_printf(file, "!! Unknown BLOB packlen=%d", length); @@ -2348,11 +2346,11 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td, if (is_null) { - my_b_printf(file, "### @%d=NULL", i + 1); + my_b_printf(file, "### @%lu=NULL", (ulong)i + 1); } else { - my_b_printf(file, "### @%d=", i + 1); + my_b_printf(file, "### @%lu=", (ulong)i + 1); size_t size= log_event_print_value(file, value, td->type(i), td->field_metadata(i), typestr, sizeof(typestr)); @@ -2364,7 +2362,7 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td, if (print_event_info->verbose > 1) { - my_b_printf(file, " /* "); + my_b_write(file, " /* ", 4); if (typestr[0]) my_b_printf(file, "%s ", typestr); @@ -2374,10 +2372,10 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td, my_b_printf(file, "meta=%d nullable=%d is_null=%d ", td->field_metadata(i), td->maybe_null(i), is_null); - my_b_printf(file, "*/"); + my_b_write(file, "*/", 2); } - my_b_printf(file, "\n"); + my_b_write_byte(file, '\n'); null_bit_index++; } @@ -2423,10 +2421,19 @@ void Rows_log_event::print_verbose(IO_CACHE *file, if (!(map= print_event_info->m_table_map.get_table(m_table_id)) || !(td= map->create_table_def())) { - my_b_printf(file, "### Row event for unknown table #%d", m_table_id); + my_b_printf(file, "### Row event for unknown table #%lu", + (ulong) m_table_id); return; } + /* If the write rows event contained no values for the AI */ + if (((type_code == WRITE_ROWS_EVENT) && 
(m_rows_buf==m_rows_end))) + { + my_b_printf(file, "### INSERT INTO %`s.%`s VALUES ()\n", + map->get_db_name(), map->get_table_name()); + goto end; + } + for (const uchar *value= m_rows_buf; value < m_rows_end; ) { size_t length; @@ -2484,7 +2491,7 @@ void Log_event::print_base64(IO_CACHE* file, if (print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS) { if (my_b_tell(file) == 0) - my_b_printf(file, "\nBINLOG '\n"); + my_b_write_string(file, "\nBINLOG '\n"); my_b_printf(file, "%s\n", tmp_str); @@ -3202,7 +3209,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, be even bigger, but this will suffice to catch most corruption errors that can lead to a crash. */ - if (status_vars_len > min(data_len, MAX_SIZE_LOG_EVENT_STATUS)) + if (status_vars_len > MY_MIN(data_len, MAX_SIZE_LOG_EVENT_STATUS)) { DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0", status_vars_len, data_len)); @@ -3670,7 +3677,7 @@ void Query_log_event::print_query_header(IO_CACHE* file, if (unlikely(tmp)) /* some bits have changed */ { bool need_comma= 0; - my_b_printf(file, "SET "); + my_b_write_string(file, "SET "); print_set_option(file, tmp, OPTION_NO_FOREIGN_KEY_CHECKS, ~flags2, "@@session.foreign_key_checks", &need_comma); print_set_option(file, tmp, OPTION_AUTO_IS_NULL, flags2, @@ -4056,7 +4063,8 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli, "Error during COMMIT: failed to update GTID state in " "%s.%s: %d: %s", "mysql", rpl_gtid_slave_state_table_name.str, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); trans_rollback(thd); sub_id= 0; thd->is_slave_error= 1; @@ -4129,7 +4137,8 @@ START SLAVE; . 
Query: '%s'", expected_error, thd->query()); } /* If the query was not ignored, it is printed to the general log */ - if (!thd->is_error() || thd->stmt_da->sql_errno() != ER_SLAVE_IGNORED_TABLE) + if (!thd->is_error() || + thd->get_stmt_da()->sql_errno() != ER_SLAVE_IGNORED_TABLE) general_log_write(thd, COM_QUERY, thd->query(), thd->query_length()); else { @@ -4154,14 +4163,14 @@ compare_errors: not exist errors", we silently clear the error if TEMPORARY was used. */ if (thd->lex->sql_command == SQLCOM_DROP_TABLE && thd->lex->drop_temporary && - thd->is_error() && thd->stmt_da->sql_errno() == ER_BAD_TABLE_ERROR && + thd->is_error() && thd->get_stmt_da()->sql_errno() == ER_BAD_TABLE_ERROR && !expected_error) - thd->stmt_da->reset_diagnostics_area(); + thd->get_stmt_da()->reset_diagnostics_area(); /* If we expected a non-zero error code, and we don't get the same error code, and it should be ignored or is related to a concurrency issue. */ - actual_error= thd->is_error() ? thd->stmt_da->sql_errno() : 0; + actual_error= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0; DBUG_PRINT("info",("expected_error: %d sql_errno: %d", expected_error, actual_error)); @@ -4179,7 +4188,7 @@ Error on slave: actual message='%s', error code=%d. \ Default database: '%s'. Query: '%s'", ER_SAFE(expected_error), expected_error, - actual_error ? thd->stmt_da->message() : "no error", + actual_error ? thd->get_stmt_da()->message() : "no error", actual_error, print_slave_db_safe(db), query_arg); thd->is_slave_error= 1; @@ -4203,7 +4212,7 @@ Default database: '%s'. Query: '%s'", { rli->report(ERROR_LEVEL, actual_error, "Error '%s' on query. Default database: '%s'. Query: '%s'", - (actual_error ? thd->stmt_da->message() : + (actual_error ? 
thd->get_stmt_da()->message() : "unexpected success or fatal error"), print_slave_db_safe(thd->db), query_arg); thd->is_slave_error= 1; @@ -5436,33 +5445,33 @@ void Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info, my_b_printf(&cache, "%sLOAD DATA ", commented ? "# " : ""); if (check_fname_outside_temp_buf()) - my_b_printf(&cache, "LOCAL "); + my_b_write_string(&cache, "LOCAL "); my_b_printf(&cache, "INFILE '%-*s' ", fname_len, fname); if (sql_ex.opt_flags & REPLACE_FLAG) - my_b_printf(&cache,"REPLACE "); + my_b_write_string(&cache, "REPLACE "); else if (sql_ex.opt_flags & IGNORE_FLAG) - my_b_printf(&cache,"IGNORE "); + my_b_write_string(&cache, "IGNORE "); my_b_printf(&cache, "INTO TABLE `%s`", table_name); - my_b_printf(&cache, " FIELDS TERMINATED BY "); + my_b_write_string(&cache, " FIELDS TERMINATED BY "); pretty_print_str(&cache, sql_ex.field_term, sql_ex.field_term_len); if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG) - my_b_printf(&cache," OPTIONALLY "); - my_b_printf(&cache, " ENCLOSED BY "); + my_b_write_string(&cache, " OPTIONALLY "); + my_b_write_string(&cache, " ENCLOSED BY "); pretty_print_str(&cache, sql_ex.enclosed, sql_ex.enclosed_len); - my_b_printf(&cache, " ESCAPED BY "); + my_b_write_string(&cache, " ESCAPED BY "); pretty_print_str(&cache, sql_ex.escaped, sql_ex.escaped_len); - my_b_printf(&cache," LINES TERMINATED BY "); + my_b_write_string(&cache, " LINES TERMINATED BY "); pretty_print_str(&cache, sql_ex.line_term, sql_ex.line_term_len); if (sql_ex.line_start) { - my_b_printf(&cache," STARTING BY "); + my_b_write_string(&cache," STARTING BY "); pretty_print_str(&cache, sql_ex.line_start, sql_ex.line_start_len); } if ((long) skip_lines > 0) @@ -5472,16 +5481,16 @@ void Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info, { uint i; const char* field = fields; - my_b_printf(&cache, " ("); + my_b_write_string(&cache, " ("); for (i = 0; i < num_fields; i++) { if (i) - my_b_printf(&cache, ","); + 
my_b_write_byte(&cache, ','); my_b_printf(&cache, "%`s", field); field += field_lens[i] + 1; } - my_b_printf(&cache, ")"); + my_b_write_byte(&cache, ')'); } my_b_printf(&cache, "%s\n", print_event_info->delimiter); @@ -5609,7 +5618,7 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli, { thd->set_time(when, when_sec_part); thd->set_query_id(next_query_id()); - thd->warning_info->opt_clear_warning_info(thd->query_id); + thd->get_stmt_da()->opt_clear_warning_info(thd->query_id); TABLE_LIST tables; tables.init_one_table(thd->strmake(thd->db, thd->db_length), @@ -5720,7 +5729,8 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli, update it inside mysql_load(). */ List<Item> tmp_list; - if (mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list, + if (open_temporary_tables(thd, &tables) || + mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list, handle_dup, ignore, net != 0)) thd->is_slave_error= 1; if (thd->cuted_fields) @@ -5755,9 +5765,9 @@ error: thd->catalog= 0; thd->set_db(NULL, 0); /* will free the current database */ thd->reset_query(); - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); thd->is_error() ? 
trans_rollback_stmt(thd) : trans_commit_stmt(thd); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); close_thread_tables(thd); /* - If inside a multi-statement transaction, @@ -5784,8 +5794,8 @@ error: int sql_errno; if (thd->is_error()) { - err= thd->stmt_da->message(); - sql_errno= thd->stmt_da->sql_errno(); + err= thd->get_stmt_da()->message(); + sql_errno= thd->get_stmt_da()->sql_errno(); } else { @@ -5855,7 +5865,7 @@ void Rotate_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) if (print_event_info->short_form) return; print_header(&cache, print_event_info, FALSE); - my_b_printf(&cache, "\tRotate to "); + my_b_write_string(&cache, "\tRotate to "); if (new_log_ident) my_b_write(&cache, (uchar*) new_log_ident, (uint)ident_len); my_b_printf(&cache, " pos: %s\n", llstr(pos, buf)); @@ -6061,9 +6071,9 @@ void Binlog_checkpoint_log_event::print(FILE *file, if (print_event_info->short_form) return; print_header(&cache, print_event_info, FALSE); - my_b_printf(&cache, "\tBinlog checkpoint "); + my_b_write_string(&cache, "\tBinlog checkpoint "); my_b_write(&cache, (uchar*)binlog_file_name, binlog_file_len); - my_b_printf(&cache, "\n"); + my_b_write_byte(&cache, '\n'); } #endif /* MYSQL_CLIENT */ @@ -6264,7 +6274,7 @@ Gtid_log_event::do_apply_event(Relay_log_info const *rli) { /* Need to reset prior "ok" status to give an error. 
*/ thd->clear_error(); - thd->stmt_da->reset_diagnostics_area(); + thd->get_stmt_da()->reset_diagnostics_area(); if (mysql_bin_log.check_strict_gtid_sequence(this->domain_id, this->server_id, this->seq_no)) return 1; @@ -6669,7 +6679,7 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) if (!print_event_info->short_form) { print_header(&cache, print_event_info, FALSE); - my_b_printf(&cache, "\tIntvar\n"); + my_b_write_string(&cache, "\tIntvar\n"); } my_b_printf(&cache, "SET "); @@ -6796,7 +6806,7 @@ void Rand_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) if (!print_event_info->short_form) { print_header(&cache, print_event_info, FALSE); - my_b_printf(&cache, "\tRand\n"); + my_b_write_string(&cache, "\tRand\n"); } my_b_printf(&cache, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s%s\n", llstr(seed1, llbuff),llstr(seed2, llbuff2), @@ -6960,7 +6970,8 @@ int Xid_log_event::do_apply_event(Relay_log_info const *rli) "Error during XID COMMIT: failed to update GTID state in " "%s.%s: %d: %s", "mysql", rpl_gtid_slave_state_table_name.str, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); trans_rollback(thd); thd->is_slave_error= 1; return err; @@ -7232,7 +7243,7 @@ bool User_var_log_event::write(IO_CACHE* file) char buf[UV_NAME_LEN_SIZE]; char buf1[UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE]; - uchar buf2[max(8, DECIMAL_MAX_FIELD_SIZE + 2)], *pos= buf2; + uchar buf2[MY_MAX(8, DECIMAL_MAX_FIELD_SIZE + 2)], *pos= buf2; uint unsigned_len= 0; uint buf1_length; ulong event_length; @@ -7306,10 +7317,10 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) if (!print_event_info->short_form) { print_header(&cache, print_event_info, FALSE); - my_b_printf(&cache, "\tUser_var\n"); + my_b_write_string(&cache, "\tUser_var\n"); } - my_b_printf(&cache, "SET @"); + my_b_write_string(&cache, "SET @"); 
my_b_write_backtick_quote(&cache, name, name_len); if (is_null) @@ -7706,7 +7717,7 @@ void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) return; print_header(&cache, print_event_info, FALSE); - my_b_printf(&cache, "\tStop\n"); + my_b_write_string(&cache, "\tStop\n"); } #endif /* MYSQL_CLIENT */ @@ -7907,7 +7918,7 @@ void Create_file_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info That one is for "file_id: etc" below: in mysqlbinlog we want the #, in SHOW BINLOG EVENTS we don't. */ - my_b_printf(&cache, "#"); + my_b_write_byte(&cache, '#'); } my_b_printf(&cache, " file_id: %d block_len: %d\n", file_id, block_len); @@ -8608,12 +8619,12 @@ void Execute_load_query_log_event::print(FILE* file, if (local_fname) { my_b_write(&cache, (uchar*) query, fn_pos_start); - my_b_printf(&cache, " LOCAL INFILE \'"); + my_b_write_string(&cache, " LOCAL INFILE \'"); my_b_printf(&cache, "%s", local_fname); - my_b_printf(&cache, "\'"); + my_b_write_string(&cache, "\'"); if (dup_handling == LOAD_DUP_REPLACE) - my_b_printf(&cache, " REPLACE"); - my_b_printf(&cache, " INTO"); + my_b_write_string(&cache, " REPLACE"); + my_b_write_string(&cache, " INTO"); my_b_write(&cache, (uchar*) query + fn_pos_end, q_len-fn_pos_end); my_b_printf(&cache, "\n%s\n", print_event_info->delimiter); } @@ -9016,7 +9027,7 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length) trigger false warnings. 
*/ #ifndef HAVE_valgrind - DBUG_DUMP("row_data", row_data, min(length, 32)); + DBUG_DUMP("row_data", row_data, MY_MIN(length, 32)); #endif DBUG_ASSERT(m_rows_buf <= m_rows_cur); @@ -9141,7 +9152,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) if (open_and_lock_tables(thd, rli->tables_to_lock, FALSE, 0)) { - uint actual_error= thd->stmt_da->sql_errno(); + uint actual_error= thd->get_stmt_da()->sql_errno(); if (thd->is_slave_error || thd->is_fatal_error) { /* @@ -9152,7 +9163,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli) */ rli->report(ERROR_LEVEL, actual_error, "Error executing row event: '%s'", - (actual_error ? thd->stmt_da->message() : + (actual_error ? thd->get_stmt_da()->message() : "unexpected success or fatal error")); thd->is_slave_error= 1; } @@ -10098,7 +10109,7 @@ int Table_map_log_event::rewrite_db(const char* new_db, size_t new_len, DBUG_ENTER("Table_map_log_event::rewrite_db"); DBUG_ASSERT(temp_buf); - uint header_len= min(desc->common_header_len, + uint header_len= MY_MIN(desc->common_header_len, LOG_EVENT_MINIMAL_HEADER_LEN) + TABLE_MAP_HEADER_LEN; int len_diff; @@ -10485,7 +10496,7 @@ void Table_map_log_event::print(FILE *, PRINT_EVENT_INFO *print_event_info) print_header(&print_event_info->head_cache, print_event_info, TRUE); my_b_printf(&print_event_info->head_cache, "\tTable_map: %`s.%`s mapped to number %lu\n", - m_dbnam, m_tblnam, m_table_id); + m_dbnam, m_tblnam, (ulong) m_table_id); print_base64(&print_event_info->body_cache, print_event_info, TRUE); } } @@ -11091,7 +11102,7 @@ int Rows_log_event::find_key() We can only use a non-unique key if it allows range scans (ie. skip FULLTEXT indexes and such). 
*/ - last_part= key->key_parts - 1; + last_part= key->user_defined_key_parts - 1; DBUG_PRINT("info", ("Index %s rec_per_key[%u]= %lu", key->name, last_part, key->rec_per_key[last_part])); if (!(m_table->file->index_flags(i, last_part, 1) & HA_READ_NEXT)) @@ -11369,7 +11380,7 @@ int Rows_log_event::find_row(const Relay_log_info *rli) field in the BI image that is null and part of UNNI. */ bool null_found= FALSE; - for (uint i=0; i < keyinfo->key_parts && !null_found; i++) + for (uint i=0; i < keyinfo->user_defined_key_parts && !null_found; i++) { uint fieldnr= keyinfo->key_part[i].fieldnr - 1; Field **f= table->field+fieldnr; diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 698118e3bda..6623d7655d7 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -99,7 +99,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info if (open_and_lock_tables(ev_thd, rli->tables_to_lock, FALSE, 0)) { - uint actual_error= ev_thd->stmt_da->sql_errno(); + uint actual_error= ev_thd->get_stmt_da()->sql_errno(); if (ev_thd->is_slave_error || ev_thd->is_fatal_error) { /* @@ -108,7 +108,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info */ rli->report(ERROR_LEVEL, actual_error, "Error '%s' on opening tables", - (actual_error ? ev_thd->stmt_da->message() : + (actual_error ? ev_thd->get_stmt_da()->message() : "unexpected success or fatal error")); ev_thd->is_slave_error= 1; } @@ -243,10 +243,10 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info break; default: - rli->report(ERROR_LEVEL, ev_thd->stmt_da->sql_errno(), + rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), "Error in %s event: row application failed. %s", ev->get_type_str(), - ev_thd->is_error() ? ev_thd->stmt_da->message() : ""); + ev_thd->is_error() ? 
ev_thd->get_stmt_da()->message() : ""); thd->is_slave_error= 1; break; } @@ -260,12 +260,12 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info if (error) { /* error has occured during the transaction */ - rli->report(ERROR_LEVEL, ev_thd->stmt_da->sql_errno(), + rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), "Error in %s event: error during transaction execution " "on table %s.%s. %s", ev->get_type_str(), table->s->db.str, table->s->table_name.str, - ev_thd->is_error() ? ev_thd->stmt_da->message() : ""); + ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : ""); /* If one day we honour --skip-slave-errors in row-based replication, and @@ -1406,7 +1406,7 @@ int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length) trigger false warnings. */ #ifndef HAVE_valgrind - DBUG_DUMP("row_data", row_data, min(length, 32)); + DBUG_DUMP("row_data", row_data, MY_MIN(length, 32)); #endif DBUG_ASSERT(m_rows_buf <= m_rows_cur); @@ -2366,7 +2366,7 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli) field in the BI image that is null and part of UNNI. */ bool null_found= FALSE; - for (uint i=0; i < keyinfo->key_parts && !null_found; i++) + for (uint i=0; i < keyinfo->user_defined_key_parts && !null_found; i++) { uint fieldnr= keyinfo->key_part[i].fieldnr - 1; Field **f= table->field+fieldnr; diff --git a/sql/mdl.cc b/sql/mdl.cc index 03593f150bd..c3a78f4c40b 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2007, 2011, Oracle and/or its affiliates. +/* Copyright (c) 2007, 2012, Oracle and/or its affiliates. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -14,9 +14,9 @@ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ -#include "mdl.h" #include "sql_class.h" #include "debug_sync.h" +#include "sql_array.h" #include <hash.h> #include <mysqld_error.h> #include <mysql/plugin.h> @@ -29,7 +29,7 @@ static PSI_mutex_key key_MDL_wait_LOCK_wait_status; static PSI_mutex_info all_mdl_mutexes[]= { - { &key_MDL_map_mutex, "MDL_map::mutex", PSI_FLAG_GLOBAL}, + { &key_MDL_map_mutex, "MDL_map::mutex", 0}, { &key_MDL_wait_LOCK_wait_status, "MDL_wait::LOCK_wait_status", 0} }; @@ -114,23 +114,28 @@ class MDL_object_lock_cache_adapter; /** - A collection of all MDL locks. A singleton, - there is only one instance of the map in the server. + A partition in a collection of all MDL locks. + MDL_map is partitioned for scalability reasons. Maps MDL_key to MDL_lock instances. */ -class MDL_map +class MDL_map_partition { public: - void init(); - void destroy(); - MDL_lock *find_or_insert(const MDL_key *key); - unsigned long get_lock_owner(const MDL_key *key); - void remove(MDL_lock *lock); + MDL_map_partition(); + ~MDL_map_partition(); + inline MDL_lock *find_or_insert(const MDL_key *mdl_key, + my_hash_value_type hash_value); + unsigned long get_lock_owner(const MDL_key *key, + my_hash_value_type hash_value); + inline void remove(MDL_lock *lock); + my_hash_value_type get_key_hash(const MDL_key *mdl_key) const + { + return my_calc_hash(&m_locks, mdl_key->ptr(), mdl_key->length()); + } private: bool move_from_hash_to_lock_mutex(MDL_lock *lock); -private: - /** All acquired locks in the server. */ + /** A partition of all acquired locks in the server. */ HASH m_locks; /* Protects access to m_locks hash. 
*/ mysql_mutex_t m_mutex; @@ -153,6 +158,31 @@ private: I_P_List_counter> Lock_cache; Lock_cache m_unused_locks_cache; +}; + + +/** + Start-up parameter for the number of partitions of the MDL_lock hash. +*/ +ulong mdl_locks_hash_partitions; + +/** + A collection of all MDL locks. A singleton, + there is only one instance of the map in the server. + Contains instances of MDL_map_partition +*/ + +class MDL_map +{ +public: + void init(); + void destroy(); + MDL_lock *find_or_insert(const MDL_key *key); + unsigned long get_lock_owner(const MDL_key *key); + void remove(MDL_lock *lock); +private: + /** Array of partitions where the locks are actually stored. */ + Dynamic_array<MDL_map_partition *> m_partitions; /** Pre-allocated MDL_lock object for GLOBAL namespace. */ MDL_lock *m_global_lock; /** Pre-allocated MDL_lock object for COMMIT namespace. */ @@ -319,7 +349,7 @@ Deadlock_detection_visitor::opt_change_victim_to(MDL_context *new_victim) class MDL_lock { public: - typedef uchar bitmap_t; + typedef unsigned short bitmap_t; class Ticket_list { @@ -400,7 +430,9 @@ public: bool can_grant_lock(enum_mdl_type type, MDL_context *requstor_ctx, bool ignore_lock_priority) const; - inline static MDL_lock *create(const MDL_key *key); + inline static MDL_lock *create(const MDL_key *key, + MDL_map_partition *map_part); + inline unsigned long get_lock_owner() const; void reschedule_waiters(); @@ -428,13 +460,14 @@ public: public: - MDL_lock(const MDL_key *key_arg) + MDL_lock(const MDL_key *key_arg, MDL_map_partition *map_part) : key(key_arg), m_hog_lock_count(0), m_ref_usage(0), m_ref_release(0), m_is_destroyed(FALSE), - m_version(0) + m_version(0), + m_map_part(map_part) { mysql_prlock_init(key_MDL_lock_rwlock, &m_rwlock); } @@ -447,18 +480,18 @@ public: public: /** These three members are used to make it possible to separate - the mdl_locks.m_mutex mutex and MDL_lock::m_rwlock in + the MDL_map_partition::m_mutex mutex and MDL_lock::m_rwlock in MDL_map::find_or_insert() for 
increased scalability. The 'm_is_destroyed' member is only set by destroyers that - have both the mdl_locks.m_mutex and MDL_lock::m_rwlock, thus + have both the MDL_map_partition::m_mutex and MDL_lock::m_rwlock, thus holding any of the mutexes is sufficient to read it. The 'm_ref_usage; is incremented under protection by - mdl_locks.m_mutex, but when 'm_is_destroyed' is set to TRUE, this + MDL_map_partition::m_mutex, but when 'm_is_destroyed' is set to TRUE, this member is moved to be protected by the MDL_lock::m_rwlock. This means that the MDL_map::find_or_insert() which only holds the MDL_lock::m_rwlock can compare it to 'm_ref_release' - without acquiring mdl_locks.m_mutex again and if equal it can also - destroy the lock object safely. + without acquiring MDL_map_partition::m_mutex again and if equal + it can also destroy the lock object safely. The 'm_ref_release' is incremented under protection by MDL_lock::m_rwlock. Note since we are only interested in equality of these two @@ -472,19 +505,23 @@ public: /** We use the same idea and an additional version counter to support caching of unused MDL_lock object for further re-use. - This counter is incremented while holding both MDL_map::m_mutex and - MDL_lock::m_rwlock locks each time when a MDL_lock is moved from - the hash to the unused objects list (or destroyed). + This counter is incremented while holding both MDL_map_partition::m_mutex + and MDL_lock::m_rwlock locks each time when a MDL_lock is moved from + the partitioned hash to the paritioned unused objects list (or destroyed). 
A thread, which has found a MDL_lock object for the key in the hash - and then released the MDL_map::m_mutex before acquiring the + and then released the MDL_map_partition::m_mutex before acquiring the MDL_lock::m_rwlock, can determine that this object was moved to the unused objects list (or destroyed) while it held no locks by comparing - the version value which it read while holding the MDL_map::m_mutex + the version value which it read while holding the MDL_map_partition::m_mutex with the value read after acquiring the MDL_lock::m_rwlock. Note that since it takes several years to overflow this counter such theoretically possible overflows should not have any practical effects. */ ulonglong m_version; + /** + Partition of MDL_map where the lock is stored. + */ + MDL_map_partition *m_map_part; }; @@ -497,8 +534,8 @@ public: class MDL_scoped_lock : public MDL_lock { public: - MDL_scoped_lock(const MDL_key *key_arg) - : MDL_lock(key_arg) + MDL_scoped_lock(const MDL_key *key_arg, MDL_map_partition *map_part) + : MDL_lock(key_arg, map_part) { } virtual const bitmap_t *incompatible_granted_types_bitmap() const @@ -538,8 +575,8 @@ private: class MDL_object_lock : public MDL_lock { public: - MDL_object_lock(const MDL_key *key_arg) - : MDL_lock(key_arg) + MDL_object_lock(const MDL_key *key_arg, MDL_map_partition *map_part) + : MDL_lock(key_arg, map_part) { } /** @@ -572,7 +609,7 @@ public: } virtual bool needs_notification(const MDL_ticket *ticket) const { - return ticket->is_upgradable_or_exclusive(); + return (ticket->get_type() >= MDL_SHARED_NO_WRITE); } virtual void notify_conflicting_locks(MDL_context *ctx); @@ -669,33 +706,62 @@ void mdl_destroy() } -/** Initialize the global hash containing all MDL locks. */ +/** Initialize the container for all MDL locks. 
*/ void MDL_map::init() { MDL_key global_lock_key(MDL_key::GLOBAL, "", ""); MDL_key commit_lock_key(MDL_key::COMMIT, "", ""); + m_global_lock= MDL_lock::create(&global_lock_key, NULL); + m_commit_lock= MDL_lock::create(&commit_lock_key, NULL); + + for (uint i= 0; i < mdl_locks_hash_partitions; i++) + { + MDL_map_partition *part= new (std::nothrow) MDL_map_partition(); + m_partitions.append(part); + } +} + + +/** Initialize the partition in the container with all MDL locks. */ + +MDL_map_partition::MDL_map_partition() +{ mysql_mutex_init(key_MDL_map_mutex, &m_mutex, NULL); my_hash_init(&m_locks, &my_charset_bin, 16 /* FIXME */, 0, 0, mdl_locks_key, 0, 0); - m_global_lock= MDL_lock::create(&global_lock_key); - m_commit_lock= MDL_lock::create(&commit_lock_key); -} +}; /** - Destroy the global hash containing all MDL locks. + Destroy the container for all MDL locks. @pre It must be empty. */ void MDL_map::destroy() { + MDL_lock::destroy(m_global_lock); + MDL_lock::destroy(m_commit_lock); + + while (m_partitions.elements() > 0) + { + MDL_map_partition *part= m_partitions.pop(); + delete part; + } +} + + +/** + Destroy the partition in container for all MDL locks. + @pre It must be empty. +*/ + +MDL_map_partition::~MDL_map_partition() +{ DBUG_ASSERT(!m_locks.records); mysql_mutex_destroy(&m_mutex); my_hash_free(&m_locks); - MDL_lock::destroy(m_global_lock); - MDL_lock::destroy(m_commit_lock); MDL_object_lock *lock; while ((lock= m_unused_locks_cache.pop_front())) @@ -715,13 +781,12 @@ void MDL_map::destroy() MDL_lock* MDL_map::find_or_insert(const MDL_key *mdl_key) { MDL_lock *lock; - my_hash_value_type hash_value; if (mdl_key->mdl_namespace() == MDL_key::GLOBAL || mdl_key->mdl_namespace() == MDL_key::COMMIT) { /* - Avoid locking m_mutex when lock for GLOBAL or COMMIT namespace is + Avoid locking any m_mutex when lock for GLOBAL or COMMIT namespace is requested. Return pointer to pre-allocated MDL_lock instance instead. 
Such an optimization allows to save one mutex lock/unlock for any statement changing data. @@ -739,8 +804,27 @@ MDL_lock* MDL_map::find_or_insert(const MDL_key *mdl_key) return lock; } + my_hash_value_type hash_value= m_partitions.at(0)->get_key_hash(mdl_key); + uint part_id= hash_value % mdl_locks_hash_partitions; + MDL_map_partition *part= m_partitions.at(part_id); + + return part->find_or_insert(mdl_key, hash_value); +} + + +/** + Find MDL_lock object corresponding to the key and hash value in + MDL_map partition, create it if it does not exist. + + @retval non-NULL - Success. MDL_lock instance for the key with + locked MDL_lock::m_rwlock. + @retval NULL - Failure (OOM). +*/ - hash_value= my_calc_hash(&m_locks, mdl_key->ptr(), mdl_key->length()); +MDL_lock* MDL_map_partition::find_or_insert(const MDL_key *mdl_key, + my_hash_value_type hash_value) +{ + MDL_lock *lock; retry: mysql_mutex_lock(&m_mutex); @@ -773,7 +857,7 @@ retry: } else { - lock= MDL_lock::create(mdl_key); + lock= MDL_lock::create(mdl_key, this); } if (!lock || my_hash_insert(&m_locks, (uchar*)lock)) @@ -804,7 +888,7 @@ retry: /** - Release mdl_locks.m_mutex mutex and lock MDL_lock::m_rwlock for lock + Release MDL_map_partition::m_mutex mutex and lock MDL_lock::m_rwlock for lock object from the hash. Handle situation when object was released while we held no locks. @@ -813,7 +897,7 @@ retry: should re-try looking up MDL_lock object in the hash. */ -bool MDL_map::move_from_hash_to_lock_mutex(MDL_lock *lock) +bool MDL_map_partition::move_from_hash_to_lock_mutex(MDL_lock *lock) { ulonglong version; @@ -822,8 +906,8 @@ bool MDL_map::move_from_hash_to_lock_mutex(MDL_lock *lock) /* We increment m_ref_usage which is a reference counter protected by - mdl_locks.m_mutex under the condition it is present in the hash and - m_is_destroyed is FALSE. + MDL_map_partition::m_mutex under the condition it is present in the hash + and m_is_destroyed is FALSE. 
*/ lock->m_ref_usage++; /* Read value of the version counter under protection of m_mutex lock. */ @@ -897,22 +981,36 @@ MDL_map::get_lock_owner(const MDL_key *mdl_key) } else { - my_hash_value_type hash_value= my_calc_hash(&m_locks, - mdl_key->ptr(), - mdl_key->length()); - mysql_mutex_lock(&m_mutex); - lock= (MDL_lock*) my_hash_search_using_hash_value(&m_locks, - hash_value, - mdl_key->ptr(), - mdl_key->length()); - if (lock) - res= lock->get_lock_owner(); - mysql_mutex_unlock(&m_mutex); + my_hash_value_type hash_value= m_partitions.at(0)->get_key_hash(mdl_key); + uint part_id= hash_value % mdl_locks_hash_partitions; + MDL_map_partition *part= m_partitions.at(part_id); + res= part->get_lock_owner(mdl_key, hash_value); } return res; } + +unsigned long +MDL_map_partition::get_lock_owner(const MDL_key *mdl_key, + my_hash_value_type hash_value) +{ + MDL_lock *lock; + unsigned long res= 0; + + mysql_mutex_lock(&m_mutex); + lock= (MDL_lock*) my_hash_search_using_hash_value(&m_locks, + hash_value, + mdl_key->ptr(), + mdl_key->length()); + if (lock) + res= lock->get_lock_owner(); + mysql_mutex_unlock(&m_mutex); + + return res; +} + + /** Destroy MDL_lock object or delegate this responsibility to whatever thread that holds the last outstanding reference to @@ -932,28 +1030,41 @@ void MDL_map::remove(MDL_lock *lock) return; } + lock->m_map_part->remove(lock); +} + + +/** + Destroy MDL_lock object belonging to specific MDL_map + partition or delegate this responsibility to whatever + thread that holds the last outstanding reference to it. +*/ + +void MDL_map_partition::remove(MDL_lock *lock) +{ mysql_mutex_lock(&m_mutex); my_hash_delete(&m_locks, (uchar*) lock); /* To let threads holding references to the MDL_lock object know that it was moved to the list of unused objects or destroyed, we increment the version - counter under protection of both MDL_map::m_mutex and MDL_lock::m_rwlock - locks. This allows us to read the version value while having either one - of those locks. 
+ counter under protection of both MDL_map_partition::m_mutex and + MDL_lock::m_rwlock locks. This allows us to read the version value while + having either one of those locks. */ lock->m_version++; if ((lock->key.mdl_namespace() != MDL_key::SCHEMA) && - (m_unused_locks_cache.elements() < mdl_locks_cache_size)) + (m_unused_locks_cache.elements() < + mdl_locks_cache_size/mdl_locks_hash_partitions)) { /* This is an object of MDL_object_lock type and the cache of unused objects has not reached its maximum size yet. So instead of destroying object we move it to the list of unused objects to allow its later re-use with possibly different key. Any threads holding references to - this object (owning MDL_map::m_mutex or MDL_lock::m_rwlock) will notice - this thanks to the fact that we have changed the MDL_lock::m_version - counter. + this object (owning MDL_map_partition::m_mutex or MDL_lock::m_rwlock) + will notice this thanks to the fact that we have changed the + MDL_lock::m_version counter. */ DBUG_ASSERT(lock->key.mdl_namespace() != MDL_key::GLOBAL && lock->key.mdl_namespace() != MDL_key::COMMIT); @@ -970,8 +1081,8 @@ void MDL_map::remove(MDL_lock *lock) has the responsibility to release it. Setting of m_is_destroyed to TRUE while holding _both_ - mdl_locks.m_mutex and MDL_lock::m_rwlock mutexes transfers the - protection of m_ref_usage from mdl_locks.m_mutex to + MDL_map_partition::m_mutex and MDL_lock::m_rwlock mutexes transfers + the protection of m_ref_usage from MDL_map_partition::m_mutex to MDL_lock::m_rwlock while removal of the object from the hash (and cache of unused objects) makes it read-only. 
Therefore whoever acquires MDL_lock::m_rwlock next will see the most up @@ -1001,7 +1112,8 @@ void MDL_map::remove(MDL_lock *lock) */ MDL_context::MDL_context() - : m_thd(NULL), + : + m_owner(NULL), m_needs_thr_lock_abort(FALSE), m_waiting_for(NULL) { @@ -1023,9 +1135,9 @@ MDL_context::MDL_context() void MDL_context::destroy() { - DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty() && - m_tickets[MDL_TRANSACTION].is_empty() && - m_tickets[MDL_EXPLICIT].is_empty()); + DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty()); + DBUG_ASSERT(m_tickets[MDL_TRANSACTION].is_empty()); + DBUG_ASSERT(m_tickets[MDL_EXPLICIT].is_empty()); mysql_prlock_destroy(&m_LOCK_waiting_for); } @@ -1090,16 +1202,17 @@ void MDL_request::init(const MDL_key *key_arg, @note Also chooses an MDL_lock descendant appropriate for object namespace. */ -inline MDL_lock *MDL_lock::create(const MDL_key *mdl_key) +inline MDL_lock *MDL_lock::create(const MDL_key *mdl_key, + MDL_map_partition *map_part) { switch (mdl_key->mdl_namespace()) { case MDL_key::GLOBAL: case MDL_key::SCHEMA: case MDL_key::COMMIT: - return new MDL_scoped_lock(mdl_key); + return new (std::nothrow) MDL_scoped_lock(mdl_key, map_part); default: - return new MDL_object_lock(mdl_key); + return new (std::nothrow) MDL_object_lock(mdl_key, map_part); } } @@ -1124,7 +1237,8 @@ MDL_ticket *MDL_ticket::create(MDL_context *ctx_arg, enum_mdl_type type_arg #endif ) { - return new MDL_ticket(ctx_arg, type_arg + return new (std::nothrow) + MDL_ticket(ctx_arg, type_arg #ifndef DBUG_OFF , duration_arg #endif @@ -1148,7 +1262,7 @@ void MDL_ticket::destroy(MDL_ticket *ticket) uint MDL_ticket::get_deadlock_weight() const { return (m_lock->key.mdl_namespace() == MDL_key::GLOBAL || - m_type >= MDL_SHARED_NO_WRITE ? + m_type >= MDL_SHARED_UPGRADABLE ? DEADLOCK_WEIGHT_DDL : DEADLOCK_WEIGHT_DML); } @@ -1217,6 +1331,7 @@ void MDL_wait::reset_status() /** Wait for the status to be assigned to this wait slot. + @param owner MDL context owner. 
@param abs_timeout Absolute time after which waiting should stop. @param set_status_on_timeout TRUE - If in case of timeout waiting context should close the wait slot by @@ -1228,7 +1343,7 @@ void MDL_wait::reset_status() */ MDL_wait::enum_wait_status -MDL_wait::timed_wait(THD *thd, struct timespec *abs_timeout, +MDL_wait::timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout, bool set_status_on_timeout, const PSI_stage_info *wait_state_name) { @@ -1239,16 +1354,16 @@ MDL_wait::timed_wait(THD *thd, struct timespec *abs_timeout, mysql_mutex_lock(&m_LOCK_wait_status); - THD_ENTER_COND(thd, &m_COND_wait_status, &m_LOCK_wait_status, - wait_state_name, & old_stage); - thd_wait_begin(thd, THD_WAIT_META_DATA_LOCK); - while (!m_wait_status && !thd->killed && + owner->ENTER_COND(&m_COND_wait_status, &m_LOCK_wait_status, + wait_state_name, & old_stage); + thd_wait_begin(NULL, THD_WAIT_META_DATA_LOCK); + while (!m_wait_status && !owner->is_killed() && wait_result != ETIMEDOUT && wait_result != ETIME) { wait_result= mysql_cond_timedwait(&m_COND_wait_status, &m_LOCK_wait_status, abs_timeout); } - thd_wait_end(thd); + thd_wait_end(NULL); if (m_wait_status == EMPTY) { @@ -1264,14 +1379,14 @@ MDL_wait::timed_wait(THD *thd, struct timespec *abs_timeout, false, which means that the caller intends to restart the wait. */ - if (thd->killed) + if (owner->is_killed()) m_wait_status= KILLED; else if (set_status_on_timeout) m_wait_status= TIMEOUT; } result= m_wait_status; - thd->EXIT_COND(& old_stage); + owner->EXIT_COND(& old_stage); DBUG_RETURN(result); } @@ -1480,22 +1595,12 @@ void MDL_lock::reschedule_waiters() lock. Arrays of bitmaps which elements specify which granted/waiting locks are incompatible with type of lock being requested. - Here is how types of individual locks are translated to type of scoped lock: - - ----------------+-------------+ - Type of request | Correspond. | - for indiv. 
lock | scoped lock | - ----------------+-------------+ - S, SH, SR, SW | IS | - SNW, SNRW, X | IX | - SNW, SNRW -> X | IX (*) | - The first array specifies if particular type of request can be satisfied if there is granted scoped lock of certain type. | Type of active | Request | scoped lock | - type | IS(**) IX S X | + type | IS(*) IX S X | ---------+------------------+ IS | + + + + | IX | + + - - | @@ -1508,7 +1613,7 @@ void MDL_lock::reschedule_waiters() | Pending | Request | scoped lock | - type | IS(**) IX S X | + type | IS(*) IX S X | ---------+-----------------+ IS | + + + + | IX | + + - - | @@ -1518,24 +1623,33 @@ void MDL_lock::reschedule_waiters() Here: "+" -- means that request can be satisfied "-" -- means that request can't be satisfied and should wait - (*) Since for upgradable locks we always take intention exclusive scoped - lock at the same time when obtaining the shared lock, there is no - need to obtain such lock during the upgrade itself. - (**) Since intention shared scoped locks are compatible with all other - type of locks we don't even have any accounting for them. + (*) Since intention shared scoped locks are compatible with all other + type of locks we don't even have any accounting for them. + + Note that relation between scoped locks and objects locks requested + by statement is not straightforward and is therefore fully defined + by SQL-layer. + For example, in order to support global read lock implementation + SQL-layer acquires IX lock in GLOBAL namespace for each statement + that can modify metadata or data (i.e. for each statement that + needs SW, SU, SNW, SNRW or X object locks). OTOH, to ensure that + DROP DATABASE works correctly with concurrent DDL, IX metadata locks + in SCHEMA namespace are acquired for DDL statements which can update + metadata in the schema (i.e. which acquire SU, SNW, SNRW and X locks + on schema objects) and aren't acquired for DML. 
*/ const MDL_lock::bitmap_t MDL_scoped_lock::m_granted_incompatible[MDL_TYPE_END] = { MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED), - MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE), 0, 0, 0, 0, 0, + MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE), 0, 0, 0, 0, 0, 0, MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED) | MDL_BIT(MDL_INTENTION_EXCLUSIVE) }; const MDL_lock::bitmap_t MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END] = { MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED), - MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0 + MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0, 0 }; @@ -1547,35 +1661,39 @@ const MDL_lock::bitmap_t MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END] = The first array specifies if particular type of request can be satisfied if there is granted lock of certain type. - Request | Granted requests for lock | - type | S SH SR SW SNW SNRW X | - ----------+------------------------------+ - S | + + + + + + - | - SH | + + + + + + - | - SR | + + + + + - - | - SW | + + + + - - - | - SNW | + + + - - - - | - SNRW | + + - - - - - | - X | - - - - - - - | - SNW -> X | - - - 0 0 0 0 | - SNRW -> X | - - 0 0 0 0 0 | + Request | Granted requests for lock | + type | S SH SR SW SU SNW SNRW X | + ----------+----------------------------------+ + S | + + + + + + + - | + SH | + + + + + + + - | + SR | + + + + + + - - | + SW | + + + + + - - - | + SU | + + + + - - - - | + SNW | + + + - - - - - | + SNRW | + + - - - - - - | + X | - - - - - - - - | + SU -> X | - - - - 0 0 0 0 | + SNW -> X | - - - 0 0 0 0 0 | + SNRW -> X | - - 0 0 0 0 0 0 | The second array specifies if particular type of request can be satisfied if there is waiting request for the same lock of certain type. In other words it specifies what is the priority of different lock types. 
- Request | Pending requests for lock | - type | S SH SR SW SNW SNRW X | - ----------+-----------------------------+ - S | + + + + + + - | - SH | + + + + + + + | - SR | + + + + + - - | - SW | + + + + - - - | - SNW | + + + + + + - | - SNRW | + + + + + + - | - X | + + + + + + + | - SNW -> X | + + + + + + + | - SNRW -> X | + + + + + + + | + Request | Pending requests for lock | + type | S SH SR SW SU SNW SNRW X | + ----------+---------------------------------+ + S | + + + + + + + - | + SH | + + + + + + + + | + SR | + + + + + + - - | + SW | + + + + + - - - | + SU | + + + + + + + - | + SNW | + + + + + + + - | + SNRW | + + + + + + + - | + X | + + + + + + + + | + SU -> X | + + + + + + + + | + SNW -> X | + + + + + + + + | + SNRW -> X | + + + + + + + + | Here: "+" -- means that request can be satisfied "-" -- means that request can't be satisfied and should wait @@ -1584,6 +1702,9 @@ const MDL_lock::bitmap_t MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END] = @note In cases then current context already has "stronger" type of lock on the object it will be automatically granted thanks to usage of the MDL_context::find_ticket() method. + + @note IX locks are excluded since they are not used for per-object + metadata locks. 
*/ const MDL_lock::bitmap_t @@ -1596,14 +1717,17 @@ MDL_object_lock::m_granted_incompatible[MDL_TYPE_END] = MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | MDL_BIT(MDL_SHARED_NO_WRITE), MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | - MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_WRITE), + MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE), + MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | + MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) | + MDL_BIT(MDL_SHARED_WRITE), MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | - MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_WRITE) | - MDL_BIT(MDL_SHARED_READ), + MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) | + MDL_BIT(MDL_SHARED_WRITE) | MDL_BIT(MDL_SHARED_READ), MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | - MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_WRITE) | - MDL_BIT(MDL_SHARED_READ) | MDL_BIT(MDL_SHARED_HIGH_PRIO) | - MDL_BIT(MDL_SHARED) + MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) | + MDL_BIT(MDL_SHARED_WRITE) | MDL_BIT(MDL_SHARED_READ) | + MDL_BIT(MDL_SHARED_HIGH_PRIO) | MDL_BIT(MDL_SHARED) }; @@ -1618,6 +1742,7 @@ MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END] = MDL_BIT(MDL_SHARED_NO_WRITE), MDL_BIT(MDL_EXCLUSIVE), MDL_BIT(MDL_EXCLUSIVE), + MDL_BIT(MDL_EXCLUSIVE), 0 }; @@ -1690,7 +1815,7 @@ MDL_lock::get_lock_owner() const MDL_ticket *ticket; if ((ticket= it++)) - return thd_get_thread_id(ticket->get_ctx()->get_thd()); + return ticket->get_ctx()->get_thread_id(); return 0; } @@ -1821,6 +1946,8 @@ MDL_context::find_ticket(MDL_request *mdl_request, if (mdl_request->key.is_equal(&ticket->m_lock->key) && ticket->has_stronger_or_equal_type(mdl_request->type)) { + DBUG_PRINT("info", ("Adding mdl lock %d to %d", + mdl_request->type, ticket->m_type)); *result_duration= duration; return ticket; } @@ -2047,7 +2174,7 @@ void MDL_object_lock::notify_conflicting_locks(MDL_context 
*ctx) { /* Only try to abort locks on which we back off. */ if (conflicting_ticket->get_ctx() != ctx && - conflicting_ticket->get_type() < MDL_SHARED_NO_WRITE) + conflicting_ticket->get_type() < MDL_SHARED_UPGRADABLE) { MDL_context *conflicting_ctx= conflicting_ticket->get_ctx(); @@ -2057,9 +2184,9 @@ void MDL_object_lock::notify_conflicting_locks(MDL_context *ctx) lock or some other non-MDL resource we might need to wake it up by calling code outside of MDL. */ - mysql_notify_thread_having_shared_lock(ctx->get_thd(), - conflicting_ctx->get_thd(), - conflicting_ctx->get_needs_thr_lock_abort()); + ctx->get_owner()-> + notify_shared_lock(conflicting_ctx->get_owner(), + conflicting_ctx->get_needs_thr_lock_abort()); } } } @@ -2089,9 +2216,9 @@ void MDL_scoped_lock::notify_conflicting_locks(MDL_context *ctx) insert delayed. We need to kill such threads in order to get global shared lock. We do this my calling code outside of MDL. */ - mysql_notify_thread_having_shared_lock(ctx->get_thd(), - conflicting_ctx->get_thd(), - conflicting_ctx->get_needs_thr_lock_abort()); + ctx->get_owner()-> + notify_shared_lock(conflicting_ctx->get_owner(), + conflicting_ctx->get_needs_thr_lock_abort()); } } } @@ -2117,6 +2244,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout) struct timespec abs_timeout; MDL_wait::enum_wait_status wait_status; DBUG_ENTER("MDL_context::acquire_lock"); + DBUG_PRINT("enter", ("lock_type: %d", mdl_request->type)); /* Do some work outside the critical section. */ set_timespec(abs_timeout, lock_wait_timeout); @@ -2131,6 +2259,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout) MDL_lock, MDL_context and MDL_request were updated accordingly, so we can simply return success. 
*/ + DBUG_PRINT("info", ("Got lock without waiting")); DBUG_RETURN(FALSE); } @@ -2164,7 +2293,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout) will_wait_for(ticket); /* There is a shared or exclusive lock on the object. */ - DEBUG_SYNC(m_thd, "mdl_acquire_lock_wait"); + DEBUG_SYNC(get_thd(), "mdl_acquire_lock_wait"); find_deadlock(); @@ -2175,13 +2304,13 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout) while (cmp_timespec(abs_shortwait, abs_timeout) <= 0) { /* abs_timeout is far away. Wait a short while and notify locks. */ - wait_status= m_wait.timed_wait(m_thd, &abs_shortwait, FALSE, + wait_status= m_wait.timed_wait(m_owner, &abs_shortwait, FALSE, mdl_request->key.get_wait_state_name()); if (wait_status != MDL_wait::EMPTY) break; /* Check if the client is gone while we were waiting. */ - if (! thd_is_connected(m_thd)) + if (! thd_is_connected(m_owner->get_thd())) { /* * The client is disconnected. Don't wait forever: @@ -2199,7 +2328,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout) set_timespec(abs_shortwait, 1); } if (wait_status == MDL_wait::EMPTY) - wait_status= m_wait.timed_wait(m_thd, &abs_timeout, TRUE, + wait_status= m_wait.timed_wait(m_owner, &abs_timeout, TRUE, mdl_request->key.get_wait_state_name()); done_waiting_for(); @@ -2284,8 +2413,7 @@ bool MDL_context::acquire_locks(MDL_request_list *mdl_requests, /* Sort requests according to MDL_key. */ if (! (sort_buf= (MDL_request **)my_malloc(req_count * sizeof(MDL_request*), - MYF(MY_WME | - MY_THREAD_SPECIFIC)))) + MYF(MY_WME)))) DBUG_RETURN(TRUE); for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++) @@ -2321,11 +2449,12 @@ err: /** - Upgrade a shared metadata lock to exclusive. + Upgrade a shared metadata lock. - Used in ALTER TABLE, when a copy of the table with the - new definition has been constructed. + Used in ALTER TABLE. + @param mdl_ticket Lock to upgrade. 
+ @param new_type Lock type to upgrade to. @param lock_wait_timeout Seconds to wait before timeout. @note In case of failure to upgrade lock (e.g. because upgrader @@ -2333,7 +2462,7 @@ err: shared mode). @note There can be only one upgrader for a lock or we will have deadlock. - This invariant is ensured by the fact that upgradeable locks SNW + This invariant is ensured by the fact that upgradeable locks SU, SNW and SNRW are not compatible with each other and themselves. @retval FALSE Success @@ -2341,28 +2470,31 @@ err: */ bool -MDL_context::upgrade_shared_lock_to_exclusive(MDL_ticket *mdl_ticket, - ulong lock_wait_timeout) +MDL_context::upgrade_shared_lock(MDL_ticket *mdl_ticket, + enum_mdl_type new_type, + ulong lock_wait_timeout) { MDL_request mdl_xlock_request; MDL_savepoint mdl_svp= mdl_savepoint(); bool is_new_ticket; - - DBUG_ENTER("MDL_ticket::upgrade_shared_lock_to_exclusive"); - DEBUG_SYNC(get_thd(), "mdl_upgrade_shared_lock_to_exclusive"); + DBUG_ENTER("MDL_context::upgrade_shared_lock"); + DBUG_PRINT("enter",("new_type: %d lock_wait_timeout: %lu", new_type, + lock_wait_timeout)); + DEBUG_SYNC(get_thd(), "mdl_upgrade_lock"); /* Do nothing if already upgraded. Used when we FLUSH TABLE under LOCK TABLES and a table is listed twice in LOCK TABLES list. 
*/ - if (mdl_ticket->m_type == MDL_EXCLUSIVE) + if (mdl_ticket->has_stronger_or_equal_type(new_type)) DBUG_RETURN(FALSE); - /* Only allow upgrades from MDL_SHARED_NO_WRITE/NO_READ_WRITE */ - DBUG_ASSERT(mdl_ticket->m_type == MDL_SHARED_NO_WRITE || + /* Only allow upgrades from SHARED_UPGRADABLE/NO_WRITE/NO_READ_WRITE */ + DBUG_ASSERT(mdl_ticket->m_type == MDL_SHARED_UPGRADABLE || + mdl_ticket->m_type == MDL_SHARED_NO_WRITE || mdl_ticket->m_type == MDL_SHARED_NO_READ_WRITE); - mdl_xlock_request.init(&mdl_ticket->m_lock->key, MDL_EXCLUSIVE, + mdl_xlock_request.init(&mdl_ticket->m_lock->key, new_type, MDL_TRANSACTION); if (acquire_lock(&mdl_xlock_request, lock_wait_timeout)) @@ -2380,7 +2512,7 @@ MDL_context::upgrade_shared_lock_to_exclusive(MDL_ticket *mdl_ticket, ticket from the granted queue and then include it back. */ mdl_ticket->m_lock->m_granted.remove_ticket(mdl_ticket); - mdl_ticket->m_type= MDL_EXCLUSIVE; + mdl_ticket->m_type= new_type; mdl_ticket->m_lock->m_granted.add_ticket(mdl_ticket); mysql_prlock_unlock(&mdl_ticket->m_lock->m_rwlock); @@ -2654,8 +2786,8 @@ void MDL_context::release_lock(enum_mdl_duration duration, MDL_ticket *ticket) { MDL_lock *lock= ticket->m_lock; DBUG_ENTER("MDL_context::release_lock"); - DBUG_PRINT("enter", ("db=%s name=%s", lock->key.db_name(), - lock->key.name())); + DBUG_PRINT("enter", ("db: '%s' name: '%s'", + lock->key.db_name(), lock->key.name())); DBUG_ASSERT(this == ticket->get_ctx()); mysql_mutex_assert_not_owner(&LOCK_open); @@ -2744,22 +2876,29 @@ void MDL_context::release_all_locks_for_name(MDL_ticket *name) /** - Downgrade an exclusive lock to shared metadata lock. + Downgrade an EXCLUSIVE or SHARED_NO_WRITE lock to shared metadata lock. @param type Type of lock to which exclusive lock should be downgraded. */ -void MDL_ticket::downgrade_exclusive_lock(enum_mdl_type type) +void MDL_ticket::downgrade_lock(enum_mdl_type type) { mysql_mutex_assert_not_owner(&LOCK_open); /* Do nothing if already downgraded. 
Used when we FLUSH TABLE under LOCK TABLES and a table is listed twice in LOCK TABLES list. + Note that this code might even try to "downgrade" a weak lock + (e.g. SW) to a stronger one (e.g SNRW). So we can't even assert + here that target lock is weaker than existing lock. */ - if (m_type != MDL_EXCLUSIVE) + if (m_type == type || !has_stronger_or_equal_type(type)) return; + /* Only allow downgrade from EXCLUSIVE and SHARED_NO_WRITE. */ + DBUG_ASSERT(m_type == MDL_EXCLUSIVE || + m_type == MDL_SHARED_NO_WRITE); + mysql_prlock_wrlock(&m_lock->m_rwlock); /* To update state of MDL_lock object correctly we need to temporarily diff --git a/sql/mdl.h b/sql/mdl.h index 944c6bb6349..e79df9b6cd7 100644 --- a/sql/mdl.h +++ b/sql/mdl.h @@ -1,6 +1,6 @@ #ifndef MDL_H #define MDL_H -/* Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -12,8 +12,8 @@ GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + along with this program; if not, write to the Free Software Foundation, + 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ #if defined(__IBMC__) || defined(__IBMCPP__) /* Further down, "next_in_lock" and "next_in_context" have the same type, @@ -26,10 +26,11 @@ #include "sql_plist.h" #include <my_sys.h> -#include <my_pthread.h> #include <m_string.h> #include <mysql_com.h> +#include <algorithm> + class THD; class MDL_context; @@ -55,6 +56,67 @@ class MDL_ticket; #define EXIT_COND(S) exit_cond(S, __func__, __FILE__, __LINE__) /** + An interface to separate the MDL module from the THD, and the rest of the + server code. 
+ */ + +class MDL_context_owner +{ +public: + virtual ~MDL_context_owner() {} + + /** + Enter a condition wait. + For @c enter_cond() / @c exit_cond() to work the mutex must be held before + @c enter_cond(); this mutex is then released by @c exit_cond(). + Usage must be: lock mutex; enter_cond(); your code; exit_cond(). + @param cond the condition to wait on + @param mutex the associated mutex + @param [in] stage the stage to enter, or NULL + @param [out] old_stage the previous stage, or NULL + @param src_function function name of the caller + @param src_file file name of the caller + @param src_line line number of the caller + @sa ENTER_COND(), THD::enter_cond() + @sa EXIT_COND(), THD::exit_cond() + */ + virtual void enter_cond(mysql_cond_t *cond, mysql_mutex_t *mutex, + const PSI_stage_info *stage, PSI_stage_info *old_stage, + const char *src_function, const char *src_file, + int src_line) = 0; + + /** + @def EXIT_COND(S) + End a wait on a condition + @param [in] stage the new stage to enter + @param src_function function name of the caller + @param src_file file name of the caller + @param src_line line number of the caller + @sa ENTER_COND(), THD::enter_cond() + @sa EXIT_COND(), THD::exit_cond() + */ + virtual void exit_cond(const PSI_stage_info *stage, + const char *src_function, const char *src_file, + int src_line) = 0; + /** + Has the owner thread been killed? + */ + virtual int is_killed() = 0; + + /** + This one is only used for DEBUG_SYNC. + (Do not use it to peek/poke into other parts of THD.) + */ + virtual THD* get_thd() = 0; + + /** + @see THD::notify_shared_lock() + */ + virtual bool notify_shared_lock(MDL_context_owner *in_use, + bool needs_thr_lock_abort) = 0; +}; + +/** Type of metadata lock request. @sa Comments for MDL_object_lock::can_grant_lock() and @@ -132,6 +194,15 @@ enum enum_mdl_type { */ MDL_SHARED_WRITE, /* + An upgradable shared metadata lock for cases when there is an intention + to modify (and not just read) data in the table. 
+ Can be upgraded to MDL_SHARED_NO_WRITE and MDL_EXCLUSIVE. + A connection holding SU lock can read table metadata and modify or read + table data (after acquiring appropriate table and row-level locks). + To be used for the first phase of ALTER TABLE. + */ + MDL_SHARED_UPGRADABLE, + /* An upgradable shared metadata lock which blocks all attempts to update table data, allowing reads. A connection holding this kind of lock can read table metadata and read @@ -270,9 +341,12 @@ public: are not longer than NAME_LEN. Still we play safe and try to avoid buffer overruns. */ - m_db_name_length= (uint16) (strmake(m_ptr + 1, db, NAME_LEN) - m_ptr - 1); - m_length= (uint16) (strmake(m_ptr + m_db_name_length + 2, name, NAME_LEN) - - m_ptr + 1); + DBUG_ASSERT(strlen(db) <= NAME_LEN); + DBUG_ASSERT(strlen(name) <= NAME_LEN); + m_db_name_length= static_cast<uint16>(strmake(m_ptr + 1, db, NAME_LEN) - + m_ptr - 1); + m_length= static_cast<uint16>(strmake(m_ptr + m_db_name_length + 2, name, + NAME_LEN) - m_ptr + 1); } void mdl_key_init(const MDL_key *rhs) { @@ -295,6 +369,7 @@ public: character set is utf-8, we can safely assume that no character starts with a zero byte. 
*/ + using std::min; return memcmp(m_ptr, rhs->m_ptr, min(m_length, rhs->m_length)); } @@ -509,14 +584,15 @@ public: MDL_context *get_ctx() const { return m_ctx; } bool is_upgradable_or_exclusive() const { - return m_type == MDL_SHARED_NO_WRITE || + return m_type == MDL_SHARED_UPGRADABLE || + m_type == MDL_SHARED_NO_WRITE || m_type == MDL_SHARED_NO_READ_WRITE || m_type == MDL_EXCLUSIVE; } enum_mdl_type get_type() const { return m_type; } MDL_lock *get_lock() const { return m_lock; } MDL_key *get_key() const; - void downgrade_exclusive_lock(enum_mdl_type type); + void downgrade_lock(enum_mdl_type type); bool has_stronger_or_equal_type(enum_mdl_type type) const; @@ -622,7 +698,7 @@ public: bool set_status(enum_wait_status result_arg); enum_wait_status get_status(); void reset_status(); - enum_wait_status timed_wait(THD *thd, + enum_wait_status timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout, bool signal_timeout, const PSI_stage_info *wait_state_name); @@ -668,8 +744,9 @@ public: bool try_acquire_lock(MDL_request *mdl_request); bool acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout); bool acquire_locks(MDL_request_list *requests, ulong lock_wait_timeout); - bool upgrade_shared_lock_to_exclusive(MDL_ticket *mdl_ticket, - ulong lock_wait_timeout); + bool upgrade_shared_lock(MDL_ticket *mdl_ticket, + enum_mdl_type new_type, + ulong lock_wait_timeout); bool clone_ticket(MDL_request *mdl_request); @@ -704,7 +781,7 @@ public: void release_transactional_locks(); void rollback_to_savepoint(const MDL_savepoint &mdl_savepoint); - inline THD *get_thd() const { return m_thd; } + MDL_context_owner *get_owner() { return m_owner; } /** @pre Only valid if we started waiting for lock. */ inline uint get_deadlock_weight() const @@ -717,7 +794,7 @@ public: already has received some signal or closed signal slot. 
*/ - void init(THD *thd_arg) { m_thd= thd_arg; } + void init(MDL_context_owner *arg) { m_owner= arg; } void set_needs_thr_lock_abort(bool needs_thr_lock_abort) { @@ -797,7 +874,7 @@ private: involved schemas and global intention exclusive lock. */ Ticket_list m_tickets[MDL_DURATION_END]; - THD *m_thd; + MDL_context_owner *m_owner; /** TRUE - if for this context we will break protocol and try to acquire table-level locks while having only S lock on @@ -826,6 +903,7 @@ private: */ MDL_wait_for_subgraph *m_waiting_for; private: + THD *get_thd() const { return m_owner->get_thd(); } MDL_ticket *find_ticket(MDL_request *mdl_req, enum_mdl_duration *duration); void release_locks_stored_before(enum_mdl_duration duration, MDL_ticket *sentinel); @@ -836,6 +914,8 @@ private: public: void find_deadlock(); + ulong get_thread_id() const { return thd_get_thread_id(get_thd()); } + bool visit_subgraph(MDL_wait_for_graph_visitor *dvisitor); /** Inform the deadlock detector there is an edge in the wait-for graph. */ @@ -870,8 +950,6 @@ private: void mdl_init(); void mdl_destroy(); -extern bool mysql_notify_thread_having_shared_lock(THD *thd, THD *in_use, - bool needs_thr_lock_abort); extern "C" unsigned long thd_get_thread_id(const MYSQL_THD thd); /** @@ -897,6 +975,14 @@ extern ulong mdl_locks_cache_size; static const ulong MDL_LOCKS_CACHE_SIZE_DEFAULT = 1024; /* + Start-up parameter for the number of partitions of the hash + containing all the MDL_lock objects and a constant for + its default value. +*/ +extern ulong mdl_locks_hash_partitions; +static const ulong MDL_LOCKS_HASH_PARTITIONS_DEFAULT = 8; + +/* Metadata locking subsystem tries not to grant more than max_write_lock_count high-prio, strong locks successively, to avoid starving out weak, low-prio locks. 
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index 3aee7936b79..e42ea9ec452 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -550,7 +550,7 @@ int Mrr_ordered_index_reader::init(handler *h_arg, RANGE_SEQ_IF *seq_funcs, KEY *key_info= &file->get_table()->key_info[file->active_index]; keypar.index_ranges_unique= test(key_info->flags & HA_NOSAME && - key_info->key_parts == + key_info->user_defined_key_parts == my_count_bits(keypar.key_tuple_map)); mrr_iter= seq_funcs->init(seq_init_param, n_ranges, mode); @@ -1497,7 +1497,7 @@ ha_rows DsMrr_impl::dsmrr_info_const(uint keyno, RANGE_SEQ_IF *seq, bool key_uses_partial_cols(TABLE *table, uint keyno) { KEY_PART_INFO *kp= table->key_info[keyno].key_part; - KEY_PART_INFO *kp_end= kp + table->key_info[keyno].key_parts; + KEY_PART_INFO *kp_end= kp + table->key_info[keyno].user_defined_key_parts; for (; kp != kp_end; kp++) { if (!kp->field->part_of_key.is_set(keyno)) @@ -1648,7 +1648,7 @@ int DsMrr_impl::dsmrr_explain_info(uint mrr_mode, char *str, size_t size) used_str= rowid_ordered; uint used_str_len= strlen(used_str); - uint copy_len= min(used_str_len, size); + uint copy_len= MY_MIN(used_str_len, size); memcpy(str, used_str, copy_len); return copy_len; } @@ -1709,7 +1709,7 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags, else { cost->reset(); - *buffer_size= max(*buffer_size, + *buffer_size= MY_MAX(*buffer_size, (size_t)(1.2*rows_in_last_step) * elem_size + primary_file->ref_length + table->key_info[keynr].key_length); } diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc index 21611afd87b..c11bf671cb1 100644 --- a/sql/my_decimal.cc +++ b/sql/my_decimal.cc @@ -45,21 +45,21 @@ int decimal_operation_results(int result, const char *value, const char *type) case E_DEC_OK: break; case E_DEC_TRUNCATED: - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_DATA_TRUNCATED, 
ER(ER_DATA_TRUNCATED), value, type); break; case E_DEC_OVERFLOW: - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_DATA_OVERFLOW, ER(ER_DATA_OVERFLOW), value, type); break; case E_DEC_DIV_ZERO: - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_DIVISION_BY_ZERO, ER(ER_DIVISION_BY_ZERO)); break; case E_DEC_BAD_NUM: - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_BAD_DATA, ER(ER_BAD_DATA), value, type); break; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 2cf0dddd1aa..2575ebed209 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -614,6 +614,19 @@ const char *in_left_expr_name= "<left expr>"; const char *in_additional_cond= "<IN COND>"; const char *in_having_cond= "<IN HAVING>"; +/** Number of connection errors when selecting on the listening port */ +ulong connection_errors_select= 0; +/** Number of connection errors when accepting sockets in the listening port. */ +ulong connection_errors_accept= 0; +/** Number of connection errors from TCP wrappers. */ +ulong connection_errors_tcpwrap= 0; +/** Number of connection errors from internal server errors. */ +ulong connection_errors_internal= 0; +/** Number of connection errors from the server max_connection limit. */ +ulong connection_errors_max_connection= 0; +/** Number of errors when reading the peer address. 
*/ +ulong connection_errors_peer_addr= 0; + /* classes for comparation parsing/processing */ Eq_creator eq_creator; Ne_creator ne_creator; @@ -811,8 +824,6 @@ static struct my_option pfs_early_options[] __attribute__((unused)) = GET_BOOL, OPT_ARG, TRUE, 0, 0, 0, 0, 0} }; - - #ifdef HAVE_PSI_INTERFACE #ifdef HAVE_MMAP PSI_mutex_key key_PAGE_lock, key_LOCK_sync, key_LOCK_active, key_LOCK_pool, @@ -854,6 +865,7 @@ PSI_mutex_key key_LOCK_stats, PSI_mutex_key key_LOCK_rpl_gtid_state; PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered; +PSI_mutex_key key_TABLE_SHARE_LOCK_share; static PSI_mutex_info all_server_mutexes[]= { @@ -911,6 +923,7 @@ static PSI_mutex_info all_server_mutexes[]= { &key_relay_log_info_sleep_lock, "Relay_log_info::sleep_lock", 0}, { &key_structure_guard_mutex, "Query_cache::structure_guard_mutex", 0}, { &key_TABLE_SHARE_LOCK_ha_data, "TABLE_SHARE::LOCK_ha_data", 0}, + { &key_TABLE_SHARE_LOCK_share, "TABLE_SHARE::LOCK_share", 0}, { &key_LOCK_error_messages, "LOCK_error_messages", PSI_FLAG_GLOBAL}, { &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL}, { &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL}, @@ -1102,7 +1115,8 @@ void net_after_header_psi(struct st_net *net, void *user_data, size_t /* unused: { thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state, stmt_info_new_packet.m_key, - thd->db, thd->db_length); + thd->db, thd->db_length, + thd->charset()); THD_STAGE_INFO(thd, stage_init); } @@ -1134,12 +1148,6 @@ void init_net_server_extension(THD *thd) } #endif /* EMBEDDED_LIBRARY */ -/* - Since buffered_option_error_reporter is only used currently - for parsing performance schema options, this code is not needed - when the performance schema is not compiled in. -*/ -#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE /** A log message for the error log, buffered in memory. 
Log messages are temporarily buffered when generated before the error log @@ -1275,13 +1283,16 @@ void Buffered_logs::print() /** Logs reported before a logger is available. */ static Buffered_logs buffered_logs; +static MYSQL_SOCKET unix_sock, base_ip_sock, extra_ip_sock; +struct my_rnd_struct sql_rand; ///< used by sql_class.cc:THD::THD() + #ifndef EMBEDDED_LIBRARY /** Error reporter that buffer log messages. @param level log message level @param format log message format string */ -C_MODE_START + static void buffered_option_error_reporter(enum loglevel level, const char *format, ...) { @@ -1293,14 +1304,7 @@ static void buffered_option_error_reporter(enum loglevel level, va_end(args); buffered_logs.buffer(level, buffer); } -C_MODE_END -#endif /* !EMBEDDED_LIBRARY */ -#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */ -static MYSQL_SOCKET unix_sock, base_ip_sock, extra_ip_sock; -struct my_rnd_struct sql_rand; ///< used by sql_class.cc:THD::THD() - -#ifndef EMBEDDED_LIBRARY struct passwd *user_info; static pthread_t select_thread; #endif @@ -2710,7 +2714,7 @@ static bool cache_thread() Delete the instrumentation for the job that just completed, before parking this pthread in the cache (blocked on COND_thread_cache). */ - PSI_CALL(delete_current_thread)(); + PSI_THREAD_CALL(delete_current_thread)(); #endif while (!abort_loop && ! wake_thread && ! kill_cached_threads) @@ -2733,9 +2737,9 @@ static bool cache_thread() Create new instrumentation for the new THD job, and attach it to this running pthread. 
*/ - PSI_thread *psi= PSI_CALL(new_thread)(key_thread_one_connection, - thd, thd->thread_id); - PSI_CALL(set_thread)(psi); + PSI_thread *psi= PSI_THREAD_CALL(new_thread)(key_thread_one_connection, + thd, thd->thread_id); + PSI_THREAD_CALL(set_thread)(psi); #endif /* @@ -3264,7 +3268,7 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) abort_loop=1; // mark abort for threads #ifdef HAVE_PSI_THREAD_INTERFACE /* Delete the instrumentation for the signal thread */ - PSI_CALL(delete_current_thread)(); + PSI_THREAD_CALL(delete_current_thread)(); #endif #ifdef USE_ONE_SIGNAL_HAND pthread_t tmp; @@ -3334,7 +3338,7 @@ extern "C" void my_message_sql(uint error, const char *str, myf MyFlags); void my_message_sql(uint error, const char *str, myf MyFlags) { THD *thd= current_thd; - MYSQL_ERROR::enum_warning_level level; + Sql_condition::enum_warning_level level; sql_print_message_func func; DBUG_ENTER("my_message_sql"); DBUG_PRINT("error", ("error: %u message: '%s' Flag: %lu", error, str, @@ -3346,17 +3350,17 @@ void my_message_sql(uint error, const char *str, myf MyFlags) mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_ERROR, error, str); if (MyFlags & ME_JUST_INFO) { - level= MYSQL_ERROR::WARN_LEVEL_NOTE; + level= Sql_condition::WARN_LEVEL_NOTE; func= sql_print_information; } else if (MyFlags & ME_JUST_WARNING) { - level= MYSQL_ERROR::WARN_LEVEL_WARN; + level= Sql_condition::WARN_LEVEL_WARN; func= sql_print_warning; } else { - level= MYSQL_ERROR::WARN_LEVEL_ERROR; + level= Sql_condition::WARN_LEVEL_ERROR; func= sql_print_error; } @@ -3520,6 +3524,7 @@ SHOW_VAR com_status_vars[]= { {"empty_query", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EMPTY_QUERY]), SHOW_LONG_STATUS}, {"execute_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EXECUTE]), SHOW_LONG_STATUS}, {"flush", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_FLUSH]), SHOW_LONG_STATUS}, + {"get_diagnostics", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GET_DIAGNOSTICS]), 
SHOW_LONG_STATUS}, {"grant", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GRANT]), SHOW_LONG_STATUS}, {"ha_close", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_CLOSE]), SHOW_LONG_STATUS}, {"ha_open", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_OPEN]), SHOW_LONG_STATUS}, @@ -3725,13 +3730,17 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific) However, this should never happen, so better to assert and fix this. */ +#ifdef ENABLE_BEFORE_END_OF_MERGE_QQ DBUG_ASSERT(thd); +#endif if (thd) { DBUG_PRINT("info", ("memory_used: %lld size: %lld", (longlong) thd->status_var.memory_used, size)); thd->status_var.memory_used+= size; +#ifdef ENABLE_BEFORE_END_OF_MERGE_QQ DBUG_ASSERT((longlong) thd->status_var.memory_used >= 0); +#endif } } } @@ -3745,6 +3754,12 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific) static int init_common_variables() { umask(((~my_umask) & 0666)); + connection_errors_select= 0; + connection_errors_accept= 0; + connection_errors_tcpwrap= 0; + connection_errors_internal= 0; + connection_errors_max_connection= 0; + connection_errors_peer_addr= 0; my_decimal_set_zero(&decimal_zero); // set decimal_zero constant; if (pthread_key_create(&THR_THD,NULL) || @@ -4008,7 +4023,7 @@ static int init_common_variables() can't get max_connections*5 but still got no less than was requested (value of wanted_files). */ - max_open_files= max(max(wanted_files, + max_open_files= MY_MAX(MY_MAX(wanted_files, (max_connections + extra_max_connections)*5), open_files_limit); files= my_set_max_open_files(max_open_files); @@ -4021,15 +4036,15 @@ static int init_common_variables() If we have requested too much file handles than we bring max_connections in supported bounds. 
*/ - max_connections= (ulong) min(files-10-TABLE_OPEN_CACHE_MIN*2, + max_connections= (ulong) MY_MIN(files-10-TABLE_OPEN_CACHE_MIN*2, max_connections); /* Decrease table_cache_size according to max_connections, but - not below TABLE_OPEN_CACHE_MIN. Outer min() ensures that we + not below TABLE_OPEN_CACHE_MIN. Outer MY_MIN() ensures that we never increase table_cache_size automatically (that could happen if max_connections is decreased above). */ - table_cache_size= (ulong) min(max((files-10-max_connections)/2, + table_cache_size= (ulong) MY_MIN(MY_MAX((files-10-max_connections)/2, TABLE_OPEN_CACHE_MIN), table_cache_size); DBUG_PRINT("warning", @@ -5023,7 +5038,6 @@ int mysqld_main(int argc, char **argv) sys_var_init(); -#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE /* The performance schema needs to be initialized as early as possible, before to-be-instrumented objects of the server are initialized. @@ -5051,22 +5065,30 @@ int mysqld_main(int argc, char **argv) my_charset_error_reporter= buffered_option_error_reporter; pfs_param.m_pfs_instrument= const_cast<char*>(""); +#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE /* Initialize the array of performance schema instrument configurations. */ init_pfs_instrument_array(); +#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */ ho_error= handle_options(&remaining_argc, &remaining_argv, (my_option*)(all_early_options.buffer), mysqld_get_one_option); delete_dynamic(&all_early_options); +#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE if (ho_error == 0) { /* Add back the program name handle_options removes */ remaining_argc++; remaining_argv--; - if (pfs_param.m_enabled) + if (pfs_param.m_enabled && !opt_help && !opt_bootstrap) { + /* Add sizing hints from the server sizing parameters. 
*/ + pfs_param.m_hints.m_table_definition_cache= table_def_size; + pfs_param.m_hints.m_table_open_cache= table_cache_size; + pfs_param.m_hints.m_max_connections= max_connections; + pfs_param.m_hints.m_open_files_limit= open_files_limit; PSI_hook= initialize_performance_schema(&pfs_param); if (PSI_hook == NULL) { @@ -5107,8 +5129,8 @@ int mysqld_main(int argc, char **argv) */ init_server_psi_keys(); /* Instrument the main thread */ - PSI_thread *psi= PSI_CALL(new_thread)(key_thread_main, NULL, 0); - PSI_CALL(set_thread)(psi); + PSI_thread *psi= PSI_THREAD_CALL(new_thread)(key_thread_main, NULL, 0); + PSI_THREAD_CALL(set_thread)(psi); /* Now that some instrumentation is in place, @@ -5387,7 +5409,7 @@ int mysqld_main(int argc, char **argv) Disable the main thread instrumentation, to avoid recording events during the shutdown. */ - PSI_CALL(delete_current_thread)(); + PSI_THREAD_CALL(delete_current_thread)(); #endif /* Wait until cleanup is done */ @@ -5769,6 +5791,7 @@ void create_thread_to_handle_connection(THD *thd) mysql_mutex_unlock(&LOCK_connection_count); statistic_increment(aborted_connects,&LOCK_status); + statistic_increment(connection_errors_internal, &LOCK_status); /* Can't use my_error() since store_globals has not been called. */ my_snprintf(error_message_buff, sizeof(error_message_buff), ER_THD(thd, ER_CANT_CREATE_THREAD), error); @@ -5822,6 +5845,7 @@ static void create_new_thread(THD *thd) close_connection(thd, ER_CON_COUNT_ERROR); statistic_increment(denied_connections, &LOCK_status); delete thd; + statistic_increment(connection_errors_max_connection, &LOCK_status); DBUG_VOID_RETURN; } @@ -5934,6 +5958,12 @@ void handle_connections_sockets() { if (socket_errno != SOCKET_EINTR) { + /* + select(2)/poll(2) failed on the listening port. + There is not much details to report about the client, + increment the server global status variable. 
+ */ + statistic_increment(connection_errors_select, &LOCK_status); if (!select_errors++ && !abort_loop) /* purecov: inspected */ sql_print_error("mysqld: Got error %d from select",socket_errno); /* purecov: inspected */ } @@ -6014,6 +6044,12 @@ void handle_connections_sockets() #endif if (mysql_socket_getfd(new_sock) == INVALID_SOCKET) { + /* + accept(2) failed on the listening port, after many retries. + There is not much details to report about the client, + increment the server global status variable. + */ + statistic_increment(connection_errors_accept, &LOCK_status); if ((error_count++ & 255) == 0) // This can happen often sql_perror("Error in accept"); MAYBE_BROKEN_SYSCALL; @@ -6053,6 +6089,11 @@ void handle_connections_sockets() (void) mysql_socket_shutdown(new_sock, SHUT_RDWR); (void) mysql_socket_close(new_sock); + /* + The connection was refused by TCP wrappers. + There are no details (by client IP) available to update the host_cache. + */ + statistic_increment(connection_errors_tcpwrap, &LOCK_status); continue; } } @@ -6068,6 +6109,7 @@ void handle_connections_sockets() { (void) mysql_socket_shutdown(new_sock, SHUT_RDWR); (void) mysql_socket_close(new_sock); + statistic_increment(connection_errors_internal, &LOCK_status); continue; } /* Set to get io buffers to be part of THD */ @@ -6096,6 +6138,7 @@ void handle_connections_sockets() } delete thd; set_current_thd(0); + statistic_increment(connection_errors_internal, &LOCK_status); continue; } @@ -6960,7 +7003,7 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff) mysql_mutex_lock(&LOCK_active_mi); mi= master_info_index-> get_master_info(&thd->variables.default_master_connection, - MYSQL_ERROR::WARN_LEVEL_NOTE); + Sql_condition::WARN_LEVEL_NOTE); if (mi) tmp= (my_bool) (mi->slave_running == MYSQL_SLAVE_RUN_CONNECT && mi->rli.slave_running); @@ -6986,7 +7029,7 @@ static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff) mysql_mutex_lock(&LOCK_active_mi); mi=
master_info_index-> get_master_info(&thd->variables.default_master_connection, - MYSQL_ERROR::WARN_LEVEL_NOTE); + Sql_condition::WARN_LEVEL_NOTE); if (mi) tmp= mi->received_heartbeats; mysql_mutex_unlock(&LOCK_active_mi); @@ -7011,7 +7054,7 @@ static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff) mysql_mutex_lock(&LOCK_active_mi); mi= master_info_index-> get_master_info(&thd->variables.default_master_connection, - MYSQL_ERROR::WARN_LEVEL_NOTE); + Sql_condition::WARN_LEVEL_NOTE); if (mi) tmp= mi->heartbeat_period; mysql_mutex_unlock(&LOCK_active_mi); @@ -7483,6 +7526,12 @@ SHOW_VAR status_vars[]= { {"Com", (char*) com_status_vars, SHOW_ARRAY}, {"Compression", (char*) &show_net_compression, SHOW_SIMPLE_FUNC}, {"Connections", (char*) &thread_id, SHOW_LONG_NOFLUSH}, + {"Connection_errors_accept", (char*) &connection_errors_accept, SHOW_LONG}, + {"Connection_errors_internal", (char*) &connection_errors_internal, SHOW_LONG}, + {"Connection_errors_max_connections", (char*) &connection_errors_max_connection, SHOW_LONG}, + {"Connection_errors_peer_address", (char*) &connection_errors_peer_addr, SHOW_LONG}, + {"Connection_errors_select", (char*) &connection_errors_select, SHOW_LONG}, + {"Connection_errors_tcpwrap", (char*) &connection_errors_tcpwrap, SHOW_LONG}, {"Cpu_time", (char*) offsetof(STATUS_VAR, cpu_time), SHOW_DOUBLE_STATUS}, {"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, created_tmp_disk_tables_), SHOW_LONG_STATUS}, {"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG}, @@ -9030,6 +9079,9 @@ static PSI_file_info all_server_files[]= PSI_stage_info stage_after_create= { 0, "After create", 0}; PSI_stage_info stage_allocating_local_table= { 0, "allocating local table", 0}; +PSI_stage_info stage_alter_inplace_prepare= { 0, "preparing for alter table", 0}; +PSI_stage_info stage_alter_inplace= { 0, "altering table", 0}; +PSI_stage_info stage_alter_inplace_commit= { 0, "committing alter table to storage engine", 0}; PSI_stage_info 
stage_changing_master= { 0, "Changing master", 0}; PSI_stage_info stage_checking_master_version= { 0, "Checking master version", 0}; PSI_stage_info stage_checking_permissions= { 0, "checking permissions", 0}; diff --git a/sql/mysqld.h b/sql/mysqld.h index d82bd79d935..2cf63d093ad 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -24,6 +24,7 @@ #include "mysql/psi/mysql_file.h" /* MYSQL_FILE */ #include "sql_list.h" /* I_List */ #include "sql_cmd.h" +#include <my_rnd.h> class THD; struct handlerton; @@ -213,6 +214,12 @@ extern int bootstrap_error; extern I_List<THD> threads; extern char err_shared_dir[]; extern TYPELIB thread_handling_typelib; +extern ulong connection_errors_select; +extern ulong connection_errors_accept; +extern ulong connection_errors_tcpwrap; +extern ulong connection_errors_internal; +extern ulong connection_errors_max_connection; +extern ulong connection_errors_peer_addr; extern ulong log_warnings; /* @@ -252,7 +259,7 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list, extern PSI_mutex_key key_RELAYLOG_LOCK_index; extern PSI_mutex_key key_LOCK_slave_state, key_LOCK_binlog_state; -extern PSI_mutex_key key_LOCK_stats, +extern PSI_mutex_key key_TABLE_SHARE_LOCK_share, key_LOCK_stats, key_LOCK_global_user_client_stats, key_LOCK_global_table_stats, key_LOCK_global_index_stats, key_LOCK_wakeup_ready; @@ -310,6 +317,9 @@ void init_server_psi_keys(); */ extern PSI_stage_info stage_after_create; extern PSI_stage_info stage_allocating_local_table; +extern PSI_stage_info stage_alter_inplace_prepare; +extern PSI_stage_info stage_alter_inplace; +extern PSI_stage_info stage_alter_inplace_commit; extern PSI_stage_info stage_changing_master; extern PSI_stage_info stage_checking_master_version; extern PSI_stage_info stage_checking_permissions; diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 93ca14337f5..fcb08bfbfc9 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -792,7 +792,7 @@ static my_bool my_net_skip_rest(NET *net, uint32 
remain, thr_alarm_t *alarmed, { while (remain > 0) { - size_t length= min(remain, net->max_packet); + size_t length= MY_MIN(remain, net->max_packet); if (net_safe_read(net, net->buff, length, alarmed)) DBUG_RETURN(1); update_statistics(thd_increment_bytes_received(length)); @@ -989,7 +989,7 @@ my_real_read(NET *net, size_t *complen) len=uint3korr(net->buff+net->where_b); if (!len) /* End of big multi-packet */ goto end; - helping = max(len,*complen) + net->where_b; + helping = MY_MAX(len,*complen) + net->where_b; /* The necessary size of net->buff */ if (helping >= net->max_packet) { diff --git a/sql/opt_index_cond_pushdown.cc b/sql/opt_index_cond_pushdown.cc index df9dae8e442..fb55aea1968 100644 --- a/sql/opt_index_cond_pushdown.cc +++ b/sql/opt_index_cond_pushdown.cc @@ -117,7 +117,7 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno, return FALSE; KEY *key_info= tbl->key_info + keyno; KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *key_part_end= key_part + key_info->key_parts; + KEY_PART_INFO *key_part_end= key_part + key_info->user_defined_key_parts; for ( ; key_part < key_part_end; key_part++) { if (field->eq(key_part->field)) @@ -129,7 +129,7 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno, { key_info= tbl->key_info + tbl->s->primary_key; key_part= key_info->key_part; - key_part_end= key_part + key_info->key_parts; + key_part_end= key_part + key_info->user_defined_key_parts; for ( ; key_part < key_part_end; key_part++) { /* diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 75142e87f98..d9838543b58 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -112,7 +112,7 @@ #include "key.h" // is_key_used, key_copy, key_cmp, key_restore #include "sql_parse.h" // check_stack_overrun #include "sql_partition.h" // get_part_id_func, PARTITION_ITERATOR, - // struct partition_info + // struct partition_info, NOT_A_PARTITION_ID #include "sql_base.h" // free_io_cache #include "records.h" // init_read_record, 
end_read_record #include <m_ctype.h> @@ -2851,7 +2851,7 @@ static int fill_used_fields_bitmap(PARAM *param) /* The table uses clustered PK and it is not internally generated */ KEY_PART_INFO *key_part= param->table->key_info[pk].key_part; KEY_PART_INFO *key_part_end= key_part + - param->table->key_info[pk].key_parts; + param->table->key_info[pk].user_defined_key_parts; for (;key_part != key_part_end; ++key_part) bitmap_clear_bit(¶m->needed_fields, key_part->fieldnr-1); } @@ -3081,7 +3081,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, group_trp= get_best_group_min_max(¶m, tree, best_read_time); if (group_trp) { - param.table->quick_condition_rows= min(group_trp->records, + param.table->quick_condition_rows= MY_MIN(group_trp->records, head->stat_records()); if (group_trp->read_cost < best_read_time) { @@ -3529,7 +3529,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item *cond) /* Calculate selectivity of probably highly selective predicates */ ulong check_rows= - min(thd->variables.optimizer_selectivity_sampling_limit, + MY_MIN(thd->variables.optimizer_selectivity_sampling_limit, (ulong) (table_records * SELECTIVITY_SAMPLING_SHARE)); if (cond && check_rows > SELECTIVITY_SAMPLING_THRESHOLD && thd->variables.optimizer_use_condition_selectivity > 4) @@ -3814,29 +3814,26 @@ static void dbug_print_singlepoint_range(SEL_ARG **start, uint num); #endif -/* +/** Perform partition pruning for a given table and condition. - SYNOPSIS - prune_partitions() - thd Thread handle - table Table to perform partition pruning for - pprune_cond Condition to use for partition pruning + @param thd Thread handle + @param table Table to perform partition pruning for + @param pprune_cond Condition to use for partition pruning - DESCRIPTION - This function assumes that all partitions are marked as unused when it - is invoked. 
The function analyzes the condition, finds partitions that - need to be used to retrieve the records that match the condition, and - marks them as used by setting appropriate bit in part_info->used_partitions - In the worst case all partitions are marked as used. - - NOTE - This function returns promptly if called for non-partitioned table. - - RETURN - TRUE We've inferred that no partitions need to be used (i.e. no table - records will satisfy pprune_cond) - FALSE Otherwise + @note This function assumes that lock_partitions are setup when it + is invoked. The function analyzes the condition, finds partitions that + need to be used to retrieve the records that match the condition, and + marks them as used by setting appropriate bit in part_info->read_partitions + In the worst case all partitions are marked as used. If the table is not + yet locked, it will also unset bits in part_info->lock_partitions that is + not set in read_partitions. + + This function returns promptly if called for non-partitioned table. + + @return Operation status + @retval true Failure + @retval false Success */ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) @@ -3889,7 +3886,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) thd->no_errors=1; // Don't warn about NULL thd->mem_root=&alloc; - bitmap_clear_all(&part_info->used_partitions); + bitmap_clear_all(&part_info->read_partitions); prune_param.key= prune_param.range_param.key_parts; SEL_TREE *tree; @@ -3973,6 +3970,30 @@ end: thd->mem_root= range_par->old_root; free_root(&alloc,MYF(0)); // Return memory & allocator DBUG_RETURN(retval); + /* + Must be a subset of the locked partitions. + lock_partitions contains the partitions marked by explicit partition + selection (... t PARTITION (pX) ...) and we must only use partitions + within that set. 
+ */ + bitmap_intersect(&prune_param.part_info->read_partitions, + &prune_param.part_info->lock_partitions); + /* + If not yet locked, also prune partitions to lock if not UPDATEing + partition key fields. This will also prune lock_partitions if we are under + LOCK TABLES, so prune away calls to start_stmt(). + TODO: enhance this prune locking to also allow pruning of + 'UPDATE t SET part_key = const WHERE cond_is_prunable' so it adds + a lock for part_key partition. + */ + if (!thd->lex->is_query_tables_locked() && + !partition_key_modified(table, table->write_set)) + { + bitmap_copy(&prune_param.part_info->lock_partitions, + &prune_param.part_info->read_partitions); + } + if (bitmap_is_clear_all(&(prune_param.part_info->read_partitions))) + table->all_partitions_pruned_away= true; } @@ -4009,7 +4030,7 @@ static void mark_full_partition_used_no_parts(partition_info* part_info, { DBUG_ENTER("mark_full_partition_used_no_parts"); DBUG_PRINT("enter", ("Mark partition %u as used", part_id)); - bitmap_set_bit(&part_info->used_partitions, part_id); + bitmap_set_bit(&part_info->read_partitions, part_id); DBUG_VOID_RETURN; } @@ -4025,7 +4046,7 @@ static void mark_full_partition_used_with_parts(partition_info *part_info, for (; start != end; start++) { DBUG_PRINT("info", ("1:Mark subpartition %u as used", start)); - bitmap_set_bit(&part_info->used_partitions, start); + bitmap_set_bit(&part_info->read_partitions, start); } DBUG_VOID_RETURN; } @@ -4053,7 +4074,7 @@ static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar, MY_BITMAP all_merges; uint bitmap_bytes; my_bitmap_map *bitmap_buf; - uint n_bits= ppar->part_info->used_partitions.n_bits; + uint n_bits= ppar->part_info->read_partitions.n_bits; bitmap_bytes= bitmap_buffer_size(n_bits); if (!(bitmap_buf= (my_bitmap_map*) alloc_root(ppar->range_param.mem_root, bitmap_bytes))) @@ -4079,14 +4100,15 @@ static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar, } if (res != -1) - bitmap_intersect(&all_merges, 
&ppar->part_info->used_partitions); + bitmap_intersect(&all_merges, &ppar->part_info->read_partitions); + if (bitmap_is_clear_all(&all_merges)) return 0; - bitmap_clear_all(&ppar->part_info->used_partitions); + bitmap_clear_all(&ppar->part_info->read_partitions); } - memcpy(ppar->part_info->used_partitions.bitmap, all_merges.bitmap, + memcpy(ppar->part_info->read_partitions.bitmap, all_merges.bitmap, bitmap_bytes); return 1; } @@ -4446,7 +4468,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) { for (uint i= 0; i < ppar->part_info->num_subparts; i++) if (bitmap_is_set(&ppar->subparts_bitmap, i)) - bitmap_set_bit(&ppar->part_info->used_partitions, + bitmap_set_bit(&ppar->part_info->read_partitions, part_id * ppar->part_info->num_subparts + i); } goto pop_and_go_right; @@ -4508,7 +4530,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) != NOT_A_PARTITION_ID) { - bitmap_set_bit(&part_info->used_partitions, + bitmap_set_bit(&part_info->read_partitions, part_id * part_info->num_subparts + subpart_id); } res= 1; /* Some partitions were marked as used */ @@ -4594,7 +4616,8 @@ pop_and_go_right: static void mark_all_partitions_as_used(partition_info *part_info) { - bitmap_set_all(&part_info->used_partitions); + bitmap_copy(&(part_info->read_partitions), + &(part_info->lock_partitions)); } @@ -5147,7 +5170,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, { imerge_trp->read_cost= imerge_cost; imerge_trp->records= non_cpk_scan_records + cpk_scan_records; - imerge_trp->records= min(imerge_trp->records, + imerge_trp->records= MY_MIN(imerge_trp->records, param->table->stat_records()); imerge_trp->range_scans= range_scans; imerge_trp->range_scans_end= range_scans + n_child_scans; @@ -5737,7 +5760,7 @@ bool prepare_search_best_index_intersect(PARAM *param, this number by #r. 
If we do not make any assumptions then we can only state that - #r<=min(#r1,#r2). + #r<=MY_MIN(#r1,#r2). With this estimate we can't say that the index intersection scan will be cheaper than the cheapest index scan. @@ -5770,7 +5793,7 @@ bool prepare_search_best_index_intersect(PARAM *param, #rt2_0 of the same range for sub-index idx2_0(dept) of the index idx2. The current code does not make an estimate either for #rt1_0, or for #rt2_0, but it can be adjusted to provide those numbers. - Alternatively, min(rec_per_key) for (dept) could be used to get an upper + Alternatively, MY_MIN(rec_per_key) for (dept) could be used to get an upper bound for the value of sel(Rt1&Rt2). Yet this statistics is not provided now. @@ -5781,7 +5804,7 @@ bool prepare_search_best_index_intersect(PARAM *param, sel(Rt1&Rt2)=sel(dept=5)*sel(last_name='Sm5')*sel(first_name='Robert') =sel(Rt2)*sel(dept=5) - Here max(rec_per_key) for (dept) could be used to get an upper bound for + Here MY_MAX(rec_per_key) for (dept) could be used to get an upper bound for the value of sel(Rt1&Rt2). When the intersected indexes have different major columns, but some @@ -5834,9 +5857,9 @@ bool prepare_search_best_index_intersect(PARAM *param, f_1 = rec_per_key[first_name]/rec_per_key[last_name]. The the number of records in the range tree: Rt_0: (first_name='Robert' OR first_name='Bob') - for the sub-index (first_name) is not greater than max(#r*f_1, #t). + for the sub-index (first_name) is not greater than MY_MAX(#r*f_1, #t). Strictly speaking, we can state only that it's not greater than - max(#r*max_f_1, #t), where + MY_MAX(#r*max_f_1, #t), where max_f_1= max_rec_per_key[first_name]/min_rec_per_key[last_name]. 
Yet, if #r/#t is big enough (and this is the case of an index intersection, because using this index range with a single index scan is cheaper than @@ -6292,7 +6315,7 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg) KEY_PART_INFO *key_part= param->table->key_info[keynr].key_part; KEY_PART_INFO *key_part_end= key_part + - param->table->key_info[keynr].key_parts; + param->table->key_info[keynr].user_defined_key_parts; for (;key_part != key_part_end; ++key_part) { if (bitmap_is_set(¶m->needed_fields, key_part->fieldnr-1)) @@ -6965,7 +6988,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param, for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan) (*scan)->key_components= - param->table->key_info[(*scan)->keynr].key_parts; + param->table->key_info[(*scan)->keynr].user_defined_key_parts; /* Run covering-ROR-search algorithm. @@ -9073,7 +9096,7 @@ and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, if (!key1) return &null_element; // Impossible ranges key1->use_count++; - key1->max_part_no= max(key2->max_part_no, key2->part+1); + key1->max_part_no= MY_MAX(key2->max_part_no, key2->part+1); return key1; } @@ -9166,7 +9189,7 @@ key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) key1->use_count--; key2->use_count--; SEL_ARG *e1=key1->first(), *e2=key2->first(), *new_tree=0; - uint max_part_no= max(key1->max_part_no, key2->max_part_no); + uint max_part_no= MY_MAX(key1->max_part_no, key2->max_part_no); while (e1 && e2) { @@ -9364,7 +9387,7 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) b: [---- */ - uint max_part_no= max(key1->max_part_no, key2->max_part_no); + uint max_part_no= MY_MAX(key1->max_part_no, key2->max_part_no); for (key2=key2->first(); key2; ) { @@ -9574,11 +9597,11 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) are merged into one range by deleting first...last-1 from the key1 tree. 
In the figure, this applies to first and the two consecutive ranges. The range of last is then extended: - * last.min: Set to min(key2.min, first.min) + * last.min: Set to MY_MIN(key2.min, first.min) * last.max: If there is a last->next that overlaps key2 (i.e., last->next has a different next_key_part): Set adjacent to last->next.min - Otherwise: Set to max(key2.max, last.max) + Otherwise: Set to MY_MAX(key2.max, last.max) Result: key2: [****----------------------*******] @@ -9632,7 +9655,7 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2) ^ ^ last different next_key_part - Extend range of last up to max(last.max, key2.max): + Extend range of last up to MY_MAX(last.max, key2.max): key2: [--------*****] key1: [***----------**] [xxxx] */ @@ -10473,7 +10496,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only, param->table->quick_key_parts[keynr]= param->max_key_part+1; param->table->quick_n_ranges[keynr]= param->range_count; param->table->quick_condition_rows= - min(param->table->quick_condition_rows, rows); + MY_MIN(param->table->quick_condition_rows, rows); param->table->quick_rows[keynr]= rows; } } @@ -10551,7 +10574,7 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts) KEY *table_key= param->table->key_info + keynr; KEY_PART_INFO *key_part= table_key->key_part + nparts; KEY_PART_INFO *key_part_end= (table_key->key_part + - table_key->key_parts); + table_key->user_defined_key_parts); uint pk_number; for (KEY_PART_INFO *kp= table_key->key_part; kp < key_part; kp++) @@ -10572,7 +10595,7 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts) KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part; KEY_PART_INFO *pk_part_end= pk_part + - param->table->key_info[pk_number].key_parts; + param->table->key_info[pk_number].user_defined_key_parts; for (;(key_part!=key_part_end) && (pk_part != pk_part_end); ++key_part, ++pk_part) { @@ -10733,7 +10756,7 @@ get_quick_keys(PARAM 
*param,QUICK_RANGE_SELECT *quick,KEY_PART *key, { KEY *table_key=quick->head->key_info+quick->index; flag=EQ_RANGE; - if ((table_key->flags & HA_NOSAME) && key->part == table_key->key_parts-1) + if ((table_key->flags & HA_NOSAME) && key->part == table_key->user_defined_key_parts-1) { if (!(table_key->flags & HA_NULL_PART_KEY) || !null_part_in_key(key, @@ -11769,7 +11792,7 @@ int QUICK_SELECT_DESC::get_next() if (last_range) { // Already read through key result = ((last_range->flag & EQ_RANGE && - used_key_parts <= head->key_info[index].key_parts) ? + used_key_parts <= head->key_info[index].user_defined_key_parts) ? file->ha_index_next_same(record, last_range->min_key, last_range->min_length) : file->ha_index_prev(record)); @@ -11797,7 +11820,7 @@ int QUICK_SELECT_DESC::get_next() } if (last_range->flag & EQ_RANGE && - used_key_parts <= head->key_info[index].key_parts) + used_key_parts <= head->key_info[index].user_defined_key_parts) { result= file->ha_index_read_map(record, last_range->max_key, @@ -11808,7 +11831,7 @@ int QUICK_SELECT_DESC::get_next() { DBUG_ASSERT(last_range->flag & NEAR_MAX || (last_range->flag & EQ_RANGE && - used_key_parts > head->key_info[index].key_parts) || + used_key_parts > head->key_info[index].user_defined_key_parts) || range_reads_after_key(last_range)); result= file->ha_index_read_map(record, last_range->max_key, last_range->max_keypart_map, @@ -12258,7 +12281,7 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, TODO - What happens if the query groups by the MIN/MAX field, and there is no - other field as in: "select min(a) from t1 group by a" ? + other field as in: "select min(a) from t1 group by a" ? - We assume that the general correctness of the GROUP-BY query was checked before this point. Is this correct, or do we have to check it completely?
- Lift the limitation in condition (B3), that is, make this access method @@ -12425,7 +12448,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) does not qualify as covering in our case. If this is the case, below we check that all query fields are indeed covered by 'cur_index'. */ - if (cur_index_info->key_parts == table->actual_n_key_parts(cur_index_info) + if (cur_index_info->user_defined_key_parts == table->actual_n_key_parts(cur_index_info) && pk < MAX_KEY && cur_index != pk && (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX)) { @@ -12526,7 +12549,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time) cur_group_prefix_len+= cur_part->store_length; used_key_parts_map.set_bit(key_part_nr); ++cur_group_key_parts; - max_key_part= max(max_key_part,key_part_nr); + max_key_part= MY_MAX(max_key_part,key_part_nr); } /* Check that used key parts forms a prefix of the index. @@ -13312,9 +13335,9 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, { double blocks_per_group= (double) num_blocks / (double) num_groups; p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group; - p_overlap= min(p_overlap, 1.0); + p_overlap= MY_MIN(p_overlap, 1.0); } - io_cost= (double) min(num_groups * (1 + p_overlap), num_blocks); + io_cost= (double) MY_MIN(num_groups * (1 + p_overlap), num_blocks); } else io_cost= (keys_per_group > keys_per_block) ? 
diff --git a/sql/opt_range.h b/sql/opt_range.h index ccddd40686c..3dbdce00e9d 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -104,7 +104,7 @@ class QUICK_RANGE :public Sql_alloc { void make_min_endpoint(key_range *kr, uint prefix_length, key_part_map keypart_map) { make_min_endpoint(kr); - kr->length= min(kr->length, prefix_length); + kr->length= MY_MIN(kr->length, prefix_length); kr->keypart_map&= keypart_map; } @@ -142,7 +142,7 @@ class QUICK_RANGE :public Sql_alloc { void make_max_endpoint(key_range *kr, uint prefix_length, key_part_map keypart_map) { make_max_endpoint(kr); - kr->length= min(kr->length, prefix_length); + kr->length= MY_MIN(kr->length, prefix_length); kr->keypart_map&= keypart_map; } diff --git a/sql/opt_range_mrr.cc b/sql/opt_range_mrr.cc index 8029dbf000f..bff96c7d4cb 100644 --- a/sql/opt_range_mrr.cc +++ b/sql/opt_range_mrr.cc @@ -270,7 +270,7 @@ walk_up_n_right: if (!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag && (seq->real_keyno == MAX_KEY || ((uint)key_tree->part+1 == - seq->param->table->key_info[seq->real_keyno].key_parts && + seq->param->table->key_info[seq->real_keyno].user_defined_key_parts && (seq->param->table->key_info[seq->real_keyno].flags & HA_NOSAME))) && range->start_key.length == range->end_key.length && !memcmp(seq->param->min_key,seq->param->max_key,range->start_key.length)) @@ -295,7 +295,7 @@ walk_up_n_right: } } seq->param->range_count++; - seq->param->max_key_part=max(seq->param->max_key_part,key_tree->part); + seq->param->max_key_part=MY_MAX(seq->param->max_key_part,key_tree->part); return 0; } diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 28d802375e2..7d6d58a3414 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -2185,7 +2185,7 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map) double rows= 1.0; while ((tableno = tm_it.next_bit()) != Table_map_iterator::BITMAP_END) rows *= join->map2table[tableno]->table->quick_condition_rows; - sjm->rows= 
min(sjm->rows, rows); + sjm->rows= MY_MIN(sjm->rows, rows); } memcpy(sjm->positions, join->best_positions + join->const_tables, sizeof(POSITION) * n_tables); @@ -2380,7 +2380,7 @@ bool find_eq_ref_candidate(TABLE *table, table_map sj_inner_tables) keyuse++; } while (keyuse->key == key && keyuse->table == table); - if (bound_parts == PREV_BITS(uint, keyinfo->key_parts)) + if (bound_parts == PREV_BITS(uint, keyinfo->user_defined_key_parts)) return TRUE; } else @@ -3544,7 +3544,7 @@ bool setup_sj_materialization_part2(JOIN_TAB *sjm_tab) KEY *tmp_key; /* The only index on the temporary table. */ uint tmp_key_parts; /* Number of keyparts in tmp_key. */ tmp_key= sjm->table->key_info; - tmp_key_parts= tmp_key->key_parts; + tmp_key_parts= tmp_key->user_defined_key_parts; /* Create/initialize everything we will need to index lookups into the @@ -3942,7 +3942,6 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd) table->s= share; init_tmp_table_share(thd, share, "", 0, tmpname, tmpname); share->blob_field= blob_field; - share->blob_ptr_size= portable_sizeof_char_ptr; share->table_charset= NULL; share->primary_key= MAX_KEY; // Indicate no primary key share->keys_for_keyread.init(); @@ -3995,6 +3994,12 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd) if (!table->file) goto err; + if (table->file->set_ha_share_ref(&share->ha_share)) + { + delete table->file; + goto err; + } + null_count=1; null_pack_length= 1; @@ -4064,7 +4069,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd) share->max_rows= ~(ha_rows) 0; else share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ? 
- min(thd->variables.tmp_table_size, + MY_MIN(thd->variables.tmp_table_size, thd->variables.max_heap_table_size) : thd->variables.tmp_table_size) / share->reclength); @@ -4080,7 +4085,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd) table->key_info=keyinfo; keyinfo->key_part=key_part_info; keyinfo->flags=HA_NOSAME; - keyinfo->usable_key_parts= keyinfo->key_parts= 1; + keyinfo->usable_key_parts= keyinfo->user_defined_key_parts= 1; keyinfo->key_length=0; keyinfo->rec_per_key=0; keyinfo->algorithm= HA_KEY_ALG_UNDEF; @@ -5180,7 +5185,7 @@ bool setup_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list, 0 or 1 record. Examples of both cases: select * from ot where col in (select ... from it where 2>3) - select * from ot where col in (select min(it.key) from it) + select * from ot where col in (select MY_MIN(it.key) from it) in this case, the subquery predicate has not been setup for materialization. In particular, there is no materialized temp.table. diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 069fe6452e8..b8d39057ba8 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -464,7 +464,7 @@ int opt_sum_query(THD *thd, } if (thd->is_error()) - DBUG_RETURN(thd->stmt_da->sql_errno()); + DBUG_RETURN(thd->get_stmt_da()->sql_errno()); /* If we have a where clause, we can only ignore searching in the diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc index 1542efa7415..7454e756416 100644 --- a/sql/opt_table_elimination.cc +++ b/sql/opt_table_elimination.cc @@ -328,7 +328,7 @@ const size_t Dep_value_table::iterator_size= ALIGN_SIZE(sizeof(Dep_value_table::Module_iter)); const size_t Dep_value::iterator_size= - max(Dep_value_table::iterator_size, Dep_value_field::iterator_size); + MY_MAX(Dep_value_table::iterator_size, Dep_value_field::iterator_size); /* @@ -441,7 +441,7 @@ const size_t Dep_module_key::iterator_size= ALIGN_SIZE(sizeof(Dep_module_key::Value_iter)); const size_t Dep_module::iterator_size= - max(Dep_module_expr::iterator_size, 
Dep_module_key::iterator_size); + MY_MAX(Dep_module_expr::iterator_size, Dep_module_key::iterator_size); /* @@ -1563,7 +1563,7 @@ Dep_value_table *Dep_analysis_context::create_table_value(TABLE *table) if (key->flags & HA_NOSAME) { Dep_module_key *key_dep; - if (!(key_dep= new Dep_module_key(tbl_dep, i, key->key_parts))) + if (!(key_dep= new Dep_module_key(tbl_dep, i, key->user_defined_key_parts))) return NULL; *key_list= key_dep; key_list= &(key_dep->next_table_key); diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 34e47331664..6556d50b218 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -22,10 +22,12 @@ #include "sql_priv.h" // Required to get server definitions for mysql/plugin.h right #include "sql_plugin.h" -#include "sql_partition.h" /* partition_info.h: LIST_PART_ENTRY */ +#include "sql_partition.h" // partition_info.h: LIST_PART_ENTRY + // NOT_A_PARTITION_ID #include "partition_info.h" #include "sql_parse.h" // test_if_data_home_dir #include "sql_acl.h" // *_ACL +#include "sql_base.h" // fill_record #ifdef WITH_PARTITION_STORAGE_ENGINE #include "ha_partition.h" @@ -33,17 +35,21 @@ partition_info *partition_info::get_clone() { + DBUG_ENTER("partition_info::get_clone"); if (!this) - return 0; + DBUG_RETURN(NULL); List_iterator<partition_element> part_it(partitions); partition_element *part; partition_info *clone= new partition_info(); if (!clone) { mem_alloc_error(sizeof(partition_info)); - return NULL; + DBUG_RETURN(NULL); } memcpy(clone, this, sizeof(partition_info)); + memset(&(clone->read_partitions), 0, sizeof(clone->read_partitions)); + memset(&(clone->lock_partitions), 0, sizeof(clone->lock_partitions)); + clone->bitmaps_are_initialized= FALSE; clone->partitions.empty(); while ((part= (part_it++))) @@ -54,7 +60,7 @@ partition_info *partition_info::get_clone() if (!part_clone) { mem_alloc_error(sizeof(partition_element)); - return NULL; + DBUG_RETURN(NULL); } memcpy(part_clone, part, sizeof(partition_element)); 
part_clone->subpartitions.empty(); @@ -64,16 +70,427 @@ partition_info *partition_info::get_clone() if (!subpart_clone) { mem_alloc_error(sizeof(partition_element)); - return NULL; + DBUG_RETURN(NULL); } memcpy(subpart_clone, subpart, sizeof(partition_element)); part_clone->subpartitions.push_back(subpart_clone); } clone->partitions.push_back(part_clone); } - return clone; + DBUG_RETURN(clone); +} + +/** + Mark named [sub]partition to be used/locked. + + @param part_name Partition name to match. + @param length Partition name length. + + @return Success if partition found + @retval true Partition found + @retval false Partition not found +*/ + +bool partition_info::add_named_partition(const char *part_name, + uint length) +{ + HASH *part_name_hash; + PART_NAME_DEF *part_def; + Partition_share *part_share; + DBUG_ENTER("partition_info::add_named_partition"); + DBUG_ASSERT(table && table->s && table->s->ha_share); + part_share= static_cast<Partition_share*>((table->s->ha_share)); + DBUG_ASSERT(part_share->partition_name_hash_initialized); + part_name_hash= &part_share->partition_name_hash; + DBUG_ASSERT(part_name_hash->records); + + part_def= (PART_NAME_DEF*) my_hash_search(part_name_hash, + (const uchar*) part_name, + length); + if (!part_def) + { + my_error(ER_UNKNOWN_PARTITION, MYF(0), part_name, table->alias.c_ptr()); + DBUG_RETURN(true); + } + + if (part_def->is_subpart) + { + bitmap_set_bit(&read_partitions, part_def->part_id); + } + else + { + if (is_sub_partitioned()) + { + /* Mark all subpartitions in the partition */ + uint j, start= part_def->part_id; + uint end= start + num_subparts; + for (j= start; j < end; j++) + bitmap_set_bit(&read_partitions, j); + } + else + bitmap_set_bit(&read_partitions, part_def->part_id); + } + DBUG_PRINT("info", ("Found partition %u is_subpart %d for name %s", + part_def->part_id, part_def->is_subpart, + part_name)); + DBUG_RETURN(false); } + +/** + Mark named [sub]partition to be used/locked. 
+ + @param part_elem Partition element that matched. +*/ + +bool partition_info::set_named_partition_bitmap(const char *part_name, + uint length) +{ + DBUG_ENTER("partition_info::set_named_partition_bitmap"); + bitmap_clear_all(&read_partitions); + if (add_named_partition(part_name, length)) + DBUG_RETURN(true); + bitmap_copy(&lock_partitions, &read_partitions); + DBUG_RETURN(false); +} + + + +/** + Prune away partitions not mentioned in the PARTITION () clause, + if used. + + @param table_list Table list pointing to table to prune. + + @return Operation status + @retval true Failure + @retval false Success +*/ +bool partition_info::prune_partition_bitmaps(TABLE_LIST *table_list) +{ + List_iterator<String> partition_names_it(*(table_list->partition_names)); + uint num_names= table_list->partition_names->elements; + uint i= 0; + DBUG_ENTER("partition_info::prune_partition_bitmaps"); + + if (num_names < 1) + DBUG_RETURN(true); + + /* + TODO: When adding support for FK in partitioned tables, the referenced + table must probably lock all partitions for read, and also write depending + of ON DELETE/UPDATE. + */ + bitmap_clear_all(&read_partitions); + + /* No check for duplicate names or overlapping partitions/subpartitions. */ + + DBUG_PRINT("info", ("Searching through partition_name_hash")); + do + { + String *part_name_str= partition_names_it++; + if (add_named_partition(part_name_str->c_ptr(), part_name_str->length())) + DBUG_RETURN(true); + } while (++i < num_names); + DBUG_RETURN(false); +} + + +/** + Set read/lock_partitions bitmap over non pruned partitions + + @param table_list Possible TABLE_LIST which can contain + list of partition names to query + + @return Operation status + @retval FALSE OK + @retval TRUE Failed to allocate memory for bitmap or list of partitions + did not match + + @note OK to call multiple times without the need for free_bitmaps. 
+*/ + +bool partition_info::set_partition_bitmaps(TABLE_LIST *table_list) +{ + DBUG_ENTER("partition_info::set_partition_bitmaps"); + + DBUG_ASSERT(bitmaps_are_initialized); + DBUG_ASSERT(table); + is_pruning_completed= false; + if (!bitmaps_are_initialized) + DBUG_RETURN(TRUE); + + if (table_list && + table_list->partition_names && + table_list->partition_names->elements) + { + if (table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) + { + /* + Don't allow PARTITION () clause on a NDB tables yet. + TODO: Add partition name handling to NDB/partition_info. + which is currently ha_partition specific. + */ + my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0)); + DBUG_RETURN(true); + } + if (prune_partition_bitmaps(table_list)) + DBUG_RETURN(TRUE); + } + else + { + bitmap_set_all(&read_partitions); + DBUG_PRINT("info", ("Set all partitions")); + } + bitmap_copy(&lock_partitions, &read_partitions); + DBUG_ASSERT(bitmap_get_first_set(&lock_partitions) != MY_BIT_NONE); + DBUG_RETURN(FALSE); +} + + +/** + Checks if possible to do prune partitions on insert. 
+ + @param thd Thread context + @param duplic How to handle duplicates + @param update In case of ON DUPLICATE UPDATE, default function fields + @param update_fields In case of ON DUPLICATE UPDATE, which fields to update + @param fields Listed fields + @param empty_values True if values is empty (only defaults) + @param[out] prune_needs_default_values Set on return if copying of default + values is needed + @param[out] can_prune_partitions Enum showing if possible to prune + @param[inout] used_partitions If possible to prune the bitmap + is initialized and cleared + + @return Operation status + @retval false Success + @retval true Failure +*/ + +bool partition_info::can_prune_insert(THD* thd, + enum_duplicates duplic, + COPY_INFO &update, + List<Item> &update_fields, + List<Item> &fields, + bool empty_values, + enum_can_prune *can_prune_partitions, + bool *prune_needs_default_values, + MY_BITMAP *used_partitions) +{ + uint32 *bitmap_buf; + uint bitmap_bytes; + uint num_partitions= 0; + *can_prune_partitions= PRUNE_NO; + DBUG_ASSERT(bitmaps_are_initialized); + DBUG_ENTER("partition_info::can_prune_insert"); + + if (table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) + DBUG_RETURN(false); /* Should not insert prune NDB tables */ + + /* + If under LOCK TABLES pruning will skip start_stmt instead of external_lock + for unused partitions. + + Cannot prune if there are BEFORE INSERT triggers that changes any + partitioning column, since they may change the row to be in another + partition. + */ + if (table->triggers && + table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE) && + table->triggers->is_fields_updated_in_trigger(&full_part_field_set, + TRG_EVENT_INSERT, + TRG_ACTION_BEFORE)) + DBUG_RETURN(false); + + if (table->found_next_number_field) + { + /* + If the field is used in the partitioning expression, we cannot prune. + TODO: If all rows have not null values and + is not 0 (with NO_AUTO_VALUE_ON_ZERO sql_mode), then pruning is possible! 
+ */ + if (bitmap_is_set(&full_part_field_set, + table->found_next_number_field->field_index)) + DBUG_RETURN(false); + } + + /* + If updating a field in the partitioning expression, we cannot prune. + + Note: TIMESTAMP_AUTO_SET_ON_INSERT is handled by converting Item_null + to the start time of the statement. Which will be the same as in + write_row(). So pruning of TIMESTAMP DEFAULT CURRENT_TIME will work. + But TIMESTAMP_AUTO_SET_ON_UPDATE cannot be pruned if the timestamp + column is a part of any part/subpart expression. + */ + if (duplic == DUP_UPDATE) + { + /* + TODO: add check for static update values, which can be pruned. + */ + if (is_field_in_part_expr(update_fields)) + DBUG_RETURN(false); + + /* + Cannot prune if there are BEFORE UPDATE triggers that changes any + partitioning column, since they may change the row to be in another + partition. + */ + if (table->triggers && + table->triggers->has_triggers(TRG_EVENT_UPDATE, + TRG_ACTION_BEFORE) && + table->triggers->is_fields_updated_in_trigger(&full_part_field_set, + TRG_EVENT_UPDATE, + TRG_ACTION_BEFORE)) + { + DBUG_RETURN(false); + } + } + + /* + If not all partitioning fields are given, + we also must set all non given partitioning fields + to get correct defaults. + TODO: If any gain, we could enhance this by only copy the needed default + fields by + 1) check which fields needs to be set. + 2) only copy those fields from the default record. + */ + *prune_needs_default_values= false; + if (fields.elements) + { + if (!is_full_part_expr_in_fields(fields)) + *prune_needs_default_values= true; + } + else if (empty_values) + { + *prune_needs_default_values= true; // like 'INSERT INTO t () VALUES ()' + } + else + { + /* + In case of INSERT INTO t VALUES (...) we must get values for + all fields in table from VALUES (...) part, so no defaults + are needed. + */ + } + + /* Pruning possible, have to initialize the used_partitions bitmap. 
*/ + num_partitions= lock_partitions.n_bits; + bitmap_bytes= bitmap_buffer_size(num_partitions); + if (!(bitmap_buf= (uint32*) thd->alloc(bitmap_bytes))) + { + mem_alloc_error(bitmap_bytes); + DBUG_RETURN(true); + } + /* Also clears all bits. */ + if (bitmap_init(used_partitions, bitmap_buf, num_partitions, false)) + { + /* purecov: begin deadcode */ + /* Cannot happen, due to pre-alloc. */ + mem_alloc_error(bitmap_bytes); + DBUG_RETURN(true); + /* purecov: end */ + } + /* + If no partitioning field in set (e.g. defaults) check pruning only once. + */ + if (fields.elements && + !is_field_in_part_expr(fields)) + *can_prune_partitions= PRUNE_DEFAULTS; + else + *can_prune_partitions= PRUNE_YES; + + DBUG_RETURN(false); +} + + +/** + Mark the partition, the record belongs to, as used. + + @param fields Fields to set + @param values Values to use + @param info COPY_INFO used for default values handling + @param copy_default_values True if we should copy default values + @param used_partitions Bitmap to set + + @returns Operational status + @retval false Success + @retval true Failure +*/ + +bool partition_info::set_used_partition(List<Item> &fields, + List<Item> &values, + COPY_INFO &info, + bool copy_default_values, + MY_BITMAP *used_partitions) +{ + THD *thd= table->in_use; + uint32 part_id; + longlong func_value; + Dummy_error_handler error_handler; + bool ret= true; + DBUG_ENTER("set_partition"); + DBUG_ASSERT(thd); + + /* Only allow checking of constant values */ + List_iterator_fast<Item> v(values); + Item *item; + thd->push_internal_handler(&error_handler); + while ((item= v++)) + { + if (!item->const_item()) + goto err; + } + + if (copy_default_values) + restore_record(table,s->default_values); + + if (fields.elements || !values.elements) + { + if (fill_record(thd, table, fields, values, false)) + goto err; + } + else + { + if (fill_record(thd, table, table->field, values, false, false)) + goto err; + } + DBUG_ASSERT(!table->auto_increment_field_not_null); + + /* 
+ Evaluate DEFAULT functions like CURRENT_TIMESTAMP. + TODO: avoid setting non partitioning fields default value, to avoid + overhead. Not yet done, since mostly only one DEFAULT function per + table, or at least very few such columns. + */ +// if (info.function_defaults_apply_on_columns(&full_part_field_set)) +// info.set_function_defaults(table); + + { + /* + This function is used in INSERT; 'values' are supplied by user, + or are default values, not values read from a table, so read_set is + irrelevant. + */ + my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + const int rc= get_partition_id(this, &part_id, &func_value); + dbug_tmp_restore_column_map(table->read_set, old_map); + if (rc) + goto err; + } + + DBUG_PRINT("info", ("Insert into partition %u", part_id)); + bitmap_set_bit(used_partitions, part_id); + ret= false; + +err: + thd->pop_internal_handler(); + DBUG_RETURN(ret); +} + + /* Create a memory area where default partition names are stored and fill it up with the names. 
@@ -159,8 +576,9 @@ void partition_info::set_show_version_string(String *packet) /* Create a unique name for the subpartition as part_name'sp''subpart_no' + SYNOPSIS - create_subpartition_name() + create_default_subpartition_name() subpart_no Number of subpartition part_name Name of partition RETURN VALUES @@ -168,12 +586,12 @@ void partition_info::set_show_version_string(String *packet) 0 Memory allocation error */ -char *partition_info::create_subpartition_name(uint subpart_no, +char *partition_info::create_default_subpartition_name(uint subpart_no, const char *part_name) { uint size_alloc= strlen(part_name) + MAX_PART_NAME_SIZE; char *ptr= (char*) sql_calloc(size_alloc); - DBUG_ENTER("create_subpartition_name"); + DBUG_ENTER("create_default_subpartition_name"); if (likely(ptr != NULL)) { @@ -319,7 +737,8 @@ bool partition_info::set_up_default_subpartitions(handler *file, if (likely(subpart_elem != 0 && (!part_elem->subpartitions.push_back(subpart_elem)))) { - char *ptr= create_subpartition_name(j, part_elem->partition_name); + char *ptr= create_default_subpartition_name(j, + part_elem->partition_name); if (!ptr) goto end; subpart_elem->engine_type= default_engine_type; @@ -379,7 +798,7 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file, Support routine for check_partition_info SYNOPSIS - has_unique_fields + find_duplicate_field no parameters RETURN VALUE @@ -390,13 +809,13 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file, Check that the user haven't defined the same field twice in key or column list partitioning. 
*/ -char* partition_info::has_unique_fields() +char* partition_info::find_duplicate_field() { char *field_name_outer, *field_name_inner; List_iterator<char> it_outer(part_field_list); uint num_fields= part_field_list.elements; uint i,j; - DBUG_ENTER("partition_info::has_unique_fields"); + DBUG_ENTER("partition_info::find_duplicate_field"); for (i= 0; i < num_fields; i++) { @@ -418,6 +837,152 @@ char* partition_info::has_unique_fields() DBUG_RETURN(NULL); } + +/** + @brief Get part_elem and part_id from partition name + + @param partition_name Name of partition to search for. + @param file_name[out] Partition file name (part after table name, + #P#<part>[#SP#<subpart>]), skipped if NULL. + @param part_id[out] Id of found partition or NOT_A_PARTITION_ID. + + @retval Pointer to part_elem of [sub]partition, if not found NULL + + @note Since names of partitions AND subpartitions must be unique, + this function searches both partitions and subpartitions and if name of + a partition is given for a subpartitioned table, part_elem will be + the partition, but part_id will be NOT_A_PARTITION_ID and file_name not set. 
+*/ +partition_element *partition_info::get_part_elem(const char *partition_name, + char *file_name, + uint32 *part_id) +{ + List_iterator<partition_element> part_it(partitions); + uint i= 0; + DBUG_ENTER("partition_info::get_part_elem"); + DBUG_ASSERT(part_id); + *part_id= NOT_A_PARTITION_ID; + do + { + partition_element *part_elem= part_it++; + if (is_sub_partitioned()) + { + List_iterator<partition_element> sub_part_it(part_elem->subpartitions); + uint j= 0; + do + { + partition_element *sub_part_elem= sub_part_it++; + if (!my_strcasecmp(system_charset_info, + sub_part_elem->partition_name, partition_name)) + { + if (file_name) + create_subpartition_name(file_name, "", + part_elem->partition_name, + partition_name, + NORMAL_PART_NAME); + *part_id= j + (i * num_subparts); + DBUG_RETURN(sub_part_elem); + } + } while (++j < num_subparts); + + /* Naming a partition (first level) on a subpartitioned table. */ + if (!my_strcasecmp(system_charset_info, + part_elem->partition_name, partition_name)) + DBUG_RETURN(part_elem); + } + else if (!my_strcasecmp(system_charset_info, + part_elem->partition_name, partition_name)) + { + if (file_name) + create_partition_name(file_name, "", partition_name, + NORMAL_PART_NAME, TRUE); + *part_id= i; + DBUG_RETURN(part_elem); + } + } while (++i < num_parts); + DBUG_RETURN(NULL); +} + + +/** + Helper function to find_duplicate_name. +*/ + +static const char *get_part_name_from_elem(const char *name, size_t *length, + my_bool not_used __attribute__((unused))) +{ + *length= strlen(name); + return name; +} + +/* + A support function to check partition names for duplication in a + partitioned table + + SYNOPSIS + find_duplicate_name() + + RETURN VALUES + NULL Has unique part and subpart names + !NULL Pointer to duplicated name + + DESCRIPTION + Checks that the list of names in the partitions doesn't contain any + duplicated names. 
+*/ + +char *partition_info::find_duplicate_name() +{ + HASH partition_names; + uint max_names; + const uchar *curr_name= NULL; + List_iterator<partition_element> parts_it(partitions); + partition_element *p_elem; + + DBUG_ENTER("partition_info::find_duplicate_name"); + + /* + TODO: If table->s->ha_part_data->partition_name_hash.elements is > 0, + then we could just return NULL, but that has not been verified. + And this only happens when in ALTER TABLE with full table copy. + */ + + max_names= num_parts; + if (is_sub_partitioned()) + max_names+= num_parts * num_subparts; + if (my_hash_init(&partition_names, system_charset_info, max_names, 0, 0, + (my_hash_get_key) get_part_name_from_elem, 0, HASH_UNIQUE)) + { + DBUG_ASSERT(0); + curr_name= (const uchar*) "Internal failure"; + goto error; + } + while ((p_elem= (parts_it++))) + { + curr_name= (const uchar*) p_elem->partition_name; + if (my_hash_insert(&partition_names, curr_name)) + goto error; + + if (!p_elem->subpartitions.is_empty()) + { + List_iterator<partition_element> subparts_it(p_elem->subpartitions); + partition_element *subp_elem; + while ((subp_elem= (subparts_it++))) + { + curr_name= (const uchar*) subp_elem->partition_name; + if (my_hash_insert(&partition_names, curr_name)) + goto error; + } + } + } + my_hash_free(&partition_names); + DBUG_RETURN(NULL); +error: + my_hash_free(&partition_names); + DBUG_RETURN((char*) curr_name); +} + + /* A support function to check if a partition element's name is unique @@ -461,49 +1026,6 @@ bool partition_info::has_unique_name(partition_element *element) /* - A support function to check partition names for duplication in a - partitioned table - - SYNOPSIS - has_unique_names() - - RETURN VALUES - TRUE Has unique part and subpart names - FALSE Doesn't - - DESCRIPTION - Checks that the list of names in the partitions doesn't contain any - duplicated names. 
-*/ - -char *partition_info::has_unique_names() -{ - DBUG_ENTER("partition_info::has_unique_names"); - - List_iterator<partition_element> parts_it(partitions); - - partition_element *el; - while ((el= (parts_it++))) - { - if (! has_unique_name(el)) - DBUG_RETURN(el->partition_name); - - if (!el->subpartitions.is_empty()) - { - List_iterator<partition_element> subparts_it(el->subpartitions); - partition_element *subel; - while ((subel= (subparts_it++))) - { - if (! has_unique_name(subel)) - DBUG_RETURN(subel->partition_name); - } - } - } - DBUG_RETURN(NULL); -} - - -/* Check that the partition/subpartition is setup to use the correct storage engine SYNOPSIS @@ -1057,11 +1579,11 @@ static void warn_if_dir_in_part_elem(THD *thd, partition_element *part_elem) #endif { if (part_elem->data_file_name) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED), "DATA DIRECTORY"); if (part_elem->index_file_name) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED), "INDEX DIRECTORY"); part_elem->data_file_name= part_elem->index_file_name= NULL; @@ -1187,12 +1709,12 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type, } if (part_field_list.elements > 0 && - (same_name= has_unique_fields())) + (same_name= find_duplicate_field())) { my_error(ER_SAME_NAME_PARTITION_FIELD, MYF(0), same_name); goto end; } - if ((same_name= has_unique_names())) + if ((same_name= find_duplicate_name())) { my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name); goto end; @@ -1644,6 +2166,71 @@ void partition_info::report_part_expr_error(bool use_subpart_expr) } +/** + Check if fields are in the partitioning expression. + + @param fields List of Items (fields) + + @return True if any field in the fields list is used by a partitioning expr. 
+ @retval true At least one field in the field list is found. + @retval false No field is within any partitioning expression. +*/ + +bool partition_info::is_field_in_part_expr(List<Item> &fields) +{ + List_iterator<Item> it(fields); + Item *item; + Item_field *field; + DBUG_ENTER("is_fields_in_part_expr"); + while ((item= it++)) + { + field= item->field_for_view_update(); + DBUG_ASSERT(field->field->table == table); + if (bitmap_is_set(&full_part_field_set, field->field->field_index)) + DBUG_RETURN(true); + } + DBUG_RETURN(false); +} + + +/** + Check if all partitioning fields are included. +*/ + +bool partition_info::is_full_part_expr_in_fields(List<Item> &fields) +{ + Field **part_field= full_part_field_array; + DBUG_ASSERT(*part_field); + DBUG_ENTER("is_full_part_expr_in_fields"); + /* + It is very seldom many fields in full_part_field_array, so it is OK + to loop over all of them instead of creating a bitmap fields argument + to compare with. + */ + do + { + List_iterator<Item> it(fields); + Item *item; + Item_field *field; + bool found= false; + + while ((item= it++)) + { + field= item->field_for_view_update(); + DBUG_ASSERT(field->field->table == table); + if (*part_field == field->field) + { + found= true; + break; + } + } + if (!found) + DBUG_RETURN(false); + } while (*(++part_field)); + DBUG_RETURN(true); +} + + /* Create a new column value in current list with maxvalue Called from parser @@ -2251,262 +2838,6 @@ int partition_info::fix_parser_data(THD *thd) } -/** - helper function to compare strings that can also be - a NULL pointer. - - @param a char pointer (can be NULL). - @param b char pointer (can be NULL). - - @return false if equal - @retval true strings differs - @retval false strings is equal -*/ - -static bool strcmp_null(const char *a, const char *b) -{ - if (!a && !b) - return false; - if (a && b && !strcmp(a, b)) - return false; - return true; -} - - -/** - Check if the new part_info has the same partitioning. 
- - @param new_part_info New partition definition to compare with. - - @return True if not considered to have changed the partitioning. - @retval true Allowed change (only .frm change, compatible distribution). - @retval false Different partitioning, will need redistribution of rows. - - @note Currently only used to allow changing from non-set key_algorithm - to a specified key_algorithm, to avoid rebuild when upgrading from 5.1 of - such partitioned tables using numeric colums in the partitioning expression. - For more info see bug#14521864. - Does not check if columns etc has changed, i.e. only for - alter_info->flags == ALTER_PARTITION. -*/ - -bool partition_info::has_same_partitioning(partition_info *new_part_info) -{ - DBUG_ENTER("partition_info::has_same_partitioning"); - - DBUG_ASSERT(part_field_array && part_field_array[0]); - - /* - Only consider pre 5.5.3 .frm's to have same partitioning as - a new one with KEY ALGORITHM = 1 (). - */ - - if (part_field_array[0]->table->s->mysql_version >= 50503) - DBUG_RETURN(false); - - if (!new_part_info || - part_type != new_part_info->part_type || - num_parts != new_part_info->num_parts || - use_default_partitions != new_part_info->use_default_partitions || - new_part_info->is_sub_partitioned() != is_sub_partitioned()) - DBUG_RETURN(false); - - if (part_type != HASH_PARTITION) - { - /* - RANGE or LIST partitioning, check if KEY subpartitioned. - Also COLUMNS partitioning was added in 5.5, so treat that as different. - */ - if (!is_sub_partitioned() || - !new_part_info->is_sub_partitioned() || - column_list || - new_part_info->column_list || - !list_of_subpart_fields || - !new_part_info->list_of_subpart_fields || - new_part_info->num_subparts != num_subparts || - new_part_info->subpart_field_list.elements != - subpart_field_list.elements || - new_part_info->use_default_subpartitions != - use_default_subpartitions) - DBUG_RETURN(false); - } - else - { - /* Check if KEY partitioned. 
*/ - if (!new_part_info->list_of_part_fields || - !list_of_part_fields || - new_part_info->part_field_list.elements != part_field_list.elements) - DBUG_RETURN(false); - } - - /* Check that it will use the same fields in KEY (fields) list. */ - List_iterator<char> old_field_name_it(part_field_list); - List_iterator<char> new_field_name_it(new_part_info->part_field_list); - char *old_name, *new_name; - while ((old_name= old_field_name_it++)) - { - new_name= new_field_name_it++; - if (!new_name || my_strcasecmp(system_charset_info, - new_name, - old_name)) - DBUG_RETURN(false); - } - - if (is_sub_partitioned()) - { - /* Check that it will use the same fields in KEY subpart fields list. */ - List_iterator<char> old_field_name_it(subpart_field_list); - List_iterator<char> new_field_name_it(new_part_info->subpart_field_list); - char *old_name, *new_name; - while ((old_name= old_field_name_it++)) - { - new_name= new_field_name_it++; - if (!new_name || my_strcasecmp(system_charset_info, - new_name, - old_name)) - DBUG_RETURN(false); - } - } - - if (!use_default_partitions) - { - /* - Loop over partitions/subpartition to verify that they are - the same, including state and name. - */ - List_iterator<partition_element> part_it(partitions); - List_iterator<partition_element> new_part_it(new_part_info->partitions); - uint i= 0; - do - { - partition_element *part_elem= part_it++; - partition_element *new_part_elem= new_part_it++; - /* - The following must match: - partition_name, tablespace_name, data_file_name, index_file_name, - engine_type, part_max_rows, part_min_rows, nodegroup_id. - (max_value, signed_flag, has_null_value only on partition level, - RANGE/LIST) - The following can differ: - - part_comment - part_state must be PART_NORMAL! 
- */ - if (!part_elem || !new_part_elem || - strcmp(part_elem->partition_name, - new_part_elem->partition_name) || - part_elem->part_state != PART_NORMAL || - new_part_elem->part_state != PART_NORMAL || - part_elem->max_value != new_part_elem->max_value || - part_elem->signed_flag != new_part_elem->signed_flag || - part_elem->has_null_value != new_part_elem->has_null_value) - DBUG_RETURN(false); - - /* new_part_elem may not have engine_type set! */ - if (new_part_elem->engine_type && - part_elem->engine_type != new_part_elem->engine_type) - DBUG_RETURN(false); - - if (is_sub_partitioned()) - { - /* - Check that both old and new partition has the same definition - (VALUES IN/VALUES LESS THAN) (No COLUMNS partitioning, see above) - */ - if (part_type == LIST_PARTITION) - { - List_iterator<part_elem_value> list_vals(part_elem->list_val_list); - List_iterator<part_elem_value> - new_list_vals(new_part_elem->list_val_list); - part_elem_value *val; - part_elem_value *new_val; - while ((val= list_vals++)) - { - new_val= new_list_vals++; - if (!new_val) - DBUG_RETURN(false); - if ((!val->null_value && !new_val->null_value) && - val->value != new_val->value) - DBUG_RETURN(false); - } - if (new_list_vals++) - DBUG_RETURN(false); - } - else - { - DBUG_ASSERT(part_type == RANGE_PARTITION); - if (new_part_elem->range_value != part_elem->range_value) - DBUG_RETURN(false); - } - - if (!use_default_subpartitions) - { - List_iterator<partition_element> - sub_part_it(part_elem->subpartitions); - List_iterator<partition_element> - new_sub_part_it(new_part_elem->subpartitions); - uint j= 0; - do - { - partition_element *sub_part_elem= sub_part_it++; - partition_element *new_sub_part_elem= new_sub_part_it++; - /* new_part_elem may not have engine_type set! 
*/ - if (new_sub_part_elem->engine_type && - sub_part_elem->engine_type != new_part_elem->engine_type) - DBUG_RETURN(false); - - if (strcmp(sub_part_elem->partition_name, - new_sub_part_elem->partition_name) || - sub_part_elem->part_state != PART_NORMAL || - new_sub_part_elem->part_state != PART_NORMAL || - sub_part_elem->part_min_rows != - new_sub_part_elem->part_min_rows || - sub_part_elem->part_max_rows != - new_sub_part_elem->part_max_rows || - sub_part_elem->nodegroup_id != - new_sub_part_elem->nodegroup_id) - DBUG_RETURN(false); - - if (strcmp_null(sub_part_elem->data_file_name, - new_sub_part_elem->data_file_name) || - strcmp_null(sub_part_elem->index_file_name, - new_sub_part_elem->index_file_name) || - strcmp_null(sub_part_elem->tablespace_name, - new_sub_part_elem->tablespace_name)) - DBUG_RETURN(false); - - } while (++j < num_subparts); - } - } - else - { - if (part_elem->part_min_rows != new_part_elem->part_min_rows || - part_elem->part_max_rows != new_part_elem->part_max_rows || - part_elem->nodegroup_id != new_part_elem->nodegroup_id) - DBUG_RETURN(false); - - if (strcmp_null(part_elem->data_file_name, - new_part_elem->data_file_name) || - strcmp_null(part_elem->index_file_name, - new_part_elem->index_file_name) || - strcmp_null(part_elem->tablespace_name, - new_part_elem->tablespace_name)) - DBUG_RETURN(false); - } - } while (++i < num_parts); - } - - /* - Only if key_algorithm was not specified before and it is now set, - consider this as nothing was changed, and allow change without rebuild! 
- */ - if (key_algorithm != partition_info::KEY_ALGORITHM_NONE || - new_part_info->key_algorithm == partition_info::KEY_ALGORITHM_NONE) - DBUG_RETURN(false); - - DBUG_RETURN(true); -} - - void partition_info::print_debug(const char *str, uint *value) { DBUG_ENTER("print_debug"); diff --git a/sql/partition_info.h b/sql/partition_info.h index 17c9cb383ee..01f6b53a148 100644 --- a/sql/partition_info.h +++ b/sql/partition_info.h @@ -20,10 +20,11 @@ #pragma interface /* gcc class implementation */ #endif +#include "sql_class.h" #include "partition_element.h" class partition_info; - +struct TABLE_LIST; /* Some function typedefs */ typedef int (*get_part_id_func)(partition_info *part_info, uint32 *part_id, @@ -111,14 +112,30 @@ public: struct st_ddl_log_memory_entry *frm_log_entry; /* - A bitmap of partitions used by the current query. + Bitmaps of partitions used by the current query. + * read_partitions - partitions to be used for reading. + * lock_partitions - partitions that must be locked (read or write). + Usually read_partitions is the same set as lock_partitions, but + in case of UPDATE the WHERE clause can limit the read_partitions set, + but not neccesarily the lock_partitions set. Usage pattern: - * The handler->extra(HA_EXTRA_RESET) call at query start/end sets all - partitions to be unused. - * Before index/rnd_init(), partition pruning code sets the bits for used - partitions. + * Initialized in ha_partition::open(). + * read+lock_partitions is set according to explicit PARTITION, + WL#5217, in open_and_lock_tables(). + * Bits in read_partitions can be cleared in prune_partitions() + in the optimizing step. + (WL#4443 is about allowing prune_partitions() to affect lock_partitions + and be done before locking too). + * When the partition enabled handler get an external_lock call it locks + all partitions in lock_partitions (and remembers which partitions it + locked, so that it can unlock them later). 
In case of LOCK TABLES it will + lock all partitions, and keep them locked while lock_partitions can + change for each statement under LOCK TABLES. + * Freed at the same time item_free_list is freed. */ - MY_BITMAP used_partitions; + MY_BITMAP read_partitions; + MY_BITMAP lock_partitions; + bool bitmaps_are_initialized; union { longlong *range_int_array; @@ -157,6 +174,7 @@ public: uint curr_list_object; uint num_columns; + TABLE *table; /* These key_map's are used for Partitioning to enable quick decisions on whether we can derive more information about which partition to @@ -220,6 +238,15 @@ public: bool from_openfrm; bool has_null_value; bool column_list; + /** + True if pruning has been completed and can not be pruned any further, + even if there are subqueries or stored programs in the condition. + + Some times it is needed to run prune_partitions() a second time to prune + read partitions after tables are locked, when subquery and + stored functions might have been evaluated. + */ + bool is_pruning_completed; partition_info() : get_partition_id(NULL), get_part_partition_id(NULL), @@ -232,6 +259,7 @@ public: restore_part_field_ptrs(NULL), restore_subpart_field_ptrs(NULL), part_expr(NULL), subpart_expr(NULL), item_free_list(NULL), first_log_entry(NULL), exec_log_entry(NULL), frm_log_entry(NULL), + bitmaps_are_initialized(FALSE), list_array(NULL), err_value(0), part_info_string(NULL), part_func_string(NULL), subpart_func_string(NULL), @@ -252,7 +280,7 @@ public: list_of_part_fields(FALSE), list_of_subpart_fields(FALSE), linear_hash_ind(FALSE), fixed(FALSE), is_auto_partitioned(FALSE), from_openfrm(FALSE), - has_null_value(FALSE), column_list(FALSE) + has_null_value(FALSE), column_list(FALSE), is_pruning_completed(false) { all_fields_in_PF.clear_all(); all_fields_in_PPF.clear_all(); @@ -266,6 +294,8 @@ public: ~partition_info() {} partition_info *get_clone(); + bool set_named_partition_bitmap(const char *part_name, uint length); + bool 
set_partition_bitmaps(TABLE_LIST *table_list); /* Answers the question if subpartitioning is used for a certain table */ bool is_sub_partitioned() { @@ -280,8 +310,8 @@ public: bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info, uint start_no); - char *has_unique_fields(); - char *has_unique_names(); + char *find_duplicate_field(); + char *find_duplicate_name(); bool check_engine_mix(handlerton *engine_type, bool default_engine); bool check_range_constants(THD *thd); bool check_list_constants(THD *thd); @@ -311,8 +341,34 @@ public: bool init_column_part(); bool add_column_list_value(THD *thd, Item *item); void set_show_version_string(String *packet); + partition_element *get_part_elem(const char *partition_name, + char *file_name, + uint32 *part_id); void report_part_expr_error(bool use_subpart_expr); - bool has_same_partitioning(partition_info *new_part_info); + bool set_used_partition(List<Item> &fields, + List<Item> &values, + COPY_INFO &info, + bool copy_default_values, + MY_BITMAP *used_partitions); + /** + PRUNE_NO - Unable to prune. + PRUNE_DEFAULTS - Partitioning field is only set to + DEFAULT values, only need to check + pruning for one row where the DEFAULTS + values are set. + PRUNE_YES - Pruning is possible, calculate the used partition set + by evaluate the partition_id on row by row basis. 
+ */ + enum enum_can_prune {PRUNE_NO=0, PRUNE_DEFAULTS, PRUNE_YES}; + bool can_prune_insert(THD *thd, + enum_duplicates duplic, + COPY_INFO &update, + List<Item> &update_fields, + List<Item> &fields, + bool empty_values, + enum_can_prune *can_prune_partitions, + bool *prune_needs_default_values, + MY_BITMAP *used_partitions); private: static int list_part_cmp(const void* a, const void* b); bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info, @@ -320,7 +376,12 @@ private: bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info); char *create_default_partition_names(uint part_no, uint num_parts, uint start_no); - char *create_subpartition_name(uint subpart_no, const char *part_name); + char *create_default_subpartition_name(uint subpart_no, + const char *part_name); + bool prune_partition_bitmaps(TABLE_LIST *table_list); + bool add_named_partition(const char *part_name, uint length); + bool is_field_in_part_expr(List<Item> &fields); + bool is_full_part_expr_in_fields(List<Item> &fields); public: bool has_unique_name(partition_element *element); }; diff --git a/sql/password.c b/sql/password.c index 947620ddf7a..954daf2d8d1 100644 --- a/sql/password.c +++ b/sql/password.c @@ -60,12 +60,14 @@ *****************************************************************************/ -#include <password.h> #include <my_global.h> #include <my_sys.h> #include <m_string.h> +#include <password.h> +#include <mysql.h> +#include <my_rnd.h> #include <sha1.h> -#include "mysql.h" +#include <crypt_genhash_impl.h> /************ MySQL 3.23-4.0 authentication routines: untouched ***********/ @@ -372,6 +374,47 @@ my_crypt(char *to, const uchar *s1, const uchar *s2, uint len) } +#if defined(HAVE_OPENSSL) +void my_make_scrambled_password(char *to, const char *password, + size_t pass_len) +{ + + char salt[CRYPT_SALT_LENGTH + 1]; + + generate_user_salt(salt, CRYPT_SALT_LENGTH + 1); + my_crypt_genhash(to, + CRYPT_MAX_PASSWORD_SIZE, + password, + pass_len, + salt, + 0); + +} 
+#endif +/** + Compute two stage SHA1 hash of the password : + + hash_stage1=sha1("password") + hash_stage2=sha1(hash_stage1) + + @param password [IN] Password string. + @param pass_len [IN] Length of the password. + @param hash_stage1 [OUT] sha1(password) + @param hash_stage2 [OUT] sha1(hash_stage1) +*/ + +inline static +void compute_two_stage_sha1_hash(const char *password, size_t pass_len, + uint8 *hash_stage1, uint8 *hash_stage2) +{ + /* Stage 1: hash password */ + compute_sha1_hash(hash_stage1, password, pass_len); + + /* Stage 2 : hash first stage's output. */ + compute_sha1_hash(hash_stage2, (const char *) hash_stage1, SHA1_HASH_SIZE); +} + + /* MySQL 4.1.1 password hashing: SHA conversion (see RFC 2289, 3174) twice applied to the password string, and then produced octet sequence is @@ -379,27 +422,20 @@ my_crypt(char *to, const uchar *s1, const uchar *s2, uint len) The result of this function is used as return value from PASSWORD() and is stored in the database. SYNOPSIS - my_make_scrambled_password() + my_make_scrambled_password_sha1() buf OUT buffer of size 2*SHA1_HASH_SIZE + 2 to store hex string password IN password string pass_len IN length of password string */ -void my_make_scrambled_password(char *to, const char *password, - size_t pass_len) +void my_make_scrambled_password_sha1(char *to, const char *password, + size_t pass_len) { - SHA1_CONTEXT sha1_context; uint8 hash_stage2[SHA1_HASH_SIZE]; - mysql_sha1_reset(&sha1_context); - /* stage 1: hash password */ - mysql_sha1_input(&sha1_context, (uint8 *) password, (uint) pass_len); - mysql_sha1_result(&sha1_context, (uint8 *) to); - /* stage 2: hash stage1 output */ - mysql_sha1_reset(&sha1_context); - mysql_sha1_input(&sha1_context, (uint8 *) to, SHA1_HASH_SIZE); - /* separate buffer is used to pass 'to' in octet2hex */ - mysql_sha1_result(&sha1_context, hash_stage2); + /* Two stage SHA1 hash of the password. 
*/ + compute_two_stage_sha1_hash(password, pass_len, (uint8 *) to, hash_stage2); + /* convert hash_stage2 to hex string */ *to++= PVERSION41_CHAR; octet2hex(to, (const char*) hash_stage2, SHA1_HASH_SIZE); @@ -419,7 +455,7 @@ void my_make_scrambled_password(char *to, const char *password, void make_scrambled_password(char *to, const char *password) { - my_make_scrambled_password(to, password, strlen(password)); + my_make_scrambled_password_sha1(to, password, strlen(password)); } @@ -443,24 +479,16 @@ void make_scrambled_password(char *to, const char *password) void scramble(char *to, const char *message, const char *password) { - SHA1_CONTEXT sha1_context; uint8 hash_stage1[SHA1_HASH_SIZE]; uint8 hash_stage2[SHA1_HASH_SIZE]; - mysql_sha1_reset(&sha1_context); - /* stage 1: hash password */ - mysql_sha1_input(&sha1_context, (uint8 *) password, (uint) strlen(password)); - mysql_sha1_result(&sha1_context, hash_stage1); - /* stage 2: hash stage 1; note that hash_stage2 is stored in the database */ - mysql_sha1_reset(&sha1_context); - mysql_sha1_input(&sha1_context, hash_stage1, SHA1_HASH_SIZE); - mysql_sha1_result(&sha1_context, hash_stage2); + /* Two stage SHA1 hash of the password. 
*/ + compute_two_stage_sha1_hash(password, strlen(password), hash_stage1, + hash_stage2); + /* create crypt string as sha1(message, hash_stage2) */; - mysql_sha1_reset(&sha1_context); - mysql_sha1_input(&sha1_context, (const uint8 *) message, SCRAMBLE_LENGTH); - mysql_sha1_input(&sha1_context, hash_stage2, SHA1_HASH_SIZE); - /* xor allows 'from' and 'to' overlap: lets take advantage of it */ - mysql_sha1_result(&sha1_context, (uint8 *) to); + compute_sha1_hash_multi((uint8 *) to, message, SCRAMBLE_LENGTH, + (const char *) hash_stage2, SHA1_HASH_SIZE); my_crypt(to, (const uchar *) to, hash_stage1, SCRAMBLE_LENGTH); } @@ -472,7 +500,7 @@ scramble(char *to, const char *message, const char *password) null-terminated, reply and hash_stage2 must be at least SHA1_HASH_SIZE long (if not, something fishy is going on). SYNOPSIS - check_scramble() + check_scramble_sha1() scramble clients' reply, presumably produced by scramble() message original random string, previously sent to client (presumably second argument of scramble()), must be @@ -486,27 +514,30 @@ scramble(char *to, const char *message, const char *password) */ my_bool -check_scramble(const uchar *scramble_arg, const char *message, - const uint8 *hash_stage2) +check_scramble_sha1(const uchar *scramble_arg, const char *message, + const uint8 *hash_stage2) { - SHA1_CONTEXT sha1_context; uint8 buf[SHA1_HASH_SIZE]; uint8 hash_stage2_reassured[SHA1_HASH_SIZE]; - mysql_sha1_reset(&sha1_context); /* create key to encrypt scramble */ - mysql_sha1_input(&sha1_context, (const uint8 *) message, SCRAMBLE_LENGTH); - mysql_sha1_input(&sha1_context, hash_stage2, SHA1_HASH_SIZE); - mysql_sha1_result(&sha1_context, buf); + compute_sha1_hash_multi(buf, message, SCRAMBLE_LENGTH, + (const char *) hash_stage2, SHA1_HASH_SIZE); /* encrypt scramble */ - my_crypt((char *) buf, buf, scramble_arg, SCRAMBLE_LENGTH); + my_crypt((char *) buf, buf, scramble_arg, SCRAMBLE_LENGTH); + /* now buf supposedly contains hash_stage1: so we can get 
hash_stage2 */ - mysql_sha1_reset(&sha1_context); - mysql_sha1_input(&sha1_context, buf, SHA1_HASH_SIZE); - mysql_sha1_result(&sha1_context, hash_stage2_reassured); + compute_sha1_hash(hash_stage2_reassured, (const char *) buf, SHA1_HASH_SIZE); + return test(memcmp(hash_stage2, hash_stage2_reassured, SHA1_HASH_SIZE)); } +my_bool +check_scramble(const uchar *scramble_arg, const char *message, + const uint8 *hash_stage2) +{ + return check_scramble_sha1(scramble_arg, message, hash_stage2); +} /* Convert scrambled password from asciiz hex string to binary form. diff --git a/sql/protocol.cc b/sql/protocol.cc index be16d8c3ed8..effeee9b4aa 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -161,14 +161,14 @@ bool net_send_error(THD *thd, uint sql_errno, const char *err, It's one case when we can push an error even though there is an OK or EOF already. */ - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); /* Abort multi-result sets */ thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; error= net_send_error_packet(thd, sql_errno, err, sqlstate); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); DBUG_RETURN(error); } @@ -233,7 +233,7 @@ net_send_ok(THD *thd, pos+=2; /* We can only return up to 65535 warnings in two bytes */ - uint tmp= min(statement_warn_count, 65535); + uint tmp= MY_MIN(statement_warn_count, 65535); int2store(pos, tmp); pos+= 2; } @@ -242,7 +242,7 @@ net_send_ok(THD *thd, int2store(pos, server_status); pos+=2; } - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); if (message && message[0]) pos= net_store_data(pos, (uchar*) message, strlen(message)); @@ -251,7 +251,7 @@ net_send_ok(THD *thd, error= net_flush(net); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); DBUG_PRINT("info", ("OK sent, so no more error sending allowed")); DBUG_RETURN(error); @@ -291,11 +291,11 @@ 
net_send_eof(THD *thd, uint server_status, uint statement_warn_count) /* Set to TRUE if no active vio, to work well in case of --init-file */ if (net->vio != 0) { - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); error= write_eof_packet(thd, net, server_status, statement_warn_count); if (!error) error= net_flush(net); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); DBUG_PRINT("info", ("EOF sent, so no more error sending allowed")); } DBUG_RETURN(error); @@ -329,7 +329,7 @@ static bool write_eof_packet(THD *thd, NET *net, Don't send warn count during SP execution, as the warn_list is cleared between substatements, and mysqltest gets confused */ - uint tmp= min(statement_warn_count, 65535); + uint tmp= MY_MIN(statement_warn_count, 65535); buff[0]= 254; int2store(buff+1, tmp); /* @@ -486,30 +486,30 @@ static uchar *net_store_length_fast(uchar *packet, uint length) void Protocol::end_statement() { DBUG_ENTER("Protocol::end_statement"); - DBUG_ASSERT(! thd->stmt_da->is_sent); + DBUG_ASSERT(! thd->get_stmt_da()->is_sent()); bool error= FALSE; /* Can not be true, but do not take chances in production. */ - if (thd->stmt_da->is_sent) + if (thd->get_stmt_da()->is_sent()) DBUG_VOID_RETURN; - switch (thd->stmt_da->status()) { + switch (thd->get_stmt_da()->status()) { case Diagnostics_area::DA_ERROR: /* The query failed, send error to log and abort bootstrap. 
*/ - error= send_error(thd->stmt_da->sql_errno(), - thd->stmt_da->message(), - thd->stmt_da->get_sqlstate()); + error= send_error(thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message(), + thd->get_stmt_da()->get_sqlstate()); break; case Diagnostics_area::DA_EOF: error= send_eof(thd->server_status, - thd->stmt_da->statement_warn_count()); + thd->get_stmt_da()->statement_warn_count()); break; case Diagnostics_area::DA_OK: error= send_ok(thd->server_status, - thd->stmt_da->statement_warn_count(), - thd->stmt_da->affected_rows(), - thd->stmt_da->last_insert_id(), - thd->stmt_da->message()); + thd->get_stmt_da()->statement_warn_count(), + thd->get_stmt_da()->affected_rows(), + thd->get_stmt_da()->last_insert_id(), + thd->get_stmt_da()->message()); break; case Diagnostics_area::DA_DISABLED: break; @@ -520,7 +520,7 @@ void Protocol::end_statement() break; } if (!error) - thd->stmt_da->is_sent= TRUE; + thd->get_stmt_da()->set_is_sent(true); DBUG_VOID_RETURN; } @@ -606,17 +606,17 @@ void net_send_progress_packet(THD *thd) *pos++= (uchar) 1; // Number of strings *pos++= (uchar) thd->progress.stage + 1; /* - We have the max() here to avoid problems if max_stage is not set, + We have the MY_MAX() here to avoid problems if max_stage is not set, which may happen during automatic repair of table */ - *pos++= (uchar) max(thd->progress.max_stage, thd->progress.stage + 1); + *pos++= (uchar) MY_MAX(thd->progress.max_stage, thd->progress.stage + 1); progress= 0; if (thd->progress.max_counter) progress= 100000ULL * thd->progress.counter / thd->progress.max_counter; int3store(pos, progress); // Between 0 & 100000 pos+= 3; pos= net_store_data(pos, (const uchar*) proc_info, - min(length, sizeof(buff)-7)); + MY_MIN(length, sizeof(buff)-7)); net_write_command(&thd->net, (uchar) 255, progress_header, sizeof(progress_header), (uchar*) buff, (uint) (pos - buff)); @@ -688,9 +688,9 @@ bool Protocol::flush() { #ifndef EMBEDDED_LIBRARY bool error; - thd->stmt_da->can_overwrite_status= 
TRUE; + thd->get_stmt_da()->set_overwrite_status(true); error= net_flush(&thd->net); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); return error; #else return 0; @@ -856,7 +856,7 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags) Send no warning information, as it will be sent at statement end. */ if (write_eof_packet(thd, &thd->net, thd->server_status, - thd->warning_info->statement_warn_count())) + thd->get_stmt_da()->current_statement_warn_count())) DBUG_RETURN(1); } DBUG_RETURN(prepare_for_send(list->elements)); diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index 12bdf722bec..fced238e334 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -186,7 +186,7 @@ void init_master_log_pos(Master_info* mi) if CHANGE MASTER did not specify it. (no data loss in conversion as hb period has a max) */ - mi->heartbeat_period= (float) min(SLAVE_MAX_HEARTBEAT_PERIOD, + mi->heartbeat_period= (float) MY_MIN(SLAVE_MAX_HEARTBEAT_PERIOD, (slave_net_timeout/2.0)); DBUG_ASSERT(mi->heartbeat_period > (float) 0.001 || mi->heartbeat_period == 0); @@ -766,20 +766,20 @@ void create_logfile_name_with_suffix(char *res_file_name, size_t length, { const char *info_file_end= info_file + (p - res_file_name); const char *ext= append ? 
info_file_end : fn_ext2(info_file); - size_t res_length, ext_pos; + size_t res_length, ext_pos, from_length; uint errors; /* Create null terminated string */ - strmake(buff, suffix->str, suffix->length); + from_length= strmake(buff, suffix->str, suffix->length) - buff; /* Convert to characters usable in a file name */ - res_length= strconvert(system_charset_info, buff, + res_length= strconvert(system_charset_info, buff, from_length, &my_charset_filename, res, sizeof(res), &errors); ext_pos= (size_t) (ext - info_file); length-= (suffix->length - ext_pos); /* Leave place for extension */ p= res_file_name + ext_pos; *p++= '-'; /* Add separator */ - p= strmake(p, res, min((size_t) (length - (p - res_file_name)), + p= strmake(p, res, MY_MIN((size_t) (length - (p - res_file_name)), res_length)); /* Add back extension. We have checked above that there is space for it */ strmov(p, ext); @@ -957,7 +957,7 @@ bool Master_info_index::init_all_master_info() sql_print_error("Initialized Master_info from '%s' failed", buf_master_info_file); if (!master_info_index->get_master_info(&connection_name, - MYSQL_ERROR::WARN_LEVEL_NOTE)) + Sql_condition::WARN_LEVEL_NOTE)) { /* Master_info is not in HASH; Add it */ if (master_info_index->add_master_info(mi, FALSE)) @@ -982,7 +982,7 @@ bool Master_info_index::init_all_master_info() sql_print_information("Initialized Master_info from '%s'", buf_master_info_file); if (master_info_index->get_master_info(&connection_name, - MYSQL_ERROR::WARN_LEVEL_NOTE)) + Sql_condition::WARN_LEVEL_NOTE)) { /* Master_info was already registered */ sql_print_error(ER(ER_CONNECTION_ALREADY_EXISTS), @@ -1079,7 +1079,7 @@ bool Master_info_index::write_master_name_to_index_file(LEX_STRING *name, Master_info * Master_info_index::get_master_info(LEX_STRING *connection_name, - MYSQL_ERROR::enum_warning_level warning) + Sql_condition::enum_warning_level warning) { Master_info *mi; char buff[MAX_CONNECTION_NAME+1], *res; @@ -1096,10 +1096,10 @@ 
Master_info_index::get_master_info(LEX_STRING *connection_name, mi= (Master_info*) my_hash_search(&master_info_hash, (uchar*) buff, buff_length); - if (!mi && warning != MYSQL_ERROR::WARN_LEVEL_NOTE) + if (!mi && warning != Sql_condition::WARN_LEVEL_NOTE) { my_error(WARN_NO_MASTER_INFO, - MYF(warning == MYSQL_ERROR::WARN_LEVEL_WARN ? ME_JUST_WARNING : + MYF(warning == Sql_condition::WARN_LEVEL_WARN ? ME_JUST_WARNING : 0), (int) connection_name->length, connection_name->str); @@ -1118,7 +1118,7 @@ bool Master_info_index::check_duplicate_master_info(LEX_STRING *name_arg, /* Get full host and port name */ if ((mi= master_info_index->get_master_info(name_arg, - MYSQL_ERROR::WARN_LEVEL_NOTE))) + Sql_condition::WARN_LEVEL_NOTE))) { if (!host) host= mi->host; @@ -1182,7 +1182,7 @@ bool Master_info_index::remove_master_info(LEX_STRING *name) Master_info* mi; DBUG_ENTER("remove_master_info"); - if ((mi= get_master_info(name, MYSQL_ERROR::WARN_LEVEL_WARN))) + if ((mi= get_master_info(name, Sql_condition::WARN_LEVEL_WARN))) { // Delete Master_info and rewrite others to file if (!my_hash_delete(&master_info_hash, (uchar*) mi)) @@ -1294,7 +1294,7 @@ bool Master_info_index::start_all_slaves(THD *thd) break; } else - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SLAVE_STARTED, ER(ER_SLAVE_STARTED), (int) mi->connection_name.length, mi->connection_name.str); @@ -1339,7 +1339,7 @@ bool Master_info_index::stop_all_slaves(THD *thd) break; } else - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SLAVE_STOPPED, ER(ER_SLAVE_STOPPED), (int) mi->connection_name.length, mi->connection_name.str); diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h index ad6c57e21c4..991f6673c3a 100644 --- a/sql/rpl_mi.h +++ b/sql/rpl_mi.h @@ -208,7 +208,7 @@ public: bool add_master_info(Master_info *mi, bool write_to_file); bool remove_master_info(LEX_STRING *connection_name); 
Master_info *get_master_info(LEX_STRING *connection_name, - MYSQL_ERROR::enum_warning_level warning); + Sql_condition::enum_warning_level warning); bool give_error_if_slave_running(); bool start_all_slaves(THD *thd); bool stop_all_slaves(THD *thd); diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc index 99bf8a82004..aa8c118cfe6 100644 --- a/sql/rpl_record.cc +++ b/sql/rpl_record.cc @@ -287,7 +287,7 @@ unpack_row(Relay_log_info const *rli, else { f->set_default(); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_BAD_NULL_ERROR, ER(ER_BAD_NULL_ERROR), f->field_name); } @@ -362,7 +362,7 @@ unpack_row(Relay_log_info const *rli, /* throw away master's extra fields */ - uint max_cols= min(tabledef->size(), cols->n_bits); + uint max_cols= MY_MIN(tabledef->size(), cols->n_bits); for (; i < max_cols; i++) { if (bitmap_is_set(cols, i)) @@ -447,7 +447,7 @@ int prepare_record(TABLE *const table, const uint skip, const bool check) { f->set_default(); push_warning_printf(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_NO_DEFAULT_FOR_FIELD, ER(ER_NO_DEFAULT_FOR_FIELD), f->field_name); diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index a455779bb6e..3a6bb4c33dc 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -256,7 +256,7 @@ a file name for --relay-log-index option", opt_relaylog_index_name); { sql_print_error("Failed to create a new relay log info file (\ file '%s', errno %d)", fname, my_errno); - msg= current_thd->stmt_da->message(); + msg= current_thd->get_stmt_da()->message(); goto err; } if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0, @@ -264,7 +264,7 @@ file '%s', errno %d)", fname, my_errno); { sql_print_error("Failed to create a cache on relay log info file '%s'", fname); - msg= current_thd->stmt_da->message(); + msg= current_thd->get_stmt_da()->message(); goto err; } @@ -741,7 +741,7 @@ int Relay_log_info::wait_for_pos(THD* 
thd, String* log_name, ulong log_name_extension; char log_name_tmp[FN_REFLEN]; //make a char[] from String - strmake(log_name_tmp, log_name->ptr(), min(log_name->length(), FN_REFLEN-1)); + strmake(log_name_tmp, log_name->ptr(), MY_MIN(log_name->length(), FN_REFLEN-1)); char *p= fn_ext(log_name_tmp); char *p_end; @@ -751,7 +751,7 @@ int Relay_log_info::wait_for_pos(THD* thd, String* log_name, goto err; } // Convert 0-3 to 4 - log_pos= max(log_pos, BIN_LOG_HEADER_SIZE); + log_pos= MY_MAX(log_pos, BIN_LOG_HEADER_SIZE); /* p points to '.' */ log_name_extension= strtoul(++p, &p_end, 10); /* @@ -1236,7 +1236,7 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos, "Failed to update GTID state in %s.%s, slave state may become " "inconsistent: %d: %s", "mysql", rpl_gtid_slave_state_table_name.str, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); /* At this point we are not in a transaction (for example after DDL), so we can not roll back. Anyway, normally updates to the slave @@ -1355,9 +1355,9 @@ void Relay_log_info::clear_tables_to_lock() void Relay_log_info::slave_close_thread_tables(THD *thd) { DBUG_ENTER("Relay_log_info::slave_close_thread_tables(THD *thd)"); - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); close_thread_tables(thd); /* diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc index ac8a8fe356b..db47c3c164a 100644 --- a/sql/rpl_utility.cc +++ b/sql/rpl_utility.cc @@ -815,7 +815,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli, /* We only check the initial columns for the tables. 
*/ - uint const cols_to_check= min(table->s->fields, size()); + uint const cols_to_check= MY_MIN(table->s->fields, size()); TABLE *tmp_table= NULL; for (uint col= 0 ; col < cols_to_check ; ++col) @@ -916,10 +916,10 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE * List<Create_field> field_list; /* At slave, columns may differ. So we should create - min(columns@master, columns@slave) columns in the + MY_MIN(columns@master, columns@slave) columns in the conversion table. */ - uint const cols_to_create= min(target_table->s->fields, size()); + uint const cols_to_create= MY_MIN(target_table->s->fields, size()); for (uint col= 0 ; col < cols_to_create; ++col) { Create_field *field_def= diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h index b08721aa8c2..9ac17f68a1f 100644 --- a/sql/rpl_utility.h +++ b/sql/rpl_utility.h @@ -295,7 +295,7 @@ public: do { \ char buf[256]; \ uint i; \ - for (i = 0 ; i < min(sizeof(buf) - 1, (BS)->n_bits) ; i++) \ + for (i = 0 ; i < MY_MIN(sizeof(buf) - 1, (BS)->n_bits) ; i++) \ buf[i] = bitmap_is_set((BS), i) ? '1' : '0'; \ buf[i] = '\0'; \ DBUG_PRINT((N), ((FRM), buf)); \ diff --git a/sql/set_var.cc b/sql/set_var.cc index d9741ca3481..db74d8f0d9d 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -384,7 +384,7 @@ void sys_var::do_deprecated_warning(THD *thd) ? 
ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT : ER_WARN_DEPRECATED_SYNTAX; if (thd) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_DEPRECATED_SYNTAX, ER(errmsg), buf1, deprecation_substitute); else @@ -421,7 +421,7 @@ bool throw_bounds_warning(THD *thd, const char *name, my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buf); return true; } - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), name, buf); } @@ -441,7 +441,7 @@ bool throw_bounds_warning(THD *thd, const char *name, bool fixed, double v) my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buf); return true; } - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), name, buf); } diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index f62f5d917f7..35f2cfb330c 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -1,4 +1,4 @@ -languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, japanese-sjis=jps sjis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u; +languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum 
latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u, bulgarian=bgn cp1251; default-language eng @@ -51,7 +51,7 @@ ER_YES spa "SI" ukr "ТÐК" ER_CANT_CREATE_FILE - cze "Nemohu vytvo-BÅ™it soubor '%-.200s' (chybový kód: %M)" + cze "Nemohu vytvoÅ™it soubor '%-.200s' (chybový kód: %M)" dan "Kan ikke oprette filen '%-.200s' (Fejlkode: %M)" nla "Kan file '%-.200s' niet aanmaken (Errcode: %M)" eng "Can't create file '%-.200s' (errno: %M)" @@ -61,7 +61,7 @@ ER_CANT_CREATE_FILE greek "ΑδÏνατη η δημιουÏγία του αÏχείου '%-.200s' (κωδικός λάθους: %M)" hun "A '%-.200s' file nem hozhato letre (hibakod: %M)" ita "Impossibile creare il file '%-.200s' (errno: %M)" - jpn "'%-.200s' ファイルãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)" + jpn "ファイル '%-.200s' を作æˆã§ãã¾ã›ã‚“。(エラー番å·: %M)" kor "í™”ì¼ '%-.200s'를 만들지 못했습니다. (ì—러번호: %M)" nor "Kan ikke opprette fila '%-.200s' (Feilkode: %M)" norwegian-ny "Kan ikkje opprette fila '%-.200s' (Feilkode: %M)" @@ -75,7 +75,7 @@ ER_CANT_CREATE_FILE swe "Kan inte skapa filen '%-.200s' (Felkod: %M)" ukr "Ðе можу Ñтворити файл '%-.200s' (помилка: %M)" ER_CANT_CREATE_TABLE - cze "Nemohu vytvo-BÅ™it tabulku %`s.%`s (chybový kód: %M)" + cze "Nemohu vytvoÅ™it tabulku %`s.%`s (chybový kód: %M)" dan "Kan ikke oprette tabellen %`s.%`s (Fejlkode: %M)" nla "Kan tabel %`s.%`s niet aanmaken (Errcode: %M)" eng "Can't create table %`s.%`s (errno: %M)" @@ -100,18 +100,17 @@ ER_CANT_CREATE_TABLE swe "Kan inte skapa tabellen %`s.%`s (Felkod: %M)" ukr "Ðе можу Ñтворити таблицю %`s.%`s (помилка: %M)" ER_CANT_CREATE_DB - cze "Nemohu vytvo-BÅ™it databázi '%-.192s' (chybový kód: %M)" + cze "Nemohu vytvoÅ™it databázi '%-.192s' (chybový kód: %M)" dan "Kan ikke oprette databasen '%-.192s' (Fejlkode: %M)" nla "Kan database '%-.192s' niet aanmaken (Errcode: %M)" eng "Can't create database '%-.192s' (errno: %M)" - jps "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)", est "Ei suuda luua andmebaasi '%-.192s' (veakood: %M)" fre 
"Ne peut créer la base '%-.192s' (Erreur %M)" ger "Kann Datenbank '%-.192s' nicht erzeugen (Fehler: %M)" greek "ΑδÏνατη η δημιουÏγία της βάσης δεδομÎνων '%-.192s' (κωδικός λάθους: %M)" hun "Az '%-.192s' adatbazis nem hozhato letre (hibakod: %M)" ita "Impossibile creare il database '%-.192s' (errno: %M)" - jpn "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)" + jpn "データベース '%-.192s' を作æˆã§ãã¾ã›ã‚“。(エラー番å·: %M)" kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 만들지 못했습니다.. (ì—러번호: %M)" nor "Kan ikke opprette databasen '%-.192s' (Feilkode: %M)" norwegian-ny "Kan ikkje opprette databasen '%-.192s' (Feilkode: %M)" @@ -125,18 +124,17 @@ ER_CANT_CREATE_DB swe "Kan inte skapa databasen '%-.192s' (Felkod: %M)" ukr "Ðе можу Ñтворити базу данних '%-.192s' (помилка: %M)" ER_DB_CREATE_EXISTS - cze "Nemohu vytvo-BÅ™it databázi '%-.192s'; databáze již existuje" + cze "Nemohu vytvoÅ™it databázi '%-.192s'; databáze již existuje" dan "Kan ikke oprette databasen '%-.192s'; databasen eksisterer" nla "Kan database '%-.192s' niet aanmaken; database bestaat reeds" eng "Can't create database '%-.192s'; database exists" - jps "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“.æ—¢ã«ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒå˜åœ¨ã—ã¾ã™", est "Ei suuda luua andmebaasi '%-.192s': andmebaas juba eksisteerib" fre "Ne peut créer la base '%-.192s'; elle existe déjà " ger "Kann Datenbank '%-.192s' nicht erzeugen. Datenbank existiert bereits" greek "ΑδÏνατη η δημιουÏγία της βάσης δεδομÎνων '%-.192s'; Η βάση δεδομÎνων υπάÏχει ήδη" hun "Az '%-.192s' adatbazis nem hozhato letre Az adatbazis mar letezik" ita "Impossibile creare il database '%-.192s'; il database esiste" - jpn "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“.æ—¢ã«ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒå˜åœ¨ã—ã¾ã™" + jpn "データベース '%-.192s' を作æˆã§ãã¾ã›ã‚“。データベースã¯ã™ã§ã«å˜åœ¨ã—ã¾ã™ã€‚" kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 만들지 못했습니다.. 
ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 존재함" nor "Kan ikke opprette databasen '%-.192s'; databasen eksisterer" norwegian-ny "Kan ikkje opprette databasen '%-.192s'; databasen eksisterer" @@ -150,18 +148,17 @@ ER_DB_CREATE_EXISTS swe "Databasen '%-.192s' existerar redan" ukr "Ðе можу Ñтворити базу данних '%-.192s'. База данних Ñ–Ñнує" ER_DB_DROP_EXISTS - cze "Nemohu zru-BÅ¡it databázi '%-.192s', databáze neexistuje" + cze "Nemohu zruÅ¡it databázi '%-.192s', databáze neexistuje" dan "Kan ikke slette (droppe) '%-.192s'; databasen eksisterer ikke" nla "Kan database '%-.192s' niet verwijderen; database bestaat niet" eng "Can't drop database '%-.192s'; database doesn't exist" - jps "'%-.192s' ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã‚’ç ´æ£„ã§ãã¾ã›ã‚“. ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒãªã„ã®ã§ã™.", est "Ei suuda kustutada andmebaasi '%-.192s': andmebaasi ei eksisteeri" fre "Ne peut effacer la base '%-.192s'; elle n'existe pas" ger "Kann Datenbank '%-.192s' nicht löschen; Datenbank nicht vorhanden" greek "ΑδÏνατη η διαγÏαφή της βάσης δεδομÎνων '%-.192s'. Η βάση δεδομÎνων δεν υπάÏχει" hun "A(z) '%-.192s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik" ita "Impossibile cancellare '%-.192s'; il database non esiste" - jpn "'%-.192s' ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã‚’ç ´æ£„ã§ãã¾ã›ã‚“. ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒãªã„ã®ã§ã™." + jpn "データベース '%-.192s' を削除ã§ãã¾ã›ã‚“。データベースã¯å˜åœ¨ã—ã¾ã›ã‚“。" kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 ì œê±°í•˜ì§€ 못했습니다. ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 존재하지 ì•ŠìŒ " nor "Kan ikke fjerne (drop) '%-.192s'; databasen eksisterer ikke" norwegian-ny "Kan ikkje fjerne (drop) '%-.192s'; databasen eksisterer ikkje" @@ -175,18 +172,17 @@ ER_DB_DROP_EXISTS swe "Kan inte radera databasen '%-.192s'; databasen finns inte" ukr "Ðе можу видалити базу данних '%-.192s'. 
База данних не Ñ–Ñнує" ER_DB_DROP_DELETE - cze "Chyba p-BÅ™i ruÅ¡enà databáze (nemohu vymazat '%-.192s', chyba %M)" + cze "Chyba pÅ™i ruÅ¡enà databáze (nemohu vymazat '%-.192s', chyba %M)" dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.192s', Fejlkode %M)" nla "Fout bij verwijderen database (kan '%-.192s' niet verwijderen, Errcode: %M)" eng "Error dropping database (can't delete '%-.192s', errno: %M)" - jps "ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ç ´æ£„ã‚¨ãƒ©ãƒ¼ ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %M)", est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.192s', veakood: %M)" fre "Ne peut effacer la base '%-.192s' (erreur %M)" ger "Fehler beim Löschen der Datenbank ('%-.192s' kann nicht gelöscht werden, Fehler: %M)" greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομÎνων (αδÏνατη η διαγÏαφή '%-.192s', κωδικός λάθους: %M)" hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %M)" ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %M)" - jpn "ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ç ´æ£„ã‚¨ãƒ©ãƒ¼ ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %M)" + jpn "データベース削除エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“。エラー番å·: %M)" kor "ë°ì´íƒ€ë² ì´ìŠ¤ ì œê±° ì—러('%-.192s'를 ì‚ì œí• ìˆ˜ ì—†ì니다, ì—러번호: %M)" nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.192s', feil %M)" norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.192s', feil %M)" @@ -200,18 +196,17 @@ ER_DB_DROP_DELETE swe "Fel vid radering av databasen (Kan inte radera '%-.192s'. 
Felkod: %M)" ukr "Ðе можу видалити базу данних (Ðе можу видалити '%-.192s', помилка: %M)" ER_DB_DROP_RMDIR - cze "Chyba p-BÅ™i ruÅ¡enà databáze (nemohu vymazat adresář '%-.192s', chyba %M)" + cze "Chyba pÅ™i ruÅ¡enà databáze (nemohu vymazat adresář '%-.192s', chyba %M)" dan "Fejl ved sletting af database (kan ikke slette folderen '%-.192s', Fejlkode %M)" nla "Fout bij verwijderen database (kan rmdir '%-.192s' niet uitvoeren, Errcode: %M)" eng "Error dropping database (can't rmdir '%-.192s', errno: %M)" - jps "ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ç ´æ£„ã‚¨ãƒ©ãƒ¼ ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %M)", est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.192s', veakood: %M)" fre "Erreur en effaçant la base (rmdir '%-.192s', erreur %M)" ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.192s' kann nicht gelöscht werden, Fehler: %M)" greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομÎνων (αδÏνατη η διαγÏαφή του φακÎλλου '%-.192s', κωδικός λάθους: %M)" hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %M)" ita "Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %M)" - jpn "ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ç ´æ£„ã‚¨ãƒ©ãƒ¼ ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %M)" + jpn "データベース削除エラー (ディレクトリ '%-.192s' を削除ã§ãã¾ã›ã‚“。エラー番å·: %M)" kor "ë°ì´íƒ€ë² ì´ìŠ¤ ì œê±° ì—러(rmdir '%-.192s'를 í• ìˆ˜ ì—†ì니다, ì—러번호: %M)" nor "Feil ved sletting av database (kan ikke slette katalogen '%-.192s', feil %M)" norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.192s', feil %M)" @@ -225,18 +220,17 @@ ER_DB_DROP_RMDIR swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.192s'. 
Felkod: %M)" ukr "Ðе можу видалити базу данних (Ðе можу видалити теку '%-.192s', помилка: %M)" ER_CANT_DELETE_FILE - cze "Chyba p-BÅ™i výmazu '%-.192s' (chybový kód: %M)" + cze "Chyba pÅ™i výmazu '%-.192s' (chybový kód: %M)" dan "Fejl ved sletning af '%-.192s' (Fejlkode: %M)" nla "Fout bij het verwijderen van '%-.192s' (Errcode: %M)" eng "Error on delete of '%-.192s' (errno: %M)" - jps "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %M)", est "Viga '%-.192s' kustutamisel (veakood: %M)" fre "Erreur en effaçant '%-.192s' (Errcode: %M)" ger "Fehler beim Löschen von '%-.192s' (Fehler: %M)" greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή '%-.192s' (κωδικός λάθους: %M)" hun "Torlesi hiba: '%-.192s' (hibakod: %M)" ita "Errore durante la cancellazione di '%-.192s' (errno: %M)" - jpn "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %M)" + jpn "ファイル '%-.192s' ã®å‰Šé™¤ã‚¨ãƒ©ãƒ¼ (エラー番å·: %M)" kor "'%-.192s' ì‚ì œ 중 ì—러 (ì—러번호: %M)" nor "Feil ved sletting av '%-.192s' (Feilkode: %M)" norwegian-ny "Feil ved sletting av '%-.192s' (Feilkode: %M)" @@ -250,18 +244,17 @@ ER_CANT_DELETE_FILE swe "Kan inte radera filen '%-.192s' (Felkod: %M)" ukr "Ðе можу видалити '%-.192s' (помилка: %M)" ER_CANT_FIND_SYSTEM_REC - cze "Nemohu -BÄÃst záznam v systémové tabulce" + cze "Nemohu ÄÃst záznam v systémové tabulce" dan "Kan ikke læse posten i systemfolderen" nla "Kan record niet lezen in de systeem tabel" eng "Can't read record in system table" - jps "system table ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’èªã‚€äº‹ãŒã§ãã¾ã›ã‚“ã§ã—ãŸ", est "Ei suuda lugeda kirjet süsteemsest tabelist" fre "Ne peut lire un enregistrement de la table 'system'" ger "Datensatz in der Systemtabelle nicht lesbar" greek "ΑδÏνατη η ανάγνωση εγγÏαφής από πίνακα του συστήματος" hun "Nem olvashato rekord a rendszertablaban" ita "Impossibile leggere il record dalla tabella di sistema" - jpn "system table ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’èªã‚€äº‹ãŒã§ãã¾ã›ã‚“ã§ã—ãŸ" + jpn "システム表ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’èªã¿è¾¼ã‚ã¾ã›ã‚“。" kor "system í…Œì´ë¸”ì—ì„œ ë ˆì½”ë“œë¥¼ ì½ì„ 수 없습니다." 
nor "Kan ikke lese posten i systemkatalogen" norwegian-ny "Kan ikkje lese posten i systemkatalogen" @@ -275,18 +268,17 @@ ER_CANT_FIND_SYSTEM_REC swe "Hittar inte posten i systemregistret" ukr "Ðе можу зчитати Ð·Ð°Ð¿Ð¸Ñ Ð· ÑиÑтемної таблиці" ER_CANT_GET_STAT - cze "Nemohu z-BÃskat stav '%-.200s' (chybový kód: %M)" + cze "Nemohu zÃskat stav '%-.200s' (chybový kód: %M)" dan "Kan ikke læse status af '%-.200s' (Fejlkode: %M)" nla "Kan de status niet krijgen van '%-.200s' (Errcode: %M)" eng "Can't get status of '%-.200s' (errno: %M)" - jps "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %M)", est "Ei suuda lugeda '%-.200s' olekut (veakood: %M)" fre "Ne peut obtenir le status de '%-.200s' (Errcode: %M)" ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %M)" greek "ΑδÏνατη η λήψη πληÏοφοÏιών για την κατάσταση του '%-.200s' (κωδικός λάθους: %M)" hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %M)" ita "Impossibile leggere lo stato di '%-.200s' (errno: %M)" - jpn "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %M)" + jpn "'%-.200s' ã®çŠ¶æ…‹ã‚’å–å¾—ã§ãã¾ã›ã‚“。(エラー番å·: %M)" kor "'%-.200s'ì˜ ìƒíƒœë¥¼ 얻지 못했습니다. 
(ì—러번호: %M)" nor "Kan ikke lese statusen til '%-.200s' (Feilkode: %M)" norwegian-ny "Kan ikkje lese statusen til '%-.200s' (Feilkode: %M)" @@ -300,18 +292,17 @@ ER_CANT_GET_STAT swe "Kan inte läsa filinformationen (stat) frÃ¥n '%-.200s' (Felkod: %M)" ukr "Ðе можу отримати ÑÑ‚Ð°Ñ‚ÑƒÑ '%-.200s' (помилка: %M)" ER_CANT_GET_WD - cze "Chyba p-BÅ™i zjiÅ¡Å¥ovánà pracovnà adresář (chybový kód: %M)" + cze "Chyba pÅ™i zjiÅ¡Å¥ovánà pracovnà adresář (chybový kód: %M)" dan "Kan ikke læse aktive folder (Fejlkode: %M)" nla "Kan de werkdirectory niet krijgen (Errcode: %M)" eng "Can't get working directory (errno: %M)" - jps "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %M)", est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %M)" fre "Ne peut obtenir le répertoire de travail (Errcode: %M)" ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %M)" greek "Ο φάκελλος εÏγασίας δεν βÏÎθηκε (κωδικός λάθους: %M)" hun "A munkakonyvtar nem allapithato meg (hibakod: %M)" ita "Impossibile leggere la directory di lavoro (errno: %M)" - jpn "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %M)" + jpn "作æ¥ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã‚’å–å¾—ã§ãã¾ã›ã‚“。(エラー番å·: %M)" kor "수행 ë””ë ‰í† ë¦¬ë¥¼ 찾지 못했습니다. (ì—러번호: %M)" nor "Kan ikke lese aktiv katalog(Feilkode: %M)" norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %M)" @@ -325,18 +316,17 @@ ER_CANT_GET_WD swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %M)" ukr "Ðе можу визначити робочу теку (помилка: %M)" ER_CANT_LOCK - cze "Nemohu uzamknout soubor (chybov-Bý kód: %M)" + cze "Nemohu uzamknout soubor (chybový kód: %M)" dan "Kan ikke lÃ¥se fil (Fejlkode: %M)" nla "Kan de file niet blokeren (Errcode: %M)" eng "Can't lock file (errno: %M)" - jps "ファイルをãƒãƒƒã‚¯ã§ãã¾ã›ã‚“ (errno: %M)", est "Ei suuda lukustada faili (veakood: %M)" fre "Ne peut verrouiller le fichier (Errcode: %M)" ger "Datei kann nicht gesperrt werden (Fehler: %M)" greek "Το αÏχείο δεν μποÏεί να κλειδωθεί (κωδικός λάθους: %M)" hun "A file nem zarolhato. 
(hibakod: %M)" ita "Impossibile il locking il file (errno: %M)" - jpn "ファイルをãƒãƒƒã‚¯ã§ãã¾ã›ã‚“ (errno: %M)" + jpn "ファイルをãƒãƒƒã‚¯ã§ãã¾ã›ã‚“。(エラー番å·: %M)" kor "í™”ì¼ì„ ìž ê·¸ì§€(lock) 못했습니다. (ì—러번호: %M)" nor "Kan ikke lÃ¥se fila (Feilkode: %M)" norwegian-ny "Kan ikkje lÃ¥se fila (Feilkode: %M)" @@ -350,18 +340,17 @@ ER_CANT_LOCK swe "Kan inte lÃ¥sa filen. (Felkod: %M)" ukr "Ðе можу заблокувати файл (помилка: %M)" ER_CANT_OPEN_FILE - cze "Nemohu otev-BÅ™Ãt soubor '%-.200s' (chybový kód: %M)" + cze "Nemohu otevÅ™Ãt soubor '%-.200s' (chybový kód: %M)" dan "Kan ikke Ã¥bne fil: '%-.200s' (Fejlkode: %M)" nla "Kan de file '%-.200s' niet openen (Errcode: %M)" eng "Can't open file: '%-.200s' (errno: %M)" - jps "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)", est "Ei suuda avada faili '%-.200s' (veakood: %M)" fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %M)" ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %M)" greek "Δεν είναι δυνατό να ανοιχτεί το αÏχείο: '%-.200s' (κωδικός λάθους: %M)" hun "A '%-.200s' file nem nyithato meg (hibakod: %M)" ita "Impossibile aprire il file: '%-.200s' (errno: %M)" - jpn "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)" + jpn "ファイル '%-.200s' をオープンã§ãã¾ã›ã‚“。(エラー番å·: %M)" kor "í™”ì¼ì„ 열지 못했습니다.: '%-.200s' (ì—러번호: %M)" nor "Kan ikke Ã¥pne fila: '%-.200s' (Feilkode: %M)" norwegian-ny "Kan ikkje Ã¥pne fila: '%-.200s' (Feilkode: %M)" @@ -375,18 +364,17 @@ ER_CANT_OPEN_FILE swe "Kan inte använda '%-.200s' (Felkod: %M)" ukr "Ðе можу відкрити файл: '%-.200s' (помилка: %M)" ER_FILE_NOT_FOUND - cze "Nemohu naj-BÃt soubor '%-.200s' (chybový kód: %M)" + cze "Nemohu najÃt soubor '%-.200s' (chybový kód: %M)" dan "Kan ikke finde fila: '%-.200s' (Fejlkode: %M)" nla "Kan de file: '%-.200s' niet vinden (Errcode: %M)" eng "Can't find file: '%-.200s' (errno: %M)" - jps "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %M)", est "Ei suuda leida faili '%-.200s' (veakood: %M)" fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %M)" ger "Kann Datei '%-.200s' nicht finden 
(Fehler: %M)" greek "Δεν βÏÎθηκε το αÏχείο: '%-.200s' (κωδικός λάθους: %M)" hun "A(z) '%-.200s' file nem talalhato (hibakod: %M)" ita "Impossibile trovare il file: '%-.200s' (errno: %M)" - jpn "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %M)" + jpn "ファイル '%-.200s' ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。(エラー番å·: %M)" kor "í™”ì¼ì„ 찾지 못했습니다.: '%-.200s' (ì—러번호: %M)" nor "Kan ikke finne fila: '%-.200s' (Feilkode: %M)" norwegian-ny "Kan ikkje finne fila: '%-.200s' (Feilkode: %M)" @@ -400,18 +388,17 @@ ER_FILE_NOT_FOUND swe "Hittar inte filen '%-.200s' (Felkod: %M)" ukr "Ðе можу знайти файл: '%-.200s' (помилка: %M)" ER_CANT_READ_DIR - cze "Nemohu -BÄÃst adresář '%-.192s' (chybový kód: %M)" + cze "Nemohu ÄÃst adresář '%-.192s' (chybový kód: %M)" dan "Kan ikke læse folder '%-.192s' (Fejlkode: %M)" nla "Kan de directory niet lezen van '%-.192s' (Errcode: %M)" eng "Can't read dir of '%-.192s' (errno: %M)" - jps "'%-.192s' ディレクトリãŒèªã‚ã¾ã›ã‚“.(errno: %M)", est "Ei suuda lugeda kataloogi '%-.192s' (veakood: %M)" fre "Ne peut lire le répertoire de '%-.192s' (Errcode: %M)" ger "Verzeichnis von '%-.192s' nicht lesbar (Fehler: %M)" greek "Δεν είναι δυνατό να διαβαστεί ο φάκελλος του '%-.192s' (κωδικός λάθους: %M)" hun "A(z) '%-.192s' konyvtar nem olvashato. (hibakod: %M)" ita "Impossibile leggere la directory di '%-.192s' (errno: %M)" - jpn "'%-.192s' ディレクトリãŒèªã‚ã¾ã›ã‚“.(errno: %M)" + jpn "ディレクトリ '%-.192s' ã‚’èªã¿è¾¼ã‚ã¾ã›ã‚“。(エラー番å·: %M)" kor "'%-.192s'ë””ë ‰í† ë¦¬ë¥¼ ì½ì§€ 못했습니다. 
(ì—러번호: %M)" nor "Kan ikke lese katalogen '%-.192s' (Feilkode: %M)" norwegian-ny "Kan ikkje lese katalogen '%-.192s' (Feilkode: %M)" @@ -425,18 +412,17 @@ ER_CANT_READ_DIR swe "Kan inte läsa frÃ¥n bibliotek '%-.192s' (Felkod: %M)" ukr "Ðе можу прочитати теку '%-.192s' (помилка: %M)" ER_CANT_SET_WD - cze "Nemohu zm-BÄ›nit adresář na '%-.192s' (chybový kód: %M)" + cze "Nemohu zmÄ›nit adresář na '%-.192s' (chybový kód: %M)" dan "Kan ikke skifte folder til '%-.192s' (Fejlkode: %M)" nla "Kan de directory niet veranderen naar '%-.192s' (Errcode: %M)" eng "Can't change dir to '%-.192s' (errno: %M)" - jps "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %M)", est "Ei suuda siseneda kataloogi '%-.192s' (veakood: %M)" fre "Ne peut changer le répertoire pour '%-.192s' (Errcode: %M)" ger "Kann nicht in das Verzeichnis '%-.192s' wechseln (Fehler: %M)" greek "ΑδÏνατη η αλλαγή του Ï„ÏÎχοντος καταλόγου σε '%-.192s' (κωδικός λάθους: %M)" hun "Konyvtarvaltas nem lehetseges a(z) '%-.192s'-ba. (hibakod: %M)" ita "Impossibile cambiare la directory in '%-.192s' (errno: %M)" - jpn "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %M)" + jpn "ディレクトリ '%-.192s' ã«ç§»å‹•ã§ãã¾ã›ã‚“。(エラー番å·: %M)" kor "'%-.192s'ë””ë ‰í† ë¦¬ë¡œ ì´ë™í• 수 없었습니다. 
(ì—러번호: %M)" nor "Kan ikke skifte katalog til '%-.192s' (Feilkode: %M)" norwegian-ny "Kan ikkje skifte katalog til '%-.192s' (Feilkode: %M)" @@ -450,7 +436,7 @@ ER_CANT_SET_WD swe "Kan inte byta till '%-.192s' (Felkod: %M)" ukr "Ðе можу перейти у теку '%-.192s' (помилка: %M)" ER_CHECKREAD - cze "Z-Báznam byl zmÄ›nÄ›n od poslednÃho Ätenà v tabulce '%-.192s'" + cze "Záznam byl zmÄ›nÄ›n od poslednÃho Ätenà v tabulce '%-.192s'" dan "Posten er ændret siden sidste læsning '%-.192s'" nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.192s'" eng "Record has changed since last read in table '%-.192s'" @@ -460,6 +446,7 @@ ER_CHECKREAD greek "Η εγγÏαφή Îχει αλλάξει από την τελευταία φοÏά που ανασÏÏθηκε από τον πίνακα '%-.192s'" hun "A(z) '%-.192s' tablaban talalhato rekord megvaltozott az utolso olvasas ota" ita "Il record e` cambiato dall'ultima lettura della tabella '%-.192s'" + jpn "表 '%-.192s' ã®æœ€å¾Œã®èªã¿è¾¼ã¿æ™‚点ã‹ã‚‰ã€ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒå¤‰åŒ–ã—ã¾ã—ãŸã€‚" kor "í…Œì´ë¸” '%-.192s'ì—ì„œ 마지막으로 ì½ì€ 후 Recordê°€ 변경ë˜ì—ˆìŠµë‹ˆë‹¤." nor "Posten har blitt endret siden den ble lest '%-.192s'" norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.192s'" @@ -472,44 +459,42 @@ ER_CHECKREAD spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.192s'" swe "Posten har förändrats sedan den lästes i register '%-.192s'" ukr "Ð—Ð°Ð¿Ð¸Ñ Ð±ÑƒÐ»Ð¾ змінено з чаÑу оÑтаннього Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð· таблиці '%-.192s'" -ER_DISK_FULL - cze "Disk je pln-Bý (%s), Äekám na uvolnÄ›nà nÄ›jakého mÃsta ..." - dan "Ikke mere diskplads (%s). Venter pÃ¥ at fÃ¥ frigjort plads..." - nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt..." - eng "Disk full (%s); waiting for someone to free some space..." - jps "Disk full (%s). 誰ã‹ãŒä½•ã‹ã‚’減らã™ã¾ã§ã¾ã£ã¦ãã ã•ã„...", - est "Ketas täis (%s). Ootame kuni tekib vaba ruumi..." - fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace..." - ger "Festplatte voll (%s). 
Warte, bis jemand Platz schafft ..." - greek "Δεν υπάÏχει χώÏος στο δίσκο (%s). ΠαÏακαλώ, πεÏιμÎνετε να ελευθεÏωθεί χώÏος..." - hun "A lemez megtelt (%s)." - ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..." - jpn "Disk full (%s). 誰ã‹ãŒä½•ã‹ã‚’減らã™ã¾ã§ã¾ã£ã¦ãã ã•ã„..." - kor "Disk full (%s). 다른 ì‚¬ëžŒì´ ì§€ìš¸ë•Œê¹Œì§€ 기다립니다..." - nor "Ikke mer diskplass (%s). Venter pÃ¥ Ã¥ fÃ¥ frigjort plass..." - norwegian-ny "Ikkje meir diskplass (%s). Ventar pÃ¥ Ã¥ fÃ¥ frigjort plass..." - pol "Dysk peÅ‚ny (%s). Oczekiwanie na zwolnienie miejsca..." - por "Disco cheio (%s). Aguardando alguém liberar algum espaço..." - rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..." - rus "ДиÑк заполнен. (%s). Ожидаем, пока кто-то не уберет поÑле ÑÐµÐ±Ñ Ð¼ÑƒÑор..." - serbian "Disk je pun (%s). ÄŒekam nekoga da doÄ‘e i oslobodi neÅ¡to mesta..." - slo "Disk je plný (%s), Äakám na uvoľnenie miesta..." - spa "Disco lleno (%s). Esperando para que se libere algo de espacio..." - swe "Disken är full (%s). Väntar tills det finns ledigt utrymme..." - ukr "ДиÑк заповнений (%s). Вичикую, доки звільнитьÑÑ Ñ‚Ñ€Ð¾Ñ…Ð¸ міÑцÑ..." +ER_DISK_FULL + cze "Disk je plný (%s), Äekám na uvolnÄ›nà nÄ›jakého mÃsta ... (chybový kód: %M)" + dan "Ikke mere diskplads (%s). Venter pÃ¥ at fÃ¥ frigjort plads... (Fejlkode: %M)" + nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt... (Errcode: %M)" + eng "Disk full (%s); waiting for someone to free some space... (errno: %M)" + est "Ketas täis (%s). Ootame kuni tekib vaba ruumi... (veakood: %M)" + fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace... (Errcode: %M)" + ger "Festplatte voll (%s). Warte, bis jemand Platz schafft ... (Fehler: %M)" + greek "Δεν υπάÏχει χώÏος στο δίσκο (%s). ΠαÏακαλώ, πεÏιμÎνετε να ελευθεÏωθεί χώÏος... (κωδικός λάθους: %M)" + hun "A lemez megtelt (%s). (hibakod: %M)" + ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio... 
(errno: %M)" + jpn "ãƒ‡ã‚£ã‚¹ã‚¯é ˜åŸŸä¸è¶³ã§ã™(%s)。(エラー番å·: %M)" + kor "Disk full (%s). 다른 ì‚¬ëžŒì´ ì§€ìš¸ë•Œê¹Œì§€ 기다립니다... (ì—러번호: %M)" + nor "Ikke mer diskplass (%s). Venter pÃ¥ Ã¥ fÃ¥ frigjort plass... (Feilkode: %M)" + norwegian-ny "Ikkje meir diskplass (%s). Ventar pÃ¥ Ã¥ fÃ¥ frigjort plass... (Feilkode: %M)" + pol "Dysk peÅ‚ny (%s). Oczekiwanie na zwolnienie miejsca... (Kod bÅ‚Ä™du: %M)" + por "Disco cheio (%s). Aguardando alguém liberar algum espaço... (erro no. %M)" + rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu... (Eroare: %M)" + rus "ДиÑк заполнен. (%s). Ожидаем, пока кто-то не уберет поÑле ÑÐµÐ±Ñ Ð¼ÑƒÑор... (ошибка: %M)" + serbian "Disk je pun (%s). ÄŒekam nekoga da doÄ‘e i oslobodi neÅ¡to mesta... (errno: %M)" + slo "Disk je plný (%s), Äakám na uvoľnenie miesta... (chybový kód: %M)" + spa "Disco lleno (%s). Esperando para que se libere algo de espacio... (Error: %M)" + swe "Disken är full (%s). Väntar tills det finns ledigt utrymme... (Felkod: %M)" + ukr "ДиÑк заповнений (%s). Вичикую, доки звільнитьÑÑ Ñ‚Ñ€Ð¾Ñ…Ð¸ міÑцÑ... (помилка: %M)" ER_DUP_KEY 23000 - cze "Nemohu zapsat, zdvojen-Bý klÃÄ v tabulce '%-.192s'" + cze "Nemohu zapsat, zdvojený klÃÄ v tabulce '%-.192s'" dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.192s'" nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.192s'" eng "Can't write; duplicate key in table '%-.192s'" - jps "table '%-.192s' ã« key ãŒé‡è¤‡ã—ã¦ã„ã¦æ›¸ãã“ã‚ã¾ã›ã‚“", est "Ei saa kirjutada, korduv võti tabelis '%-.192s'" fre "Ecriture impossible, doublon dans une clé de la table '%-.192s'" ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.192s'" greek "Δεν είναι δυνατή η καταχώÏηση, η τιμή υπάÏχει ήδη στον πίνακα '%-.192s'" hun "Irasi hiba, duplikalt kulcs a '%-.192s' tablaban." 
ita "Scrittura impossibile: chiave duplicata nella tabella '%-.192s'" - jpn "table '%-.192s' ã« key ãŒé‡è¤‡ã—ã¦ã„ã¦æ›¸ãã“ã‚ã¾ã›ã‚“" + jpn "書ãè¾¼ã‚ã¾ã›ã‚“。表 '%-.192s' ã«é‡è¤‡ã™ã‚‹ã‚ーãŒã‚ã‚Šã¾ã™ã€‚" kor "기ë¡í• 수 ì—†ì니다., í…Œì´ë¸” '%-.192s'ì—ì„œ 중복 키" nor "Kan ikke skrive, flere like nøkler i tabellen '%-.192s'" norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.192s'" @@ -523,7 +508,7 @@ ER_DUP_KEY 23000 swe "Kan inte skriva, dubbel söknyckel i register '%-.192s'" ukr "Ðе можу запиÑати, дублюючийÑÑ ÐºÐ»ÑŽÑ‡ в таблиці '%-.192s'" ER_ERROR_ON_CLOSE - cze "Chyba p-BÅ™i zavÃránà '%-.192s' (chybový kód: %M)" + cze "Chyba pÅ™i zavÃránà '%-.192s' (chybový kód: %M)" dan "Fejl ved lukning af '%-.192s' (Fejlkode: %M)" nla "Fout bij het sluiten van '%-.192s' (Errcode: %M)" eng "Error on close of '%-.192s' (errno: %M)" @@ -533,6 +518,7 @@ ER_ERROR_ON_CLOSE greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κλείνοντας το '%-.192s' (κωδικός λάθους: %M)" hun "Hiba a(z) '%-.192s' zarasakor. (hibakod: %M)" ita "Errore durante la chiusura di '%-.192s' (errno: %M)" + jpn "'%-.192s' ã®ã‚¯ãƒãƒ¼ã‚ºæ™‚エラー (エラー番å·: %M)" kor "'%-.192s'닫는 중 ì—러 (ì—러번호: %M)" nor "Feil ved lukking av '%-.192s' (Feilkode: %M)" norwegian-ny "Feil ved lukking av '%-.192s' (Feilkode: %M)" @@ -546,18 +532,17 @@ ER_ERROR_ON_CLOSE swe "Fick fel vid stängning av '%-.192s' (Felkod: %M)" ukr "Ðе можу закрити '%-.192s' (помилка: %M)" ER_ERROR_ON_READ - cze "Chyba p-BÅ™i Ätenà souboru '%-.200s' (chybový kód: %M)" + cze "Chyba pÅ™i Ätenà souboru '%-.200s' (chybový kód: %M)" dan "Fejl ved læsning af '%-.200s' (Fejlkode: %M)" nla "Fout bij het lezen van file '%-.200s' (Errcode: %M)" eng "Error reading file '%-.200s' (errno: %M)" - jps "'%-.200s' ファイルã®èªã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %M)", est "Viga faili '%-.200s' lugemisel (veakood: %M)" fre "Erreur en lecture du fichier '%-.200s' (Errcode: %M)" ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %M)" greek "Î Ïόβλημα κατά την ανάγνωση του αÏχείου '%-.200s' (κωδικός λάθους: %M)" hun 
"Hiba a '%-.200s'file olvasasakor. (hibakod: %M)" ita "Errore durante la lettura del file '%-.200s' (errno: %M)" - jpn "'%-.200s' ファイルã®èªã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %M)" + jpn "ファイル '%-.200s' ã®èªã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (エラー番å·: %M)" kor "'%-.200s'í™”ì¼ ì½ê¸° ì—러 (ì—러번호: %M)" nor "Feil ved lesing av '%-.200s' (Feilkode: %M)" norwegian-ny "Feil ved lesing av '%-.200s' (Feilkode: %M)" @@ -571,18 +556,17 @@ ER_ERROR_ON_READ swe "Fick fel vid läsning av '%-.200s' (Felkod %M)" ukr "Ðе можу прочитати файл '%-.200s' (помилка: %M)" ER_ERROR_ON_RENAME - cze "Chyba p-BÅ™i pÅ™ejmenovánà '%-.210s' na '%-.210s' (chybový kód: %M)" + cze "Chyba pÅ™i pÅ™ejmenovánà '%-.210s' na '%-.210s' (chybový kód: %M)" dan "Fejl ved omdøbning af '%-.210s' til '%-.210s' (Fejlkode: %M)" nla "Fout bij het hernoemen van '%-.210s' naar '%-.210s' (Errcode: %M)" eng "Error on rename of '%-.210s' to '%-.210s' (errno: %M)" - jps "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %M)", est "Viga faili '%-.210s' ümbernimetamisel '%-.210s'-ks (veakood: %M)" fre "Erreur en renommant '%-.210s' en '%-.210s' (Errcode: %M)" ger "Fehler beim Umbenennen von '%-.210s' in '%-.210s' (Fehler: %M)" greek "Î Ïόβλημα κατά την μετονομασία του αÏχείου '%-.210s' to '%-.210s' (κωδικός λάθους: %M)" hun "Hiba a '%-.210s' file atnevezesekor '%-.210s'. 
(hibakod: %M)" ita "Errore durante la rinominazione da '%-.210s' a '%-.210s' (errno: %M)" - jpn "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %M)" + jpn "'%-.210s' ã®åå‰ã‚’ '%-.210s' ã«å¤‰æ›´ã§ãã¾ã›ã‚“ (エラー番å·: %M)" kor "'%-.210s'를 '%-.210s'ë¡œ ì´ë¦„ 변경중 ì—러 (ì—러번호: %M)" nor "Feil ved omdøping av '%-.210s' til '%-.210s' (Feilkode: %M)" norwegian-ny "Feil ved omdøyping av '%-.210s' til '%-.210s' (Feilkode: %M)" @@ -596,18 +580,17 @@ ER_ERROR_ON_RENAME swe "Kan inte byta namn frÃ¥n '%-.210s' till '%-.210s' (Felkod: %M)" ukr "Ðе можу перейменувати '%-.210s' у '%-.210s' (помилка: %M)" ER_ERROR_ON_WRITE - cze "Chyba p-BÅ™i zápisu do souboru '%-.200s' (chybový kód: %M)" + cze "Chyba pÅ™i zápisu do souboru '%-.200s' (chybový kód: %M)" dan "Fejl ved skriving av filen '%-.200s' (Fejlkode: %M)" nla "Fout bij het wegschrijven van file '%-.200s' (Errcode: %M)" eng "Error writing file '%-.200s' (errno: %M)" - jps "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)", est "Viga faili '%-.200s' kirjutamisel (veakood: %M)" fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %M)" ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %M)" greek "Î Ïόβλημα κατά την αποθήκευση του αÏχείου '%-.200s' (κωδικός λάθους: %M)" hun "Hiba a '%-.200s' file irasakor. 
(hibakod: %M)" ita "Errore durante la scrittura del file '%-.200s' (errno: %M)" - jpn "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)" + jpn "ファイル '%-.200s' ã®æ›¸ãè¾¼ã¿ã‚¨ãƒ©ãƒ¼ (エラー番å·: %M)" kor "'%-.200s'í™”ì¼ ê¸°ë¡ ì¤‘ ì—러 (ì—러번호: %M)" nor "Feil ved skriving av fila '%-.200s' (Feilkode: %M)" norwegian-ny "Feil ved skriving av fila '%-.200s' (Feilkode: %M)" @@ -621,18 +604,17 @@ ER_ERROR_ON_WRITE swe "Fick fel vid skrivning till '%-.200s' (Felkod %M)" ukr "Ðе можу запиÑати файл '%-.200s' (помилка: %M)" ER_FILE_USED - cze "'%-.192s' je zam-BÄen proti zmÄ›nám" + cze "'%-.192s' je zamÄen proti zmÄ›nám" dan "'%-.192s' er lÃ¥st mod opdateringer" nla "'%-.192s' is geblokeerd tegen veranderingen" eng "'%-.192s' is locked against change" - jps "'%-.192s' ã¯ãƒãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™", est "'%-.192s' on lukustatud muudatuste vastu" fre "'%-.192s' est verrouillé contre les modifications" ger "'%-.192s' ist für Änderungen gesperrt" greek "'%-.192s' δεν επιτÏÎπονται αλλαγÎÏ‚" hun "'%-.192s' a valtoztatas ellen zarolva" ita "'%-.192s' e` soggetto a lock contro i cambiamenti" - jpn "'%-.192s' ã¯ãƒãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™" + jpn "'%-.192s' ã¯ãƒãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™ã€‚" kor "'%-.192s'ê°€ ë³€ê²½í• ìˆ˜ ì—†ë„ë¡ ìž ê²¨ìžˆì니다." nor "'%-.192s' er lÃ¥st mot oppdateringer" norwegian-ny "'%-.192s' er lÃ¥st mot oppdateringar" @@ -646,18 +628,17 @@ ER_FILE_USED swe "'%-.192s' är lÃ¥st mot användning" ukr "'%-.192s' заблокований на внеÑÐµÐ½Ð½Ñ Ð·Ð¼Ñ–Ð½" ER_FILSORT_ABORT - cze "T-BÅ™ÃdÄ›nà pÅ™eruÅ¡eno" + cze "TÅ™ÃdÄ›nà pÅ™eruÅ¡eno" dan "Sortering afbrudt" nla "Sorteren afgebroken" eng "Sort aborted" - jps "Sort ä¸æ–", est "Sorteerimine katkestatud" fre "Tri alphabétique abandonné" ger "Sortiervorgang abgebrochen" greek "Η διαδικασία ταξινόμισης ακυÏώθηκε" hun "Sikertelen rendezes" ita "Operazione di ordinamento abbandonata" - jpn "Sort ä¸æ–" + jpn "ソート処ç†ã‚’ä¸æ–ã—ã¾ã—ãŸã€‚" kor "소트가 중단ë˜ì—ˆìŠµë‹ˆë‹¤." 
nor "Sortering avbrutt" norwegian-ny "Sortering avbrote" @@ -675,14 +656,13 @@ ER_FORM_NOT_FOUND dan "View '%-.192s' eksisterer ikke for '%-.192s'" nla "View '%-.192s' bestaat niet voor '%-.192s'" eng "View '%-.192s' doesn't exist for '%-.192s'" - jps "View '%-.192s' ㌠'%-.192s' ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“", est "Vaade '%-.192s' ei eksisteeri '%-.192s' jaoks" fre "La vue (View) '%-.192s' n'existe pas pour '%-.192s'" ger "View '%-.192s' existiert für '%-.192s' nicht" greek "Το View '%-.192s' δεν υπάÏχει για '%-.192s'" hun "A(z) '%-.192s' nezet nem letezik a(z) '%-.192s'-hoz" ita "La view '%-.192s' non esiste per '%-.192s'" - jpn "View '%-.192s' ㌠'%-.192s' ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“" + jpn "ビュー '%-.192s' 㯠'%-.192s' ã«å˜åœ¨ã—ã¾ã›ã‚“。" kor "ë·° '%-.192s'ê°€ '%-.192s'ì—서는 존재하지 ì•Šì니다." nor "View '%-.192s' eksisterer ikke for '%-.192s'" norwegian-ny "View '%-.192s' eksisterar ikkje for '%-.192s'" @@ -717,18 +697,17 @@ ER_ILLEGAL_HA rus "Обработчик %s таблицы %`s.%`s не поддерживает Ñту возможноÑÑ‚ÑŒ" ukr "ДеÑкриптор %s таблиці %`s.%`s не має цієї влаÑтивоÑÑ‚Ñ–" ER_KEY_NOT_FOUND - cze "Nemohu naj-BÃt záznam v '%-.192s'" + cze "Nemohu najÃt záznam v '%-.192s'" dan "Kan ikke finde posten i '%-.192s'" nla "Kan record niet vinden in '%-.192s'" eng "Can't find record in '%-.192s'" - jps "'%-.192s'ã®ãªã‹ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ä»˜ã‹ã‚Šã¾ã›ã‚“", est "Ei suuda leida kirjet '%-.192s'-s" fre "Ne peut trouver l'enregistrement dans '%-.192s'" ger "Kann Datensatz in '%-.192s' nicht finden" greek "ΑδÏνατη η ανεÏÏεση εγγÏαφής στο '%-.192s'" hun "Nem talalhato a rekord '%-.192s'-ben" ita "Impossibile trovare il record in '%-.192s'" - jpn "'%-.192s'ã®ãªã‹ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ä»˜ã‹ã‚Šã¾ã›ã‚“" + jpn "'%-.192s' ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" kor "'%-.192s'ì—ì„œ ë ˆì½”ë“œë¥¼ ì°¾ì„ ìˆ˜ ì—†ì니다." 
nor "Kan ikke finne posten i '%-.192s'" norwegian-ny "Kan ikkje finne posten i '%-.192s'" @@ -742,18 +721,17 @@ ER_KEY_NOT_FOUND swe "Hittar inte posten '%-.192s'" ukr "Ðе можу запиÑати у '%-.192s'" ER_NOT_FORM_FILE - cze "Nespr-Bávná informace v souboru '%-.200s'" + cze "Nesprávná informace v souboru '%-.200s'" dan "Forkert indhold i: '%-.200s'" nla "Verkeerde info in file: '%-.200s'" eng "Incorrect information in file: '%-.200s'" - jps "ファイル '%-.200s' ã® info ãŒé–“é•ã£ã¦ã„るよã†ã§ã™", est "Vigane informatsioon failis '%-.200s'" fre "Information erronnée dans le fichier: '%-.200s'" ger "Falsche Information in Datei '%-.200s'" greek "Λάθος πληÏοφοÏίες στο αÏχείο: '%-.200s'" hun "Ervenytelen info a file-ban: '%-.200s'" ita "Informazione errata nel file: '%-.200s'" - jpn "ファイル '%-.200s' ã® info ãŒé–“é•ã£ã¦ã„るよã†ã§ã™" + jpn "ファイル '%-.200s' 内ã®æƒ…å ±ãŒä¸æ£ã§ã™ã€‚" kor "í™”ì¼ì˜ ë¶€ì •í™•í•œ ì •ë³´: '%-.200s'" nor "Feil informasjon i filen: '%-.200s'" norwegian-ny "Feil informasjon i fila: '%-.200s'" @@ -767,18 +745,17 @@ ER_NOT_FORM_FILE swe "Felaktig fil: '%-.200s'" ukr "Хибна Ñ–Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ñ–Ñ Ñƒ файлі: '%-.200s'" ER_NOT_KEYFILE - cze "Nespr-Bávný klÃÄ pro tabulku '%-.200s'; pokuste se ho opravit" + cze "Nesprávný klÃÄ pro tabulku '%-.200s'; pokuste se ho opravit" dan "Fejl i indeksfilen til tabellen '%-.200s'; prøv at reparere den" nla "Verkeerde zoeksleutel file voor tabel: '%-.200s'; probeer het te repareren" eng "Incorrect key file for table '%-.200s'; try to repair it" - jps "'%-.200s' テーブル㮠key file ãŒé–“é•ã£ã¦ã„るよã†ã§ã™. 修復をã—ã¦ãã ã•ã„", est "Tabeli '%-.200s' võtmefail on vigane; proovi seda parandada" fre "Index corrompu dans la table: '%-.200s'; essayez de le réparer" ger "Fehlerhafte Index-Datei für Tabelle '%-.200s'; versuche zu reparieren" greek "Λάθος αÏχείο ταξινόμισης (key file) για τον πίνακα: '%-.200s'; ΠαÏακαλώ, διοÏθώστε το!" hun "Ervenytelen kulcsfile a tablahoz: '%-.200s'; probalja kijavitani!" 
ita "File chiave errato per la tabella : '%-.200s'; prova a riparalo" - jpn "'%-.200s' テーブル㮠key file ãŒé–“é•ã£ã¦ã„るよã†ã§ã™. 修復をã—ã¦ãã ã•ã„" + jpn "表 '%-.200s' ã®ç´¢å¼•ãƒ•ã‚¡ã‚¤ãƒ«(key file)ã®å†…容ãŒä¸æ£ã§ã™ã€‚修復を試行ã—ã¦ãã ã•ã„。" kor "'%-.200s' í…Œì´ë¸”ì˜ ë¶€ì •í™•í•œ 키 존재. ìˆ˜ì •í•˜ì‹œì˜¤!" nor "Tabellen '%-.200s' har feil i nøkkelfilen; forsøk Ã¥ reparer den" norwegian-ny "Tabellen '%-.200s' har feil i nykkelfila; prøv Ã¥ reparere den" @@ -792,18 +769,17 @@ ER_NOT_KEYFILE swe "Fatalt fel vid hantering av register '%-.200s'; kör en reparation" ukr "Хибний файл ключей Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ–: '%-.200s'; Спробуйте його відновити" ER_OLD_KEYFILE - cze "Star-Bý klÃÄový soubor pro '%-.192s'; opravte ho." + cze "Starý klÃÄový soubor pro '%-.192s'; opravte ho." dan "Gammel indeksfil for tabellen '%-.192s'; reparer den" nla "Oude zoeksleutel file voor tabel '%-.192s'; repareer het!" eng "Old key file for table '%-.192s'; repair it!" - jps "'%-.192s' テーブルã¯å¤ã„å½¢å¼ã® key file ã®ã‚ˆã†ã§ã™; 修復をã—ã¦ãã ã•ã„", est "Tabeli '%-.192s' võtmefail on aegunud; paranda see!" fre "Vieux fichier d'index pour la table '%-.192s'; réparez le!" ger "Alte Index-Datei für Tabelle '%-.192s'. Bitte reparieren" greek "Παλαιό αÏχείο ταξινόμισης (key file) για τον πίνακα '%-.192s'; ΠαÏακαλώ, διοÏθώστε το!" hun "Regi kulcsfile a '%-.192s'tablahoz; probalja kijavitani!" ita "File chiave vecchio per la tabella '%-.192s'; riparalo!" - jpn "'%-.192s' テーブルã¯å¤ã„å½¢å¼ã® key file ã®ã‚ˆã†ã§ã™; 修復をã—ã¦ãã ã•ã„" + jpn "表 '%-.192s' ã®ç´¢å¼•ãƒ•ã‚¡ã‚¤ãƒ«(key file)ã¯å¤ã„å½¢å¼ã§ã™ã€‚修復ã—ã¦ãã ã•ã„。" kor "'%-.192s' í…Œì´ë¸”ì˜ ì´ì „ë²„ì ¼ì˜ í‚¤ 존재. ìˆ˜ì •í•˜ì‹œì˜¤!" nor "Gammel nøkkelfil for tabellen '%-.192s'; reparer den!" norwegian-ny "Gammel nykkelfil for tabellen '%-.192s'; reparer den!" @@ -817,18 +793,17 @@ ER_OLD_KEYFILE swe "Gammal nyckelfil '%-.192s'; reparera registret" ukr "Старий файл ключей Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– '%-.192s'; Відновіть його!" 
ER_OPEN_AS_READONLY - cze "'%-.192s' je jen pro -BÄtenÃ" + cze "'%-.192s' je jen pro ÄtenÃ" dan "'%-.192s' er skrivebeskyttet" nla "'%-.192s' is alleen leesbaar" eng "Table '%-.192s' is read only" - jps "'%-.192s' ã¯èªã¿è¾¼ã¿å°‚用ã§ã™", est "Tabel '%-.192s' on ainult lugemiseks" fre "'%-.192s' est en lecture seulement" ger "Tabelle '%-.192s' ist nur lesbar" greek "'%-.192s' επιτÏÎπεται μόνο η ανάγνωση" hun "'%-.192s' irasvedett" ita "'%-.192s' e` di sola lettura" - jpn "'%-.192s' ã¯èªã¿è¾¼ã¿å°‚用ã§ã™" + jpn "表 '%-.192s' ã¯èªã¿è¾¼ã¿å°‚用ã§ã™ã€‚" kor "í…Œì´ë¸” '%-.192s'는 ì½ê¸°ì „ìš© 입니다." nor "'%-.192s' er skrivebeskyttet" norwegian-ny "'%-.192s' er skrivetryggja" @@ -842,18 +817,17 @@ ER_OPEN_AS_READONLY swe "'%-.192s' är skyddad mot förändring" ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' тільки Ð´Ð»Ñ Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ" ER_OUTOFMEMORY HY001 S1001 - cze "M-Bálo pamÄ›ti. PÅ™estartujte daemona a zkuste znovu (je potÅ™eba %d bytů)" + cze "Málo pamÄ›ti. PÅ™estartujte daemona a zkuste znovu (je potÅ™eba %d bytů)" dan "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)" nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)" eng "Out of memory; restart server and try again (needed %d bytes)" - jps "Out of memory. デーモンをリスタートã—ã¦ã¿ã¦ãã ã•ã„ (%d bytes å¿…è¦)", est "Mälu sai otsa. Proovi MariaDB uuesti käivitada (puudu jäi %d baiti)" fre "Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)" ger "Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten" greek "Δεν υπάÏχει διαθÎσιμη μνήμη. Î Ïοσπαθήστε πάλι, επανεκινώντας τη διαδικασία (demon) (χÏειάζονται %d bytes)" hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)" ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)" - jpn "Out of memory. デーモンをリスタートã—ã¦ã¿ã¦ãã ã•ã„ (%d bytes å¿…è¦)" + jpn "メモリãŒä¸è¶³ã—ã¦ã„ã¾ã™ã€‚サーãƒãƒ¼ã‚’å†èµ·å‹•ã—ã¦ã¿ã¦ãã ã•ã„。(%d ãƒã‚¤ãƒˆã®å‰²ã‚Šå½“ã¦ã«å¤±æ•—)" kor "Out of memory. 
ë°ëª¬ì„ 재 실행 후 다시 시작하시오 (needed %d bytes)" nor "Ikke mer minne. Star pÃ¥ nytt tjenesten og prøv igjen (trengte %d byter)" norwegian-ny "Ikkje meir minne. Start pÃ¥ nytt tenesten og prøv igjen (trengte %d bytar)" @@ -867,18 +841,17 @@ ER_OUTOFMEMORY HY001 S1001 swe "Oväntat slut pÃ¥ minnet, starta om programmet och försök pÃ¥ nytt (Behövde %d bytes)" ukr "Брак пам'ÑÑ‚Ñ–. РеÑтартуйте Ñервер та Ñпробуйте знову (потрібно %d байтів)" ER_OUT_OF_SORTMEMORY HY001 S1001 - cze "M-Bálo pamÄ›ti pro tÅ™ÃdÄ›nÃ. ZvyÅ¡te velikost tÅ™ÃdÃcÃho bufferu" + cze "Málo pamÄ›ti pro tÅ™ÃdÄ›nÃ. ZvyÅ¡te velikost tÅ™ÃdÃcÃho bufferu" dan "Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren" nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size" eng "Out of sort memory, consider increasing server sort buffer size" - jps "Out of sort memory. sort buffer size ãŒè¶³ã‚Šãªã„よã†ã§ã™.", est "Mälu sai sorteerimisel otsa. Suurenda MariaDB-i sorteerimispuhvrit" fre "Manque de mémoire pour le tri. Augmentez-la." ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhöht werden" greek "Δεν υπάÏχει διαθÎσιμη μνήμη για ταξινόμιση. Αυξήστε το sort buffer size για τη διαδικασία (demon)" hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet" ita "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone" - jpn "Out of sort memory. sort buffer size ãŒè¶³ã‚Šãªã„よã†ã§ã™." + jpn "ソートメモリãŒä¸è¶³ã—ã¦ã„ã¾ã™ã€‚ソートãƒãƒƒãƒ•ã‚¡ã‚µã‚¤ã‚º(sort buffer size)ã®å¢—åŠ ã‚’æ¤œè¨Žã—ã¦ãã ã•ã„。" kor "Out of sort memory. daemon sort bufferì˜ í¬ê¸°ë¥¼ ì¦ê°€ì‹œí‚¤ì„¸ìš”" nor "Ikke mer sorteringsminne. Vurder Ã¥ øke sorteringsminnet (sort buffer size) for tjenesten" norwegian-ny "Ikkje meir sorteringsminne. Vurder Ã¥ auke sorteringsminnet (sorteringsbuffer storleik) for tenesten" @@ -892,18 +865,17 @@ ER_OUT_OF_SORTMEMORY HY001 S1001 swe "Sorteringsbufferten räcker inte till. 
Kontrollera startparametrarna" ukr "Брак пам'ÑÑ‚Ñ– Ð´Ð»Ñ ÑортуваннÑ. Треба збільшити розмір буфера ÑÐ¾Ñ€Ñ‚ÑƒÐ²Ð°Ð½Ð½Ñ Ñƒ Ñервера" ER_UNEXPECTED_EOF - cze "Neo-BÄekávaný konec souboru pÅ™i Ätenà '%-.192s' (chybový kód: %M)" + cze "NeoÄekávaný konec souboru pÅ™i Ätenà '%-.192s' (chybový kód: %M)" dan "Uventet afslutning pÃ¥ fil (eof) ved læsning af filen '%-.192s' (Fejlkode: %M)" nla "Onverwachte eof gevonden tijdens het lezen van file '%-.192s' (Errcode: %M)" eng "Unexpected EOF found when reading file '%-.192s' (errno: %M)" - jps "'%-.192s' ファイルをèªã¿è¾¼ã¿ä¸ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %M)", est "Ootamatu faililõpumärgend faili '%-.192s' lugemisel (veakood: %M)" fre "Fin de fichier inattendue en lisant '%-.192s' (Errcode: %M)" ger "Unerwartetes Ende beim Lesen der Datei '%-.192s' (Fehler: %M)" greek "Κατά τη διάÏκεια της ανάγνωσης, βÏÎθηκε απÏοσδόκητα το Ï„Îλος του αÏχείου '%-.192s' (κωδικός λάθους: %M)" hun "Varatlan filevege-jel a '%-.192s'olvasasakor. (hibakod: %M)" ita "Fine del file inaspettata durante la lettura del file '%-.192s' (errno: %M)" - jpn "'%-.192s' ファイルをèªã¿è¾¼ã¿ä¸ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %M)" + jpn "ファイル '%-.192s' ã‚’èªã¿è¾¼ã¿ä¸ã«äºˆæœŸã›ãšãƒ•ã‚¡ã‚¤ãƒ«ã®çµ‚端ã«é”ã—ã¾ã—ãŸã€‚(エラー番å·: %M)" kor "'%-.192s' í™”ì¼ì„ ì½ëŠ” ë„중 ìž˜ëª»ëœ eofì„ ë°œê²¬ (ì—러번호: %M)" nor "Uventet slutt pÃ¥ fil (eof) ved lesing av filen '%-.192s' (Feilkode: %M)" norwegian-ny "Uventa slutt pÃ¥ fil (eof) ved lesing av fila '%-.192s' (Feilkode: %M)" @@ -917,18 +889,17 @@ ER_UNEXPECTED_EOF swe "Oväntat filslut vid läsning frÃ¥n '%-.192s' (Felkod: %M)" ukr "Хибний кінець файлу '%-.192s' (помилка: %M)" ER_CON_COUNT_ERROR 08004 - cze "P-BÅ™ÃliÅ¡ mnoho spojenÃ" + cze "PÅ™ÃliÅ¡ mnoho spojenÃ" dan "For mange forbindelser (connections)" nla "Te veel verbindingen" eng "Too many connections" - jps "接続ãŒå¤šã™ãŽã¾ã™", est "Liiga palju samaaegseid ühendusi" fre "Trop de connexions" ger "Zu viele Verbindungen" greek "ΥπάÏχουν πολλÎÏ‚ συνδÎσεις..." 
hun "Tul sok kapcsolat" ita "Troppe connessioni" - jpn "接続ãŒå¤šã™ãŽã¾ã™" + jpn "接続ãŒå¤šã™ãŽã¾ã™ã€‚" kor "너무 ë§Žì€ ì—°ê²°... max_connectionì„ ì¦ê°€ 시키시오..." nor "For mange tilkoblinger (connections)" norwegian-ny "For mange tilkoplingar (connections)" @@ -942,18 +913,17 @@ ER_CON_COUNT_ERROR 08004 swe "För mÃ¥nga anslutningar" ukr "Забагато з'єднань" ER_OUT_OF_RESOURCES - cze "M-Bálo prostoru/pamÄ›ti pro thread" + cze "Málo prostoru/pamÄ›ti pro thread" dan "UdgÃ¥et for trÃ¥de/hukommelse" nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen" eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space" - jps "Out of memory; mysqld ã‹ãã®ä»–ã®ãƒ—ãƒã‚»ã‚¹ãŒãƒ¡ãƒ¢ãƒªãƒ¼ã‚’å…¨ã¦ä½¿ã£ã¦ã„ã‚‹ã‹ç¢ºèªã—ã¦ãã ã•ã„. メモリーを使ã„切ã£ã¦ã„ãªã„å ´åˆã€'ulimit' ã‚’è¨å®šã—㦠mysqld ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨é™ç•Œé‡ã‚’多ãã™ã‚‹ã‹ã€swap space を増やã—ã¦ã¿ã¦ãã ã•ã„", est "Mälu sai otsa. Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MariaDB-le rohkema mälu kasutamise lubamine" fre "Manque de 'threads'/mémoire" ger "Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess den gesamten Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten" greek "Î Ïόβλημα με τη διαθÎσιμη μνήμη (Out of thread space/memory)" hun "Elfogyott a thread-memoria" ita "Fine dello spazio/memoria per i thread" - jpn "Out of memory; mysqld ã‹ãã®ä»–ã®ãƒ—ãƒã‚»ã‚¹ãŒãƒ¡ãƒ¢ãƒªãƒ¼ã‚’å…¨ã¦ä½¿ã£ã¦ã„ã‚‹ã‹ç¢ºèªã—ã¦ãã ã•ã„. 
メモリーを使ã„切ã£ã¦ã„ãªã„å ´åˆã€'ulimit' ã‚’è¨å®šã—㦠mysqld ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨é™ç•Œé‡ã‚’多ãã™ã‚‹ã‹ã€swap space を増やã—ã¦ã¿ã¦ãã ã•ã„" + jpn "メモリãŒä¸è¶³ã—ã¦ã„ã¾ã™ã€‚mysqld ã‚„ãã®ä»–ã®ãƒ—ãƒã‚»ã‚¹ãŒãƒ¡ãƒ¢ãƒªãƒ¼ã‚’使ã„切ã£ã¦ã„ãªã„ã‹ç¢ºèªã—ã¦ä¸‹ã•ã„。メモリーを使ã„切ã£ã¦ã„ãªã„å ´åˆã€'ulimit'ã®è¨å®šç‰ã§ mysqld ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨æœ€å¤§é‡ã‚’多ãã™ã‚‹ã‹ã€ã‚¹ãƒ¯ãƒƒãƒ—é ˜åŸŸã‚’å¢—ã‚„ã™å¿…è¦ãŒã‚ã‚‹ã‹ã‚‚ã—ã‚Œã¾ã›ã‚“。" # This message failed to convert from euc-kr, skipped nor "Tomt for trÃ¥d plass/minne" norwegian-ny "Tomt for trÃ¥d plass/minne" @@ -967,18 +937,17 @@ ER_OUT_OF_RESOURCES swe "Fick slut pÃ¥ minnet. Kontrollera om mysqld eller nÃ¥gon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap" ukr "Брак пам'ÑÑ‚Ñ–; Перевірте чи mysqld або ÑкіÑÑŒ інші процеÑи викориÑтовують уÑÑŽ доÑтупну пам'ÑÑ‚ÑŒ. Як ні, то ви можете ÑкориÑтатиÑÑ 'ulimit', аби дозволити mysqld викориÑтовувати більше пам'ÑÑ‚Ñ– або ви можете додати більше міÑÑ†Ñ Ð¿Ñ–Ð´ Ñвап" ER_BAD_HOST_ERROR 08S01 - cze "Nemohu zjistit jm-Béno stroje pro VaÅ¡i adresu" + cze "Nemohu zjistit jméno stroje pro VaÅ¡i adresu" dan "Kan ikke fÃ¥ værtsnavn for din adresse" nla "Kan de hostname niet krijgen van uw adres" eng "Can't get hostname for your address" - jps "ãã® address ã® hostname ãŒå¼•ã‘ã¾ã›ã‚“.", est "Ei suuda lahendada IP aadressi masina nimeks" fre "Ne peut obtenir de hostname pour votre adresse" ger "Kann Hostnamen für diese Adresse nicht erhalten" greek "Δεν Îγινε γνωστό το hostname για την address σας" hun "A gepnev nem allapithato meg a cimbol" ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)" - jpn "ãã® address ã® hostname ãŒå¼•ã‘ã¾ã›ã‚“." + jpn "IPアドレスã‹ã‚‰ãƒ›ã‚¹ãƒˆåを解決ã§ãã¾ã›ã‚“。" kor "ë‹¹ì‹ ì˜ ì»´í“¨í„°ì˜ í˜¸ìŠ¤íŠ¸ì´ë¦„ì„ ì–»ì„ ìˆ˜ ì—†ì니다." 
nor "Kan ikke fÃ¥ tak i vertsnavn for din adresse" norwegian-ny "Kan ikkje fÃ¥ tak i vertsnavn for di adresse" @@ -992,7 +961,7 @@ ER_BAD_HOST_ERROR 08S01 swe "Kan inte hitta 'hostname' för din adress" ukr "Ðе можу визначити ім'Ñ Ñ…Ð¾Ñту Ð´Ð»Ñ Ð²Ð°ÑˆÐ¾Ñ— адреÑи" ER_HANDSHAKE_ERROR 08S01 - cze "Chyba p-BÅ™i ustavovánà spojenÃ" + cze "Chyba pÅ™i ustavovánà spojenÃ" dan "Forkert hÃ¥ndtryk (handshake)" nla "Verkeerde handshake" eng "Bad handshake" @@ -1002,6 +971,7 @@ ER_HANDSHAKE_ERROR 08S01 greek "Η αναγνώÏιση (handshake) δεν Îγινε σωστά" hun "A kapcsolatfelvetel nem sikerult (Bad handshake)" ita "Negoziazione impossibile" + jpn "ãƒãƒ³ãƒ‰ã‚·ã‚§ã‚¤ã‚¯ã‚¨ãƒ©ãƒ¼" nor "Feil hÃ¥ndtrykk (handshake)" norwegian-ny "Feil handtrykk (handshake)" pol "ZÅ‚y uchwyt(handshake)" @@ -1014,7 +984,7 @@ ER_HANDSHAKE_ERROR 08S01 swe "Fel vid initiering av kommunikationen med klienten" ukr "Ðевірна уÑтановка зв'Ñзку" ER_DBACCESS_DENIED_ERROR 42000 - cze "P-BÅ™Ãstup pro uživatele '%s'@'%s' k databázi '%-.192s' nenà povolen" + cze "PÅ™Ãstup pro uživatele '%s'@'%s' k databázi '%-.192s' nenà povolen" dan "Adgang nægtet bruger: '%s'@'%s' til databasen '%-.192s'" nla "Toegang geweigerd voor gebruiker: '%s'@'%s' naar database '%-.192s'" eng "Access denied for user '%s'@'%s' to database '%-.192s'" @@ -1038,7 +1008,7 @@ ER_DBACCESS_DENIED_ERROR 42000 swe "Användare '%s'@'%s' är ej berättigad att använda databasen %-.192s" ukr "ДоÑтуп заборонено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача: '%s'@'%s' до бази данних '%-.192s'" ER_ACCESS_DENIED_ERROR 28000 - cze "P-BÅ™Ãstup pro uživatele '%s'@'%s' (s heslem %s)" + cze "PÅ™Ãstup pro uživatele '%s'@'%s' (s heslem %s)" dan "Adgang nægtet bruger: '%s'@'%s' (Bruger adgangskode: %s)" nla "Toegang geweigerd voor gebruiker: '%s'@'%s' (Wachtwoord gebruikt: %s)" eng "Access denied for user '%s'@'%s' (using password: %s)" @@ -1062,18 +1032,17 @@ ER_ACCESS_DENIED_ERROR 28000 swe "Användare '%s'@'%s' är ej berättigad att logga in (Använder lösen: %s)" ukr "ДоÑтуп заборонено Ð´Ð»Ñ 
ÐºÐ¾Ñ€Ð¸Ñтувача: '%s'@'%s' (ВикориÑтано пароль: %s)" ER_NO_DB_ERROR 3D000 - cze "Nebyla vybr-Bána žádná databáze" + cze "Nebyla vybrána žádná databáze" dan "Ingen database valgt" nla "Geen database geselecteerd" eng "No database selected" - jps "データベースãŒé¸æŠžã•ã‚Œã¦ã„ã¾ã›ã‚“.", est "Andmebaasi ei ole valitud" fre "Aucune base n'a été sélectionnée" ger "Keine Datenbank ausgewählt" greek "Δεν επιλÎχθηκε βάση δεδομÎνων" hun "Nincs kivalasztott adatbazis" ita "Nessun database selezionato" - jpn "データベースãŒé¸æŠžã•ã‚Œã¦ã„ã¾ã›ã‚“." + jpn "データベースãŒé¸æŠžã•ã‚Œã¦ã„ã¾ã›ã‚“。" kor "ì„ íƒëœ ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 없습니다." nor "Ingen database valgt" norwegian-ny "Ingen database vald" @@ -1087,18 +1056,17 @@ ER_NO_DB_ERROR 3D000 swe "Ingen databas i användning" ukr "Базу данних не вибрано" ER_UNKNOWN_COM_ERROR 08S01 - cze "Nezn-Bámý pÅ™Ãkaz" + cze "Neznámý pÅ™Ãkaz" dan "Ukendt kommando" nla "Onbekend commando" eng "Unknown command" - jps "ãã®ã‚³ãƒžãƒ³ãƒ‰ã¯ä½•ï¼Ÿ", est "Tundmatu käsk" fre "Commande inconnue" ger "Unbekannter Befehl" greek "Αγνωστη εντολή" hun "Ervenytelen parancs" ita "Comando sconosciuto" - jpn "ãã®ã‚³ãƒžãƒ³ãƒ‰ã¯ä½•ï¼Ÿ" + jpn "ä¸æ˜Žãªã‚³ãƒžãƒ³ãƒ‰ã§ã™ã€‚" kor "ëª…ë ¹ì–´ê°€ ë”지 ëª¨ë¥´ê² ì–´ìš”..." 
nor "Ukjent kommando" norwegian-ny "Ukjent kommando" @@ -1109,21 +1077,20 @@ ER_UNKNOWN_COM_ERROR 08S01 serbian "Nepoznata komanda" slo "Neznámy prÃkaz" spa "Comando desconocido" - swe "Okänt commando" + swe "Okänt kommando" ukr "Ðевідома команда" ER_BAD_NULL_ERROR 23000 - cze "Sloupec '%-.192s' nem-Bůže být null" + cze "Sloupec '%-.192s' nemůže být null" dan "Kolonne '%-.192s' kan ikke være NULL" nla "Kolom '%-.192s' kan niet null zijn" eng "Column '%-.192s' cannot be null" - jps "Column '%-.192s' 㯠null ã«ã¯ã§ããªã„ã®ã§ã™", est "Tulp '%-.192s' ei saa omada nullväärtust" fre "Le champ '%-.192s' ne peut être vide (null)" ger "Feld '%-.192s' darf nicht NULL sein" greek "Το πεδίο '%-.192s' δεν μποÏεί να είναι κενό (null)" hun "A(z) '%-.192s' oszlop erteke nem lehet nulla" ita "La colonna '%-.192s' non puo` essere nulla" - jpn "Column '%-.192s' 㯠null ã«ã¯ã§ããªã„ã®ã§ã™" + jpn "列 '%-.192s' 㯠null ã«ã§ãã¾ã›ã‚“。" kor "칼럼 '%-.192s'는 ë„(Null)ì´ ë˜ë©´ 안ë©ë‹ˆë‹¤. " nor "Kolonne '%-.192s' kan ikke vere null" norwegian-ny "Kolonne '%-.192s' kan ikkje vere null" @@ -1137,18 +1104,17 @@ ER_BAD_NULL_ERROR 23000 swe "Kolumn '%-.192s' fÃ¥r inte vara NULL" ukr "Стовбець '%-.192s' не може бути нульовим" ER_BAD_DB_ERROR 42000 - cze "Nezn-Bámá databáze '%-.192s'" + cze "Neznámá databáze '%-.192s'" dan "Ukendt database '%-.192s'" nla "Onbekende database '%-.192s'" eng "Unknown database '%-.192s'" - jps "'%-.192s' ãªã‚“ã¦ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã¯çŸ¥ã‚Šã¾ã›ã‚“.", est "Tundmatu andmebaas '%-.192s'" fre "Base '%-.192s' inconnue" ger "Unbekannte Datenbank '%-.192s'" greek "Αγνωστη βάση δεδομÎνων '%-.192s'" hun "Ervenytelen adatbazis: '%-.192s'" ita "Database '%-.192s' sconosciuto" - jpn "'%-.192s' ãªã‚“ã¦ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã¯çŸ¥ã‚Šã¾ã›ã‚“." 
+ jpn "'%-.192s' ã¯ä¸æ˜Žãªãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã§ã™ã€‚" kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'는 알수 ì—†ìŒ" nor "Ukjent database '%-.192s'" norwegian-ny "Ukjent database '%-.192s'" @@ -1162,18 +1128,17 @@ ER_BAD_DB_ERROR 42000 swe "Okänd databas: '%-.192s'" ukr "Ðевідома база данних '%-.192s'" ER_TABLE_EXISTS_ERROR 42S01 - cze "Tabulka '%-.192s' ji-Bž existuje" + cze "Tabulka '%-.192s' již existuje" dan "Tabellen '%-.192s' findes allerede" nla "Tabel '%-.192s' bestaat al" eng "Table '%-.192s' already exists" - jps "Table '%-.192s' ã¯æ—¢ã«ã‚ã‚Šã¾ã™", est "Tabel '%-.192s' juba eksisteerib" fre "La table '%-.192s' existe déjà " ger "Tabelle '%-.192s' bereits vorhanden" greek "Ο πίνακας '%-.192s' υπάÏχει ήδη" hun "A(z) '%-.192s' tabla mar letezik" ita "La tabella '%-.192s' esiste gia`" - jpn "Table '%-.192s' ã¯æ—¢ã«ã‚ã‚Šã¾ã™" + jpn "表 '%-.192s' ã¯ã™ã§ã«å˜åœ¨ã—ã¾ã™ã€‚" kor "í…Œì´ë¸” '%-.192s'는 ì´ë¯¸ 존재함" nor "Tabellen '%-.192s' eksisterer allerede" norwegian-ny "Tabellen '%-.192s' eksisterar allereide" @@ -1187,18 +1152,17 @@ ER_TABLE_EXISTS_ERROR 42S01 swe "Tabellen '%-.192s' finns redan" ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' вже Ñ–Ñнує" ER_BAD_TABLE_ERROR 42S02 - cze "Nezn-Bámá tabulka '%-.100s'" + cze "Neznámá tabulka '%-.100s'" dan "Ukendt tabel '%-.100s'" nla "Onbekende tabel '%-.100s'" eng "Unknown table '%-.100s'" - jps "table '%-.100s' ã¯ã‚ã‚Šã¾ã›ã‚“.", est "Tundmatu tabel '%-.100s'" fre "Table '%-.100s' inconnue" ger "Unbekannte Tabelle '%-.100s'" greek "Αγνωστος πίνακας '%-.100s'" hun "Ervenytelen tabla: '%-.100s'" ita "Tabella '%-.100s' sconosciuta" - jpn "table '%-.100s' ã¯ã‚ã‚Šã¾ã›ã‚“." 
+ jpn "'%-.100s' ã¯ä¸æ˜Žãªè¡¨ã§ã™ã€‚" kor "í…Œì´ë¸” '%-.100s'는 알수 ì—†ìŒ" nor "Ukjent tabell '%-.100s'" norwegian-ny "Ukjent tabell '%-.100s'" @@ -1212,7 +1176,7 @@ ER_BAD_TABLE_ERROR 42S02 swe "Okänd tabell '%-.100s'" ukr "Ðевідома Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.100s'" ER_NON_UNIQ_ERROR 23000 - cze "Sloupec '%-.192s' v %-.192s nen-Bà zcela jasný" + cze "Sloupec '%-.192s' v %-.192s nenà zcela jasný" dan "Felt: '%-.192s' i tabel %-.192s er ikke entydigt" nla "Kolom: '%-.192s' in %-.192s is niet eenduidig" eng "Column '%-.192s' in %-.192s is ambiguous" @@ -1222,7 +1186,7 @@ ER_NON_UNIQ_ERROR 23000 greek "Το πεδίο: '%-.192s' σε %-.192s δεν Îχει καθοÏιστεί" hun "A(z) '%-.192s' oszlop %-.192s-ben ketertelmu" ita "Colonna: '%-.192s' di %-.192s e` ambigua" - jpn "Column: '%-.192s' in %-.192s is ambiguous" + jpn "列 '%-.192s' 㯠%-.192s 内ã§æ›–昧ã§ã™ã€‚" kor "칼럼: '%-.192s' in '%-.192s' ì´ ëª¨í˜¸í•¨" nor "Felt: '%-.192s' i tabell %-.192s er ikke entydig" norwegian-ny "Kolonne: '%-.192s' i tabell %-.192s er ikkje eintydig" @@ -1236,18 +1200,17 @@ ER_NON_UNIQ_ERROR 23000 swe "Kolumn '%-.192s' i %-.192s är inte unik" ukr "Стовбець '%-.192s' у %-.192s визначений неоднозначно" ER_SERVER_SHUTDOWN 08S01 - cze "Prob-BÃhá ukonÄovánà práce serveru" + cze "ProbÃhá ukonÄovánà práce serveru" dan "Database nedlukning er i gang" nla "Bezig met het stoppen van de server" eng "Server shutdown in progress" - jps "Server ã‚’ shutdown ä¸...", est "Serveri seiskamine käib" fre "Arrêt du serveur en cours" ger "Der Server wird heruntergefahren" greek "ΕναÏξη διαδικασίας αποσÏνδεσης του εξυπηÏετητή (server shutdown)" hun "A szerver leallitasa folyamatban" ita "Shutdown del server in corso" - jpn "Server ã‚’ shutdown ä¸..." + jpn "サーãƒãƒ¼ã‚’シャットダウンä¸ã§ã™ã€‚" kor "Serverê°€ 셧다운 중입니다." 
nor "Database nedkobling er i gang" norwegian-ny "Tenar nedkopling er i gang" @@ -1261,18 +1224,17 @@ ER_SERVER_SHUTDOWN 08S01 swe "Servern gÃ¥r nu ned" ukr "ЗавершуєтьÑÑ Ñ€Ð°Ð±Ð¾Ñ‚Ð° Ñервера" ER_BAD_FIELD_ERROR 42S22 S0022 - cze "Nezn-Bámý sloupec '%-.192s' v %-.192s" + cze "Neznámý sloupec '%-.192s' v %-.192s" dan "Ukendt kolonne '%-.192s' i tabel %-.192s" nla "Onbekende kolom '%-.192s' in %-.192s" eng "Unknown column '%-.192s' in '%-.192s'" - jps "'%-.192s' column 㯠'%-.192s' ã«ã¯ã‚ã‚Šã¾ã›ã‚“.", est "Tundmatu tulp '%-.192s' '%-.192s'-s" fre "Champ '%-.192s' inconnu dans %-.192s" ger "Unbekanntes Tabellenfeld '%-.192s' in %-.192s" greek "Αγνωστο πεδίο '%-.192s' σε '%-.192s'" hun "A(z) '%-.192s' oszlop ervenytelen '%-.192s'-ben" ita "Colonna sconosciuta '%-.192s' in '%-.192s'" - jpn "'%-.192s' column 㯠'%-.192s' ã«ã¯ã‚ã‚Šã¾ã›ã‚“." + jpn "列 '%-.192s' 㯠'%-.192s' ã«ã¯ã‚ã‚Šã¾ã›ã‚“。" kor "Unknown 칼럼 '%-.192s' in '%-.192s'" nor "Ukjent kolonne '%-.192s' i tabell %-.192s" norwegian-ny "Ukjent felt '%-.192s' i tabell %-.192s" @@ -1286,17 +1248,17 @@ ER_BAD_FIELD_ERROR 42S22 S0022 swe "Okänd kolumn '%-.192s' i %-.192s" ukr "Ðевідомий Ñтовбець '%-.192s' у '%-.192s'" ER_WRONG_FIELD_WITH_GROUP 42000 S1009 - cze "Pou-Bžité '%-.192s' nebylo v group by" + cze "Použité '%-.192s' nebylo v group by" dan "Brugte '%-.192s' som ikke var i group by" nla "Opdracht gebruikt '%-.192s' dat niet in de GROUP BY voorkomt" eng "'%-.192s' isn't in GROUP BY" - jps "'%-.192s' isn't in GROUP BY", est "'%-.192s' puudub GROUP BY klauslis" fre "'%-.192s' n'est pas dans 'group by'" ger "'%-.192s' ist nicht in GROUP BY vorhanden" greek "ΧÏησιμοποιήθηκε '%-.192s' που δεν υπήÏχε στο group by" hun "Used '%-.192s' with wasn't in group by" ita "Usato '%-.192s' che non e` nel GROUP BY" + jpn "'%-.192s' ã¯GROUP BYå¥ã§æŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。" kor "'%-.192s'ì€ GROUP BYì†ì— ì—†ìŒ" nor "Brukte '%-.192s' som ikke var i group by" norwegian-ny "Brukte '%-.192s' som ikkje var i group by" @@ -1310,7 +1272,7 @@ 
ER_WRONG_FIELD_WITH_GROUP 42000 S1009 swe "'%-.192s' finns inte i GROUP BY" ukr "'%-.192s' не Ñ” у GROUP BY" ER_WRONG_GROUP_FIELD 42000 S1009 - cze "Nemohu pou-BžÃt group na '%-.192s'" + cze "Nemohu použÃt group na '%-.192s'" dan "Kan ikke gruppere pÃ¥ '%-.192s'" nla "Kan '%-.192s' niet groeperen" eng "Can't group on '%-.192s'" @@ -1320,6 +1282,7 @@ ER_WRONG_GROUP_FIELD 42000 S1009 greek "ΑδÏνατη η ομαδοποίηση (group on) '%-.192s'" hun "A group nem hasznalhato: '%-.192s'" ita "Impossibile raggruppare per '%-.192s'" + jpn "'%-.192s' ã§ã®ã‚°ãƒ«ãƒ¼ãƒ—化ã¯ã§ãã¾ã›ã‚“。" kor "'%-.192s'를 ê·¸ë£¹í• ìˆ˜ ì—†ìŒ" nor "Kan ikke gruppere pÃ¥ '%-.192s'" norwegian-ny "Kan ikkje gruppere pÃ¥ '%-.192s'" @@ -1333,7 +1296,7 @@ ER_WRONG_GROUP_FIELD 42000 S1009 swe "Kan inte använda GROUP BY med '%-.192s'" ukr "Ðе можу групувати по '%-.192s'" ER_WRONG_SUM_SELECT 42000 S1009 - cze "P-BÅ™Ãkaz obsahuje zároveň funkci sum a sloupce" + cze "PÅ™Ãkaz obsahuje zároveň funkci sum a sloupce" dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk" nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht" eng "Statement has sum functions and columns in same statement" @@ -1342,6 +1305,7 @@ ER_WRONG_SUM_SELECT 42000 S1009 ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt" greek "Η διατÏπωση πεÏιÎχει sum functions και columns στην ίδια διατÏπωση" ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY" + jpn "集計関数ã¨é€šå¸¸ã®åˆ—ãŒåŒæ™‚ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã™ã€‚" kor "Statement ê°€ sumê¸°ëŠ¥ì„ ë™ìž‘중ì´ê³ ì¹¼ëŸ¼ë„ ë™ì¼í•œ statement입니다." 
nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk" norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk" @@ -1355,7 +1319,7 @@ ER_WRONG_SUM_SELECT 42000 S1009 swe "Kommandot har bÃ¥de sum functions och enkla funktioner" ukr "У виразі викориÑтано підÑумовуючі функції порÑд з іменами Ñтовбців" ER_WRONG_VALUE_COUNT 21S01 - cze "Po-BÄet sloupců neodpovÃdá zadané hodnotÄ›" + cze "PoÄet sloupců neodpovÃdá zadané hodnotÄ›" dan "Kolonne tæller stemmer ikke med antallet af værdier" nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes" eng "Column count doesn't match value count" @@ -1364,6 +1328,7 @@ ER_WRONG_VALUE_COUNT 21S01 greek "Το Column count δεν ταιÏιάζει με το value count" hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel" ita "Il numero delle colonne non e` uguale al numero dei valori" + jpn "列数ãŒå€¤ã®å€‹æ•°ã¨ä¸€è‡´ã—ã¾ã›ã‚“。" kor "ì¹¼ëŸ¼ì˜ ì¹´ìš´íŠ¸ê°€ ê°’ì˜ ì¹´ìš´íŠ¸ì™€ ì¼ì¹˜í•˜ì§€ 않습니다." nor "Felt telling stemmer verdi telling" norwegian-ny "Kolonne telling stemmer verdi telling" @@ -1377,18 +1342,17 @@ ER_WRONG_VALUE_COUNT 21S01 swe "Antalet kolumner motsvarar inte antalet värden" ukr "КількіÑÑ‚ÑŒ Ñтовбців не Ñпівпадає з кількіÑÑ‚ÑŽ значень" ER_TOO_LONG_IDENT 42000 S1009 - cze "Jm-Béno identifikátoru '%-.100s' je pÅ™ÃliÅ¡ dlouhé" + cze "Jméno identifikátoru '%-.100s' je pÅ™ÃliÅ¡ dlouhé" dan "Navnet '%-.100s' er for langt" nla "Naam voor herkenning '%-.100s' is te lang" eng "Identifier name '%-.100s' is too long" - jps "Identifier name '%-.100s' ã¯é•·ã™ãŽã¾ã™", est "Identifikaatori '%-.100s' nimi on liiga pikk" fre "Le nom de l'identificateur '%-.100s' est trop long" ger "Name des Bezeichners '%-.100s' ist zu lang" greek "Το identifier name '%-.100s' είναι Ï€Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿" hun "A(z) '%-.100s' azonositonev tul hosszu." 
ita "Il nome dell'identificatore '%-.100s' e` troppo lungo" - jpn "Identifier name '%-.100s' ã¯é•·ã™ãŽã¾ã™" + jpn "è˜åˆ¥åå '%-.100s' ã¯é•·ã™ãŽã¾ã™ã€‚" kor "Identifier '%-.100s'는 너무 길군요." nor "Identifikator '%-.100s' er for lang" norwegian-ny "Identifikator '%-.100s' er for lang" @@ -1402,18 +1366,17 @@ ER_TOO_LONG_IDENT 42000 S1009 swe "Kolumnnamn '%-.100s' är för lÃ¥ngt" ukr "Ім'Ñ Ñ–Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ñ–ÐºÐ°Ñ‚Ð¾Ñ€Ð° '%-.100s' задовге" ER_DUP_FIELDNAME 42S21 S1009 - cze "Zdvojen-Bé jméno sloupce '%-.192s'" + cze "Zdvojené jméno sloupce '%-.192s'" dan "Feltnavnet '%-.192s' findes allerede" nla "Dubbele kolom naam '%-.192s'" eng "Duplicate column name '%-.192s'" - jps "'%-.192s' ã¨ã„ㆠcolumn åã¯é‡è¤‡ã—ã¦ã¾ã™", est "Kattuv tulba nimi '%-.192s'" fre "Nom du champ '%-.192s' déjà utilisé" ger "Doppelter Spaltenname: '%-.192s'" greek "Επανάληψη column name '%-.192s'" hun "Duplikalt oszlopazonosito: '%-.192s'" ita "Nome colonna duplicato '%-.192s'" - jpn "'%-.192s' ã¨ã„ㆠcolumn åã¯é‡è¤‡ã—ã¦ã¾ã™" + jpn "列å '%-.192s' ã¯é‡è¤‡ã—ã¦ã¾ã™ã€‚" kor "ì¤‘ë³µëœ ì¹¼ëŸ¼ ì´ë¦„: '%-.192s'" nor "Feltnavnet '%-.192s' eksisterte fra før" norwegian-ny "Feltnamnet '%-.192s' eksisterte frÃ¥ før" @@ -1427,18 +1390,17 @@ ER_DUP_FIELDNAME 42S21 S1009 swe "Kolumnnamn '%-.192s finns flera gÃ¥nger" ukr "Дублююче ім'Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s'" ER_DUP_KEYNAME 42000 S1009 - cze "Zdvojen-Bé jméno klÃÄe '%-.192s'" + cze "Zdvojené jméno klÃÄe '%-.192s'" dan "Indeksnavnet '%-.192s' findes allerede" nla "Dubbele zoeksleutel naam '%-.192s'" eng "Duplicate key name '%-.192s'" - jps "'%-.192s' ã¨ã„ㆠkey ã®åå‰ã¯é‡è¤‡ã—ã¦ã„ã¾ã™", est "Kattuv võtme nimi '%-.192s'" fre "Nom de clef '%-.192s' déjà utilisé" ger "Doppelter Name für Schlüssel vorhanden: '%-.192s'" greek "Επανάληψη key name '%-.192s'" hun "Duplikalt kulcsazonosito: '%-.192s'" ita "Nome chiave duplicato '%-.192s'" - jpn "'%-.192s' ã¨ã„ㆠkey ã®åå‰ã¯é‡è¤‡ã—ã¦ã„ã¾ã™" + jpn "索引å '%-.192s' ã¯é‡è¤‡ã—ã¦ã„ã¾ã™ã€‚" kor "ì¤‘ë³µëœ í‚¤ ì´ë¦„ : '%-.192s'" nor "Nøkkelnavnet 
'%-.192s' eksisterte fra før" norwegian-ny "Nøkkelnamnet '%-.192s' eksisterte frÃ¥ før" @@ -1454,32 +1416,31 @@ ER_DUP_KEYNAME 42000 S1009 # When using this error code, please use ER(ER_DUP_ENTRY_WITH_KEY_NAME) # for the message string. See, for example, code in handler.cc. ER_DUP_ENTRY 23000 S1009 - cze "Zdvojen-Bý klÃÄ '%-.192s' (ÄÃslo klÃÄe %d)" + cze "Zdvojený klÃÄ '%-.192s' (ÄÃslo klÃÄe %d)" dan "Ens værdier '%-.192s' for indeks %d" nla "Dubbele ingang '%-.192s' voor zoeksleutel %d" eng "Duplicate entry '%-.192s' for key %d" - jps "'%-.192s' 㯠key %d ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™", est "Kattuv väärtus '%-.192s' võtmele %d" fre "Duplicata du champ '%-.192s' pour la clef %d" ger "Doppelter Eintrag '%-.192s' für Schlüssel %d" greek "Διπλή εγγÏαφή '%-.192s' για το κλειδί %d" hun "Duplikalt bejegyzes '%-.192s' a %d kulcs szerint." ita "Valore duplicato '%-.192s' per la chiave %d" - jpn "'%-.192s' 㯠key %d ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™" + jpn "'%-.192s' ã¯ç´¢å¼• %d ã§é‡è¤‡ã—ã¦ã„ã¾ã™ã€‚" kor "ì¤‘ë³µëœ ìž…ë ¥ ê°’ '%-.192s': key %d" nor "Like verdier '%-.192s' for nøkkel %d" norwegian-ny "Like verdiar '%-.192s' for nykkel %d" - pol "Powtórzone wyst?pienie '%-.192s' dla klucza %d" + pol "Powtórzone wystÄ…pienie '%-.192s' dla klucza %d" por "Entrada '%-.192s' duplicada para a chave %d" rum "Cimpul '%-.192s' e duplicat pentru cheia %d" rus "ДублирующаÑÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ '%-.192s' по ключу %d" serbian "Dupliran unos '%-.192s' za kljuÄ '%d'" slo "Opakovaný kÄ¾ÃºÄ '%-.192s' (ÄÃslo kľúÄa %d)" spa "Entrada duplicada '%-.192s' para la clave %d" - swe "Dubbel nyckel '%-.192s' för nyckel %d" + swe "Dublett '%-.192s' för nyckel %d" ukr "Дублюючий Ð·Ð°Ð¿Ð¸Ñ '%-.192s' Ð´Ð»Ñ ÐºÐ»ÑŽÑ‡Ð° %d" ER_WRONG_FIELD_SPEC 42000 S1009 - cze "Chybn-Bá specifikace sloupce '%-.192s'" + cze "Chybná specifikace sloupce '%-.192s'" dan "Forkert kolonnespecifikaton for felt '%-.192s'" nla "Verkeerde kolom specificatie voor kolom '%-.192s'" eng "Incorrect column specifier for column '%-.192s'" @@ -1489,6 +1450,7 @@ 
ER_WRONG_FIELD_SPEC 42000 S1009 greek "ΕσφαλμÎνο column specifier για το πεδίο '%-.192s'" hun "Rossz oszlopazonosito: '%-.192s'" ita "Specifica errata per la colonna '%-.192s'" + jpn "列 '%-.192s' ã®å®šç¾©ãŒä¸æ£ã§ã™ã€‚" kor "칼럼 '%-.192s'ì˜ ë¶€ì •í™•í•œ 칼럼 ì •ì˜ìž" nor "Feil kolonne spesifikator for felt '%-.192s'" norwegian-ny "Feil kolonne spesifikator for kolonne '%-.192s'" @@ -1502,18 +1464,17 @@ ER_WRONG_FIELD_SPEC 42000 S1009 swe "Felaktigt kolumntyp för kolumn '%-.192s'" ukr "Ðевірний Ñпецифікатор ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s'" ER_PARSE_ERROR 42000 s1009 - cze "%s bl-BÃzko '%-.80s' na řádku %d" + cze "%s blÃzko '%-.80s' na řádku %d" dan "%s nær '%-.80s' pÃ¥ linje %d" nla "%s bij '%-.80s' in regel %d" eng "%s near '%-.80s' at line %d" - jps "%s : '%-.80s' 付近 : %d 行目", est "%s '%-.80s' ligidal real %d" fre "%s près de '%-.80s' à la ligne %d" ger "%s bei '%-.80s' in Zeile %d" greek "%s πλησίον '%-.80s' στη γÏαμμή %d" hun "A %s a '%-.80s'-hez kozeli a %d sorban" ita "%s vicino a '%-.80s' linea %d" - jpn "%s : '%-.80s' 付近 : %d 行目" + jpn "%s : '%-.80s' 付近 %d 行目" kor "'%s' ì—러 ê°™ì니다. ('%-.80s' ëª…ë ¹ì–´ ë¼ì¸ %d)" nor "%s nær '%-.80s' pÃ¥ linje %d" norwegian-ny "%s attmed '%-.80s' pÃ¥ line %d" @@ -1527,18 +1488,17 @@ ER_PARSE_ERROR 42000 s1009 swe "%s nära '%-.80s' pÃ¥ rad %d" ukr "%s Ð±Ñ–Ð»Ñ '%-.80s' в Ñтроці %d" ER_EMPTY_QUERY 42000 - cze "V-Býsledek dotazu je prázdný" + cze "Výsledek dotazu je prázdný" dan "Forespørgsel var tom" nla "Query was leeg" eng "Query was empty" - jps "Query ãŒç©ºã§ã™.", est "Tühi päring" fre "Query est vide" ger "Leere Abfrage" greek "Το εÏώτημα (query) που θÎσατε ήταν κενό" hun "Ures lekerdezes." ita "La query e` vuota" - jpn "Query ãŒç©ºã§ã™." + jpn "クエリãŒç©ºã§ã™ã€‚" kor "쿼리결과가 없습니다." 
nor "Forespørsel var tom" norwegian-ny "Førespurnad var tom" @@ -1552,18 +1512,17 @@ ER_EMPTY_QUERY 42000 swe "FrÃ¥gan var tom" ukr "ПуÑтий запит" ER_NONUNIQ_TABLE 42000 S1009 - cze "Nejednozna-BÄná tabulka/alias: '%-.192s'" + cze "NejednoznaÄná tabulka/alias: '%-.192s'" dan "Tabellen/aliaset: '%-.192s' er ikke unikt" nla "Niet unieke waarde tabel/alias: '%-.192s'" eng "Not unique table/alias: '%-.192s'" - jps "'%-.192s' ã¯ä¸€æ„ã® table/alias åã§ã¯ã‚ã‚Šã¾ã›ã‚“", est "Ei ole unikaalne tabel/alias '%-.192s'" fre "Table/alias: '%-.192s' non unique" ger "Tabellenname/Alias '%-.192s' nicht eindeutig" greek "ΑδÏνατη η ανεÏÏεση unique table/alias: '%-.192s'" hun "Nem egyedi tabla/alias: '%-.192s'" ita "Tabella/alias non unico: '%-.192s'" - jpn "'%-.192s' ã¯ä¸€æ„ã® table/alias åã§ã¯ã‚ã‚Šã¾ã›ã‚“" + jpn "表åï¼åˆ¥å '%-.192s' ã¯ä¸€æ„ã§ã¯ã‚ã‚Šã¾ã›ã‚“。" kor "Unique 하지 ì•Šì€ í…Œì´ë¸”/alias: '%-.192s'" nor "Ikke unikt tabell/alias: '%-.192s'" norwegian-ny "Ikkje unikt tabell/alias: '%-.192s'" @@ -1577,7 +1536,7 @@ ER_NONUNIQ_TABLE 42000 S1009 swe "Icke unikt tabell/alias: '%-.192s'" ukr "Ðеунікальна таблицÑ/пÑевдонім: '%-.192s'" ER_INVALID_DEFAULT 42000 S1009 - cze "Chybn-Bá defaultnà hodnota pro '%-.192s'" + cze "Chybná defaultnà hodnota pro '%-.192s'" dan "Ugyldig standardværdi for '%-.192s'" nla "Foutieve standaard waarde voor '%-.192s'" eng "Invalid default value for '%-.192s'" @@ -1587,6 +1546,7 @@ ER_INVALID_DEFAULT 42000 S1009 greek "ΕσφαλμÎνη Ï€ÏοκαθοÏισμÎνη τιμή (default value) για '%-.192s'" hun "Ervenytelen ertek: '%-.192s'" ita "Valore di default non valido per '%-.192s'" + jpn "'%-.192s' ã¸ã®ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ãŒç„¡åŠ¹ã§ã™ã€‚" kor "'%-.192s'ì˜ ìœ íš¨í•˜ì§€ 못한 ë””í´íŠ¸ ê°’ì„ ì‚¬ìš©í•˜ì…¨ìŠµë‹ˆë‹¤." 
nor "Ugyldig standardverdi for '%-.192s'" norwegian-ny "Ugyldig standardverdi for '%-.192s'" @@ -1600,18 +1560,17 @@ ER_INVALID_DEFAULT 42000 S1009 swe "Ogiltigt DEFAULT värde för '%-.192s'" ukr "Ðевірне Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ð¿Ð¾ замовчуванню Ð´Ð»Ñ '%-.192s'" ER_MULTIPLE_PRI_KEY 42000 S1009 - cze "Definov-Báno vÃce primárnÃch klÃÄů" + cze "Definováno vÃce primárnÃch klÃÄů" dan "Flere primærnøgler specificeret" nla "Meerdere primaire zoeksleutels gedefinieerd" eng "Multiple primary key defined" - jps "複数㮠primary key ãŒå®šç¾©ã•ã‚Œã¾ã—ãŸ", est "Mitut primaarset võtit ei saa olla" fre "Plusieurs clefs primaires définies" ger "Mehrere Primärschlüssel (PRIMARY KEY) definiert" greek "ΠεÏισσότεÏα από Îνα primary key οÏίστηκαν" hun "Tobbszoros elsodleges kulcs definialas." ita "Definite piu` chiave primarie" - jpn "複数㮠primary key ãŒå®šç¾©ã•ã‚Œã¾ã—ãŸ" + jpn "PRIMARY KEY ãŒè¤‡æ•°å®šç¾©ã•ã‚Œã¦ã„ã¾ã™ã€‚" kor "Multiple primary keyê°€ ì •ì˜ë˜ì–´ 있슴" nor "Fleire primærnøkle spesifisert" norwegian-ny "Fleire primærnyklar spesifisert" @@ -1625,18 +1584,17 @@ ER_MULTIPLE_PRI_KEY 42000 S1009 swe "Flera PRIMARY KEY använda" ukr "Первинного ключа визначено неодноразово" ER_TOO_MANY_KEYS 42000 S1009 - cze "Zad-Báno pÅ™ÃliÅ¡ mnoho klÃÄů, je povoleno nejvÃce %d klÃÄů" + cze "Zadáno pÅ™ÃliÅ¡ mnoho klÃÄů, je povoleno nejvÃce %d klÃÄů" dan "For mange nøgler specificeret. Kun %d nøgler mÃ¥ bruges" nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan" eng "Too many keys specified; max %d keys allowed" - jps "key ã®æŒ‡å®šãŒå¤šã™ãŽã¾ã™. key ã¯æœ€å¤§ %d ã¾ã§ã§ã™", est "Liiga palju võtmeid. Maksimaalselt võib olla %d võtit" fre "Trop de clefs sont définies. Maximum de %d clefs alloué" ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt" greek "ΠάÏα πολλά key οÏίσθηκαν. Το Ï€Î¿Î»Ï %d επιτÏÎπονται" hun "Tul sok kulcs. Maximum %d kulcs engedelyezett." ita "Troppe chiavi. Sono ammesse max %d chiavi" - jpn "key ã®æŒ‡å®šãŒå¤šã™ãŽã¾ã™. 
key ã¯æœ€å¤§ %d ã¾ã§ã§ã™" + jpn "索引ã®æ•°ãŒå¤šã™ãŽã¾ã™ã€‚最大 %d 個ã¾ã§ã§ã™ã€‚" kor "너무 ë§Žì€ í‚¤ê°€ ì •ì˜ë˜ì–´ 있ì니다.. 최대 %dì˜ í‚¤ê°€ 가능함" nor "For mange nøkler spesifisert. Maks %d nøkler tillatt" norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt" @@ -1650,7 +1608,7 @@ ER_TOO_MANY_KEYS 42000 S1009 swe "För mÃ¥nga nycklar använda. Man fÃ¥r ha högst %d nycklar" ukr "Забагато ключів зазначено. Дозволено не більше %d ключів" ER_TOO_MANY_KEY_PARTS 42000 S1009 - cze "Zad-Báno pÅ™ÃliÅ¡ mnoho Äást klÃÄů, je povoleno nejvÃce %d ÄástÃ" + cze "Zadáno pÅ™ÃliÅ¡ mnoho Äást klÃÄů, je povoleno nejvÃce %d ÄástÃ" dan "For mange nøgledele specificeret. Kun %d dele mÃ¥ bruges" nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan" eng "Too many key parts specified; max %d parts allowed" @@ -1660,6 +1618,7 @@ ER_TOO_MANY_KEY_PARTS 42000 S1009 greek "ΠάÏα πολλά key parts οÏίσθηκαν. Το Ï€Î¿Î»Ï %d επιτÏÎπονται" hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett" ita "Troppe parti di chiave specificate. Sono ammesse max %d parti" + jpn "索引ã®ã‚ー列指定ãŒå¤šã™ãŽã¾ã™ã€‚最大 %d 個ã¾ã§ã§ã™ã€‚" kor "너무 ë§Žì€ í‚¤ 부분(parts)ë“¤ì´ ì •ì˜ë˜ì–´ 있ì니다.. 최대 %d ë¶€ë¶„ì´ ê°€ëŠ¥í•¨" nor "For mange nøkkeldeler spesifisert. Maks %d deler tillatt" norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt" @@ -1673,18 +1632,17 @@ ER_TOO_MANY_KEY_PARTS 42000 S1009 swe "För mÃ¥nga nyckeldelar använda. Man fÃ¥r ha högst %d nyckeldelar" ukr "Забагато чаÑтин ключа зазначено. Дозволено не більше %d чаÑтин" ER_TOO_LONG_KEY 42000 S1009 - cze "Zadan-Bý klÃÄ byl pÅ™ÃliÅ¡ dlouhý, nejvÄ›tÅ¡Ã délka klÃÄe je %d" + cze "Zadaný klÃÄ byl pÅ™ÃliÅ¡ dlouhý, nejvÄ›tÅ¡Ã délka klÃÄe je %d" dan "Specificeret nøgle var for lang. Maksimal nøglelængde er %d" nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d" eng "Specified key was too long; max key length is %d bytes" - jps "key ãŒé•·ã™ãŽã¾ã™. key ã®é•·ã•ã¯æœ€å¤§ %d ã§ã™", est "Võti on liiga pikk. 
Maksimaalne võtmepikkus on %d" fre "La clé est trop longue. Longueur maximale: %d" ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d" greek "Το κλειδί που οÏίσθηκε είναι Ï€Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿. Το μÎγιστο μήκος είναι %d" hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d" ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d" - jpn "key ãŒé•·ã™ãŽã¾ã™. key ã®é•·ã•ã¯æœ€å¤§ %d ã§ã™" + jpn "索引ã®ã‚ーãŒé•·ã™ãŽã¾ã™ã€‚最大 %d ãƒã‚¤ãƒˆã¾ã§ã§ã™ã€‚" kor "ì •ì˜ëœ 키가 너무 ê¹ë‹ˆë‹¤. 최대 í‚¤ì˜ ê¸¸ì´ëŠ” %d입니다." nor "Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d" norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d" @@ -1698,18 +1656,17 @@ ER_TOO_LONG_KEY 42000 S1009 swe "För lÃ¥ng nyckel. Högsta tillÃ¥tna nyckellängd är %d" ukr "Зазначений ключ задовгий. Ðайбільша довжина ключа %d байтів" ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 - cze "Kl-BÃÄový sloupec '%-.192s' v tabulce neexistuje" + cze "KlÃÄový sloupec '%-.192s' v tabulce neexistuje" dan "Nøglefeltet '%-.192s' eksisterer ikke i tabellen" nla "Zoeksleutel kolom '%-.192s' bestaat niet in tabel" eng "Key column '%-.192s' doesn't exist in table" - jps "Key column '%-.192s' ãŒãƒ†ãƒ¼ãƒ–ルã«ã‚ã‚Šã¾ã›ã‚“.", est "Võtme tulp '%-.192s' puudub tabelis" fre "La clé '%-.192s' n'existe pas dans la table" ger "In der Tabelle gibt es kein Schlüsselfeld '%-.192s'" greek "Το πεδίο κλειδί '%-.192s' δεν υπάÏχει στον πίνακα" hun "A(z) '%-.192s'kulcsoszlop nem letezik a tablaban" ita "La colonna chiave '%-.192s' non esiste nella tabella" - jpn "Key column '%-.192s' ãŒãƒ†ãƒ¼ãƒ–ルã«ã‚ã‚Šã¾ã›ã‚“." + jpn "ã‚ー列 '%-.192s' ã¯è¡¨ã«ã‚ã‚Šã¾ã›ã‚“。" kor "Key 칼럼 '%-.192s'는 í…Œì´ë¸”ì— ì¡´ìž¬í•˜ì§€ 않습니다." 
nor "Nøkkel felt '%-.192s' eksiterer ikke i tabellen" norwegian-ny "Nykkel kolonne '%-.192s' eksiterar ikkje i tabellen" @@ -1728,18 +1685,17 @@ ER_BLOB_USED_AS_KEY 42000 S1009 rus "Столбец типа BLOB %`s не может быть иÑпользован как значение ключа в %s таблице" ukr "BLOB Ñтовбець %`s не може бути викориÑтаний у визначенні ключа в %s таблиці" ER_TOO_BIG_FIELDLENGTH 42000 S1009 - cze "P-BÅ™ÃliÅ¡ velká délka sloupce '%-.192s' (nejvÃce %lu). Použijte BLOB" + cze "PÅ™ÃliÅ¡ velká délka sloupce '%-.192s' (nejvÃce %lu). Použijte BLOB" dan "For stor feltlængde for kolonne '%-.192s' (maks = %lu). Brug BLOB i stedet" nla "Te grote kolomlengte voor '%-.192s' (max = %lu). Maak hiervoor gebruik van het type BLOB" eng "Column length too big for column '%-.192s' (max = %lu); use BLOB or TEXT instead" - jps "column '%-.192s' ã¯,確ä¿ã™ã‚‹ column ã®å¤§ãã•ãŒå¤šã™ãŽã¾ã™. (最大 %lu ã¾ã§). BLOB ã‚’ã‹ã‚ã‚Šã«ä½¿ç”¨ã—ã¦ãã ã•ã„." est "Tulba '%-.192s' pikkus on liiga pikk (maksimaalne pikkus: %lu). Kasuta BLOB väljatüüpi" fre "Champ '%-.192s' trop long (max = %lu). Utilisez un BLOB" ger "Feldlänge für Feld '%-.192s' zu groß (maximal %lu). BLOB- oder TEXT-Spaltentyp verwenden!" greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿ μήκος για το πεδίο '%-.192s' (max = %lu). ΠαÏακαλώ χÏησιμοποιείστε τον Ï„Ïπο BLOB" hun "A(z) '%-.192s' oszlop tul hosszu. (maximum = %lu). Hasznaljon BLOB tipust inkabb." ita "La colonna '%-.192s' e` troppo grande (max=%lu). Utilizza un BLOB." - jpn "column '%-.192s' ã¯,確ä¿ã™ã‚‹ column ã®å¤§ãã•ãŒå¤šã™ãŽã¾ã™. (最大 %lu ã¾ã§). BLOB ã‚’ã‹ã‚ã‚Šã«ä½¿ç”¨ã—ã¦ãã ã•ã„." + jpn "列 '%-.192s' ã®ã‚µã‚¤ã‚ºå®šç¾©ãŒå¤§ãã™ãŽã¾ã™ (最大 %lu ã¾ã§)。代ã‚ã‚Šã« BLOB ã¾ãŸã¯ TEXT を使用ã—ã¦ãã ã•ã„。" kor "칼럼 '%-.192s'ì˜ ì¹¼ëŸ¼ 길ì´ê°€ 너무 ê¹ë‹ˆë‹¤ (최대 = %lu). ëŒ€ì‹ ì— BLOB를 사용하세요." nor "For stor nøkkellengde for kolonne '%-.192s' (maks = %lu). Bruk BLOB istedenfor" norwegian-ny "For stor nykkellengde for felt '%-.192s' (maks = %lu). 
Bruk BLOB istadenfor" @@ -1753,18 +1709,17 @@ ER_TOO_BIG_FIELDLENGTH 42000 S1009 swe "För stor kolumnlängd angiven för '%-.192s' (max= %lu). Använd en BLOB instället" ukr "Задовга довжина ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s' (max = %lu). ВикориÑтайте тип BLOB" ER_WRONG_AUTO_KEY 42000 S1009 - cze "M-Bůžete mÃt pouze jedno AUTO pole a to musà být definováno jako klÃÄ" + cze "Můžete mÃt pouze jedno AUTO pole a to musà být definováno jako klÃÄ" dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret" nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd." eng "Incorrect table definition; there can be only one auto column and it must be defined as a key" - jps "テーブルã®å®šç¾©ãŒé•ã„ã¾ã™; there can be only one auto column and it must be defined as a key", est "Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena" fre "Un seul champ automatique est permis et il doit être indexé" ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlüssel definiert werden" greek "ΜποÏεί να υπάÏχει μόνο Îνα auto field και Ï€ÏÎπει να Îχει οÏισθεί σαν key" hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni." ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave" - jpn "テーブルã®å®šç¾©ãŒé•ã„ã¾ã™; there can be only one auto column and it must be defined as a key" + jpn "ä¸æ£ãªè¡¨å®šç¾©ã§ã™ã€‚AUTO_INCREMENT列ã¯ï¼‘個ã¾ã§ã§ã€ç´¢å¼•ã‚’定義ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚" kor "ë¶€ì •í™•í•œ í…Œì´ë¸” ì •ì˜; í…Œì´ë¸”ì€ í•˜ë‚˜ì˜ auto ì¹¼ëŸ¼ì´ ì¡´ìž¬í•˜ê³ í‚¤ë¡œ ì •ì˜ë˜ì–´ì ¸ì•¼ 합니다." nor "Bare ett auto felt kan være definert som nøkkel." norwegian-ny "Bare eitt auto felt kan være definert som nøkkel." 
@@ -1778,18 +1733,17 @@ ER_WRONG_AUTO_KEY 42000 S1009 swe "Det fÃ¥r finnas endast ett AUTO_INCREMENT-fält och detta mÃ¥ste vara en nyckel" ukr "Ðевірне Ð²Ð¸Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ–; Може бути лише один автоматичний Ñтовбець, що повинен бути визначений Ñк ключ" ER_READY - cze "%s: p-BÅ™ipraven na spojenÃ\nVersion: '%s' socket: '%s' port: %d"" + cze "%s: pÅ™ipraven na spojenÃ\nVersion: '%s' socket: '%s' port: %d"" dan "%s: klar til tilslutninger\nVersion: '%s' socket: '%s' port: %d"" nla "%s: klaar voor verbindingen\nVersion: '%s' socket: '%s' port: %d"" eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d" - jps "%s: 準備完了¥nVersion: '%s' socket: '%s' port: %d"", est "%s: ootab ühendusi\nVersion: '%s' socket: '%s' port: %d"" fre "%s: Prêt pour des connexions\nVersion: '%s' socket: '%s' port: %d"" ger "%s: Bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d" greek "%s: σε αναμονή συνδÎσεων\nVersion: '%s' socket: '%s' port: %d"" hun "%s: kapcsolatra kesz\nVersion: '%s' socket: '%s' port: %d"" ita "%s: Pronto per le connessioni\nVersion: '%s' socket: '%s' port: %d"" - jpn "%s: 準備完了\nVersion: '%s' socket: '%s' port: %d"" + jpn "%s: 接続準備完了。\nãƒãƒ¼ã‚¸ãƒ§ãƒ³: '%s' socket: '%s' port: %d"" kor "%s: ì—°ê²° 준비중입니다\nVersion: '%s' socket: '%s' port: %d"" nor "%s: klar for tilkoblinger\nVersion: '%s' socket: '%s' port: %d"" norwegian-ny "%s: klar for tilkoblingar\nVersion: '%s' socket: '%s' port: %d"" @@ -1803,7 +1757,7 @@ ER_READY swe "%s: klar att ta emot klienter\nVersion: '%s' socket: '%s' port: %d"" ukr "%s: Готовий Ð´Ð»Ñ Ð·'єднань!\nVersion: '%s' socket: '%s' port: %d"" ER_NORMAL_SHUTDOWN - cze "%s: norm-Bálnà ukonÄenÃ\n" + cze "%s: normálnà ukonÄenÃ\n" dan "%s: Normal nedlukning\n" nla "%s: Normaal afgesloten \n" eng "%s: Normal shutdown\n" @@ -1813,6 +1767,7 @@ ER_NORMAL_SHUTDOWN greek "%s: Φυσιολογική διαδικασία shutdown\n" hun "%s: Normal leallitas\n" ita "%s: Shutdown normale\n" + jpn "%s: 通常シャットダウン\n" kor "%s: ì •ìƒì ì¸ shutdown\n" nor 
"%s: Normal avslutning\n" norwegian-ny "%s: Normal nedkopling\n" @@ -1826,18 +1781,17 @@ ER_NORMAL_SHUTDOWN swe "%s: Normal avslutning\n" ukr "%s: Ðормальне завершеннÑ\n" ER_GOT_SIGNAL - cze "%s: p-BÅ™ijat signal %d, konÄÃm\n" + cze "%s: pÅ™ijat signal %d, konÄÃm\n" dan "%s: Fangede signal %d. Afslutter!!\n" nla "%s: Signaal %d. Systeem breekt af!\n" eng "%s: Got signal %d. Aborting!\n" - jps "%s: Got signal %d. ä¸æ–!Â¥n", est "%s: sain signaali %d. Lõpetan!\n" fre "%s: Reçu le signal %d. Abandonne!\n" ger "%s: Signal %d erhalten. Abbruch!\n" greek "%s: Ελήφθη το μήνυμα %d. Η διαδικασία εγκαταλείπεται!\n" hun "%s: %d jelzes. Megszakitva!\n" ita "%s: Ricevuto segnale %d. Interruzione!\n" - jpn "%s: Got signal %d. ä¸æ–!\n" + jpn "%s: シグナル %d ã‚’å—ä¿¡ã—ã¾ã—ãŸã€‚強制終了ã—ã¾ã™ï¼\n" kor "%s: %d ì‹ í˜¸ê°€ 들어왔ìŒ. 중지!\n" nor "%s: Oppdaget signal %d. Avslutter!\n" norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n" @@ -1851,18 +1805,17 @@ ER_GOT_SIGNAL swe "%s: Fick signal %d. Avslutar!\n" ukr "%s: Отримано Ñигнал %d. 
ПерериваюÑÑŒ!\n" ER_SHUTDOWN_COMPLETE - cze "%s: ukon-BÄenà práce hotovo\n" + cze "%s: ukonÄenà práce hotovo\n" dan "%s: Server lukket\n" nla "%s: Afsluiten afgerond\n" eng "%s: Shutdown complete\n" - jps "%s: Shutdown 完了¥n", est "%s: Lõpp\n" fre "%s: Arrêt du serveur terminé\n" ger "%s: Herunterfahren beendet\n" greek "%s: Η διαδικασία Shutdown ολοκληÏώθηκε\n" hun "%s: A leallitas kesz\n" ita "%s: Shutdown completato\n" - jpn "%s: Shutdown 完了\n" + jpn "%s: シャットダウン完了\n" kor "%s: Shutdown ì´ ì™„ë£Œë¨!\n" nor "%s: Avslutning komplett\n" norwegian-ny "%s: Nedkopling komplett\n" @@ -1876,18 +1829,17 @@ ER_SHUTDOWN_COMPLETE swe "%s: Avslutning klar\n" ukr "%s: Роботу завершено\n" ER_FORCING_CLOSE 08S01 - cze "%s: n-Básilné uzavÅ™enà threadu %ld uživatele '%-.48s'\n" + cze "%s: násilné uzavÅ™enà threadu %ld uživatele '%-.48s'\n" dan "%s: Forceret nedlukning af trÃ¥d: %ld bruger: '%-.48s'\n" nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.48s'\n" eng "%s: Forcing close of thread %ld user: '%-.48s'\n" - jps "%s: スレッド %ld 強制終了 user: '%-.48s'Â¥n", est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.48s'\n" fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.48s'\n" ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.48s'\n" greek "%s: Το thread θα κλείσει %ld user: '%-.48s'\n" hun "%s: A(z) %ld thread kenyszeritett zarasa. 
Felhasznalo: '%-.48s'\n" ita "%s: Forzata la chiusura del thread %ld utente: '%-.48s'\n" - jpn "%s: スレッド %ld 強制終了 user: '%-.48s'\n" + jpn "%s: スレッド %ld を強制終了ã—ã¾ã™ (ユーザー: '%-.48s')\n" kor "%s: thread %ldì˜ ê°•ì œ 종료 user: '%-.48s'\n" nor "%s: PÃ¥tvinget avslutning av trÃ¥d %ld bruker: '%-.48s'\n" norwegian-ny "%s: PÃ¥tvinga avslutning av trÃ¥d %ld brukar: '%-.48s'\n" @@ -1901,18 +1853,17 @@ ER_FORCING_CLOSE 08S01 swe "%s: Stänger av trÃ¥d %ld; användare: '%-.48s'\n" ukr "%s: ПриÑкорюю Ð·Ð°ÐºÑ€Ð¸Ñ‚Ñ‚Ñ Ð³Ñ–Ð»ÐºÐ¸ %ld кориÑтувача: '%-.48s'\n" ER_IPSOCK_ERROR 08S01 - cze "Nemohu vytvo-BÅ™it IP socket" + cze "Nemohu vytvoÅ™it IP socket" dan "Kan ikke oprette IP socket" nla "Kan IP-socket niet openen" eng "Can't create IP socket" - jps "IP socket ãŒä½œã‚Œã¾ã›ã‚“", est "Ei suuda luua IP socketit" fre "Ne peut créer la connexion IP (socket)" ger "Kann IP-Socket nicht erzeugen" greek "Δεν είναι δυνατή η δημιουÏγία IP socket" hun "Az IP socket nem hozhato letre" ita "Impossibile creare il socket IP" - jpn "IP socket ãŒä½œã‚Œã¾ã›ã‚“" + jpn "IPソケットを作æˆã§ãã¾ã›ã‚“。" kor "IP ì†Œì¼“ì„ ë§Œë“¤ì§€ 못했습니다." nor "Kan ikke opprette IP socket" norwegian-ny "Kan ikkje opprette IP socket" @@ -1926,18 +1877,17 @@ ER_IPSOCK_ERROR 08S01 swe "Kan inte skapa IP-socket" ukr "Ðе можу Ñтворити IP роз'єм" ER_NO_SUCH_INDEX 42S12 S1009 - cze "Tabulka '%-.192s' nem-Bá index odpovÃdajÃcà CREATE INDEX. VytvoÅ™te tabulku znovu" + cze "Tabulka '%-.192s' nemá index odpovÃdajÃcà CREATE INDEX. VytvoÅ™te tabulku znovu" dan "Tabellen '%-.192s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen" nla "Tabel '%-.192s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw" eng "Table '%-.192s' has no index like the one used in CREATE INDEX; recreate the table" - jps "Table '%-.192s' ã¯ãã®ã‚ˆã†ãª index ã‚’æŒã£ã¦ã„ã¾ã›ã‚“(CREATE INDEX 実行時ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“). テーブルを作り直ã—ã¦ãã ã•ã„", est "Tabelil '%-.192s' puuduvad võtmed. 
Loo tabel uuesti" fre "La table '%-.192s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table" ger "Tabelle '%-.192s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen" greek "Ο πίνακας '%-.192s' δεν Îχει ευÏετήÏιο (index) σαν αυτό που χÏησιμοποιείτε στην CREATE INDEX. ΠαÏακαλώ, ξαναδημιουÏγήστε τον πίνακα" hun "A(z) '%-.192s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat" ita "La tabella '%-.192s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella" - jpn "Table '%-.192s' ã¯ãã®ã‚ˆã†ãª index ã‚’æŒã£ã¦ã„ã¾ã›ã‚“(CREATE INDEX 実行時ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“). テーブルを作り直ã—ã¦ãã ã•ã„" + jpn "表 '%-.192s' ã«ä»¥å‰CREATE INDEXã§ä½œæˆã•ã‚ŒãŸç´¢å¼•ãŒã‚ã‚Šã¾ã›ã‚“。表を作り直ã—ã¦ãã ã•ã„。" kor "í…Œì´ë¸” '%-.192s'는 ì¸ë±ìŠ¤ë¥¼ 만들지 않았습니다. alter í…Œì´ë¸”ëª…ë ¹ì„ ì´ìš©í•˜ì—¬ í…Œì´ë¸”ì„ ìˆ˜ì •í•˜ì„¸ìš”..." nor "Tabellen '%-.192s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen" norwegian-ny "Tabellen '%-.192s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen pÃ¥ nytt" @@ -1951,7 +1901,7 @@ ER_NO_SUCH_INDEX 42S12 S1009 swe "Tabellen '%-.192s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen" ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' має індекÑ, що не Ñпівпадає з вказанним у CREATE INDEX. Створіть таблицю знову" ER_WRONG_FIELD_TERMINATORS 42000 S1009 - cze "Argument separ-Bátoru položek nebyl oÄekáván. PÅ™eÄtÄ›te si manuál" + cze "Argument separátoru položek nebyl oÄekáván. PÅ™eÄtÄ›te si manuál" dan "Felt adskiller er ikke som forventet, se dokumentationen" nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding" eng "Field separator argument is not what is expected; check the manual" @@ -1961,6 +1911,7 @@ ER_WRONG_FIELD_TERMINATORS 42000 S1009 greek "Ο διαχωÏιστής πεδίων δεν είναι αυτός που αναμενόταν. 
ΠαÏακαλώ ανατÏÎξτε στο manual" hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!" ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale" + jpn "フィールド区切り文å—ãŒäºˆæœŸã›ã¬ä½¿ã‚れ方をã—ã¦ã„ã¾ã™ã€‚マニュアルを確èªã—ã¦ä¸‹ã•ã„。" kor "í•„ë“œ êµ¬ë¶„ìž ì¸ìˆ˜ë“¤ì´ ì™„ì „í•˜ì§€ 않습니다. ë©”ë‰´ì–¼ì„ ì°¾ì•„ 보세요." nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen" norwegian-ny "Felt skiljer argumenta er ikkje som venta, sjÃ¥ dokumentasjonen" @@ -1974,7 +1925,7 @@ ER_WRONG_FIELD_TERMINATORS 42000 S1009 swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen" ukr "Хибний розділювач полів. Почитайте документацію" ER_BLOBS_AND_NO_TERMINATED 42000 S1009 - cze "Nen-Bà možné použÃt pevný rowlength s BLOBem. Použijte 'fields terminated by'." + cze "Nenà možné použÃt pevný rowlength s BLOBem. Použijte 'fields terminated by'." dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'." nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'." eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'" @@ -1984,7 +1935,7 @@ ER_BLOBS_AND_NO_TERMINATED 42000 S1009 greek "Δεν μποÏείτε να χÏησιμοποιήσετε fixed rowlength σε BLOBs. ΠαÏακαλώ χÏησιμοποιείστε 'fields terminated by'." hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ." ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'." - jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'." + jpn "BLOBã«ã¯å›ºå®šé•·ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒä½¿ç”¨ã§ãã¾ã›ã‚“。'FIELDS TERMINATED BY'å¥ã‚’使用ã—ã¦ä¸‹ã•ã„。" kor "BLOB로는 ê³ ì •ê¸¸ì´ì˜ lowlength를 ì‚¬ìš©í• ìˆ˜ 없습니다. 'fields terminated by'를 사용하세요." nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. 
Vennlisgt bruk 'fields terminated by'." @@ -1998,18 +1949,17 @@ ER_BLOBS_AND_NO_TERMINATED 42000 S1009 swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'" ukr "Ðе можна викориÑтовувати Ñталу довжину Ñтроки з BLOB. ЗкориÑтайтеÑÑ 'fields terminated by'" ER_TEXTFILE_NOT_READABLE - cze "Soubor '%-.128s' mus-Bà být v adresáři databáze nebo Äitelný pro vÅ¡echny" + cze "Soubor '%-.128s' musà být v adresáři databáze nebo Äitelný pro vÅ¡echny" dan "Filen '%-.128s' skal være i database-folderen, eller kunne læses af alle" nla "Het bestand '%-.128s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn." eng "The file '%-.128s' must be in the database directory or be readable by all" - jps "ファイル '%-.128s' 㯠databse ã® directory ã«ã‚ã‚‹ã‹å…¨ã¦ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒèªã‚るよã†ã«è¨±å¯ã•ã‚Œã¦ã„ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“.", est "Fail '%-.128s' peab asuma andmebaasi kataloogis või olema kõigile loetav" fre "Le fichier '%-.128s' doit être dans le répertoire de la base et lisible par tous" ger "Datei '%-.128s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein" greek "Το αÏχείο '%-.128s' Ï€ÏÎπει να υπάÏχει στο database directory ή να μποÏεί να διαβαστεί από όλους" hun "A(z) '%-.128s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak" ita "Il file '%-.128s' deve essere nella directory del database e deve essere leggibile da tutti" - jpn "ファイル '%-.128s' 㯠databse ã® directory ã«ã‚ã‚‹ã‹å…¨ã¦ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒèªã‚るよã†ã«è¨±å¯ã•ã‚Œã¦ã„ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“." + jpn "ファイル '%-.128s' ã¯ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã«ã‚ã‚‹ã‹ã€å…¨ã¦ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã‹ã‚‰èªã‚ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚" kor "'%-.128s' í™”ì¼ëŠ” ë°ì´íƒ€ë² ì´ìŠ¤ ë””ë ‰í† ë¦¬ì— ì¡´ìž¬í•˜ê±°ë‚˜ 모ë‘ì—게 ì½ê¸° 가능하여야 합니다." 
nor "Filen '%-.128s' mÃ¥ være i database-katalogen for Ã¥ være lesbar for alle" norwegian-ny "Filen '%-.128s' mÃ¥ være i database-katalogen for Ã¥ være lesbar for alle" @@ -2023,18 +1973,17 @@ ER_TEXTFILE_NOT_READABLE swe "Textfilen '%-.128s' mÃ¥ste finnas i databasbiblioteket eller vara läsbar för alla" ukr "Файл '%-.128s' повинен бути у теці бази данних або мати вÑтановлене право на Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð´Ð»Ñ ÑƒÑÑ–Ñ…" ER_FILE_EXISTS_ERROR - cze "Soubor '%-.200s' ji-Bž existuje" + cze "Soubor '%-.200s' již existuje" dan "Filen '%-.200s' eksisterer allerede" nla "Het bestand '%-.200s' bestaat reeds" eng "File '%-.200s' already exists" - jps "File '%-.200s' ã¯æ—¢ã«å˜åœ¨ã—ã¾ã™", est "Fail '%-.200s' juba eksisteerib" fre "Le fichier '%-.200s' existe déjà " ger "Datei '%-.200s' bereits vorhanden" greek "Το αÏχείο '%-.200s' υπάÏχει ήδη" hun "A '%-.200s' file mar letezik." ita "Il file '%-.200s' esiste gia`" - jpn "File '%-.200s' ã¯æ—¢ã«å˜åœ¨ã—ã¾ã™" + jpn "ファイル '%-.200s' ã¯ã™ã§ã«å˜åœ¨ã—ã¾ã™ã€‚" kor "'%-.200s' í™”ì¼ì€ ì´ë¯¸ 존재합니다." 
nor "Filen '%-.200s' eksisterte allerede" norwegian-ny "Filen '%-.200s' eksisterte allereide" @@ -2048,18 +1997,17 @@ ER_FILE_EXISTS_ERROR swe "Filen '%-.200s' existerar redan" ukr "Файл '%-.200s' вже Ñ–Ñнує" ER_LOAD_INFO - cze "Z-Báznamů: %ld Vymazáno: %ld PÅ™eskoÄeno: %ld VarovánÃ: %ld" + cze "Záznamů: %ld Vymazáno: %ld PÅ™eskoÄeno: %ld VarovánÃ: %ld" dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld" nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld" eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld" - jps "レコード数: %ld 削除: %ld Skipped: %ld Warnings: %ld", est "Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld" fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld" ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld" greek "ΕγγÏαφÎÏ‚: %ld ΔιαγÏαφÎÏ‚: %ld ΠαÏεκάμφθησαν: %ld Î Ïοειδοποιήσεις: %ld" hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld" ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld" - jpn "レコード数: %ld 削除: %ld Skipped: %ld Warnings: %ld" + jpn "レコード数: %ld 削除: %ld スã‚ップ: %ld è¦å‘Š: %ld" kor "ë ˆì½”ë“œ: %ldê°œ ì‚ì œ: %ldê°œ 스킵: %ldê°œ ê²½ê³ : %ldê°œ" nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld" norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld Ã…tvaringar: %ld" @@ -2073,11 +2021,10 @@ ER_LOAD_INFO swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld" ukr "ЗапиÑів: %ld Видалено: %ld Пропущено: %ld ЗаÑтережень: %ld" ER_ALTER_INFO - cze "Z-Báznamů: %ld Zdvojených: %ld" + cze "Záznamů: %ld Zdvojených: %ld" dan "Poster: %ld Ens: %ld" nla "Records: %ld Dubbel: %ld" eng "Records: %ld Duplicates: %ld" - jps "レコード数: %ld é‡è¤‡: %ld", est "Kirjeid: %ld Kattuvaid: %ld" fre "Enregistrements: %ld Doublons: %ld" ger "Datensätze: %ld Duplikate: %ld" @@ -2098,7 +2045,7 @@ ER_ALTER_INFO swe "Rader: %ld Dubletter: %ld" ukr "ЗапиÑів: %ld Дублікатів: %ld" ER_WRONG_SUB_KEY - cze "Chybn-Bá podÄást klÃÄe -- nenà to Å™etÄ›zec nebo je 
delÅ¡Ã než délka Äásti klÃÄe" + cze "Chybná podÄást klÃÄe -- nenà to Å™etÄ›zec nebo je delÅ¡Ã než délka Äásti klÃÄe" dan "Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden" nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel" eng "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys" @@ -2108,7 +2055,7 @@ ER_WRONG_SUB_KEY greek "ΕσφαλμÎνο sub part key. Το χÏησιμοποιοÏμενο key part δεν είναι string ή το μήκος του είναι μεγαλÏτεÏο" hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz" ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave." - jpn "Incorrect prefix key; the used key part isn't a string or the used length is longer than the key part" + jpn "ã‚ーã®ãƒ—レフィックスãŒä¸æ£ã§ã™ã€‚ã‚ーãŒæ–‡å—列ã§ã¯ãªã„ã‹ã€ãƒ—レフィックス長ãŒã‚ーよりも長ã„ã‹ã€ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚¨ãƒ³ã‚¸ãƒ³ãŒä¸€æ„索引ã®ãƒ—レフィックス指定をサãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“。" kor "ë¶€ì •í™•í•œ 서버 파트 키. ì‚¬ìš©ëœ í‚¤ 파트가 스트ë§ì´ 아니거나 키 íŒŒíŠ¸ì˜ ê¸¸ì´ê°€ 너무 ê¹ë‹ˆë‹¤." nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden" norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden" @@ -2122,18 +2069,17 @@ ER_WRONG_SUB_KEY swe "Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden" ukr "Ðевірна чаÑтина ключа. ВикориÑтана чаÑтина ключа не Ñ” Ñтрокою, задовга або вказівник таблиці не підтримує унікальних чаÑтин ключей" ER_CANT_REMOVE_ALL_FIELDS 42000 - cze "Nen-Bà možné vymazat vÅ¡echny položky s ALTER TABLE. Použijte DROP TABLE" + cze "Nenà možné vymazat vÅ¡echny položky s ALTER TABLE. 
Použijte DROP TABLE" dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet." nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!" eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead" - jps "ALTER TABLE ã§å…¨ã¦ã® column ã¯å‰Šé™¤ã§ãã¾ã›ã‚“. DROP TABLE を使用ã—ã¦ãã ã•ã„", est "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil" fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE" ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden" greek "Δεν είναι δυνατή η διαγÏαφή όλων των πεδίων με ALTER TABLE. ΠαÏακαλώ χÏησιμοποιείστε DROP TABLE" hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette" ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE" - jpn "ALTER TABLE ã§å…¨ã¦ã® column ã¯å‰Šé™¤ã§ãã¾ã›ã‚“. DROP TABLE を使用ã—ã¦ãã ã•ã„" + jpn "ALTER TABLE ã§ã¯å…¨ã¦ã®åˆ—ã®å‰Šé™¤ã¯ã§ãã¾ã›ã‚“。DROP TABLE を使用ã—ã¦ãã ã•ã„。" kor "ALTER TABLE ëª…ë ¹ìœ¼ë¡œëŠ” ëª¨ë“ ì¹¼ëŸ¼ì„ ì§€ìš¸ 수 없습니다. DROP TABLE ëª…ë ¹ì„ ì´ìš©í•˜ì„¸ìš”." nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden." norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor." @@ -2147,18 +2093,17 @@ ER_CANT_REMOVE_ALL_FIELDS 42000 swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället" ukr "Ðе можливо видалити вÑÑ– Ñтовбці за допомогою ALTER TABLE. Ð”Ð»Ñ Ñ†ÑŒÐ¾Ð³Ð¾ ÑкориÑтайтеÑÑ DROP TABLE" ER_CANT_DROP_FIELD_OR_KEY 42000 - cze "Nemohu zru-BÅ¡it '%-.192s' (provést DROP). Zkontrolujte, zda neexistujà záznamy/klÃÄe" + cze "Nemohu zruÅ¡it '%-.192s' (provést DROP). Zkontrolujte, zda neexistujà záznamy/klÃÄe" dan "Kan ikke udføre DROP '%-.192s'. Undersøg om feltet/nøglen eksisterer." nla "Kan '%-.192s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat." 
eng "Can't DROP '%-.192s'; check that column/key exists" - jps "'%-.192s' ã‚’ç ´æ£„ã§ãã¾ã›ã‚“ã§ã—ãŸ; check that column/key exists", est "Ei suuda kustutada '%-.192s'. Kontrolli kas tulp/võti eksisteerib" fre "Ne peut effacer (DROP) '%-.192s'. Vérifiez s'il existe" ger "Kann '%-.192s' nicht löschen. Existiert die Spalte oder der Schlüssel?" greek "ΑδÏνατη η διαγÏαφή (DROP) '%-.192s'. ΠαÏακαλώ ελÎγξτε αν το πεδίο/κλειδί υπάÏχει" hun "A DROP '%-.192s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e" ita "Impossibile cancellare '%-.192s'. Controllare che il campo chiave esista" - jpn "'%-.192s' ã‚’ç ´æ£„ã§ãã¾ã›ã‚“ã§ã—ãŸ; check that column/key exists" + jpn "'%-.192s' を削除ã§ãã¾ã›ã‚“。列ï¼ç´¢å¼•ã®å˜åœ¨ã‚’確èªã—ã¦ä¸‹ã•ã„。" kor "'%-.192s'를 DROPí• ìˆ˜ 없습니다. 칼럼ì´ë‚˜ 키가 존재하는지 채í¬í•˜ì„¸ìš”." nor "Kan ikke DROP '%-.192s'. Undersøk om felt/nøkkel eksisterer." norwegian-ny "Kan ikkje DROP '%-.192s'. Undersøk om felt/nøkkel eksisterar." @@ -2172,18 +2117,17 @@ ER_CANT_DROP_FIELD_OR_KEY 42000 swe "Kan inte ta bort '%-.192s'. Kontrollera att fältet/nyckel finns" ukr "Ðе можу DROP '%-.192s'. 
Перевірте, чи цей Ñтовбець/ключ Ñ–Ñнує" ER_INSERT_INFO - cze "Z-Báznamů: %ld Zdvojených: %ld VarovánÃ: %ld" + cze "Záznamů: %ld Zdvojených: %ld VarovánÃ: %ld" dan "Poster: %ld Ens: %ld Advarsler: %ld" nla "Records: %ld Dubbel: %ld Waarschuwing: %ld" eng "Records: %ld Duplicates: %ld Warnings: %ld" - jps "レコード数: %ld é‡è¤‡æ•°: %ld Warnings: %ld", est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld" fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld" ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld" greek "ΕγγÏαφÎÏ‚: %ld Επαναλήψεις: %ld Î Ïοειδοποιήσεις: %ld" hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld" ita "Records: %ld Duplicati: %ld Avvertimenti: %ld" - jpn "レコード数: %ld é‡è¤‡æ•°: %ld Warnings: %ld" + jpn "レコード数: %ld é‡è¤‡æ•°: %ld è¦å‘Š: %ld" kor "ë ˆì½”ë“œ: %ldê°œ 중복: %ldê°œ ê²½ê³ : %ldê°œ" nor "Poster: %ld Like: %ld Advarsler: %ld" norwegian-ny "Postar: %ld Like: %ld Ã…tvaringar: %ld" @@ -2199,22 +2143,22 @@ ER_INSERT_INFO ER_UPDATE_TABLE_USED eng "You can't specify target table '%-.192s' for update in FROM clause" ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.192s' ist in der FROM-Klausel nicht zulässig." 
+ jpn "FROMå¥ã«ã‚る表 '%-.192s' ã¯UPDATEã®å¯¾è±¡ã«ã§ãã¾ã›ã‚“。" rus "Ðе допуÑкаетÑÑ ÑƒÐºÐ°Ð·Ð°Ð½Ð¸Ðµ таблицы '%-.192s' в ÑпиÑке таблиц FROM Ð´Ð»Ñ Ð²Ð½ÐµÑÐµÐ½Ð¸Ñ Ð² нее изменений" swe "INSERT-table '%-.192s' fÃ¥r inte finnas i FROM tabell-listan" ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' що змінюєтьÑÑ Ð½Ðµ дозволена у переліку таблиць FROM" ER_NO_SUCH_THREAD - cze "Nezn-Bámá identifikace threadu: %lu" + cze "Neznámá identifikace threadu: %lu" dan "Ukendt trÃ¥d id: %lu" nla "Onbekend thread id: %lu" eng "Unknown thread id: %lu" - jps "thread id: %lu ã¯ã‚ã‚Šã¾ã›ã‚“", est "Tundmatu lõim: %lu" fre "Numéro de tâche inconnu: %lu" ger "Unbekannte Thread-ID: %lu" greek "Αγνωστο thread id: %lu" hun "Ervenytelen szal (thread) id: %lu" ita "Thread id: %lu sconosciuto" - jpn "thread id: %lu ã¯ã‚ã‚Šã¾ã›ã‚“" + jpn "ä¸æ˜Žãªã‚¹ãƒ¬ãƒƒãƒ‰IDã§ã™: %lu" kor "알수 없는 ì“°ë ˆë“œ id: %lu" nor "Ukjent trÃ¥d id: %lu" norwegian-ny "Ukjent trÃ¥d id: %lu" @@ -2228,18 +2172,17 @@ ER_NO_SUCH_THREAD swe "Finns ingen trÃ¥d med id %lu" ukr "Ðевідомий ідентифікатор гілки: %lu" ER_KILL_DENIED_ERROR - cze "Nejste vlastn-BÃkem threadu %lu" + cze "Nejste vlastnÃkem threadu %lu" dan "Du er ikke ejer af trÃ¥den %lu" nla "U bent geen bezitter van thread %lu" eng "You are not owner of thread %lu" - jps "thread %lu ã®ã‚ªãƒ¼ãƒŠãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“", est "Ei ole lõime %lu omanik" fre "Vous n'êtes pas propriétaire de la tâche no: %lu" ger "Sie sind nicht Eigentümer von Thread %lu" greek "Δεν είσθε owner του thread %lu" hun "A %lu thread-nek mas a tulajdonosa" ita "Utente non proprietario del thread %lu" - jpn "thread %lu ã®ã‚ªãƒ¼ãƒŠãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“" + jpn "スレッド %lu ã®ã‚ªãƒ¼ãƒŠãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“。" kor "ì“°ë ˆë“œ(Thread) %luì˜ ì†Œìœ ìžê°€ 아닙니다." 
nor "Du er ikke eier av trÃ¥den %lu" norwegian-ny "Du er ikkje eigar av trÃ¥d %lu" @@ -2253,7 +2196,7 @@ ER_KILL_DENIED_ERROR swe "Du är inte ägare till trÃ¥d %lu" ukr "Ви не володар гілки %lu" ER_NO_TABLES_USED - cze "Nejsou pou-Bžity žádné tabulky" + cze "Nejsou použity žádné tabulky" dan "Ingen tabeller i brug" nla "Geen tabellen gebruikt." eng "No tables used" @@ -2263,6 +2206,7 @@ ER_NO_TABLES_USED greek "Δεν χÏησιμοποιήθηκαν πίνακες" hun "Nincs hasznalt tabla" ita "Nessuna tabella usata" + jpn "表ãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。" kor "ì–´ë–¤ í…Œì´ë¸”ë„ ì‚¬ìš©ë˜ì§€ 않았습니다." nor "Ingen tabeller i bruk" norwegian-ny "Ingen tabellar i bruk" @@ -2276,7 +2220,7 @@ ER_NO_TABLES_USED swe "Inga tabeller angivna" ukr "Ðе викориÑтано таблиць" ER_TOO_BIG_SET - cze "P-BÅ™ÃliÅ¡ mnoho Å™etÄ›zců pro sloupec %-.192s a SET" + cze "PÅ™ÃliÅ¡ mnoho Å™etÄ›zců pro sloupec %-.192s a SET" dan "For mange tekststrenge til specifikationen af SET i kolonne %-.192s" nla "Teveel strings voor kolom %-.192s en SET" eng "Too many strings for column %-.192s and SET" @@ -2286,6 +2230,7 @@ ER_TOO_BIG_SET greek "ΠάÏα πολλά strings για το πεδίο %-.192s και SET" hun "Tul sok karakter: %-.192s es SET" ita "Troppe stringhe per la colonna %-.192s e la SET" + jpn "SETåž‹ã®åˆ— '%-.192s' ã®ãƒ¡ãƒ³ãƒãƒ¼ã®æ•°ãŒå¤šã™ãŽã¾ã™ã€‚" kor "칼럼 %-.192s와 SETì—ì„œ 스트ë§ì´ 너무 많습니다." 
nor "For mange tekststrenger kolonne %-.192s og SET" norwegian-ny "For mange tekststrengar felt %-.192s og SET" @@ -2299,7 +2244,7 @@ ER_TOO_BIG_SET swe "För mÃ¥nga alternativ till kolumn %-.192s för SET" ukr "Забагато Ñтрок Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ %-.192s та SET" ER_NO_UNIQUE_LOGFILE - cze "Nemohu vytvo-BÅ™it jednoznaÄné jméno logovacÃho souboru %-.200s.(1-999)\n" + cze "Nemohu vytvoÅ™it jednoznaÄné jméno logovacÃho souboru %-.200s.(1-999)\n" dan "Kan ikke lave unikt log-filnavn %-.200s.(1-999)\n" nla "Het is niet mogelijk een unieke naam te maken voor de logfile %-.200s.(1-999)\n" eng "Can't generate a unique log-filename %-.200s.(1-999)\n" @@ -2309,6 +2254,7 @@ ER_NO_UNIQUE_LOGFILE greek "ΑδÏνατη η δημιουÏγία unique log-filename %-.200s.(1-999)\n" hun "Egyedi log-filenev nem generalhato: %-.200s.(1-999)\n" ita "Impossibile generare un nome del file log unico %-.200s.(1-999)\n" + jpn "一æ„ãªãƒã‚°ãƒ•ã‚¡ã‚¤ãƒ«å %-.200s.(1-999) を生æˆã§ãã¾ã›ã‚“。\n" kor "Unique ë¡œê·¸í™”ì¼ '%-.200s'를 만들수 없습니다.(1-999)\n" nor "Kan ikke lage unikt loggfilnavn %-.200s.(1-999)\n" norwegian-ny "Kan ikkje lage unikt loggfilnavn %-.200s.(1-999)\n" @@ -2322,18 +2268,17 @@ ER_NO_UNIQUE_LOGFILE swe "Kan inte generera ett unikt filnamn %-.200s.(1-999)\n" ukr "Ðе можу згенерувати унікальне ім'Ñ log-файлу %-.200s.(1-999)\n" ER_TABLE_NOT_LOCKED_FOR_WRITE - cze "Tabulka '%-.192s' byla zam-BÄena s READ a nemůže být zmÄ›nÄ›na" + cze "Tabulka '%-.192s' byla zamÄena s READ a nemůže být zmÄ›nÄ›na" dan "Tabellen '%-.192s' var lÃ¥st med READ lÃ¥s og kan ikke opdateres" nla "Tabel '%-.192s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen." 
eng "Table '%-.192s' was locked with a READ lock and can't be updated" - jps "Table '%-.192s' 㯠READ lock ã«ãªã£ã¦ã„ã¦ã€æ›´æ–°ã¯ã§ãã¾ã›ã‚“", est "Tabel '%-.192s' on lukustatud READ lukuga ning ei ole muudetav" fre "Table '%-.192s' verrouillée lecture (READ): modification impossible" ger "Tabelle '%-.192s' ist mit Lesesperre versehen und kann nicht aktualisiert werden" greek "Ο πίνακας '%-.192s' Îχει κλειδωθεί με READ lock και δεν επιτÏÎπονται αλλαγÎÏ‚" hun "A(z) '%-.192s' tabla zarolva lett (READ lock) es nem lehet frissiteni" ita "La tabella '%-.192s' e` soggetta a lock in lettura e non puo` essere aggiornata" - jpn "Table '%-.192s' 㯠READ lock ã«ãªã£ã¦ã„ã¦ã€æ›´æ–°ã¯ã§ãã¾ã›ã‚“" + jpn "表 '%-.192s' ã¯READãƒãƒƒã‚¯ã•ã‚Œã¦ã„ã¦ã€æ›´æ–°ã§ãã¾ã›ã‚“。" kor "í…Œì´ë¸” '%-.192s'는 READ ë½ì´ ìž ê²¨ìžˆì–´ì„œ ê°±ì‹ í• ìˆ˜ 없습니다." nor "Tabellen '%-.192s' var lÃ¥st med READ lÃ¥s og kan ikke oppdateres" norwegian-ny "Tabellen '%-.192s' var lÃ¥st med READ lÃ¥s og kan ikkje oppdaterast" @@ -2347,18 +2292,17 @@ ER_TABLE_NOT_LOCKED_FOR_WRITE swe "Tabell '%-.192s' kan inte uppdateras emedan den är lÃ¥st för läsning" ukr "Таблицю '%-.192s' заблоковано тільки Ð´Ð»Ñ Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ, тому Ñ—Ñ— не можна оновити" ER_TABLE_NOT_LOCKED - cze "Tabulka '%-.192s' nebyla zam-BÄena s LOCK TABLES" + cze "Tabulka '%-.192s' nebyla zamÄena s LOCK TABLES" dan "Tabellen '%-.192s' var ikke lÃ¥st med LOCK TABLES" nla "Tabel '%-.192s' was niet gelocked met LOCK TABLES" eng "Table '%-.192s' was not locked with LOCK TABLES" - jps "Table '%-.192s' 㯠LOCK TABLES ã«ã‚ˆã£ã¦ãƒãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã›ã‚“", est "Tabel '%-.192s' ei ole lukustatud käsuga LOCK TABLES" fre "Table '%-.192s' non verrouillée: utilisez LOCK TABLES" ger "Tabelle '%-.192s' wurde nicht mit LOCK TABLES gesperrt" greek "Ο πίνακας '%-.192s' δεν Îχει κλειδωθεί με LOCK TABLES" hun "A(z) '%-.192s' tabla nincs zarolva a LOCK TABLES-szel" ita "Non e` stato impostato il lock per la tabella '%-.192s' con LOCK TABLES" - jpn "Table '%-.192s' 㯠LOCK TABLES 
ã«ã‚ˆã£ã¦ãƒãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã›ã‚“" + jpn "表 '%-.192s' 㯠LOCK TABLES ã§ãƒãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。" kor "í…Œì´ë¸” '%-.192s'는 LOCK TABLES ëª…ë ¹ìœ¼ë¡œ ìž ê¸°ì§€ 않았습니다." nor "Tabellen '%-.192s' var ikke lÃ¥st med LOCK TABLES" norwegian-ny "Tabellen '%-.192s' var ikkje lÃ¥st med LOCK TABLES" @@ -2372,7 +2316,7 @@ ER_TABLE_NOT_LOCKED swe "Tabell '%-.192s' är inte lÃ¥st med LOCK TABLES" ukr "Таблицю '%-.192s' не було блоковано з LOCK TABLES" ER_BLOB_CANT_HAVE_DEFAULT 42000 - cze "Blob polo-Bžka '%-.192s' nemůže mÃt defaultnà hodnotu" + cze "Blob položka '%-.192s' nemůže mÃt defaultnà hodnotu" dan "BLOB feltet '%-.192s' kan ikke have en standard værdi" nla "Blob veld '%-.192s' can geen standaardwaarde bevatten" eng "BLOB/TEXT column '%-.192s' can't have a default value" @@ -2382,7 +2326,7 @@ ER_BLOB_CANT_HAVE_DEFAULT 42000 greek "Τα Blob πεδία '%-.192s' δεν μποÏοÏν να Îχουν Ï€ÏοκαθοÏισμÎνες τιμÎÏ‚ (default value)" hun "A(z) '%-.192s' blob objektumnak nem lehet alapertelmezett erteke" ita "Il campo BLOB '%-.192s' non puo` avere un valore di default" - jpn "BLOB column '%-.192s' can't have a default value" + jpn "BLOB/TEXT 列 '%-.192s' ã«ã¯ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ã‚’指定ã§ãã¾ã›ã‚“。" kor "BLOB 칼럼 '%-.192s' 는 ë””í´íŠ¸ ê°’ì„ ê°€ì§ˆ 수 없습니다." 
nor "Blob feltet '%-.192s' kan ikke ha en standard verdi" norwegian-ny "Blob feltet '%-.192s' kan ikkje ha ein standard verdi" @@ -2396,18 +2340,17 @@ ER_BLOB_CANT_HAVE_DEFAULT 42000 swe "BLOB fält '%-.192s' kan inte ha ett DEFAULT-värde" ukr "Стовбець BLOB '%-.192s' не може мати Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ð¿Ð¾ замовчуванню" ER_WRONG_DB_NAME 42000 - cze "Nep-BÅ™Ãpustné jméno databáze '%-.100s'" + cze "NepÅ™Ãpustné jméno databáze '%-.100s'" dan "Ugyldigt database navn '%-.100s'" nla "Databasenaam '%-.100s' is niet getoegestaan" eng "Incorrect database name '%-.100s'" - jps "指定ã—㟠database å '%-.100s' ãŒé–“é•ã£ã¦ã„ã¾ã™", est "Vigane andmebaasi nimi '%-.100s'" fre "Nom de base de donnée illégal: '%-.100s'" ger "Unerlaubter Datenbankname '%-.100s'" greek "Λάθος όνομα βάσης δεδομÎνων '%-.100s'" hun "Hibas adatbazisnev: '%-.100s'" ita "Nome database errato '%-.100s'" - jpn "指定ã—㟠database å '%-.100s' ãŒé–“é•ã£ã¦ã„ã¾ã™" + jpn "データベースå '%-.100s' ã¯ä¸æ£ã§ã™ã€‚" kor "'%-.100s' ë°ì´íƒ€ë² ì´ìŠ¤ì˜ ì´ë¦„ì´ ë¶€ì •í™•í•©ë‹ˆë‹¤." nor "Ugyldig database navn '%-.100s'" norwegian-ny "Ugyldig database namn '%-.100s'" @@ -2421,18 +2364,17 @@ ER_WRONG_DB_NAME 42000 swe "Felaktigt databasnamn '%-.100s'" ukr "Ðевірне ім'Ñ Ð±Ð°Ð·Ð¸ данних '%-.100s'" ER_WRONG_TABLE_NAME 42000 - cze "Nep-BÅ™Ãpustné jméno tabulky '%-.100s'" + cze "NepÅ™Ãpustné jméno tabulky '%-.100s'" dan "Ugyldigt tabel navn '%-.100s'" nla "Niet toegestane tabelnaam '%-.100s'" eng "Incorrect table name '%-.100s'" - jps "指定ã—㟠table å '%-.100s' ã¯ã¾ã¡ãŒã£ã¦ã„ã¾ã™", est "Vigane tabeli nimi '%-.100s'" fre "Nom de table illégal: '%-.100s'" ger "Unerlaubter Tabellenname '%-.100s'" greek "Λάθος όνομα πίνακα '%-.100s'" hun "Hibas tablanev: '%-.100s'" ita "Nome tabella errato '%-.100s'" - jpn "指定ã—㟠table å '%-.100s' ã¯ã¾ã¡ãŒã£ã¦ã„ã¾ã™" + jpn "表å '%-.100s' ã¯ä¸æ£ã§ã™ã€‚" kor "'%-.100s' í…Œì´ë¸” ì´ë¦„ì´ ë¶€ì •í™•í•©ë‹ˆë‹¤." 
nor "Ugyldig tabell navn '%-.100s'" norwegian-ny "Ugyldig tabell namn '%-.100s'" @@ -2446,7 +2388,7 @@ ER_WRONG_TABLE_NAME 42000 swe "Felaktigt tabellnamn '%-.100s'" ukr "Ðевірне ім'Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– '%-.100s'" ER_TOO_BIG_SELECT 42000 - cze "Zadan-Bý SELECT by procházel pÅ™ÃliÅ¡ mnoho záznamů a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v pořádku, použijte SET SQL_BIG_SELECTS=1" + cze "Zadaný SELECT by procházel pÅ™ÃliÅ¡ mnoho záznamů a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v pořádku, použijte SET SQL_BIG_SELECTS=1" dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt" nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is." eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay" @@ -2456,6 +2398,7 @@ ER_TOO_BIG_SELECT 42000 greek "Το SELECT θα εξετάσει μεγάλο αÏιθμό εγγÏαφών και πιθανώς θα καθυστεÏήσει. ΠαÏακαλώ εξετάστε τις παÏαμÎÏ„Ïους του WHERE και χÏησιμοποιείστε SET SQL_BIG_SELECTS=1 αν το SELECT είναι σωστό" hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay" ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto." + jpn "SELECTãŒMAX_JOIN_SIZEを超ãˆã‚‹è¡Œæ•°ã‚’処ç†ã—ã¾ã—ãŸã€‚WHEREå¥ã‚’確èªã—ã€SELECTæ–‡ã«å•é¡ŒãŒãªã‘ã‚Œã°ã€ SET SQL_BIG_SELECTS=1 ã¾ãŸã¯ SET MAX_JOIN_SIZE=# を使用ã—ã¦ä¸‹ã•ã„。" kor "SELECT ëª…ë ¹ì—ì„œ 너무 ë§Žì€ ë ˆì½”ë“œë¥¼ 찾기 ë•Œë¬¸ì— ë§Žì€ ì‹œê°„ì´ ì†Œìš”ë©ë‹ˆë‹¤. ë”°ë¼ì„œ WHERE ë¬¸ì„ ì 검하거나, 만약 SELECTê°€ okë˜ë©´ SET SQL_BIG_SELECTS=1 ì˜µì…˜ì„ ì‚¬ìš©í•˜ì„¸ìš”." 
nor "SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" norwegian-ny "SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" @@ -2469,7 +2412,7 @@ ER_TOO_BIG_SELECT 42000 swe "Den angivna frÃ¥gan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins" ukr "Запиту SELECT потрібно обробити багато запиÑів, що, певне, займе дуже багато чаÑу. Перевірте ваше WHERE та викориÑтовуйте SET SQL_BIG_SELECTS=1, Ñкщо цей запит SELECT Ñ” вірним" ER_UNKNOWN_ERROR - cze "Nezn-Bámá chyba" + cze "Neznámá chyba" dan "Ukendt fejl" nla "Onbekende Fout" eng "Unknown error" @@ -2479,6 +2422,7 @@ ER_UNKNOWN_ERROR greek "Î ÏοÎκυψε άγνωστο λάθος" hun "Ismeretlen hiba" ita "Errore sconosciuto" + jpn "ä¸æ˜Žãªã‚¨ãƒ©ãƒ¼" kor "알수 없는 ì—러입니다." 
nor "Ukjent feil" norwegian-ny "Ukjend feil" @@ -2488,10 +2432,10 @@ ER_UNKNOWN_ERROR serbian "Nepoznata greÅ¡ka" slo "Neznámá chyba" spa "Error desconocido" - swe "Oidentifierat fel" + swe "Okänt fel" ukr "Ðевідома помилка" ER_UNKNOWN_PROCEDURE 42000 - cze "Nezn-Bámá procedura %-.192s" + cze "Neznámá procedura %-.192s" dan "Ukendt procedure %-.192s" nla "Onbekende procedure %-.192s" eng "Unknown procedure '%-.192s'" @@ -2501,6 +2445,7 @@ ER_UNKNOWN_PROCEDURE 42000 greek "Αγνωστη διαδικασία '%-.192s'" hun "Ismeretlen eljaras: '%-.192s'" ita "Procedura '%-.192s' sconosciuta" + jpn "'%-.192s' ã¯ä¸æ˜Žãªãƒ—ãƒã‚·ãƒ¼ã‚¸ãƒ£ã§ã™ã€‚" kor "알수 없는 수행문 : '%-.192s'" nor "Ukjent prosedyre %-.192s" norwegian-ny "Ukjend prosedyre %-.192s" @@ -2514,7 +2459,7 @@ ER_UNKNOWN_PROCEDURE 42000 swe "Okänd procedur: %-.192s" ukr "Ðевідома процедура '%-.192s'" ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 - cze "Chybn-Bý poÄet parametrů procedury %-.192s" + cze "Chybný poÄet parametrů procedury %-.192s" dan "Forkert antal parametre til proceduren %-.192s" nla "Foutief aantal parameters doorgegeven aan procedure %-.192s" eng "Incorrect parameter count to procedure '%-.192s'" @@ -2524,6 +2469,7 @@ ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 greek "Λάθος αÏιθμός παÏαμÎÏ„Ïων στη διαδικασία '%-.192s'" hun "Rossz parameter a(z) '%-.192s'eljaras szamitasanal" ita "Numero di parametri errato per la procedura '%-.192s'" + jpn "プãƒã‚·ãƒ¼ã‚¸ãƒ£ '%-.192s' ã¸ã®ãƒ‘ラメータ数ãŒä¸æ£ã§ã™ã€‚" kor "'%-.192s' ìˆ˜í–‰ë¬¸ì— ëŒ€í•œ ë¶€ì •í™•í•œ 파ë¼ë©”í„°" nor "Feil parameter antall til prosedyren %-.192s" norwegian-ny "Feil parameter tal til prosedyra %-.192s" @@ -2537,7 +2483,7 @@ ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 swe "Felaktigt antal parametrar till procedur %-.192s" ukr "Хибна кількіÑÑ‚ÑŒ параметрів процедури '%-.192s'" ER_WRONG_PARAMETERS_TO_PROCEDURE - cze "Chybn-Bé parametry procedury %-.192s" + cze "Chybné parametry procedury %-.192s" dan "Forkert(e) parametre til proceduren %-.192s" nla "Foutieve parameters voor 
procedure %-.192s" eng "Incorrect parameters to procedure '%-.192s'" @@ -2547,6 +2493,7 @@ ER_WRONG_PARAMETERS_TO_PROCEDURE greek "Λάθος παÏάμετÏοι στην διαδικασία '%-.192s'" hun "Rossz parameter a(z) '%-.192s' eljarasban" ita "Parametri errati per la procedura '%-.192s'" + jpn "プãƒã‚·ãƒ¼ã‚¸ãƒ£ '%-.192s' ã¸ã®ãƒ‘ラメータãŒä¸æ£ã§ã™ã€‚" kor "'%-.192s' ìˆ˜í–‰ë¬¸ì— ëŒ€í•œ ë¶€ì •í™•í•œ 파ë¼ë©”í„°" nor "Feil parametre til prosedyren %-.192s" norwegian-ny "Feil parameter til prosedyra %-.192s" @@ -2560,7 +2507,7 @@ ER_WRONG_PARAMETERS_TO_PROCEDURE swe "Felaktiga parametrar till procedur %-.192s" ukr "Хибний параметер процедури '%-.192s'" ER_UNKNOWN_TABLE 42S02 - cze "Nezn-Bámá tabulka '%-.192s' v %-.32s" + cze "Neznámá tabulka '%-.192s' v %-.32s" dan "Ukendt tabel '%-.192s' i %-.32s" nla "Onbekende tabel '%-.192s' in %-.32s" eng "Unknown table '%-.192s' in %-.32s" @@ -2570,7 +2517,7 @@ ER_UNKNOWN_TABLE 42S02 greek "Αγνωστος πίνακας '%-.192s' σε %-.32s" hun "Ismeretlen tabla: '%-.192s' %-.32s-ban" ita "Tabella '%-.192s' sconosciuta in %-.32s" - jpn "Unknown table '%-.192s' in %-.32s" + jpn "'%-.192s' 㯠%-.32s ã§ã¯ä¸æ˜Žãªè¡¨ã§ã™ã€‚" kor "알수 없는 í…Œì´ë¸” '%-.192s' (ë°ì´íƒ€ë² ì´ìŠ¤ %-.32s)" nor "Ukjent tabell '%-.192s' i %-.32s" norwegian-ny "Ukjend tabell '%-.192s' i %-.32s" @@ -2584,7 +2531,7 @@ ER_UNKNOWN_TABLE 42S02 swe "Okänd tabell '%-.192s' i '%-.32s'" ukr "Ðевідома Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' у %-.32s" ER_FIELD_SPECIFIED_TWICE 42000 - cze "Polo-Bžka '%-.192s' je zadána dvakrát" + cze "Položka '%-.192s' je zadána dvakrát" dan "Feltet '%-.192s' er anvendt to gange" nla "Veld '%-.192s' is dubbel gespecificeerd" eng "Column '%-.192s' specified twice" @@ -2594,6 +2541,7 @@ ER_FIELD_SPECIFIED_TWICE 42000 greek "Το πεδίο '%-.192s' Îχει οÏισθεί δÏο φοÏÎÏ‚" hun "A(z) '%-.192s' mezot ketszer definialta" ita "Campo '%-.192s' specificato 2 volte" + jpn "列 '%-.192s' ã¯2回指定ã•ã‚Œã¦ã„ã¾ã™ã€‚" kor "칼럼 '%-.192s'는 ë‘번 ì •ì˜ë˜ì–´ 있ì니다." 
nor "Feltet '%-.192s' er spesifisert to ganger" norwegian-ny "Feltet '%-.192s' er spesifisert to gangar" @@ -2607,7 +2555,7 @@ ER_FIELD_SPECIFIED_TWICE 42000 swe "Fält '%-.192s' är redan använt" ukr "Стовбець '%-.192s' зазначено двічі" ER_INVALID_GROUP_FUNC_USE - cze "Nespr-Bávné použità funkce group" + cze "Nesprávné použità funkce group" dan "Forkert brug af grupperings-funktion" nla "Ongeldig gebruik van GROUP-functie" eng "Invalid use of group function" @@ -2617,6 +2565,7 @@ ER_INVALID_GROUP_FUNC_USE greek "ΕσφαλμÎνη χÏήση της group function" hun "A group funkcio ervenytelen hasznalata" ita "Uso non valido di una funzione di raggruppamento" + jpn "集計関数ã®ä½¿ç”¨æ–¹æ³•ãŒä¸æ£ã§ã™ã€‚" kor "ìž˜ëª»ëœ ê·¸ë£¹ 함수를 사용하였습니다." por "Uso inválido de função de agrupamento (GROUP)" rum "Folosire incorecta a functiei group" @@ -2627,7 +2576,7 @@ ER_INVALID_GROUP_FUNC_USE swe "Felaktig användning av SQL grupp function" ukr "Хибне викориÑÑ‚Ð°Ð½Ð½Ñ Ñ„ÑƒÐ½ÐºÑ†Ñ–Ñ— групуваннÑ" ER_UNSUPPORTED_EXTENSION 42000 - cze "Tabulka '%-.192s' pou-BžÃvá rozÅ¡ÃÅ™enÃ, které v této verzi MariaDB nenÃ" + cze "Tabulka '%-.192s' použÃvá rozÅ¡ÃÅ™enÃ, které v této verzi MySQL nenÃ" dan "Tabellen '%-.192s' bruger et filtypenavn som ikke findes i denne MariaDB version" nla "Tabel '%-.192s' gebruikt een extensie, die niet in deze MariaDB-versie voorkomt." eng "Table '%-.192s' uses an extension that doesn't exist in this MariaDB version" @@ -2637,6 +2586,7 @@ ER_UNSUPPORTED_EXTENSION 42000 greek "Ο πίνακς '%-.192s' χÏησιμοποιεί κάποιο extension που δεν υπάÏχει στην Îκδοση αυτή της MariaDB" hun "A(z) '%-.192s' tabla olyan bovitest hasznal, amely nem letezik ebben a MariaDB versioban." ita "La tabella '%-.192s' usa un'estensione che non esiste in questa versione di MariaDB" + jpn "表 '%-.192s' ã¯ã€ã“ã®MySQLãƒãƒ¼ã‚¸ãƒ§ãƒ³ã«ã¯ç„¡ã„機能を使用ã—ã¦ã„ã¾ã™ã€‚" kor "í…Œì´ë¸” '%-.192s'는 í™•ìž¥ëª…ë ¹ì„ ì´ìš©í•˜ì§€ë§Œ í˜„ìž¬ì˜ MariaDB ë²„ì ¼ì—서는 존재하지 않습니다." 
nor "Table '%-.192s' uses a extension that doesn't exist in this MariaDB version" norwegian-ny "Table '%-.192s' uses a extension that doesn't exist in this MariaDB version" @@ -2650,18 +2600,17 @@ ER_UNSUPPORTED_EXTENSION 42000 swe "Tabell '%-.192s' har en extension som inte finns i denna version av MariaDB" ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' викориÑтовує розширеннÑ, що не Ñ–Ñнує у цій верÑÑ–Ñ— MariaDB" ER_TABLE_MUST_HAVE_COLUMNS 42000 - cze "Tabulka mus-Bà mÃt alespoň jeden sloupec" + cze "Tabulka musà mÃt alespoň jeden sloupec" dan "En tabel skal have mindst een kolonne" nla "Een tabel moet minstens 1 kolom bevatten" eng "A table must have at least 1 column" - jps "テーブルã¯æœ€ä½Ž 1 個㮠column ãŒå¿…è¦ã§ã™", est "Tabelis peab olema vähemalt üks tulp" fre "Une table doit comporter au moins une colonne" ger "Eine Tabelle muss mindestens eine Spalte besitzen" greek "Ενας πίνακας Ï€ÏÎπει να Îχει τουλάχιστον Îνα πεδίο" hun "A tablanak legalabb egy oszlopot tartalmazni kell" ita "Una tabella deve avere almeno 1 colonna" - jpn "テーブルã¯æœ€ä½Ž 1 個㮠column ãŒå¿…è¦ã§ã™" + jpn "表ã«ã¯æœ€ä½Žã§ã‚‚1個ã®åˆ—ãŒå¿…è¦ã§ã™ã€‚" kor "í•˜ë‚˜ì˜ í…Œì´ë¸”ì—서는 ì ì–´ë„ í•˜ë‚˜ì˜ ì¹¼ëŸ¼ì´ ì¡´ìž¬í•˜ì—¬ì•¼ 합니다." por "Uma tabela tem que ter pelo menos uma (1) coluna" rum "O tabela trebuie sa aiba cel putin o coloana" @@ -2672,18 +2621,17 @@ ER_TABLE_MUST_HAVE_COLUMNS 42000 swe "Tabeller mÃ¥ste ha minst 1 kolumn" ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ Ð¿Ð¾Ð²Ð¸Ð½Ð½Ð° мати хочаб один Ñтовбець" ER_RECORD_FILE_FULL - cze "Tabulka '%-.192s' je pln-Bá" + cze "Tabulka '%-.192s' je plná" dan "Tabellen '%-.192s' er fuld" nla "De tabel '%-.192s' is vol" eng "The table '%-.192s' is full" - jps "table '%-.192s' ã¯ã„ã£ã±ã„ã§ã™", est "Tabel '%-.192s' on täis" fre "La table '%-.192s' est pleine" ger "Tabelle '%-.192s' ist voll" greek "Ο πίνακας '%-.192s' είναι γεμάτος" hun "A '%-.192s' tabla megtelt" ita "La tabella '%-.192s' e` piena" - jpn "table '%-.192s' ã¯ã„ã£ã±ã„ã§ã™" + jpn "表 '%-.192s' ã¯æº€æ¯ã§ã™ã€‚" kor "í…Œì´ë¸” '%-.192s'ê°€ full났습니다. 
" por "Tabela '%-.192s' está cheia" rum "Tabela '%-.192s' e plina" @@ -2694,18 +2642,17 @@ ER_RECORD_FILE_FULL swe "Tabellen '%-.192s' är full" ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' заповнена" ER_UNKNOWN_CHARACTER_SET 42000 - cze "Nezn-Bámá znaková sada: '%-.64s'" + cze "Neznámá znaková sada: '%-.64s'" dan "Ukendt tegnsæt: '%-.64s'" nla "Onbekende character set: '%-.64s'" eng "Unknown character set: '%-.64s'" - jps "character set '%-.64s' ã¯ã‚µãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“", est "Vigane kooditabel '%-.64s'" fre "Jeu de caractères inconnu: '%-.64s'" ger "Unbekannter Zeichensatz: '%-.64s'" greek "Αγνωστο character set: '%-.64s'" hun "Ervenytelen karakterkeszlet: '%-.64s'" ita "Set di caratteri '%-.64s' sconosciuto" - jpn "character set '%-.64s' ã¯ã‚µãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“" + jpn "ä¸æ˜Žãªæ–‡å—コードセット: '%-.64s'" kor "알수없는 언어 Set: '%-.64s'" por "Conjunto de caracteres '%-.64s' desconhecido" rum "Set de caractere invalid: '%-.64s'" @@ -2716,18 +2663,17 @@ ER_UNKNOWN_CHARACTER_SET 42000 swe "Okänd teckenuppsättning: '%-.64s'" ukr "Ðевідома кодова таблицÑ: '%-.64s'" ER_TOO_MANY_TABLES - cze "P-BÅ™ÃliÅ¡ mnoho tabulek, MariaDB jich může mÃt v joinu jen %d" + cze "PÅ™ÃliÅ¡ mnoho tabulek, MySQL jich může mÃt v joinu jen %d" dan "For mange tabeller. MariaDB kan kun bruge %d tabeller i et join" nla "Teveel tabellen. MariaDB kan slechts %d tabellen in een join bevatten" eng "Too many tables; MariaDB can only use %d tables in a join" - jps "テーブルãŒå¤šã™ãŽã¾ã™; MariaDB can only use %d tables in a join", est "Liiga palju tabeleid. MariaDB suudab JOINiga ühendada kuni %d tabelit" fre "Trop de tables. MariaDB ne peut utiliser que %d tables dans un JOIN" ger "Zu viele Tabellen. MariaDB kann in einem Join maximal %d Tabellen verwenden" greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿Ï‚ αÏιθμός πινάκων. Η MariaDB μποÏεί να χÏησιμοποιήσει %d πίνακες σε διαδικασία join" hun "Tul sok tabla. A MariaDB csak %d tablat tud kezelni osszefuzeskor" ita "Troppe tabelle. 
MariaDB puo` usare solo %d tabelle in una join" - jpn "テーブルãŒå¤šã™ãŽã¾ã™; MariaDB can only use %d tables in a join" + jpn "表ãŒå¤šã™ãŽã¾ã™ã€‚MySQLãŒJOINã§ãる表㯠%d 個ã¾ã§ã§ã™ã€‚" kor "너무 ë§Žì€ í…Œì´ë¸”ì´ Joinë˜ì—ˆìŠµë‹ˆë‹¤. MariaDBì—서는 JOINì‹œ %dê°œì˜ í…Œì´ë¸”만 ì‚¬ìš©í• ìˆ˜ 있습니다." por "Tabelas demais. O MariaDB pode usar somente %d tabelas em uma junção (JOIN)" rum "Prea multe tabele. MariaDB nu poate folosi mai mult de %d tabele intr-un join" @@ -2738,18 +2684,17 @@ ER_TOO_MANY_TABLES swe "För mÃ¥nga tabeller. MariaDB can ha högst %d tabeller i en och samma join" ukr "Забагато таблиць. MariaDB може викориÑтовувати лише %d таблиць у об'єднанні" ER_TOO_MANY_FIELDS - cze "P-BÅ™ÃliÅ¡ mnoho položek" + cze "PÅ™ÃliÅ¡ mnoho položek" dan "For mange felter" nla "Te veel velden" eng "Too many columns" - jps "column ãŒå¤šã™ãŽã¾ã™", est "Liiga palju tulpasid" fre "Trop de champs" ger "Zu viele Felder" greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿Ï‚ αÏιθμός πεδίων" hun "Tul sok mezo" ita "Troppi campi" - jpn "column ãŒå¤šã™ãŽã¾ã™" + jpn "列ãŒå¤šã™ãŽã¾ã™ã€‚" kor "ì¹¼ëŸ¼ì´ ë„ˆë¬´ 많습니다." por "Colunas demais" rum "Prea multe coloane" @@ -2760,18 +2705,17 @@ ER_TOO_MANY_FIELDS swe "För mÃ¥nga fält" ukr "Забагато Ñтовбців" ER_TOO_BIG_ROWSIZE 42000 - cze "-BŘádek je pÅ™ÃliÅ¡ velký. Maximálnà velikost řádku, nepoÄÃtaje položky blob, je %ld. MusÃte zmÄ›nit nÄ›které položky na blob" + cze "Řádek je pÅ™ÃliÅ¡ velký. Maximálnà velikost řádku, nepoÄÃtaje položky blob, je %ld. MusÃte zmÄ›nit nÄ›které položky na blob" dan "For store poster. Max post størrelse, uden BLOB's, er %ld. Du mÃ¥ lave nogle felter til BLOB's" nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %ld. U dient sommige velden in blobs te veranderen." eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs" - jps "row size ãŒå¤§ãã™ãŽã¾ã™. 
BLOB ã‚’å«ã¾ãªã„å ´åˆã® row size ã®æœ€å¤§ã¯ %ld ã§ã™. ã„ãã¤ã‹ã® field ã‚’ BLOB ã«å¤‰ãˆã¦ãã ã•ã„.", est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %ld. Muuda mõned väljad BLOB-tüüpi väljadeks" fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %ld. Changez le type de quelques colonnes en BLOB" ger "Zeilenlänge zu groß. Die maximale Zeilenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %ld. Einige Felder müssen in BLOB oder TEXT umgewandelt werden" greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿ μÎγεθος εγγÏαφής. Το μÎγιστο μÎγεθος εγγÏαφής, χωÏίς να υπολογίζονται τα blobs, είναι %ld. Î ÏÎπει να οÏίσετε κάποια πεδία σαν blobs" hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %ld. Nehany mezot meg kell valtoztatnia" ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %ld. Devi cambiare alcuni campi in BLOB" - jpn "row size ãŒå¤§ãã™ãŽã¾ã™. BLOB ã‚’å«ã¾ãªã„å ´åˆã® row size ã®æœ€å¤§ã¯ %ld ã§ã™. ã„ãã¤ã‹ã® field ã‚’ BLOB ã«å¤‰ãˆã¦ãã ã•ã„." + jpn "行サイズãŒå¤§ãã™ãŽã¾ã™ã€‚ã“ã®è¡¨ã®æœ€å¤§è¡Œã‚µã‚¤ã‚ºã¯ BLOB ã‚’å«ã¾ãšã« %ld ã§ã™ã€‚æ ¼ç´æ™‚ã®ã‚ªãƒ¼ãƒãƒ¼ãƒ˜ãƒƒãƒ‰ã‚‚å«ã¾ã‚Œã¾ã™(マニュアルを確èªã—ã¦ãã ã•ã„)。列をTEXTã¾ãŸã¯BLOBã«å¤‰æ›´ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚" kor "너무 í° row 사ì´ì¦ˆìž…니다. BLOB를 계산하지 ì•Šê³ ìµœëŒ€ row 사ì´ì¦ˆëŠ” %ld입니다. ì–¼ë§ˆê°„ì˜ í•„ë“œë“¤ì„ BLOBë¡œ 바꾸셔야 ê² êµ°ìš”.." por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %ld. Você tem que mudar alguns campos para BLOBs" rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %ld. Trebuie sa schimbati unele cimpuri in BLOB-uri" @@ -2782,17 +2726,16 @@ ER_TOO_BIG_ROWSIZE 42000 swe "För stor total radlängd. Den högst tillÃ¥tna radlängden, förutom BLOBs, är %ld. Ändra nÃ¥gra av dina fält till BLOB" ukr "Задовга Ñтрока. Ðайбільшою довжиною Ñтроки, не рахуючи BLOB, Ñ” %ld. 
Вам потрібно привеÑти деÑкі Ñтовбці до типу BLOB" ER_STACK_OVERRUN - cze "P-BÅ™eteÄenà zásobnÃku threadu: použito %ld z %ld. Použijte 'mysqld --thread_stack=#' k zadánà vÄ›tÅ¡Ãho zásobnÃku" + cze "PÅ™eteÄenà zásobnÃku threadu: použito %ld z %ld. Použijte 'mysqld --thread_stack=#' k zadánà vÄ›tÅ¡Ãho zásobnÃku" dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld --thread_stack=#' for at allokere en større stak om nødvendigt" nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld --thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)." eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld --thread_stack=#' to specify a bigger stack if needed" - jps "Thread stack overrun: Used: %ld of a %ld stack. ã‚¹ã‚¿ãƒƒã‚¯é ˜åŸŸã‚’å¤šãã¨ã‚ŠãŸã„å ´åˆã€'mysqld --thread_stack=#' ã¨æŒ‡å®šã—ã¦ãã ã•ã„", fre "Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld --thread_stack=#' pour indiquer une plus grande valeur" ger "Thread-Stack-Ãœberlauf. Benutzt: %ld von %ld Stack. 'mysqld --thread_stack=#' verwenden, um bei Bedarf einen größeren Stack anzulegen" greek "Stack overrun στο thread: Used: %ld of a %ld stack. ΠαÏακαλώ χÏησιμοποιείστε 'mysqld --thread_stack=#' για να οÏίσετε Îνα μεγαλÏτεÏο stack αν χÏειάζεται" hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld --thread_stack=#' nagyobb verem definialasahoz" ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld --thread_stack=#' per specificare uno stack piu` grande." - jpn "Thread stack overrun: Used: %ld of a %ld stack. ã‚¹ã‚¿ãƒƒã‚¯é ˜åŸŸã‚’å¤šãã¨ã‚ŠãŸã„å ´åˆã€'mysqld --thread_stack=#' ã¨æŒ‡å®šã—ã¦ãã ã•ã„" + jpn "スレッドスタックä¸è¶³ã§ã™(使用: %ld ; サイズ: %ld)。必è¦ã«å¿œã˜ã¦ã€ã‚ˆã‚Šå¤§ãã„値㧠'mysqld --thread_stack=#' ã®æŒ‡å®šã‚’ã—ã¦ãã ã•ã„。" kor "ì“°ë ˆë“œ 스íƒì´ 넘쳤습니다. 사용: %ldê°œ 스íƒ: %ldê°œ. 
만약 필요시 ë”í° ìŠ¤íƒì„ ì›í• ë•Œì—는 'mysqld --thread_stack=#' 를 ì •ì˜í•˜ì„¸ìš”" por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld --thread_stack=#' para especificar uma pilha maior, se necessário" rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld --thread_stack=#' ca sa specifici un stack mai mare" @@ -2803,7 +2746,7 @@ ER_STACK_OVERRUN swe "TrÃ¥dstacken tog slut: Har använt %ld av %ld bytes. Använd 'mysqld --thread_stack=#' ifall du behöver en större stack" ukr "Стек гілок переповнено: ВикориÑтано: %ld з %ld. ВикориÑтовуйте 'mysqld --thread_stack=#' аби зазначити більший Ñтек, Ñкщо необхідно" ER_WRONG_OUTER_JOIN 42000 - cze "V OUTER JOIN byl nalezen k-BřÞový odkaz. Prověřte ON podmÃnky" + cze "V OUTER JOIN byl nalezen křÞový odkaz. Prověřte ON podmÃnky" dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions" nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions" eng "Cross dependency found in OUTER JOIN; examine your ON conditions" @@ -2813,6 +2756,7 @@ ER_WRONG_OUTER_JOIN 42000 greek "Cross dependency βÏÎθηκε σε OUTER JOIN. ΠαÏακαλώ εξετάστε τις συνθήκες που θÎσατε στο ON" hun "Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket" ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON" + jpn "OUTER JOINã«ç›¸äº’ä¾å˜ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚ONå¥ã®æ¡ä»¶ã‚’確èªã—ã¦ä¸‹ã•ã„。" por "Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'" rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON" rus "Ð’ OUTER JOIN обнаружена перекреÑÑ‚Ð½Ð°Ñ Ð·Ð°Ð²Ð¸ÑимоÑÑ‚ÑŒ. Внимательно проанализируйте Ñвои уÑÐ»Ð¾Ð²Ð¸Ñ ON" @@ -2825,18 +2769,17 @@ ER_NULL_COLUMN_IN_INDEX 42000 eng "Table handler doesn't support NULL in given index. 
Please change column '%-.192s' to be NOT NULL or use another handler" swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. Ändra '%-.192s' till NOT NULL eller använd en annan hanterare" ER_CANT_FIND_UDF - cze "Nemohu na-BÄÃst funkci '%-.192s'" + cze "Nemohu naÄÃst funkci '%-.192s'" dan "Kan ikke læse funktionen '%-.192s'" nla "Kan functie '%-.192s' niet laden" eng "Can't load function '%-.192s'" - jps "function '%-.192s' ã‚’ ãƒãƒ¼ãƒ‰ã§ãã¾ã›ã‚“", est "Ei suuda avada funktsiooni '%-.192s'" fre "Imposible de charger la fonction '%-.192s'" ger "Kann Funktion '%-.192s' nicht laden" greek "Δεν είναι δυνατή η διαδικασία load για τη συνάÏτηση '%-.192s'" hun "A(z) '%-.192s' fuggveny nem toltheto be" ita "Impossibile caricare la funzione '%-.192s'" - jpn "function '%-.192s' ã‚’ ãƒãƒ¼ãƒ‰ã§ãã¾ã›ã‚“" + jpn "関数 '%-.192s' ã‚’ãƒãƒ¼ãƒ‰ã§ãã¾ã›ã‚“。" kor "'%-.192s' 함수를 로드하지 못했습니다." por "Não pode carregar a função '%-.192s'" rum "Nu pot incarca functia '%-.192s'" @@ -2851,14 +2794,13 @@ ER_CANT_INITIALIZE_UDF dan "Kan ikke starte funktionen '%-.192s'; %-.80s" nla "Kan functie '%-.192s' niet initialiseren; %-.80s" eng "Can't initialize function '%-.192s'; %-.80s" - jps "function '%-.192s' ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“; %-.80s", est "Ei suuda algväärtustada funktsiooni '%-.192s'; %-.80s" fre "Impossible d'initialiser la fonction '%-.192s'; %-.80s" ger "Kann Funktion '%-.192s' nicht initialisieren: %-.80s" greek "Δεν είναι δυνατή η ÎναÏξη της συνάÏτησης '%-.192s'; %-.80s" hun "A(z) '%-.192s' fuggveny nem inicializalhato; %-.80s" ita "Impossibile inizializzare la funzione '%-.192s'; %-.80s" - jpn "function '%-.192s' ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“; %-.80s" + jpn "関数 '%-.192s' ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“。; %-.80s" kor "'%-.192s' 함수를 초기화 하지 못했습니다.; %-.80s" por "Não pode inicializar a função '%-.192s' - '%-.80s'" rum "Nu pot initializa functia '%-.192s'; %-.80s" @@ -2869,18 +2811,17 @@ ER_CANT_INITIALIZE_UDF swe "Kan inte initialisera funktionen '%-.192s'; '%-.80s'" ukr "Ðе можу 
ініціалізувати функцію '%-.192s'; %-.80s" ER_UDF_NO_PATHS - cze "Pro sd-BÃlenou knihovnu nejsou povoleny cesty" + cze "Pro sdÃlenou knihovnu nejsou povoleny cesty" dan "Angivelse af sti ikke tilladt for delt bibliotek" nla "Geen pad toegestaan voor shared library" eng "No paths allowed for shared library" - jps "shared library ã¸ã®ãƒ‘スãŒé€šã£ã¦ã„ã¾ã›ã‚“", est "Teegi nimes ei tohi olla kataloogi" fre "Chemin interdit pour les bibliothèques partagées" ger "Keine Pfade gestattet für Shared Library" greek "Δεν βÏÎθηκαν paths για την shared library" hun "Nincs ut a megosztott konyvtarakhoz (shared library)" ita "Non sono ammessi path per le librerie condivisa" - jpn "shared library ã¸ã®ãƒ‘スãŒé€šã£ã¦ã„ã¾ã›ã‚“" + jpn "共有ライブラリã«ã¯ãƒ‘スを指定ã§ãã¾ã›ã‚“。" kor "ê³µìœ ë¼ì´ë²„러리를 위한 패스가 ì •ì˜ë˜ì–´ 있지 않습니다." por "Não há caminhos (paths) permitidos para biblioteca compartilhada" rum "Nici un paths nu e permis pentru o librarie shared" @@ -2891,18 +2832,17 @@ ER_UDF_NO_PATHS swe "Man fÃ¥r inte ange sökväg för dynamiska bibliotek" ukr "Ðе дозволено викориÑтовувати путі Ð´Ð»Ñ Ñ€Ð¾Ð·Ð´Ñ–Ð»ÑŽÐ²Ð°Ð½Ð¸Ñ… бібліотек" ER_UDF_EXISTS - cze "Funkce '%-.192s' ji-Bž existuje" + cze "Funkce '%-.192s' již existuje" dan "Funktionen '%-.192s' findes allerede" nla "Functie '%-.192s' bestaat reeds" eng "Function '%-.192s' already exists" - jps "Function '%-.192s' ã¯æ—¢ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã™", est "Funktsioon '%-.192s' juba eksisteerib" fre "La fonction '%-.192s' existe déjà " ger "Funktion '%-.192s' existiert schon" greek "Η συνάÏτηση '%-.192s' υπάÏχει ήδη" hun "A '%-.192s' fuggveny mar letezik" ita "La funzione '%-.192s' esiste gia`" - jpn "Function '%-.192s' ã¯æ—¢ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã™" + jpn "関数 '%-.192s' ã¯ã™ã§ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã™ã€‚" kor "'%-.192s' 함수는 ì´ë¯¸ 존재합니다." 
por "Função '%-.192s' já existe" rum "Functia '%-.192s' exista deja" @@ -2913,18 +2853,17 @@ ER_UDF_EXISTS swe "Funktionen '%-.192s' finns redan" ukr "Ð¤ÑƒÐ½ÐºÑ†Ñ–Ñ '%-.192s' вже Ñ–Ñнує" ER_CANT_OPEN_LIBRARY - cze "Nemohu otev-BÅ™Ãt sdÃlenou knihovnu '%-.192s' (errno: %d %-.128s)" + cze "Nemohu otevÅ™Ãt sdÃlenou knihovnu '%-.192s' (errno: %d %-.128s)" dan "Kan ikke Ã¥bne delt bibliotek '%-.192s' (errno: %d %-.128s)" nla "Kan shared library '%-.192s' niet openen (Errcode: %d %-.128s)" eng "Can't open shared library '%-.192s' (errno: %d %-.128s)" - jps "shared library '%-.192s' ã‚’é–‹ã事ãŒã§ãã¾ã›ã‚“ (errno: %d %-.128s)", est "Ei suuda avada jagatud teeki '%-.192s' (veakood: %d %-.128s)" fre "Impossible d'ouvrir la bibliothèque partagée '%-.192s' (errno: %d %-.128s)" ger "Kann Shared Library '%-.192s' nicht öffnen (Fehler: %d %-.128s)" greek "Δεν είναι δυνατή η ανάγνωση της shared library '%-.192s' (κωδικός λάθους: %d %-.128s)" hun "A(z) '%-.192s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)" ita "Impossibile aprire la libreria condivisa '%-.192s' (errno: %d %-.128s)" - jpn "shared library '%-.192s' ã‚’é–‹ã事ãŒã§ãã¾ã›ã‚“ (errno: %d %-.128s)" + jpn "共有ライブラリ '%-.192s' ã‚’é–‹ã事ãŒã§ãã¾ã›ã‚“。(エラー番å·: %d %-.128s)" kor "'%-.192s' ê³µìœ ë¼ì´ë²„러리를 열수 없습니다.(ì—러번호: %d %-.128s)" nor "Can't open shared library '%-.192s' (errno: %d %-.128s)" norwegian-ny "Can't open shared library '%-.192s' (errno: %d %-.128s)" @@ -2938,18 +2877,17 @@ ER_CANT_OPEN_LIBRARY swe "Kan inte öppna det dynamiska biblioteket '%-.192s' (Felkod: %d %-.128s)" ukr "Ðе можу відкрити розділювану бібліотеку '%-.192s' (помилка: %d %-.128s)" ER_CANT_FIND_DL_ENTRY - cze "Nemohu naj-BÃt funkci '%-.128s' v knihovnÄ›" + cze "Nemohu najÃt funkci '%-.128s' v knihovnÄ›" dan "Kan ikke finde funktionen '%-.128s' i bibliotek" nla "Kan functie '%-.128s' niet in library vinden" eng "Can't find symbol '%-.128s' in library" - jps "function '%-.128s' をライブラリーä¸ã«è¦‹ä»˜ã‘る事ãŒã§ãã¾ã›ã‚“", est "Ei leia funktsiooni 
'%-.128s' antud teegis" fre "Impossible de trouver la fonction '%-.128s' dans la bibliothèque" ger "Kann Funktion '%-.128s' in der Library nicht finden" greek "Δεν είναι δυνατή η ανεÏÏεση της συνάÏτησης '%-.128s' στην βιβλιοθήκη" hun "A(z) '%-.128s' fuggveny nem talalhato a konyvtarban" ita "Impossibile trovare la funzione '%-.128s' nella libreria" - jpn "function '%-.128s' をライブラリーä¸ã«è¦‹ä»˜ã‘る事ãŒã§ãã¾ã›ã‚“" + jpn "関数 '%-.128s' ã¯å…±æœ‰ãƒ©ã‚¤ãƒ–ラリーä¸ã«ã‚ã‚Šã¾ã›ã‚“。" kor "ë¼ì´ë²„러리ì—ì„œ '%-.128s' 함수를 ì°¾ì„ ìˆ˜ 없습니다." por "Não pode encontrar a função '%-.128s' na biblioteca" rum "Nu pot gasi functia '%-.128s' in libraria" @@ -2960,18 +2898,17 @@ ER_CANT_FIND_DL_ENTRY swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket" ukr "Ðе можу знайти функцію '%-.128s' у бібліотеці" ER_FUNCTION_NOT_DEFINED - cze "Funkce '%-.192s' nen-Bà definována" + cze "Funkce '%-.192s' nenà definována" dan "Funktionen '%-.192s' er ikke defineret" nla "Functie '%-.192s' is niet gedefinieerd" eng "Function '%-.192s' is not defined" - jps "Function '%-.192s' ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“", est "Funktsioon '%-.192s' ei ole defineeritud" fre "La fonction '%-.192s' n'est pas définie" ger "Funktion '%-.192s' ist nicht definiert" greek "Η συνάÏτηση '%-.192s' δεν Îχει οÏισθεί" hun "A '%-.192s' fuggveny nem definialt" ita "La funzione '%-.192s' non e` definita" - jpn "Function '%-.192s' ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“" + jpn "関数 '%-.192s' ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“。" kor "'%-.192s' 함수가 ì •ì˜ë˜ì–´ 있지 않습니다." por "Função '%-.192s' não está definida" rum "Functia '%-.192s' nu e definita" @@ -2982,18 +2919,17 @@ ER_FUNCTION_NOT_DEFINED swe "Funktionen '%-.192s' är inte definierad" ukr "Функцію '%-.192s' не визначено" ER_HOST_IS_BLOCKED - cze "Stroj '%-.64s' je zablokov-Bán kvůli mnoha chybám pÅ™i pÅ™ipojovánÃ. Odblokujete použitÃm 'mysqladmin flush-hosts'" + cze "Stroj '%-.64s' je zablokován kvůli mnoha chybám pÅ™i pÅ™ipojovánÃ. 
Odblokujete použitÃm 'mysqladmin flush-hosts'" dan "Værten '%-.64s' er blokeret pÃ¥ grund af mange fejlforespørgsler. LÃ¥s op med 'mysqladmin flush-hosts'" nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'" eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'" - jps "Host '%-.64s' 㯠many connection error ã®ãŸã‚ã€æ‹’å¦ã•ã‚Œã¾ã—ãŸ. 'mysqladmin flush-hosts' ã§è§£é™¤ã—ã¦ãã ã•ã„", est "Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga" fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connexion. Débloquer le par 'mysqladmin flush-hosts'" ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'" greek "Ο υπολογιστής '%-.64s' Îχει αποκλεισθεί λόγω πολλαπλών λαθών σÏνδεσης. Î Ïοσπαθήστε να διοÏώσετε με 'mysqladmin flush-hosts'" hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot" ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'" - jpn "Host '%-.64s' 㯠many connection error ã®ãŸã‚ã€æ‹’å¦ã•ã‚Œã¾ã—ãŸ. 'mysqladmin flush-hosts' ã§è§£é™¤ã—ã¦ãã ã•ã„" + jpn "接続エラーãŒå¤šã„ãŸã‚ã€ãƒ›ã‚¹ãƒˆ '%-.64s' ã¯æ‹’å¦ã•ã‚Œã¾ã—ãŸã€‚'mysqladmin flush-hosts' ã§è§£é™¤ã§ãã¾ã™ã€‚" kor "너무 ë§Žì€ ì—°ê²°ì˜¤ë¥˜ë¡œ ì¸í•˜ì—¬ 호스트 '%-.64s'는 블ë½ë˜ì—ˆìŠµë‹ˆë‹¤. 'mysqladmin flush-hosts'를 ì´ìš©í•˜ì—¬ 블ë½ì„ í•´ì œí•˜ì„¸ìš”" por "'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'" rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'" @@ -3003,18 +2939,17 @@ ER_HOST_IS_BLOCKED swe "Denna dator, '%-.64s', är blockerad pga mÃ¥nga felaktig paket. 
Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna" ukr "ХоÑÑ‚ '%-.64s' заблоковано з причини великої кількоÑÑ‚Ñ– помилок з'єднаннÑ. Ð”Ð»Ñ Ñ€Ð¾Ð·Ð±Ð»Ð¾ÐºÑƒÐ²Ð°Ð½Ð½Ñ Ð²Ð¸ÐºÐ¾Ñ€Ð¸Ñтовуйте 'mysqladmin flush-hosts'" ER_HOST_NOT_PRIVILEGED - cze "Stroj '%-.64s' nem-Bá povoleno se k tomuto MariaDB serveru pÅ™ipojit" + cze "Stroj '%-.64s' nemá povoleno se k tomuto MySQL serveru pÅ™ipojit" dan "Værten '%-.64s' kan ikke tilkoble denne MariaDB-server" nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MariaDB server" eng "Host '%-.64s' is not allowed to connect to this MariaDB server" - jps "Host '%-.64s' 㯠MariaDB server ã«æŽ¥ç¶šã‚’許å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“", est "Masinal '%-.64s' puudub ligipääs sellele MariaDB serverile" fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MariaDB" ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MariaDB-Server zu verbinden" greek "Ο υπολογιστής '%-.64s' δεν Îχει δικαίωμα σÏνδεσης με τον MariaDB server" hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MariaDB szerverhez" ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MariaDB" - jpn "Host '%-.64s' 㯠MariaDB server ã«æŽ¥ç¶šã‚’許å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“" + jpn "ホスト '%-.64s' ã‹ã‚‰ã®ã“ã® MySQL server ã¸ã®æŽ¥ç¶šã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。" kor "'%-.64s' 호스트는 ì´ MariaDBì„œë²„ì— ì ‘ì†í• 허가를 받지 못했습니다." 
por "'Host' '%-.64s' não tem permissão para se conectar com este servidor MariaDB" rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MariaDB" @@ -3024,18 +2959,17 @@ ER_HOST_NOT_PRIVILEGED swe "Denna dator, '%-.64s', har inte privileger att använda denna MariaDB server" ukr "ХоÑту '%-.64s' не доволено зв'ÑзуватиÑÑŒ з цим Ñервером MariaDB" ER_PASSWORD_ANONYMOUS_USER 42000 - cze "Pou-BžÃváte MariaDB jako anonymnà uživatel a anonymnà uživatelé nemajà povoleno mÄ›nit hesla" + cze "PoužÃváte MySQL jako anonymnà uživatel a anonymnà uživatelé nemajà povoleno mÄ›nit hesla" dan "Du bruger MariaDB som anonym bruger. Anonyme brugere mÃ¥ ikke ændre adgangskoder" nla "U gebruikt MariaDB als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen" eng "You are using MariaDB as an anonymous user and anonymous users are not allowed to change passwords" - jps "MariaDB ã‚’ anonymous users ã§ä½¿ç”¨ã—ã¦ã„る状態ã§ã¯ã€ãƒ‘スワードã®å¤‰æ›´ã¯ã§ãã¾ã›ã‚“", est "Te kasutate MariaDB-i anonüümse kasutajana, kelledel pole parooli muutmise õigust" fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe" ger "Sie benutzen MariaDB als anonymer Benutzer und dürfen daher keine Passwörter ändern" greek "ΧÏησιμοποιείτε την MariaDB σαν anonymous user και Îτσι δεν μποÏείτε να αλλάξετε τα passwords άλλων χÏηστών" hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas" ita "Impossibile cambiare la password usando MariaDB come utente anonimo" - jpn "MariaDB ã‚’ anonymous users ã§ä½¿ç”¨ã—ã¦ã„る状態ã§ã¯ã€ãƒ‘スワードã®å¤‰æ›´ã¯ã§ãã¾ã›ã‚“" + jpn "MySQL を匿åユーザーã§ä½¿ç”¨ã—ã¦ã„ã‚‹ã®ã§ã€ãƒ‘スワードã®å¤‰æ›´ã¯ã§ãã¾ã›ã‚“。" kor "ë‹¹ì‹ ì€ MariaDBì„œë²„ì— ìµëª…ì˜ ì‚¬ìš©ìžë¡œ ì ‘ì†ì„ 하셨습니다.ìµëª…ì˜ ì‚¬ìš©ìžëŠ” 암호를 ë³€ê²½í• ìˆ˜ 없습니다." 
por "Você está usando o MariaDB como usuário anônimo e usuários anônimos não têm permissão para mudar senhas" rum "Dumneavoastra folositi MariaDB ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele" @@ -3045,18 +2979,17 @@ ER_PASSWORD_ANONYMOUS_USER 42000 swe "Du använder MariaDB som en anonym användare och som sÃ¥dan fÃ¥r du inte ändra ditt lösenord" ukr "Ви викориÑтовуєте MariaDB Ñк анонімний кориÑтувач, тому вам не дозволено змінювати паролі" ER_PASSWORD_NOT_ALLOWED 42000 - cze "Na zm-BÄ›nu hesel ostatnÃm musÃte mÃt právo provést update tabulek v databázi mysql" + cze "Na zmÄ›nu hesel ostatnÃm musÃte mÃt právo provést update tabulek v databázi mysql" dan "Du skal have tilladelse til at opdatere tabeller i MariaDB databasen for at ændre andres adgangskoder" nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen" eng "You must have privileges to update tables in the mysql database to be able to change passwords for others" - jps "ä»–ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã®ãƒ‘スワードを変更ã™ã‚‹ãŸã‚ã«ã¯, mysql データベースã«å¯¾ã—㦠update ã®è¨±å¯ãŒãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“.", est "Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis" fre "Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres" ger "Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können" greek "Î ÏÎπει να Îχετε δικαίωμα διόÏθωσης πινάκων (update) στη βάση δεδομÎνων mysql για να μποÏείτε να αλλάξετε τα passwords άλλων χÏηστών" hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz" ita "E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti" - jpn "ä»–ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã®ãƒ‘スワードを変更ã™ã‚‹ãŸã‚ã«ã¯, mysql データベースã«å¯¾ã—㦠update ã®è¨±å¯ãŒãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“." 
+ jpn "ä»–ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã®ãƒ‘スワードを変更ã™ã‚‹ãŸã‚ã«ã¯ã€mysqlデータベースã®è¡¨ã‚’æ›´æ–°ã™ã‚‹æ¨©é™ãŒå¿…è¦ã§ã™ã€‚" kor "ë‹¹ì‹ ì€ ë‹¤ë¥¸ì‚¬ìš©ìžë“¤ì˜ 암호를 ë³€ê²½í• ìˆ˜ 있ë„ë¡ ë°ì´íƒ€ë² ì´ìŠ¤ ë³€ê²½ê¶Œí•œì„ ê°€ì ¸ì•¼ 합니다." por "Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros" rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora" @@ -3066,7 +2999,7 @@ ER_PASSWORD_NOT_ALLOWED 42000 swe "För att ändra lösenord för andra mÃ¥ste du ha rättigheter att uppdatera mysql-databasen" ukr "Ви повині мати право на Ð¾Ð½Ð¾Ð²Ð»ÐµÐ½Ð½Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†ÑŒ у базі данних mysql, аби мати можливіÑÑ‚ÑŒ змінювати пароль іншим" ER_PASSWORD_NO_MATCH 42000 - cze "V tabulce user nen-Bà žádný odpovÃdajÃcà řádek" + cze "V tabulce user nenà žádný odpovÃdajÃcà řádek" dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen" nla "Kan geen enkele passende rij vinden in de gebruikers tabel" eng "Can't find any matching row in the user table" @@ -3076,6 +3009,7 @@ ER_PASSWORD_NO_MATCH 42000 greek "Δεν είναι δυνατή η ανεÏÏεση της αντίστοιχης εγγÏαφής στον πίνακα των χÏηστών" hun "Nincs megegyezo sor a user tablaban" ita "Impossibile trovare la riga corrispondente nella tabella user" + jpn "ユーザーテーブルã«è©²å½“ã™ã‚‹ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" kor "ì‚¬ìš©ìž í…Œì´ë¸”ì—ì„œ ì¼ì¹˜í•˜ëŠ” ê²ƒì„ ì°¾ì„ ìˆ˜ ì—†ì니다." 
por "Não pode encontrar nenhuma linha que combine na tabela usuário (user table)" rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului" @@ -3085,17 +3019,16 @@ ER_PASSWORD_NO_MATCH 42000 swe "Hittade inte användaren i 'user'-tabellen" ukr "Ðе можу знайти відповідних запиÑів у таблиці кориÑтувача" ER_UPDATE_INFO - cze "Nalezen-Bých řádků: %ld ZmÄ›nÄ›no: %ld VarovánÃ: %ld" + cze "Nalezených řádků: %ld ZmÄ›nÄ›no: %ld VarovánÃ: %ld" dan "Poster fundet: %ld Ændret: %ld Advarsler: %ld" nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld" eng "Rows matched: %ld Changed: %ld Warnings: %ld" - jps "一致数(Rows matched): %ld 変更: %ld Warnings: %ld", est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld" fre "Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld" ger "Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld" hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld" ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld" - jpn "一致数(Rows matched): %ld 変更: %ld Warnings: %ld" + jpn "該当ã—ãŸè¡Œ: %ld 変更: %ld è¦å‘Š: %ld" kor "ì¼ì¹˜í•˜ëŠ” Rows : %ldê°œ 변경ë¨: %ldê°œ ê²½ê³ : %ldê°œ" por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld" rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld" @@ -3105,17 +3038,16 @@ ER_UPDATE_INFO swe "Rader: %ld Uppdaterade: %ld Varningar: %ld" ukr "ЗапиÑів відповідає: %ld Змінено: %ld ЗаÑтережень: %ld" ER_CANT_CREATE_THREAD - cze "Nemohu vytvo-BÅ™it nový thread (errno %M). Pokud je jeÅ¡tÄ› nÄ›jaká volná paměť, podÃvejte se do manuálu na Äást o chybách specifických pro jednotlivé operaÄnà systémy" + cze "Nemohu vytvoÅ™it nový thread (errno %M). Pokud je jeÅ¡tÄ› nÄ›jaká volná paměť, podÃvejte se do manuálu na Äást o chybách specifických pro jednotlivé operaÄnà systémy" dan "Kan ikke danne en ny trÃ¥d (fejl nr. %M). 
Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl" nla "Kan geen nieuwe thread aanmaken (Errcode: %M). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout" eng "Can't create a new thread (errno %M); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug" - jps "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %M). ã‚‚ã—最大使用許å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’越ãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å—を探ã—ã¦ãã¿ã¦ã ã•ã„.", est "Ei suuda luua uut lõime (veakood %M). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga" fre "Impossible de créer une nouvelle tâche (errno %M). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS" ger "Kann keinen neuen Thread erzeugen (Fehler: %M). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen" hun "Uj thread letrehozasa nem lehetseges (Hibakod: %M). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet" ita "Impossibile creare un nuovo thread (errno %M). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO" - jpn "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %M). ã‚‚ã—最大使用許å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’越ãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å—を探ã—ã¦ãã¿ã¦ã ã•ã„." + jpn "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ã‚’作æˆã§ãã¾ã›ã‚“。(ã‚¨ãƒ©ãƒ¼ç•ªå· %M) ã‚‚ã—も使用å¯èƒ½ãƒ¡ãƒ¢ãƒªãƒ¼ã®ä¸è¶³ã§ãªã‘ã‚Œã°ã€OSä¾å˜ã®ãƒã‚°ã§ã‚ã‚‹å¯èƒ½æ€§ãŒã‚ã‚Šã¾ã™ã€‚" kor "새로운 ì“°ë ˆë“œë¥¼ 만들 수 없습니다.(ì—러번호 %M). 만약 ì—¬ìœ ë©”ëª¨ë¦¬ê°€ 있다면 OS-dependent버그 ì˜ ë©”ë‰´ì–¼ ë¶€ë¶„ì„ ì°¾ì•„ë³´ì‹œì˜¤." 
nor "Can't create a new thread (errno %M); if you are not out of available memory you can consult the manual for any possible OS dependent bug" norwegian-ny "Can't create a new thread (errno %M); if you are not out of available memory you can consult the manual for any possible OS dependent bug" @@ -3128,7 +3060,7 @@ ER_CANT_CREATE_THREAD swe "Kan inte skapa en ny trÃ¥d (errno %M)" ukr "Ðе можу Ñтворити нову гілку (помилка %M). Якщо ви не викориÑтали уÑÑŽ пам'ÑÑ‚ÑŒ, то прочитайте документацію до вашої ОС - можливо це помилка ОС" ER_WRONG_VALUE_COUNT_ON_ROW 21S01 - cze "Po-BÄet sloupců neodpovÃdá poÄtu hodnot na řádku %lu" + cze "PoÄet sloupců neodpovÃdá poÄtu hodnot na řádku %lu" dan "Kolonne antallet stemmer ikke overens med antallet af værdier i post %lu" nla "Kolom aantal komt niet overeen met waarde aantal in rij %lu" eng "Column count doesn't match value count at row %lu" @@ -3136,6 +3068,7 @@ ER_WRONG_VALUE_COUNT_ON_ROW 21S01 ger "Anzahl der Felder stimmt nicht mit der Anzahl der Werte in Zeile %lu überein" hun "Az oszlopban talalhato ertek nem egyezik meg a %lu sorban szamitott ertekkel" ita "Il numero delle colonne non corrisponde al conteggio alla riga %lu" + jpn "%lu 行目ã§ã€åˆ—ã®æ•°ãŒå€¤ã®æ•°ã¨ä¸€è‡´ã—ã¾ã›ã‚“。" kor "Row %luì—ì„œ 칼럼 카운트와 value 카운터와 ì¼ì¹˜í•˜ì§€ 않습니다." 
por "Contagem de colunas não confere com a contagem de valores na linha %lu" rum "Numarul de coloane nu corespunde cu numarul de valori la linia %lu" @@ -3145,7 +3078,7 @@ ER_WRONG_VALUE_COUNT_ON_ROW 21S01 swe "Antalet kolumner motsvarar inte antalet värden pÃ¥ rad: %lu" ukr "КількіÑÑ‚ÑŒ Ñтовбців не Ñпівпадає з кількіÑÑ‚ÑŽ значень у Ñтроці %lu" ER_CANT_REOPEN_TABLE - cze "Nemohu znovuotev-BÅ™Ãt tabulku: '%-.192s" + cze "Nemohu znovuotevÅ™Ãt tabulku: '%-.192s" dan "Kan ikke genÃ¥bne tabel '%-.192s" nla "Kan tabel niet opnieuw openen: '%-.192s" eng "Can't reopen table: '%-.192s'" @@ -3154,6 +3087,7 @@ ER_CANT_REOPEN_TABLE ger "Kann Tabelle'%-.192s' nicht erneut öffnen" hun "Nem lehet ujra-megnyitni a tablat: '%-.192s" ita "Impossibile riaprire la tabella: '%-.192s'" + jpn "表をå†ã‚ªãƒ¼ãƒ—ンã§ãã¾ã›ã‚“。: '%-.192s'" kor "í…Œì´ë¸”ì„ ë‹¤ì‹œ 열수 없군요: '%-.192s" nor "Can't reopen table: '%-.192s" norwegian-ny "Can't reopen table: '%-.192s" @@ -3167,17 +3101,16 @@ ER_CANT_REOPEN_TABLE swe "Kunde inte stänga och öppna tabell '%-.192s" ukr "Ðе можу перевідкрити таблицю: '%-.192s'" ER_INVALID_USE_OF_NULL 22004 - cze "Neplatn-Bé užità hodnoty NULL" + cze "Neplatné užità hodnoty NULL" dan "Forkert brug af nulværdi (NULL)" nla "Foutief gebruik van de NULL waarde" eng "Invalid use of NULL value" - jps "NULL 値ã®ä½¿ç”¨æ–¹æ³•ãŒä¸é©åˆ‡ã§ã™", est "NULL väärtuse väärkasutus" fre "Utilisation incorrecte de la valeur NULL" ger "Unerlaubte Verwendung eines NULL-Werts" hun "A NULL ervenytelen hasznalata" ita "Uso scorretto del valore NULL" - jpn "NULL 値ã®ä½¿ç”¨æ–¹æ³•ãŒä¸é©åˆ‡ã§ã™" + jpn "NULL 値ã®ä½¿ç”¨æ–¹æ³•ãŒä¸é©åˆ‡ã§ã™ã€‚" kor "NULL ê°’ì„ ìž˜ëª» 사용하셨군요..." 
por "Uso inválido do valor NULL" rum "Folosirea unei value NULL e invalida" @@ -3187,7 +3120,7 @@ ER_INVALID_USE_OF_NULL 22004 swe "Felaktig använding av NULL" ukr "Хибне викориÑÑ‚Ð°Ð½Ð½Ñ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ NULL" ER_REGEXP_ERROR 42000 - cze "Regul-Bárnà výraz vrátil chybu '%-.64s'" + cze "Regulárnà výraz vrátil chybu '%-.64s'" dan "Fik fejl '%-.64s' fra regexp" nla "Fout '%-.64s' ontvangen van regexp" eng "Got error '%-.64s' from regexp" @@ -3196,6 +3129,7 @@ ER_REGEXP_ERROR 42000 ger "regexp lieferte Fehler '%-.64s'" hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)" ita "Errore '%-.64s' da regexp" + jpn "regexp ãŒã‚¨ãƒ©ãƒ¼ '%-.64s' ã‚’è¿”ã—ã¾ã—ãŸã€‚" kor "regexpì—ì„œ '%-.64s'ê°€ 났습니다." por "Obteve erro '%-.64s' em regexp" rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)" @@ -3205,7 +3139,7 @@ ER_REGEXP_ERROR 42000 swe "Fick fel '%-.64s' frÃ¥n REGEXP" ukr "Отримано помилку '%-.64s' від регулÑрного виразу" ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000 - cze "Pokud nen-Bà žádná GROUP BY klauzule, nenà dovoleno souÄasné použità GROUP položek (MIN(),MAX(),COUNT()...) s ne GROUP položkami" + cze "Pokud nenà žádná GROUP BY klauzule, nenà dovoleno souÄasné použità GROUP položek (MIN(),MAX(),COUNT()...) s ne GROUP položkami" dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat" nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is" eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause" @@ -3214,6 +3148,7 @@ ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000 ger "Das Vermischen von GROUP-Feldern (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Feldern ist nicht zulässig, wenn keine GROUP-BY-Klausel vorhanden ist" hun "A GROUP mezok (MIN(),MAX(),COUNT()...) 
kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul" ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY" + jpn "GROUP BYå¥ãŒç„¡ã„å ´åˆã€é›†è¨ˆé–¢æ•°(MIN(),MAX(),COUNT(),...)ã¨é€šå¸¸ã®åˆ—ã‚’åŒæ™‚ã«ä½¿ç”¨ã§ãã¾ã›ã‚“。" kor "Mixing of GROUP 칼럼s (MIN(),MAX(),COUNT(),...) with no GROUP 칼럼s is illegal if there is no GROUP BY clause" por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)" rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY" @@ -3223,17 +3158,16 @@ ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000 swe "Man fÃ¥r ha bÃ¥de GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en frÃ¥ga om man inte har en GROUP BY-del" ukr "Ð—Ð¼Ñ–ÑˆÑƒÐ²Ð°Ð½Ð½Ñ GROUP Ñтовбців (MIN(),MAX(),COUNT()...) з не GROUP ÑтовбцÑми Ñ” забороненим, Ñкщо не має GROUP BY" ER_NONEXISTING_GRANT 42000 - cze "Neexistuje odpov-BÃdajÃcà grant pro uživatele '%-.48s' na stroji '%-.64s'" + cze "Neexistuje odpovÃdajÃcà grant pro uživatele '%-.48s' na stroji '%-.64s'" dan "Denne tilladelse findes ikke for brugeren '%-.48s' pÃ¥ vært '%-.64s'" nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.48s' op host '%-.64s'" eng "There is no such grant defined for user '%-.48s' on host '%-.64s'" - jps "ユーザー '%-.48s' (ホスト '%-.64s' ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼) ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“", est "Sellist õigust ei ole defineeritud kasutajale '%-.48s' masinast '%-.64s'" fre "Un tel droit n'est pas défini pour l'utilisateur '%-.48s' sur l'hôte '%-.64s'" ger "Für Benutzer '%-.48s' auf Host '%-.64s' gibt es keine solche Berechtigung" hun "A '%-.48s' felhasznalonak nincs ilyen joga a '%-.64s' host-on" ita "GRANT non definita per l'utente '%-.48s' dalla macchina '%-.64s'" - jpn "ユーザー '%-.48s' (ホスト '%-.64s' ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼) ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“" + jpn "ユーザー '%-.48s' (ホスト '%-.64s' 上) 
ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。" kor "ì‚¬ìš©ìž '%-.48s' (호스트 '%-.64s')를 위하여 ì •ì˜ëœ 그런 승ì¸ì€ 없습니다." por "Não existe tal permissão (grant) definida para o usuário '%-.48s' no 'host' '%-.64s'" rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.48s' de pe host-ul '%-.64s'" @@ -3243,7 +3177,7 @@ ER_NONEXISTING_GRANT 42000 swe "Det finns inget privilegium definierat för användare '%-.48s' pÃ¥ '%-.64s'" ukr "Повноважень не визначено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача '%-.48s' з хоÑту '%-.64s'" ER_TABLEACCESS_DENIED_ERROR 42000 - cze "%-.32s p-BÅ™Ãkaz nepÅ™Ãstupný pro uživatele: '%s'@'%s' pro tabulku '%-.192s'" + cze "%-.32s pÅ™Ãkaz nepÅ™Ãstupný pro uživatele: '%s'@'%s' pro tabulku '%-.192s'" dan "%-.32s-kommandoen er ikke tilladt for brugeren '%s'@'%s' for tabellen '%-.192s'" nla "%-.32s commando geweigerd voor gebruiker: '%s'@'%s' voor tabel '%-.192s'" eng "%-.32s command denied to user '%s'@'%s' for table '%-.192s'" @@ -3263,7 +3197,7 @@ ER_TABLEACCESS_DENIED_ERROR 42000 swe "%-.32s ej tillÃ¥tet för '%s'@'%s' för tabell '%-.192s'" ukr "%-.32s команда заборонена кориÑтувачу: '%s'@'%s' у таблиці '%-.192s'" ER_COLUMNACCESS_DENIED_ERROR 42000 - cze "%-.32s p-BÅ™Ãkaz nepÅ™Ãstupný pro uživatele: '%s'@'%s' pro sloupec '%-.192s' v tabulce '%-.192s'" + cze "%-.32s pÅ™Ãkaz nepÅ™Ãstupný pro uživatele: '%s'@'%s' pro sloupec '%-.192s' v tabulce '%-.192s'" dan "%-.32s-kommandoen er ikke tilladt for brugeren '%s'@'%s' for kolonne '%-.192s' in tabellen '%-.192s'" nla "%-.32s commando geweigerd voor gebruiker: '%s'@'%s' voor kolom '%-.192s' in tabel '%-.192s'" eng "%-.32s command denied to user '%s'@'%s' for column '%-.192s' in table '%-.192s'" @@ -3283,7 +3217,7 @@ ER_COLUMNACCESS_DENIED_ERROR 42000 swe "%-.32s ej tillÃ¥tet för '%s'@'%s' för kolumn '%-.192s' i tabell '%-.192s'" ukr "%-.32s команда заборонена кориÑтувачу: '%s'@'%s' Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s' у таблиці '%-.192s'" ER_ILLEGAL_GRANT_FOR_TABLE 42000 - cze "Neplatn-Bý pÅ™Ãkaz GRANT/REVOKE. 
ProsÃm, pÅ™eÄtÄ›te si v manuálu, jaká privilegia je možné použÃt." + cze "Neplatný pÅ™Ãkaz GRANT/REVOKE. ProsÃm, pÅ™eÄtÄ›te si v manuálu, jaká privilegia je možné použÃt." dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres." nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden." eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used" @@ -3293,7 +3227,7 @@ ER_ILLEGAL_GRANT_FOR_TABLE 42000 greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used." hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek" ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati." - jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + jpn "ä¸æ£ãª GRANT/REVOKE コマンドã§ã™ã€‚ã©ã®æ¨©é™ã§åˆ©ç”¨å¯èƒ½ã‹ã¯ãƒžãƒ‹ãƒ¥ã‚¢ãƒ«ã‚’å‚ç…§ã—ã¦ä¸‹ã•ã„。" kor "ìž˜ëª»ëœ GRANT/REVOKE ëª…ë ¹. ì–´ë–¤ 권리와 승ì¸ì´ 사용ë˜ì–´ 질 수 있는지 ë©”ë‰´ì–¼ì„ ë³´ì‹œì˜¤." nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." 
@@ -3307,7 +3241,7 @@ ER_ILLEGAL_GRANT_FOR_TABLE 42000 swe "Felaktigt GRANT-privilegium använt" ukr "Хибна GRANT/REVOKE команда; прочитайте документацію ÑтоÑовно того, Ñкі права можна викориÑтовувати" ER_GRANT_WRONG_HOST_OR_USER 42000 - cze "Argument p-BÅ™Ãkazu GRANT uživatel nebo stroj je pÅ™ÃliÅ¡ dlouhý" + cze "Argument pÅ™Ãkazu GRANT uživatel nebo stroj je pÅ™ÃliÅ¡ dlouhý" dan "Værts- eller brugernavn for langt til GRANT" nla "De host of gebruiker parameter voor GRANT is te lang" eng "The host or user argument to GRANT is too long" @@ -3316,6 +3250,7 @@ ER_GRANT_WRONG_HOST_OR_USER 42000 ger "Das Host- oder User-Argument für GRANT ist zu lang" hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban" ita "L'argomento host o utente per la GRANT e` troppo lungo" + jpn "GRANTコマンドã¸ã®ã€ãƒ›ã‚¹ãƒˆåやユーザーåãŒé•·ã™ãŽã¾ã™ã€‚" kor "승ì¸(GRANT)ì„ ìœ„í•˜ì—¬ 사용한 사용ìžë‚˜ í˜¸ìŠ¤íŠ¸ì˜ ê°’ë“¤ì´ ë„ˆë¬´ ê¹ë‹ˆë‹¤." por "Argumento de 'host' ou de usuário para o GRANT é longo demais" rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung" @@ -3334,7 +3269,7 @@ ER_NO_SUCH_TABLE 42S02 ger "Tabelle '%-.192s.%-.192s' existiert nicht" hun "A '%-.192s.%-.192s' tabla nem letezik" ita "La tabella '%-.192s.%-.192s' non esiste" - jpn "Table '%-.192s.%-.192s' doesn't exist" + jpn "表 '%-.192s.%-.192s' ã¯å˜åœ¨ã—ã¾ã›ã‚“。" kor "í…Œì´ë¸” '%-.192s.%-.192s' 는 존재하지 않습니다." 
nor "Table '%-.192s.%-.192s' doesn't exist" norwegian-ny "Table '%-.192s.%-.192s' doesn't exist" @@ -3348,7 +3283,7 @@ ER_NO_SUCH_TABLE 42S02 swe "Det finns ingen tabell som heter '%-.192s.%-.192s'" ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s.%-.192s' не Ñ–Ñнує" ER_NONEXISTING_TABLE_GRANT 42000 - cze "Neexistuje odpov-BÃdajÃcà grant pro uživatele '%-.48s' na stroji '%-.64s' pro tabulku '%-.192s'" + cze "Neexistuje odpovÃdajÃcà grant pro uživatele '%-.48s' na stroji '%-.64s' pro tabulku '%-.192s'" dan "Denne tilladelse eksisterer ikke for brugeren '%-.48s' pÃ¥ vært '%-.64s' for tabellen '%-.192s'" nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.48s' op host '%-.64s' op tabel '%-.192s'" eng "There is no such grant defined for user '%-.48s' on host '%-.64s' on table '%-.192s'" @@ -3357,6 +3292,7 @@ ER_NONEXISTING_TABLE_GRANT 42000 ger "Eine solche Berechtigung ist für User '%-.48s' auf Host '%-.64s' an Tabelle '%-.192s' nicht definiert" hun "A '%-.48s' felhasznalo szamara a '%-.64s' host '%-.192s' tablajaban ez a parancs nem engedelyezett" ita "GRANT non definita per l'utente '%-.48s' dalla macchina '%-.64s' sulla tabella '%-.192s'" + jpn "ユーザー '%-.48s' (ホスト '%-.64s' 上) ã®è¡¨ '%-.192s' ã¸ã®æ¨©é™ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“。" kor "ì‚¬ìš©ìž '%-.48s'(호스트 '%-.64s')는 í…Œì´ë¸” '%-.192s'를 사용하기 위하여 ì •ì˜ëœ 승ì¸ì€ 없습니다. 
" por "Não existe tal permissão (grant) definido para o usuário '%-.48s' no 'host' '%-.64s', na tabela '%-.192s'" rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.48s' de pe host-ul '%-.64s' pentru tabela '%-.192s'" @@ -3366,7 +3302,7 @@ ER_NONEXISTING_TABLE_GRANT 42000 swe "Det finns inget privilegium definierat för användare '%-.48s' pÃ¥ '%-.64s' för tabell '%-.192s'" ukr "Повноважень не визначено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача '%-.48s' з хоÑту '%-.64s' Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– '%-.192s'" ER_NOT_ALLOWED_COMMAND 42000 - cze "Pou-Bžitý pÅ™Ãkaz nenà v této verzi MariaDB povolen" + cze "Použitý pÅ™Ãkaz nenà v této verzi MySQL povolen" dan "Den brugte kommando er ikke tilladt med denne udgave af MariaDB" nla "Het used commando is niet toegestaan in deze MariaDB versie" eng "The used command is not allowed with this MariaDB version" @@ -3375,6 +3311,7 @@ ER_NOT_ALLOWED_COMMAND 42000 ger "Der verwendete Befehl ist in dieser MariaDB-Version nicht zulässig" hun "A hasznalt parancs nem engedelyezett ebben a MariaDB verzioban" ita "Il comando utilizzato non e` supportato in questa versione di MariaDB" + jpn "ã“ã®MySQLãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã¯åˆ©ç”¨ã§ããªã„コマンドã§ã™ã€‚" kor "ì‚¬ìš©ëœ ëª…ë ¹ì€ í˜„ìž¬ì˜ MariaDB ë²„ì ¼ì—서는 ì´ìš©ë˜ì§€ 않습니다." 
por "Comando usado não é permitido para esta versão do MariaDB" rum "Comanda folosita nu este permisa pentru aceasta versiune de MariaDB" @@ -3384,7 +3321,7 @@ ER_NOT_ALLOWED_COMMAND 42000 swe "Du kan inte använda detta kommando med denna MariaDB version" ukr "ВикориÑтовувана команда не дозволена у цій верÑÑ–Ñ— MariaDB" ER_SYNTAX_ERROR 42000 - cze "Va-BÅ¡e syntaxe je nÄ›jaká divná" + cze "VaÅ¡e syntaxe je nÄ›jaká divná" dan "Der er en fejl i SQL syntaksen" nla "Er is iets fout in de gebruikte syntax" eng "You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use" @@ -3394,7 +3331,7 @@ ER_SYNTAX_ERROR 42000 greek "You have an error in your SQL syntax" hun "Szintaktikai hiba" ita "Errore di sintassi nella query SQL" - jpn "Something is wrong in your syntax" + jpn "SQL構文エラーã§ã™ã€‚ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã«å¯¾å¿œã™ã‚‹ãƒžãƒ‹ãƒ¥ã‚¢ãƒ«ã‚’å‚ç…§ã—ã¦æ£ã—ã„構文を確èªã—ã¦ãã ã•ã„。" kor "SQL êµ¬ë¬¸ì— ì˜¤ë¥˜ê°€ 있습니다." nor "Something is wrong in your syntax" norwegian-ny "Something is wrong in your syntax" @@ -3408,7 +3345,7 @@ ER_SYNTAX_ERROR 42000 swe "Du har nÃ¥got fel i din syntax" ukr "У Ð²Ð°Ñ Ð¿Ð¾Ð¼Ð¸Ð»ÐºÐ° у ÑинтакÑиÑÑ– SQL" ER_DELAYED_CANT_CHANGE_LOCK - cze "Zpo-BždÄ›ný insert threadu nebyl schopen zÃskat požadovaný zámek pro tabulku %-.192s" + cze "ZpoždÄ›ný insert threadu nebyl schopen zÃskat požadovaný zámek pro tabulku %-.192s" dan "Forsinket indsættelse trÃ¥den (delayed insert thread) kunne ikke opnÃ¥ lÃ¥s pÃ¥ tabellen %-.192s" nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.192s" eng "Delayed insert thread couldn't get requested lock for table %-.192s" @@ -3417,6 +3354,7 @@ ER_DELAYED_CANT_CHANGE_LOCK ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.192s' nicht erhalten" hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.192s tablahoz" ita "Il thread di inserimento ritardato non riesce ad ottenere il lock 
per la tabella %-.192s" + jpn "'Delayed insert'スレッドãŒè¡¨ '%-.192s' ã®ãƒãƒƒã‚¯ã‚’å–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" kor "ì§€ì—°ëœ insert ì“°ë ˆë“œê°€ í…Œì´ë¸” %-.192sì˜ ìš”êµ¬ëœ ë½í‚¹ì„ ì²˜ë¦¬í• ìˆ˜ 없었습니다." por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.192s'" rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.192s" @@ -3426,7 +3364,7 @@ ER_DELAYED_CANT_CHANGE_LOCK swe "DELAYED INSERT-trÃ¥den kunde inte lÃ¥sa tabell '%-.192s'" ukr "Гілка Ð´Ð»Ñ INSERT DELAYED не може отримати Ð±Ð»Ð¾ÐºÑƒÐ²Ð°Ð½Ð½Ñ Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– %-.192s" ER_TOO_MANY_DELAYED_THREADS - cze "P-BÅ™ÃliÅ¡ mnoho zpoždÄ›ných threadů" + cze "PÅ™ÃliÅ¡ mnoho zpoždÄ›ných threadů" dan "For mange slettede trÃ¥de (threads) i brug" nla "Te veel 'delayed' threads in gebruik" eng "Too many delayed threads in use" @@ -3435,6 +3373,7 @@ ER_TOO_MANY_DELAYED_THREADS ger "Zu viele verzögerte (DELAYED) Threads in Verwendung" hun "Tul sok kesletetett thread (delayed)" ita "Troppi threads ritardati in uso" + jpn "'Delayed insert'スレッドãŒå¤šã™ãŽã¾ã™ã€‚" kor "너무 ë§Žì€ ì§€ì—° ì“°ë ˆë“œë¥¼ ì‚¬ìš©í•˜ê³ ìžˆìŠµë‹ˆë‹¤." por "Excesso de 'threads' retardadas (atrasadas) em uso" rum "Prea multe threaduri aminate care sint in uz" @@ -3444,7 +3383,7 @@ ER_TOO_MANY_DELAYED_THREADS swe "Det finns redan 'max_delayed_threads' trÃ¥dar i använding" ukr "Забагато затриманих гілок викориÑтовуєтьÑÑ" ER_ABORTING_CONNECTION 08S01 - cze "Zru-BÅ¡eno spojenà %ld do databáze: '%-.192s' uživatel: '%-.48s' (%-.64s)" + cze "ZruÅ¡eno spojenà %ld do databáze: '%-.192s' uživatel: '%-.48s' (%-.64s)" dan "Afbrudt forbindelse %ld til database: '%-.192s' bruger: '%-.48s' (%-.64s)" nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' (%-.64s)" eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" @@ -3453,7 +3392,7 @@ ER_ABORTING_CONNECTION 08S01 ger "Abbruch der Verbindung %ld zur Datenbank '%-.192s'. 
Benutzer: '%-.48s' (%-.64s)" hun "Megszakitott kapcsolat %ld db: '%-.192s' adatbazishoz, felhasznalo: '%-.48s' (%-.64s)" ita "Interrotta la connessione %ld al db: '%-.192s' utente: '%-.48s' (%-.64s)" - jpn "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" + jpn "接続 %ld ãŒä¸æ–ã•ã‚Œã¾ã—ãŸã€‚データベース: '%-.192s' ユーザー: '%-.48s' (%-.64s)" kor "ë°ì´íƒ€ë² ì´ìŠ¤ ì ‘ì†ì„ 위한 ì—°ê²° %ldê°€ ì¤‘ë‹¨ë¨ : '%-.192s' 사용ìž: '%-.48s' (%-.64s)" nor "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" norwegian-ny "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)" @@ -3467,7 +3406,7 @@ ER_ABORTING_CONNECTION 08S01 swe "Avbröt länken för trÃ¥d %ld till db '%-.192s', användare '%-.48s' (%-.64s)" ukr "Перервано з'Ñ”Ð´Ð½Ð°Ð½Ð½Ñ %ld до бази данних: '%-.192s' кориÑтувача: '%-.48s' (%-.64s)" ER_NET_PACKET_TOO_LARGE 08S01 - cze "Zji-BÅ¡tÄ›n pÅ™Ãchozà packet delÅ¡Ã než 'max_allowed_packet'" + cze "ZjiÅ¡tÄ›n pÅ™Ãchozà packet delÅ¡Ã než 'max_allowed_packet'" dan "Modtog en datapakke som var større end 'max_allowed_packet'" nla "Groter pakket ontvangen dan 'max_allowed_packet'" eng "Got a packet bigger than 'max_allowed_packet' bytes" @@ -3476,6 +3415,7 @@ ER_NET_PACKET_TOO_LARGE 08S01 ger "Empfangenes Paket ist größer als 'max_allowed_packet' Bytes" hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'" ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'" + jpn "'max_allowed_packet'よりも大ããªãƒ‘ケットをå—ä¿¡ã—ã¾ã—ãŸã€‚" kor "'max_allowed_packet'보다 ë”í° íŒ¨í‚·ì„ ë°›ì•˜ìŠµë‹ˆë‹¤." 
por "Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)" rum "Un packet mai mare decit 'max_allowed_packet' a fost primit" @@ -3485,7 +3425,7 @@ ER_NET_PACKET_TOO_LARGE 08S01 swe "Kommunkationspaketet är större än 'max_allowed_packet'" ukr "Отримано пакет більший ніж max_allowed_packet" ER_NET_READ_ERROR_FROM_PIPE 08S01 - cze "Zji-BÅ¡tÄ›na chyba pÅ™i Ätenà z roury spojenÃ" + cze "ZjiÅ¡tÄ›na chyba pÅ™i Ätenà z roury spojenÃ" dan "Fik læsefejl fra forbindelse (connection pipe)" nla "Kreeg leesfout van de verbindings pipe" eng "Got a read error from the connection pipe" @@ -3494,6 +3434,7 @@ ER_NET_READ_ERROR_FROM_PIPE 08S01 ger "Lese-Fehler bei einer Verbindungs-Pipe" hun "Olvasasi hiba a kapcsolat soran" ita "Rilevato un errore di lettura dalla pipe di connessione" + jpn "接続パイプã®èªã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ã§ã™ã€‚" kor "ì—°ê²° 파ì´í”„로부터 ì—러가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤." por "Obteve um erro de leitura no 'pipe' da conexão" rum "Eroare la citire din cauza lui 'connection pipe'" @@ -3503,7 +3444,7 @@ ER_NET_READ_ERROR_FROM_PIPE 08S01 swe "Fick läsfel frÃ¥n klienten vid läsning frÃ¥n 'PIPE'" ukr "Отримано помилку Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð· комунікаційного каналу" ER_NET_FCNTL_ERROR 08S01 - cze "Zji-BÅ¡tÄ›na chyba fcntl()" + cze "ZjiÅ¡tÄ›na chyba fcntl()" dan "Fik fejlmeddelelse fra fcntl()" nla "Kreeg fout van fcntl()" eng "Got an error from fcntl()" @@ -3512,6 +3453,7 @@ ER_NET_FCNTL_ERROR 08S01 ger "fcntl() lieferte einen Fehler" hun "Hiba a fcntl() fuggvenyben" ita "Rilevato un errore da fcntl()" + jpn "fcntl()ãŒã‚¨ãƒ©ãƒ¼ã‚’è¿”ã—ã¾ã—ãŸã€‚" kor "fcntl() 함수로부터 ì—러가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤." 
por "Obteve um erro em fcntl()" rum "Eroare obtinuta de la fcntl()" @@ -3521,7 +3463,7 @@ ER_NET_FCNTL_ERROR 08S01 swe "Fick fatalt fel frÃ¥n 'fcntl()'" ukr "Отримано помилкку від fcntl()" ER_NET_PACKETS_OUT_OF_ORDER 08S01 - cze "P-BÅ™Ãchozà packety v chybném poÅ™adÃ" + cze "PÅ™Ãchozà packety v chybném poÅ™adÃ" dan "Modtog ikke datapakker i korrekt rækkefølge" nla "Pakketten in verkeerde volgorde ontvangen" eng "Got packets out of order" @@ -3530,6 +3472,7 @@ ER_NET_PACKETS_OUT_OF_ORDER 08S01 ger "Pakete nicht in der richtigen Reihenfolge empfangen" hun "Helytelen sorrendben erkezett adatcsomagok" ita "Ricevuti pacchetti non in ordine" + jpn "ä¸æ£ãªé †åºã®ãƒ‘ケットをå—ä¿¡ã—ã¾ã—ãŸã€‚" kor "순서가 맞지않는 íŒ¨í‚·ì„ ë°›ì•˜ìŠµë‹ˆë‹¤." por "Obteve pacotes fora de ordem" rum "Packets care nu sint ordonati au fost gasiti" @@ -3539,7 +3482,7 @@ ER_NET_PACKETS_OUT_OF_ORDER 08S01 swe "Kommunikationspaketen kom i fel ordning" ukr "Отримано пакети у неналежному порÑдку" ER_NET_UNCOMPRESS_ERROR 08S01 - cze "Nemohu rozkomprimovat komunika-BÄnà packet" + cze "Nemohu rozkomprimovat komunikaÄnà packet" dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)" nla "Communicatiepakket kon niet worden gedecomprimeerd" eng "Couldn't uncompress communication packet" @@ -3548,6 +3491,7 @@ ER_NET_UNCOMPRESS_ERROR 08S01 ger "Kommunikationspaket lässt sich nicht entpacken" hun "A kommunikacios adatcsomagok nem tomorithetok ki" ita "Impossibile scompattare i pacchetti di comunicazione" + jpn "圧縮パケットã®å±•é–‹ãŒã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" kor "í†µì‹ íŒ¨í‚·ì˜ ì••ì¶•í•´ì œë¥¼ í• ìˆ˜ 없었습니다." 
por "Não conseguiu descomprimir pacote de comunicação" rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)" @@ -3557,7 +3501,7 @@ ER_NET_UNCOMPRESS_ERROR 08S01 swe "Kunde inte packa up kommunikationspaketet" ukr "Ðе можу декомпреÑувати комунікаційний пакет" ER_NET_READ_ERROR 08S01 - cze "Zji-BÅ¡tÄ›na chyba pÅ™i Ätenà komunikaÄnÃho packetu" + cze "ZjiÅ¡tÄ›na chyba pÅ™i Ätenà komunikaÄnÃho packetu" dan "Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)" nla "Fout bij het lezen van communicatiepakketten" eng "Got an error reading communication packets" @@ -3566,6 +3510,7 @@ ER_NET_READ_ERROR 08S01 ger "Fehler beim Lesen eines Kommunikationspakets" hun "HIba a kommunikacios adatcsomagok olvasasa soran" ita "Rilevato un errore ricevendo i pacchetti di comunicazione" + jpn "パケットã®å—ä¿¡ã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" kor "í†µì‹ íŒ¨í‚·ì„ ì½ëŠ” 중 오류가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤." por "Obteve um erro na leitura de pacotes de comunicação" rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)" @@ -3575,7 +3520,7 @@ ER_NET_READ_ERROR 08S01 swe "Fick ett fel vid läsning frÃ¥n klienten" ukr "Отримано помилку Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ ÐºÐ¾Ð¼ÑƒÐ½Ñ–ÐºÐ°Ñ†Ñ–Ð¹Ð½Ð¸Ñ… пакетів" ER_NET_READ_INTERRUPTED 08S01 - cze "Zji-BÅ¡tÄ›n timeout pÅ™i Ätenà komunikaÄnÃho packetu" + cze "ZjiÅ¡tÄ›n timeout pÅ™i Ätenà komunikaÄnÃho packetu" dan "Timeout-fejl ved læsning af kommunukations-pakker (communication packets)" nla "Timeout bij het lezen van communicatiepakketten" eng "Got timeout reading communication packets" @@ -3584,6 +3529,7 @@ ER_NET_READ_INTERRUPTED 08S01 ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets" hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran" ita "Rilevato un timeout ricevendo i pacchetti di comunicazione" + jpn "パケットã®å—ä¿¡ã§ã‚¿ã‚¤ãƒ アウトãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" kor "í†µì‹ íŒ¨í‚·ì„ ì½ëŠ” 중 timeoutì´ ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤." 
por "Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação" rum "Timeout obtinut citind pachetele de comunicatie (communication packets)" @@ -3593,7 +3539,7 @@ ER_NET_READ_INTERRUPTED 08S01 swe "Fick 'timeout' vid läsning frÃ¥n klienten" ukr "Отримано затримку Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ ÐºÐ¾Ð¼ÑƒÐ½Ñ–ÐºÐ°Ñ†Ñ–Ð¹Ð½Ð¸Ñ… пакетів" ER_NET_ERROR_ON_WRITE 08S01 - cze "Zji-BÅ¡tÄ›na chyba pÅ™i zápisu komunikaÄnÃho packetu" + cze "ZjiÅ¡tÄ›na chyba pÅ™i zápisu komunikaÄnÃho packetu" dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)" nla "Fout bij het schrijven van communicatiepakketten" eng "Got an error writing communication packets" @@ -3602,6 +3548,7 @@ ER_NET_ERROR_ON_WRITE 08S01 ger "Fehler beim Schreiben eines Kommunikationspakets" hun "Hiba a kommunikacios csomagok irasa soran" ita "Rilevato un errore inviando i pacchetti di comunicazione" + jpn "パケットã®é€ä¿¡ã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" kor "í†µì‹ íŒ¨í‚·ì„ ê¸°ë¡í•˜ëŠ” 중 오류가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤." por "Obteve um erro na escrita de pacotes de comunicação" rum "Eroare in scrierea pachetelor de comunicatie (communication packets)" @@ -3611,7 +3558,7 @@ ER_NET_ERROR_ON_WRITE 08S01 swe "Fick ett fel vid skrivning till klienten" ukr "Отримано помилку запиÑу комунікаційних пакетів" ER_NET_WRITE_INTERRUPTED 08S01 - cze "Zji-BÅ¡tÄ›n timeout pÅ™i zápisu komunikaÄnÃho packetu" + cze "ZjiÅ¡tÄ›n timeout pÅ™i zápisu komunikaÄnÃho packetu" dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)" nla "Timeout bij het schrijven van communicatiepakketten" eng "Got timeout writing communication packets" @@ -3620,6 +3567,7 @@ ER_NET_WRITE_INTERRUPTED 08S01 ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets" hun "Idotullepes a kommunikacios csomagok irasa soran" ita "Rilevato un timeout inviando i pacchetti di comunicazione" + jpn "パケットã®é€ä¿¡ã§ã‚¿ã‚¤ãƒ アウトãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" kor "í†µì‹ íŒ¨íŒƒì„ ê¸°ë¡í•˜ëŠ” 중 timeoutì´ ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤." 
por "Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação" rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)" @@ -3629,7 +3577,7 @@ ER_NET_WRITE_INTERRUPTED 08S01 swe "Fick 'timeout' vid skrivning till klienten" ukr "Отримано затримку запиÑу комунікаційних пакетів" ER_TOO_LONG_STRING 42000 - cze "V-Býsledný Å™etÄ›zec je delÅ¡Ã než 'max_allowed_packet'" + cze "Výsledný Å™etÄ›zec je delÅ¡Ã než 'max_allowed_packet'" dan "Strengen med resultater er større end 'max_allowed_packet'" nla "Resultaat string is langer dan 'max_allowed_packet'" eng "Result string is longer than 'max_allowed_packet' bytes" @@ -3638,6 +3586,7 @@ ER_TOO_LONG_STRING 42000 ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes" hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'" ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'" + jpn "çµæžœã®æ–‡å—列㌠'max_allowed_packet' よりも大ãã„ã§ã™ã€‚" por "'String' resultante é mais longa do que 'max_allowed_packet'" rum "Sirul rezultat este mai lung decit 'max_allowed_packet'" rus "Ð ÐµÐ·ÑƒÐ»ÑŒÑ‚Ð¸Ñ€ÑƒÑŽÑ‰Ð°Ñ Ñтрока больше, чем 'max_allowed_packet'" @@ -3646,7 +3595,7 @@ ER_TOO_LONG_STRING 42000 swe "Resultatsträngen är längre än max_allowed_packet" ukr "Строка результату довша ніж max_allowed_packet" ER_TABLE_CANT_HANDLE_BLOB 42000 - cze "Typ pou-Bžité tabulky (%s) nepodporuje BLOB/TEXT sloupce" + cze "Typ použité tabulky (%s) nepodporuje BLOB/TEXT sloupce" dan "Denne tabeltype (%s) understøtter ikke brug af BLOB og TEXT kolonner" nla "Het gebruikte tabel type (%s) ondersteunt geen BLOB/TEXT kolommen" eng "Storage engine %s doesn't support BLOB/TEXT columns" @@ -3663,7 +3612,7 @@ ER_TABLE_CANT_HANDLE_BLOB 42000 swe "Den använda tabelltypen (%s) kan inte hantera BLOB/TEXT-kolumner" ukr "%s таблиці не підтримують BLOB/TEXT Ñтовбці" ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 - cze "Typ pou-Bžité tabulky (%s) nepodporuje AUTO_INCREMENT sloupce" + cze "Typ použité 
tabulky (%s) nepodporuje AUTO_INCREMENT sloupce" dan "Denne tabeltype understøtter (%s) ikke brug af AUTO_INCREMENT kolonner" nla "Het gebruikte tabel type (%s) ondersteunt geen AUTO_INCREMENT kolommen" eng "Storage engine %s doesn't support AUTO_INCREMENT columns" @@ -3680,7 +3629,7 @@ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 swe "Den använda tabelltypen (%s) kan inte hantera AUTO_INCREMENT-kolumner" ukr "%s таблиці не підтримують AUTO_INCREMENT Ñтовбці" ER_DELAYED_INSERT_TABLE_LOCKED - cze "INSERT DELAYED nen-Bà možno s tabulkou '%-.192s' použÃt, protože je zamÄená pomocà LOCK TABLES" + cze "INSERT DELAYED nenà možno s tabulkou '%-.192s' použÃt, protože je zamÄená pomocà LOCK TABLES" dan "INSERT DELAYED kan ikke bruges med tabellen '%-.192s', fordi tabellen er lÃ¥st med LOCK TABLES" nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.192s', vanwege een 'lock met LOCK TABLES" eng "INSERT DELAYED can't be used with table '%-.192s' because it is locked with LOCK TABLES" @@ -3690,7 +3639,7 @@ ER_DELAYED_INSERT_TABLE_LOCKED greek "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" hun "Az INSERT DELAYED nem hasznalhato a '%-.192s' tablahoz, mert a tabla zarolt (LOCK TABLES)" ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.192s', perche` soggetta a lock da 'LOCK TABLES'" - jpn "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" + jpn "表 '%-.192s' ã¯LOCK TABLESã§ãƒãƒƒã‚¯ã•ã‚Œã¦ã„ã‚‹ãŸã‚ã€INSERT DELAYEDを使用ã§ãã¾ã›ã‚“。" kor "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" nor "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" norwegian-ny "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES" @@ -3704,7 +3653,7 @@ ER_DELAYED_INSERT_TABLE_LOCKED swe "INSERT DELAYED kan inte användas med tabell '%-.192s', emedan den är lÃ¥st med LOCK 
TABLES" ukr "INSERT DELAYED не може бути викориÑтано з таблицею '%-.192s', тому що Ñ—Ñ— заблоковано з LOCK TABLES" ER_WRONG_COLUMN_NAME 42000 - cze "Nespr-Bávné jméno sloupce '%-.100s'" + cze "Nesprávné jméno sloupce '%-.100s'" dan "Forkert kolonnenavn '%-.100s'" nla "Incorrecte kolom naam '%-.100s'" eng "Incorrect column name '%-.100s'" @@ -3713,6 +3662,7 @@ ER_WRONG_COLUMN_NAME 42000 ger "Falscher Spaltenname '%-.100s'" hun "Ervenytelen mezonev: '%-.100s'" ita "Nome colonna '%-.100s' non corretto" + jpn "列å '%-.100s' ã¯ä¸æ£ã§ã™ã€‚" por "Nome de coluna '%-.100s' incorreto" rum "Nume increct de coloana '%-.100s'" rus "Ðеверное Ð¸Ð¼Ñ Ñтолбца '%-.100s'" @@ -3726,7 +3676,7 @@ ER_WRONG_KEY_COLUMN 42000 rus "Обработчик таблиц %s не может проиндекÑировать Ñтолбец %`s" ukr "Вказівник таблиц %s не може індекÑувати Ñтовбець %`s" ER_WRONG_MRG_TABLE - cze "V-BÅ¡echny tabulky v MERGE tabulce nejsou definovány stejnÄ›" + cze "VÅ¡echny tabulky v MERGE tabulce nejsou definovány stejnÄ›" dan "Tabellerne i MERGE er ikke defineret ens" nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities" eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist" @@ -3735,7 +3685,7 @@ ER_WRONG_MRG_TABLE ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert" hun "A MERGE tablaban talalhato tablak definicioja nem azonos" ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica" - jpn "All tables in the MERGE table are not defined identically" + jpn "MERGE表ã®æ§‹æˆè¡¨ãŒã‚ªãƒ¼ãƒ—ンã§ãã¾ã›ã‚“。列定義ãŒç•°ãªã‚‹ã‹ã€MyISAM表ã§ã¯ãªã„ã‹ã€å˜åœ¨ã—ã¾ã›ã‚“。" kor "All tables in the MERGE table are not defined identically" nor "All tables in the MERGE table are not defined identically" norwegian-ny "All tables in the MERGE table are not defined identically" @@ -3749,7 +3699,7 @@ ER_WRONG_MRG_TABLE swe "Tabellerna i MERGE-tabellen är inte identiskt definierade" ukr "Таблиці у MERGE TABLE мають різну Ñтруктуру" 
ER_DUP_UNIQUE 23000 - cze "Kv-Bůli unique constraintu nemozu zapsat do tabulky '%-.192s'" + cze "Kvůli unique constraintu nemozu zapsat do tabulky '%-.192s'" dan "Kan ikke skrive til tabellen '%-.192s' fordi det vil bryde CONSTRAINT regler" nla "Kan niet opslaan naar table '%-.192s' vanwege 'unique' beperking" eng "Can't write, because of unique constraint, to table '%-.192s'" @@ -3757,6 +3707,7 @@ ER_DUP_UNIQUE 23000 fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.192s'" ger "Schreiben in Tabelle '%-.192s' nicht möglich wegen einer Eindeutigkeitsbeschränkung (unique constraint)" hun "A '%-.192s' nem irhato, az egyedi mezok miatt" + jpn "一æ„性制約é•åã®ãŸã‚ã€è¡¨ '%-.192s' ã«æ›¸ãè¾¼ã‚ã¾ã›ã‚“。" ita "Impossibile scrivere nella tabella '%-.192s' per limitazione di unicita`" por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.192s'" rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.192s'" @@ -3766,7 +3717,7 @@ ER_DUP_UNIQUE 23000 swe "Kan inte skriva till tabell '%-.192s'; UNIQUE-test" ukr "Ðе можу запиÑати до таблиці '%-.192s', з причини вимог унікальноÑÑ‚Ñ–" ER_BLOB_KEY_WITHOUT_LENGTH 42000 - cze "BLOB sloupec '%-.192s' je pou-Bžit ve specifikaci klÃÄe bez délky" + cze "BLOB sloupec '%-.192s' je použit ve specifikaci klÃÄe bez délky" dan "BLOB kolonnen '%-.192s' brugt i nøglespecifikation uden nøglelængde" nla "BLOB kolom '%-.192s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte" eng "BLOB/TEXT column '%-.192s' used in key specification without a key length" @@ -3776,7 +3727,7 @@ ER_BLOB_KEY_WITHOUT_LENGTH 42000 greek "BLOB column '%-.192s' used in key specification without a key length" hun "BLOB mezo '%-.192s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul" ita "La colonna '%-.192s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza" - jpn "BLOB column '%-.192s' used in key specification without a key length" + jpn "BLOB列 '%-.192s' 
ã‚’ã‚ーã«ä½¿ç”¨ã™ã‚‹ã«ã¯é•·ã•æŒ‡å®šãŒå¿…è¦ã§ã™ã€‚" kor "BLOB column '%-.192s' used in key specification without a key length" nor "BLOB column '%-.192s' used in key specification without a key length" norwegian-ny "BLOB column '%-.192s' used in key specification without a key length" @@ -3790,7 +3741,7 @@ ER_BLOB_KEY_WITHOUT_LENGTH 42000 swe "Du har inte angett nÃ¥gon nyckellängd för BLOB '%-.192s'" ukr "Стовбець BLOB '%-.192s' викориÑтано у визначенні ключа без Ð²ÐºÐ°Ð·Ð°Ð½Ð½Ñ Ð´Ð¾Ð²Ð¶Ð¸Ð½Ð¸ ключа" ER_PRIMARY_CANT_HAVE_NULL 42000 - cze "V-BÅ¡echny Äásti primárnÃho klÃÄe musejà být NOT NULL; pokud potÅ™ebujete NULL, použijte UNIQUE" + cze "VÅ¡echny Äásti primárnÃho klÃÄe musejà být NOT NULL; pokud potÅ™ebujete NULL, použijte UNIQUE" dan "Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet" nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken" eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead" @@ -3799,6 +3750,7 @@ ER_PRIMARY_CANT_HAVE_NULL 42000 ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. Wenn NULL in einem Schlüssel benötigt wird, muss ein UNIQUE-Schlüssel verwendet werden" hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot" ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE" + jpn "PRIMARY KEYã®åˆ—ã¯å…¨ã¦NOT NULLã§ãªã‘ã‚Œã°ã„ã‘ã¾ã›ã‚“。UNIQUE索引ã§ã‚ã‚Œã°NULLã‚’å«ã‚€ã“ã¨ãŒå¯èƒ½ã§ã™ã€‚" por "Todas as partes de uma chave primária devem ser não-nulas. 
Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar" rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb" rus "Ð’Ñе чаÑти первичного ключа (PRIMARY KEY) должны быть определены как NOT NULL; ЕÑли вам нужна поддержка величин NULL в ключе, воÑпользуйтеÑÑŒ индекÑом UNIQUE" @@ -3807,7 +3759,7 @@ ER_PRIMARY_CANT_HAVE_NULL 42000 swe "Alla delar av en PRIMARY KEY mÃ¥ste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället" ukr "УÑÑ– чаÑтини PRIMARY KEY повинні бути NOT NULL; Якщо ви потребуєте NULL у ключі, ÑкориÑтайтеÑÑ UNIQUE" ER_TOO_MANY_ROWS 42000 - cze "V-Býsledek obsahuje vÃce než jeden řádek" + cze "Výsledek obsahuje vÃce než jeden řádek" dan "Resultatet bestod af mere end een række" nla "Resultaat bevatte meer dan een rij" eng "Result consisted of more than one row" @@ -3816,6 +3768,7 @@ ER_TOO_MANY_ROWS 42000 ger "Ergebnis besteht aus mehr als einer Zeile" hun "Az eredmeny tobb, mint egy sort tartalmaz" ita "Il risultato consiste di piu` di una riga" + jpn "çµæžœãŒ2行以上ã§ã™ã€‚" por "O resultado consistiu em mais do que uma linha" rum "Resultatul constista din mai multe linii" rus "Ð’ результате возвращена более чем одна Ñтрока" @@ -3824,7 +3777,7 @@ ER_TOO_MANY_ROWS 42000 swe "Resultet bestod av mera än en rad" ukr "Результат знаходитьÑÑ Ñƒ більше ніж одній Ñтроці" ER_REQUIRES_PRIMARY_KEY 42000 - cze "Tento typ tabulky vy-Bžaduje primárnà klÃÄ" + cze "Tento typ tabulky vyžaduje primárnà klÃÄ" dan "Denne tabeltype kræver en primærnøgle" nla "Dit tabel type heeft een primaire zoeksleutel nodig" eng "This table type requires a primary key" @@ -3833,6 +3786,7 @@ ER_REQUIRES_PRIMARY_KEY 42000 ger "Dieser Tabellentyp benötigt einen Primärschlüssel (PRIMARY KEY)" hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo" ita "Questo tipo di tabella richiede una chiave primaria" + jpn 
"使用ã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚¨ãƒ³ã‚¸ãƒ³ã§ã¯ã€PRIMARY KEYãŒå¿…è¦ã§ã™ã€‚" por "Este tipo de tabela requer uma chave primária" rum "Aceast tip de tabela are nevoie de o cheie primara" rus "Ðтот тип таблицы требует Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»ÐµÐ½Ð¸Ñ Ð¿ÐµÑ€Ð²Ð¸Ñ‡Ð½Ð¾Ð³Ð¾ ключа" @@ -3841,7 +3795,7 @@ ER_REQUIRES_PRIMARY_KEY 42000 swe "Denna tabelltyp kräver en PRIMARY KEY" ukr "Цей тип таблиці потребує первинного ключа" ER_NO_RAID_COMPILED - cze "Tato verze MariaDB nen-Bà zkompilována s podporou RAID" + cze "Tato verze MySQL nenà zkompilována s podporou RAID" dan "Denne udgave af MariaDB er ikke oversat med understøttelse af RAID" nla "Deze versie van MariaDB is niet gecompileerd met RAID ondersteuning" eng "This version of MariaDB is not compiled with RAID support" @@ -3850,6 +3804,7 @@ ER_NO_RAID_COMPILED ger "Diese MariaDB-Version ist nicht mit RAID-Unterstützung kompiliert" hun "Ezen leforditott MariaDB verzio nem tartalmaz RAID support-ot" ita "Questa versione di MYSQL non e` compilata con il supporto RAID" + jpn "ã“ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®MySQLã¯RAIDサãƒãƒ¼ãƒˆã‚’å«ã‚ã¦ã‚³ãƒ³ãƒ‘イルã•ã‚Œã¦ã„ã¾ã›ã‚“。" por "Esta versão do MariaDB não foi compilada com suporte a RAID" rum "Aceasta versiune de MariaDB, nu a fost compilata cu suport pentru RAID" rus "Ðта верÑÐ¸Ñ MariaDB Ñкомпилирована без поддержки RAID" @@ -3858,7 +3813,7 @@ ER_NO_RAID_COMPILED swe "Denna version av MariaDB är inte kompilerad med RAID" ukr "Ð¦Ñ Ð²ÐµÑ€ÑÑ–Ñ MariaDB не зкомпільована з підтримкою RAID" ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE - cze "Update tabulky bez WHERE s kl-BÃÄem nenà v módu bezpeÄných update dovoleno" + cze "Update tabulky bez WHERE s klÃÄem nenà v módu bezpeÄných update dovoleno" dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt" nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom" eng "You are using safe update mode and you tried to update a table without a WHERE that 
uses a KEY column" @@ -3867,6 +3822,7 @@ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE ger "MariaDB läuft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel ein KEY-Feld anzugeben" hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column" ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave" + jpn "'safe update mode'ã§ã€ç´¢å¼•ã‚’利用ã™ã‚‹WHEREå¥ã®ç„¡ã„更新処ç†ã‚’実行ã—よã†ã¨ã—ã¾ã—ãŸã€‚" por "Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave" rus "Ð’Ñ‹ работаете в режиме безопаÑных обновлений (safe update mode) и попробовали изменить таблицу без иÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ ÐºÐ»ÑŽÑ‡ÐµÐ²Ð¾Ð³Ð¾ Ñтолбца в чаÑти WHERE" serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljuÄa" @@ -3874,7 +3830,7 @@ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel" ukr "Ви у режимі безпечного Ð¾Ð½Ð¾Ð²Ð»ÐµÐ½Ð½Ñ Ñ‚Ð° намагаєтеÑÑŒ оновити таблицю без оператора WHERE, що викориÑтовує KEY Ñтовбець" ER_KEY_DOES_NOT_EXITS 42000 S1009 - cze "Kl-BÃÄ '%-.192s' v tabulce '%-.192s' neexistuje" + cze "KlÃÄ '%-.192s' v tabulce '%-.192s' neexistuje" dan "Nøglen '%-.192s' eksisterer ikke i tabellen '%-.192s'" nla "Zoeksleutel '%-.192s' bestaat niet in tabel '%-.192s'" eng "Key '%-.192s' doesn't exist in table '%-.192s'" @@ -3883,6 +3839,7 @@ ER_KEY_DOES_NOT_EXITS 42000 S1009 ger "Schlüssel '%-.192s' existiert in der Tabelle '%-.192s' nicht" hun "A '%-.192s' kulcs nem letezik a '%-.192s' tablaban" ita "La chiave '%-.192s' non esiste nella tabella '%-.192s'" + jpn "索引 '%-.192s' ã¯è¡¨ '%-.192s' ã«ã¯å˜åœ¨ã—ã¾ã›ã‚“。" por "Chave '%-.192s' não existe na tabela '%-.192s'" rus "Ключ '%-.192s' не ÑущеÑтвует в таблице '%-.192s'" serbian 
"KljuÄ '%-.192s' ne postoji u tabeli '%-.192s'" @@ -3890,7 +3847,7 @@ ER_KEY_DOES_NOT_EXITS 42000 S1009 swe "Nyckel '%-.192s' finns inte in tabell '%-.192s'" ukr "Ключ '%-.192s' не Ñ–Ñнує в таблиці '%-.192s'" ER_CHECK_NO_SUCH_TABLE 42000 - cze "Nemohu otev-BÅ™Ãt tabulku" + cze "Nemohu otevÅ™Ãt tabulku" dan "Kan ikke Ã¥bne tabellen" nla "Kan tabel niet openen" eng "Can't open table" @@ -3899,6 +3856,7 @@ ER_CHECK_NO_SUCH_TABLE 42000 ger "Kann Tabelle nicht öffnen" hun "Nem tudom megnyitni a tablat" ita "Impossibile aprire la tabella" + jpn "表をオープンã§ãã¾ã›ã‚“。" por "Não pode abrir a tabela" rus "Ðевозможно открыть таблицу" serbian "Ne mogu da otvorim tabelu" @@ -3916,7 +3874,7 @@ ER_CHECK_NOT_IMPLEMENTED 42000 greek "The handler for the table doesn't support %s" hun "A tabla kezeloje (handler) nem tamogatja az %s" ita "Il gestore per la tabella non supporta il %s" - jpn "The handler for the table doesn't support %s" + jpn "ã“ã®è¡¨ã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚¨ãƒ³ã‚¸ãƒ³ã¯ '%s' を利用ã§ãã¾ã›ã‚“。" kor "The handler for the table doesn't support %s" nor "The handler for the table doesn't support %s" norwegian-ny "The handler for the table doesn't support %s" @@ -3930,7 +3888,7 @@ ER_CHECK_NOT_IMPLEMENTED 42000 swe "Tabellhanteraren för denna tabell kan inte göra %s" ukr "Вказівник таблиці не підтримуе %s" ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000 - cze "Proveden-Bà tohoto pÅ™Ãkazu nenà v transakci dovoleno" + cze "Provedenà tohoto pÅ™Ãkazu nenà v transakci dovoleno" dan "Du mÃ¥ ikke bruge denne kommando i en transaktion" nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie" eng "You are not allowed to execute this command in a transaction" @@ -3939,6 +3897,7 @@ ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000 ger "Sie dürfen diesen Befehl nicht in einer Transaktion ausführen" hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban" ita "Non puoi eseguire questo comando in una transazione" + jpn 
"ã“ã®ã‚³ãƒžãƒ³ãƒ‰ã¯ãƒˆãƒ©ãƒ³ã‚¶ã‚¯ã‚·ãƒ§ãƒ³å†…ã§å®Ÿè¡Œã§ãã¾ã›ã‚“。" por "Não lhe é permitido executar este comando em uma transação" rus "Вам не разрешено выполнÑÑ‚ÑŒ Ñту команду в транзакции" serbian "Nije Vam dozvoljeno da izvrÅ¡ite ovu komandu u transakciji" @@ -3946,7 +3905,7 @@ ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000 swe "Du fÃ¥r inte utföra detta kommando i en transaktion" ukr "Вам не дозволено виконувати цю команду в транзакції" ER_ERROR_DURING_COMMIT - cze "Chyba %M p-BÅ™i COMMIT" + cze "Chyba %M pÅ™i COMMIT" dan "Modtog fejl %M mens kommandoen COMMIT blev udført" nla "Kreeg fout %M tijdens COMMIT" eng "Got error %M during COMMIT" @@ -3955,6 +3914,7 @@ ER_ERROR_DURING_COMMIT ger "Fehler %M beim COMMIT" hun "%M hiba a COMMIT vegrehajtasa soran" ita "Rilevato l'errore %M durante il COMMIT" + jpn "COMMITä¸ã«ã‚¨ãƒ©ãƒ¼ %M ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" por "Obteve erro %M durante COMMIT" rus "Получена ошибка %M в процеÑÑе COMMIT" serbian "GreÅ¡ka %M za vreme izvrÅ¡avanja komande 'COMMIT'" @@ -3962,7 +3922,7 @@ ER_ERROR_DURING_COMMIT swe "Fick fel %M vid COMMIT" ukr "Отримано помилку %M під Ñ‡Ð°Ñ COMMIT" ER_ERROR_DURING_ROLLBACK - cze "Chyba %M p-BÅ™i ROLLBACK" + cze "Chyba %M pÅ™i ROLLBACK" dan "Modtog fejl %M mens kommandoen ROLLBACK blev udført" nla "Kreeg fout %M tijdens ROLLBACK" eng "Got error %M during ROLLBACK" @@ -3971,6 +3931,7 @@ ER_ERROR_DURING_ROLLBACK ger "Fehler %M beim ROLLBACK" hun "%M hiba a ROLLBACK vegrehajtasa soran" ita "Rilevato l'errore %M durante il ROLLBACK" + jpn "ROLLBACKä¸ã«ã‚¨ãƒ©ãƒ¼ %M ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" por "Obteve erro %M durante ROLLBACK" rus "Получена ошибка %M в процеÑÑе ROLLBACK" serbian "GreÅ¡ka %M za vreme izvrÅ¡avanja komande 'ROLLBACK'" @@ -3978,7 +3939,7 @@ ER_ERROR_DURING_ROLLBACK swe "Fick fel %M vid ROLLBACK" ukr "Отримано помилку %M під Ñ‡Ð°Ñ ROLLBACK" ER_ERROR_DURING_FLUSH_LOGS - cze "Chyba %M p-BÅ™i FLUSH_LOGS" + cze "Chyba %M pÅ™i FLUSH_LOGS" dan "Modtog fejl %M mens kommandoen FLUSH_LOGS blev udført" nla "Kreeg fout %M 
tijdens FLUSH_LOGS" eng "Got error %M during FLUSH_LOGS" @@ -3987,6 +3948,7 @@ ER_ERROR_DURING_FLUSH_LOGS ger "Fehler %M bei FLUSH_LOGS" hun "%M hiba a FLUSH_LOGS vegrehajtasa soran" ita "Rilevato l'errore %M durante il FLUSH_LOGS" + jpn "FLUSH_LOGSä¸ã«ã‚¨ãƒ©ãƒ¼ %M ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" por "Obteve erro %M durante FLUSH_LOGS" rus "Получена ошибка %M в процеÑÑе FLUSH_LOGS" serbian "GreÅ¡ka %M za vreme izvrÅ¡avanja komande 'FLUSH_LOGS'" @@ -3994,7 +3956,7 @@ ER_ERROR_DURING_FLUSH_LOGS swe "Fick fel %M vid FLUSH_LOGS" ukr "Отримано помилку %M під Ñ‡Ð°Ñ FLUSH_LOGS" ER_ERROR_DURING_CHECKPOINT - cze "Chyba %M p-BÅ™i CHECKPOINT" + cze "Chyba %M pÅ™i CHECKPOINT" dan "Modtog fejl %M mens kommandoen CHECKPOINT blev udført" nla "Kreeg fout %M tijdens CHECKPOINT" eng "Got error %M during CHECKPOINT" @@ -4003,6 +3965,7 @@ ER_ERROR_DURING_CHECKPOINT ger "Fehler %M bei CHECKPOINT" hun "%M hiba a CHECKPOINT vegrehajtasa soran" ita "Rilevato l'errore %M durante il CHECKPOINT" + jpn "CHECKPOINTä¸ã«ã‚¨ãƒ©ãƒ¼ %M ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚" por "Obteve erro %M durante CHECKPOINT" rus "Получена ошибка %M в процеÑÑе CHECKPOINT" serbian "GreÅ¡ka %M za vreme izvrÅ¡avanja komande 'CHECKPOINT'" @@ -4010,7 +3973,7 @@ ER_ERROR_DURING_CHECKPOINT swe "Fick fel %M vid CHECKPOINT" ukr "Отримано помилку %M під Ñ‡Ð°Ñ CHECKPOINT" ER_NEW_ABORTING_CONNECTION 08S01 - cze "Spojen-Bà %ld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo pÅ™eruÅ¡eno" + cze "Spojenà %ld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo pÅ™eruÅ¡eno" dan "Afbrød forbindelsen %ld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)" nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s' (%-.64s)" eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)" @@ -4018,6 +3981,7 @@ ER_NEW_ABORTING_CONNECTION 08S01 fre "Connection %ld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s' (%-.64s)" ger "Abbruch der 
Verbindung %ld zur Datenbank '%-.192s'. Benutzer: '%-.48s', Host: '%-.64s' (%-.64s)" ita "Interrotta la connessione %ld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s' (%-.64s)" + jpn "接続 %ld ãŒä¸æ–ã•ã‚Œã¾ã—ãŸã€‚データベース: '%-.192s' ユーザー: '%-.48s' ホスト: '%-.64s' (%-.64s)" por "Conexão %ld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')" rus "Прервано Ñоединение %ld к базе данных '%-.192s' Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ '%-.48s' Ñ Ñ…Ð¾Ñта '%-.64s' (%-.64s)" serbian "Prekinuta konekcija broj %ld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)" @@ -4029,12 +3993,13 @@ ER_unused_2 ER_FLUSH_MASTER_BINLOG_CLOSED eng "Binlog closed, cannot RESET MASTER" ger "Binlog geschlossen. Kann RESET MASTER nicht ausführen" + jpn "ãƒã‚¤ãƒŠãƒªãƒã‚°ãŒã‚¯ãƒãƒ¼ã‚ºã•ã‚Œã¦ã„ã¾ã™ã€‚RESET MASTER を実行ã§ãã¾ã›ã‚“。" por "Binlog fechado. Não pode fazer RESET MASTER" rus "Двоичный журнал Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð·Ð°ÐºÑ€Ñ‹Ñ‚, невозможно выполнить RESET MASTER" serbian "Binarni log file zatvoren, ne mogu da izvrÅ¡im komandu 'RESET MASTER'" ukr "Реплікаційний лог закрито, не можу виконати RESET MASTER" ER_INDEX_REBUILD - cze "P-BÅ™ebudovánà indexu dumpnuté tabulky '%-.192s' nebylo úspěšné" + cze "PÅ™ebudovánà indexu dumpnuté tabulky '%-.192s' nebylo úspěšné" dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.192s'" nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.192s'" eng "Failed rebuilding the index of dumped table '%-.192s'" @@ -4043,6 +4008,7 @@ ER_INDEX_REBUILD greek "Failed rebuilding the index of dumped table '%-.192s'" hun "Failed rebuilding the index of dumped table '%-.192s'" ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.192s'" + jpn "ダンプ表 '%-.192s' ã®ç´¢å¼•å†æ§‹ç¯‰ã«å¤±æ•—ã—ã¾ã—ãŸã€‚" por "Falhou na reconstrução do Ãndice da tabela 'dumped' '%-.192s'" rus "Ошибка переÑтройки индекÑа Ñохраненной таблицы '%-.192s'" serbian "Izgradnja indeksa dump-ovane tabele '%-.192s' nije uspela" @@ -4056,20 
+4022,22 @@ ER_MASTER fre "Erreur reçue du maître: '%-.64s'" ger "Fehler vom Master: '%-.64s'" ita "Errore dal master: '%-.64s" + jpn "マスターã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿ: '%-.64s'" por "Erro no 'master' '%-.64s'" rus "Ошибка от головного Ñервера: '%-.64s'" serbian "GreÅ¡ka iz glavnog servera '%-.64s' u klasteru" spa "Error del master: '%-.64s'" - swe "Fick en master: '%-.64s'" + swe "Fel frÃ¥n master: '%-.64s'" ukr "Помилка від головного: '%-.64s'" ER_MASTER_NET_READ 08S01 - cze "S-BÃÅ¥ová chyba pÅ™i Ätenà z masteru" + cze "SÃÅ¥ová chyba pÅ™i Ätenà z masteru" dan "Netværksfejl ved læsning fra master" nla "Net fout tijdens lezen van master" eng "Net error reading from master" fre "Erreur de lecture réseau reçue du maître" ger "Netzfehler beim Lesen vom Master" ita "Errore di rete durante la ricezione dal master" + jpn "マスターã‹ã‚‰ã®ãƒ‡ãƒ¼ã‚¿å—ä¿¡ä¸ã®ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã‚¨ãƒ©ãƒ¼" por "Erro de rede lendo do 'master'" rus "Возникла ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð² процеÑÑе коммуникации Ñ Ð³Ð¾Ð»Ð¾Ð²Ð½Ñ‹Ð¼ Ñервером" serbian "GreÅ¡ka u primanju mrežnih paketa sa glavnog servera u klasteru" @@ -4077,13 +4045,14 @@ ER_MASTER_NET_READ 08S01 swe "Fick nätverksfel vid läsning frÃ¥n master" ukr "Мережева помилка Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð²Ñ–Ð´ головного" ER_MASTER_NET_WRITE 08S01 - cze "S-BÃÅ¥ová chyba pÅ™i zápisu na master" + cze "SÃÅ¥ová chyba pÅ™i zápisu na master" dan "Netværksfejl ved skrivning til master" nla "Net fout tijdens schrijven naar master" eng "Net error writing to master" fre "Erreur d'écriture réseau reçue du maître" ger "Netzfehler beim Schreiben zum Master" ita "Errore di rete durante l'invio al master" + jpn "マスターã¸ã®ãƒ‡ãƒ¼ã‚¿é€ä¿¡ä¸ã®ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã‚¨ãƒ©ãƒ¼" por "Erro de rede gravando no 'master'" rus "Возникла ошибка запиÑи в процеÑÑе коммуникации Ñ Ð³Ð¾Ð»Ð¾Ð²Ð½Ñ‹Ð¼ Ñервером" serbian "GreÅ¡ka u slanju mrežnih paketa na glavni server u klasteru" @@ -4091,7 +4060,7 @@ ER_MASTER_NET_WRITE 08S01 swe "Fick nätverksfel vid skrivning till master" ukr "Мережева помилка запиÑу до головного" 
ER_FT_MATCHING_KEY_NOT_FOUND - cze "-BŽádný sloupec nemá vytvoÅ™en fulltextový index" + cze "Žádný sloupec nemá vytvoÅ™en fulltextový index" dan "Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen" nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst" eng "Can't find FULLTEXT index matching the column list" @@ -4099,6 +4068,7 @@ ER_FT_MATCHING_KEY_NOT_FOUND fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes" ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht" ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne" + jpn "列リストã«å¯¾å¿œã™ã‚‹å…¨æ–‡ç´¢å¼•(FULLTEXT)ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。" por "Não pode encontrar um Ãndice para o texto todo que combine com a lista de colunas" rus "Ðевозможно отыÑкать полнотекÑтовый (FULLTEXT) индекÑ, ÑоответÑтвующий ÑпиÑку Ñтолбцов" serbian "Ne mogu da pronaÄ‘em 'FULLTEXT' indeks koli odgovara listi kolona" @@ -4106,7 +4076,7 @@ ER_FT_MATCHING_KEY_NOT_FOUND swe "Hittar inte ett FULLTEXT-index i kolumnlistan" ukr "Ðе можу знайти FULLTEXT індекÑ, що відповідає переліку Ñтовбців" ER_LOCK_OR_ACTIVE_TRANSACTION - cze "Nemohu prov-Bést zadaný pÅ™Ãkaz, protože existujà aktivnà zamÄené tabulky nebo aktivnà transakce" + cze "Nemohu provést zadaný pÅ™Ãkaz, protože existujà aktivnà zamÄené tabulky nebo aktivnà transakce" dan "Kan ikke udføre den givne kommando fordi der findes aktive, lÃ¥ste tabeller eller fordi der udføres en transaktion" nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie" eng "Can't execute the given command because you have active locked tables or an active transaction" @@ -4114,6 +4084,7 @@ ER_LOCK_OR_ACTIVE_TRANSACTION fre "Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active" ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen" ita "Impossibile eseguire il 
comando richiesto: tabelle sotto lock o transazione in atto" + jpn "ã™ã§ã«ã‚¢ã‚¯ãƒ†ã‚£ãƒ–ãªè¡¨ãƒãƒƒã‚¯ã‚„トランザクションãŒã‚ã‚‹ãŸã‚ã€ã‚³ãƒžãƒ³ãƒ‰ã‚’実行ã§ãã¾ã›ã‚“。" por "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa" rus "Ðевозможно выполнить указанную команду, поÑкольку у Ð²Ð°Ñ Ð¿Ñ€Ð¸ÑутÑтвуют активно заблокированные таблица или Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð°Ñ Ñ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ð¸Ñ" serbian "Ne mogu da izvrÅ¡im datu komandu zbog toga Å¡to su tabele zakljuÄane ili je transakcija u toku" @@ -4121,7 +4092,7 @@ ER_LOCK_OR_ACTIVE_TRANSACTION swe "Kan inte utföra kommandot emedan du har en lÃ¥st tabell eller an aktiv transaktion" ukr "Ðе можу виконати подану команду тому, що Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ Ð·Ð°Ð±Ð»Ð¾ÐºÐ¾Ð²Ð°Ð½Ð° або виконуєтьÑÑ Ñ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ñ–Ñ" ER_UNKNOWN_SYSTEM_VARIABLE - cze "Nezn-Bámá systémová promÄ›nná '%-.64s'" + cze "Neznámá systémová promÄ›nná '%-.64s'" dan "Ukendt systemvariabel '%-.64s'" nla "Onbekende systeem variabele '%-.64s'" eng "Unknown system variable '%-.64s'" @@ -4129,6 +4100,7 @@ ER_UNKNOWN_SYSTEM_VARIABLE fre "Variable système '%-.64s' inconnue" ger "Unbekannte Systemvariable '%-.64s'" ita "Variabile di sistema '%-.64s' sconosciuta" + jpn "'%-.64s' ã¯ä¸æ˜Žãªã‚·ã‚¹ãƒ†ãƒ 変数ã§ã™ã€‚" por "Variável de sistema '%-.64s' desconhecida" rus "ÐеизвеÑÑ‚Ð½Ð°Ñ ÑиÑÑ‚ÐµÐ¼Ð½Ð°Ñ Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s'" serbian "Nepoznata sistemska promenljiva '%-.64s'" @@ -4136,7 +4108,7 @@ ER_UNKNOWN_SYSTEM_VARIABLE swe "Okänd systemvariabel: '%-.64s'" ukr "Ðевідома ÑиÑтемна змінна '%-.64s'" ER_CRASHED_ON_USAGE - cze "Tabulka '%-.192s' je ozna-BÄena jako poruÅ¡ená a mÄ›la by být opravena" + cze "Tabulka '%-.192s' je oznaÄena jako poruÅ¡ená a mÄ›la by být opravena" dan "Tabellen '%-.192s' er markeret med fejl og bør repareres" nla "Tabel '%-.192s' staat als gecrashed gemarkeerd en dient te worden gerepareerd" eng "Table '%-.192s' is marked as crashed and should be repaired" @@ -4144,6 +4116,7 @@ ER_CRASHED_ON_USAGE fre "La table '%-.192s' est marquée 
'crashed' et devrait être réparée" ger "Tabelle '%-.192s' ist als defekt markiert und sollte repariert werden" ita "La tabella '%-.192s' e` segnalata come corrotta e deve essere riparata" + jpn "表 '%-.192s' ã¯å£Šã‚Œã¦ã„ã¾ã™ã€‚修復ãŒå¿…è¦ã§ã™ã€‚" por "Tabela '%-.192s' está marcada como danificada e deve ser reparada" rus "Таблица '%-.192s' помечена как иÑÐ¿Ð¾Ñ€Ñ‡ÐµÐ½Ð½Ð°Ñ Ð¸ должна пройти проверку и ремонт" serbian "Tabela '%-.192s' je markirana kao oÅ¡tećena i trebala bi biti popravljena" @@ -4151,7 +4124,7 @@ ER_CRASHED_ON_USAGE swe "Tabell '%-.192s' är trasig och bör repareras med REPAIR TABLE" ukr "Таблицю '%-.192s' марковано Ñк зіпÑовану та Ñ—Ñ— потрібно відновити" ER_CRASHED_ON_REPAIR - cze "Tabulka '%-.192s' je ozna-BÄena jako poruÅ¡ená a poslednà (automatická?) oprava se nezdaÅ™ila" + cze "Tabulka '%-.192s' je oznaÄena jako poruÅ¡ená a poslednà (automatická?) oprava se nezdaÅ™ila" dan "Tabellen '%-.192s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede" nla "Tabel '%-.192s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte" eng "Table '%-.192s' is marked as crashed and last (automatic?) repair failed" @@ -4159,6 +4132,7 @@ ER_CRASHED_ON_REPAIR fre "La table '%-.192s' est marquée 'crashed' et le dernier 'repair' a échoué" ger "Tabelle '%-.192s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl" ita "La tabella '%-.192s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita" + jpn "表 '%-.192s' ã¯å£Šã‚Œã¦ã„ã¾ã™ã€‚修復(自動?)ã«ã‚‚失敗ã—ã¦ã„ã¾ã™ã€‚" por "Tabela '%-.192s' está marcada como danificada e a última reparação (automática?) falhou" rus "Таблица '%-.192s' помечена как иÑÐ¿Ð¾Ñ€Ñ‡ÐµÐ½Ð½Ð°Ñ Ð¸ поÑледний (автоматичеÑкий?) ремонт не был уÑпешным" serbian "Tabela '%-.192s' je markirana kao oÅ¡tećena, a zadnja (automatska?) 
popravka je bila neuspela" @@ -4173,6 +4147,7 @@ ER_WARNING_NOT_COMPLETE_ROLLBACK fre "Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées" ger "Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden" ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)" + jpn "トランザクション対応ã§ã¯ãªã„表ã¸ã®å¤‰æ›´ã¯ãƒãƒ¼ãƒ«ãƒãƒƒã‚¯ã•ã‚Œã¾ã›ã‚“。" por "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituÃdas (rolled back)" rus "Внимание: по некоторым измененным нетранзакционным таблицам невозможно будет произвеÑти откат транзакции" serbian "Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'" @@ -4187,6 +4162,7 @@ ER_TRANS_CACHE_FULL fre "Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez" ger "Transaktionen, die aus mehreren Befehlen bestehen, benötigten mehr als 'max_binlog_cache_size' Bytes an Speicher. Btte vergrössern Sie diese Server-Variable versuchen Sie es noch einmal" ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare" + jpn "複数ステートメントã‹ã‚‰æˆã‚‹ãƒˆãƒ©ãƒ³ã‚¶ã‚¯ã‚·ãƒ§ãƒ³ãŒ 'max_binlog_cache_size' 以上ã®å®¹é‡ã‚’å¿…è¦ã¨ã—ã¾ã—ãŸã€‚ã“ã®ã‚·ã‚¹ãƒ†ãƒ å¤‰æ•°ã‚’å¢—åŠ ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。" por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente" rus "Транзакции, включающей большое количеÑтво команд, потребовалоÑÑŒ более чем 'max_binlog_cache_size' байт. Увеличьте Ñту переменную Ñервера mysqld и попробуйте еще раз" spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. 
Aumente esta variable mysqld y tente de nuevo" @@ -4212,6 +4188,7 @@ ER_SLAVE_NOT_RUNNING fre "Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE" ger "Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren" ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE" + jpn "ã“ã®å‡¦ç†ã¯ã€ç¨¼åƒä¸ã®ã‚¹ãƒ¬ãƒ¼ãƒ–ã§ãªã‘ã‚Œã°å®Ÿè¡Œã§ãã¾ã›ã‚“。スレーブã®è¨å®šã‚’ã—ã¦START SLAVEコマンドを実行ã—ã¦ãã ã•ã„。" por "Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE" rus "Ð”Ð»Ñ Ñтой операции требуетÑÑ Ñ€Ð°Ð±Ð¾Ñ‚Ð°ÑŽÑ‰Ð¸Ð¹ подчиненный Ñервер. Сначала выполните START SLAVE" serbian "Ova operacija zahteva da je aktivan podreÄ‘eni server. KonfiguriÅ¡ite prvo podreÄ‘eni server i onda izvrÅ¡ite komandu 'START SLAVE'" @@ -4225,6 +4202,7 @@ ER_BAD_SLAVE fre "Le server n'est pas configuré comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO" ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben" ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO" + jpn "ã“ã®ã‚µãƒ¼ãƒãƒ¼ã¯ã‚¹ãƒ¬ãƒ¼ãƒ–ã¨ã—ã¦è¨å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。コンフィグファイルã‹CHANGE MASTER TOコマンドã§è¨å®šã—ã¦ä¸‹ã•ã„。" por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO" rus "Ðтот Ñервер не наÑтроен как подчиненный. 
ВнеÑите иÑÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð² конфигурационном файле или Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ CHANGE MASTER TO" serbian "Server nije konfigurisan kao podreÄ‘eni server, ispravite konfiguracioni file ili na njemu izvrÅ¡ite komandu 'CHANGE MASTER TO'" @@ -4235,15 +4213,17 @@ ER_MASTER_INFO eng "Could not initialize master info structure for '%.*s'; more error messages can be found in the MariaDB error log" fre "Impossible d'initialiser les structures d'information de maître '%.*s', vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MariaDB" ger "Konnte Master-Info-Struktur '%.*s' nicht initialisieren. Weitere Fehlermeldungen können im MariaDB-Error-Log eingesehen werden" + jpn "'master info '%.*s''æ§‹é€ ä½“ã®åˆæœŸåŒ–ãŒã§ãã¾ã›ã‚“ã§ã—ãŸã€‚MariaDBエラーãƒã‚°ã§ã‚¨ãƒ©ãƒ¼ãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã‚’確èªã—ã¦ãã ã•ã„。" serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info' '%.*s'" swe "Kunde inte initialisera replikationsstrukturerna för '%.*s'. See MariaDB fel fil för mera information" -ER_SLAVE_THREAD +ER_SLAVE_THREAD dan "Kunne ikke danne en slave-trÃ¥d; check systemressourcerne" nla "Kon slave thread niet aanmaken, controleer systeem resources" eng "Could not create slave thread; check system resources" fre "Impossible de créer une tâche esclave, vérifiez les ressources système" ger "Konnte Slave-Thread nicht starten. Bitte System-Ressourcen überprüfen" ita "Impossibile creare il thread 'slave', controllare le risorse di sistema" + jpn "スレーブスレッドを作æˆã§ãã¾ã›ã‚“。システムリソースを確èªã—ã¦ãã ã•ã„。" por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema" rus "Ðевозможно Ñоздать поток подчиненного Ñервера. 
Проверьте ÑиÑтемные реÑурÑÑ‹" serbian "Nisam mogao da startujem thread za podreÄ‘eni server, proverite sistemske resurse" @@ -4258,6 +4238,7 @@ ER_TOO_MANY_USER_CONNECTIONS 42000 fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connexions actives" ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen" ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive" + jpn "ユーザー '%-.64s' ã¯ã™ã§ã« 'max_user_connections' 以上ã®ã‚¢ã‚¯ãƒ†ã‚£ãƒ–ãªæŽ¥ç¶šã‚’è¡Œã£ã¦ã„ã¾ã™ã€‚" por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas" rus "У Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %-.64s уже больше чем 'max_user_connections' активных Ñоединений" serbian "Korisnik %-.64s već ima viÅ¡e aktivnih konekcija nego Å¡to je to odreÄ‘eno 'max_user_connections' promenljivom" @@ -4272,6 +4253,7 @@ ER_SET_CONSTANTS_ONLY fre "Seules les expressions constantes sont autorisées avec SET" ger "Bei diesem Befehl dürfen nur konstante Ausdrücke verwendet werden" ita "Si possono usare solo espressioni costanti con SET" + jpn "SET処ç†ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚" por "Você pode usar apenas expressões constantes com SET" rus "С Ñтой командой вы можете иÑпользовать только конÑтантные выражениÑ" serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'" @@ -4286,6 +4268,7 @@ ER_LOCK_WAIT_TIMEOUT fre "Timeout sur l'obtention du verrou" ger "Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten" ita "E' scaduto il timeout per l'attesa del lock" + jpn "ãƒãƒƒã‚¯å¾…ã¡ãŒã‚¿ã‚¤ãƒ アウトã—ã¾ã—ãŸã€‚トランザクションをå†è©¦è¡Œã—ã¦ãã ã•ã„。" por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação." 
rus "Таймаут Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð±Ð»Ð¾ÐºÐ¸Ñ€Ð¾Ð²ÐºÐ¸ иÑтек; попробуйте перезапуÑтить транзакцию" serbian "Vremenski limit za zakljuÄavanje tabele je istekao; Probajte da ponovo startujete transakciju" @@ -4300,6 +4283,7 @@ ER_LOCK_TABLE_FULL fre "Le nombre total de verrou dépasse la taille de la table des verrous" ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle" ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock" + jpn "ãƒãƒƒã‚¯ã®æ•°ãŒå¤šã™ãŽã¾ã™ã€‚" por "O número total de travamentos excede o tamanho da tabela de travamentos" rus "Общее количеÑтво блокировок превыÑило размеры таблицы блокировок" serbian "Broj totalnih zakljuÄavanja tabele premaÅ¡uje veliÄinu tabele zakljuÄavanja" @@ -4314,6 +4298,7 @@ ER_READ_ONLY_TRANSACTION 25000 fre "Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED" ger "Während einer READ-UNCOMMITTED-Transaktion können keine UPDATE-Sperren angefordert werden" ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'" + jpn "èªã¿è¾¼ã¿å°‚用トランザクションã§ã™ã€‚" por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED" rus "Блокировки обновлений Ð½ÐµÐ»ÑŒÐ·Ñ Ð¿Ð¾Ð»ÑƒÑ‡Ð¸Ñ‚ÑŒ в процеÑÑе Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð½Ðµ принÑтой (в режиме READ UNCOMMITTED) транзакции" serbian "ZakljuÄavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija" @@ -4328,6 +4313,7 @@ ER_DROP_DB_WITH_READ_LOCK fre "DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture" ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält" ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura" + jpn "ã‚°ãƒãƒ¼ãƒãƒ«ãƒªãƒ¼ãƒ‰ãƒãƒƒã‚¯ã‚’ä¿æŒã—ã¦ã„ã‚‹é–“ã¯ã€DROP DATABASE を実行ã§ãã¾ã›ã‚“。" por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura" rus "Ðе допуÑкаетÑÑ DROP DATABASE, пока 
поток держит глобальную блокировку чтениÑ" serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuÄava Äitanje podataka" @@ -4342,6 +4328,7 @@ ER_CREATE_DB_WITH_READ_LOCK fre "CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture" ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält" ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura" + jpn "ã‚°ãƒãƒ¼ãƒãƒ«ãƒªãƒ¼ãƒ‰ãƒãƒƒã‚¯ã‚’ä¿æŒã—ã¦ã„ã‚‹é–“ã¯ã€CREATE DATABASE を実行ã§ãã¾ã›ã‚“。" por "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura" rus "Ðе допуÑкаетÑÑ CREATE DATABASE, пока поток держит глобальную блокировку чтениÑ" serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuÄava Äitanje podataka" @@ -4355,6 +4342,7 @@ ER_WRONG_ARGUMENTS fre "Mauvais arguments à %s" ger "Falsche Argumente für %s" ita "Argomenti errati a %s" + jpn "%s ã®å¼•æ•°ãŒä¸æ£ã§ã™" por "Argumentos errados para %s" rus "Ðеверные параметры Ð´Ð»Ñ %s" serbian "PogreÅ¡ni argumenti prosleÄ‘eni na %s" @@ -4381,6 +4369,7 @@ ER_UNION_TABLES_IN_DIFFERENT_DIR fre "Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée" ger "Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden" ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database" + jpn "ä¸æ£ãªè¡¨å®šç¾©ã§ã™ã€‚MERGE表ã®æ§‹æˆè¡¨ã¯ã™ã¹ã¦åŒã˜ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹å†…ã«ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。" por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados." 
rus "Ðеверное определение таблицы; Ð’Ñе таблицы в MERGE должны принадлежать одной и той же базе данных" serbian "PogreÅ¡na definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka" @@ -4393,6 +4382,7 @@ ER_LOCK_DEADLOCK 40001 fre "Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction" ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten" ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione" + jpn "ãƒãƒƒã‚¯å–å¾—ä¸ã«ãƒ‡ãƒƒãƒ‰ãƒãƒƒã‚¯ãŒæ¤œå‡ºã•ã‚Œã¾ã—ãŸã€‚トランザクションをå†è©¦è¡Œã—ã¦ãã ã•ã„。" por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação." rus "Возникла Ñ‚ÑƒÐ¿Ð¸ÐºÐ¾Ð²Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² процеÑÑе Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð±Ð»Ð¾ÐºÐ¸Ñ€Ð¾Ð²ÐºÐ¸; Попробуйте перезапуÑтить транзакцию" serbian "Unakrsno zakljuÄavanje pronaÄ‘eno kada sam pokuÅ¡ao da dobijem pravo na zakljuÄavanje; Probajte da restartujete transakciju" @@ -4417,6 +4407,7 @@ ER_CANNOT_ADD_FOREIGN fre "Impossible d'ajouter des contraintes d'index externe" ger "Fremdschlüssel-Beschränkung kann nicht hinzugefügt werden" ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)" + jpn "外部ã‚ãƒ¼åˆ¶ç´„ã‚’è¿½åŠ ã§ãã¾ã›ã‚“。" por "Não pode acrescentar uma restrição de chave estrangeira" rus "Ðевозможно добавить Ð¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸Ñ Ð²Ð½ÐµÑˆÐ½ÐµÐ³Ð¾ ключа" serbian "Ne mogu da dodam proveru spoljnog kljuÄa" @@ -4430,6 +4421,7 @@ ER_NO_REFERENCED_ROW 23000 greek "Cannot add a child row: a foreign key constraint fails" hun "Cannot add a child row: a foreign key constraint fails" ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto" + jpn "親ã‚ーãŒã‚ã‚Šã¾ã›ã‚“。外部ã‚ー制約é•åã§ã™ã€‚" norwegian-ny "Cannot add a child row: a foreign key constraint fails" por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou" rus "Ðевозможно добавить или 
обновить дочернюю Ñтроку: проверка ограничений внешнего ключа не выполнÑетÑÑ" @@ -4442,6 +4434,7 @@ ER_ROW_IS_REFERENCED 23000 greek "Cannot delete a parent row: a foreign key constraint fails" hun "Cannot delete a parent row: a foreign key constraint fails" ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto" + jpn "åレコードãŒã‚ã‚Šã¾ã™ã€‚外部ã‚ー制約é•åã§ã™ã€‚" por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou" rus "Ðевозможно удалить или обновить родительÑкую Ñтроку: проверка ограничений внешнего ключа не выполнÑетÑÑ" serbian "Ne mogu da izbriÅ¡em roditeljski slog: provera spoljnog kljuÄa je neuspela" @@ -4452,6 +4445,7 @@ ER_CONNECT_TO_MASTER 08S01 eng "Error connecting to master: %-.128s" ger "Fehler bei der Verbindung zum Master: %-.128s" ita "Errore durante la connessione al master: %-.128s" + jpn "マスターã¸ã®æŽ¥ç¶šã‚¨ãƒ©ãƒ¼: %-.128s" por "Erro conectando com o master: %-.128s" rus "Ошибка ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ Ð³Ð¾Ð»Ð¾Ð²Ð½Ñ‹Ð¼ Ñервером: %-.128s" spa "Error de coneccion a master: %-.128s" @@ -4461,6 +4455,7 @@ ER_QUERY_ON_MASTER eng "Error running query on master: %-.128s" ger "Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s" ita "Errore eseguendo una query sul master: %-.128s" + jpn "マスターã§ã®ã‚¯ã‚¨ãƒªå®Ÿè¡Œã‚¨ãƒ©ãƒ¼: %-.128s" por "Erro rodando consulta no master: %-.128s" rus "Ошибка Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа на головном Ñервере: %-.128s" spa "Error executando el query en master: %-.128s" @@ -4471,6 +4466,7 @@ ER_ERROR_WHEN_EXECUTING_COMMAND est "Viga käsu %s täitmisel: %-.128s" ger "Fehler beim Ausführen des Befehls %s: %-.128s" ita "Errore durante l'esecuzione del comando %s: %-.128s" + jpn "%s コマンドã®å®Ÿè¡Œã‚¨ãƒ©ãƒ¼: %-.128s" por "Erro quando executando comando %s: %-.128s" rus "Ошибка при выполнении команды %s: %-.128s" serbian "GreÅ¡ka pri izvrÅ¡avanju komande %s: %-.128s" @@ -4482,6 +4478,7 @@ ER_WRONG_USAGE est "Vigane %s ja %s kasutus" ger "Falsche Verwendung von 
%s und %s" ita "Uso errato di %s e %s" + jpn "%s ã® %s ã«é–¢ã™ã‚‹ä¸æ£ãªä½¿ç”¨æ³•ã§ã™ã€‚" por "Uso errado de %s e %s" rus "Ðеверное иÑпользование %s и %s" serbian "PogreÅ¡na upotreba %s i %s" @@ -4494,6 +4491,7 @@ ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000 est "Tulpade arv kasutatud SELECT lausetes ei kattu" ger "Die verwendeten SELECT-Befehle liefern unterschiedliche Anzahlen von Feldern zurück" ita "La SELECT utilizzata ha un numero di colonne differente" + jpn "使用ã®SELECTæ–‡ãŒè¿”ã™åˆ—æ•°ãŒé•ã„ã¾ã™ã€‚" por "Os comandos SELECT usados têm diferente número de colunas" rus "ИÑпользованные операторы выборки (SELECT) дают разное количеÑтво Ñтолбцов" serbian "Upotrebljene 'SELECT' komande adresiraju razliÄit broj kolona" @@ -4505,6 +4503,7 @@ ER_CANT_UPDATE_WITH_READLOCK est "Ei suuda täita päringut konfliktse luku tõttu" ger "Augrund eines READ-LOCK-Konflikts kann die Abfrage nicht ausgeführt werden" ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura" + jpn "競åˆã™ã‚‹ãƒªãƒ¼ãƒ‰ãƒãƒƒã‚¯ã‚’ä¿æŒã—ã¦ã„ã‚‹ã®ã§ã€ã‚¯ã‚¨ãƒªã‚’実行ã§ãã¾ã›ã‚“。" por "Não posso executar a consulta porque você tem um conflito de travamento de leitura" rus "Ðевозможно иÑполнить запроÑ, поÑкольку у Ð²Ð°Ñ ÑƒÑтановлены конфликтующие блокировки чтениÑ" serbian "Ne mogu da izvrÅ¡im upit zbog toga Å¡to imate zakljuÄavanja Äitanja podataka u konfliktu" @@ -4516,6 +4515,7 @@ ER_MIXING_NOT_ALLOWED est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud" ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert" ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali" + jpn "トランザクション対応ã®è¡¨ã¨éžå¯¾å¿œã®è¡¨ã®åŒæ™‚使用ã¯ç„¡åŠ¹åŒ–ã•ã‚Œã¦ã„ã¾ã™ã€‚" por "Mistura de tabelas transacional e não-transacional está desabilitada" rus "ИÑпользование транзакционных таблиц нарÑду Ñ Ð½ÐµÑ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ð¸Ð¾Ð½Ð½Ñ‹Ð¼Ð¸ запрещено" serbian "MeÅ¡anje tabela koje podržavaju transakcije i onih 
koje ne podržavaju transakcije je iskljuÄeno" @@ -4527,6 +4527,7 @@ ER_DUP_ARGUMENT est "Määrangut '%s' on lauses kasutatud topelt" ger "Option '%s' wird im Befehl zweimal verwendet" ita "L'opzione '%s' e' stata usata due volte nel comando" + jpn "オプション '%s' ãŒ2度使用ã•ã‚Œã¦ã„ã¾ã™ã€‚" por "Opção '%s' usada duas vezes no comando" rus "ÐžÐ¿Ñ†Ð¸Ñ '%s' дважды иÑпользована в выражении" spa "Opción '%s' usada dos veces en el comando" @@ -4536,6 +4537,7 @@ ER_USER_LIMIT_REACHED 42000 eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)" ger "Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)" ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)" + jpn "ユーザー '%-.64s' ã¯ãƒªã‚½ãƒ¼ã‚¹ã®ä¸Šé™ '%s' ã«é”ã—ã¾ã—ãŸã€‚(ç¾åœ¨å€¤: %ld)" por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)" rus "Пользователь '%-.64s' превыÑил иÑпользование реÑурÑа '%s' (текущее значение: %ld)" spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)" @@ -4545,6 +4547,7 @@ ER_SPECIFIC_ACCESS_DENIED_ERROR 42000 eng "Access denied; you need (at least one of) the %-.128s privilege(s) for this operation" ger "Kein Zugriff. Hierfür wird die Berechtigung %-.128s benötigt" ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione" + jpn "アクセスã¯æ‹’å¦ã•ã‚Œã¾ã—ãŸã€‚ã“ã®æ“作ã«ã¯ %-.128s 権é™ãŒ(複数ã®å ´åˆã¯ã©ã‚Œã‹1ã¤)å¿…è¦ã§ã™ã€‚" por "Acesso negado. Você precisa o privilégio %-.128s para essa operação" rus "Ð’ доÑтупе отказано. Вам нужны привилегии %-.128s Ð´Ð»Ñ Ñтой операции" spa "Acceso negado. 
Usted necesita el privilegio %-.128s para esta operación" @@ -4555,6 +4558,7 @@ ER_LOCAL_VARIABLE eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL" ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden" ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL" + jpn "変数 '%-.64s' ã¯ã‚»ãƒƒã‚·ãƒ§ãƒ³å¤‰æ•°ã§ã™ã€‚SET GLOBALã§ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“。" por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL" rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' ÑвлÑетÑÑ Ð¿Ð¾Ñ‚Ð¾ÐºÐ¾Ð²Ð¾Ð¹ (SESSION) переменной и не может быть изменена Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ SET GLOBAL" spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL" @@ -4564,6 +4568,7 @@ ER_GLOBAL_VARIABLE eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL" ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden" ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL" + jpn "変数 '%-.64s' ã¯ã‚°ãƒãƒ¼ãƒãƒ«å¤‰æ•°ã§ã™ã€‚SET GLOBALを使用ã—ã¦ãã ã•ã„。" por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL" rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' ÑвлÑетÑÑ Ð³Ð»Ð¾Ð±Ð°Ð»ÑŒÐ½Ð¾Ð¹ (GLOBAL) переменной, и ее Ñледует изменÑÑ‚ÑŒ Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ SET GLOBAL" spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL" @@ -4573,6 +4578,7 @@ ER_NO_DEFAULT 42000 eng "Variable '%-.64s' doesn't have a default value" ger "Variable '%-.64s' hat keinen Vorgabewert" ita "La variabile '%-.64s' non ha un valore di default" + jpn "変数 '%-.64s' ã«ã¯ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ãŒã‚ã‚Šã¾ã›ã‚“。" por "Variável '%-.64s' não tem um valor padrão" rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' не имеет Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ð¾ умолчанию" spa "Variable '%-.64s' no tiene un valor patrón" @@ -4582,6 +4588,7 @@ ER_WRONG_VALUE_FOR_VAR 42000 eng "Variable '%-.64s' can't be set to the value 
of '%-.200s'" ger "Variable '%-.64s' kann nicht auf '%-.200s' gesetzt werden" ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.200s'" + jpn "変数 '%-.64s' ã«å€¤ '%-.200s' ã‚’è¨å®šã§ãã¾ã›ã‚“。" por "Variável '%-.64s' não pode ser configurada para o valor de '%-.200s'" rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' не может быть уÑтановлена в значение '%-.200s'" spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.200s'" @@ -4591,6 +4598,7 @@ ER_WRONG_TYPE_FOR_VAR 42000 eng "Incorrect argument type to variable '%-.64s'" ger "Falscher Argumenttyp für Variable '%-.64s'" ita "Tipo di valore errato per la variabile '%-.64s'" + jpn "変数 '%-.64s' ã¸ã®å€¤ã®åž‹ãŒä¸æ£ã§ã™ã€‚" por "Tipo errado de argumento para variável '%-.64s'" rus "Ðеверный тип аргумента Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð¾Ð¹ '%-.64s'" spa "Tipo de argumento equivocado para variable '%-.64s'" @@ -4600,6 +4608,7 @@ ER_VAR_CANT_BE_READ eng "Variable '%-.64s' can only be set, not read" ger "Variable '%-.64s' kann nur verändert, nicht gelesen werden" ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto" + jpn "変数 '%-.64s' ã¯æ›¸ãè¾¼ã¿å°‚用ã§ã™ã€‚èªã¿è¾¼ã¿ã¯ã§ãã¾ã›ã‚“。" por "Variável '%-.64s' somente pode ser configurada, não lida" rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' может быть только уÑтановлена, но не Ñчитана" spa "Variable '%-.64s' solamente puede ser configurada, no leÃda" @@ -4609,6 +4618,7 @@ ER_CANT_USE_OPTION_HERE 42000 eng "Incorrect usage/placement of '%s'" ger "Falsche Verwendung oder Platzierung von '%s'" ita "Uso/posizione di '%s' sbagliato" + jpn "'%s' ã®ä½¿ç”¨æ³•ã¾ãŸã¯å ´æ‰€ãŒä¸æ£ã§ã™ã€‚" por "Errado uso/colocação de '%s'" rus "Ðеверное иÑпользование или в неверном меÑте указан '%s'" spa "Equivocado uso/colocación de '%s'" @@ -4618,6 +4628,7 @@ ER_NOT_SUPPORTED_YET 42000 eng "This version of MariaDB doesn't yet support '%s'" ger "Diese MariaDB-Version unterstützt '%s' nicht" ita "Questa versione di MariaDB non supporta ancora '%s'" + jpn 
"ã“ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®MariaDBã§ã¯ã€ã¾ã '%s' を利用ã§ãã¾ã›ã‚“。" por "Esta versão de MariaDB não suporta ainda '%s'" rus "Ðта верÑÐ¸Ñ MariaDB пока еще не поддерживает '%s'" spa "Esta versión de MariaDB no soporta todavia '%s'" @@ -4627,6 +4638,7 @@ ER_MASTER_FATAL_ERROR_READING_BINLOG eng "Got fatal error %d from master when reading data from binary log: '%-.320s'" ger "Schwerer Fehler %d: '%-.320s vom Master beim Lesen des binären Logs" ita "Errore fatale %d: '%-.320s' dal master leggendo i dati dal log binario" + jpn "致命的ãªã‚¨ãƒ©ãƒ¼ %d: '%-.320s' ãŒãƒžã‚¹ã‚¿ãƒ¼ã§ãƒã‚¤ãƒŠãƒªãƒã‚°èªã¿è¾¼ã¿ä¸ã«ç™ºç”Ÿã—ã¾ã—ãŸã€‚" por "Obteve fatal erro %d: '%-.320s' do master quando lendo dados do binary log" rus "Получена неиÑÐ¿Ñ€Ð°Ð²Ð¸Ð¼Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° %d: '%-.320s' от головного Ñервера в процеÑÑе выборки данных из двоичного журнала" spa "Recibió fatal error %d: '%-.320s' del master cuando leyendo datos del binary log" @@ -4634,6 +4646,7 @@ ER_MASTER_FATAL_ERROR_READING_BINLOG ER_SLAVE_IGNORED_TABLE eng "Slave SQL thread ignored the query because of replicate-*-table rules" ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert" + jpn "replicate-*-table ルールã«å¾“ã£ã¦ã€ã‚¹ãƒ¬ãƒ¼ãƒ–SQLスレッドã¯ã‚¯ã‚¨ãƒªã‚’無視ã—ã¾ã—ãŸã€‚" nla "Slave SQL thread negeerde de query vanwege replicate-*-table opties" por "Slave SQL thread ignorado a consulta devido à s normas de replicação-*-tabela" spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla" @@ -4642,12 +4655,14 @@ ER_INCORRECT_GLOBAL_LOCAL_VAR eng "Variable '%-.192s' is a %s variable" serbian "Promenljiva '%-.192s' je %s promenljiva" ger "Variable '%-.192s' ist eine %s-Variable" + jpn "変数 '%-.192s' 㯠%s 変数ã§ã™ã€‚" nla "Variabele '%-.192s' is geen %s variabele" spa "Variable '%-.192s' es una %s variable" swe "Variabel '%-.192s' är av typ %s" ER_WRONG_FK_DEF 42000 eng "Incorrect foreign key definition for '%-.192s': %s" ger "Falsche Fremdschlüssel-Definition für '%-.192s': %s" + jpn "外部ã‚ー 
'%-.192s' ã®å®šç¾©ã®ä¸æ£: %s" nla "Incorrecte foreign key definitie voor '%-.192s': %s" por "Definição errada da chave estrangeira para '%-.192s': %s" spa "Equivocada definición de llave extranjera para '%-.192s': %s" @@ -4655,6 +4670,7 @@ ER_WRONG_FK_DEF 42000 ER_KEY_REF_DO_NOT_MATCH_TABLE_REF eng "Key reference and table reference don't match" ger "Schlüssel- und Tabellenverweis passen nicht zusammen" + jpn "外部ã‚ーã®å‚照表ã¨å®šç¾©ãŒä¸€è‡´ã—ã¾ã›ã‚“。" nla "Sleutel- en tabelreferentie komen niet overeen" por "Referência da chave e referência da tabela não coincidem" spa "Referencia de llave y referencia de tabla no coinciden" @@ -4662,6 +4678,7 @@ ER_KEY_REF_DO_NOT_MATCH_TABLE_REF ER_OPERAND_COLUMNS 21000 eng "Operand should contain %d column(s)" ger "Operand sollte %d Spalte(n) enthalten" + jpn "オペランド㫠%d 個ã®åˆ—ãŒå¿…è¦ã§ã™ã€‚" nla "Operand behoort %d kolommen te bevatten" rus "Операнд должен Ñодержать %d колонок" spa "Operando debe tener %d columna(s)" @@ -4669,6 +4686,7 @@ ER_OPERAND_COLUMNS 21000 ER_SUBQUERY_NO_1_ROW 21000 eng "Subquery returns more than 1 row" ger "Unterabfrage lieferte mehr als einen Datensatz zurück" + jpn "サブクエリãŒ2行以上ã®çµæžœã‚’è¿”ã—ã¾ã™ã€‚" nla "Subquery retourneert meer dan 1 rij" por "Subconsulta retorna mais que 1 registro" rus "ÐŸÐ¾Ð´Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð²Ð¾Ð·Ð²Ñ€Ð°Ñ‰Ð°ÐµÑ‚ более одной запиÑи" @@ -4679,6 +4697,7 @@ ER_UNKNOWN_STMT_HANDLER dan "Unknown prepared statement handler (%.*s) given to %s" eng "Unknown prepared statement handler (%.*s) given to %s" ger "Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben" + jpn "'%.*s' ã¯ãƒ—リペアードステートメントã®ä¸æ˜Žãªãƒãƒ³ãƒ‰ãƒ«ã§ã™ã€‚(%s ã§æŒ‡å®šã•ã‚Œã¾ã—ãŸ)" nla "Onebekende prepared statement handler (%.*s) voor %s aangegeven" por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s" spa "Desconocido preparado comando handler (%.*s) dado para %s" @@ -4687,6 +4706,7 @@ ER_UNKNOWN_STMT_HANDLER ER_CORRUPT_HELP_DB eng "Help database is corrupt or does not exist" ger "Die 
Hilfe-Datenbank ist beschädigt oder existiert nicht" + jpn "ヘルプデータベースã¯å£Šã‚Œã¦ã„ã‚‹ã‹å˜åœ¨ã—ã¾ã›ã‚“。" nla "Help database is beschadigd of bestaat niet" por "Banco de dado de ajuda corrupto ou não existente" spa "Base de datos Help está corrupto o no existe" @@ -4694,6 +4714,7 @@ ER_CORRUPT_HELP_DB ER_CYCLIC_REFERENCE eng "Cyclic reference on subqueries" ger "Zyklischer Verweis in Unterabfragen" + jpn "サブクエリã®å‚ç…§ãŒãƒ«ãƒ¼ãƒ—ã—ã¦ã„ã¾ã™ã€‚" nla "Cyclische verwijzing in subqueries" por "Referência cÃclica em subconsultas" rus "ЦикличеÑÐºÐ°Ñ ÑÑылка на подзапроÑ" @@ -4703,6 +4724,7 @@ ER_CYCLIC_REFERENCE ER_AUTO_CONVERT eng "Converting column '%s' from %s to %s" ger "Feld '%s' wird von %s nach %s umgewandelt" + jpn "列 '%s' ã‚’ %s ã‹ã‚‰ %s ã¸å¤‰æ›ã—ã¾ã™ã€‚" nla "Veld '%s' wordt van %s naar %s geconverteerd" por "Convertendo coluna '%s' de %s para %s" rus "Преобразование Ð¿Ð¾Ð»Ñ '%s' из %s в %s" @@ -4712,6 +4734,7 @@ ER_AUTO_CONVERT ER_ILLEGAL_REFERENCE 42S22 eng "Reference '%-.64s' not supported (%s)" ger "Verweis '%-.64s' wird nicht unterstützt (%s)" + jpn "'%-.64s' ã®å‚ç…§ã¯ã§ãã¾ã›ã‚“。(%s)" nla "Verwijzing '%-.64s' niet ondersteund (%s)" por "Referência '%-.64s' não suportada (%s)" rus "СÑылка '%-.64s' не поддерживаетÑÑ (%s)" @@ -4721,6 +4744,7 @@ ER_ILLEGAL_REFERENCE 42S22 ER_DERIVED_MUST_HAVE_ALIAS 42000 eng "Every derived table must have its own alias" ger "Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden" + jpn "導出表ã«ã¯åˆ¥åãŒå¿…é ˆã§ã™ã€‚" nla "Voor elke afgeleide tabel moet een unieke alias worden gebruikt" por "Cada tabela derivada deve ter seu próprio alias" spa "Cada tabla derivada debe tener su propio alias" @@ -4728,6 +4752,7 @@ ER_DERIVED_MUST_HAVE_ALIAS 42000 ER_SELECT_REDUCED 01000 eng "Select %u was reduced during optimization" ger "Select %u wurde während der Optimierung reduziert" + jpn "Select %u ã¯æœ€é©åŒ–ã«ã‚ˆã£ã¦æ¸›ã‚‰ã•ã‚Œã¾ã—ãŸã€‚" nla "Select %u werd geredureerd tijdens optimtalisatie" por "Select %u foi reduzido durante 
otimização" rus "Select %u был упразднен в процеÑÑе оптимизации" @@ -4737,6 +4762,7 @@ ER_SELECT_REDUCED 01000 ER_TABLENAME_NOT_ALLOWED_HERE 42000 eng "Table '%-.192s' from one of the SELECTs cannot be used in %-.32s" ger "Tabelle '%-.192s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden" + jpn "特定ã®SELECTã®ã¿ã§ä½¿ç”¨ã®è¡¨ '%-.192s' 㯠%-.32s ã§ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“。" nla "Tabel '%-.192s' uit een van de SELECTS kan niet in %-.32s gebruikt worden" por "Tabela '%-.192s' de um dos SELECTs não pode ser usada em %-.32s" spa "Tabla '%-.192s' de uno de los SELECT no puede ser usada en %-.32s" @@ -4744,6 +4770,7 @@ ER_TABLENAME_NOT_ALLOWED_HERE 42000 ER_NOT_SUPPORTED_AUTH_MODE 08004 eng "Client does not support authentication protocol requested by server; consider upgrading MariaDB client" ger "Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MariaDB-Client" + jpn "クライアントã¯ã‚µãƒ¼ãƒãƒ¼ãŒè¦æ±‚ã™ã‚‹èªè¨¼ãƒ—ãƒãƒˆã‚³ãƒ«ã«å¯¾å¿œã§ãã¾ã›ã‚“。MariaDBクライアントã®ã‚¢ãƒƒãƒ—グレードを検討ã—ã¦ãã ã•ã„。" nla "Client ondersteunt het door de server verwachtte authenticatieprotocol niet. 
Overweeg een nieuwere MariaDB client te gebruiken" por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MariaDB" spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MariaDB" @@ -4751,6 +4778,7 @@ ER_NOT_SUPPORTED_AUTH_MODE 08004 ER_SPATIAL_CANT_HAVE_NULL 42000 eng "All parts of a SPATIAL index must be NOT NULL" ger "Alle Teile eines SPATIAL-Index müssen als NOT NULL deklariert sein" + jpn "空間索引ã®ã‚ー列㯠NOT NULL ã§ãªã‘ã‚Œã°ã„ã‘ã¾ã›ã‚“。" nla "Alle delete van een SPATIAL index dienen als NOT NULL gedeclareerd te worden" por "Todas as partes de uma SPATIAL index devem ser NOT NULL" spa "Todas las partes de una SPATIAL index deben ser NOT NULL" @@ -4758,6 +4786,7 @@ ER_SPATIAL_CANT_HAVE_NULL 42000 ER_COLLATION_CHARSET_MISMATCH 42000 eng "COLLATION '%s' is not valid for CHARACTER SET '%s'" ger "COLLATION '%s' ist für CHARACTER SET '%s' ungültig" + jpn "COLLATION '%s' 㯠CHARACTER SET '%s' ã«é©ç”¨ã§ãã¾ã›ã‚“。" nla "COLLATION '%s' is niet geldig voor CHARACTER SET '%s'" por "COLLATION '%s' não é válida para CHARACTER SET '%s'" spa "COLLATION '%s' no es válido para CHARACTER SET '%s'" @@ -4765,6 +4794,7 @@ ER_COLLATION_CHARSET_MISMATCH 42000 ER_SLAVE_WAS_RUNNING eng "Slave is already running" ger "Slave läuft bereits" + jpn "スレーブã¯ã™ã§ã«ç¨¼åƒä¸ã§ã™ã€‚" nla "Slave is reeds actief" por "O slave já está rodando" spa "Slave ya está funcionando" @@ -4772,6 +4802,7 @@ ER_SLAVE_WAS_RUNNING ER_SLAVE_WAS_NOT_RUNNING eng "Slave already has been stopped" ger "Slave wurde bereits angehalten" + jpn "スレーブã¯ã™ã§ã«åœæ¢ã—ã¦ã„ã¾ã™ã€‚" nla "Slave is reeds gestopt" por "O slave já está parado" spa "Slave ya fué parado" @@ -4779,24 +4810,28 @@ ER_SLAVE_WAS_NOT_RUNNING ER_TOO_BIG_FOR_UNCOMPRESS eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)" ger "Unkomprimierte Daten sind zu groß. 
Die maximale Größe beträgt %d (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)" + jpn "展開後ã®ãƒ‡ãƒ¼ã‚¿ãŒå¤§ãã™ãŽã¾ã™ã€‚最大サイズ㯠%d ã§ã™ã€‚(展開後データã®é•·ã•æƒ…å ±ãŒå£Šã‚Œã¦ã„ã‚‹å¯èƒ½æ€§ã‚‚ã‚ã‚Šã¾ã™ã€‚)" nla "Ongecomprimeerder data is te groot; de maximum lengte is %d (waarschijnlijk, de lengte van de gecomprimeerde data was beschadigd)" por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)" spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)" ER_ZLIB_Z_MEM_ERROR eng "ZLIB: Not enough memory" ger "ZLIB: Nicht genug Speicher" + jpn "ZLIB: メモリä¸è¶³ã§ã™ã€‚" nla "ZLIB: Onvoldoende geheugen" por "ZLIB: Não suficiente memória disponÃvel" spa "Z_MEM_ERROR: No suficiente memoria para zlib" ER_ZLIB_Z_BUF_ERROR eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)" ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)" + jpn "ZLIB: 出力ãƒãƒƒãƒ•ã‚¡ã«å分ãªç©ºããŒã‚ã‚Šã¾ã›ã‚“。(展開後データã®é•·ã•æƒ…å ±ãŒå£Šã‚Œã¦ã„ã‚‹å¯èƒ½æ€§ã‚‚ã‚ã‚Šã¾ã™ã€‚)" nla "ZLIB: Onvoldoende ruimte in uitgaande buffer (waarschijnlijk, de lengte van de ongecomprimeerde data was beschadigd)" por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)" spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)" ER_ZLIB_Z_DATA_ERROR eng "ZLIB: Input data corrupted" ger "ZLIB: Eingabedaten beschädigt" + jpn "ZLIB: 入力データãŒå£Šã‚Œã¦ã„ã¾ã™ã€‚" nla "ZLIB: Invoer data beschadigd" por "ZLIB: Dados de entrada está corrupto" spa "ZLIB: Dato de entrada fué corrompido para zlib" @@ -4805,18 +4840,21 @@ ER_CUT_VALUE_GROUP_CONCAT ER_WARN_TOO_FEW_RECORDS 01000 eng "Row %lu doesn't 
contain data for all columns" ger "Zeile %lu enthält nicht für alle Felder Daten" + jpn "è¡Œ %lu ã¯ã™ã¹ã¦ã®åˆ—ã¸ã®ãƒ‡ãƒ¼ã‚¿ã‚’å«ã‚“ã§ã„ã¾ã›ã‚“。" nla "Rij %lu bevat niet de data voor alle kolommen" por "Conta de registro é menor que a conta de coluna na linha %lu" spa "LÃnea %lu no contiene datos para todas las columnas" ER_WARN_TOO_MANY_RECORDS 01000 eng "Row %lu was truncated; it contained more data than there were input columns" ger "Zeile %lu gekürzt, die Zeile enthielt mehr Daten, als es Eingabefelder gibt" + jpn "è¡Œ %lu ã¯ãƒ‡ãƒ¼ã‚¿ã‚’切りæ¨ã¦ã‚‰ã‚Œã¾ã—ãŸã€‚列よりも多ã„データをå«ã‚“ã§ã„ã¾ã—ãŸã€‚" nla "Regel %lu ingekort, bevatte meer data dan invoer kolommen" por "Conta de registro é maior que a conta de coluna na linha %lu" spa "LÃnea %lu fué truncada; La misma contine mas datos que las que existen en las columnas de entrada" ER_WARN_NULL_TO_NOTNULL 22004 eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %lu" ger "Feld auf Vorgabewert gesetzt, da NULL für NOT-NULL-Feld '%s' in Zeile %lu angegeben" + jpn "列ã«ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ãŒè¨å®šã•ã‚Œã¾ã—ãŸã€‚NOT NULLã®åˆ— '%s' ã« è¡Œ %lu 㧠NULL ãŒä¸Žãˆã‚‰ã‚Œã¾ã—ãŸã€‚" por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %lu" spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la lÃnea %lu" ER_WARN_DATA_OUT_OF_RANGE 22003 @@ -4824,17 +4862,20 @@ ER_WARN_DATA_OUT_OF_RANGE 22003 WARN_DATA_TRUNCATED 01000 eng "Data truncated for column '%s' at row %lu" ger "Daten abgeschnitten für Feld '%s' in Zeile %lu" + jpn "列 '%s' ã® è¡Œ %lu ã§ãƒ‡ãƒ¼ã‚¿ãŒåˆ‡ã‚Šæ¨ã¦ã‚‰ã‚Œã¾ã—ãŸã€‚" por "Dado truncado para coluna '%s' na linha %lu" spa "Datos truncados para columna '%s' en la lÃnea %lu" ER_WARN_USING_OTHER_HANDLER eng "Using storage engine %s for table '%s'" ger "Für Tabelle '%s' wird Speicher-Engine %s benutzt" + jpn "ストレージエンジン %s ãŒè¡¨ '%s' ã«åˆ©ç”¨ã•ã‚Œã¦ã„ã¾ã™ã€‚" por "Usando engine de armazenamento %s para tabela '%s'" spa "Usando motor de almacenamiento %s para tabla '%s'" swe 
"Använder handler %s för tabell '%s'" ER_CANT_AGGREGATE_2COLLATIONS eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'" ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) für Operation '%s'" + jpn "ç…§åˆé †åº (%s,%s) 㨠(%s,%s) ã®æ··åœ¨ã¯æ“作 '%s' ã§ã¯ä¸æ£ã§ã™ã€‚" por "Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'" spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'" ER_DROP_USER @@ -4843,42 +4884,50 @@ ER_DROP_USER ER_REVOKE_GRANTS eng "Can't revoke all privileges for one or more of the requested users" ger "Kann nicht alle Berechtigungen widerrufen, die für einen oder mehrere Benutzer gewährt wurden" + jpn "指定ã•ã‚ŒãŸãƒ¦ãƒ¼ã‚¶ãƒ¼ã‹ã‚‰æŒ‡å®šã•ã‚ŒãŸå…¨ã¦ã®æ¨©é™ã‚’剥奪ã™ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos" spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados" ER_CANT_AGGREGATE_3COLLATIONS eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'" ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) für Operation '%s'" + jpn "ç…§åˆé †åº (%s,%s), (%s,%s), (%s,%s) ã®æ··åœ¨ã¯æ“作 '%s' ã§ã¯ä¸æ£ã§ã™ã€‚" por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'" spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'" ER_CANT_AGGREGATE_NCOLLATIONS eng "Illegal mix of collations for operation '%s'" ger "Unerlaubte Mischung von Sortierreihenfolgen für Operation '%s'" + jpn "æ“作 '%s' ã§ã¯ä¸æ£ãªç…§åˆé †åºã®æ··åœ¨ã§ã™ã€‚" por "Ilegal combinação de collations para operação '%s'" spa "Ilegal mezcla de collations para operación '%s'" ER_VARIABLE_IS_NOT_STRUCT eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)" ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)" + jpn "変数 '%-.64s' ã¯æ§‹é€ 変数ã®æ§‹æˆè¦ç´ 
ã§ã¯ã‚ã‚Šã¾ã›ã‚“。(XXXX.変数å ã¨ã„ã†æŒ‡å®šã¯ã§ãã¾ã›ã‚“。)" por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)" spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)" ER_UNKNOWN_COLLATION eng "Unknown collation: '%-.64s'" ger "Unbekannte Sortierreihenfolge: '%-.64s'" + jpn "ä¸æ˜Žãªç…§åˆé †åº: '%-.64s'" por "Collation desconhecida: '%-.64s'" spa "Collation desconocida: '%-.64s'" ER_SLAVE_IGNORED_SSL_PARAMS eng "SSL parameters in CHANGE MASTER are ignored because this MariaDB slave was compiled without SSL support; they can be used later if MariaDB slave with SSL is started" ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MariaDB-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn ein MariaDB-Slave mit SSL gestartet wird" + jpn "ã“ã®MySQLスレーブã¯SSLサãƒãƒ¼ãƒˆã‚’å«ã‚ã¦ã‚³ãƒ³ãƒ‘イルã•ã‚Œã¦ã„ãªã„ã®ã§ã€CHANGE MASTER ã®SSLパラメータã¯ç„¡è¦–ã•ã‚Œã¾ã—ãŸã€‚今後SSLサãƒãƒ¼ãƒˆã‚’æŒã¤MySQLスレーブを起動ã™ã‚‹éš›ã«åˆ©ç”¨ã•ã‚Œã¾ã™ã€‚" por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MariaDB foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MariaDB com SSL seja iniciado." spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MariaDB fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MariaDB con SSL sea inicializado" ER_SERVER_IS_IN_SECURE_AUTH_MODE eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format" ger "Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. 
Bitte Passwort ins neue Format ändern" + jpn "サーãƒãƒ¼ã¯ --secure-auth モードã§ç¨¼åƒã—ã¦ã„ã¾ã™ã€‚ã—ã‹ã— '%s'@'%s' ã¯å¤ã„å½¢å¼ã®ãƒ‘スワードを使用ã—ã¦ã„ã¾ã™ã€‚æ–°ã—ã„å½¢å¼ã®ãƒ‘スワードã«å¤‰æ›´ã—ã¦ãã ã•ã„。" por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato" rus "Сервер запущен в режиме --secure-auth (безопаÑной авторизации), но Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ '%s'@'%s' пароль Ñохранён в Ñтаром формате; необходимо обновить формат паролÑ" spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato" ER_WARN_FIELD_RESOLVED eng "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d" ger "Feld oder Verweis '%-.192s%s%-.192s%s%-.192s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst" + jpn "フィールドã¾ãŸã¯å‚ç…§ '%-.192s%s%-.192s%s%-.192s' 㯠SELECT #%d ã§ã¯ãªãã€SELECT #%d ã§è§£æ±ºã•ã‚Œã¾ã—ãŸã€‚" por "Campo ou referência '%-.192s%s%-.192s%s%-.192s' de SELECT #%d foi resolvido em SELECT #%d" rus "Поле или ÑÑылка '%-.192s%s%-.192s%s%-.192s' из SELECTа #%d была найдена в SELECTе #%d" spa "Campo o referencia '%-.192s%s%-.192s%s%-.192s' de SELECT #%d fue resolvido en SELECT #%d" @@ -4886,27 +4935,32 @@ ER_WARN_FIELD_RESOLVED ER_BAD_SLAVE_UNTIL_COND eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL" ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL" + jpn "START SLAVE UNTIL ã¸ã®ãƒ‘ラメータã¾ãŸã¯ãã®çµ„ã¿åˆã‚ã›ãŒä¸æ£ã§ã™ã€‚" por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL" spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL" ER_MISSING_SKIP_SLAVE eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart" ger "Es wird empfohlen, mit 
--skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet" + jpn "START SLAVE UNTIL ã§æ®µéšŽçš„ã«ãƒ¬ãƒ—リケーションを行ã†éš›ã«ã¯ã€--skip-slave-start オプションを使ã†ã“ã¨ã‚’推奨ã—ã¾ã™ã€‚使ã‚ãªã„å ´åˆã€ã‚¹ãƒ¬ãƒ¼ãƒ–ã®mysqldãŒä¸æ…®ã®å†èµ·å‹•ã‚’ã™ã‚‹ã¨å•é¡ŒãŒç™ºç”Ÿã—ã¾ã™ã€‚" por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo" spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave" ER_UNTIL_COND_IGNORED eng "SQL thread is not to be started so UNTIL options are ignored" ger "SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert" + jpn "スレーブSQLスレッドãŒé–‹å§‹ã•ã‚Œãªã„ãŸã‚ã€UNTILオプションã¯ç„¡è¦–ã•ã‚Œã¾ã—ãŸã€‚" por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas" spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas" ER_WRONG_NAME_FOR_INDEX 42000 eng "Incorrect index name '%-.100s'" ger "Falscher Indexname '%-.100s'" + jpn "索引å '%-.100s' ã¯ä¸æ£ã§ã™ã€‚" por "Incorreto nome de Ãndice '%-.100s'" spa "Nombre de Ãndice incorrecto '%-.100s'" swe "Felaktigt index namn '%-.100s'" ER_WRONG_NAME_FOR_CATALOG 42000 eng "Incorrect catalog name '%-.100s'" ger "Falscher Katalogname '%-.100s'" + jpn "ã‚«ã‚¿ãƒã‚°å '%-.100s' ã¯ä¸æ£ã§ã™ã€‚" por "Incorreto nome de catálogo '%-.100s'" spa "Nombre de catalog incorrecto '%-.100s'" swe "Felaktigt katalog namn '%-.100s'" @@ -4921,33 +4975,39 @@ ER_WARN_QC_RESIZE ER_BAD_FT_COLUMN eng "Column '%-.192s' cannot be part of FULLTEXT index" ger "Feld '%-.192s' kann nicht Teil eines FULLTEXT-Index sein" + jpn "列 '%-.192s' ã¯å…¨æ–‡ç´¢å¼•ã®ã‚ーã«ã¯ã§ãã¾ã›ã‚“。" por "Coluna '%-.192s' não pode ser 
parte de Ãndice FULLTEXT" spa "Columna '%-.192s' no puede ser parte de FULLTEXT index" swe "Kolumn '%-.192s' kan inte vara del av ett FULLTEXT index" ER_UNKNOWN_KEY_CACHE eng "Unknown key cache '%-.100s'" ger "Unbekannter Schlüssel-Cache '%-.100s'" + jpn "'%-.100s' ã¯ä¸æ˜Žãªã‚ーã‚ャッシュã§ã™ã€‚" por "Key cache desconhecida '%-.100s'" spa "Desconocida key cache '%-.100s'" swe "Okänd nyckel cache '%-.100s'" ER_WARN_HOSTNAME_WONT_WORK eng "MariaDB is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work" ger "MariaDB wurde mit --skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe möglich ist" + jpn "MariaDB㯠--skip-name-resolve モードã§èµ·å‹•ã—ã¦ã„ã¾ã™ã€‚ã“ã®ã‚ªãƒ—ションを外ã—ã¦å†èµ·å‹•ã—ãªã‘ã‚Œã°ã€ã“ã®æ¨©é™æ“作ã¯æ©Ÿèƒ½ã—ã¾ã›ã‚“。" por "MariaDB foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar" spa "MariaDB esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar" ER_UNKNOWN_STORAGE_ENGINE 42000 eng "Unknown storage engine '%s'" ger "Unbekannte Speicher-Engine '%s'" + jpn "'%s' ã¯ä¸æ˜Žãªã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚¨ãƒ³ã‚¸ãƒ³ã§ã™ã€‚" por "Motor de tabela desconhecido '%s'" spa "Desconocido motor de tabla '%s'" ER_WARN_DEPRECATED_SYNTAX eng "'%s' is deprecated and will be removed in a future release. Please use %s instead" ger "'%s' ist veraltet. Bitte benutzen Sie '%s'" + jpn "'%s' ã¯å°†æ¥ã®ãƒªãƒªãƒ¼ã‚¹ã§å»ƒæ¢äºˆå®šã§ã™ã€‚代ã‚ã‚Šã« %s を使用ã—ã¦ãã ã•ã„。" por "'%s' é desatualizado. 
Use '%s' em seu lugar" spa "'%s' está desaprobado, use '%s' en su lugar" ER_NON_UPDATABLE_TABLE eng "The target table %-.100s of the %s is not updatable" ger "Die Zieltabelle %-.100s von %s ist nicht aktualisierbar" + jpn "対象表 %-.100s ã¯æ›´æ–°å¯èƒ½ã§ã¯ãªã„ã®ã§ã€%s ã‚’è¡Œãˆã¾ã›ã‚“。" por "A tabela destino %-.100s do %s não é atualizável" rus "Таблица %-.100s в %s не может изменÑÑ‚ÑÑ" spa "La tabla destino %-.100s del %s no es actualizable" @@ -4956,33 +5016,39 @@ ER_NON_UPDATABLE_TABLE ER_FEATURE_DISABLED eng "The '%s' feature is disabled; you need MariaDB built with '%s' to have it working" ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MariaDB mit '%s' übersetzen, damit es verfügbar ist" + jpn "機能 '%s' ã¯ç„¡åŠ¹ã§ã™ã€‚利用ã™ã‚‹ãŸã‚ã«ã¯ '%s' ã‚’å«ã‚ã¦ãƒ“ルドã—ãŸMariaDBãŒå¿…è¦ã§ã™ã€‚" por "O recurso '%s' foi desativado; você necessita MariaDB construÃdo com '%s' para ter isto funcionando" spa "El recurso '%s' fue deshabilitado; usted necesita construir MariaDB con '%s' para tener eso funcionando" swe "'%s' är inte aktiverad; För att aktivera detta mÃ¥ste du bygga om MariaDB med '%s' definierad" ER_OPTION_PREVENTS_STATEMENT eng "The MariaDB server is running with the %s option so it cannot execute this statement" ger "Der MariaDB-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen" + jpn "MariaDBサーãƒãƒ¼ãŒ %s オプションã§å®Ÿè¡Œã•ã‚Œã¦ã„ã‚‹ã®ã§ã€ã“ã®ã‚¹ãƒ†ãƒ¼ãƒˆãƒ¡ãƒ³ãƒˆã¯å®Ÿè¡Œã§ãã¾ã›ã‚“。" por "O servidor MariaDB está rodando com a opção %s razão pela qual não pode executar esse commando" spa "El servidor MariaDB está rodando con la opción %s tal que no puede ejecutar este comando" swe "MariaDB är startad med %s. 
Pga av detta kan du inte använda detta kommando" ER_DUPLICATED_VALUE_IN_TYPE eng "Column '%-.100s' has duplicated value '%-.64s' in %s" ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s" + jpn "列 '%-.100s' ã§ã€é‡è¤‡ã™ã‚‹å€¤ '%-.64s' ㌠%s ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã™ã€‚" por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s" ER_TRUNCATED_WRONG_VALUE 22007 eng "Truncated incorrect %-.32s value: '%-.128s'" ger "Falscher %-.32s-Wert gekürzt: '%-.128s'" + jpn "ä¸æ£ãª %-.32s ã®å€¤ãŒåˆ‡ã‚Šæ¨ã¦ã‚‰ã‚Œã¾ã—ãŸã€‚: '%-.128s'" por "Truncado errado %-.32s valor: '%-.128s'" spa "Equivocado truncado %-.32s valor: '%-.128s'" ER_TOO_MUCH_AUTO_TIMESTAMP_COLS eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" ger "Fehlerhafte Tabellendefinition. Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben" + jpn "ä¸æ£ãªè¡¨å®šç¾©ã§ã™ã€‚DEFAULTå¥ã¾ãŸã¯ON UPDATEå¥ã« CURRENT_TIMESTAMP ã‚’ã¨ã‚‚ãªã†TIMESTAMPåž‹ã®åˆ—ã¯1ã¤ã¾ã§ã§ã™ã€‚" por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula" spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula" ER_INVALID_ON_UPDATE eng "Invalid ON UPDATE clause for '%-.192s' column" ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.192s'" + jpn "列 '%-.192s' ã« ON UPDATEå¥ã¯ç„¡åŠ¹ã§ã™ã€‚" por "Inválida cláusula ON UPDATE para campo '%-.192s'" spa "Inválido ON UPDATE cláusula para campo '%-.192s'" ER_UNSUPPORTED_PS @@ -4992,11 +5058,13 @@ ER_GET_ERRMSG dan "Modtog fejl %d '%-.100s' fra %s" eng "Got error %d '%-.100s' from %s" ger "Fehler %d '%-.100s' von %s" + jpn "エラー %d '%-.100s' ㌠%s ã‹ã‚‰è¿”ã•ã‚Œã¾ã—ãŸã€‚" nor "Mottok feil %d '%-.100s' fa %s" norwegian-ny "Mottok feil %d '%-.100s' fra %s" ER_GET_TEMPORARY_ERRMSG dan "Modtog 
temporary fejl %d '%-.100s' fra %s" eng "Got temporary error %d '%-.100s' from %s" + jpn "一時エラー %d '%-.100s' ㌠%s ã‹ã‚‰è¿”ã•ã‚Œã¾ã—ãŸã€‚" ger "Temporärer Fehler %d '%-.100s' von %s" nor "Mottok temporary feil %d '%-.100s' fra %s" norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s" @@ -5462,6 +5530,7 @@ ER_TRG_IN_WRONG_SCHEMA ER_STACK_OVERRUN_NEED_MORE eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack." ger "Thread-Stack-Ãœberlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes benötigt. Verwenden Sie 'mysqld --thread_stack=#', um einen größeren Stack anzugeben" + jpn "スレッドスタックä¸è¶³ã§ã™(使用: %ld ; サイズ: %ld ; è¦æ±‚: %ld)。より大ãã„値㧠'mysqld --thread_stack=#' ã®æŒ‡å®šã‚’ã—ã¦ãã ã•ã„。" ER_TOO_LONG_BODY 42000 S1009 eng "Routine body for '%-.100s' is too long" ger "Routinen-Body für '%-.100s' ist zu lang" @@ -5567,6 +5636,7 @@ ER_WRONG_STRING_LENGTH ER_NON_INSERTABLE_TABLE eng "The target table %-.100s of the %s is not insertable-into" ger "Die Zieltabelle %-.100s von %s ist nicht einfügbar" + jpn "対象表 %-.100s ã¯æŒ¿å…¥å¯èƒ½ã§ã¯ãªã„ã®ã§ã€%s ã‚’è¡Œãˆã¾ã›ã‚“。" ER_ADMIN_WRONG_MRG_TABLE eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist" ger "Tabelle '%-.64s' ist unterschiedlich definiert, nicht vom Typ MyISAM oder existiert nicht" @@ -5836,12 +5906,10 @@ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT ger "Kein DATETIME-Ausdruck angegeben" ER_UNUSED_2 - eng "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted" - ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt" + eng "" ER_UNUSED_3 - eng "Cannot load from mysql.%s. The table is probably corrupted" - ger "Kann mysql.%s nicht einlesen. 
Tabelle ist wahrscheinlich beschädigt" + eng "" ER_EVENT_CANNOT_DELETE eng "Failed to delete the event from mysql.event" ger "Löschen des Events aus mysql.event fehlgeschlagen" @@ -5869,8 +5937,7 @@ ER_CANT_LOCK_LOG_TABLE eng "You can't use locks with log tables." ger "Log-Tabellen können nicht gesperrt werden." ER_UNUSED_4 - eng "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry" - ger "Aufrechterhalten der Fremdschlüssel-Beschränkungen für Tabelle '%.192s', Eintrag '%-.192s', Schlüssel %d würde zu einem doppelten Eintrag führen" + eng "" ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix this error." ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MariaDB %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben" @@ -5963,29 +6030,28 @@ ER_NATIVE_FCT_NAME_COLLISION # When using this error message, use the ER_DUP_ENTRY error code. See, for # example, code in handler.cc. ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009 - cze "Zvojen-Bý klÃÄ '%-.64s' (ÄÃslo klÃÄe '%-.192s')" + cze "Zvojený klÃÄ '%-.64s' (ÄÃslo klÃÄe '%-.192s')" dan "Ens værdier '%-.64s' for indeks '%-.192s'" nla "Dubbele ingang '%-.64s' voor zoeksleutel '%-.192s'" eng "Duplicate entry '%-.64s' for key '%-.192s'" - jps "'%-.64s' 㯠key '%-.192s' ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™", est "Kattuv väärtus '%-.64s' võtmele '%-.192s'" fre "Duplicata du champ '%-.64s' pour la clef '%-.192s'" ger "Doppelter Eintrag '%-.64s' für Schlüssel '%-.192s'" greek "Διπλή εγγÏαφή '%-.64s' για το κλειδί '%-.192s'" hun "Duplikalt bejegyzes '%-.64s' a '%-.192s' kulcs szerint." 
ita "Valore duplicato '%-.64s' per la chiave '%-.192s'" - jpn "'%-.64s' 㯠key '%-.192s' ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™" + jpn "'%-.64s' ã¯ç´¢å¼• '%-.192s' ã§é‡è¤‡ã—ã¦ã„ã¾ã™ã€‚" kor "ì¤‘ë³µëœ ìž…ë ¥ ê°’ '%-.64s': key '%-.192s'" nor "Like verdier '%-.64s' for nøkkel '%-.192s'" norwegian-ny "Like verdiar '%-.64s' for nykkel '%-.192s'" - pol "Powtórzone wyst?pienie '%-.64s' dla klucza '%-.192s'" + pol "Powtórzone wystÄ…pienie '%-.64s' dla klucza '%-.192s'" por "Entrada '%-.64s' duplicada para a chave '%-.192s'" rum "Cimpul '%-.64s' e duplicat pentru cheia '%-.192s'" rus "ДублирующаÑÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ '%-.64s' по ключу '%-.192s'" serbian "Dupliran unos '%-.64s' za kljuÄ '%-.192s'" slo "Opakovaný kÄ¾ÃºÄ '%-.64s' (ÄÃslo kľúÄa '%-.192s')" spa "Entrada duplicada '%-.64s' para la clave '%-.192s'" - swe "Dubbel nyckel '%-.64s' för nyckel '%-.192s'" + swe "Dublett '%-.64s' för nyckel '%-.192s'" ukr "Дублюючий Ð·Ð°Ð¿Ð¸Ñ '%-.64s' Ð´Ð»Ñ ÐºÐ»ÑŽÑ‡Ð° '%-.192s'" ER_BINLOG_PURGE_EMFILE eng "Too many files opened, please execute the command again" @@ -6051,9 +6117,8 @@ ER_TRG_CANT_OPEN_TABLE ER_CANT_CREATE_SROUTINE eng "Cannot create stored routine `%-.64s`. Check warnings" ger "Kann gespeicherte Routine `%-.64s` nicht erzeugen. Beachten Sie die Warnungen" -ER_NEVER_USED - eng "Ambiguous slave modes combination. %s" - ger "Mehrdeutige Kombination von Slave-Modi. %s" +ER_UNUSED + eng "" ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement." ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran." 
@@ -6316,7 +6381,7 @@ ER_VALUES_IS_NOT_INT_TYPE_ERROR swe "Värden i VALUES för partition '%-.64s' mÃ¥ste ha typen INT" ER_ACCESS_DENIED_NO_PASSWORD_ERROR 28000 - cze "P-BÅ™Ãstup pro uživatele '%s'@'%s'" + cze "PÅ™Ãstup pro uživatele '%s'@'%s'" dan "Adgang nægtet bruger: '%s'@'%s'" nla "Toegang geweigerd voor gebruiker: '%s'@'%s'" eng "Access denied for user '%s'@'%s'" @@ -6409,7 +6474,6 @@ ER_PLUGIN_NO_UNINSTALL ER_PLUGIN_NO_INSTALL eng "Plugin '%s' is marked as not dynamically installable. You have to stop the server to install it." - ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT eng "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave." @@ -6432,15 +6496,10 @@ ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST # End of 5.5 error messages. # -ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 - eng "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted" - ger "Spaltenanzahl von %s.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt" - ER_CANNOT_LOAD_FROM_TABLE_V2 eng "Cannot load from %s.%s. The table is probably corrupted" ger "Kann %s.%s nicht einlesen. 
Tabelle ist wahrscheinlich beschädigt" - ER_MASTER_DELAY_VALUE_OUT_OF_RANGE eng "The requested value %u for the master delay exceeds the maximum %u" ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT @@ -6495,25 +6554,9 @@ ER_PARTITION_CLAUSE_ON_NONPARTITIONED ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET eng "Found a row not matching the given partition set" swe "Hittade en rad som inte passar i nÃ¥gon given partition" -ER_NO_SUCH_PARTITION - cze "partion '%-.64s' neexistuje" - dan "partition '%-.64s' eksisterer ikke" - nla "partition '%-.64s' bestaat niet" - eng "partition '%-.64s' doesn't exist" - est "partition '%-.64s' ei eksisteeri" - fre "La partition '%-.64s' n'existe pas" - ger "Die partition '%-.64s' existiert nicht" - hun "A '%-.64s' partition nem letezik" - ita "La tabella particione '%-.64s' non esiste" - nor "Partition '%-.64s' doesn't exist" - norwegian-ny "Partition '%-.64s' doesn't exist" - pol "Partition '%-.64s' doesn't exist" - por "Particion '%-.64s' n�o existe" - rum "Partition '%-.64s' nu exista" - serbian "Partition '%-.64s' ne postoji" - slo "Partition '%-.64s' doesn't exist" - spa "Particion '%-.64s' no existe" - swe "Det finns ingen partition som heter '%-.64s'" + +ER_UNUSED_5 + eng "" ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE eng "Failure while changing the type of replication repository: %s." @@ -6555,18 +6598,13 @@ ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO 23000 S1009 eng "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in a child table" ger "Fremdschlüssel-Beschränkung für Tabelle '%.192s', Datensatz '%-.192s' würde zu einem doppelten Eintrag in einer Kind-Tabelle führen" swe "FOREIGN KEY constraint för tabell '%.192s', posten '%-.192s' kan inte uppdatera en barntabell pÃ¥ grund av UNIQUE-test" + ER_SQLTHREAD_WITH_SECURE_SLAVE eng "Setting authentication options is not possible when only the Slave SQL Thread is being started." 
ER_TABLE_HAS_NO_FT eng "The table does not have FULLTEXT index to support this query" -ER_INNODB_FT_LIMIT - eng "InnoDB presently supports one FULLTEXT index per table" - -ER_INNODB_NO_FT_TEMP_TABLE - eng "Cannot create FULLTEXT index on temporary InnoDB table" - ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER eng "The system variable %.200s cannot be set in stored functions or triggers." @@ -6604,13 +6642,13 @@ ER_BAD_SLAVE_AUTO_POSITION eng "Parameters MASTER_LOG_FILE, MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active." ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON - eng "CHANGE MASTER TO AUTO_POSITION = 1 can only be executed when GTID_MODE = ON." + eng "CHANGE MASTER TO MASTER_AUTO_POSITION = 1 can only be executed when GTID_MODE = ON." ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET eng "Cannot execute statements with implicit commit inside a transaction when GTID_NEXT != AUTOMATIC or GTID_NEXT_LIST != NULL." -ER_GTID_MODE_2_OR_3_REQUIRES_DISABLE_GTID_UNSAFE_STATEMENTS_ON - eng "GTID_MODE = ON or GTID_MODE = UPGRADE_STEP_2 requires DISABLE_GTID_UNSAFE_STATEMENTS = 1." +ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON + eng "GTID_MODE = ON or GTID_MODE = UPGRADE_STEP_2 requires ENFORCE_GTID_CONSISTENCY = 1." ER_GTID_MODE_REQUIRES_BINLOG eng "GTID_MODE = ON or UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates." @@ -6628,13 +6666,13 @@ ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF eng "Found a Gtid_log_event or Previous_gtids_log_event when GTID_MODE = OFF." ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE - eng "Updates to non-transactional tables are forbidden when DISABLE_GTID_UNSAFE_STATEMENTS = 1." + eng "When ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional tables." ER_GTID_UNSAFE_CREATE_SELECT - eng "CREATE TABLE ... 
SELECT is forbidden when DISABLE_GTID_UNSAFE_STATEMENTS = 1." + eng "CREATE TABLE ... SELECT is forbidden when ENFORCE_GTID_CONSISTENCY = 1." ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION - eng "When DISABLE_GTID_UNSAFE_STATEMENTS = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1." + eng "When ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1." ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME eng "The value of GTID_MODE can only change one step at a time: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON. Also note that this value must be stepped up or down simultaneously on all servers; see the Manual for instructions." @@ -6652,6 +6690,241 @@ ER_UNKNOWN_EXPLAIN_FORMAT ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION 25006 eng "Cannot execute statement in a READ ONLY transaction." +ER_TOO_LONG_TABLE_PARTITION_COMMENT + eng "Comment for table partition '%-.64s' is too long (max = %lu)" + +ER_SLAVE_CONFIGURATION + eng "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MySQL error log." + +ER_INNODB_FT_LIMIT + eng "InnoDB presently supports one FULLTEXT index creation at a time" + +ER_INNODB_NO_FT_TEMP_TABLE + eng "Cannot create FULLTEXT index on temporary InnoDB table" + +ER_INNODB_FT_WRONG_DOCID_COLUMN + eng "Column '%-.192s' is of wrong type for an InnoDB FULLTEXT index" + +ER_INNODB_FT_WRONG_DOCID_INDEX + eng "Index '%-.192s' is of wrong type for an InnoDB FULLTEXT index" + +ER_INNODB_ONLINE_LOG_TOO_BIG + eng "Creating index '%-.192s' required more than 'innodb_online_alter_log_max_size' bytes of modification log. Please try again." 
+ +ER_UNKNOWN_ALTER_ALGORITHM + eng "Unknown ALGORITHM '%s'" + +ER_UNKNOWN_ALTER_LOCK + eng "Unknown LOCK type '%s'" + +ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS + eng "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL." + +ER_MTS_RECOVERY_FAILURE + eng "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MySQL error log." + +ER_MTS_RESET_WORKERS + eng "Cannot clean up worker info tables. Additional error messages can be found in the MySQL error log." + +ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 + eng "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted" + ger "Spaltenanzahl von %s.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt" + +ER_SLAVE_SILENT_RETRY_TRANSACTION + eng "Slave must silently retry current transaction" + +ER_DISCARD_FK_CHECKS_RUNNING + eng "There is a foreign key check running on table '%-.192s'. Cannot discard the table." + +ER_TABLE_SCHEMA_MISMATCH + eng "Schema mismatch (%s)" + +ER_TABLE_IN_SYSTEM_TABLESPACE + eng "Table '%-.192s' in system tablespace" + +ER_IO_READ_ERROR + eng "IO Read error: (%lu, %s) %s" + +ER_IO_WRITE_ERROR + eng "IO Write error: (%lu, %s) %s" + +ER_TABLESPACE_MISSING + eng "Tablespace is missing for table '%-.192s'" + +ER_TABLESPACE_EXISTS + eng "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT." + +ER_TABLESPACE_DISCARDED + eng "Tablespace has been discarded for table '%-.192s'" + +ER_INTERNAL_ERROR + eng "Internal error: '%-.192s'" + +ER_INNODB_IMPORT_ERROR + eng "ALTER TABLE '%-.192s' IMPORT TABLESPACE failed with error %lu : '%s'" + +ER_INNODB_INDEX_CORRUPT + eng "Index corrupt: %s" + +ER_INVALID_YEAR_COLUMN_LENGTH + eng "YEAR(%lu) column type is deprecated. Creating YEAR(4) column instead." 
+ rus "Тип YEAR(%lu) более не поддерживаетÑÑ, вмеÑто него будет Ñоздана колонка Ñ Ñ‚Ð¸Ð¿Ð¾Ð¼ YEAR(4)." + +ER_NOT_VALID_PASSWORD + eng "Your password does not satisfy the current policy requirements" + +ER_MUST_CHANGE_PASSWORD + eng "You must SET PASSWORD before executing this statement" + bgn "ТрÑбва първо да Ñи Ñмените паролата ÑÑŠÑ SET PASSWORD за да можете да изпълните тази команда" + +ER_FK_NO_INDEX_CHILD + eng "Failed to add the foreign key constaint. Missing index for constraint '%s' in the foreign table '%s'" + +ER_FK_NO_INDEX_PARENT + eng "Failed to add the foreign key constaint. Missing index for constraint '%s' in the referenced table '%s'" + +ER_FK_FAIL_ADD_SYSTEM + eng "Failed to add the foreign key constraint '%s' to system tables" + +ER_FK_CANNOT_OPEN_PARENT + eng "Failed to open the referenced table '%s'" + +ER_FK_INCORRECT_OPTION + eng "Failed to add the foreign key constraint on table '%s'. Incorrect options in FOREIGN KEY constraint '%s'" + +ER_FK_DUP_NAME + eng "Duplicate foreign key constraint name '%s'" + +ER_PASSWORD_FORMAT + eng "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function." 
+ +ER_FK_COLUMN_CANNOT_DROP + eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s'" + ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' benötigt" + +ER_FK_COLUMN_CANNOT_DROP_CHILD + eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table '%-.192s'" + ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' der Tabelle '%-.192s' benötigt" + +ER_FK_COLUMN_NOT_NULL + eng "Column '%-.192s' cannot be NOT NULL: needed in a foreign key constraint '%-.192s' SET NULL" + ger "Spalte '%-.192s' kann nicht NOT NULL sein: wird für eine Fremdschlüsselbeschränkung '%-.192s' SET NULL benötigt" + +ER_DUP_INDEX + eng "Duplicate index '%-.64s' defined on the table '%-.64s.%-.64s'. This is deprecated and will be disallowed in a future release." + +ER_FK_COLUMN_CANNOT_CHANGE + eng "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s'" + +ER_FK_COLUMN_CANNOT_CHANGE_CHILD + eng "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s' of table '%-.192s'" + +ER_FK_CANNOT_DELETE_PARENT + eng "Cannot delete rows from table which is parent in a foreign key constraint '%-.192s' of table '%-.192s'" + +ER_MALFORMED_PACKET + eng "Malformed communication packet." + +ER_READ_ONLY_MODE + eng "Running in read-only mode" + +ER_GTID_NEXT_TYPE_UNDEFINED_GROUP + eng "When GTID_NEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET GTID_NEXT before a transaction and forgot to set GTID_NEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current GTID_NEXT is '%s'." 
+ +ER_VARIABLE_NOT_SETTABLE_IN_SP + eng "The system variable %.200s cannot be set in stored procedures." + +ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF + eng "GTID_PURGED can only be set when GTID_MODE = ON." + +ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY + eng "GTID_PURGED can only be set when GTID_EXECUTED is empty." + +ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY + eng "GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients)." + +ER_GTID_PURGED_WAS_CHANGED + eng "GTID_PURGED was changed from '%s' to '%s'." + +ER_GTID_EXECUTED_WAS_CHANGED + eng "GTID_EXECUTED was changed from '%s' to '%s'." + +ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES + eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non replicated tables are written to." + +ER_ALTER_OPERATION_NOT_SUPPORTED 0A000 + eng "%s is not supported for this operation. Try %s." + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON 0A000 + eng "%s is not supported. Reason: %s. Try %s." 
+ +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY + eng "COPY algorithm requires a lock" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION + eng "Partition specific operations do not yet support LOCK/ALGORITHM" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME + eng "Columns participating in a foreign key are renamed" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE + eng "Cannot change column type INPLACE" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK + eng "Adding foreign keys needs foreign_key_checks=OFF" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE + eng "Creating unique indexes with IGNORE requires COPY algorithm to remove duplicate rows" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK + eng "Dropping a primary key is not allowed without also adding a new primary key" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC + eng "Adding an auto-increment column requires a lock" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS + eng "Cannot replace hidden FTS_DOC_ID with a user-visible one" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS + eng "Cannot drop or rename FTS_DOC_ID" + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS + eng "Fulltext index creation requires a lock" + +ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE + eng "sql_slave_skip_counter can not be set when the server is running with GTID_MODE = ON. Instead, for each transaction that you want to skip, generate an empty transaction with the same GTID as the transaction" + +ER_DUP_UNKNOWN_IN_INDEX 23000 + cze "Zdvojený klÃÄ (ÄÃslo klÃÄe '%-.192s')" + dan "Flere ens nøgler for indeks '%-.192s'" + nla "Dubbele ingang voor zoeksleutel '%-.192s'" + eng "Duplicate entry for key '%-.192s'" + est "Kattuv väärtus võtmele '%-.192s'" + fre "Duplicata du champ pour la clef '%-.192s'" + ger "Doppelter Eintrag für Schlüssel '%-.192s'" + greek "Διπλή εγγÏαφή για το κλειδί '%-.192s'" + hun "Duplikalt bejegyzes a '%-.192s' kulcs szerint." 
+ ita "Valore duplicato per la chiave '%-.192s'" + jpn "ã¯ç´¢å¼• '%-.192s' ã§é‡è¤‡ã—ã¦ã„ã¾ã™ã€‚" + kor "ì¤‘ë³µëœ ìž…ë ¥ ê°’: key '%-.192s'" + nor "Like verdier for nøkkel '%-.192s'" + norwegian-ny "Like verdiar for nykkel '%-.192s'" + pol "Powtórzone wystÄ…pienie dla klucza '%-.192s'" + por "Entrada duplicada para a chave '%-.192s'" + rum "Cimpul e duplicat pentru cheia '%-.192s'" + rus "ДублирующаÑÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ по ключу '%-.192s'" + serbian "Dupliran unos za kljuÄ '%-.192s'" + slo "Opakovaný kÄ¾ÃºÄ (ÄÃslo kľúÄa '%-.192s')" + spa "Entrada duplicada para la clave '%-.192s'" + swe "Dublett för nyckel '%-.192s'" + ukr "Дублюючий Ð·Ð°Ð¿Ð¸Ñ Ð´Ð»Ñ ÐºÐ»ÑŽÑ‡Ð° '%-.192s'" + +ER_IDENT_CAUSES_TOO_LONG_PATH + eng "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'." + +ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL + eng "cannot silently convert NULL values, as required in this SQL_MODE" + # # MariaDB error messages section starts here # @@ -6691,12 +6964,12 @@ ER_UNKNOWN_OPTION eng "Unknown option '%-.64s'" ER_BAD_OPTION_VALUE eng "Incorrect value '%-.64s' for option '%-.64s'" -ER_NOT_USED_ERROR_MESSAGE +ER_UNUSED_6 eng "" -ER_NOT_USED_ERROR_MESSAGE2 +ER_UNUSED_7 + eng "" +ER_UNUSED_8 eng "" -ER_CANT_DO_ONLINE - eng "Can't execute the given '%s' command as online" ER_DATA_OVERFLOW 22003 eng "Got overflow when converting '%-.128s' to %-.32s. Value truncated." ER_DATA_TRUNCATED 22003 @@ -6721,7 +6994,7 @@ ER_VIEW_ORDERBY_IGNORED eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already." 
ER_CONNECTION_KILLED 70100 eng "Connection was killed" -ER_INTERNAL_ERROR +ER_UNSED eng "Internal error: '%-.192s'" ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION eng "Cannot modify @@session.skip_replication inside a transaction" diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index edc33c4d63b..9437db6c318 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -190,7 +190,7 @@ extern "C" sig_handler handle_fatal_signal(int sig) "Some pointers may be invalid and cause the dump to abort.\n"); my_safe_printf_stderr("Query (%p): ", thd->query()); - my_safe_print_str(thd->query(), min(65536U, thd->query_length())); + my_safe_print_str(thd->query(), MY_MIN(65536U, thd->query_length())); my_safe_printf_stderr("\nConnection ID (thread ID): %lu\n", (ulong) thd->thread_id); my_safe_printf_stderr("Status: %s\n\n", kreason); diff --git a/sql/slave.cc b/sql/slave.cc index 55fe53345da..d46be570b5e 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -306,7 +306,8 @@ handle_slave_init(void *arg __attribute__((unused))) sql_print_warning("Failed to load slave replication state from table " "%s.%s: %u: %s", "mysql", rpl_gtid_slave_state_table_name.str, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); mysql_mutex_lock(&LOCK_thread_count); delete thd; @@ -473,7 +474,7 @@ int init_recovery(Master_info* mi, const char** errmsg) Relay_log_info *rli= &mi->rli; if (rli->group_master_log_name[0]) { - mi->master_log_pos= max(BIN_LOG_HEADER_SIZE, + mi->master_log_pos= MY_MAX(BIN_LOG_HEADER_SIZE, rli->group_master_log_pos); strmake_buf(mi->master_log_name, rli->group_master_log_name); @@ -925,7 +926,8 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start, keep them in case connection with GTID fails and user wants to go back and continue with previous old-style replication coordinates). 
*/ - mi->master_log_pos = max(BIN_LOG_HEADER_SIZE, mi->rli.group_master_log_pos); + mi->master_log_pos = MY_MAX(BIN_LOG_HEADER_SIZE, + mi->rli.group_master_log_pos); strmake(mi->master_log_name, mi->rli.group_master_log_name, sizeof(mi->master_log_name)-1); purge_relay_logs(&mi->rli, NULL, 0, &errmsg); @@ -2592,13 +2594,13 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, slave is 2. At SHOW SLAVE STATUS time, assume that the difference between timestamp of slave and rli->last_master_timestamp is 0 (i.e. they are in the same second), then we get 0-(2-1)=-1 as a result. - This confuses users, so we don't go below 0: hence the max(). + This confuses users, so we don't go below 0: hence the MY_MAX(). last_master_timestamp == 0 (an "impossible" timestamp 1970) is a special marker to say "consider we have caught up". */ protocol->store((longlong)(mi->rli.last_master_timestamp ? - max(0, time_diff) : 0)); + MY_MAX(0, time_diff) : 0)); } else { @@ -2987,7 +2989,7 @@ static int has_temporary_error(THD *thd) DBUG_ENTER("has_temporary_error"); DBUG_EXECUTE_IF("all_errors_are_temporary_errors", - if (thd->stmt_da->is_error()) + if (thd->get_stmt_da()->is_error()) { thd->clear_error(); my_error(ER_LOCK_DEADLOCK, MYF(0)); @@ -3006,16 +3008,16 @@ static int has_temporary_error(THD *thd) currently, InnoDB deadlock detected by InnoDB or lock wait timeout (innodb_lock_wait_timeout exceeded */ - if (thd->stmt_da->sql_errno() == ER_LOCK_DEADLOCK || - thd->stmt_da->sql_errno() == ER_LOCK_WAIT_TIMEOUT) + if (thd->get_stmt_da()->sql_errno() == ER_LOCK_DEADLOCK || + thd->get_stmt_da()->sql_errno() == ER_LOCK_WAIT_TIMEOUT) DBUG_RETURN(1); #ifdef HAVE_NDB_BINLOG /* currently temporary error set in ndbcluster */ - List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list()); - MYSQL_ERROR *err; + List_iterator_fast<Sql_condition> it(thd->warning_info->warn_list()); + Sql_condition *err; while ((err= it++)) { DBUG_PRINT("info", ("has condition %d %s", 
err->get_sql_errno(), @@ -3362,7 +3364,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli) exec_res= 0; rli->cleanup_context(thd, 1); /* chance for concurrent connection to get more locks */ - slave_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE), + slave_sleep(thd, MY_MIN(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE), sql_slave_killed, rli); mysql_mutex_lock(&rli->data_lock); // because of SHOW STATUS rli->trans_retries++; @@ -3580,9 +3582,10 @@ pthread_handler_t handle_slave_io(void *arg) /* Load the set of seen GTIDs, if we did not already. */ if (rpl_load_gtid_slave_state(thd)) { - mi->report(ERROR_LEVEL, thd->stmt_da->sql_errno(), + mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "Unable to load replication GTID slave state from mysql.%s: %s", - rpl_gtid_slave_state_table_name.str, thd->stmt_da->message()); + rpl_gtid_slave_state_table_name.str, + thd->get_stmt_da()->message()); /* If we are using old-style replication, we can continue, even though we then will not be able to record the GTIDs we receive. But if using GTID, @@ -4174,18 +4177,19 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, if (check_temp_dir(rli->slave_patternload_file)) { - rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "Unable to use slave's temporary directory %s - %s", - slave_load_tmpdir, thd->stmt_da->message()); + slave_load_tmpdir, thd->get_stmt_da()->message()); goto err; } /* Load the set of seen GTIDs, if we did not already. 
*/ if (rpl_load_gtid_slave_state(thd)) { - rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "Unable to load replication GTID slave state from mysql.%s: %s", - rpl_gtid_slave_state_table_name.str, thd->stmt_da->message()); + rpl_gtid_slave_state_table_name.str, + thd->get_stmt_da()->message()); /* If we are using old-style replication, we can continue, even though we then will not be able to record the GTIDs we receive. But if using GTID, @@ -4201,7 +4205,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, execute_init_command(thd, &opt_init_slave, &LOCK_sys_init_slave); if (thd->is_slave_error) { - rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(), + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "Slave SQL thread aborted. Can't execute init_slave query"); goto err; } @@ -4269,20 +4273,20 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, if (thd->is_error()) { - char const *const errmsg= thd->stmt_da->message(); + char const *const errmsg= thd->get_stmt_da()->message(); DBUG_PRINT("info", - ("thd->stmt_da->sql_errno()=%d; rli->last_error.number=%d", - thd->stmt_da->sql_errno(), last_errno)); + ("thd->get_stmt_da()->sql_errno()=%d; rli->last_error.number=%d", + thd->get_stmt_da()->sql_errno(), last_errno)); if (last_errno == 0) { /* This function is reporting an error which was not reported while executing exec_relay_log_event(). */ - rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(), "%s", errmsg); + rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "%s", errmsg); } - else if (last_errno != thd->stmt_da->sql_errno()) + else if (last_errno != thd->get_stmt_da()->sql_errno()) { /* * An error was reported while executing exec_relay_log_event() @@ -4291,13 +4295,14 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME, * what caused the problem. 
*/ sql_print_error("Slave (additional info): %s Error_code: %d", - errmsg, thd->stmt_da->sql_errno()); + errmsg, thd->get_stmt_da()->sql_errno()); } } /* Print any warnings issued */ - List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list()); - MYSQL_ERROR *err; + Diagnostics_area::Sql_condition_iterator it= + thd->get_stmt_da()->sql_conditions(); + const Sql_condition *err; /* Added controlled slave thread cancel for replication of user-defined variables. @@ -5744,7 +5749,7 @@ static IO_CACHE *reopen_relay_log(Relay_log_info *rli, const char **errmsg) relay_log_pos Current log pos pending Number of bytes already processed from the event */ - rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE); + rli->event_relay_log_pos= MY_MAX(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE); my_b_seek(cur_log,rli->event_relay_log_pos); DBUG_RETURN(cur_log); } diff --git a/sql/sp.cc b/sql/sp.cc index 978d7a2eb13..c1c162267a8 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -325,7 +325,7 @@ Stored_routine_creation_ctx::load_from_db(THD *thd, if (invalid_creation_ctx) { push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_SR_INVALID_CREATION_CTX, ER(ER_SR_INVALID_CREATION_CTX), (const char *) db_name, @@ -683,9 +683,9 @@ public: virtual bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); }; bool @@ -693,13 +693,13 @@ Silence_deprecated_warning::handle_condition( THD *, uint sql_errno, const char*, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char*, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; if (sql_errno == ER_WARN_DEPRECATED_SYNTAX && - level == MYSQL_ERROR::WARN_LEVEL_WARN) + level == Sql_condition::WARN_LEVEL_WARN) return TRUE; return FALSE; @@ -772,9 +772,9 @@ 
public: virtual bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* message, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); bool error_caught() const { return m_error_caught; } @@ -786,9 +786,9 @@ bool Bad_db_error_handler::handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* message, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { if (sql_errno == ER_BAD_DB_ERROR) { @@ -1390,9 +1390,9 @@ public: bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { if (sql_errno == ER_NO_SUCH_TABLE || sql_errno == ER_NO_SUCH_TABLE_IN_ENGINE || @@ -1757,7 +1757,7 @@ sp_exist_routines(THD *thd, TABLE_LIST *routines, bool any) &thd->sp_proc_cache, FALSE) != NULL || sp_find_routine(thd, TYPE_ENUM_FUNCTION, name, &thd->sp_func_cache, FALSE) != NULL; - thd->warning_info->clear_warning_info(thd->query_id); + thd->get_stmt_da()->clear_warning_info(thd->query_id); if (sp_object_found) { if (any) diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 6b591edca5e..cb689735925 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -773,7 +773,7 @@ sp_head::~sp_head() for (uint ip = 0 ; (i = get_instr(ip)) ; ip++) delete i; delete_dynamic(&m_instr); - m_pcont->destroy(); + delete m_pcont; free_items(); /* @@ -976,7 +976,7 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str) thd->query_name_consts= 0; for (Item_splocal **splocal= sp_vars_uses.front(); - splocal < sp_vars_uses.back(); splocal++) + splocal <= sp_vars_uses.back(); splocal++) { Item *val; @@ -1079,105 +1079,6 @@ void sp_head::recursion_level_error(THD *thd) } -/** - Find an SQL handler for any condition (warning or 
error) after execution - of a stored routine instruction. Basically, this function looks for an - appropriate SQL handler in RT-contexts. If an SQL handler is found, it is - remembered in the RT-context for future activation (the context can be - inactive at the moment). - - If there is no pending condition, the function just returns. - - If there was an error during the execution, an SQL handler for it will be - searched within the current and outer scopes. - - There might be several errors in the Warning Info (that's possible by using - SIGNAL/RESIGNAL in nested scopes) -- the function is looking for an SQL - handler for the latest (current) error only. - - If there was a warning during the execution, an SQL handler for it will be - searched within the current scope only. - - If several warnings were thrown during the execution and there are different - SQL handlers for them, it is not determined which SQL handler will be chosen. - Only one SQL handler will be executed. - - If warnings and errors were thrown during the execution, the error takes - precedence. I.e. error handler will be executed. If there is no handler - for that error, condition will remain unhandled. - - Once a warning or an error has been handled it is not removed from - Warning Info. - - According to The Standard (quoting PeterG): - - An SQL procedure statement works like this ... - SQL/Foundation 13.5 <SQL procedure statement> - (General Rules) (greatly summarized) says: - (1) Empty diagnostics area, thus clearing the condition. - (2) Execute statement. - During execution, if Exception Condition occurs, - set Condition Area = Exception Condition and stop - statement. - During execution, if No Data occurs, - set Condition Area = No Data Condition and continue - statement. - During execution, if Warning occurs, - and Condition Area is not already full due to - an earlier No Data condition, set Condition Area - = Warning and continue statement. - (3) Finish statement. 
- At end of execution, if Condition Area is not - already full due to an earlier No Data or Warning, - set Condition Area = Successful Completion. - In effect, this system means there is a precedence: - Exception trumps No Data, No Data trumps Warning, - Warning trumps Successful Completion. - - NB: "Procedure statements" include any DDL or DML or - control statements. So CREATE and DELETE and WHILE - and CALL and RETURN are procedure statements. But - DECLARE and END are not procedure statements. - - @param thd thread handle - @param ctx runtime context of the stored routine -*/ - -static void -find_handler_after_execution(THD *thd, sp_rcontext *ctx) -{ - if (thd->is_error()) - { - ctx->find_handler(thd, - thd->stmt_da->sql_errno(), - thd->stmt_da->get_sqlstate(), - MYSQL_ERROR::WARN_LEVEL_ERROR, - thd->stmt_da->message()); - } - else if (thd->warning_info->statement_warn_count()) - { - List_iterator<MYSQL_ERROR> it(thd->warning_info->warn_list()); - MYSQL_ERROR *err; - while ((err= it++)) - { - if ((err->get_level() != MYSQL_ERROR::WARN_LEVEL_WARN && - err->get_level() != MYSQL_ERROR::WARN_LEVEL_NOTE) || - err->handled()) - continue; - - if (ctx->find_handler(thd, - err->get_sql_errno(), - err->get_sqlstate(), - err->get_level(), - err->get_message_text())) - { - err->mark_handled(); - break; - } - } - } -} - /** Execute the routine. The main instruction jump loop is there. @@ -1224,8 +1125,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success) String old_packet; Reprepare_observer *save_reprepare_observer= thd->m_reprepare_observer; Object_creation_ctx *saved_creation_ctx; - Warning_info *saved_warning_info; - Warning_info warning_info(thd->warning_info->warn_id(), false); + Diagnostics_area *da= thd->get_stmt_da(); + Warning_info sp_wi(da->warning_info_id(), false, true); /* Just reporting a stack overrun error @@ -1296,9 +1197,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success) old_arena= thd->stmt_arena; /* Push a new warning information area. 
*/ - warning_info.append_warning_info(thd, thd->warning_info); - saved_warning_info= thd->warning_info; - thd->warning_info= &warning_info; + da->copy_sql_conditions_to_wi(thd, &sp_wi); + da->push_warning_info(&sp_wi); /* Switch query context. This has to be done early as this is sometimes @@ -1398,7 +1298,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success) } /* Reset number of warnings for this query. */ - thd->warning_info->reset_for_next_command(); + thd->get_stmt_da()->reset_for_next_command(); DBUG_PRINT("execute", ("Instruction %u", ip)); @@ -1449,19 +1349,10 @@ sp_head::execute(THD *thd, bool merge_da_on_success) errors are not catchable by SQL handlers) or the connection has been killed during execution. */ - if (!thd->is_fatal_error && !thd->killed_errno()) + if (!thd->is_fatal_error && !thd->killed_errno() && + ctx->handle_sql_condition(thd, &ip, i)) { - /* - Find SQL handler in the appropriate RT-contexts: - - warnings can be handled by SQL handlers within - the current scope only; - - errors can be handled by any SQL handler from outer scope. - */ - find_handler_after_execution(thd, ctx); - - /* If found, activate handler for the current scope. */ - if (ctx->activate_handler(thd, &ip, i, &execute_arena, &backup_arena)) - err_status= FALSE; + err_status= FALSE; } /* Reset sp_rcontext::end_partial_result_set flag. */ @@ -1506,9 +1397,40 @@ sp_head::execute(THD *thd, bool merge_da_on_success) - if there was an exception during execution, warning info should be propagated to the caller in any case. */ + da->pop_warning_info(); + if (err_status || merge_da_on_success) - saved_warning_info->merge_with_routine_info(thd, thd->warning_info); - thd->warning_info= saved_warning_info; + { + /* + If a routine body is empty or if a routine did not generate any warnings, + do not duplicate our own contents by appending the contents of the called + routine. We know that the called routine did not change its warning info. 
+ + On the other hand, if the routine body is not empty and some statement in + the routine generates a warning or uses tables, warning info is guaranteed + to have changed. In this case we know that the routine warning info + contains only new warnings, and thus we perform a copy. + */ + if (da->warning_info_changed(&sp_wi)) + { + /* + If the invocation of the routine was a standalone statement, + rather than a sub-statement, in other words, if it's a CALL + of a procedure, rather than invocation of a function or a + trigger, we need to clear the current contents of the caller's + warning info. + + This is per MySQL rules: if a statement generates a warning, + warnings from the previous statement are flushed. Normally + it's done in push_warning(). However, here we don't use + push_warning() to avoid invocation of condition handlers or + escalation of warnings to errors. + */ + da->opt_clear_warning_info(thd->query_id); + da->copy_sql_conditions_from_wi(thd, &sp_wi); + da->remove_marked_sql_conditions(); + } + } done: DBUG_PRINT("info", ("err_status: %d killed: %d is_slave_error: %d report_error: %d", @@ -1716,8 +1638,7 @@ sp_head::execute_trigger(THD *thd, init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(0)); thd->set_n_backup_active_arena(&call_arena, &backup_arena); - if (!(nctx= new sp_rcontext(m_pcont, 0, octx)) || - nctx->init(thd)) + if (!(nctx= sp_rcontext::create(thd, m_pcont, NULL))) { err_status= TRUE; goto err_with_cleanup; @@ -1833,8 +1754,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(0)); thd->set_n_backup_active_arena(&call_arena, &backup_arena); - if (!(nctx= new sp_rcontext(m_pcont, return_value_fld, octx)) || - nctx->init(thd)) + if (!(nctx= sp_rcontext::create(thd, m_pcont, return_value_fld))) { thd->restore_active_arena(&call_arena, &backup_arena); err_status= TRUE; @@ -1962,7 +1882,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, if 
(mysql_bin_log.write(&qinfo) && thd->binlog_evt_union.unioned_events_trans) { - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, "Invoked ROUTINE modified a transactional table but MySQL " "failed to reflect this change in the binary log"); err_status= TRUE; @@ -2051,9 +1971,9 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) if (! octx) { /* Create a temporary old context. */ - if (!(octx= new sp_rcontext(m_pcont, NULL, octx)) || octx->init(thd)) + if (!(octx= sp_rcontext::create(thd, m_pcont, NULL))) { - delete octx; /* Delete octx if it was init() that failed. */ + DBUG_PRINT("error", ("Could not create octx")); DBUG_RETURN(TRUE); } @@ -2066,8 +1986,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) thd->spcont->callers_arena= thd; } - if (!(nctx= new sp_rcontext(m_pcont, NULL, octx)) || - nctx->init(thd)) + if (!(nctx= sp_rcontext::create(thd, m_pcont, NULL))) { delete nctx; /* Delete nctx if it was init() that failed. 
*/ thd->spcont= save_spcont; @@ -2090,12 +2009,12 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) if (!arg_item) break; - sp_variable_t *spvar= m_pcont->find_variable(i); + sp_variable *spvar= m_pcont->find_variable(i); if (!spvar) continue; - if (spvar->mode != sp_param_in) + if (spvar->mode != sp_variable::MODE_IN) { Settable_routine_parameter *srp= arg_item->get_settable_routine_parameter(); @@ -2107,10 +2026,10 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) break; } - srp->set_required_privilege(spvar->mode == sp_param_inout); + srp->set_required_privilege(spvar->mode == sp_variable::MODE_INOUT); } - if (spvar->mode == sp_param_out) + if (spvar->mode == sp_variable::MODE_OUT) { Item_null *null_item= new Item_null(); Item *tmp_item= null_item; @@ -2118,6 +2037,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) if (!null_item || nctx->set_variable(thd, i, &tmp_item)) { + DBUG_PRINT("error", ("set variable failed")); err_status= TRUE; break; } @@ -2126,6 +2046,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) { if (nctx->set_variable(thd, i, it_args.ref())) { + DBUG_PRINT("error", ("set variable 2 failed")); err_status= TRUE; break; } @@ -2141,9 +2062,9 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) if (!thd->in_sub_stmt) { - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); thd->is_error() ? 
trans_rollback_stmt(thd) : trans_commit_stmt(thd); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); } thd_proc_info(thd, "closing tables"); @@ -2182,7 +2103,10 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) #endif if (!err_status) + { err_status= execute(thd, TRUE); + DBUG_PRINT("info", ("execute returned %d", (int) err_status)); + } if (save_log_general) thd->variables.option_bits &= ~OPTION_LOG_OFF; @@ -2210,9 +2134,9 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) if (!arg_item) break; - sp_variable_t *spvar= m_pcont->find_variable(i); + sp_variable *spvar= m_pcont->find_variable(i); - if (spvar->mode == sp_param_in) + if (spvar->mode == sp_variable::MODE_IN) continue; Settable_routine_parameter *srp= @@ -2222,6 +2146,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args) if (srp->set_value(thd, octx, nctx->get_item_addr(i))) { + DBUG_PRINT("error", ("set value failed")); err_status= TRUE; break; } @@ -2372,7 +2297,7 @@ sp_head::restore_lex(THD *thd) Put the instruction on the backpatch list, associated with the label. */ int -sp_head::push_backpatch(sp_instr *i, sp_label_t *lab) +sp_head::push_backpatch(sp_instr *i, sp_label *lab) { bp_t *bp= (bp_t *)sql_alloc(sizeof(bp_t)); @@ -2388,7 +2313,7 @@ sp_head::push_backpatch(sp_instr *i, sp_label_t *lab) the current position. 
*/ void -sp_head::backpatch(sp_label_t *lab) +sp_head::backpatch(sp_label *lab) { bp_t *bp; uint dest= instructions(); @@ -2400,7 +2325,7 @@ sp_head::backpatch(sp_label_t *lab) if (bp->lab == lab) { DBUG_PRINT("info", ("backpatch: (m_ip %d, label 0x%lx <%s>) to dest %d", - bp->instr->m_ip, (ulong) lab, lab->name, dest)); + bp->instr->m_ip, (ulong) lab, lab->name.str, dest)); bp->instr->backpatch(dest, lab->ctx); } } @@ -2667,7 +2592,7 @@ sp_head::show_create_routine(THD *thd, int type) Item_empty_string *stmt_fld= new Item_empty_string(col3_caption, - max(m_defstr.length, 1024)); + MY_MAX(m_defstr.length, 1024)); stmt_fld->maybe_null= TRUE; @@ -2867,7 +2792,7 @@ sp_head::show_routine_code(THD *thd) field_list.push_back(new Item_uint("Pos", 9)); // 1024 is for not to confuse old clients field_list.push_back(new Item_empty_string("Instruction", - max(buffer.length(), 1024))); + MY_MAX(buffer.length(), 1024))); if (protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); @@ -2888,7 +2813,7 @@ sp_head::show_routine_code(THD *thd) Since this is for debugging purposes only, we don't bother to introduce a special error code for it. */ - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, tmp); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, tmp); } protocol->prepare_for_resend(); protocol->store((longlong)ip); @@ -2995,9 +2920,9 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, /* Here we also commit or rollback the current statement. */ if (! thd->in_sub_stmt) { - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); thd->is_error() ? 
trans_rollback_stmt(thd) : trans_commit_stmt(thd); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); } thd_proc_info(thd, "closing tables"); close_thread_tables(thd); @@ -3031,10 +2956,10 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, open_tables stage. */ if (!res || !thd->is_error() || - (thd->stmt_da->sql_errno() != ER_CANT_REOPEN_TABLE && - thd->stmt_da->sql_errno() != ER_NO_SUCH_TABLE && - thd->stmt_da->sql_errno() != ER_NO_SUCH_TABLE_IN_ENGINE && - thd->stmt_da->sql_errno() != ER_UPDATE_TABLE_USED)) + (thd->get_stmt_da()->sql_errno() != ER_CANT_REOPEN_TABLE && + thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE && + thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE_IN_ENGINE && + thd->get_stmt_da()->sql_errno() != ER_UPDATE_TABLE_USED)) thd->stmt_arena->state= Query_arena::STMT_EXECUTED; /* @@ -3067,7 +2992,8 @@ int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables) Check whenever we have access to tables for this statement and open and lock them before executing instructions core function. */ - if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE) + if (open_temporary_tables(thd, tables) || + check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE) || open_and_lock_tables(thd, tables, TRUE, 0)) result= -1; else @@ -3079,7 +3005,7 @@ int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables) return result; } -uint sp_instr::get_cont_dest() +uint sp_instr::get_cont_dest() const { return (m_ip+1); } @@ -3121,7 +3047,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) { res= m_lex_keeper.reset_lex_and_exec_core(thd, nextp, FALSE, this); - if (thd->stmt_da->is_eof()) + if (thd->get_stmt_da()->is_eof()) { /* Finalize server status flags after executing a statement. 
*/ thd->update_server_status(); @@ -3132,7 +3058,8 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) query_cache_end_of_result(thd); mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS, - thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() : 0, + thd->get_stmt_da()->is_error() ? + thd->get_stmt_da()->sql_errno() : 0, command_name[COM_QUERY].str); if (!res && unlikely(thd->enable_slow_log)) @@ -3144,7 +3071,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp) thd->query_name_consts= 0; if (!thd->is_error()) - thd->stmt_da->reset_diagnostics_area(); + thd->get_stmt_da()->reset_diagnostics_area(); } DBUG_RETURN(res || thd->is_error()); } @@ -3237,7 +3164,7 @@ sp_instr_set::print(String *str) { /* set name@offset ... */ int rsrv = SP_INSTR_UINT_MAXLEN+6; - sp_variable_t *var = m_ctx->find_variable(m_offset); + sp_variable *var = m_ctx->find_variable(m_offset); /* 'var' should always be non-null, but just in case... */ if (var) @@ -3290,7 +3217,7 @@ sp_instr_set_trigger_field::print(String *str) sp_instr_opt_meta */ -uint sp_instr_opt_meta::get_cont_dest() +uint sp_instr_opt_meta::get_cont_dest() const { return m_cont_dest; } @@ -3471,6 +3398,14 @@ int sp_instr_freturn::exec_core(THD *thd, uint *nextp) { /* + RETURN is a "procedure statement" (in terms of the SQL standard). + That means, Diagnostics Area should be clean before its execution. + */ + + Diagnostics_area *da= thd->get_stmt_da(); + da->clear_warning_info(da->warning_info_id()); + + /* Change <next instruction pointer>, so that this will be the last instruction in the stored function. 
*/ @@ -3508,14 +3443,12 @@ int sp_instr_hpush_jump::execute(THD *thd, uint *nextp) { DBUG_ENTER("sp_instr_hpush_jump::execute"); - List_iterator_fast<sp_cond_type_t> li(m_cond); - sp_cond_type_t *p; - while ((p= li++)) - thd->spcont->push_handler(p, m_ip+1, m_type); + int ret= thd->spcont->push_handler(m_handler, m_ip + 1); *nextp= m_dest; - DBUG_RETURN(0); + + DBUG_RETURN(ret); } @@ -3525,27 +3458,22 @@ sp_instr_hpush_jump::print(String *str) /* hpush_jump dest fsize type */ if (str->reserve(SP_INSTR_UINT_MAXLEN*2 + 21)) return; + str->qs_append(STRING_WITH_LEN("hpush_jump ")); str->qs_append(m_dest); str->qs_append(' '); str->qs_append(m_frame); - switch (m_type) { - case SP_HANDLER_NONE: - str->qs_append(STRING_WITH_LEN(" NONE")); // This would be a bug - break; - case SP_HANDLER_EXIT: + + switch (m_handler->type) { + case sp_handler::EXIT: str->qs_append(STRING_WITH_LEN(" EXIT")); break; - case SP_HANDLER_CONTINUE: + case sp_handler::CONTINUE: str->qs_append(STRING_WITH_LEN(" CONTINUE")); break; - case SP_HANDLER_UNDO: - str->qs_append(STRING_WITH_LEN(" UNDO")); - break; default: - // This would be a bug as well - str->qs_append(STRING_WITH_LEN(" UNKNOWN:")); - str->qs_append(m_type); + // The handler type must be either CONTINUE or EXIT. + DBUG_ASSERT(0); } } @@ -3573,7 +3501,7 @@ sp_instr_hpush_jump::opt_mark(sp_head *sp, List<sp_instr> *leads) above, so we start on m_dest+1 here. m_opt_hpop is the hpop marking the end of the handler scope. */ - if (m_type == SP_HANDLER_CONTINUE) + if (m_handler->type == sp_handler::CONTINUE) { for (uint scope_ip= m_dest+1; scope_ip <= m_opt_hpop; scope_ip++) sp->add_mark_lead(scope_ip, leads); @@ -3615,13 +3543,11 @@ int sp_instr_hreturn::execute(THD *thd, uint *nextp) { DBUG_ENTER("sp_instr_hreturn::execute"); - if (m_dest) - *nextp= m_dest; - else - { - *nextp= thd->spcont->pop_hstack(); - } - thd->spcont->exit_handler(); + + uint continue_ip= thd->spcont->exit_handler(thd->get_stmt_da()); + + *nextp= m_dest ? 
m_dest : continue_ip; + DBUG_RETURN(0); } @@ -3633,12 +3559,17 @@ sp_instr_hreturn::print(String *str) if (str->reserve(SP_INSTR_UINT_MAXLEN*2 + 9)) return; str->qs_append(STRING_WITH_LEN("hreturn ")); - str->qs_append(m_frame); if (m_dest) { - str->qs_append(' '); + // NOTE: this is legacy: hreturn instruction for EXIT handler + // should print out 0 as frame index. + str->qs_append(STRING_WITH_LEN("0 ")); str->qs_append(m_dest); } + else + { + str->qs_append(m_frame); + } } @@ -3670,41 +3601,32 @@ sp_instr_hreturn::opt_mark(sp_head *sp, List<sp_instr> *leads) int sp_instr_cpush::execute(THD *thd, uint *nextp) { - Query_arena backup_arena; DBUG_ENTER("sp_instr_cpush::execute"); - /* - We should create cursors in the callers arena, as - it could be (and usually is) used in several instructions. - */ - thd->set_n_backup_active_arena(thd->spcont->callers_arena, &backup_arena); - - thd->spcont->push_cursor(&m_lex_keeper, this); - - thd->restore_active_arena(thd->spcont->callers_arena, &backup_arena); + int ret= thd->spcont->push_cursor(&m_lex_keeper, this); *nextp= m_ip+1; - DBUG_RETURN(0); + DBUG_RETURN(ret); } void sp_instr_cpush::print(String *str) { - LEX_STRING n; - my_bool found= m_ctx->find_cursor(m_cursor, &n); + const LEX_STRING *cursor_name= m_ctx->find_cursor(m_cursor); + /* cpush name@offset */ uint rsrv= SP_INSTR_UINT_MAXLEN+7; - if (found) - rsrv+= n.length; + if (cursor_name) + rsrv+= cursor_name->length; if (str->reserve(rsrv)) return; str->qs_append(STRING_WITH_LEN("cpush ")); - if (found) + if (cursor_name) { - str->qs_append(n.str, n.length); + str->qs_append(cursor_name->str, cursor_name->length); str->qs_append('@'); } str->qs_append(m_cursor); @@ -3792,19 +3714,19 @@ sp_instr_copen::exec_core(THD *thd, uint *nextp) void sp_instr_copen::print(String *str) { - LEX_STRING n; - my_bool found= m_ctx->find_cursor(m_cursor, &n); + const LEX_STRING *cursor_name= m_ctx->find_cursor(m_cursor); + /* copen name@offset */ uint rsrv= SP_INSTR_UINT_MAXLEN+7; - 
if (found) - rsrv+= n.length; + if (cursor_name) + rsrv+= cursor_name->length; if (str->reserve(rsrv)) return; str->qs_append(STRING_WITH_LEN("copen ")); - if (found) + if (cursor_name) { - str->qs_append(n.str, n.length); + str->qs_append(cursor_name->str, cursor_name->length); str->qs_append('@'); } str->qs_append(m_cursor); @@ -3834,19 +3756,19 @@ sp_instr_cclose::execute(THD *thd, uint *nextp) void sp_instr_cclose::print(String *str) { - LEX_STRING n; - my_bool found= m_ctx->find_cursor(m_cursor, &n); + const LEX_STRING *cursor_name= m_ctx->find_cursor(m_cursor); + /* cclose name@offset */ uint rsrv= SP_INSTR_UINT_MAXLEN+8; - if (found) - rsrv+= n.length; + if (cursor_name) + rsrv+= cursor_name->length; if (str->reserve(rsrv)) return; str->qs_append(STRING_WITH_LEN("cclose ")); - if (found) + if (cursor_name) { - str->qs_append(n.str, n.length); + str->qs_append(cursor_name->str, cursor_name->length); str->qs_append('@'); } str->qs_append(m_cursor); @@ -3875,21 +3797,21 @@ sp_instr_cfetch::execute(THD *thd, uint *nextp) void sp_instr_cfetch::print(String *str) { - List_iterator_fast<struct sp_variable> li(m_varlist); - sp_variable_t *pv; - LEX_STRING n; - my_bool found= m_ctx->find_cursor(m_cursor, &n); + List_iterator_fast<sp_variable> li(m_varlist); + sp_variable *pv; + const LEX_STRING *cursor_name= m_ctx->find_cursor(m_cursor); + /* cfetch name@offset vars... 
*/ uint rsrv= SP_INSTR_UINT_MAXLEN+8; - if (found) - rsrv+= n.length; + if (cursor_name) + rsrv+= cursor_name->length; if (str->reserve(rsrv)) return; str->qs_append(STRING_WITH_LEN("cfetch ")); - if (found) + if (cursor_name) { - str->qs_append(n.str, n.length); + str->qs_append(cursor_name->str, cursor_name->length); str->qs_append('@'); } str->qs_append(m_cursor); diff --git a/sql/sp_head.h b/sql/sp_head.h index 409db33ef02..77adbf091b8 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -30,8 +30,9 @@ #include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */ #include "sql_class.h" // THD, set_var.h: THD #include "set_var.h" // Item -#include "sp.h" +#include "sp_pcontext.h" // sp_pcontext #include <stddef.h> +#include "sp.h" /** @defgroup Stored_Routines Stored Routines @@ -39,6 +40,11 @@ @{ */ +// Values for the type enum. This reflects the order of the enum declaration +// in the CREATE TABLE command. +//#define TYPE_ENUM_FUNCTION 1 #define TYPE_ENUM_PROCEDURE 2 #define +//TYPE_ENUM_TRIGGER 3 #define TYPE_ENUM_PROXY 4 + Item_result sp_map_result_type(enum enum_field_types type); @@ -48,12 +54,9 @@ sp_map_item_type(enum enum_field_types type); uint sp_get_flags_for_command(LEX *lex); -struct sp_label; class sp_instr; class sp_instr_opt_meta; class sp_instr_jump_if_not; -struct sp_cond_type; -struct sp_variable; /*************************************************************************/ @@ -274,6 +277,15 @@ public: */ Security_context m_security_ctx; + /** + List of all items (Item_trigger_field objects) representing fields in + old/new version of row in trigger. We use this list for checking whenever + all such fields are valid at trigger creation time and for binding these + fields to TABLE object at table open (although for latter pointer to table + being opened is probably enough). 
+ */ + SQL_I_List<Item_trigger_field> m_trg_table_fields; + static void * operator new(size_t size) throw (); @@ -352,12 +364,12 @@ public: /// Put the instruction on the backpatch list, associated with the label. int - push_backpatch(sp_instr *, struct sp_label *); + push_backpatch(sp_instr *, sp_label *); /// Update all instruction with this label in the backpatch list to /// the current position. void - backpatch(struct sp_label *); + backpatch(sp_label *); /// Start a new cont. backpatch level. If 'i' is NULL, the level is just incr. int @@ -493,7 +505,7 @@ private: DYNAMIC_ARRAY m_instr; ///< The "instructions" typedef struct { - struct sp_label *lab; + sp_label *lab; sp_instr *instr; } bp_t; List<bp_t> m_backpatch; ///< Instructions needing backpatching @@ -593,7 +605,7 @@ public: Get the continuation destination of this instruction. @return the continuation destination */ - virtual uint get_cont_dest(); + virtual uint get_cont_dest() const; /* Execute core function of instruction after all preparations (e.g. 
@@ -865,7 +877,7 @@ public: virtual void set_destination(uint old_dest, uint new_dest) = 0; - virtual uint get_cont_dest(); + virtual uint get_cont_dest() const; protected: @@ -1016,15 +1028,21 @@ class sp_instr_hpush_jump : public sp_instr_jump public: - sp_instr_hpush_jump(uint ip, sp_pcontext *ctx, int htype, uint fp) - : sp_instr_jump(ip, ctx), m_type(htype), m_frame(fp), m_opt_hpop(0) + sp_instr_hpush_jump(uint ip, + sp_pcontext *ctx, + sp_handler *handler) + :sp_instr_jump(ip, ctx), + m_handler(handler), + m_opt_hpop(0), + m_frame(ctx->current_var_count()) { - m_cond.empty(); + DBUG_ASSERT(m_handler->condition_values.elements == 0); } virtual ~sp_instr_hpush_jump() { - m_cond.empty(); + m_handler->condition_values.empty(); + m_handler= NULL; } virtual int execute(THD *thd, uint *nextp); @@ -1048,17 +1066,24 @@ public: m_opt_hpop= dest; } - inline void add_condition(struct sp_cond_type *cond) - { - m_cond.push_front(cond); - } + void add_condition(sp_condition_value *condition_value) + { m_handler->condition_values.push_back(condition_value); } + + sp_handler *get_handler() + { return m_handler; } private: - int m_type; ///< Handler type +private: + /// Handler. + sp_handler *m_handler; + + /// hpop marking end of handler scope. + uint m_opt_hpop; + + // This attribute is needed for SHOW PROCEDURE CODE only (i.e. it's needed in + // debug version only). It's used in print(). uint m_frame; - uint m_opt_hpop; // hpop marking end of handler scope. 
- List<struct sp_cond_type> m_cond; }; // class sp_instr_hpush_jump : public sp_instr_jump @@ -1095,8 +1120,9 @@ class sp_instr_hreturn : public sp_instr_jump public: - sp_instr_hreturn(uint ip, sp_pcontext *ctx, uint fp) - : sp_instr_jump(ip, ctx), m_frame(fp) + sp_instr_hreturn(uint ip, sp_pcontext *ctx) + :sp_instr_jump(ip, ctx), + m_frame(ctx->current_var_count()) {} virtual ~sp_instr_hreturn() @@ -1251,7 +1277,7 @@ public: virtual void print(String *str); - void add_to_varlist(struct sp_variable *var) + void add_to_varlist(sp_variable *var) { m_varlist.push_back(var); } @@ -1259,7 +1285,7 @@ public: private: uint m_cursor; - List<struct sp_variable> m_varlist; + List<sp_variable> m_varlist; }; // class sp_instr_cfetch : public sp_instr diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc index f11daeecb7b..7c44e675811 100644 --- a/sql/sp_pcontext.cc +++ b/sql/sp_pcontext.cc @@ -22,133 +22,86 @@ #include "sp_pcontext.h" #include "sp_head.h" -/* Initial size for the dynamic arrays in sp_pcontext */ -#define PCONTEXT_ARRAY_INIT_ALLOC 16 -/* Increment size for the dynamic arrays in sp_pcontext */ -#define PCONTEXT_ARRAY_INCREMENT_ALLOC 8 - -/* - Sanity check for SQLSTATEs. Will not check if it's really an existing - state (there are just too many), but will check length and bad characters. - Returns TRUE if it's ok, FALSE if it's bad. 
-*/ -bool -sp_cond_check(LEX_STRING *sqlstate) +bool sp_condition_value::equals(const sp_condition_value *cv) const { - int i; - const char *p; + DBUG_ASSERT(cv); - if (sqlstate->length != 5) - return FALSE; - for (p= sqlstate->str, i= 0 ; i < 5 ; i++) + if (this == cv) + return true; + + if (type != cv->type) + return false; + + switch (type) { - char c = p[i]; + case sp_condition_value::ERROR_CODE: + return (mysqlerr == cv->mysqlerr); + + case sp_condition_value::SQLSTATE: + return (strcmp(sql_state, cv->sql_state) == 0); - if ((c < '0' || '9' < c) && - (c < 'A' || 'Z' < c)) - return FALSE; + default: + return true; } - /* SQLSTATE class '00' : completion condition */ - if (strncmp(sqlstate->str, "00", 2) == 0) - return FALSE; - return TRUE; } + +void sp_pcontext::init(uint var_offset, + uint cursor_offset, + int num_case_expressions) +{ + m_var_offset= var_offset; + m_cursor_offset= cursor_offset; + m_num_case_exprs= num_case_expressions; + + m_labels.empty(); +} + + sp_pcontext::sp_pcontext() : Sql_alloc(), - m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), - m_context_handlers(0), m_parent(NULL), m_pboundary(0), - m_label_scope(LABEL_DEFAULT_SCOPE) + m_max_var_index(0), m_max_cursor_index(0), + m_parent(NULL), m_pboundary(0), + m_scope(REGULAR_SCOPE) { - (void) my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - (void) my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - (void) my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - (void) my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - (void) my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - 
m_label.empty(); - m_children.empty(); - - m_var_offset= m_cursor_offset= 0; - m_num_case_exprs= 0; + init(0, 0, 0); } -sp_pcontext::sp_pcontext(sp_pcontext *prev, label_scope_type label_scope) + +sp_pcontext::sp_pcontext(sp_pcontext *prev, sp_pcontext::enum_scope scope) : Sql_alloc(), - m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), - m_context_handlers(0), m_parent(prev), m_pboundary(0), - m_label_scope(label_scope) + m_max_var_index(0), m_max_cursor_index(0), + m_parent(prev), m_pboundary(0), + m_scope(scope) { - (void) my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - (void) my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - (void) my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - (void) my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - (void) my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), - PCONTEXT_ARRAY_INIT_ALLOC, - PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0)); - m_label.empty(); - m_children.empty(); - - m_var_offset= prev->m_var_offset + prev->m_max_var_index; - m_cursor_offset= prev->current_cursor_count(); - m_num_case_exprs= prev->get_num_case_exprs(); + init(prev->m_var_offset + prev->m_max_var_index, + prev->current_cursor_count(), + prev->get_num_case_exprs()); } -void -sp_pcontext::destroy() + +sp_pcontext::~sp_pcontext() { - List_iterator_fast<sp_pcontext> li(m_children); - sp_pcontext *child; - - while ((child= li++)) - child->destroy(); - - m_children.empty(); - m_label.empty(); - delete_dynamic(&m_vars); - delete_dynamic(&m_case_expr_id_lst); - delete_dynamic(&m_conds); - delete_dynamic(&m_cursors); - delete_dynamic(&m_handlers); + for (size_t i= 0; i < m_children.elements(); ++i) + delete 
m_children.at(i); } -sp_pcontext * -sp_pcontext::push_context(label_scope_type label_scope) + +sp_pcontext *sp_pcontext::push_context(THD *thd, sp_pcontext::enum_scope scope) { - sp_pcontext *child= new sp_pcontext(this, label_scope); + sp_pcontext *child= new (thd->mem_root) sp_pcontext(this, scope); if (child) - m_children.push_back(child); + m_children.append(child); return child; } -sp_pcontext * -sp_pcontext::pop_context() + +sp_pcontext *sp_pcontext::pop_context() { m_parent->m_max_var_index+= m_max_var_index; - uint submax= max_handler_index(); - if (submax > m_parent->m_max_handler_index) - m_parent->m_max_handler_index= submax; - - submax= max_cursor_index(); + uint submax= max_cursor_index(); if (submax > m_parent->m_max_cursor_index) m_parent->m_max_cursor_index= submax; @@ -158,142 +111,118 @@ sp_pcontext::pop_context() return m_parent; } -uint -sp_pcontext::diff_handlers(sp_pcontext *ctx, bool exclusive) + +uint sp_pcontext::diff_handlers(const sp_pcontext *ctx, bool exclusive) const { uint n= 0; - sp_pcontext *pctx= this; - sp_pcontext *last_ctx= NULL; + const sp_pcontext *pctx= this; + const sp_pcontext *last_ctx= NULL; while (pctx && pctx != ctx) { - n+= pctx->m_context_handlers; + n+= pctx->m_handlers.elements(); last_ctx= pctx; pctx= pctx->parent_context(); } if (pctx) - return (exclusive && last_ctx ? n - last_ctx->m_context_handlers : n); + return (exclusive && last_ctx ? n - last_ctx->m_handlers.elements() : n); return 0; // Didn't find ctx } -uint -sp_pcontext::diff_cursors(sp_pcontext *ctx, bool exclusive) + +uint sp_pcontext::diff_cursors(const sp_pcontext *ctx, bool exclusive) const { uint n= 0; - sp_pcontext *pctx= this; - sp_pcontext *last_ctx= NULL; + const sp_pcontext *pctx= this; + const sp_pcontext *last_ctx= NULL; while (pctx && pctx != ctx) { - n+= pctx->m_cursors.elements; + n+= pctx->m_cursors.elements(); last_ctx= pctx; pctx= pctx->parent_context(); } if (pctx) - return (exclusive && last_ctx ? 
n - last_ctx->m_cursors.elements : n); + return (exclusive && last_ctx ? n - last_ctx->m_cursors.elements() : n); return 0; // Didn't find ctx } -/* - This does a linear search (from newer to older variables, in case - we have shadowed names). - It's possible to have a more efficient allocation and search method, - but it might not be worth it. The typical number of parameters and - variables will in most cases be low (a handfull). - ...and, this is only called during parsing. -*/ -sp_variable_t * -sp_pcontext::find_variable(LEX_STRING *name, my_bool scoped) + +sp_variable *sp_pcontext::find_variable(LEX_STRING name, + bool current_scope_only) const { - uint i= m_vars.elements - m_pboundary; + uint i= m_vars.elements() - m_pboundary; while (i--) { - sp_variable_t *p; + sp_variable *p= m_vars.at(i); - get_dynamic(&m_vars, (uchar*)&p, i); if (my_strnncoll(system_charset_info, - (const uchar *)name->str, name->length, + (const uchar *)name.str, name.length, (const uchar *)p->name.str, p->name.length) == 0) { return p; } } - if (!scoped && m_parent) - return m_parent->find_variable(name, scoped); - return NULL; + + return (!current_scope_only && m_parent) ? + m_parent->find_variable(name, false) : + NULL; } -/* - Find a variable by offset from the top. - This used for two things: - - When evaluating parameters at the beginning, and setting out parameters - at the end, of invokation. (Top frame only, so no recursion then.) - - For printing of sp_instr_set. (Debug mode only.) 
-*/ -sp_variable_t * -sp_pcontext::find_variable(uint offset) + +sp_variable *sp_pcontext::find_variable(uint offset) const { - if (m_var_offset <= offset && offset < m_var_offset + m_vars.elements) - { // This frame - sp_variable_t *p; + if (m_var_offset <= offset && offset < m_var_offset + m_vars.elements()) + return m_vars.at(offset - m_var_offset); // This frame - get_dynamic(&m_vars, (uchar*)&p, offset - m_var_offset); - return p; - } - if (m_parent) - return m_parent->find_variable(offset); // Some previous frame - return NULL; // index out of bounds + return m_parent ? + m_parent->find_variable(offset) : // Some previous frame + NULL; // Index out of bounds } -sp_variable_t * -sp_pcontext::push_variable(LEX_STRING *name, enum enum_field_types type, - sp_param_mode_t mode) + +sp_variable *sp_pcontext::add_variable(THD *thd, + LEX_STRING name, + enum enum_field_types type, + sp_variable::enum_mode mode) { - sp_variable_t *p= (sp_variable_t *)sql_alloc(sizeof(sp_variable_t)); + sp_variable *p= + new (thd->mem_root) sp_variable(name, type,mode, current_var_count()); if (!p) return NULL; ++m_max_var_index; - p->name.str= name->str; - p->name.length= name->length; - p->type= type; - p->mode= mode; - p->offset= current_var_count(); - p->dflt= NULL; - if (insert_dynamic(&m_vars, (uchar*)&p)) - return NULL; - return p; + return m_vars.append(p) ? 
NULL : p; } -sp_label_t * -sp_pcontext::push_label(char *name, uint ip) +sp_label *sp_pcontext::push_label(THD *thd, LEX_STRING name, uint ip) { - sp_label_t *lab = (sp_label_t *)sql_alloc(sizeof(sp_label_t)); + sp_label *label= + new (thd->mem_root) sp_label(name, ip, sp_label::IMPLICIT, this); - if (lab) - { - lab->name= name; - lab->ip= ip; - lab->type= SP_LAB_IMPL; - lab->ctx= this; - m_label.push_front(lab); - } - return lab; + if (!label) + return NULL; + + m_labels.push_front(label); + + return label; } -sp_label_t * -sp_pcontext::find_label(char *name) + +sp_label *sp_pcontext::find_label(LEX_STRING name) { - List_iterator_fast<sp_label_t> li(m_label); - sp_label_t *lab; + List_iterator_fast<sp_label> li(m_labels); + sp_label *lab; while ((lab= li++)) - if (my_strcasecmp(system_charset_info, name, lab->name) == 0) + { + if (my_strcasecmp(system_charset_info, name.str, lab->name.str) == 0) return lab; + } /* Note about exception handlers. @@ -303,159 +232,253 @@ sp_pcontext::find_label(char *name) In short, a DECLARE HANDLER block can not refer to labels from the parent context, as they are out of scope. */ - if (m_parent && (m_label_scope == LABEL_DEFAULT_SCOPE)) - return m_parent->find_label(name); - return NULL; + return (m_parent && (m_scope == REGULAR_SCOPE)) ? 
+ m_parent->find_label(name) : + NULL; } -int -sp_pcontext::push_cond(LEX_STRING *name, sp_cond_type_t *val) + +bool sp_pcontext::add_condition(THD *thd, + LEX_STRING name, + sp_condition_value *value) { - sp_cond_t *p= (sp_cond_t *)sql_alloc(sizeof(sp_cond_t)); + sp_condition *p= new (thd->mem_root) sp_condition(name, value); if (p == NULL) - return 1; - p->name.str= name->str; - p->name.length= name->length; - p->val= val; - return insert_dynamic(&m_conds, (uchar *)&p); + return true; + + return m_conditions.append(p); } -/* - See comment for find_variable() above -*/ -sp_cond_type_t * -sp_pcontext::find_cond(LEX_STRING *name, my_bool scoped) + +sp_condition_value *sp_pcontext::find_condition(LEX_STRING name, + bool current_scope_only) const { - uint i= m_conds.elements; + uint i= m_conditions.elements(); while (i--) { - sp_cond_t *p; + sp_condition *p= m_conditions.at(i); - get_dynamic(&m_conds, (uchar*)&p, i); if (my_strnncoll(system_charset_info, - (const uchar *)name->str, name->length, - (const uchar *)p->name.str, p->name.length) == 0) + (const uchar *) name.str, name.length, + (const uchar *) p->name.str, p->name.length) == 0) { - return p->val; + return p->value; } } - if (!scoped && m_parent) - return m_parent->find_cond(name, scoped); - return NULL; + + return (!current_scope_only && m_parent) ? + m_parent->find_condition(name, false) : + NULL; } -/* - This only searches the current context, for error checking of - duplicates. - Returns TRUE if found. -*/ -bool -sp_pcontext::find_handler(sp_cond_type_t *cond) + +sp_handler *sp_pcontext::add_handler(THD *thd, + sp_handler::enum_type type) { - uint i= m_handlers.elements; + sp_handler *h= new (thd->mem_root) sp_handler(type); - while (i--) + if (!h) + return NULL; + + return m_handlers.append(h) ? 
NULL : h; +} + + +bool sp_pcontext::check_duplicate_handler( + const sp_condition_value *cond_value) const +{ + for (size_t i= 0; i < m_handlers.elements(); ++i) { - sp_cond_type_t *p; + sp_handler *h= m_handlers.at(i); + + List_iterator_fast<sp_condition_value> li(h->condition_values); + sp_condition_value *cv; - get_dynamic(&m_handlers, (uchar*)&p, i); - if (cond->type == p->type) + while ((cv= li++)) { - switch (p->type) + if (cond_value->equals(cv)) + return true; + } + } + + return false; +} + + +sp_handler* +sp_pcontext::find_handler(const char *sql_state, + uint sql_errno, + Sql_condition::enum_warning_level level) const +{ + sp_handler *found_handler= NULL; + sp_condition_value *found_cv= NULL; + + for (size_t i= 0; i < m_handlers.elements(); ++i) + { + sp_handler *h= m_handlers.at(i); + + List_iterator_fast<sp_condition_value> li(h->condition_values); + sp_condition_value *cv; + + while ((cv= li++)) + { + switch (cv->type) { - case sp_cond_type_t::number: - if (cond->mysqlerr == p->mysqlerr) - return TRUE; - break; - case sp_cond_type_t::state: - if (strcmp(cond->sqlstate, p->sqlstate) == 0) - return TRUE; - break; - default: - return TRUE; + case sp_condition_value::ERROR_CODE: + if (sql_errno == cv->mysqlerr && + (!found_cv || + found_cv->type > sp_condition_value::ERROR_CODE)) + { + found_cv= cv; + found_handler= h; + } + break; + + case sp_condition_value::SQLSTATE: + if (strcmp(sql_state, cv->sql_state) == 0 && + (!found_cv || + found_cv->type > sp_condition_value::SQLSTATE)) + { + found_cv= cv; + found_handler= h; + } + break; + + case sp_condition_value::WARNING: + if ((is_sqlstate_warning(sql_state) || + level == Sql_condition::WARN_LEVEL_WARN) && !found_cv) + { + found_cv= cv; + found_handler= h; + } + break; + + case sp_condition_value::NOT_FOUND: + if (is_sqlstate_not_found(sql_state) && !found_cv) + { + found_cv= cv; + found_handler= h; + } + break; + + case sp_condition_value::EXCEPTION: + if (is_sqlstate_exception(sql_state) && + level == 
Sql_condition::WARN_LEVEL_ERROR && !found_cv) + { + found_cv= cv; + found_handler= h; + } + break; } } } - return FALSE; + + if (found_handler) + return found_handler; + + + // There is no appropriate handler in this parsing context. We need to look up + // in parent contexts. There might be two cases here: + // + // 1. The current context has REGULAR_SCOPE. That means, it's a simple + // BEGIN..END block: + // ... + // BEGIN + // ... # We're here. + // END + // ... + // In this case we simply call find_handler() on parent's context recursively. + // + // 2. The current context has HANDLER_SCOPE. That means, we're inside an + // SQL-handler block: + // ... + // DECLARE ... HANDLER FOR ... + // BEGIN + // ... # We're here. + // END + // ... + // In this case we can not just call parent's find_handler(), because + // parent's handler don't catch conditions from this scope. Instead, we should + // try to find first parent context (we might have nested handler + // declarations), which has REGULAR_SCOPE (i.e. which is regular BEGIN..END + // block). 
+ + const sp_pcontext *p= this; + + while (p && p->m_scope == HANDLER_SCOPE) + p= p->m_parent; + + if (!p || !p->m_parent) + return NULL; + + return p->m_parent->find_handler(sql_state, sql_errno, level); } -int -sp_pcontext::push_cursor(LEX_STRING *name) + +bool sp_pcontext::add_cursor(LEX_STRING name) { - LEX_STRING n; + if (m_cursors.elements() == m_max_cursor_index) + ++m_max_cursor_index; - if (m_cursors.elements == m_max_cursor_index) - m_max_cursor_index+= 1; - n.str= name->str; - n.length= name->length; - return insert_dynamic(&m_cursors, (uchar *)&n); + return m_cursors.append(name); } -/* - See comment for find_variable() above -*/ -my_bool -sp_pcontext::find_cursor(LEX_STRING *name, uint *poff, my_bool scoped) + +bool sp_pcontext::find_cursor(LEX_STRING name, + uint *poff, + bool current_scope_only) const { - uint i= m_cursors.elements; + uint i= m_cursors.elements(); while (i--) { - LEX_STRING n; + LEX_STRING n= m_cursors.at(i); - get_dynamic(&m_cursors, (uchar*)&n, i); if (my_strnncoll(system_charset_info, - (const uchar *)name->str, name->length, - (const uchar *)n.str, n.length) == 0) + (const uchar *) name.str, name.length, + (const uchar *) n.str, n.length) == 0) { *poff= m_cursor_offset + i; - return TRUE; + return true; } } - if (!scoped && m_parent) - return m_parent->find_cursor(name, poff, scoped); - return FALSE; + + return (!current_scope_only && m_parent) ? + m_parent->find_cursor(name, poff, false) : + false; } -void -sp_pcontext::retrieve_field_definitions(List<Create_field> *field_def_lst) +void sp_pcontext::retrieve_field_definitions( + List<Create_field> *field_def_lst) const { /* Put local/context fields in the result list. 
*/ - for (uint i = 0; i < m_vars.elements; ++i) + for (size_t i= 0; i < m_vars.elements(); ++i) { - sp_variable_t *var_def; - get_dynamic(&m_vars, (uchar*) &var_def, i); + sp_variable *var_def= m_vars.at(i); field_def_lst->push_back(&var_def->field_def); } /* Put the fields of the enclosed contexts in the result list. */ - List_iterator_fast<sp_pcontext> li(m_children); - sp_pcontext *ctx; - - while ((ctx = li++)) - ctx->retrieve_field_definitions(field_def_lst); + for (size_t i= 0; i < m_children.elements(); ++i) + m_children.at(i)->retrieve_field_definitions(field_def_lst); } -/* - Find a cursor by offset from the top. - This is only used for debugging. -*/ -my_bool -sp_pcontext::find_cursor(uint offset, LEX_STRING *n) + +const LEX_STRING *sp_pcontext::find_cursor(uint offset) const { if (m_cursor_offset <= offset && - offset < m_cursor_offset + m_cursors.elements) - { // This frame - get_dynamic(&m_cursors, (uchar*)n, offset - m_cursor_offset); - return TRUE; + offset < m_cursor_offset + m_cursors.elements()) + { + return &m_cursors.at(offset - m_cursor_offset); // This frame } - if (m_parent) - return m_parent->find_cursor(offset, n); // Some previous frame - return FALSE; // index out of bounds + + return m_parent ? + m_parent->find_cursor(offset) : // Some previous frame + NULL; // Index out of bounds } diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h index f1d0d250c47..4d8623108aa 100644 --- a/sql/sp_pcontext.h +++ b/sql/sp_pcontext.h @@ -24,438 +24,541 @@ #include "sql_string.h" // LEX_STRING #include "mysql_com.h" // enum_field_types #include "field.h" // Create_field +#include "sql_array.h" // Dynamic_array -class sp_pcontext; -typedef enum -{ - sp_param_in, - sp_param_out, - sp_param_inout -} sp_param_mode_t; +/// This class represents a stored program variable or a parameter +/// (also referenced as 'SP-variable'). 
-typedef struct sp_variable +class sp_variable : public Sql_alloc { - LEX_STRING name; - enum enum_field_types type; - sp_param_mode_t mode; - - /* - offset -- this the index to the variable's value in the runtime frame. - This is calculated during parsing and used when creating sp_instr_set - instructions and Item_splocal items. - I.e. values are set/referred by array indexing in runtime. - */ - uint offset; - - Item *dflt; - Create_field field_def; -} sp_variable_t; +public: + enum enum_mode + { + MODE_IN, + MODE_OUT, + MODE_INOUT + }; + /// Name of the SP-variable. + LEX_STRING name; -#define SP_LAB_IMPL 0 // Implicit label generated by parser -#define SP_LAB_BEGIN 1 // Label at BEGIN -#define SP_LAB_ITER 2 // Label at iteration control + /// Field-type of the SP-variable. + enum enum_field_types type; -/* - An SQL/PSM label. Can refer to the identifier used with the - "label_name:" construct which may precede some SQL/PSM statements, or - to an implicit implementation-dependent identifier which the parser - inserts before a high-level flow control statement such as - IF/WHILE/REPEAT/LOOP, when such statement is rewritten into - a combination of low-level jump/jump_if instructions and labels. -*/ + /// Mode of the SP-variable. + enum_mode mode; -typedef struct sp_label -{ - char *name; - uint ip; // Instruction index - int type; // begin/iter or ref/free - sp_pcontext *ctx; // The label's context -} sp_label_t; + /// The index to the variable's value in the runtime frame. + /// + /// It is calculated during parsing and used when creating sp_instr_set + /// instructions and Item_splocal items. I.e. values are set/referred by + /// array indexing in runtime. + uint offset; -typedef struct sp_cond_type -{ - enum { number, state, warning, notfound, exception } type; - char sqlstate[SQLSTATE_LENGTH+1]; - uint mysqlerr; -} sp_cond_type_t; + /// Default value of the SP-variable (if any). + Item *default_value; -/* - Sanity check for SQLSTATEs. 
Will not check if it's really an existing - state (there are just too many), but will check length bad characters. -*/ -extern bool -sp_cond_check(LEX_STRING *sqlstate); + /// Full type information (field meta-data) of the SP-variable. + Create_field field_def; -typedef struct sp_cond -{ - LEX_STRING name; - sp_cond_type_t *val; -} sp_cond_t; - -/** - The scope of a label in Stored Procedures, - for name resolution of labels in a parsing context. -*/ -enum label_scope_type -{ - /** - The labels declared in a parent context are in scope. - */ - LABEL_DEFAULT_SCOPE, - /** - The labels declared in a parent context are not in scope. - */ - LABEL_HANDLER_SCOPE +public: + sp_variable(LEX_STRING _name, enum_field_types _type, enum_mode _mode, + uint _offset) + :Sql_alloc(), + name(_name), + type(_type), + mode(_mode), + offset(_offset), + default_value(NULL) + { } }; -/** - The parse-time context, used to keep track of declared variables/parameters, - conditions, handlers, cursors and labels, during parsing. - sp_contexts are organized as a tree, with one object for each begin-end - block, one object for each exception handler, - plus a root-context for the parameters. - This is used during parsing for looking up defined names (e.g. declared - variables and visible labels), for error checking, and to calculate offsets - to be used at runtime. (During execution variable values, active handlers - and cursors, etc, are referred to by an index in a stack.) - Parsing contexts for exception handlers limit the visibility of labels. - The pcontext tree is also kept during execution and is used for error - checking (e.g. correct number of parameters), and in the future, used by - the debugger. -*/ +/////////////////////////////////////////////////////////////////////////// -class sp_pcontext : public Sql_alloc +/// This class represents an SQL/PSM label. 
Can refer to the identifier +/// used with the "label_name:" construct which may precede some SQL/PSM +/// statements, or to an implicit implementation-dependent identifier which +/// the parser inserts before a high-level flow control statement such as +/// IF/WHILE/REPEAT/LOOP, when such statement is rewritten into a +/// combination of low-level jump/jump_if instructions and labels. + +class sp_label : public Sql_alloc { public: - - /** - Constructor. - Builds a parsing context root node. - */ - sp_pcontext(); - - // Free memory - void - destroy(); - - /** - Create and push a new context in the tree. - @param label_scope label scope for the new parsing context - @return the node created - */ - sp_pcontext * - push_context(label_scope_type label_scope); - - /** - Pop a node from the parsing context tree. - @return the parent node - */ - sp_pcontext * - pop_context(); - - sp_pcontext * - parent_context() + enum enum_type { - return m_parent; - } + /// Implicit label generated by parser. + IMPLICIT, - /* - Number of handlers/cursors to pop between this context and 'ctx'. - If 'exclusive' is true, don't count the last block we are leaving; - this is used for LEAVE where we will jump to the cpop/hpop instructions. - */ - uint - diff_handlers(sp_pcontext *ctx, bool exclusive); - uint - diff_cursors(sp_pcontext *ctx, bool exclusive); - - - // - // Parameters and variables - // - - /* - The maximum number of variables used in this and all child contexts - In the root, this gives us the number of slots needed for variables - during execution. - */ - inline uint - max_var_index() - { - return m_max_var_index; - } + /// Label at BEGIN. + BEGIN, - /* - The current number of variables used in the parents (from the root), - including this context. 
- */ - inline uint - current_var_count() - { - return m_var_offset + m_vars.elements; - } + /// Label at iteration control + ITERATION + }; - /* The number of variables in this context alone */ - inline uint - context_var_count() - { - return m_vars.elements; - } + /// Name of the label. + LEX_STRING name; - /* Map index in this pcontext to runtime offset */ - inline uint - var_context2runtime(uint i) - { - return m_var_offset + i; - } + /// Instruction pointer of the label. + uint ip; - /* Set type of variable. 'i' is the offset from the top */ - inline void - set_type(uint i, enum enum_field_types type) - { - sp_variable_t *p= find_variable(i); + /// Type of the label. + enum_type type; - if (p) - p->type= type; - } + /// Scope of the label. + class sp_pcontext *ctx; - /* Set default value of variable. 'i' is the offset from the top */ - inline void - set_default(uint i, Item *it) - { - sp_variable_t *p= find_variable(i); +public: + sp_label(LEX_STRING _name, uint _ip, enum_type _type, sp_pcontext *_ctx) + :Sql_alloc(), + name(_name), + ip(_ip), + type(_type), + ctx(_ctx) + { } +}; - if (p) - p->dflt= it; - } +/////////////////////////////////////////////////////////////////////////// + +/// This class represents condition-value term in DECLARE CONDITION or +/// DECLARE HANDLER statements. sp_condition_value has little to do with +/// SQL-conditions. +/// +/// In some sense, this class is a union -- a set of filled attributes +/// depends on the sp_condition_value::type value. - sp_variable_t * - push_variable(LEX_STRING *name, enum enum_field_types type, - sp_param_mode_t mode); - - /* - Retrieve definitions of fields from the current context and its - children. 
- */ - void - retrieve_field_definitions(List<Create_field> *field_def_lst); - - // Find by name - sp_variable_t * - find_variable(LEX_STRING *name, my_bool scoped=0); - - // Find by offset (from the top) - sp_variable_t * - find_variable(uint offset); - - /* - Set the current scope boundary (for default values). - The argument is the number of variables to skip. - */ - inline void - declare_var_boundary(uint n) +class sp_condition_value : public Sql_alloc +{ +public: + enum enum_type { - m_pboundary= n; - } + ERROR_CODE, + SQLSTATE, + WARNING, + NOT_FOUND, + EXCEPTION + }; - /* - CASE expressions support. - */ + /// Type of the condition value. + enum_type type; - inline int - register_case_expr() - { - return m_num_case_exprs++; - } + /// SQLSTATE of the condition value. + char sql_state[SQLSTATE_LENGTH+1]; - inline int - get_num_case_exprs() const - { - return m_num_case_exprs; - } + /// MySQL error code of the condition value. + uint mysqlerr; - inline bool - push_case_expr_id(int case_expr_id) +public: + sp_condition_value(uint _mysqlerr) + :Sql_alloc(), + type(ERROR_CODE), + mysqlerr(_mysqlerr) + { } + + sp_condition_value(const char *_sql_state) + :Sql_alloc(), + type(SQLSTATE) { - return insert_dynamic(&m_case_expr_id_lst, (uchar*) &case_expr_id); + memcpy(sql_state, _sql_state, SQLSTATE_LENGTH); + sql_state[SQLSTATE_LENGTH]= 0; } - inline void - pop_case_expr_id() + sp_condition_value(enum_type _type) + :Sql_alloc(), + type(_type) { - pop_dynamic(&m_case_expr_id_lst); + DBUG_ASSERT(type != ERROR_CODE && type != SQLSTATE); } - inline int - get_current_case_expr_id() const - { - int case_expr_id; + /// Check if two instances of sp_condition_value are equal or not. + /// + /// @param cv another instance of sp_condition_value to check. + /// + /// @return true if the instances are equal, false otherwise. 
+ bool equals(const sp_condition_value *cv) const; +}; - get_dynamic((DYNAMIC_ARRAY*)&m_case_expr_id_lst, (uchar*) &case_expr_id, - m_case_expr_id_lst.elements - 1); +/////////////////////////////////////////////////////////////////////////// - return case_expr_id; - } +/// This class represents 'DECLARE CONDITION' statement. +/// sp_condition has little to do with SQL-conditions. - // - // Labels - // +class sp_condition : public Sql_alloc +{ +public: + /// Name of the condition. + LEX_STRING name; - sp_label_t * - push_label(char *name, uint ip); + /// Value of the condition. + sp_condition_value *value; - sp_label_t * - find_label(char *name); +public: + sp_condition(LEX_STRING _name, sp_condition_value *_value) + :Sql_alloc(), + name(_name), + value(_value) + { } +}; - inline sp_label_t * - last_label() - { - sp_label_t *lab= m_label.head(); +/////////////////////////////////////////////////////////////////////////// - if (!lab && m_parent) - lab= m_parent->last_label(); - return lab; - } +/// This class represents 'DECLARE HANDLER' statement. - inline sp_label_t * - pop_label() +class sp_handler : public Sql_alloc +{ +public: + /// Enumeration of possible handler types. + /// Note: UNDO handlers are not (and have never been) supported. + enum enum_type { - return m_label.pop(); - } + EXIT, + CONTINUE + }; - // - // Conditions - // + /// Handler type. + enum_type type; - int - push_cond(LEX_STRING *name, sp_cond_type_t *val); + /// Conditions caught by this handler. + List<sp_condition_value> condition_values; - sp_cond_type_t * - find_cond(LEX_STRING *name, my_bool scoped=0); +public: + /// The constructor. + /// + /// @param _type SQL-handler type. 
+ sp_handler(enum_type _type) + :Sql_alloc(), + type(_type) + { } +}; - // - // Handlers - // +/////////////////////////////////////////////////////////////////////////// + +/// The class represents parse-time context, which keeps track of declared +/// variables/parameters, conditions, handlers, cursors and labels. +/// +/// sp_pcontext objects are organized in a tree according to the following +/// rules: +/// - one sp_pcontext object corresponds for for each BEGIN..END block; +/// - one sp_pcontext object corresponds for each exception handler; +/// - one additional sp_pcontext object is created to contain +/// Stored Program parameters. +/// +/// sp_pcontext objects are used both at parse-time and at runtime. +/// +/// During the parsing stage sp_pcontext objects are used: +/// - to look up defined names (e.g. declared variables and visible +/// labels); +/// - to check for duplicates; +/// - for error checking; +/// - to calculate offsets to be used at runtime. +/// +/// During the runtime phase, a tree of sp_pcontext objects is used: +/// - for error checking (e.g. to check correct number of parameters); +/// - to resolve SQL-handlers. - inline void - push_handler(sp_cond_type_t *cond) +class sp_pcontext : public Sql_alloc +{ +public: + enum enum_scope { - insert_dynamic(&m_handlers, (uchar*)&cond); - } - - bool - find_handler(sp_cond_type *cond); + /// REGULAR_SCOPE designates regular BEGIN ... END blocks. + REGULAR_SCOPE, - inline uint - max_handler_index() - { - return m_max_handler_index + m_context_handlers; - } + /// HANDLER_SCOPE designates SQL-handler blocks. + HANDLER_SCOPE + }; - inline void - add_handlers(uint n) +public: + sp_pcontext(); + ~sp_pcontext(); + + + /// Create and push a new context in the tree. + + /// @param thd thread context. + /// @param scope scope of the new parsing context. + /// @return the node created. + sp_pcontext *push_context(THD *thd, enum_scope scope); + + /// Pop a node from the parsing context tree. 
+ /// @return the parent node. + sp_pcontext *pop_context(); + + sp_pcontext *parent_context() const + { return m_parent; } + + /// Calculate and return the number of handlers to pop between the given + /// context and this one. + /// + /// @param ctx the other parsing context. + /// @param exclusive specifies if the last scope should be excluded. + /// + /// @return the number of handlers to pop between the given context and + /// this one. If 'exclusive' is true, don't count the last scope we are + /// leaving; this is used for LEAVE where we will jump to the hpop + /// instructions. + uint diff_handlers(const sp_pcontext *ctx, bool exclusive) const; + + /// Calculate and return the number of cursors to pop between the given + /// context and this one. + /// + /// @param ctx the other parsing context. + /// @param exclusive specifies if the last scope should be excluded. + /// + /// @return the number of cursors to pop between the given context and + /// this one. If 'exclusive' is true, don't count the last scope we are + /// leaving; this is used for LEAVE where we will jump to the cpop + /// instructions. + uint diff_cursors(const sp_pcontext *ctx, bool exclusive) const; + + ///////////////////////////////////////////////////////////////////////// + // SP-variables (parameters and variables). + ///////////////////////////////////////////////////////////////////////// + + /// @return the maximum number of variables used in this and all child + /// contexts. For the root parsing context, this gives us the number of + /// slots needed for variables during the runtime phase. + uint max_var_index() const + { return m_max_var_index; } + + /// @return the current number of variables used in the parent contexts + /// (from the root), including this context. + uint current_var_count() const + { return m_var_offset + m_vars.elements(); } + + /// @return the number of variables in this context alone. 
+ uint context_var_count() const + { return m_vars.elements(); } + + /// @return map index in this parsing context to runtime offset. + uint var_context2runtime(uint i) const + { return m_var_offset + i; } + + /// Add SP-variable to the parsing context. + /// + /// @param thd Thread context. + /// @param name Name of the SP-variable. + /// @param type Type of the SP-variable. + /// @param mode Mode of the SP-variable. + /// + /// @return instance of newly added SP-variable. + sp_variable *add_variable(THD *thd, + LEX_STRING name, + enum enum_field_types type, + sp_variable::enum_mode mode); + + /// Retrieve full type information about SP-variables in this parsing + /// context and its children. + /// + /// @param field_def_lst[out] Container to store type information. + void retrieve_field_definitions(List<Create_field> *field_def_lst) const; + + /// Find SP-variable by name. + /// + /// The function does a linear search (from newer to older variables, + /// in case we have shadowed names). + /// + /// The function is called only at parsing time. + /// + /// @param name Variable name. + /// @param current_scope_only A flag if we search only in current scope. + /// + /// @return instance of found SP-variable, or NULL if not found. + sp_variable *find_variable(LEX_STRING name, bool current_scope_only) const; + + /// Find SP-variable by the offset in the root parsing context. + /// + /// The function is used for two things: + /// - When evaluating parameters at the beginning, and setting out parameters + /// at the end, of invocation. (Top frame only, so no recursion then.) + /// - For printing of sp_instr_set. (Debug mode only.) + /// + /// @param offset Variable offset in the root parsing context. + /// + /// @return instance of found SP-variable, or NULL if not found. + sp_variable *find_variable(uint offset) const; + + /// Set the current scope boundary (for default values). + /// + /// @param n The number of variables to skip. 
+ void declare_var_boundary(uint n) + { m_pboundary= n; } + + ///////////////////////////////////////////////////////////////////////// + // CASE expressions. + ///////////////////////////////////////////////////////////////////////// + + int register_case_expr() + { return m_num_case_exprs++; } + + int get_num_case_exprs() const + { return m_num_case_exprs; } + + bool push_case_expr_id(int case_expr_id) + { return m_case_expr_ids.append(case_expr_id); } + + void pop_case_expr_id() + { m_case_expr_ids.pop(); } + + int get_current_case_expr_id() const + { return *m_case_expr_ids.back(); } + + ///////////////////////////////////////////////////////////////////////// + // Labels. + ///////////////////////////////////////////////////////////////////////// + + sp_label *push_label(THD *thd, LEX_STRING name, uint ip); + + sp_label *find_label(LEX_STRING name); + + sp_label *last_label() { - m_context_handlers+= n; - } - - // - // Cursors - // + sp_label *label= m_labels.head(); - int - push_cursor(LEX_STRING *name); + if (!label && m_parent) + label= m_parent->last_label(); - my_bool - find_cursor(LEX_STRING *name, uint *poff, my_bool scoped=0); - - /* Find by offset (for debugging only) */ - my_bool - find_cursor(uint offset, LEX_STRING *n); - - inline uint - max_cursor_index() - { - return m_max_cursor_index + m_cursors.elements; - } - - inline uint - current_cursor_count() - { - return m_cursor_offset + m_cursors.elements; + return label; } -protected: + sp_label *pop_label() + { return m_labels.pop(); } + + ///////////////////////////////////////////////////////////////////////// + // Conditions. + ///////////////////////////////////////////////////////////////////////// + + bool add_condition(THD *thd, LEX_STRING name, sp_condition_value *value); + + /// See comment for find_variable() above. 
+ sp_condition_value *find_condition(LEX_STRING name, + bool current_scope_only) const; + + ///////////////////////////////////////////////////////////////////////// + // Handlers. + ///////////////////////////////////////////////////////////////////////// + + sp_handler *add_handler(THD* thd, sp_handler::enum_type type); + + /// This is an auxilary parsing-time function to check if an SQL-handler + /// exists in the current parsing context (current scope) for the given + /// SQL-condition. This function is used to check for duplicates during + /// the parsing phase. + /// + /// This function can not be used during the runtime phase to check + /// SQL-handler existence because it searches for the SQL-handler in the + /// current scope only (during runtime, current and parent scopes + /// should be checked according to the SQL-handler resolution rules). + /// + /// @param condition_value the handler condition value + /// (not SQL-condition!). + /// + /// @retval true if such SQL-handler exists. + /// @retval false otherwise. + bool check_duplicate_handler(const sp_condition_value *cond_value) const; + + /// Find an SQL handler for the given SQL condition according to the + /// SQL-handler resolution rules. This function is used at runtime. + /// + /// @param sql_state The SQL condition state + /// @param sql_errno The error code + /// @param level The SQL condition level + /// + /// @return a pointer to the found SQL-handler or NULL. + sp_handler *find_handler(const char *sql_state, + uint sql_errno, + Sql_condition::enum_warning_level level) const; + + ///////////////////////////////////////////////////////////////////////// + // Cursors. + ///////////////////////////////////////////////////////////////////////// + + bool add_cursor(LEX_STRING name); + + /// See comment for find_variable() above. + bool find_cursor(LEX_STRING name, uint *poff, bool current_scope_only) const; + + /// Find cursor by offset (for debugging only). 
+ const LEX_STRING *find_cursor(uint offset) const; + + uint max_cursor_index() const + { return m_max_cursor_index + m_cursors.elements(); } + + uint current_cursor_count() const + { return m_cursor_offset + m_cursors.elements(); } - /** - Constructor for a tree node. - @param prev the parent parsing context - @param label_scope label_scope for this parsing context - */ - sp_pcontext(sp_pcontext *prev, label_scope_type label_scope); - - /* - m_max_var_index -- number of variables (including all types of arguments) - in this context including all children contexts. - - m_max_var_index >= m_vars.elements. +private: + /// Constructor for a tree node. + /// @param prev the parent parsing context + /// @param scope scope of this parsing context + sp_pcontext(sp_pcontext *prev, enum_scope scope); - m_max_var_index of the root parsing context contains number of all - variables (including arguments) in all enclosed contexts. - */ - uint m_max_var_index; + void init(uint var_offset, uint cursor_offset, int num_case_expressions); - // The maximum sub context's framesizes - uint m_max_cursor_index; - uint m_max_handler_index; - uint m_context_handlers; // No. of handlers in this context + /* Prevent use of these */ + sp_pcontext(const sp_pcontext &); + void operator=(sp_pcontext &); private: + /// m_max_var_index -- number of variables (including all types of arguments) + /// in this context including all children contexts. + /// + /// m_max_var_index >= m_vars.elements(). + /// + /// m_max_var_index of the root parsing context contains number of all + /// variables (including arguments) in all enclosed contexts. + uint m_max_var_index; + + /// The maximum sub context's framesizes. + uint m_max_cursor_index; - sp_pcontext *m_parent; // Parent context - - /* - m_var_offset -- this is an index of the first variable in this - parsing context. - - m_var_offset is 0 for root context. + /// Parent context. 
+ sp_pcontext *m_parent; - Since now each variable is stored in separate place, no reuse is done, - so m_var_offset is different for all enclosed contexts. - */ + /// An index of the first SP-variable in this parsing context. The index + /// belongs to a runtime table of SP-variables. + /// + /// Note: + /// - m_var_offset is 0 for root parsing context; + /// - m_var_offset is different for all nested parsing contexts. uint m_var_offset; - uint m_cursor_offset; // Cursor offset for this context + /// Cursor offset for this context. + uint m_cursor_offset; - /* - Boundary for finding variables in this context. This is the number - of variables currently "invisible" to default clauses. - This is normally 0, but will be larger during parsing of - DECLARE ... DEFAULT, to get the scope right for DEFAULT values. - */ + /// Boundary for finding variables in this context. This is the number of + /// variables currently "invisible" to default clauses. This is normally 0, + /// but will be larger during parsing of DECLARE ... DEFAULT, to get the + /// scope right for DEFAULT values. uint m_pboundary; int m_num_case_exprs; - DYNAMIC_ARRAY m_vars; // Parameters/variables - DYNAMIC_ARRAY m_case_expr_id_lst; /* Stack of CASE expression ids. */ - DYNAMIC_ARRAY m_conds; // Conditions - DYNAMIC_ARRAY m_cursors; // Cursors - DYNAMIC_ARRAY m_handlers; // Handlers, for checking for duplicates + /// SP parameters/variables. + Dynamic_array<sp_variable *> m_vars; - List<sp_label_t> m_label; // The label list + /// Stack of CASE expression ids. + Dynamic_array<int> m_case_expr_ids; - List<sp_pcontext> m_children; // Children contexts, used for destruction + /// Stack of SQL-conditions. + Dynamic_array<sp_condition *> m_conditions; - /** - Scope of labels for this parsing context. - */ - label_scope_type m_label_scope; + /// Stack of cursors. 
+ Dynamic_array<LEX_STRING> m_cursors; -private: - sp_pcontext(const sp_pcontext &); /* Prevent use of these */ - void operator=(sp_pcontext &); + /// Stack of SQL-handlers. + Dynamic_array<sp_handler *> m_handlers; + + /// List of labels. + List<sp_label> m_labels; + + /// Children contexts, used for destruction. + Dynamic_array<sp_pcontext *> m_children; + + /// Scope of this parsing context. + enum_scope m_scope; }; // class sp_pcontext : public Sql_alloc diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc index 30acfebabb2..42476f7a596 100644 --- a/sql/sp_rcontext.cc +++ b/sql/sp_rcontext.cc @@ -26,23 +26,21 @@ #include "sp_pcontext.h" #include "sql_select.h" // create_virtual_tmp_table -sp_rcontext::sp_rcontext(sp_pcontext *root_parsing_ctx, +/////////////////////////////////////////////////////////////////////////// +// sp_rcontext implementation. +/////////////////////////////////////////////////////////////////////////// + + +sp_rcontext::sp_rcontext(const sp_pcontext *root_parsing_ctx, Field *return_value_fld, - sp_rcontext *prev_runtime_ctx) - :end_partial_result_set(FALSE), + bool in_sub_stmt) + :end_partial_result_set(false), m_root_parsing_ctx(root_parsing_ctx), - m_var_table(0), - m_var_items(0), + m_var_table(NULL), m_return_value_fld(return_value_fld), - m_return_value_set(FALSE), - in_sub_stmt(FALSE), - m_hcount(0), - m_hsp(0), - m_ihsp(0), - m_hfound(-1), - m_ccount(0), - m_case_expr_holders(0), - m_prev_runtime_ctx(prev_runtime_ctx) + m_return_value_set(false), + m_in_sub_stmt(in_sub_stmt), + m_ccount(0) { } @@ -51,422 +49,324 @@ sp_rcontext::~sp_rcontext() { if (m_var_table) free_blobs(m_var_table); + + // Leave m_handlers, m_handler_call_stack, m_var_items, m_cstack + // and m_case_expr_holders untouched. + // They are allocated in mem roots and will be freed accordingly. } -/* - Initialize sp_rcontext instance. 
+sp_rcontext *sp_rcontext::create(THD *thd, + const sp_pcontext *root_parsing_ctx, + Field *return_value_fld) +{ + sp_rcontext *ctx= new (thd->mem_root) sp_rcontext(root_parsing_ctx, + return_value_fld, + thd->in_sub_stmt); - SYNOPSIS - thd Thread handle - RETURN - FALSE on success - TRUE on error -*/ + if (!ctx) + return NULL; -bool sp_rcontext::init(THD *thd) -{ - uint handler_count= m_root_parsing_ctx->max_handler_index(); - - in_sub_stmt= thd->in_sub_stmt; - - if (init_var_table(thd) || init_var_items()) - return TRUE; - - if (!(m_raised_conditions= new (thd->mem_root) Sql_condition_info[handler_count])) - return TRUE; - - return - !(m_handler= - (sp_handler_t*)thd->alloc(handler_count * sizeof(sp_handler_t))) || - !(m_hstack= - (uint*)thd->alloc(handler_count * sizeof(uint))) || - !(m_in_handler= - (sp_active_handler_t*)thd->alloc(handler_count * - sizeof(sp_active_handler_t))) || - !(m_cstack= - (sp_cursor**)thd->alloc(m_root_parsing_ctx->max_cursor_index() * - sizeof(sp_cursor*))) || - !(m_case_expr_holders= - (Item_cache**)thd->calloc(m_root_parsing_ctx->get_num_case_exprs() * - sizeof (Item_cache*))); + if (ctx->alloc_arrays(thd) || + ctx->init_var_table(thd) || + ctx->init_var_items(thd)) + { + delete ctx; + return NULL; + } + + return ctx; } -/* - Create and initialize a table to store SP-vars. +bool sp_rcontext::alloc_arrays(THD *thd) +{ + { + size_t n= m_root_parsing_ctx->max_cursor_index(); + m_cstack.reset( + static_cast<sp_cursor **> ( + thd->alloc(n * sizeof (sp_cursor*))), + n); + } + + { + size_t n= m_root_parsing_ctx->get_num_case_exprs(); + m_case_expr_holders.reset( + static_cast<Item_cache **> ( + thd->calloc(n * sizeof (Item_cache*))), + n); + } + + return !m_cstack.array() || !m_case_expr_holders.array(); +} - SYNOPSIS - thd Thread handler. 
- RETURN - FALSE on success - TRUE on error -*/ -bool -sp_rcontext::init_var_table(THD *thd) +bool sp_rcontext::init_var_table(THD *thd) { List<Create_field> field_def_lst; if (!m_root_parsing_ctx->max_var_index()) - return FALSE; + return false; m_root_parsing_ctx->retrieve_field_definitions(&field_def_lst); DBUG_ASSERT(field_def_lst.elements == m_root_parsing_ctx->max_var_index()); - + if (!(m_var_table= create_virtual_tmp_table(thd, field_def_lst))) - return TRUE; + return true; - m_var_table->copy_blobs= TRUE; - m_var_table->alias.set("", 0, table_alias_charset); + m_var_table->copy_blobs= true; + m_var_table->alias.set("", 0, m_var_table->alias.charset()); - return FALSE; + return false; } -/* - Create and initialize an Item-adapter (Item_field) for each SP-var field. - - RETURN - FALSE on success - TRUE on error -*/ - -bool -sp_rcontext::init_var_items() +bool sp_rcontext::init_var_items(THD *thd) { - uint idx; uint num_vars= m_root_parsing_ctx->max_var_index(); - if (!(m_var_items= (Item**) sql_alloc(num_vars * sizeof (Item *)))) - return TRUE; + m_var_items.reset( + static_cast<Item **> ( + thd->alloc(num_vars * sizeof (Item *))), + num_vars); + + if (!m_var_items.array()) + return true; - for (idx = 0; idx < num_vars; ++idx) + for (uint idx = 0; idx < num_vars; ++idx) { if (!(m_var_items[idx]= new Item_field(m_var_table->field[idx]))) - return TRUE; + return true; } - return FALSE; + return false; } -bool -sp_rcontext::set_return_value(THD *thd, Item **return_value_item) +bool sp_rcontext::set_return_value(THD *thd, Item **return_value_item) { DBUG_ASSERT(m_return_value_fld); - m_return_value_set = TRUE; + m_return_value_set = true; return sp_eval_expr(thd, m_return_value_fld, return_value_item); } -#define IS_WARNING_CONDITION(S) ((S)[0] == '0' && (S)[1] == '1') -#define IS_NOT_FOUND_CONDITION(S) ((S)[0] == '0' && (S)[1] == '2') -#define IS_EXCEPTION_CONDITION(S) ((S)[0] != '0' || (S)[1] > '2') - -/** - Find an SQL handler for the given error. 
- - SQL handlers are pushed on the stack m_handler, with the latest/innermost - one on the top; we then search for matching handlers from the top and - down. - - We search through all the handlers, looking for the most specific one - (sql_errno more specific than sqlstate more specific than the rest). - Note that mysql error code handlers is a MySQL extension, not part of - the standard. - - SQL handlers for warnings are searched in the current scope only. - - SQL handlers for errors are searched in the current and in outer scopes. - That's why finding and activation of handler must be separated: an errror - handler might be located in the outer scope, which is not active at the - moment. Before such handler can be activated, execution flow should - unwind to that scope. - - Found SQL handler is remembered in m_hfound for future activation. - If no handler is found, m_hfound is -1. - - @param thd Thread handle - @param sql_errno The error code - @param sqlstate The error SQL state - @param level The error level - @param msg The error message - - @retval TRUE if an SQL handler was found - @retval FALSE otherwise -*/ - -bool -sp_rcontext::find_handler(THD *thd, - uint sql_errno, - const char *sqlstate, - MYSQL_ERROR::enum_warning_level level, - const char *msg) +bool sp_rcontext::push_cursor(sp_lex_keeper *lex_keeper, + sp_instr_cpush *i) { - int i= m_hcount; - - /* Reset previously found handler. */ - m_hfound= -1; - /* - If this is a fatal sub-statement error, and this runtime - context corresponds to a sub-statement, no CONTINUE/EXIT - handlers from this context are applicable: try to locate one - in the outer scope. + We should create cursors in the callers arena, as + it could be (and usually is) used in several instructions. 
*/ - if (thd->is_fatal_sub_stmt_error && in_sub_stmt) - i= 0; - - /* Search handlers from the latest (innermost) to the oldest (outermost) */ - while (i--) - { - sp_cond_type_t *cond= m_handler[i].cond; - int j= m_ihsp; - - /* Check active handlers, to avoid invoking one recursively */ - while (j--) - if (m_in_handler[j].ip == m_handler[i].handler) - break; - if (j >= 0) - continue; // Already executing this handler + sp_cursor *c= new (callers_arena->mem_root) sp_cursor(lex_keeper, i); - switch (cond->type) - { - case sp_cond_type_t::number: - if (sql_errno == cond->mysqlerr && - (m_hfound < 0 || m_handler[m_hfound].cond->type > sp_cond_type_t::number)) - m_hfound= i; // Always the most specific - break; - case sp_cond_type_t::state: - if (strcmp(sqlstate, cond->sqlstate) == 0 && - (m_hfound < 0 || m_handler[m_hfound].cond->type > sp_cond_type_t::state)) - m_hfound= i; - break; - case sp_cond_type_t::warning: - if ((IS_WARNING_CONDITION(sqlstate) || - level == MYSQL_ERROR::WARN_LEVEL_WARN) && - m_hfound < 0) - m_hfound= i; - break; - case sp_cond_type_t::notfound: - if (IS_NOT_FOUND_CONDITION(sqlstate) && m_hfound < 0) - m_hfound= i; - break; - case sp_cond_type_t::exception: - if (IS_EXCEPTION_CONDITION(sqlstate) && - level == MYSQL_ERROR::WARN_LEVEL_ERROR && - m_hfound < 0) - m_hfound= i; - break; - } - } - - if (m_hfound >= 0) - { - DBUG_ASSERT((uint) m_hfound < m_root_parsing_ctx->max_handler_index()); - - m_raised_conditions[m_hfound].clear(); - m_raised_conditions[m_hfound].set(sql_errno, sqlstate, level, msg); - - return TRUE; - } + if (c == NULL) + return true; - /* - Only "exception conditions" are propagated to handlers in calling - contexts. If no handler is found locally for a "completion condition" - (warning or "not found") we will simply resume execution. 
- */ - if (m_prev_runtime_ctx && IS_EXCEPTION_CONDITION(sqlstate) && - level == MYSQL_ERROR::WARN_LEVEL_ERROR) - { - return m_prev_runtime_ctx->find_handler(thd, sql_errno, sqlstate, - level, msg); - } - - return FALSE; + m_cstack[m_ccount++]= c; + return false; } -void -sp_rcontext::push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i) -{ - DBUG_ENTER("sp_rcontext::push_cursor"); - DBUG_ASSERT(m_ccount < m_root_parsing_ctx->max_cursor_index()); - m_cstack[m_ccount++]= new sp_cursor(lex_keeper, i); - DBUG_PRINT("info", ("m_ccount: %d", m_ccount)); - DBUG_VOID_RETURN; -} -void -sp_rcontext::pop_cursors(uint count) +void sp_rcontext::pop_cursors(uint count) { - DBUG_ENTER("sp_rcontext::pop_cursors"); DBUG_ASSERT(m_ccount >= count); + while (count--) - { delete m_cstack[--m_ccount]; - } - DBUG_PRINT("info", ("m_ccount: %d", m_ccount)); - DBUG_VOID_RETURN; } -void -sp_rcontext::push_handler(struct sp_cond_type *cond, uint h, int type) -{ - DBUG_ENTER("sp_rcontext::push_handler"); - DBUG_ASSERT(m_hcount < m_root_parsing_ctx->max_handler_index()); - - m_handler[m_hcount].cond= cond; - m_handler[m_hcount].handler= h; - m_handler[m_hcount].type= type; - m_hcount+= 1; - DBUG_PRINT("info", ("m_hcount: %d", m_hcount)); - DBUG_VOID_RETURN; -} - -void -sp_rcontext::pop_handlers(uint count) +bool sp_rcontext::push_handler(sp_handler *handler, uint first_ip) { - DBUG_ENTER("sp_rcontext::pop_handlers"); - DBUG_ASSERT(m_hcount >= count); + /* + We should create handler entries in the callers arena, as + they could be (and usually are) used in several instructions. 
+ */ + sp_handler_entry *he= + new (callers_arena->mem_root) sp_handler_entry(handler, first_ip); - m_hcount-= count; + if (he == NULL) + return true; - DBUG_PRINT("info", ("m_hcount: %d", m_hcount)); - DBUG_VOID_RETURN; + return m_handlers.append(he); } -void -sp_rcontext::push_hstack(uint h) -{ - DBUG_ENTER("sp_rcontext::push_hstack"); - DBUG_ASSERT(m_hsp < m_root_parsing_ctx->max_handler_index()); - - m_hstack[m_hsp++]= h; - DBUG_PRINT("info", ("m_hsp: %d", m_hsp)); - DBUG_VOID_RETURN; -} - -uint -sp_rcontext::pop_hstack() +void sp_rcontext::pop_handlers(size_t count) { - uint handler; - DBUG_ENTER("sp_rcontext::pop_hstack"); - DBUG_ASSERT(m_hsp); - - handler= m_hstack[--m_hsp]; + DBUG_ASSERT(m_handlers.elements() >= count); - DBUG_PRINT("info", ("m_hsp: %d", m_hsp)); - DBUG_RETURN(handler); + for (size_t i= 0; i < count; ++i) + m_handlers.pop(); } -/** - Prepare found handler to be executed. - - @retval TRUE if an SQL handler is activated (was found) and IP of the - first handler instruction. - @retval FALSE if there is no active handler -*/ -bool -sp_rcontext::activate_handler(THD *thd, - uint *ip, - sp_instr *instr, - Query_arena *execute_arena, - Query_arena *backup_arena) +bool sp_rcontext::handle_sql_condition(THD *thd, + uint *ip, + const sp_instr *cur_spi) { - if (m_hfound < 0) - return FALSE; + DBUG_ENTER("sp_rcontext::handle_sql_condition"); - switch (m_handler[m_hfound].type) { - case SP_HANDLER_NONE: - break; + /* + If this is a fatal sub-statement error, and this runtime + context corresponds to a sub-statement, no CONTINUE/EXIT + handlers from this context are applicable: try to locate one + in the outer scope. 
+ */ + if (thd->is_fatal_sub_stmt_error && m_in_sub_stmt) + DBUG_RETURN(false); - case SP_HANDLER_CONTINUE: - thd->restore_active_arena(execute_arena, backup_arena); - thd->set_n_backup_active_arena(execute_arena, backup_arena); - push_hstack(instr->get_cont_dest()); + Diagnostics_area *da= thd->get_stmt_da(); + const sp_handler *found_handler= NULL; + const Sql_condition *found_condition= NULL; - /* Fall through */ + if (thd->is_error()) + { + found_handler= + cur_spi->m_ctx->find_handler(da->get_sqlstate(), + da->sql_errno(), + Sql_condition::WARN_LEVEL_ERROR); + + if (found_handler) + found_condition= da->get_error_condition(); + + /* + Found condition can be NULL if the diagnostics area was full + when the error was raised. It can also be NULL if + Diagnostics_area::set_error_status(uint sql_error) was used. + In these cases, make a temporary Sql_condition here so the + error can be handled. + */ + if (!found_condition) + { + Sql_condition *condition= + new (callers_arena->mem_root) Sql_condition(callers_arena->mem_root); + condition->set(da->sql_errno(), da->get_sqlstate(), + Sql_condition::WARN_LEVEL_ERROR, + da->message()); + found_condition= condition; + } + } + else if (da->current_statement_warn_count()) + { + Diagnostics_area::Sql_condition_iterator it= da->sql_conditions(); + const Sql_condition *c; - default: - /* End aborted result set. */ + // Here we need to find the last warning/note from the stack. + // In MySQL most substantial warning is the last one. + // (We could have used a reverse iterator here if one existed) - if (end_partial_result_set) - thd->protocol->end_partial_result_set(thd); + while ((c= it++)) + { + if (c->get_level() == Sql_condition::WARN_LEVEL_WARN || + c->get_level() == Sql_condition::WARN_LEVEL_NOTE) + { + const sp_handler *handler= + cur_spi->m_ctx->find_handler(c->get_sqlstate(), + c->get_sql_errno(), + c->get_level()); + if (handler) + { + found_handler= handler; + found_condition= c; + } + } + } + } - /* Enter handler. 
*/ + if (!found_handler) + DBUG_RETURN(false); - DBUG_ASSERT(m_ihsp < m_root_parsing_ctx->max_handler_index()); - DBUG_ASSERT(m_hfound >= 0); + // At this point, we know that: + // - there is a pending SQL-condition (error or warning); + // - there is an SQL-handler for it. - m_in_handler[m_ihsp].ip= m_handler[m_hfound].handler; - m_in_handler[m_ihsp].index= m_hfound; - m_ihsp++; + DBUG_ASSERT(found_condition); - DBUG_PRINT("info", ("Entering handler...")); - DBUG_PRINT("info", ("m_ihsp: %d", m_ihsp)); + sp_handler_entry *handler_entry= NULL; + for (size_t i= 0; i < m_handlers.elements(); ++i) + { + sp_handler_entry *h= m_handlers.at(i); - /* Reset error state. */ + if (h->handler == found_handler) + { + handler_entry= h; + break; + } + } - thd->clear_error(); - thd->reset_killed(); // Some errors set thd->killed - // (e.g. "bad data"). + /* + handler_entry usually should not be NULL here, as that indicates + that the parser context thinks a HANDLER should be activated, + but the runtime context cannot find it. + + However, this can happen (and this is in line with the Standard) + if SQL-condition has been raised before DECLARE HANDLER instruction + is processed. + + For example: + CREATE PROCEDURE p() + BEGIN + DECLARE v INT DEFAULT 'get'; -- raises SQL-warning here + DECLARE EXIT HANDLER ... -- this handler does not catch the warning + END + */ + if (!handler_entry) + DBUG_RETURN(false); - /* Return IP of the activated SQL handler. */ - *ip= m_handler[m_hfound].handler; + // Mark active conditions so that they can be deleted when the handler exits. + da->mark_sql_conditions_for_removal(); - /* Reset found handler. */ - m_hfound= -1; - } + uint continue_ip= handler_entry->handler->type == sp_handler::CONTINUE ? + cur_spi->get_cont_dest() : 0; - return TRUE; -} + /* End aborted result set. 
*/ + if (end_partial_result_set) + thd->protocol->end_partial_result_set(thd); -void -sp_rcontext::exit_handler() -{ - DBUG_ENTER("sp_rcontext::exit_handler"); - DBUG_ASSERT(m_ihsp); + /* Reset error state. */ + thd->clear_error(); + thd->killed= NOT_KILLED; // Some errors set thd->killed + // (e.g. "bad data"). + + /* Add a frame to handler-call-stack. */ + Sql_condition_info *cond_info= + new (callers_arena->mem_root) Sql_condition_info(found_condition, + callers_arena); + Handler_call_frame *frame= + new (callers_arena->mem_root) Handler_call_frame(cond_info, continue_ip); + m_handler_call_stack.append(frame); - uint hindex= m_in_handler[m_ihsp-1].index; - m_raised_conditions[hindex].clear(); - m_ihsp-= 1; + *ip= handler_entry->first_ip; - DBUG_PRINT("info", ("m_ihsp: %d", m_ihsp)); - DBUG_VOID_RETURN; + DBUG_RETURN(true); } -Sql_condition_info* sp_rcontext::raised_condition() const + +uint sp_rcontext::exit_handler(Diagnostics_area *da) { - if (m_ihsp > 0) - { - uint hindex= m_in_handler[m_ihsp - 1].index; - Sql_condition_info *raised= & m_raised_conditions[hindex]; - return raised; - } + DBUG_ENTER("sp_rcontext::exit_handler"); + DBUG_ASSERT(m_handler_call_stack.elements() > 0); - if (m_prev_runtime_ctx) - return m_prev_runtime_ctx->raised_condition(); + Handler_call_frame *f= m_handler_call_stack.pop(); - return NULL; -} + /* + Remove the SQL conditions that were present in DA when the + handler was activated. 
+ */ + da->remove_marked_sql_conditions(); + uint continue_ip= f->continue_ip; -int -sp_rcontext::set_variable(THD *thd, uint var_idx, Item **value) -{ - return set_variable(thd, m_var_table->field[var_idx], value); + DBUG_RETURN(continue_ip); } -int -sp_rcontext::set_variable(THD *thd, Field *field, Item **value) +int sp_rcontext::set_variable(THD *thd, Field *field, Item **value) { if (!value) { @@ -478,25 +378,47 @@ sp_rcontext::set_variable(THD *thd, Field *field, Item **value) } -Item * -sp_rcontext::get_item(uint var_idx) +Item_cache *sp_rcontext::create_case_expr_holder(THD *thd, + const Item *item) const { - return m_var_items[var_idx]; + Item_cache *holder; + Query_arena current_arena; + + thd->set_n_backup_active_arena(thd->spcont->callers_arena, ¤t_arena); + + holder= Item_cache::get_cache(item); + + thd->restore_active_arena(thd->spcont->callers_arena, ¤t_arena); + + return holder; } -Item ** -sp_rcontext::get_item_addr(uint var_idx) +bool sp_rcontext::set_case_expr(THD *thd, int case_expr_id, + Item **case_expr_item_ptr) { - return m_var_items + var_idx; + Item *case_expr_item= sp_prepare_func_item(thd, case_expr_item_ptr); + if (!case_expr_item) + return true; + + if (!m_case_expr_holders[case_expr_id] || + m_case_expr_holders[case_expr_id]->result_type() != + case_expr_item->result_type()) + { + m_case_expr_holders[case_expr_id]= + create_case_expr_holder(thd, case_expr_item); + } + + m_case_expr_holders[case_expr_id]->store(case_expr_item); + m_case_expr_holders[case_expr_id]->cache_value(); + return false; } -/* - * - * sp_cursor - * - */ +/////////////////////////////////////////////////////////////////////////// +// sp_cursor implementation. 
+/////////////////////////////////////////////////////////////////////////// + sp_cursor::sp_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i) :m_lex_keeper(lex_keeper), @@ -523,8 +445,7 @@ sp_cursor::sp_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i) 0 in case of success, -1 otherwise */ -int -sp_cursor::open(THD *thd) +int sp_cursor::open(THD *thd) { if (server_side_cursor) { @@ -538,8 +459,7 @@ sp_cursor::open(THD *thd) } -int -sp_cursor::close(THD *thd) +int sp_cursor::close(THD *thd) { if (! server_side_cursor) { @@ -551,16 +471,14 @@ sp_cursor::close(THD *thd) } -void -sp_cursor::destroy() +void sp_cursor::destroy() { delete server_side_cursor; - server_side_cursor= 0; + server_side_cursor= NULL; } -int -sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars) +int sp_cursor::fetch(THD *thd, List<sp_variable> *vars) { if (! server_side_cursor) { @@ -575,7 +493,7 @@ sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars) } DBUG_EXECUTE_IF("bug23032_emit_warning", - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));); @@ -599,108 +517,13 @@ sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars) } -/* - Create an instance of appropriate Item_cache class depending on the - specified type in the callers arena. - - SYNOPSIS - thd thread handler - result_type type of the expression +/////////////////////////////////////////////////////////////////////////// +// sp_cursor::Select_fetch_into_spvars implementation. +/////////////////////////////////////////////////////////////////////////// - RETURN - Pointer to valid object on success - NULL on error - NOTE - We should create cache items in the callers arena, as they are used - between in several instructions. 
-*/ - -Item_cache * -sp_rcontext::create_case_expr_holder(THD *thd, const Item *item) -{ - Item_cache *holder; - Query_arena current_arena; - - thd->set_n_backup_active_arena(thd->spcont->callers_arena, ¤t_arena); - - holder= Item_cache::get_cache(item); - - thd->restore_active_arena(thd->spcont->callers_arena, ¤t_arena); - - return holder; -} - - -/* - Set CASE expression to the specified value. - - SYNOPSIS - thd thread handler - case_expr_id identifier of the CASE expression - case_expr_item a value of the CASE expression - - RETURN - FALSE on success - TRUE on error - - NOTE - The idea is to reuse Item_cache for the expression of the one CASE - statement. This optimization takes place when there is CASE statement - inside of a loop. So, in other words, we will use the same object on each - iteration instead of creating a new one for each iteration. - - TODO - Hypothetically, a type of CASE expression can be different for each - iteration. For instance, this can happen if the expression contains a - session variable (something like @@VAR) and its type is changed from one - iteration to another. - - In order to cope with this problem, we check type each time, when we use - already created object. If the type does not match, we re-create Item. - This also can (should?) be optimized. 
-*/ - -int -sp_rcontext::set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr) -{ - Item *case_expr_item= sp_prepare_func_item(thd, case_expr_item_ptr); - if (!case_expr_item) - return TRUE; - - if (!m_case_expr_holders[case_expr_id] || - m_case_expr_holders[case_expr_id]->result_type() != - case_expr_item->result_type()) - { - m_case_expr_holders[case_expr_id]= - create_case_expr_holder(thd, case_expr_item); - } - - m_case_expr_holders[case_expr_id]->store(case_expr_item); - m_case_expr_holders[case_expr_id]->cache_value(); - return FALSE; -} - - -Item * -sp_rcontext::get_case_expr(int case_expr_id) -{ - return m_case_expr_holders[case_expr_id]; -} - - -Item ** -sp_rcontext::get_case_expr_addr(int case_expr_id) -{ - return (Item**) m_case_expr_holders + case_expr_id; -} - - -/*************************************************************************** - Select_fetch_into_spvars -****************************************************************************/ - -int Select_fetch_into_spvars::prepare(List<Item> &fields, SELECT_LEX_UNIT *u) +int sp_cursor::Select_fetch_into_spvars::prepare(List<Item> &fields, + SELECT_LEX_UNIT *u) { /* Cache the number of columns in the result set in order to easily @@ -711,11 +534,11 @@ int Select_fetch_into_spvars::prepare(List<Item> &fields, SELECT_LEX_UNIT *u) } -int Select_fetch_into_spvars::send_data(List<Item> &items) +int sp_cursor::Select_fetch_into_spvars::send_data(List<Item> &items) { - List_iterator_fast<struct sp_variable> spvar_iter(*spvar_list); + List_iterator_fast<sp_variable> spvar_iter(*spvar_list); List_iterator_fast<Item> item_iter(items); - sp_variable_t *spvar; + sp_variable *spvar; Item *item; /* Must be ensured by the caller */ @@ -728,7 +551,7 @@ int Select_fetch_into_spvars::send_data(List<Item> &items) for (; spvar= spvar_iter++, item= item_iter++; ) { if (thd->spcont->set_variable(thd, spvar->offset, &item)) - return 1; + return true; } - return 0; + return false; } diff --git 
a/sql/sp_rcontext.h b/sql/sp_rcontext.h index 5008a73d96c..ce692024d0d 100644 --- a/sql/sp_rcontext.h +++ b/sql/sp_rcontext.h @@ -22,80 +22,18 @@ #endif #include "sql_class.h" // select_result_interceptor +#include "sp_pcontext.h" // sp_condition_value + +/////////////////////////////////////////////////////////////////////////// +// sp_rcontext declaration. +/////////////////////////////////////////////////////////////////////////// -struct sp_cond_type; class sp_cursor; -struct sp_variable; class sp_lex_keeper; class sp_instr_cpush; class Query_arena; class sp_head; -class sp_pcontext; class Item_cache; -typedef class st_select_lex_unit SELECT_LEX_UNIT; -class Server_side_cursor; - -#define SP_HANDLER_NONE 0 -#define SP_HANDLER_EXIT 1 -#define SP_HANDLER_CONTINUE 2 -#define SP_HANDLER_UNDO 3 - -typedef struct -{ - /** Condition caught by this HANDLER. */ - struct sp_cond_type *cond; - /** Location (instruction pointer) of the handler code. */ - uint handler; - /** Handler type (EXIT, CONTINUE). */ - int type; -} sp_handler_t; - -typedef struct -{ - /** Instruction pointer of the active handler. */ - uint ip; - /** Handler index of the active handler. */ - uint index; -} sp_active_handler_t; - - -class Sql_condition_info : public Sql_alloc -{ -public: - /** SQL error code. */ - uint m_sql_errno; - - /** Error level. */ - MYSQL_ERROR::enum_warning_level m_level; - - /** SQLSTATE. */ - char m_sql_state[SQLSTATE_LENGTH + 1]; - - /** Text message. 
*/ - char m_message[MYSQL_ERRMSG_SIZE]; - - void set(uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, - const char* msg) - { - m_sql_errno= sql_errno; - m_level= level; - - memcpy(m_sql_state, sqlstate, SQLSTATE_LENGTH); - m_sql_state[SQLSTATE_LENGTH]= '\0'; - - strncpy(m_message, msg, MYSQL_ERRMSG_SIZE); - } - - void clear() - { - m_sql_errno= 0; - m_level= MYSQL_ERROR::WARN_LEVEL_ERROR; - - m_sql_state[0]= '\0'; - m_message[0]= '\0'; - } -}; /* @@ -119,251 +57,412 @@ public: class sp_rcontext : public Sql_alloc { - sp_rcontext(const sp_rcontext &); /* Prevent use of these */ - void operator=(sp_rcontext &); - - public: - - /* - Arena used to (re) allocate items on . E.g. reallocate INOUT/OUT - SP parameters when they don't fit into prealloced items. This - is common situation with String items. It is used mainly in - sp_eval_func_item(). - */ - Query_arena *callers_arena; - - /* - End a open result set before start executing a continue/exit - handler if one is found as otherwise the client will hang - due to a violation of the client/server protocol. - */ - bool end_partial_result_set; - -#ifndef DBUG_OFF - /* - The routine for which this runtime context is created. Used for checking - if correct runtime context is used for variable handling. - */ - sp_head *sp; -#endif - - sp_rcontext(sp_pcontext *root_parsing_ctx, Field *return_value_fld, - sp_rcontext *prev_runtime_ctx); - bool init(THD *thd); +public: + /// Construct and properly initialize a new sp_rcontext instance. The static + /// create-function is needed because we need a way to return an error from + /// the constructor. + /// + /// @param thd Thread handle. + /// @param root_parsing_ctx Top-level parsing context for this stored program. + /// @param return_value_fld Field object to store the return value + /// (for stored functions only). + /// + /// @return valid sp_rcontext object or NULL in case of OOM-error. 
+ static sp_rcontext *create(THD *thd, + const sp_pcontext *root_parsing_ctx, + Field *return_value_fld); ~sp_rcontext(); - int - set_variable(THD *thd, uint var_idx, Item **value); - - Item * - get_item(uint var_idx); +private: + sp_rcontext(const sp_pcontext *root_parsing_ctx, + Field *return_value_fld, + bool in_sub_stmt); - Item ** - get_item_addr(uint var_idx); + // Prevent use of copying constructor and operator. + sp_rcontext(const sp_rcontext &); + void operator=(sp_rcontext &); - bool - set_return_value(THD *thd, Item **return_value_item); +private: + /// This is an auxillary class to store entering instruction pointer for an + /// SQL-handler. + class sp_handler_entry : public Sql_alloc + { + public: + /// Handler definition (from parsing context). + const sp_handler *handler; + + /// Instruction pointer to the first instruction. + uint first_ip; + + /// The constructor. + /// + /// @param _handler sp_handler object. + /// @param _first_ip first instruction pointer. + sp_handler_entry(const sp_handler *_handler, uint _first_ip) + :handler(_handler), first_ip(_first_ip) + { } + }; - inline bool - is_return_value_set() const +public: + /// This class stores basic information about SQL-condition, such as: + /// - SQL error code; + /// - error level; + /// - SQLSTATE; + /// - text message. + /// + /// It's used to organize runtime SQL-handler call stack. + /// + /// Standard Sql_condition class can not be used, because we don't always have + /// an Sql_condition object for an SQL-condition in Diagnostics_area. + /// + /// Eventually, this class should be moved to sql_error.h, and be a part of + /// standard SQL-condition processing (Diagnostics_area should contain an + /// object for active SQL-condition, not just information stored in DA's + /// fields). + class Sql_condition_info : public Sql_alloc { - return m_return_value_set; - } + public: + /// SQL error code. + uint sql_errno; + + /// Error level. 
+ Sql_condition::enum_warning_level level; + + /// SQLSTATE. + char sql_state[SQLSTATE_LENGTH + 1]; + + /// Text message. + char *message; + + /// The constructor. + /// + /// @param _sql_condition The SQL condition. + /// @param arena Query arena for SP + Sql_condition_info(const Sql_condition *_sql_condition, + Query_arena *arena) + :sql_errno(_sql_condition->get_sql_errno()), + level(_sql_condition->get_level()) + { + memcpy(sql_state, _sql_condition->get_sqlstate(), SQLSTATE_LENGTH); + sql_state[SQLSTATE_LENGTH]= '\0'; + + message= strdup_root(arena->mem_root, _sql_condition->get_message_text()); + } + }; - /* - SQL handlers support. - */ +private: + /// This class represents a call frame of SQL-handler (one invocation of a + /// handler). Basically, it's needed to store continue instruction pointer for + /// CONTINUE SQL-handlers. + class Handler_call_frame : public Sql_alloc + { + public: + /// SQL-condition, triggered handler activation. + const Sql_condition_info *sql_condition; + + /// Continue-instruction-pointer for CONTINUE-handlers. + /// The attribute contains 0 for EXIT-handlers. + uint continue_ip; + + /// The constructor. + /// + /// @param _sql_condition SQL-condition, triggered handler activation. + /// @param _continue_ip Continue instruction pointer. + Handler_call_frame(const Sql_condition_info *_sql_condition, + uint _continue_ip) + :sql_condition(_sql_condition), + continue_ip(_continue_ip) + { } + }; - void push_handler(struct sp_cond_type *cond, uint h, int type); +public: + /// Arena used to (re) allocate items on. E.g. reallocate INOUT/OUT + /// SP-variables when they don't fit into prealloced items. This is common + /// situation with String items. It is used mainly in sp_eval_func_item(). + Query_arena *callers_arena; - void pop_handlers(uint count); + /// Flag to end an open result set before start executing an SQL-handler + /// (if one is found). Otherwise the client will hang due to a violation + /// of the client/server protocol. 
+ bool end_partial_result_set; - bool - find_handler(THD *thd, - uint sql_errno, - const char *sqlstate, - MYSQL_ERROR::enum_warning_level level, - const char *msg); +#ifndef DBUG_OFF + /// The stored program for which this runtime context is created. Used for + /// checking if correct runtime context is used for variable handling. + sp_head *sp; +#endif - Sql_condition_info *raised_condition() const; + ///////////////////////////////////////////////////////////////////////// + // SP-variables. + ///////////////////////////////////////////////////////////////////////// - void - push_hstack(uint h); + int set_variable(THD *thd, uint var_idx, Item **value) + { return set_variable(thd, m_var_table->field[var_idx], value); } - uint - pop_hstack(); + Item *get_item(uint var_idx) const + { return m_var_items[var_idx]; } - bool - activate_handler(THD *thd, - uint *ip, - sp_instr *instr, - Query_arena *execute_arena, - Query_arena *backup_arena); + Item **get_item_addr(uint var_idx) const + { return m_var_items.array() + var_idx; } + bool set_return_value(THD *thd, Item **return_value_item); - void - exit_handler(); + bool is_return_value_set() const + { return m_return_value_set; } - void - push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i); + ///////////////////////////////////////////////////////////////////////// + // SQL-handlers. + ///////////////////////////////////////////////////////////////////////// - void - pop_cursors(uint count); + /// Create a new sp_handler_entry instance and push it to the handler call + /// stack. + /// + /// @param handler SQL-handler object. + /// @param first_ip First instruction pointer of the handler. + /// + /// @return error flag. + /// @retval false on success. + /// @retval true on error. + bool push_handler(sp_handler *handler, uint first_ip); - inline void - pop_all_cursors() - { - pop_cursors(m_ccount); - } + /// Pop and delete given number of sp_handler_entry instances from the handler + /// call stack. 
+ /// + /// @param count Number of handler entries to pop & delete. + void pop_handlers(size_t count); - inline sp_cursor * - get_cursor(uint i) + const Sql_condition_info *raised_condition() const { - return m_cstack[i]; + return m_handler_call_stack.elements() ? + (*m_handler_call_stack.back())->sql_condition : NULL; } - /* - CASE expressions support. - */ + /// Handle current SQL condition (if any). + /// + /// This is the public-interface function to handle SQL conditions in + /// stored routines. + /// + /// @param thd Thread handle. + /// @param ip[out] Instruction pointer to the first handler + /// instruction. + /// @param cur_spi Current SP instruction. + /// + /// @retval true if an SQL-handler has been activated. That means, all of + /// the following conditions are satisfied: + /// - the SP-instruction raised SQL-condition(s), + /// - and there is an SQL-handler to process at least one of those + /// SQL-conditions, + /// - and that SQL-handler has been activated. + /// Note, that the return value has nothing to do with "error flag" + /// semantics. + /// + /// @retval false otherwise. + bool handle_sql_condition(THD *thd, + uint *ip, + const sp_instr *cur_spi); + + /// Remove latest call frame from the handler call stack. + /// + /// @param da Diagnostics area containing handled conditions. + /// + /// @return continue instruction pointer of the removed handler. + uint exit_handler(Diagnostics_area *da); + + ///////////////////////////////////////////////////////////////////////// + // Cursors. + ///////////////////////////////////////////////////////////////////////// + + /// Create a new sp_cursor instance and push it to the cursor stack. + /// + /// @param lex_keeper SP-instruction execution helper. + /// @param i Cursor-push instruction. + /// + /// @return error flag. + /// @retval false on success. + /// @retval true on error. 
+ bool push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i); + + /// Pop and delete given number of sp_cursor instance from the cursor stack. + /// + /// @param count Number of cursors to pop & delete. + void pop_cursors(uint count); + + void pop_all_cursors() + { pop_cursors(m_ccount); } + + sp_cursor *get_cursor(uint i) const + { return m_cstack[i]; } + + ///////////////////////////////////////////////////////////////////////// + // CASE expressions. + ///////////////////////////////////////////////////////////////////////// + + /// Set CASE expression to the specified value. + /// + /// @param thd Thread handler. + /// @param case_expr_id The CASE expression identifier. + /// @param case_expr_item The CASE expression value + /// + /// @return error flag. + /// @retval false on success. + /// @retval true on error. + /// + /// @note The idea is to reuse Item_cache for the expression of the one + /// CASE statement. This optimization takes place when there is CASE + /// statement inside of a loop. So, in other words, we will use the same + /// object on each iteration instead of creating a new one for each + /// iteration. + /// + /// TODO + /// Hypothetically, a type of CASE expression can be different for each + /// iteration. For instance, this can happen if the expression contains + /// a session variable (something like @@VAR) and its type is changed + /// from one iteration to another. + /// + /// In order to cope with this problem, we check type each time, when we + /// use already created object. If the type does not match, we re-create + /// Item. This also can (should?) be optimized. 
+ bool set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr); + + Item *get_case_expr(int case_expr_id) const + { return m_case_expr_holders[case_expr_id]; } + + Item ** get_case_expr_addr(int case_expr_id) const + { return (Item**) m_case_expr_holders.array() + case_expr_id; } - int - set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr); +private: + /// Internal function to allocate memory for arrays. + /// + /// @param thd Thread handle. + /// + /// @return error flag: false on success, true in case of failure. + bool alloc_arrays(THD *thd); + + /// Create and initialize a table to store SP-variables. + /// + /// param thd Thread handle. + /// + /// @return error flag. + /// @retval false on success. + /// @retval true on error. + bool init_var_table(THD *thd); - Item * - get_case_expr(int case_expr_id); + /// Create and initialize an Item-adapter (Item_field) for each SP-var field. + /// + /// param thd Thread handle. + /// + /// @return error flag. + /// @retval false on success. + /// @retval true on error. + bool init_var_items(THD *thd); + + /// Create an instance of appropriate Item_cache class depending on the + /// specified type in the callers arena. + /// + /// @note We should create cache items in the callers arena, as they are + /// used between in several instructions. + /// + /// @param thd Thread handler. + /// @param item Item to get the expression type. + /// + /// @return Pointer to valid object on success, or NULL in case of error. + Item_cache *create_case_expr_holder(THD *thd, const Item *item) const; - Item ** - get_case_expr_addr(int case_expr_id); + int set_variable(THD *thd, Field *field, Item **value); private: - sp_pcontext *m_root_parsing_ctx; + /// Top-level (root) parsing context for this runtime context. + const sp_pcontext *m_root_parsing_ctx; - /* Virtual table for storing variables. */ + /// Virtual table for storing SP-variables. 
TABLE *m_var_table; - /* - Collection of Item_field proxies, each of them points to the corresponding - field in m_var_table. - */ - Item **m_var_items; + /// Collection of Item_field proxies, each of them points to the + /// corresponding field in m_var_table. + Bounds_checked_array<Item *> m_var_items; - /* - This is a pointer to a field, which should contain return value for stored - functions (only). For stored procedures, this pointer is NULL. - */ + /// This is a pointer to a field, which should contain return value for + /// stored functions (only). For stored procedures, this pointer is NULL. Field *m_return_value_fld; - /* - Indicates whether the return value (in m_return_value_fld) has been set - during execution. - */ + /// Indicates whether the return value (in m_return_value_fld) has been + /// set during execution. bool m_return_value_set; - /** - TRUE if the context is created for a sub-statement. - */ - bool in_sub_stmt; + /// Flag to tell if the runtime context is created for a sub-statement. + bool m_in_sub_stmt; - sp_handler_t *m_handler; // Visible handlers + /// Stack of visible handlers. + Dynamic_array<sp_handler_entry *> m_handlers; - /** - SQL conditions caught by each handler. - This is an array indexed by handler index. - */ - Sql_condition_info *m_raised_conditions; + /// Stack of caught SQL conditions. + Dynamic_array<Handler_call_frame *> m_handler_call_stack; - uint m_hcount; // Stack pointer for m_handler - uint *m_hstack; // Return stack for continue handlers - uint m_hsp; // Stack pointer for m_hstack - /** Active handler stack. */ - sp_active_handler_t *m_in_handler; - uint m_ihsp; // Stack pointer for m_in_handler - int m_hfound; // Set by find_handler; -1 if not found + /// Stack of cursors. + Bounds_checked_array<sp_cursor *> m_cstack; - sp_cursor **m_cstack; + /// Current number of cursors in m_cstack. 
uint m_ccount; - Item_cache **m_case_expr_holders; - - /* Previous runtime context (NULL if none) */ - sp_rcontext *m_prev_runtime_ctx; - -private: - bool init_var_table(THD *thd); - bool init_var_items(); - - Item_cache *create_case_expr_holder(THD *thd, const Item *item); - - int set_variable(THD *thd, Field *field, Item **value); + /// Array of CASE expression holders. + Bounds_checked_array<Item_cache *> m_case_expr_holders; }; // class sp_rcontext : public Sql_alloc +/////////////////////////////////////////////////////////////////////////// +// sp_cursor declaration. +/////////////////////////////////////////////////////////////////////////// -/* - An interceptor of cursor result set used to implement - FETCH <cname> INTO <varlist>. -*/ - -class Select_fetch_into_spvars: public select_result_interceptor -{ - List<struct sp_variable> *spvar_list; - uint field_count; -public: - Select_fetch_into_spvars() {} /* Remove gcc warning */ - uint get_field_count() { return field_count; } - void set_spvar_list(List<struct sp_variable> *vars) { spvar_list= vars; } - - virtual bool send_eof() { return FALSE; } - virtual int send_data(List<Item> &items); - virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u); -}; - +class Server_side_cursor; +typedef class st_select_lex_unit SELECT_LEX_UNIT; /* A mediator between stored procedures and server side cursors */ class sp_cursor : public Sql_alloc { -public: +private: + /// An interceptor of cursor result set used to implement + /// FETCH <cname> INTO <varlist>. 
+ class Select_fetch_into_spvars: public select_result_interceptor + { + List<sp_variable> *spvar_list; + uint field_count; + public: + Select_fetch_into_spvars() {} /* Remove gcc warning */ + uint get_field_count() { return field_count; } + void set_spvar_list(List<sp_variable> *vars) { spvar_list= vars; } + + virtual bool send_eof() { return FALSE; } + virtual int send_data(List<Item> &items); + virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u); +}; +public: sp_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i); virtual ~sp_cursor() - { - destroy(); - } + { destroy(); } - sp_lex_keeper * - get_lex_keeper() { return m_lex_keeper; } + sp_lex_keeper *get_lex_keeper() { return m_lex_keeper; } - int - open(THD *thd); + int open(THD *thd); - int - close(THD *thd); + int close(THD *thd); - inline bool - is_open() - { - return test(server_side_cursor); - } + my_bool is_open() + { return test(server_side_cursor); } - int - fetch(THD *, List<struct sp_variable> *vars); + int fetch(THD *, List<sp_variable> *vars); - inline sp_instr_cpush * - get_instr() - { - return m_i; - } + sp_instr_cpush *get_instr() + { return m_i; } private: - Select_fetch_into_spvars result; sp_lex_keeper *m_lex_keeper; Server_side_cursor *server_side_cursor; sp_instr_cpush *m_i; // My push instruction - void - destroy(); + void destroy(); }; // class sp_cursor : public Sql_alloc diff --git a/sql/spatial.h b/sql/spatial.h index 6df6e37e9b8..b0e4b83bf6a 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -196,8 +196,8 @@ struct MBR if (d != mbr->dimension() || d <= 0 || contains(mbr) || within(mbr)) return 0; - MBR intersection(max(xmin, mbr->xmin), max(ymin, mbr->ymin), - min(xmax, mbr->xmax), min(ymax, mbr->ymax)); + MBR intersection(MY_MAX(xmin, mbr->xmin), MY_MAX(ymin, mbr->ymin), + MY_MIN(xmax, mbr->xmax), MY_MIN(ymax, mbr->ymax)); return (d == intersection.dimension()); } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index e2e3647ff2a..80e7d405a04 100644 --- a/sql/sql_acl.cc +++ 
b/sql/sql_acl.cc @@ -829,7 +829,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) goto end; table->use_all_columns(); (void) my_init_dynamic_array(&acl_users,sizeof(ACL_USER), 50, 100, MYF(0)); - username_char_length= min(table->field[1]->char_length(), USERNAME_CHAR_LENGTH); + username_char_length= MY_MIN(table->field[1]->char_length(), + USERNAME_CHAR_LENGTH); password_length= table->field[2]->field_length / table->field[2]->charset()->mbmaxlen; if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323) @@ -1210,9 +1211,9 @@ my_bool acl_reload(THD *thd) Execution might have been interrupted; only print the error message if an error condition has been raised. */ - if (thd->stmt_da->is_error()) + if (thd->get_stmt_da()->is_error()) sql_print_error("Fatal error: Can't open and lock privilege tables: %s", - thd->stmt_da->message()); + thd->get_stmt_da()->message()); goto end; } @@ -1331,7 +1332,7 @@ static ulong get_sort(uint count,...) chars= 128; // Marker that chars existed } } - sort= (sort << 8) + (wild_pos ? min(wild_pos, 127U) : chars); + sort= (sort << 8) + (wild_pos ? MY_MIN(wild_pos, 127U) : chars); } va_end(args); return sort; @@ -1832,6 +1833,13 @@ bool acl_check_host(const char *host, const char *ip) } } mysql_mutex_unlock(&acl_cache->lock); + if (ip != NULL) + { + /* Increment HOST_CACHE.COUNT_HOST_ACL_ERRORS. 
*/ + Host_errors errors; + errors.m_host_acl= 1; + inc_host_errors(ip, &errors); + } return 1; // Host is not allowed } @@ -1972,7 +1980,7 @@ bool change_password(THD *thd, const char *host, const char *user, set_user_plugin(acl_user, new_password_len); } else - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SET_PASSWORD_AUTH_PLUGIN, ER(ER_SET_PASSWORD_AUTH_PLUGIN)); if (update_user_table(thd, table, @@ -4566,7 +4574,6 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, uint i; ulong orig_want_access= want_access; my_bool locked= 0; - GRANT_TABLE *grant_table; DBUG_ENTER("check_grant"); DBUG_ASSERT(number > 0); @@ -4646,17 +4653,32 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, continue; } + if (is_temporary_table(tl)) + { + /* + If this table list element corresponds to a pre-opened temporary + table skip checking of all relevant table-level privileges for it. + Note that during creation of temporary table we still need to check + if user has CREATE_TMP_ACL. 
+ */ + tl->grant.privilege|= TMP_TABLE_ACLS; + tl->grant.want_privilege= 0; + continue; + } + if (!locked) { locked= 1; mysql_rwlock_rdlock(&LOCK_grant); } - if (!(grant_table= table_hash_search(sctx->host, sctx->ip, - tl->get_db_name(), - sctx->priv_user, - tl->get_table_name(), - FALSE))) + GRANT_TABLE *grant_table= table_hash_search(sctx->host, sctx->ip, + tl->get_db_name(), + sctx->priv_user, + tl->get_table_name(), + FALSE); + + if (!grant_table) { want_access &= ~tl->grant.privilege; goto err; // No grants @@ -6876,9 +6898,9 @@ public: virtual bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); bool has_errors() { return is_grave; } @@ -6891,18 +6913,18 @@ Silence_routine_definer_errors::handle_condition( THD *thd, uint sql_errno, const char*, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; - if (level == MYSQL_ERROR::WARN_LEVEL_ERROR) + if (level == Sql_condition::WARN_LEVEL_ERROR) { switch (sql_errno) { case ER_NONEXISTING_PROC_GRANT: /* Convert the error into a warning. 
*/ - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, sql_errno, msg); return TRUE; default: @@ -7067,7 +7089,7 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, } else { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_PASSWD_LENGTH, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_PASSWD_LENGTH, ER(ER_PASSWD_LENGTH), SCRAMBLED_PASSWORD_CHAR_LENGTH); return TRUE; } @@ -8362,7 +8384,7 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio, DBUG_ASSERT(net->read_pos[pkt_len] == 0); if (mpvio->connect_errors) - reset_host_errors(thd->main_security_ctx.ip); + reset_host_connect_errors(thd->main_security_ctx.ip); ulong client_capabilities= uint2korr(net->read_pos); if (client_capabilities & CLIENT_PROTOCOL_41) @@ -8740,7 +8762,6 @@ static int server_mpvio_read_packet(MYSQL_PLUGIN_VIO *param, uchar **buf) err: if (mpvio->status == MPVIO_EXT::FAILURE) { - inc_host_errors(mpvio->thd->security_ctx->ip); if (!mpvio->thd->is_error()) { if (mpvio->make_it_fail) @@ -8913,6 +8934,9 @@ static int do_auth_once(THD *thd, const LEX_STRING *auth_plugin_name, else { /* Server cannot load the required plugin. */ + Host_errors errors; + errors.m_no_auth_plugin= 1; + inc_host_errors(mpvio->thd->security_ctx->ip, &errors); my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), auth_plugin_name->str); res= CR_ERROR; } @@ -9038,8 +9062,26 @@ bool acl_authenticate(THD *thd, uint connect_errors, if (res > CR_OK && mpvio.status != MPVIO_EXT::SUCCESS) { + Host_errors errors; DBUG_ASSERT(mpvio.status == MPVIO_EXT::FAILURE); - + switch (res) + { + case CR_AUTH_PLUGIN_ERROR: + errors.m_auth_plugin= 1; + break; + case CR_AUTH_HANDSHAKE: + errors.m_handshake= 1; + break; + case CR_AUTH_USER_CREDENTIALS: + errors.m_authentication= 1; + break; + case CR_ERROR: + default: + /* Unknown of unspecified auth plugin error. 
*/ + errors.m_auth_plugin= 1; + break; + } + inc_host_errors(mpvio.thd->security_ctx->ip, &errors); if (!thd->is_error()) login_failed_error(thd); DBUG_RETURN(1); @@ -9064,6 +9106,9 @@ bool acl_authenticate(THD *thd, uint connect_errors, /* we need to find the proxy user, but there was none */ if (!proxy_user) { + Host_errors errors; + errors.m_proxy_user= 1; + inc_host_errors(mpvio.thd->security_ctx->ip, &errors); if (!thd->is_error()) login_failed_error(thd); DBUG_RETURN(1); @@ -9080,6 +9125,9 @@ bool acl_authenticate(THD *thd, uint connect_errors, mpvio.auth_info.authenticated_as, TRUE); if (!acl_proxy_user) { + Host_errors errors; + errors.m_proxy_user_acl= 1; + inc_host_errors(mpvio.thd->security_ctx->ip, &errors); if (!thd->is_error()) login_failed_error(thd); mysql_mutex_unlock(&acl_cache->lock); @@ -9108,6 +9156,9 @@ bool acl_authenticate(THD *thd, uint connect_errors, */ if (acl_check_ssl(thd, acl_user)) { + Host_errors errors; + errors.m_ssl= 1; + inc_host_errors(mpvio.thd->security_ctx->ip, &errors); login_failed_error(thd); DBUG_RETURN(1); } @@ -9190,15 +9241,14 @@ bool acl_authenticate(THD *thd, uint connect_errors, sctx->external_user= my_strdup(mpvio.auth_info.external_user, MYF(0)); if (res == CR_OK_HANDSHAKE_COMPLETE) - thd->stmt_da->disable_status(); + thd->get_stmt_da()->disable_status(); else my_ok(thd); #ifdef HAVE_PSI_THREAD_INTERFACE - PSI_CALL(set_thread_user_host)(thd->main_security_ctx.user, - strlen(thd->main_security_ctx.user), - thd->main_security_ctx.host_or_ip, - strlen(thd->main_security_ctx.host_or_ip)); + PSI_THREAD_CALL(set_thread_user_host) + (thd->main_security_ctx.user, strlen(thd->main_security_ctx.user), + thd->main_security_ctx.host_or_ip, strlen(thd->main_security_ctx.host_or_ip)); #endif /* Ready to handle queries */ @@ -9228,7 +9278,7 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio, create_random_string(thd->scramble, SCRAMBLE_LENGTH, &thd->rand); /* and send it to the client */ if 
(mpvio->write_packet(mpvio, (uchar*)thd->scramble, SCRAMBLE_LENGTH + 1)) - DBUG_RETURN(CR_ERROR); + DBUG_RETURN(CR_AUTH_HANDSHAKE); } /* reply and authenticate */ @@ -9270,7 +9320,7 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio, /* read the reply with the encrypted password */ if ((pkt_len= mpvio->read_packet(mpvio, &pkt)) < 0) - DBUG_RETURN(CR_ERROR); + DBUG_RETURN(CR_AUTH_HANDSHAKE); DBUG_PRINT("info", ("reply read : pkt_len=%d", pkt_len)); #ifdef NO_EMBEDDED_ACCESS_CHECKS @@ -9278,23 +9328,22 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio, #endif if (pkt_len == 0) /* no password */ - DBUG_RETURN(info->auth_string[0] ? CR_ERROR : CR_OK); + DBUG_RETURN(mpvio->acl_user->salt_len != 0 ? CR_AUTH_USER_CREDENTIALS : CR_OK); info->password_used= PASSWORD_USED_YES; if (pkt_len == SCRAMBLE_LENGTH) { if (!mpvio->acl_user->salt_len) - DBUG_RETURN(CR_ERROR); + DBUG_RETURN(CR_AUTH_USER_CREDENTIALS); if (check_scramble(pkt, thd->scramble, mpvio->acl_user->salt)) - DBUG_RETURN(CR_ERROR); + DBUG_RETURN(CR_AUTH_USER_CREDENTIALS); else DBUG_RETURN(CR_OK); } - inc_host_errors(mpvio->thd->security_ctx->ip); my_error(ER_HANDSHAKE_ERROR, MYF(0)); - DBUG_RETURN(CR_ERROR); + DBUG_RETURN(CR_AUTH_HANDSHAKE); } static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio, @@ -9311,12 +9360,12 @@ static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio, create_random_string(thd->scramble, SCRAMBLE_LENGTH, &thd->rand); /* and send it to the client */ if (mpvio->write_packet(mpvio, (uchar*)thd->scramble, SCRAMBLE_LENGTH + 1)) - return CR_ERROR; + return CR_AUTH_HANDSHAKE; } /* read the reply and authenticate */ if ((pkt_len= mpvio->read_packet(mpvio, &pkt)) < 0) - return CR_ERROR; + return CR_AUTH_HANDSHAKE; #ifdef NO_EMBEDDED_ACCESS_CHECKS return CR_OK; @@ -9331,26 +9380,25 @@ static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio, pkt_len= strnlen((char*)pkt, pkt_len); if (pkt_len == 0) /* no password */ - return info->auth_string[0] ? 
CR_ERROR : CR_OK; + return info->auth_string[0] ? CR_AUTH_USER_CREDENTIALS : CR_OK; if (secure_auth(thd)) - return CR_ERROR; + return CR_AUTH_HANDSHAKE; info->password_used= PASSWORD_USED_YES; if (pkt_len == SCRAMBLE_LENGTH_323) { if (!mpvio->acl_user->salt_len) - return CR_ERROR; + return CR_AUTH_USER_CREDENTIALS; return check_scramble_323(pkt, thd->scramble, (ulong *) mpvio->acl_user->salt) ? - CR_ERROR : CR_OK; + CR_AUTH_USER_CREDENTIALS : CR_OK; } - inc_host_errors(mpvio->thd->security_ctx->ip); my_error(ER_HANDSHAKE_ERROR, MYF(0)); - return CR_ERROR; + return CR_AUTH_HANDSHAKE; } static struct st_mysql_auth native_password_handler= @@ -9399,3 +9447,10 @@ maria_declare_plugin(mysql_password) MariaDB_PLUGIN_MATURITY_BETA /* Maturity */ } maria_declare_plugin_end; + + +/* called when new user is created or exsisting password is changed */ +int check_password_policy(String *password) +{ + return (0); +} diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 3169746419c..d519279e9c2 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -95,6 +95,14 @@ CREATE_ACL | DROP_ACL | ALTER_ACL | INDEX_ACL | \ TRIGGER_ACL | REFERENCES_ACL | GRANT_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL) +/** + Table-level privileges which are automatically "granted" to everyone on + existing temporary tables (CREATE_ACL is necessary for ALTER ... RENAME). 
+*/ +#define TMP_TABLE_ACLS \ +(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ + INDEX_ACL | ALTER_ACL) + /* Defines to change the above bits to how things are stored in tables This is needed as the 'host' and 'db' table is missing a few privileges @@ -245,7 +253,7 @@ int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond); int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond); int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond); int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr); - +int check_password_policy(String *password); #ifdef NO_EMBEDDED_ACCESS_CHECKS #define check_grant(A,B,C,D,E,F) 0 #define check_grant_db(A,B) 0 diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index 0b2a939d0ba..8f3ea0fedb1 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -1,4 +1,5 @@ -/* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2010, 2013, Oracle and/or its affiliates. + Copyright (c) 2012, 2013, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -42,9 +43,19 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list) trans_rollback(thd); close_thread_tables(thd); thd->mdl_context.release_transactional_locks(); + + /* + table_list->table has been closed and freed. Do not reference + uninitialized data. open_tables() could fail. + */ + table_list->table= NULL; + /* Same applies to MDL ticket. */ + table_list->mdl_request.ticket= NULL; + DEBUG_SYNC(thd, "ha_admin_try_alter"); tmp_disable_binlog(thd); // binlogging is done by caller if wanted - result_code= mysql_recreate_table(thd, table_list); + result_code= (open_temporary_tables(thd, table_list) || + mysql_recreate_table(thd, table_list)); reenable_binlog(thd); /* mysql_recreate_table() can push OK or ERROR. 
@@ -52,8 +63,8 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list) we will store the error message in a result set row and then clear. */ - if (thd->stmt_da->is_ok()) - thd->stmt_da->reset_diagnostics_area(); + if (thd->get_stmt_da()->is_ok()) + thd->get_stmt_da()->reset_diagnostics_area(); table_list->table= NULL; result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK; DBUG_RETURN(result_code); @@ -117,8 +128,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, MDL_EXCLUSIVE, MDL_TRANSACTION); if (lock_table_names(thd, table_list, table_list->next_global, - thd->variables.lock_wait_timeout, - MYSQL_OPEN_SKIP_TEMPORARY)) + thd->variables.lock_wait_timeout, 0)) DBUG_RETURN(0); has_mdl_lock= TRUE; @@ -198,7 +208,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, goto end; /* Close table but don't remove from locked list */ close_all_tables_for_name(thd, table_list->table->s, - HA_EXTRA_NOT_USED); + HA_EXTRA_NOT_USED, NULL); table_list->table= 0; } /* @@ -338,6 +348,14 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, mysql_ha_rm_tables(thd, tables); + /* + Close all temporary tables which were pre-open to simplify + privilege checking. Clear all references to closed tables. + */ + close_thread_tables(thd); + for (table= tables; table; table= table->next_local) + table->table= NULL; + for (table= tables; table; table= table->next_local) { char table_name[SAFE_NAME_LEN*2+2]; @@ -394,14 +412,15 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, because it's already known that the table is badly damaged. 
*/ - Warning_info wi(thd->query_id, false); - Warning_info *wi_saved= thd->warning_info; + Diagnostics_area *da= thd->get_stmt_da(); + Warning_info tmp_wi(thd->query_id, false, true); - thd->warning_info= &wi; + da->push_warning_info(&tmp_wi); - open_error= open_and_lock_tables(thd, table, TRUE, 0); + open_error= (open_temporary_tables(thd, table) || + open_and_lock_tables(thd, table, TRUE, 0)); - thd->warning_info= wi_saved; + da->pop_warning_info(); } else { @@ -413,7 +432,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, mode. It does make sense for the user to see such errors. */ - open_error= open_and_lock_tables(thd, table, TRUE, 0); + open_error= (open_temporary_tables(thd, table) || + open_and_lock_tables(thd, table, TRUE, 0)); } thd->prepare_derived_at_open= FALSE; @@ -448,7 +468,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, */ Alter_info *alter_info= &lex->alter_info; - if (alter_info->flags & ALTER_ADMIN_PARTITION) + if (alter_info->flags & Alter_info::ALTER_ADMIN_PARTITION) { if (!table->table->part_info) { @@ -512,16 +532,16 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, if (!table->table) { DBUG_PRINT("admin", ("open table failed")); - if (thd->warning_info->is_empty()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + if (thd->get_stmt_da()->is_warning_info_empty()) + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_CHECK_NO_SUCH_TABLE, ER(ER_CHECK_NO_SUCH_TABLE)); /* if it was a view will check md5 sum */ if (table->view && view_checksum(thd, table) == HA_ADMIN_WRONG_CHECKSUM) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_VIEW_CHECKSUM, ER(ER_VIEW_CHECKSUM)); - if (thd->stmt_da->is_error() && - table_not_corrupt_error(thd->stmt_da->sql_errno())) + if (thd->get_stmt_da()->is_error() && + table_not_corrupt_error(thd->get_stmt_da()->sql_errno())) result_code= HA_ADMIN_FAILED; else /* Default failure code is corrupt table */ @@ -569,7 
+589,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, table->table=0; // For query cache if (protocol->write()) goto err; - thd->stmt_da->reset_diagnostics_area(); + thd->get_stmt_da()->reset_diagnostics_area(); continue; /* purecov: end */ } @@ -742,8 +762,9 @@ send_result: lex->cleanup_after_one_table_open(); thd->clear_error(); // these errors shouldn't get client { - List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list()); - MYSQL_ERROR *err; + Diagnostics_area::Sql_condition_iterator it= + thd->get_stmt_da()->sql_conditions(); + const Sql_condition *err; while ((err= it++)) { protocol->prepare_for_resend(); @@ -756,7 +777,7 @@ send_result: if (protocol->write()) goto err; } - thd->warning_info->clear_warning_info(thd->query_id); + thd->get_stmt_da()->clear_warning_info(thd->query_id); } protocol->prepare_for_resend(); protocol->store(table_name, system_charset_info); @@ -824,19 +845,10 @@ send_result_message: case HA_ADMIN_TRY_ALTER: { - uint save_flags; Alter_info *alter_info= &lex->alter_info; - /* Store the original value of alter_info->flags */ - save_flags= alter_info->flags; - /* - This is currently used only by InnoDB. ha_innobase::optimize() answers - "try with alter", so here we close the table, do an ALTER TABLE, - reopen the table and do ha_innobase::analyze() on it. - We have to end the row, so analyze could return more rows. - */ protocol->store(STRING_WITH_LEN("note"), system_charset_info); - if(alter_info->flags & ALTER_ADMIN_PARTITION) + if (alter_info->flags & Alter_info::ALTER_ADMIN_PARTITION) { protocol->store(STRING_WITH_LEN( "Table does not support optimize on partitions. 
All partitions " @@ -848,18 +860,23 @@ send_result_message: "Table does not support optimize, doing recreate + analyze instead"), system_charset_info); } - if (protocol->write()) + if (protocol->write()) goto err; + DBUG_PRINT("info", ("HA_ADMIN_TRY_ALTER, trying analyze...")); TABLE_LIST *save_next_local= table->next_local, *save_next_global= table->next_global; table->next_local= table->next_global= 0; - result_code= admin_recreate_table(thd, table); + tmp_disable_binlog(thd); // binlogging is done by caller if wanted + result_code= admin_recreate_table(thd, table); + reenable_binlog(thd); trans_commit_stmt(thd); trans_commit(thd); close_thread_tables(thd); thd->mdl_context.release_transactional_locks(); + /* Clear references to TABLE and MDL_ticket after releasing them. */ + table->mdl_request.ticket= NULL; if (!result_code) // recreation went ok { @@ -867,22 +884,27 @@ send_result_message: table->mdl_request.ticket= NULL; DEBUG_SYNC(thd, "ha_admin_open_ltable"); table->mdl_request.set_type(MDL_SHARED_WRITE); - /* - Reset the ALTER_ADMIN_PARTITION bit in alter_info->flags - to force analyze on all partitions. - */ - alter_info->flags &= ~(ALTER_ADMIN_PARTITION); - if ((table->table= open_ltable(thd, table, lock_type, 0))) + if (!open_temporary_tables(thd, table) && + (table->table= open_ltable(thd, table, lock_type, 0))) { + uint save_flags; + /* Store the original value of alter_info->flags */ + save_flags= alter_info->flags; + + /* + Reset the ALTER_ADMIN_PARTITION bit in alter_info->flags + to force analyze on all partitions. 
+ */ + alter_info->flags &= ~(Alter_info::ALTER_ADMIN_PARTITION); result_code= table->table->file->ha_analyze(thd, check_opt); if (result_code == HA_ADMIN_ALREADY_DONE) result_code= HA_ADMIN_OK; else if (result_code) // analyze failed table->table->file->print_error(result_code, MYF(0)); + alter_info->flags= save_flags; } else result_code= -1; // open failed - alter_info->flags= save_flags; } /* Start a new row for the final status row */ protocol->prepare_for_resend(); @@ -893,7 +915,7 @@ send_result_message: DBUG_ASSERT(thd->is_error() || thd->killed); if (thd->is_error()) { - const char *err_msg= thd->stmt_da->message(); + const char *err_msg= thd->get_stmt_da()->message(); if (!thd->vio_ok()) { sql_print_error("%s", err_msg); @@ -912,6 +934,9 @@ send_result_message: } thd->clear_error(); } + /* Make sure this table instance is not reused after the operation. */ + if (table->table) + table->table->m_needs_reopen= true; } result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK; table->next_local= save_next_local; @@ -1010,14 +1035,15 @@ send_result_message: err: /* Make sure this table instance is not reused after the failure. 
*/ - if (table && table->table) - table->table->m_needs_reopen= true; trans_rollback_stmt(thd); trans_rollback(thd); + if (table && table->table) + { + table->table->m_needs_reopen= true; + table->table= 0; + } close_thread_tables(thd); // Shouldn't be needed thd->mdl_context.release_transactional_locks(); - if (table) - table->table=0; DBUG_RETURN(TRUE); } @@ -1090,12 +1116,13 @@ bool mysql_preload_keys(THD* thd, TABLE_LIST* tables) } -bool Analyze_table_statement::execute(THD *thd) +bool Sql_cmd_analyze_table::execute(THD *thd) { + LEX *m_lex= thd->lex; TABLE_LIST *first_table= m_lex->select_lex.table_list.first; bool res= TRUE; thr_lock_type lock_type = TL_READ_NO_INSERT; - DBUG_ENTER("Analyze_table_statement::execute"); + DBUG_ENTER("Sql_cmd_analyze_table::execute"); if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table, FALSE, UINT_MAX, FALSE)) @@ -1120,12 +1147,13 @@ error: } -bool Check_table_statement::execute(THD *thd) +bool Sql_cmd_check_table::execute(THD *thd) { + LEX *m_lex= thd->lex; TABLE_LIST *first_table= m_lex->select_lex.table_list.first; thr_lock_type lock_type = TL_READ_NO_INSERT; bool res= TRUE; - DBUG_ENTER("Check_table_statement::execute"); + DBUG_ENTER("Sql_cmd_check_table::execute"); if (check_table_access(thd, SELECT_ACL, first_table, TRUE, UINT_MAX, FALSE)) @@ -1144,17 +1172,18 @@ error: } -bool Optimize_table_statement::execute(THD *thd) +bool Sql_cmd_optimize_table::execute(THD *thd) { + LEX *m_lex= thd->lex; TABLE_LIST *first_table= m_lex->select_lex.table_list.first; bool res= TRUE; - DBUG_ENTER("Optimize_table_statement::execute"); + DBUG_ENTER("Sql_cmd_optimize_table::execute"); if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table, FALSE, UINT_MAX, FALSE)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; - res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ? + res= (specialflag & SPECIAL_NO_NEW_FUNC) ? 
mysql_recreate_table(thd, first_table) : mysql_admin_table(thd, first_table, &m_lex->check_opt, "optimize", TL_WRITE, 1, 0, 0, 0, @@ -1175,11 +1204,12 @@ error: } -bool Repair_table_statement::execute(THD *thd) +bool Sql_cmd_repair_table::execute(THD *thd) { + LEX *m_lex= thd->lex; TABLE_LIST *first_table= m_lex->select_lex.table_list.first; bool res= TRUE; - DBUG_ENTER("Repair_table_statement::execute"); + DBUG_ENTER("Sql_cmd_repair_table::execute"); if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table, FALSE, UINT_MAX, FALSE)) diff --git a/sql/sql_admin.h b/sql/sql_admin.h index 5398e3019f1..fa89fc9063f 100644 --- a/sql/sql_admin.h +++ b/sql/sql_admin.h @@ -26,109 +26,100 @@ int reassign_keycache_tables(THD* thd, KEY_CACHE *src_cache, KEY_CACHE *dst_cache); /** - Analyze_statement represents the ANALYZE TABLE statement. + Sql_cmd_analyze_table represents the ANALYZE TABLE statement. */ -class Analyze_table_statement : public Sql_statement +class Sql_cmd_analyze_table : public Sql_cmd { public: /** Constructor, used to represent a ANALYZE TABLE statement. - @param lex the LEX structure for this statement. */ - Analyze_table_statement(LEX *lex) - : Sql_statement(lex) + Sql_cmd_analyze_table() {} - ~Analyze_table_statement() + ~Sql_cmd_analyze_table() {} - /** - Execute a ANALYZE TABLE statement at runtime. - @param thd the current thread. - @return false on success. - */ bool execute(THD *thd); + + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_ANALYZE; + } }; /** - Check_table_statement represents the CHECK TABLE statement. + Sql_cmd_check_table represents the CHECK TABLE statement. */ -class Check_table_statement : public Sql_statement +class Sql_cmd_check_table : public Sql_cmd { public: /** Constructor, used to represent a CHECK TABLE statement. - @param lex the LEX structure for this statement. 
*/ - Check_table_statement(LEX *lex) - : Sql_statement(lex) + Sql_cmd_check_table() {} - ~Check_table_statement() + ~Sql_cmd_check_table() {} - /** - Execute a CHECK TABLE statement at runtime. - @param thd the current thread. - @return false on success. - */ bool execute(THD *thd); -}; + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_CHECK; + } +}; /** - Optimize_table_statement represents the OPTIMIZE TABLE statement. + Sql_cmd_optimize_table represents the OPTIMIZE TABLE statement. */ -class Optimize_table_statement : public Sql_statement +class Sql_cmd_optimize_table : public Sql_cmd { public: /** Constructor, used to represent a OPTIMIZE TABLE statement. - @param lex the LEX structure for this statement. */ - Optimize_table_statement(LEX *lex) - : Sql_statement(lex) + Sql_cmd_optimize_table() {} - ~Optimize_table_statement() + ~Sql_cmd_optimize_table() {} - /** - Execute a OPTIMIZE TABLE statement at runtime. - @param thd the current thread. - @return false on success. - */ bool execute(THD *thd); + + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_OPTIMIZE; + } }; /** - Repair_table_statement represents the REPAIR TABLE statement. + Sql_cmd_repair_table represents the REPAIR TABLE statement. */ -class Repair_table_statement : public Sql_statement +class Sql_cmd_repair_table : public Sql_cmd { public: /** Constructor, used to represent a REPAIR TABLE statement. - @param lex the LEX structure for this statement. */ - Repair_table_statement(LEX *lex) - : Sql_statement(lex) + Sql_cmd_repair_table() {} - ~Repair_table_statement() + ~Sql_cmd_repair_table() {} - /** - Execute a REPAIR TABLE statement at runtime. - @param thd the current thread. - @return false on success. 
- */ bool execute(THD *thd); + + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_REPAIR; + } }; #endif diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc index c6c02773286..01bffaf132f 100644 --- a/sql/sql_alter.cc +++ b/sql/sql_alter.cc @@ -16,9 +16,176 @@ #include "sql_parse.h" // check_access #include "sql_table.h" // mysql_alter_table, // mysql_exchange_partition +#include "sql_base.h" // open_temporary_tables #include "sql_alter.h" -bool Alter_table_statement::execute(THD *thd) +Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root) + :drop_list(rhs.drop_list, mem_root), + alter_list(rhs.alter_list, mem_root), + key_list(rhs.key_list, mem_root), + create_list(rhs.create_list, mem_root), + flags(rhs.flags), + keys_onoff(rhs.keys_onoff), + partition_names(rhs.partition_names, mem_root), + num_parts(rhs.num_parts), + requested_algorithm(rhs.requested_algorithm), + requested_lock(rhs.requested_lock) +{ + /* + Make deep copies of used objects. + This is not a fully deep copy - clone() implementations + of Alter_drop, Alter_column, Key, foreign_key, Key_part_spec + do not copy string constants. At the same length the only + reason we make a copy currently is that ALTER/CREATE TABLE + code changes input Alter_info definitions, but string + constants never change. + */ + list_copy_and_replace_each_value(drop_list, mem_root); + list_copy_and_replace_each_value(alter_list, mem_root); + list_copy_and_replace_each_value(key_list, mem_root); + list_copy_and_replace_each_value(create_list, mem_root); + /* partition_names are not deeply copied currently */ +} + + +bool Alter_info::set_requested_algorithm(const LEX_STRING *str) +{ + // To avoid adding new keywords to the grammar, we match strings here. 
+ if (!my_strcasecmp(system_charset_info, str->str, "INPLACE")) + requested_algorithm= ALTER_TABLE_ALGORITHM_INPLACE; + else if (!my_strcasecmp(system_charset_info, str->str, "COPY")) + requested_algorithm= ALTER_TABLE_ALGORITHM_COPY; + else if (!my_strcasecmp(system_charset_info, str->str, "DEFAULT")) + requested_algorithm= ALTER_TABLE_ALGORITHM_DEFAULT; + else + return true; + return false; +} + + +bool Alter_info::set_requested_lock(const LEX_STRING *str) +{ + // To avoid adding new keywords to the grammar, we match strings here. + if (!my_strcasecmp(system_charset_info, str->str, "NONE")) + requested_lock= ALTER_TABLE_LOCK_NONE; + else if (!my_strcasecmp(system_charset_info, str->str, "SHARED")) + requested_lock= ALTER_TABLE_LOCK_SHARED; + else if (!my_strcasecmp(system_charset_info, str->str, "EXCLUSIVE")) + requested_lock= ALTER_TABLE_LOCK_EXCLUSIVE; + else if (!my_strcasecmp(system_charset_info, str->str, "DEFAULT")) + requested_lock= ALTER_TABLE_LOCK_DEFAULT; + else + return true; + return false; +} + + +Alter_table_ctx::Alter_table_ctx() + : datetime_field(NULL), error_if_not_empty(false), + tables_opened(0), + db(NULL), table_name(NULL), alias(NULL), + new_db(NULL), new_name(NULL), new_alias(NULL), + fk_error_if_delete_row(false), fk_error_id(NULL), + fk_error_table(NULL) +#ifndef DBUG_OFF + , tmp_table(false) +#endif +{ +} + + +Alter_table_ctx::Alter_table_ctx(THD *thd, TABLE_LIST *table_list, + uint tables_opened_arg, + char *new_db_arg, char *new_name_arg) + : datetime_field(NULL), error_if_not_empty(false), + tables_opened(tables_opened_arg), + new_db(new_db_arg), new_name(new_name_arg), + fk_error_if_delete_row(false), fk_error_id(NULL), + fk_error_table(NULL) +#ifndef DBUG_OFF + , tmp_table(false) +#endif +{ + /* + Assign members db, table_name, new_db and new_name + to simplify further comparisions: we want to see if it's a RENAME + later just by comparing the pointers, avoiding the need for strcmp. 
+ */ + db= table_list->db; + table_name= table_list->table_name; + alias= (lower_case_table_names == 2) ? table_list->alias : table_name; + + if (!new_db || !my_strcasecmp(table_alias_charset, new_db, db)) + new_db= db; + + if (new_name) + { + DBUG_PRINT("info", ("new_db.new_name: '%s'.'%s'", new_db, new_name)); + + if (lower_case_table_names == 1) // Convert new_name/new_alias to lower case + { + my_casedn_str(files_charset_info, new_name); + new_alias= new_name; + } + else if (lower_case_table_names == 2) // Convert new_name to lower case + { + strmov(new_alias= new_alias_buff, new_name); + my_casedn_str(files_charset_info, new_name); + } + else + new_alias= new_name; // LCTN=0 => case sensitive + case preserving + + if (!is_database_changed() && + !my_strcasecmp(table_alias_charset, new_name, table_name)) + { + /* + Source and destination table names are equal: + make is_table_renamed() more efficient. + */ + new_alias= table_name; + new_name= table_name; + } + } + else + { + new_alias= alias; + new_name= table_name; + } + + my_snprintf(tmp_name, sizeof(tmp_name), "%s-%lx_%lx", tmp_file_prefix, + current_pid, thd->thread_id); + /* Safety fix for InnoDB */ + if (lower_case_table_names) + my_casedn_str(files_charset_info, tmp_name); + + if (table_list->table->s->tmp_table == NO_TMP_TABLE) + { + build_table_filename(path, sizeof(path) - 1, db, table_name, "", 0); + + build_table_filename(new_path, sizeof(new_path) - 1, new_db, new_name, "", 0); + + build_table_filename(new_filename, sizeof(new_filename) - 1, + new_db, new_name, reg_ext, 0); + + build_table_filename(tmp_path, sizeof(tmp_path) - 1, new_db, tmp_name, "", + FN_IS_TMP); + } + else + { + /* + We are not filling path, new_path and new_filename members if + we are altering temporary table as these members are not used in + this case. This fact is enforced with assert. 
+ */ + build_tmptable_filename(thd, tmp_path, sizeof(tmp_path)); +#ifndef DBUG_OFF + tmp_table= true; +#endif + } +} + + +bool Sql_cmd_alter_table::execute(THD *thd) { LEX *lex= thd->lex; /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */ @@ -38,7 +205,7 @@ bool Alter_table_statement::execute(THD *thd) ulong priv_needed= ALTER_ACL; bool result; - DBUG_ENTER("Alter_table_statement::execute"); + DBUG_ENTER("Sql_cmd_alter_table::execute"); if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */ DBUG_RETURN(TRUE); @@ -46,12 +213,14 @@ bool Alter_table_statement::execute(THD *thd) We also require DROP priv for ALTER TABLE ... DROP PARTITION, as well as for RENAME TO, as being done by SQLCOM_RENAME_TABLE */ - if (alter_info.flags & (ALTER_DROP_PARTITION | ALTER_RENAME)) + if (alter_info.flags & (Alter_info::ALTER_DROP_PARTITION | + Alter_info::ALTER_RENAME)) priv_needed|= DROP_ACL; /* Must be set in the parser */ DBUG_ASSERT(select_lex->db); - DBUG_ASSERT(!(alter_info.flags & ALTER_ADMIN_PARTITION)); + DBUG_ASSERT(!(alter_info.flags & Alter_info::ALTER_EXCHANGE_PARTITION)); + DBUG_ASSERT(!(alter_info.flags & Alter_info::ALTER_ADMIN_PARTITION)); if (check_access(thd, priv_needed, first_table->db, &first_table->grant.privilege, &first_table->grant.m_internal, @@ -63,10 +232,47 @@ bool Alter_table_statement::execute(THD *thd) DBUG_RETURN(TRUE); /* purecov: inspected */ /* If it is a merge table, check privileges for merge children. */ - if (create_info.merge_list.first && - check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL, - create_info.merge_list.first, FALSE, UINT_MAX, FALSE)) - DBUG_RETURN(TRUE); + if (create_info.merge_list.first) + { + /* + The user must have (SELECT_ACL | UPDATE_ACL | DELETE_ACL) on the + underlying base tables, even if there are temporary tables with the same + names. 
+ + From user's point of view, it might look as if the user must have these + privileges on temporary tables to create a merge table over them. This is + one of two cases when a set of privileges is required for operations on + temporary tables (see also CREATE TABLE). + + The reason for this behavior stems from the following facts: + + - For merge tables, the underlying table privileges are checked only + at CREATE TABLE / ALTER TABLE time. + + In other words, once a merge table is created, the privileges of + the underlying tables can be revoked, but the user will still have + access to the merge table (provided that the user has privileges on + the merge table itself). + + - Temporary tables shadow base tables. + + I.e. there might be temporary and base tables with the same name, and + the temporary table takes the precedence in all operations. + + - For temporary MERGE tables we do not track if their child tables are + base or temporary. As result we can't guarantee that privilege check + which was done in presence of temporary child will stay relevant later + as this temporary table might be removed. + + If SELECT_ACL | UPDATE_ACL | DELETE_ACL privileges were not checked for + the underlying *base* tables, it would create a security breach as in + Bug#12771903. 
+ */ + + if (check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL, + create_info.merge_list.first, FALSE, UINT_MAX, FALSE)) + DBUG_RETURN(TRUE); + } if (check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX, FALSE)) DBUG_RETURN(TRUE); /* purecov: inspected */ @@ -75,7 +281,7 @@ bool Alter_table_statement::execute(THD *thd) { // Rename of table TABLE_LIST tmp_table; - bzero((char*) &tmp_table,sizeof(tmp_table)); + memset(&tmp_table, 0, sizeof(tmp_table)); tmp_table.table_name= lex->name.str; tmp_table.db= select_lex->db; tmp_table.grant.privilege= priv; @@ -86,11 +292,11 @@ bool Alter_table_statement::execute(THD *thd) /* Don't yet allow changing of symlinks with ALTER TABLE */ if (create_info.data_file_name) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED), "DATA DIRECTORY"); if (create_info.index_file_name) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED), "INDEX DIRECTORY"); create_info.data_file_name= create_info.index_file_name= NULL; @@ -103,7 +309,50 @@ bool Alter_table_statement::execute(THD *thd) &alter_info, select_lex->order_list.elements, select_lex->order_list.first, - lex->ignore, lex->online); + lex->ignore); DBUG_RETURN(result); } + +bool Sql_cmd_discard_import_tablespace::execute(THD *thd) +{ + /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */ + SELECT_LEX *select_lex= &thd->lex->select_lex; + /* first table of first SELECT_LEX */ + TABLE_LIST *table_list= (TABLE_LIST*) select_lex->table_list.first; + + if (check_access(thd, ALTER_ACL, table_list->db, + &table_list->grant.privilege, + &table_list->grant.m_internal, + 0, 0)) + return true; + + if (check_grant(thd, ALTER_ACL, table_list, false, UINT_MAX, false)) + return true; + + thd->enable_slow_log= opt_log_slow_admin_statements; + + /* + 
Check if we attempt to alter mysql.slow_log or + mysql.general_log table and return an error if + it is the case. + TODO: this design is obsolete and will be removed. + */ + int table_kind= check_if_log_table(table_list->db_length, table_list->db, + table_list->table_name_length, + table_list->table_name, false); + + if (table_kind) + { + /* Disable alter of enabled log tables */ + if (logger.is_log_table_enabled(table_kind)) + { + my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER"); + return true; + } + } + + return + mysql_discard_or_import_tablespace(thd, table_list, + m_tablespace_op == DISCARD_TABLESPACE); +} diff --git a/sql/sql_alter.h b/sql/sql_alter.h index 6660748f666..f0c0a873a5c 100644 --- a/sql/sql_alter.h +++ b/sql/sql_alter.h @@ -16,51 +16,412 @@ #ifndef SQL_ALTER_TABLE_H #define SQL_ALTER_TABLE_H +class Alter_drop; +class Alter_column; +class Key; + +/** + Data describing the table being created by CREATE TABLE or + altered by ALTER TABLE. +*/ + +class Alter_info +{ +public: + /* + These flags are set by the parser and describes the type of + operation(s) specified by the ALTER TABLE statement. + + They do *not* describe the type operation(s) to be executed + by the storage engine. For example, we don't yet know the + type of index to be added/dropped. 
+ */ + + // Set for ADD [COLUMN] + static const uint ALTER_ADD_COLUMN = 1L << 0; + + // Set for DROP [COLUMN] + static const uint ALTER_DROP_COLUMN = 1L << 1; + + // Set for CHANGE [COLUMN] | MODIFY [CHANGE] + // Set by mysql_recreate_table() + static const uint ALTER_CHANGE_COLUMN = 1L << 2; + + // Set for ADD INDEX | ADD KEY | ADD PRIMARY KEY | ADD UNIQUE KEY | + // ADD UNIQUE INDEX | ALTER ADD [COLUMN] + static const uint ALTER_ADD_INDEX = 1L << 3; + + // Set for DROP PRIMARY KEY | DROP FOREIGN KEY | DROP KEY | DROP INDEX + static const uint ALTER_DROP_INDEX = 1L << 4; + + // Set for RENAME [TO] + static const uint ALTER_RENAME = 1L << 5; + + // Set for ORDER BY + static const uint ALTER_ORDER = 1L << 6; + + // Set for table_options + static const uint ALTER_OPTIONS = 1L << 7; + + // Set for ALTER [COLUMN] ... SET DEFAULT ... | DROP DEFAULT + static const uint ALTER_CHANGE_COLUMN_DEFAULT = 1L << 8; + + // Set for DISABLE KEYS | ENABLE KEYS + static const uint ALTER_KEYS_ONOFF = 1L << 9; + + // Set for CONVERT TO CHARACTER SET + static const uint ALTER_CONVERT = 1L << 10; + + // Set for FORCE + // Set by mysql_recreate_table() + static const uint ALTER_RECREATE = 1L << 11; + + // Set for ADD PARTITION + static const uint ALTER_ADD_PARTITION = 1L << 12; + + // Set for DROP PARTITION + static const uint ALTER_DROP_PARTITION = 1L << 13; + + // Set for COALESCE PARTITION + static const uint ALTER_COALESCE_PARTITION = 1L << 14; + + // Set for REORGANIZE PARTITION ... INTO + static const uint ALTER_REORGANIZE_PARTITION = 1L << 15; + + // Set for partition_options + static const uint ALTER_PARTITION = 1L << 16; + + // Set for LOAD INDEX INTO CACHE ... PARTITION + // Set for CACHE INDEX ... 
PARTITION + static const uint ALTER_ADMIN_PARTITION = 1L << 17; + + // Set for REORGANIZE PARTITION + static const uint ALTER_TABLE_REORG = 1L << 18; + + // Set for REBUILD PARTITION + static const uint ALTER_REBUILD_PARTITION = 1L << 19; + + // Set for partitioning operations specifying ALL keyword + static const uint ALTER_ALL_PARTITION = 1L << 20; + + // Set for REMOVE PARTITIONING + static const uint ALTER_REMOVE_PARTITIONING = 1L << 21; + + // Set for ADD FOREIGN KEY + static const uint ADD_FOREIGN_KEY = 1L << 22; + + // Set for DROP FOREIGN KEY + static const uint DROP_FOREIGN_KEY = 1L << 23; + + // Set for EXCHANGE PARITION + static const uint ALTER_EXCHANGE_PARTITION = 1L << 24; + + // Set by Sql_cmd_alter_table_truncate_partition::execute() + static const uint ALTER_TRUNCATE_PARTITION = 1L << 25; + + // Set for ADD [COLUMN] FIRST | AFTER + static const uint ALTER_COLUMN_ORDER = 1L << 26; + + + enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE }; + + /** + The different values of the ALGORITHM clause. + Describes which algorithm to use when altering the table. + */ + enum enum_alter_table_algorithm + { + // In-place if supported, copy otherwise. + ALTER_TABLE_ALGORITHM_DEFAULT, + + // In-place if supported, error otherwise. + ALTER_TABLE_ALGORITHM_INPLACE, + + // Copy if supported, error otherwise. + ALTER_TABLE_ALGORITHM_COPY + }; + + + /** + The different values of the LOCK clause. + Describes the level of concurrency during ALTER TABLE. + */ + enum enum_alter_table_lock + { + // Maximum supported level of concurency for the given operation. + ALTER_TABLE_LOCK_DEFAULT, + + // Allow concurrent reads & writes. If not supported, give erorr. + ALTER_TABLE_LOCK_NONE, + + // Allow concurrent reads only. If not supported, give error. + ALTER_TABLE_LOCK_SHARED, + + // Block reads and writes. + ALTER_TABLE_LOCK_EXCLUSIVE + }; + + + // Columns and keys to be dropped. + List<Alter_drop> drop_list; + // Columns for ALTER_COLUMN_CHANGE_DEFAULT. 
+ List<Alter_column> alter_list; + // List of keys, used by both CREATE and ALTER TABLE. + List<Key> key_list; + // List of columns, used by both CREATE and ALTER TABLE. + List<Create_field> create_list; + // Type of ALTER TABLE operation. + uint flags; + // Enable or disable keys. + enum_enable_or_disable keys_onoff; + // List of partitions. + List<char> partition_names; + // Number of partitions. + uint num_parts; + // Type of ALTER TABLE algorithm. + enum_alter_table_algorithm requested_algorithm; + // Type of ALTER TABLE lock. + enum_alter_table_lock requested_lock; + + + Alter_info() : + flags(0), + keys_onoff(LEAVE_AS_IS), + num_parts(0), + requested_algorithm(ALTER_TABLE_ALGORITHM_DEFAULT), + requested_lock(ALTER_TABLE_LOCK_DEFAULT) + {} + + void reset() + { + drop_list.empty(); + alter_list.empty(); + key_list.empty(); + create_list.empty(); + flags= 0; + keys_onoff= LEAVE_AS_IS; + num_parts= 0; + partition_names.empty(); + requested_algorithm= ALTER_TABLE_ALGORITHM_DEFAULT; + requested_lock= ALTER_TABLE_LOCK_DEFAULT; + } + + + /** + Construct a copy of this object to be used for mysql_alter_table + and mysql_create_table. + + Historically, these two functions modify their Alter_info + arguments. This behaviour breaks re-execution of prepared + statements and stored procedures and is compensated by always + supplying a copy of Alter_info to these functions. + + @param rhs Alter_info to make copy of + @param mem_root Mem_root for new Alter_info + + @note You need to use check the error in THD for out + of memory condition after calling this function. + */ + Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root); + + + /** + Parses the given string and sets requested_algorithm + if the string value matches a supported value. 
+ Supported values: INPLACE, COPY, DEFAULT + + @param str String containing the supplied value + @retval false Supported value found, state updated + @retval true Not supported value, no changes made + */ + bool set_requested_algorithm(const LEX_STRING *str); + + + /** + Parses the given string and sets requested_lock + if the string value matches a supported value. + Supported values: NONE, SHARED, EXCLUSIVE, DEFAULT + + @param str String containing the supplied value + @retval false Supported value found, state updated + @retval true Not supported value, no changes made + */ + + bool set_requested_lock(const LEX_STRING *str); + +private: + Alter_info &operator=(const Alter_info &rhs); // not implemented + Alter_info(const Alter_info &rhs); // not implemented +}; + + +/** Runtime context for ALTER TABLE. */ +class Alter_table_ctx +{ +public: + Alter_table_ctx(); + + Alter_table_ctx(THD *thd, TABLE_LIST *table_list, uint tables_opened_arg, + char *new_db_arg, char *new_name_arg); + + /** + @return true if the table is moved to another database, false otherwise. + */ + bool is_database_changed() const + { return (new_db != db); }; + + /** + @return true if the table is renamed, false otherwise. + */ + bool is_table_renamed() const + { return (is_database_changed() || new_name != table_name); }; + + /** + @return filename (including .frm) for the new table. + */ + const char *get_new_filename() const + { + DBUG_ASSERT(!tmp_table); + return new_filename; + } + + /** + @return path to the original table. + */ + const char *get_path() const + { + DBUG_ASSERT(!tmp_table); + return path; + } + + /** + @return path to the new table. + */ + const char *get_new_path() const + { + DBUG_ASSERT(!tmp_table); + return new_path; + } + + /** + @return path to the temporary table created during ALTER TABLE. 
+ */ + const char *get_tmp_path() const + { return tmp_path; } + + /** + Mark ALTER TABLE as needing to produce foreign key error if + it deletes a row from the table being changed. + */ + void set_fk_error_if_delete_row(FOREIGN_KEY_INFO *fk) + { + fk_error_if_delete_row= true; + fk_error_id= fk->foreign_id->str; + fk_error_table= fk->foreign_table->str; + } + +public: + Create_field *datetime_field; + bool error_if_not_empty; + uint tables_opened; + char *db; + char *table_name; + char *alias; + char *new_db; + char *new_name; + char *new_alias; + char tmp_name[80]; + /** + Indicates that if a row is deleted during copying of data from old version + of table to the new version ER_FK_CANNOT_DELETE_PARENT error should be + emitted. + */ + bool fk_error_if_delete_row; + /** Name of foreign key for the above error. */ + const char *fk_error_id; + /** Name of table for the above error. */ + const char *fk_error_table; + +private: + char new_filename[FN_REFLEN + 1]; + char new_alias_buff[FN_REFLEN + 1]; + char path[FN_REFLEN + 1]; + char new_path[FN_REFLEN + 1]; + char tmp_path[FN_REFLEN + 1]; + +#ifndef DBUG_OFF + /** Indicates that we are altering temporary table. Used only in asserts. */ + bool tmp_table; +#endif + + Alter_table_ctx &operator=(const Alter_table_ctx &rhs); // not implemented + Alter_table_ctx(const Alter_table_ctx &rhs); // not implemented +}; + + /** - Alter_table_common represents the common properties of the ALTER TABLE + Sql_cmd_common_alter_table represents the common properties of the ALTER TABLE statements. @todo move Alter_info and other ALTER generic structures from Lex here. */ -class Alter_table_common : public Sql_statement +class Sql_cmd_common_alter_table : public Sql_cmd { protected: /** Constructor. - @param lex the LEX structure for this statement. 
*/ - Alter_table_common(LEX *lex) - : Sql_statement(lex) + Sql_cmd_common_alter_table() {} - virtual ~Alter_table_common() + virtual ~Sql_cmd_common_alter_table() {} + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_ALTER_TABLE; + } }; /** - Alter_table_statement represents the generic ALTER TABLE statement. + Sql_cmd_alter_table represents the generic ALTER TABLE statement. @todo move Alter_info and other ALTER specific structures from Lex here. */ -class Alter_table_statement : public Alter_table_common +class Sql_cmd_alter_table : public Sql_cmd_common_alter_table { public: /** Constructor, used to represent a ALTER TABLE statement. - @param lex the LEX structure for this statement. */ - Alter_table_statement(LEX *lex) - : Alter_table_common(lex) + Sql_cmd_alter_table() {} - ~Alter_table_statement() + ~Sql_cmd_alter_table() {} - /** - Execute a ALTER TABLE statement at runtime. - @param thd the current thread. - @return false on success. - */ bool execute(THD *thd); }; + +/** + Sql_cmd_alter_table_tablespace represents ALTER TABLE + IMPORT/DISCARD TABLESPACE statements. 
+*/ +class Sql_cmd_discard_import_tablespace : public Sql_cmd_common_alter_table +{ +public: + enum enum_tablespace_op_type + { + DISCARD_TABLESPACE, IMPORT_TABLESPACE + }; + + Sql_cmd_discard_import_tablespace(enum_tablespace_op_type tablespace_op_arg) + : m_tablespace_op(tablespace_op_arg) + {} + + bool execute(THD *thd); + +private: + const enum_tablespace_op_type m_tablespace_op; +}; + #endif diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index f287bf47e81..be35340df27 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -282,16 +282,16 @@ bool get_ev_num_info(EV_NUM_INFO *ev_info, NUM_INFO *info, const char *num) { if (((longlong) info->ullval) < 0) return 0; // Impossible to store as a negative number - ev_info->llval = -(longlong) max((ulonglong) -ev_info->llval, + ev_info->llval = -(longlong) MY_MAX((ulonglong) -ev_info->llval, info->ullval); - ev_info->min_dval = (double) -max(-ev_info->min_dval, info->dval); + ev_info->min_dval = (double) -MY_MAX(-ev_info->min_dval, info->dval); } else // ulonglong is as big as bigint in MySQL { if ((check_ulonglong(num, info->integers) == DECIMAL_NUM)) return 0; - ev_info->ullval = (ulonglong) max(ev_info->ullval, info->ullval); - ev_info->max_dval = (double) max(ev_info->max_dval, info->dval); + ev_info->ullval = (ulonglong) MY_MAX(ev_info->ullval, info->ullval); + ev_info->max_dval = (double) MY_MAX(ev_info->max_dval, info->dval); } return 1; } // get_ev_num_info @@ -1040,7 +1040,7 @@ String *field_decimal::avg(String *s, ha_rows rows) my_decimal_div(E_DEC_FATAL_ERROR, &avg_val, sum+cur_sum, &num, prec_increment); /* TODO remove this after decimal_div returns proper frac */ my_decimal_round(E_DEC_FATAL_ERROR, &avg_val, - min(sum[cur_sum].frac + prec_increment, DECIMAL_MAX_SCALE), + MY_MIN(sum[cur_sum].frac + prec_increment, DECIMAL_MAX_SCALE), FALSE,&rounded_avg); my_decimal2string(E_DEC_FATAL_ERROR, &rounded_avg, 0, 0, '0', s); return s; @@ -1065,7 +1065,7 @@ String *field_decimal::std(String *s, ha_rows 
rows) my_decimal_div(E_DEC_FATAL_ERROR, &tmp, &sum2, &num, prec_increment); my_decimal2double(E_DEC_FATAL_ERROR, &tmp, &std_sqr); s->set_real(((double) std_sqr <= 0.0 ? 0.0 : sqrt(std_sqr)), - min(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset); + MY_MIN(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset); return s; } @@ -1182,7 +1182,7 @@ bool analyse::change_columns(List<Item> &field_list) func_items[8] = new Item_proc_string("Std", 255); func_items[8]->maybe_null = 1; func_items[9] = new Item_proc_string("Optimal_fieldtype", - max(64, output_str_length)); + MY_MAX(64, output_str_length)); for (uint i = 0; i < array_elements(func_items); i++) field_list.push_back(func_items[i]); diff --git a/sql/sql_array.h b/sql/sql_array.h index 43ca4ef4219..697819787f2 100644 --- a/sql/sql_array.h +++ b/sql/sql_array.h @@ -92,6 +92,8 @@ private: /* A typesafe wrapper around DYNAMIC_ARRAY + + TODO: Change creator to take a THREAD_SPECIFIC option. */ template <class Elem> class Dynamic_array @@ -100,125 +102,113 @@ template <class Elem> class Dynamic_array public: Dynamic_array(uint prealloc=16, uint increment=16) { + init(prealloc, increment); + } + + void init(uint prealloc=16, uint increment=16) + { my_init_dynamic_array(&array, sizeof(Elem), prealloc, increment, - MYF(MY_THREAD_SPECIFIC)); + MYF(0)); } + /** + @note Though formally this could be declared "const" it would be + misleading at it returns a non-const pointer to array's data. 
+ */ Elem& at(size_t idx) { return *(((Elem*)array.buffer) + idx); } - - Elem *front() + /// Const variant of at(), which cannot change data + const Elem& at(size_t idx) const { - return (Elem*)array.buffer; - } - - Elem *back() - { - return ((Elem*)array.buffer) + array.elements; + return *(((Elem*)array.buffer) + idx); } - bool append(Elem &el) + /// @returns pointer to first element; undefined behaviour if array is empty + Elem *front() { - return (insert_dynamic(&array, (uchar*)&el)); + DBUG_ASSERT(array.elements >= 1); + return (Elem*)array.buffer; } - bool append_val(Elem el) + /// @returns pointer to first element; undefined behaviour if array is empty + const Elem *front() const { - return (insert_dynamic(&array, (uchar*)&el)); + DBUG_ASSERT(array.elements >= 1); + return (const Elem*)array.buffer; } - size_t elements() + /// @returns pointer to last element; undefined behaviour if array is empty. + Elem *back() { - return array.elements; + DBUG_ASSERT(array.elements >= 1); + return ((Elem*)array.buffer) + (array.elements - 1); } - void set_elements(size_t n) + /// @returns pointer to last element; undefined behaviour if array is empty. + const Elem *back() const { - array.elements= n; + DBUG_ASSERT(array.elements >= 1); + return ((const Elem*)array.buffer) + (array.elements - 1); } - ~Dynamic_array() + /** + @retval false ok + @retval true OOM, @c my_error() has been called. + */ + bool append(const Elem &el) { - delete_dynamic(&array); + return insert_dynamic(&array, &el); } - typedef int (*CMP_FUNC)(const Elem *el1, const Elem *el2); - - void sort(CMP_FUNC cmp_func) + bool append_val(Elem el) { - my_qsort(array.buffer, array.elements, sizeof(Elem), (qsort_cmp)cmp_func); + return (insert_dynamic(&array, (uchar*)&el)); } -}; -/* - Array of pointers to Elem that uses memory from MEM_ROOT - - MEM_ROOT has no realloc() so this is supposed to be used for cases when - reallocations are rare. 
-*/ - -template <class Elem> class Array -{ - enum {alloc_increment = 16}; - Elem **buffer; - uint n_elements, max_element; -public: - Array(MEM_ROOT *mem_root, uint prealloc=16) + /// Pops the last element. Does nothing if array is empty. + Elem& pop() { - buffer= (Elem**)alloc_root(mem_root, prealloc * sizeof(Elem**)); - max_element = buffer? prealloc : 0; - n_elements= 0; + return *((Elem*)pop_dynamic(&array)); } - Elem& at(int idx) + void del(uint idx) { - return *(((Elem*)buffer) + idx); + delete_dynamic_element(&array, idx); } - Elem **front() + size_t elements() const { - return buffer; + return array.elements; } - Elem **back() + void elements(size_t num_elements) { - return buffer + n_elements; + DBUG_ASSERT(num_elements <= array.max_element); + array.elements= num_elements; } - bool append(MEM_ROOT *mem_root, Elem *el) + void clear() { - if (n_elements == max_element) - { - Elem **newbuf; - if (!(newbuf= (Elem**)alloc_root(mem_root, (n_elements + alloc_increment)* - sizeof(Elem**)))) - { - return FALSE; - } - memcpy(newbuf, buffer, n_elements*sizeof(Elem*)); - buffer= newbuf; - } - buffer[n_elements++]= el; - return FALSE; + elements(0); } - int elements() + void set(uint idx, const Elem &el) { - return n_elements; + set_dynamic(&array, &el, idx); } - void clear() + ~Dynamic_array() { - n_elements= 0; + delete_dynamic(&array); } - typedef int (*CMP_FUNC)(Elem * const *el1, Elem *const *el2); + typedef int (*CMP_FUNC)(const Elem *el1, const Elem *el2); void sort(CMP_FUNC cmp_func) { - my_qsort(buffer, n_elements, sizeof(Elem*), (qsort_cmp)cmp_func); + my_qsort(array.buffer, array.elements, sizeof(Elem), (qsort_cmp)cmp_func); } }; diff --git a/sql/sql_audit.h b/sql/sql_audit.h index 22fdd221e62..1c7d6a1c224 100644 --- a/sql/sql_audit.h +++ b/sql/sql_audit.h @@ -134,7 +134,7 @@ void mysql_audit_general(THD *thd, uint event_subtype, query= thd->query_string; user= user_buff; userlen= make_user_name(thd, user_buff); - rows= 
thd->warning_info->current_row_for_warning(); + rows= thd->get_stmt_da()->current_row_for_warning(); } else { @@ -155,9 +155,10 @@ void mysql_audit_notify_connection_connect(THD *thd) if (mysql_audit_connection_enabled()) { const Security_context *sctx= thd->security_ctx; + Diagnostics_area *da= thd->get_stmt_da(); mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS, MYSQL_AUDIT_CONNECTION_CONNECT, - thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() : 0, + da->is_error() ? da->sql_errno() : 0, thd->thread_id, sctx->user, sctx->user ? strlen(sctx->user) : 0, sctx->priv_user, strlen(sctx->priv_user), @@ -188,9 +189,10 @@ void mysql_audit_notify_connection_change_user(THD *thd) if (mysql_audit_connection_enabled()) { const Security_context *sctx= thd->security_ctx; + Diagnostics_area *da= thd->get_stmt_da(); mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS, MYSQL_AUDIT_CONNECTION_CHANGE_USER, - thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() : 0, + da->is_error() ? da->sql_errno() : 0, thd->thread_id, sctx->user, sctx->user ? 
strlen(sctx->user) : 0, sctx->priv_user, strlen(sctx->priv_user), diff --git a/sql/sql_base.cc b/sql/sql_base.cc index e384fbcc32d..5baf05c7f38 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -66,9 +66,9 @@ bool No_such_table_error_handler::handle_condition(THD *, uint sql_errno, const char*, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char*, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; if (sql_errno == ER_NO_SUCH_TABLE || sql_errno == ER_NO_SUCH_TABLE_IN_ENGINE) @@ -77,7 +77,7 @@ No_such_table_error_handler::handle_condition(THD *, return TRUE; } - if (level == MYSQL_ERROR::WARN_LEVEL_ERROR) + if (level == Sql_condition::WARN_LEVEL_ERROR) m_unhandled_errors++; return FALSE; } @@ -110,9 +110,9 @@ public: bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); /** Returns TRUE if there were ER_NO_SUCH_/WRONG_MRG_TABLE and there @@ -140,9 +140,9 @@ bool Repair_mrg_table_error_handler::handle_condition(THD *, uint sql_errno, const char*, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char*, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; if (sql_errno == ER_NO_SUCH_TABLE || @@ -164,10 +164,7 @@ Repair_mrg_table_error_handler::handle_condition(THD *, */ /** - Protects table_def_hash, used and unused lists in the - TABLE_SHARE object, LRU lists of used TABLEs and used - TABLE_SHAREs, refresh_version and the table id counter. 
- In particular: + LOCK_open protects the following variables/objects: end_of_unused_share last_table_id @@ -204,11 +201,8 @@ static void init_tdc_psi_keys(void) const char *category= "sql"; int count; - if (PSI_server == NULL) - return; - count= array_elements(all_tdc_mutexes); - PSI_server->register_mutex(category, all_tdc_mutexes, count); + mysql_mutex_register(category, all_tdc_mutexes, count); } #endif /* HAVE_PSI_INTERFACE */ @@ -275,7 +269,7 @@ static void check_unused(THD *thd) { share= (TABLE_SHARE*) my_hash_element(&table_def_cache, idx); - I_P_List_iterator<TABLE, TABLE_share> it(share->free_tables); + TABLE_SHARE::TABLE_list::Iterator it(share->free_tables); while ((entry= it++)) { /* @@ -285,11 +279,13 @@ static void check_unused(THD *thd) /* Merge children should be detached from a merge parent */ if (entry->in_use) { - DBUG_PRINT("error",("Used table is in share's list of unused tables")); /* purecov: inspected */ + /* purecov: begin inspected */ + DBUG_PRINT("error",("Used table is in share's list of unused tables")); + /* purecov: end */ } /* extra() may assume that in_use is set */ entry->in_use= thd; - DBUG_ASSERT(! entry->file->extra(HA_EXTRA_IS_ATTACHED_CHILDREN)); + DBUG_ASSERT(!thd || ! entry->file->extra(HA_EXTRA_IS_ATTACHED_CHILDREN)); entry->in_use= 0; count--; @@ -315,18 +311,16 @@ static void check_unused(THD *thd) #define check_unused(A) #endif +/** + Create a table cache/table definition cache key -/* - Create a table cache key - - SYNOPSIS - create_tmp_table_def_key() - thd Thread handler - key Create key here (must be of size MAX_DBKEY_LENGTH) - db Database name. - table_name Table name. + @param thd Thread context + @param key Buffer for the key to be created (must be of + size MAX_DBKEY_LENGTH). + @param db_name Database name. + @param table_name Table name. 
- IMPLEMENTATION + @note The table cache_key is created from: db_name + \0 table_name + \0 @@ -337,8 +331,7 @@ static void check_unused(THD *thd) 4 bytes for master thread id 4 bytes pseudo thread id - RETURN - Length of key + @return Length of key. */ uint create_tmp_table_def_key(THD *thd, char *key, @@ -352,9 +345,46 @@ uint create_tmp_table_def_key(THD *thd, char *key, } +/** + Get table cache key for a table list element. + + @param table_list[in] Table list element. + @param key[out] On return points to table cache key for the table. + + @note Unlike create_table_def_key() call this function doesn't construct + key in a buffer provider by caller. Instead it relies on the fact + that table list element for which key is requested has properly + initialized MDL_request object and the fact that table definition + cache key is suffix of key used in MDL subsystem. So to get table + definition key it simply needs to return pointer to appropriate + part of MDL_key object nested in this table list element. + Indeed, this means that lifetime of key produced by this call is + limited by the lifetime of table list element which it got as + parameter. + + @return Length of key. +*/ + +uint get_table_def_key(const TABLE_LIST *table_list, const char **key) +{ + /* + This call relies on the fact that TABLE_LIST::mdl_request::key object + is properly initialized, so table definition cache can be produced + from key used by MDL subsystem. 
+ */ + DBUG_ASSERT(!strcmp(table_list->get_db_name(), + table_list->mdl_request.key.db_name()) && + !strcmp(table_list->get_table_name(), + table_list->mdl_request.key.name())); + + *key= (const char*)table_list->mdl_request.key.ptr() + 1; + return table_list->mdl_request.key.length() - 1; +} + + /***************************************************************************** - Functions to handle table definition cach (TABLE_SHARE) + Functions to handle table definition cache (TABLE_SHARE) *****************************************************************************/ extern "C" uchar *table_def_key(const uchar *record, size_t *length, @@ -399,7 +429,6 @@ bool table_def_init(void) oldest_unused_share= &end_of_unused_share; end_of_unused_share.prev= &oldest_unused_share; - return my_hash_init(&table_def_cache, &my_charset_bin, table_def_size, 0, 0, table_def_key, (my_hash_free_key) table_def_free_entry, 0) != 0; @@ -591,7 +620,7 @@ static void table_def_unuse_table(TABLE *table) */ TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name, - char *key, uint key_length, uint flags, + const char *key, uint key_length, uint flags, my_hash_value_type hash_value) { TABLE_SHARE *share; @@ -630,13 +659,13 @@ TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name, } share->ref_count++; // Mark in use share->error= OPEN_FRM_OPEN_ERROR; - mysql_mutex_lock(&share->LOCK_ha_data); + mysql_mutex_lock(&share->LOCK_share); mysql_mutex_unlock(&LOCK_open); /* note that get_table_share() *always* uses discovery */ open_table_def(thd, share, flags | GTS_USE_DISCOVERY); - mysql_mutex_unlock(&share->LOCK_ha_data); + mysql_mutex_unlock(&share->LOCK_share); mysql_mutex_lock(&LOCK_open); if (share->error) @@ -658,8 +687,8 @@ TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name, DBUG_ASSERT(!(flags & GTS_FORCE_DISCOVERY)); /* make sure that open_table_def() for this share is not running */ - 
mysql_mutex_lock(&share->LOCK_ha_data); - mysql_mutex_unlock(&share->LOCK_ha_data); + mysql_mutex_lock(&share->LOCK_share); + mysql_mutex_unlock(&share->LOCK_share); /* We found an existing table definition. Return it if we didn't get @@ -792,7 +821,7 @@ void release_table_share(TABLE_SHARE *share) TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name) { - char key[SAFE_NAME_LEN*2+2]; + char key[MAX_DBKEY_LENGTH]; uint key_length; mysql_mutex_assert_owner(&LOCK_open); @@ -861,7 +890,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild) share->db.str)+1, share->table_name.str); (*start_list)->in_use= 0; - I_P_List_iterator<TABLE, TABLE_share> it(share->used_tables); + TABLE_SHARE::TABLE_list::Iterator it(share->used_tables); while (it++) ++(*start_list)->in_use; (*start_list)->locked= 0; /* Obsolete. */ @@ -943,7 +972,7 @@ void free_io_cache(TABLE *table) static void kill_delayed_threads_for_table(TABLE_SHARE *share) { - I_P_List_iterator<TABLE, TABLE_share> it(share->used_tables); + TABLE_SHARE::TABLE_list::Iterator it(share->used_tables); TABLE *tab; mysql_mutex_assert_owner(&LOCK_open); @@ -1084,7 +1113,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, result= TRUE; goto err_with_reopen; } - close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED); + close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL); } } @@ -1157,12 +1186,12 @@ err_with_reopen: */ thd->locked_tables_list.reopen_tables(thd); /* - Since downgrade_exclusive_lock() won't do anything with shared + Since downgrade_lock() won't do anything with shared metadata lock it is much simpler to go through all open tables rather than picking only those tables that were flushed. 
*/ for (TABLE *tab= thd->open_tables; tab; tab= tab->next) - tab->mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE); + tab->mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); } DBUG_RETURN(result); } @@ -1363,7 +1392,8 @@ static void close_open_tables(THD *thd) void close_all_tables_for_name(THD *thd, TABLE_SHARE *share, - ha_extra_function extra) + ha_extra_function extra, + TABLE *skip_table) { char key[MAX_DBKEY_LENGTH]; uint key_length= share->table_cache_key.length; @@ -1378,7 +1408,8 @@ close_all_tables_for_name(THD *thd, TABLE_SHARE *share, TABLE *table= *prev; if (table->s->table_cache_key.length == key_length && - !memcmp(table->s->table_cache_key.str, key, key_length)) + !memcmp(table->s->table_cache_key.str, key, key_length) && + table != skip_table) { thd->locked_tables_list.unlink_from_list(thd, table->pos_in_locked_tables, @@ -1404,9 +1435,12 @@ close_all_tables_for_name(THD *thd, TABLE_SHARE *share, prev= &table->next; } } - /* Remove the table share from the cache. */ - tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db, table_name, - FALSE); + if (skip_table == NULL) + { + /* Remove the table share from the cache. 
*/ + tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db, table_name, + FALSE); + } } @@ -1556,9 +1590,8 @@ void close_thread_tables(THD *thd) /* move one table to free list */ -bool close_thread_table(THD *thd, TABLE **table_ptr) +void close_thread_table(THD *thd, TABLE **table_ptr) { - bool found_old_table= 0; TABLE *table= *table_ptr; DBUG_ENTER("close_thread_table"); DBUG_PRINT("tcache", ("table: '%s'.'%s' 0x%lx", table->s->db.str, @@ -1604,10 +1637,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) if (table->s->has_old_version() || table->needs_reopen() || table_def_shutdown_in_progress) - { free_cache_entry(table); - found_old_table= 1; - } else { DBUG_ASSERT(table->file); @@ -1620,7 +1650,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) free_cache_entry(unused_tables); } mysql_mutex_unlock(&LOCK_open); - DBUG_RETURN(found_old_table); + DBUG_VOID_RETURN; } @@ -1763,7 +1793,7 @@ bool close_temporary_tables(THD *thd) qinfo.db_len= db.length(); thd->variables.character_set_client= cs_save; - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); if ((error= (mysql_bin_log.write(&qinfo) || error))) { /* @@ -1781,7 +1811,7 @@ bool close_temporary_tables(THD *thd) sql_print_error("Failed to write the DROP statement for " "temporary tables to binary log"); } - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); thd->variables.pseudo_thread_id= save_pseudo_thread_id; thd->thread_specific_used= save_thread_specific_used; @@ -2067,12 +2097,9 @@ void update_non_unique_table_error(TABLE_LIST *update, TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name) { - TABLE_LIST tl; - - tl.db= (char*) db; - tl.table_name= (char*) table_name; - - return find_temporary_table(thd, &tl); + char key[MAX_DBKEY_LENGTH]; + uint key_length= create_tmp_table_def_key(thd, key, db, table_name); + return find_temporary_table(thd, key, key_length); } @@ -2085,10 +2112,26 @@ TABLE 
*find_temporary_table(THD *thd, const char *db, const char *table_name) TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl) { - char key[MAX_DBKEY_LENGTH]; - uint key_length= create_tmp_table_def_key(thd, key, tl->db, tl->table_name); + const char *key; + uint key_length; + char key_suffix[TMP_TABLE_KEY_EXTRA]; + TABLE *table; - return find_temporary_table(thd, key, key_length); + key_length= get_table_def_key(tl, &key); + + int4store(key_suffix, thd->variables.server_id); + int4store(key_suffix + 4, thd->variables.pseudo_thread_id); + + for (table= thd->temporary_tables; table; table= table->next) + { + if ((table->s->table_cache_key.length == key_length + + TMP_TABLE_KEY_EXTRA) && + !memcmp(table->s->table_cache_key.str, key, key_length) && + !memcmp(table->s->table_cache_key.str + key_length, key_suffix, + TMP_TABLE_KEY_EXTRA)) + return table; + } + return NULL; } @@ -2148,14 +2191,15 @@ TABLE *find_temporary_table(THD *thd, int drop_temporary_table(THD *thd, TABLE_LIST *table_list, bool *is_trans) { - TABLE *table; DBUG_ENTER("drop_temporary_table"); DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'", table_list->db, table_list->table_name)); - if (!(table= find_temporary_table(thd, table_list))) + if (!is_temporary_table(table_list)) DBUG_RETURN(1); + TABLE *table= table_list->table; + /* Table might be in use by some outer statement. 
*/ if (table->query_id && table->query_id != thd->query_id) { @@ -2163,8 +2207,7 @@ int drop_temporary_table(THD *thd, TABLE_LIST *table_list, bool *is_trans) DBUG_RETURN(-1); } - if (is_trans != NULL) - *is_trans= table->file->has_transactions(); + *is_trans= table->file->has_transactions(); /* If LOCK TABLES list is not empty and contains this table, @@ -2172,6 +2215,7 @@ int drop_temporary_table(THD *thd, TABLE_LIST *table_list, bool *is_trans) */ mysql_lock_remove(thd, thd->lock, table); close_temporary_table(thd, table, 1, 1); + table_list->table= NULL; DBUG_RETURN(0); } @@ -2232,13 +2276,6 @@ void close_temporary(TABLE *table, bool free_share, bool delete_table) DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'", table->s->db.str, table->s->table_name.str)); - /* in_use is not set for replication temporary tables during shutdown */ - if (table->in_use) - { - table->file->update_global_table_stats(); - table->file->update_global_index_stats(); - } - free_io_cache(table); closefrm(table, 0); if (delete_table) @@ -2304,8 +2341,9 @@ bool wait_while_table_is_used(THD *thd, TABLE *table, table->s->table_name.str, (ulong) table->s, table->db_stat, table->s->version)); - if (thd->mdl_context.upgrade_shared_lock_to_exclusive( - table->mdl_ticket, thd->variables.lock_wait_timeout)) + if (thd->mdl_context.upgrade_shared_lock( + table->mdl_ticket, MDL_EXCLUSIVE, + thd->variables.lock_wait_timeout)) DBUG_RETURN(TRUE); tdc_remove_table(thd, remove_type, @@ -2354,8 +2392,8 @@ void drop_open_table(THD *thd, TABLE *table, const char *db_name, tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db_name, table_name, FALSE); /* Remove the table from the storage engine and rm the .frm. 
*/ - quick_rm_table(table_type, db_name, table_name, 0); - } + quick_rm_table(thd, table_type, db_name, table_name, 0); + } DBUG_VOID_RETURN; } @@ -2378,9 +2416,9 @@ public: virtual bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); private: /** Open table context to be used for back-off request. */ @@ -2397,9 +2435,9 @@ private: bool MDL_deadlock_handler::handle_condition(THD *, uint sql_errno, const char*, - MYSQL_ERROR::enum_warning_level, + Sql_condition::enum_warning_level, const char*, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; if (! m_is_active && sql_errno == ER_LOCK_DEADLOCK) @@ -2584,51 +2622,46 @@ tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name, } -/* - Open a table. - - SYNOPSIS - open_table() - thd Thread context. - table_list Open first table in list. - action INOUT Pointer to variable of enum_open_table_action type - which will be set according to action which is - required to remedy problem appeared during attempt - to open table. - flags Bitmap of flags to modify how open works: - MYSQL_OPEN_IGNORE_FLUSH - Open table even if - someone has done a flush or there is a pending - exclusive metadata lock requests against it - (i.e. request high priority metadata lock). - No version number checking is done. - MYSQL_OPEN_TEMPORARY_ONLY - Open only temporary - table not the base table or view. - MYSQL_OPEN_TAKE_UPGRADABLE_MDL - Obtain upgradable - metadata lock for tables on which we are going to - take some kind of write table-level lock. - - IMPLEMENTATION - Uses a cache of open tables to find a table not in use. - - If TABLE_LIST::open_strategy is set to OPEN_IF_EXISTS, the table is opened - only if it exists. If the open strategy is OPEN_STUB, the underlying table - is never opened. 
In both cases, metadata locks are always taken according - to the lock strategy. - - RETURN - TRUE Open failed. "action" parameter may contain type of action - needed to remedy problem before retrying again. - FALSE Success. Members of TABLE_LIST structure are filled properly (e.g. - TABLE_LIST::table is set for real tables and TABLE_LIST::view is - set for views). +/** + Open a base table. + + @param thd Thread context. + @param table_list Open first table in list. + @param mem_root Temporary MEM_ROOT to be used for + parsing .FRMs for views. + @param ot_ctx Context with flags which modify how open works + and which is used to recover from a failed + open_table() attempt. + Some examples of flags: + MYSQL_OPEN_IGNORE_FLUSH - Open table even if + someone has done a flush. No version number + checking is done. + MYSQL_OPEN_HAS_MDL_LOCK - instead of acquiring + metadata locks rely on that caller already has + appropriate ones. + + Uses a cache of open tables to find a TABLE instance not in use. + + If TABLE_LIST::open_strategy is set to OPEN_IF_EXISTS, the table is + opened only if it exists. If the open strategy is OPEN_STUB, the + underlying table is never opened. In both cases, metadata locks are + always taken according to the lock strategy. + + The function used to open temporary tables, but now it opens base tables + only. + + @retval TRUE Open failed. "action" parameter may contain type of action + needed to remedy problem before retrying again. + @retval FALSE Success. Members of TABLE_LIST structure are filled properly + (e.g. TABLE_LIST::table is set for real tables and + TABLE_LIST::view is set for views). 
*/ - bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, Open_table_context *ot_ctx) { reg1 TABLE *table; - char key[MAX_DBKEY_LENGTH]; + const char *key; uint key_length; char *alias= table_list->alias; uint flags= ot_ctx->get_flags(); @@ -2638,74 +2671,42 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, uint gts_flags; DBUG_ENTER("open_table"); + /* + The table must not be opened already. The table can be pre-opened for + some statements if it is a temporary table. + + open_temporary_table() must be used to open temporary tables. + */ + DBUG_ASSERT(!table_list->table); + /* an open table operation needs a lot of the stack space */ if (check_stack_overrun(thd, STACK_MIN_SIZE_FOR_OPEN, (uchar *)&alias)) DBUG_RETURN(TRUE); - if (thd->killed) + if (!(flags & MYSQL_OPEN_IGNORE_KILLED) && thd->killed) DBUG_RETURN(TRUE); - key_length= create_tmp_table_def_key(thd, key, table_list->db, - table_list->table_name) - - TMP_TABLE_KEY_EXTRA; - /* - Unless requested otherwise, try to resolve this table in the list - of temporary tables of this thread. In MySQL temporary tables - are always thread-local and "shadow" possible base tables with the - same name. This block implements the behaviour. - TODO: move this block into a separate function. + Check if we're trying to take a write lock in a read only transaction. + + Note that we allow write locks on log tables as otherwise logging + to general/slow log would be disabled in read only transactions. */ - if (table_list->open_type != OT_BASE_ONLY && - ! 
(flags & MYSQL_OPEN_SKIP_TEMPORARY)) + if (table_list->mdl_request.type >= MDL_SHARED_WRITE && + thd->tx_read_only && + !(flags & (MYSQL_LOCK_LOG_TABLE | MYSQL_OPEN_HAS_MDL_LOCK))) { - for (table= thd->temporary_tables; table ; table=table->next) - { - if (table->s->table_cache_key.length == key_length + - TMP_TABLE_KEY_EXTRA && - !memcmp(table->s->table_cache_key.str, key, - key_length + TMP_TABLE_KEY_EXTRA)) - { - /* - We're trying to use the same temporary table twice in a query. - Right now we don't support this because a temporary table - is always represented by only one TABLE object in THD, and - it can not be cloned. Emit an error for an unsupported behaviour. - */ - if (table->query_id) - { - DBUG_PRINT("error", - ("query_id: %lu server_id: %u pseudo_thread_id: %lu", - (ulong) table->query_id, (uint) thd->variables.server_id, - (ulong) thd->variables.pseudo_thread_id)); - my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr()); - DBUG_RETURN(TRUE); - } - table->query_id= thd->query_id; - thd->thread_specific_used= TRUE; - DBUG_PRINT("info",("Using temporary table")); - goto reset; - } - } + my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0)); + DBUG_RETURN(true); } - if (table_list->open_type == OT_TEMPORARY_ONLY || - (flags & MYSQL_OPEN_TEMPORARY_ONLY)) - { - if (table_list->open_strategy == TABLE_LIST::OPEN_NORMAL) - { - my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, table_list->table_name); - DBUG_RETURN(TRUE); - } - else - DBUG_RETURN(FALSE); - } + key_length= get_table_def_key(table_list, &key); /* - The table is not temporary - if we're in pre-locked or LOCK TABLES - mode, let's try to find the requested table in the list of pre-opened - and locked tables. If the table is not there, return an error - we can't - open not pre-opened tables in pre-locked/LOCK TABLES mode. + If we're in pre-locked or LOCK TABLES mode, let's try to find the + requested table in the list of pre-opened and locked tables. 
If the + table is not there, return an error - we can't open not pre-opened + tables in pre-locked/LOCK TABLES mode. TODO: move this block into a separate function. */ if (thd->locked_tables_mode && @@ -2791,7 +2792,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, if (dd_frm_is_view(thd, path)) { if (!tdc_open_view(thd, table_list, alias, key, key_length, - mem_root, 0)) + mem_root, CHECK_METADATA_VERSION)) { DBUG_ASSERT(table_list->view != 0); DBUG_RETURN(FALSE); // VIEW @@ -2800,7 +2801,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, } /* No table in the locked tables list. In case of explicit LOCK TABLES - this can happen if a user did not include the able into the list. + this can happen if a user did not include the table into the list. In case of pre-locked mode locked tables list is generated automatically, so we may only end up here if the table did not exist when locked tables list was created. @@ -2820,19 +2821,6 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, if (! (flags & MYSQL_OPEN_HAS_MDL_LOCK)) { /* - Check if we're trying to take a write lock in a read only transaction. - */ - if (table_list->mdl_request.type >= MDL_SHARED_WRITE && - thd->tx_read_only && - !(flags & (MYSQL_OPEN_HAS_MDL_LOCK | - MYSQL_LOCK_LOG_TABLE | - MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY))) - { - my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0)); - DBUG_RETURN(true); - } - - /* We are not under LOCK TABLES and going to acquire write-lock/ modify the base table. We need to acquire protection against global read lock until end of this statement in order to have @@ -3013,12 +3001,12 @@ retry_share: Release our reference to share, wait until old version of share goes away and then try to get new version of table share. 
*/ - MDL_deadlock_handler mdl_deadlock_handler(ot_ctx); - bool wait_result; - release_table_share(share); mysql_mutex_unlock(&LOCK_open); + MDL_deadlock_handler mdl_deadlock_handler(ot_ctx); + bool wait_result; + thd->push_internal_handler(&mdl_deadlock_handler); wait_result= tdc_wait_for_old_version(thd, table_list->db, table_list->table_name, @@ -3093,10 +3081,8 @@ retry_share: else if (share->crashed) (void) ot_ctx->request_backoff_action(Open_table_context::OT_REPAIR, table_list); - goto err_lock; } - if (open_table_entry_fini(thd, share, table)) { closefrm(table, 0); @@ -3126,6 +3112,21 @@ retry_share: table_list->updatable= 1; // It is not derived table nor non-updatable VIEW table_list->table= table; +#ifdef WITH_PARTITION_STORAGE_ENGINE + if (table->part_info) + { + /* Set all [named] partitions as used. */ + if (table->part_info->set_partition_bitmaps(table_list)) + DBUG_RETURN(true); + } + else if (table_list->partition_names) + { + /* Don't allow PARTITION () clause on a nonpartitioned table */ + my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0)); + DBUG_RETURN(true); + } +#endif + table->init(thd, table_list); DBUG_RETURN(FALSE); @@ -3180,9 +3181,9 @@ TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name) upgrade the lock and ER_TABLE_NOT_LOCKED_FOR_WRITE will be reported. - @return Pointer to TABLE instance with MDL_SHARED_NO_WRITE, - MDL_SHARED_NO_READ_WRITE, or MDL_EXCLUSIVE metadata - lock, NULL otherwise. + @return Pointer to TABLE instance with MDL_SHARED_UPGRADABLE + MDL_SHARED_NO_WRITE, MDL_SHARED_NO_READ_WRITE, or + MDL_EXCLUSIVE metadata lock, NULL otherwise. 
*/ TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db, @@ -3756,11 +3757,12 @@ check_and_update_routine_version(THD *thd, Sroutine_hash_entry *rt, */ bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias, - char *cache_key, uint cache_key_length, + const char *cache_key, uint cache_key_length, MEM_ROOT *mem_root, uint flags) { TABLE not_used; TABLE_SHARE *share; + bool err= TRUE; if (!(share= get_table_share(thd, table_list->db, table_list->table_name, cache_key, cache_key_length, GTS_VIEW))) @@ -3768,12 +3770,28 @@ bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias, DBUG_ASSERT(share->is_view); - bool err= open_new_frm(thd, share, alias, + if (flags & CHECK_METADATA_VERSION) + { + /* + Check TABLE_SHARE-version of view only if we have been instructed to do + so. We do not need to check the version if we're executing CREATE VIEW or + ALTER VIEW statements. + + In the future, this functionality should be moved out from + tdc_open_view(), and tdc_open_view() should became a part of a clean + table-definition-cache interface. 
+ */ + if (check_and_update_table_version(thd, table_list, share)) + goto ret; + } + + err= open_new_frm(thd, share, alias, (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | HA_TRY_READ_ONLY), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD | flags, thd->open_options, ¬_used, table_list, mem_root); +ret: mysql_mutex_lock(&LOCK_open); release_table_share(share); mysql_mutex_unlock(&LOCK_open); @@ -4014,14 +4032,13 @@ recover_from_failed_open(THD *thd) case OT_DISCOVER: { if ((result= lock_table_names(thd, m_failed_table, NULL, - get_timeout(), - MYSQL_OPEN_SKIP_TEMPORARY))) + get_timeout(), 0))) break; tdc_remove_table(thd, TDC_RT_REMOVE_ALL, m_failed_table->db, m_failed_table->table_name, FALSE); - thd->warning_info->clear_warning_info(thd->query_id); + thd->get_stmt_da()->clear_warning_info(thd->query_id); thd->clear_error(); // Clear error message if ((result= @@ -4036,8 +4053,7 @@ recover_from_failed_open(THD *thd) case OT_REPAIR: { if ((result= lock_table_names(thd, m_failed_table, NULL, - get_timeout(), - MYSQL_OPEN_SKIP_TEMPORARY))) + get_timeout(), 0))) break; tdc_remove_table(thd, TDC_RT_REMOVE_ALL, m_failed_table->db, @@ -4369,9 +4385,35 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables, tables->db, tables->table_name, tables)); //psergey: invalid read of size 1 here (*counter)++; - /* Not a placeholder: must be a base table or a view. Let us open it. */ - DBUG_ASSERT(!tables->table); + /* + Not a placeholder: must be a base/temporary table or a view. Let us open it. + */ + if (tables->table) + { + /* + If this TABLE_LIST object has an associated open TABLE object + (TABLE_LIST::table is not NULL), that TABLE object must be a pre-opened + temporary table. + */ + DBUG_ASSERT(is_temporary_table(tables)); + } + else if (tables->open_type == OT_TEMPORARY_ONLY) + { + /* + OT_TEMPORARY_ONLY means that we are in CREATE TEMPORARY TABLE statement. 
+ Also such table list element can't correspond to prelocking placeholder + or to underlying table of merge table. + So existing temporary table should have been preopened by this moment + and we can simply continue without trying to open temporary or base + table. + */ + DBUG_ASSERT(tables->open_strategy); + DBUG_ASSERT(!tables->prelocking_placeholder); + DBUG_ASSERT(!tables->parent_l); + DBUG_RETURN(0); + } + /* Not a placeholder: must be a base table or a view. Let us open it. */ if (tables->prelocking_placeholder) { /* @@ -4382,7 +4424,35 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables, */ No_such_table_error_handler no_such_table_handler; thd->push_internal_handler(&no_such_table_handler); - error= open_table(thd, tables, new_frm_mem, ot_ctx); + + /* + We're opening a table from the prelocking list. + + Since this table list element might have been added after pre-opening + of temporary tables we have to try to open temporary table for it. + + We can't simply skip this table list element and postpone opening of + temporary tabletill the execution of substatement for several reasons: + - Temporary table can be a MERGE table with base underlying tables, + so its underlying tables has to be properly open and locked at + prelocking stage. + - Temporary table can be a MERGE table and we might be in PREPARE + phase for a prepared statement. In this case it is important to call + HA_ATTACH_CHILDREN for all merge children. + This is necessary because merge children remember "TABLE_SHARE ref type" + and "TABLE_SHARE def version" in the HA_ATTACH_CHILDREN operation. + If HA_ATTACH_CHILDREN is not called, these attributes are not set. + Then, during the first EXECUTE, those attributes need to be updated. + That would cause statement re-preparing (because changing those + attributes during EXECUTE is caught by THD::m_reprepare_observers). + The problem is that since those attributes are not set in merge + children, another round of PREPARE will not help. 
+ */ + error= open_temporary_table(thd, tables); + + if (!error && !tables->table) + error= open_table(thd, tables, new_frm_mem, ot_ctx); + thd->pop_internal_handler(); safe_to_ignore_table= no_such_table_handler.safely_trapped_errors(); } @@ -4396,12 +4466,29 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables, */ Repair_mrg_table_error_handler repair_mrg_table_handler; thd->push_internal_handler(&repair_mrg_table_handler); - error= open_table(thd, tables, new_frm_mem, ot_ctx); + + error= open_temporary_table(thd, tables); + if (!error && !tables->table) + error= open_table(thd, tables, new_frm_mem, ot_ctx); + thd->pop_internal_handler(); safe_to_ignore_table= repair_mrg_table_handler.safely_trapped_errors(); } else - error= open_table(thd, tables, new_frm_mem, ot_ctx); + { + if (tables->parent_l) + { + /* + Even if we are opening table not from the prelocking list we + still might need to look for a temporary table if this table + list element corresponds to underlying table of a merge table. + */ + error= open_temporary_table(thd, tables); + } + + if (!error && !tables->table) + error= open_table(thd, tables, new_frm_mem, ot_ctx); + } free_root(new_frm_mem, MYF(MY_KEEP_PREALLOC)); @@ -4628,27 +4715,25 @@ lock_table_names(THD *thd, for (table= tables_start; table && table != tables_end; table= table->next_global) { - if (table->mdl_request.type >= MDL_SHARED_NO_WRITE && - !(table->open_type == OT_TEMPORARY_ONLY || - (flags & MYSQL_OPEN_TEMPORARY_ONLY) || - (table->open_type != OT_BASE_ONLY && - ! (flags & MYSQL_OPEN_SKIP_TEMPORARY) && - find_temporary_table(thd, table)))) + if (table->mdl_request.type < MDL_SHARED_UPGRADABLE || + table->open_type == OT_TEMPORARY_ONLY || + (table->open_type == OT_TEMPORARY_OR_BASE && is_temporary_table(table))) { - /* - Write lock on normal tables is not allowed in a read only transaction. 
- */ - if (thd->tx_read_only) - { - my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0)); - DBUG_RETURN(true); - } + continue; + } - if (! (flags & MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK) && - schema_set.insert(table)) - DBUG_RETURN(TRUE); - mdl_requests.push_front(&table->mdl_request); + /* Write lock on normal tables is not allowed in a read only transaction. */ + if (thd->tx_read_only) + { + my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0)); + DBUG_RETURN(true); } + + if (! (flags & MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK) && + schema_set.insert(table)) + DBUG_RETURN(TRUE); + + mdl_requests.push_front(&table->mdl_request); } if (mdl_requests.is_empty()) @@ -4712,7 +4797,7 @@ lock_table_names(THD *thd, { if (thd->lex->create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR), tables_start->table_name); } @@ -4761,34 +4846,33 @@ open_tables_check_upgradable_mdl(THD *thd, TABLE_LIST *tables_start, for (table= tables_start; table && table != tables_end; table= table->next_global) { - if (table->mdl_request.type >= MDL_SHARED_NO_WRITE && - !(table->open_type == OT_TEMPORARY_ONLY || - (flags & MYSQL_OPEN_TEMPORARY_ONLY) || - (table->open_type != OT_BASE_ONLY && - ! (flags & MYSQL_OPEN_SKIP_TEMPORARY) && - find_temporary_table(thd, table)))) + if (table->mdl_request.type < MDL_SHARED_UPGRADABLE || + table->open_type == OT_TEMPORARY_ONLY || + (table->open_type == OT_TEMPORARY_OR_BASE && is_temporary_table(table))) { - /* - We don't need to do anything about the found TABLE instance as it - will be handled later in open_tables(), we only need to check that - an upgradable lock is already acquired. When we enter LOCK TABLES - mode, SNRW locks are acquired before all other locks. 
So if under - LOCK TABLES we find that there is TABLE instance with upgradeable - lock, all other instances of TABLE for the same table will have the - same ticket. - - Note that this works OK even for CREATE TABLE statements which - request X type of metadata lock. This is because under LOCK TABLES - such statements don't create the table but only check if it exists - or, in most complex case, only insert into it. - Thus SNRW lock should be enough. - - Note that find_table_for_mdl_upgrade() will report an error if - no suitable ticket is found. - */ - if (!find_table_for_mdl_upgrade(thd, table->db, table->table_name, false)) - return TRUE; + continue; } + + /* + We don't need to do anything about the found TABLE instance as it + will be handled later in open_tables(), we only need to check that + an upgradable lock is already acquired. When we enter LOCK TABLES + mode, SNRW locks are acquired before all other locks. So if under + LOCK TABLES we find that there is TABLE instance with upgradeable + lock, all other instances of TABLE for the same table will have the + same ticket. + + Note that this works OK even for CREATE TABLE statements which + request X type of metadata lock. This is because under LOCK TABLES + such statements don't create the table but only check if it exists + or, in most complex case, only insert into it. + Thus SNRW lock should be enough. + + Note that find_table_for_mdl_upgrade() will report an error if + no suitable ticket is found. 
+ */ + if (!find_table_for_mdl_upgrade(thd, table->db, table->table_name, false)) + return TRUE; } return FALSE; @@ -4828,11 +4912,12 @@ bool open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags, Prelocking_strategy *prelocking_strategy) { /* - We use pointers to "next_global" member in the last processed TABLE_LIST - element and to the "next" member in the last processed Sroutine_hash_entry - element as iterators over, correspondingly, the table list and stored routines - list which stay valid and allow to continue iteration when new elements are - added to the tail of the lists. + We use pointers to "next_global" member in the last processed + TABLE_LIST element and to the "next" member in the last processed + Sroutine_hash_entry element as iterators over, correspondingly, + the table list and stored routines list which stay valid and allow + to continue iteration when new elements are added to the tail of + the lists. */ TABLE_LIST **table_to_open; Sroutine_hash_entry **sroutine_to_open; @@ -4921,7 +5006,7 @@ restart: for (table= *start; table && table != thd->lex->first_not_own_table(); table= table->next_global) { - if (table->mdl_request.type >= MDL_SHARED_NO_WRITE) + if (table->mdl_request.type >= MDL_SHARED_UPGRADABLE) table->mdl_request.ticket= NULL; } } @@ -4976,6 +5061,10 @@ restart: if (ot_ctx.recover_from_failed_open(thd)) goto err; + /* Re-open temporary tables after close_tables_for_reopen(). */ + if (open_temporary_tables(thd, *start)) + goto err; + error= FALSE; goto restart; } @@ -5029,6 +5118,10 @@ restart: if (ot_ctx.recover_from_failed_open(thd)) goto err; + /* Re-open temporary tables after close_tables_for_reopen(). 
*/ + if (open_temporary_tables(thd, *start)) + goto err; + error= FALSE; goto restart; } @@ -5443,6 +5536,10 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type, bool error; DBUG_ENTER("open_ltable"); + /* Ignore temporary tables as they have already ben opened*/ + if (table_list->table) + DBUG_RETURN(table_list->table); + /* should not be used in a prelocked_mode context, see NOTE above */ DBUG_ASSERT(thd->locked_tables_mode < LTM_PRELOCKED); @@ -5452,7 +5549,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type, table_list->required_type= FRMTYPE_TABLE; /* This function can't properly handle requests for such metadata locks. */ - DBUG_ASSERT(table_list->mdl_request.type < MDL_SHARED_NO_WRITE); + DBUG_ASSERT(table_list->mdl_request.type < MDL_SHARED_UPGRADABLE); while ((error= open_table(thd, table_list, thd->mem_root, &ot_ctx)) && ot_ctx.can_recover_from_failed_open()) @@ -5692,7 +5789,6 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, uint flags) { TABLE_LIST *table; - DBUG_ENTER("lock_tables"); /* We can't meet statement requiring prelocking if we already @@ -5960,6 +6056,9 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables, @param add_to_temporary_tables_list Specifies if the opened TABLE instance should be linked into THD::temporary_tables list. + @param open_in_engine Indicates that we need to open table + in storage engine in addition to + constructing TABLE object for it. 
@note This function is used: - by alter_table() to open a temporary table; @@ -5972,7 +6071,8 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables, TABLE *open_table_uncached(THD *thd, handlerton *hton, const char *path, const char *db, const char *table_name, - bool add_to_temporary_tables_list) + bool add_to_temporary_tables_list, + bool open_in_engine) { TABLE *tmp_table; TABLE_SHARE *share; @@ -5994,6 +6094,13 @@ TABLE *open_table_uncached(THD *thd, handlerton *hton, MYF(MY_WME)))) DBUG_RETURN(0); /* purecov: inspected */ +#ifndef DBUG_OFF + mysql_mutex_lock(&LOCK_open); + DBUG_ASSERT(!my_hash_search(&table_def_cache, (uchar*) cache_key, + key_length)); + mysql_mutex_unlock(&LOCK_open); +#endif + share= (TABLE_SHARE*) (tmp_table+1); tmp_path= (char*) (share+1); saved_cache_key= strmov(tmp_path, path)+1; @@ -6014,11 +6121,17 @@ TABLE *open_table_uncached(THD *thd, handlerton *hton, share->m_psi= PSI_CALL_get_table_share(true, share); if (open_table_from_share(thd, share, table_name, + open_in_engine ? (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | - HA_GET_INDEX), + HA_GET_INDEX) : 0, READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, ha_open_options, - tmp_table, FALSE)) + tmp_table, + /* + Set "is_create_table" if the table does not + exist in SE + */ + open_in_engine ? false : true)) { /* No need to lock share->mutex as this is not needed for tmp tables */ free_table_share(share); @@ -6027,6 +6140,7 @@ TABLE *open_table_uncached(THD *thd, handlerton *hton, } tmp_table->reginfo.lock_type= TL_WRITE; // Simulate locked + tmp_table->grant.privilege= TMP_TABLE_ACLS; share->tmp_table= (tmp_table->file->has_transactions() ? TRANSACTIONAL_TMP_TABLE : NON_TRANSACTIONAL_TMP_TABLE); @@ -6149,6 +6263,143 @@ static void update_field_dependencies(THD *thd, Field *field, TABLE *table) } +/** + Find a temporary table specified by TABLE_LIST instance in the cache and + prepare its TABLE instance for use. 
+ + This function tries to resolve this table in the list of temporary tables + of this thread. Temporary tables are thread-local and "shadow" base + tables with the same name. + + @note In most cases one should use open_temporary_tables() instead + of this call. + + @note One should finalize process of opening temporary table for table + list element by calling open_and_process_table(). This function + is responsible for table version checking and handling of merge + tables. + + @note We used to check global_read_lock before opening temporary tables. + However, that limitation was artificial and is removed now. + + @return Error status. + @retval FALSE On success. If a temporary table exists for the given + key, tl->table is set. + @retval TRUE On error. my_error() has been called. +*/ + +bool open_temporary_table(THD *thd, TABLE_LIST *tl) +{ + TABLE *table; + DBUG_ENTER("open_temporary_table"); + DBUG_PRINT("enter", ("table: '%s'.'%s'", tl->db, tl->table_name)); + + /* + Code in open_table() assumes that TABLE_LIST::table can + be non-zero only for pre-opened temporary tables. + */ + DBUG_ASSERT(tl->table == NULL); + + /* + This function should not be called for cases when derived or I_S + tables can be met since table list elements for such tables can + have invalid db or table name. + Instead open_temporary_tables() should be used. + */ + DBUG_ASSERT(!tl->derived && !tl->schema_table); + + if (tl->open_type == OT_BASE_ONLY) + { + DBUG_PRINT("info", ("skip_temporary is set")); + DBUG_RETURN(FALSE); + } + + if (!(table= find_temporary_table(thd, tl))) + { + if (tl->open_type == OT_TEMPORARY_ONLY && + tl->open_strategy == TABLE_LIST::OPEN_NORMAL) + { + my_error(ER_NO_SUCH_TABLE, MYF(0), tl->db, tl->table_name); + DBUG_RETURN(TRUE); + } + DBUG_RETURN(FALSE); + } + +#ifdef WITH_PARTITION_STORAGE_ENGINE + if (tl->partition_names) + { + /* Partitioned temporary tables is not supported. 
*/ + DBUG_ASSERT(!table->part_info); + my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0)); + DBUG_RETURN(true); + } +#endif + + if (table->query_id) + { + /* + We're trying to use the same temporary table twice in a query. + Right now we don't support this because a temporary table is always + represented by only one TABLE object in THD, and it can not be + cloned. Emit an error for an unsupported behaviour. + */ + + DBUG_PRINT("error", + ("query_id: %lu server_id: %u pseudo_thread_id: %lu", + (ulong) table->query_id, (uint) thd->variables.server_id, + (ulong) thd->variables.pseudo_thread_id)); + my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr()); + DBUG_RETURN(TRUE); + } + + table->query_id= thd->query_id; + thd->thread_specific_used= TRUE; + + tl->updatable= 1; // It is not derived table nor non-updatable VIEW. + tl->table= table; + + table->init(thd, tl); + + DBUG_PRINT("info", ("Using temporary table")); + DBUG_RETURN(FALSE); +} + + +/** + Pre-open temporary tables corresponding to table list elements. + + @note One should finalize process of opening temporary tables + by calling open_tables(). This function is responsible + for table version checking and handling of merge tables. + + @return Error status. + @retval FALSE On success. If a temporary tables exists for the + given element, tl->table is set. + @retval TRUE On error. my_error() has been called. +*/ + +bool open_temporary_tables(THD *thd, TABLE_LIST *tl_list) +{ + TABLE_LIST *first_not_own= thd->lex->first_not_own_table(); + DBUG_ENTER("open_temporary_tables"); + + for (TABLE_LIST *tl= tl_list; tl && tl != first_not_own; tl= tl->next_global) + { + if (tl->derived || tl->schema_table) + { + /* + Derived and I_S tables will be handled by a later call to open_tables(). + */ + continue; + } + + if (open_temporary_table(thd, tl)) + DBUG_RETURN(TRUE); + } + + DBUG_RETURN(FALSE); +} + /* Find a field by name in a view that uses merge algorithm. 
@@ -8818,7 +9069,7 @@ err_no_arena: @retval false OK. */ -static bool +bool fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values, bool ignore_errors) { @@ -8842,7 +9093,7 @@ fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values, thus we safely can take table from the first field. */ fld= (Item_field*)f++; - if (!(field= fld->filed_for_view_update())) + if (!(field= fld->field_for_view_update())) { my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); goto err; @@ -8856,7 +9107,7 @@ fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values, while ((fld= f++)) { - if (!(field= fld->filed_for_view_update())) + if (!(field= fld->field_for_view_update())) { my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); goto err; @@ -8871,7 +9122,7 @@ fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values, value->type() != Item::NULL_ITEM && table->s->table_category != TABLE_CATEGORY_TEMPORARY) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN, ER(ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN), rfield->field_name, table->s->table_name.str); @@ -8948,7 +9199,7 @@ fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, List<Item> &fields, if (fields.elements) { fld= (Item_field*)f++; - item_field= fld->filed_for_view_update(); + item_field= fld->field_for_view_update(); if (item_field && item_field->field && table && table->vfield) { DBUG_ASSERT(table == item_field->field->table); @@ -9023,7 +9274,7 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values, value->type() != Item::NULL_ITEM && table->s->table_category != TABLE_CATEGORY_TEMPORARY) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN, 
ER(ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN), field->field_name, table->s->table_name.str); @@ -9282,6 +9533,15 @@ bool mysql_notify_thread_having_shared_lock(THD *thd, THD *in_use, instances (if there are no used instances will also remove TABLE_SHARE). + TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE - + remove all TABLE instances + except those that belong to + this thread, but don't mark + TABLE_SHARE as old. There + should be no TABLE objects + used by other threads and + caller should have exclusive + metadata lock on the table. @param db Name of database @param table_name Name of table @param has_lock If TRUE, LOCK_open is already acquired @@ -9319,7 +9579,7 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, { if (share->ref_count) { - I_P_List_iterator<TABLE, TABLE_share> it(share->free_tables); + TABLE_SHARE::TABLE_list::Iterator it(share->free_tables); #ifndef DBUG_OFF if (remove_type == TDC_RT_REMOVE_ALL) { @@ -9328,12 +9588,14 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, else if (remove_type == TDC_RT_REMOVE_NOT_OWN || remove_type == TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE) { - I_P_List_iterator<TABLE, TABLE_share> it2(share->used_tables); + TABLE_SHARE::TABLE_list::Iterator it2(share->used_tables); while ((table= it2++)) + { if (table->in_use != thd) { DBUG_ASSERT(0); } + } } #endif /* @@ -9359,7 +9621,10 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, free_cache_entry(table); } else + { + DBUG_ASSERT(remove_type != TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE); (void) my_hash_delete(&table_def_cache, (uchar*) share); + } } if (! 
has_lock) @@ -9745,7 +10010,7 @@ open_log_table(THD *thd, TABLE_LIST *one_table, Open_tables_backup *backup) DBUG_ASSERT(table->s->table_category == TABLE_CATEGORY_LOG); /* Make sure all columns get assigned to a default value */ table->use_all_columns(); - table->no_replicate= 1; + DBUG_ASSERT(table->no_replicate); } else thd->restore_backup_open_tables_state(backup); diff --git a/sql/sql_base.h b/sql/sql_base.h index 95d9bf21fe8..a4f35b59ba9 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -62,6 +62,7 @@ enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, enum enum_tdc_remove_table_type {TDC_RT_REMOVE_ALL, TDC_RT_REMOVE_NOT_OWN, TDC_RT_REMOVE_UNUSED, TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE}; +#define TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE /* bits for last argument to remove_table_from_cache() */ #define RTFC_NO_FLAG 0x0000 @@ -79,13 +80,13 @@ bool table_def_init(void); void table_def_free(void); void table_def_start_shutdown(void); void assign_new_table_id(TABLE_SHARE *share); -uint cached_open_tables(void); uint cached_table_definitions(void); +uint cached_open_tables(void); /** Create a table cache key for non-temporary table. - @param key Buffer for key (must be at least NAME_LEN*2+2 bytes). + @param key Buffer for key (must be at least MAX_DBKEY_LENGTH bytes). @param db Database name. @param table_name Table name. 
@@ -108,8 +109,9 @@ create_table_def_key(char *key, const char *db, const char *table_name) uint create_tmp_table_def_key(THD *thd, char *key, const char *db, const char *table_name); +uint get_table_def_key(const TABLE_LIST *table_list, const char **key); TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name, - char *key, uint key_length, uint flags, + const char *key, uint key_length, uint flags, my_hash_value_type hash_value); void release_table_share(TABLE_SHARE *share); TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name); @@ -117,7 +119,7 @@ TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name); // convenience helper: call get_table_share() without precomputed hash_value static inline TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name, - char *key, uint key_length, + const char *key, uint key_length, uint flags) { return get_table_share(thd, db, table_name, key, key_length, flags, @@ -140,7 +142,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, /* mysql_lock_tables() and open_table() flags bits */ #define MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK 0x0001 #define MYSQL_OPEN_IGNORE_FLUSH 0x0002 -#define MYSQL_OPEN_TEMPORARY_ONLY 0x0004 +/* MYSQL_OPEN_TEMPORARY_ONLY (0x0004) is not used anymore. */ #define MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY 0x0008 #define MYSQL_LOCK_LOG_TABLE 0x0010 /** @@ -153,8 +155,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, a new instance of the table. */ #define MYSQL_OPEN_GET_NEW_TABLE 0x0040 -/** Don't look up the table in the list of temporary tables. */ -#define MYSQL_OPEN_SKIP_TEMPORARY 0x0080 +/* 0x0080 used to be MYSQL_OPEN_SKIP_TEMPORARY */ /** Fail instead of waiting when conficting metadata lock is discovered. */ #define MYSQL_OPEN_FAIL_ON_MDL_CONFLICT 0x0100 /** Open tables using MDL_SHARED lock instead of one specified in parser. 
*/ @@ -176,6 +177,11 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, #define MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK 0x1000 #define MYSQL_LOCK_NOT_TEMPORARY 0x2000 #define MYSQL_OPEN_FOR_REPAIR 0x4000 +/** + Only check THD::killed if waits happen (e.g. wait on MDL, wait on + table flush, wait on thr_lock.c locks) while opening and locking table. +*/ +#define MYSQL_OPEN_IGNORE_KILLED 0x8000 /** Please refer to the internals manual. */ #define MYSQL_OPEN_REOPEN (MYSQL_OPEN_IGNORE_FLUSH |\ @@ -183,11 +189,11 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update, MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY |\ MYSQL_LOCK_IGNORE_TIMEOUT |\ MYSQL_OPEN_GET_NEW_TABLE |\ - MYSQL_OPEN_SKIP_TEMPORARY |\ MYSQL_OPEN_HAS_MDL_LOCK) bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, Open_table_context *ot_ctx); + bool open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias, uint db_stat, uint prgflag, uint ha_open_flags, TABLE *outparam, TABLE_LIST *table_desc, @@ -197,7 +203,8 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, List<String> *index_list); TABLE *open_table_uncached(THD *thd, handlerton *hton, const char *path, const char *db, const char *table_name, - bool add_to_temporary_tables_list); + bool add_to_temporary_tables_list, + bool open_in_engine); TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name); TABLE *find_write_locked_table(TABLE *list, const char *db, const char *table_name); @@ -239,6 +246,8 @@ bool setup_fields(THD *thd, Item** ref_pointer_array, List<Item> &item, enum_mark_columns mark_used_columns, List<Item> *sum_func_list, bool allow_sum_func); void unfix_fields(List<Item> &items); +bool fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, + List<Item> &values, bool ignore_errors); bool fill_record(THD *thd, TABLE *table, Field **field, List<Item> &values, bool ignore_errors, bool use_value); @@ -310,7 +319,7 @@ bool lock_tables(THD *thd, TABLE_LIST 
*tables, uint counter, uint flags); int decide_logging_format(THD *thd, TABLE_LIST *tables); void free_io_cache(TABLE *entry); void intern_close_table(TABLE *entry); -bool close_thread_table(THD *thd, TABLE **table_ptr); +void close_thread_table(THD *thd, TABLE **table_ptr); bool close_temporary_tables(THD *thd); TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, bool check_alias); @@ -320,6 +329,8 @@ void close_temporary_table(THD *thd, TABLE *table, bool free_share, void close_temporary(TABLE *table, bool free_share, bool delete_table); bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db, const char *table_name); +bool open_temporary_tables(THD *thd, TABLE_LIST *tl_list); +bool open_temporary_table(THD *thd, TABLE_LIST *tl); bool is_equal(const LEX_STRING *a, const LEX_STRING *b); class Open_tables_backup; @@ -340,13 +351,14 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables, bool wait_for_refresh, ulong timeout); bool close_cached_connection_tables(THD *thd, LEX_STRING *connect_string); void close_all_tables_for_name(THD *thd, TABLE_SHARE *share, - ha_extra_function extra); + ha_extra_function extra, + TABLE *skip_table); OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild); void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type, const char *db, const char *table_name, bool has_lock); bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias, - char *cache_key, uint cache_key_length, + const char *cache_key, uint cache_key_length, MEM_ROOT *mem_root, uint flags); static inline bool tdc_open_view(THD *thd, TABLE_LIST *table_list, @@ -516,11 +528,6 @@ class Lock_tables_prelocking_strategy : public DML_prelocking_strategy class Alter_table_prelocking_strategy : public Prelocking_strategy { public: - - Alter_table_prelocking_strategy(Alter_info *alter_info) - : m_alter_info(alter_info) - {} - virtual bool handle_routine(THD *thd, Query_tables_list 
*prelocking_ctx, Sroutine_hash_entry *rt, sp_head *sp, bool *need_prelocking); @@ -528,9 +535,6 @@ public: TABLE_LIST *table_list, bool *need_prelocking); virtual bool handle_view(THD *thd, Query_tables_list *prelocking_ctx, TABLE_LIST *table_list, bool *need_prelocking); - -private: - Alter_info *m_alter_info; }; @@ -652,6 +656,30 @@ private: /** + Check if a TABLE_LIST instance represents a pre-opened temporary table. +*/ + +inline bool is_temporary_table(TABLE_LIST *tl) +{ + if (tl->view || tl->schema_table) + return FALSE; + + if (!tl->table) + return FALSE; + + /* + NOTE: 'table->s' might be NULL for specially constructed TABLE + instances. See SHOW TRIGGERS for example. + */ + + if (!tl->table->s) + return FALSE; + + return tl->table->s->tmp_table != NO_TMP_TABLE; +} + + +/** This internal handler is used to trap ER_NO_SUCH_TABLE. */ @@ -665,9 +693,9 @@ public: bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); /** Returns TRUE if one or more ER_NO_SUCH_TABLE errors have been diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 436f7043c49..007a1b3b585 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -336,6 +336,7 @@ TODO list: #include "sql_acl.h" // SELECT_ACL #include "sql_base.h" // TMP_TABLE_KEY_EXTRA #include "debug_sync.h" // DEBUG_SYNC +#include "sql_table.h" #ifdef HAVE_QUERY_CACHE #include <m_ctype.h> #include <my_dir.h> @@ -345,6 +346,7 @@ TODO list: #include "probes_mysql.h" #include "log_slow.h" #include "transaction.h" +#include "strfunc.h" const uchar *query_state_map; @@ -1175,7 +1177,7 @@ void Query_cache::end_of_result(THD *thd) DBUG_VOID_RETURN; /* Ensure that only complete results are cached. 
*/ - DBUG_ASSERT(thd->stmt_da->is_eof()); + DBUG_ASSERT(thd->get_stmt_da()->is_eof()); if (thd->killed) { @@ -1223,7 +1225,7 @@ void Query_cache::end_of_result(THD *thd) } last_result_block= header->result()->prev; allign_size= ALIGN_SIZE(last_result_block->used); - len= max(query_cache.min_allocation_unit, allign_size); + len= MY_MAX(query_cache.min_allocation_unit, allign_size); if (last_result_block->length >= query_cache.min_allocation_unit + len) query_cache.split_block(last_result_block,len); @@ -1638,6 +1640,41 @@ send_data_in_chunks(NET *net, const uchar *packet, ulong len) #endif +/** + Build a normalized table name suitable for query cache engine callback + + This consist of normalized directory '/' normalized_file_name + followed by suffix. + Suffix is needed for partitioned tables. +*/ + +size_t build_normalized_name(char *buff, size_t bufflen, + const char *db, size_t db_len, + const char *table_name, size_t table_len, + size_t suffix_len) +{ + uint errors; + size_t length; + char *pos= buff, *end= buff+bufflen; + DBUG_ENTER("build_normalized_name"); + + (*pos++)= FN_LIBCHAR; + length= strconvert(system_charset_info, db, db_len, + &my_charset_filename, pos, bufflen - 3, + &errors); + pos+= length; + (*pos++)= FN_LIBCHAR; + length= strconvert(system_charset_info, table_name, table_len, + &my_charset_filename, pos, (uint) (end - pos), + &errors); + pos+= length; + if (pos + suffix_len < end) + pos= strmake(pos, table_name + table_len, suffix_len); + + DBUG_RETURN((size_t) (pos - buff)); +} + + /* Check if the query is in the cache. If it was cached, send it to the user. 
@@ -2013,35 +2050,50 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", } #endif /*!NO_EMBEDDED_ACCESS_CHECKS*/ engine_data= table->engine_data(); - if (table->callback() && - !(*table->callback())(thd, table->db(), - table->key_length(), - &engine_data)) + if (table->callback()) { - DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", - table_list.db, table_list.alias)); - BLOCK_UNLOCK_RD(query_block); - if (engine_data != table->engine_data()) + char qcache_se_key_name[FN_REFLEN + 10]; + uint qcache_se_key_len, db_length= strlen(table->db()); + engine_data= table->engine_data(); + + qcache_se_key_len= build_normalized_name(qcache_se_key_name, + sizeof(qcache_se_key_name), + table->db(), + db_length, + table->table(), + table->key_length() - + db_length - 2 - + table->suffix_length(), + table->suffix_length()); + + if (!(*table->callback())(thd, qcache_se_key_name, + qcache_se_key_len, &engine_data)) { - DBUG_PRINT("qcache", - ("Handler require invalidation queries of %s.%s %lu-%lu", - table_list.db, table_list.alias, - (ulong) engine_data, (ulong) table->engine_data())); - invalidate_table_internal(thd, - (uchar *) table->db(), - table->key_length()); - } - else - { - /* - As this can change from call to call, don't reset set - thd->lex->safe_to_cache_query - */ - thd->query_cache_is_applicable= 0; // Query can't be cached + DBUG_PRINT("qcache", ("Handler does not allow caching for %.*s", + qcache_se_key_len, qcache_se_key_name)); + BLOCK_UNLOCK_RD(query_block); + if (engine_data != table->engine_data()) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %.*s %lu-%lu", + qcache_se_key_len, qcache_se_key_name, + (ulong) engine_data, (ulong) table->engine_data())); + invalidate_table_internal(thd, + (uchar *) table->db(), + table->key_length()); + } + else + { + /* + As this can change from call to call, don't reset set + thd->lex->safe_to_cache_query + */ + thd->query_cache_is_applicable= 0; // Query can't be cached + } + /* End 
the statement transaction potentially started by engine. */ + trans_rollback_stmt(thd); + goto err_unlock; // Parse query } - /* End the statement transaction potentially started by engine. */ - trans_rollback_stmt(thd); - goto err_unlock; // Parse query } else DBUG_PRINT("qcache", ("handler allow caching %s,%s", @@ -2091,8 +2143,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d", response, we can't handle it anyway. */ (void) trans_commit_stmt(thd); - if (!thd->stmt_da->is_set()) - thd->stmt_da->disable_status(); + if (!thd->get_stmt_da()->is_set()) + thd->get_stmt_da()->disable_status(); BLOCK_UNLOCK_RD(query_block); MYSQL_QUERY_CACHE_HIT(thd->query(), (ulong) thd->limit_found_rows); @@ -2898,7 +2950,7 @@ Query_cache::write_block_data(ulong data_len, uchar* data, DBUG_ENTER("Query_cache::write_block_data"); DBUG_PRINT("qcache", ("data: %ld, header: %ld, all header: %ld", data_len, header_len, all_headers_len)); - Query_cache_block *block= allocate_block(max(align_len, + Query_cache_block *block= allocate_block(MY_MAX(align_len, min_allocation_unit),1, 0); if (block != 0) { @@ -2953,7 +3005,7 @@ Query_cache::append_result_data(Query_cache_block **current_block, ulong append_min = get_min_append_result_data_size(); if (last_block_free_space < data_len && append_next_free_block(last_block, - max(tail, append_min))) + MY_MAX(tail, append_min))) last_block_free_space = last_block->length - last_block->used; // If no space in last block (even after join) allocate new block if (last_block_free_space < data_len) @@ -2981,7 +3033,7 @@ Query_cache::append_result_data(Query_cache_block **current_block, // Now finally write data to the last block if (success && last_block_free_space > 0) { - ulong to_copy = min(data_len,last_block_free_space); + ulong to_copy = MY_MIN(data_len,last_block_free_space); DBUG_PRINT("qcache", ("use free space %lub at block 0x%lx to copy %lub", last_block_free_space, (ulong)last_block, to_copy)); memcpy((uchar*) last_block + last_block->used, 
data, to_copy); @@ -3069,8 +3121,8 @@ inline ulong Query_cache::get_min_first_result_data_size() if (queries_in_cache < QUERY_CACHE_MIN_ESTIMATED_QUERIES_NUMBER) return min_result_data_size; ulong avg_result = (query_cache_size - free_memory) / queries_in_cache; - avg_result = min(avg_result, query_cache_limit); - return max(min_result_data_size, avg_result); + avg_result = MY_MIN(avg_result, query_cache_limit); + return MY_MAX(min_result_data_size, avg_result); } inline ulong Query_cache::get_min_append_result_data_size() @@ -3102,7 +3154,7 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block, ulong len= data_len + all_headers_len; ulong align_len= ALIGN_SIZE(len); - if (!(new_block= allocate_block(max(min_size, align_len), + if (!(new_block= allocate_block(MY_MAX(min_size, align_len), min_result_data_size == 0, all_headers_len + min_result_data_size))) { @@ -3111,7 +3163,7 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block, } new_block->n_tables = 0; - new_block->used = min(len, new_block->length); + new_block->used = MY_MIN(len, new_block->length); new_block->type = Query_cache_block::RES_INCOMPLETE; new_block->next = new_block->prev = new_block; Query_cache_result *header = new_block->result(); @@ -3280,7 +3332,7 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used, There are not callback function for for VIEWs */ if (!insert_table(key_length, key, (*block_table), - tables_used->view_db.length + 1, + tables_used->view_db.length + 1, 0, HA_CACHE_TBL_NONTRANSACT, 0, 0, TRUE)) DBUG_RETURN(0); /* @@ -3301,7 +3353,7 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used, if (!insert_table(tables_used->table->s->table_cache_key.length, tables_used->table->s->table_cache_key.str, (*block_table), - tables_used->db_length, + tables_used->db_length, 0, tables_used->table->file->table_cache_type(), tables_used->callback_func, tables_used->engine_data, @@ -3366,7 +3418,8 @@ 
my_bool Query_cache::register_all_tables(THD *thd, my_bool Query_cache::insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type, + uint32 db_length, uint8 suffix_length_arg, + uint8 cache_type, qc_engine_callback callback, ulonglong engine_data, my_bool hash) @@ -3441,6 +3494,7 @@ Query_cache::insert_table(uint key_len, char *key, char *db= header->db(); header->table(db + db_length + 1); header->key_length(key_len); + header->suffix_length(suffix_length_arg); header->type(cache_type); header->callback(callback); header->engine_data(engine_data); @@ -3517,7 +3571,7 @@ Query_cache::allocate_block(ulong len, my_bool not_less, ulong min) DBUG_PRINT("qcache", ("len %lu, not less %d, min %lu", len, not_less,min)); - if (len >= min(query_cache_size, query_cache_limit)) + if (len >= MY_MIN(query_cache_size, query_cache_limit)) { DBUG_PRINT("qcache", ("Query cache hase only %lu memory and limit %lu", query_cache_size, query_cache_limit)); @@ -4078,13 +4132,13 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, continue; handler= table->file; if (!handler->register_query_cache_table(thd, - table->s->table_cache_key.str, - table->s->table_cache_key.length, + table->s->normalized_path.str, + table->s->normalized_path.length, &tables_used->callback_func, &tables_used->engine_data)) { - DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", - tables_used->db, tables_used->alias)); + DBUG_PRINT("qcache", ("Handler does not allow caching for %s", + table->s->normalized_path.str)); /* As this can change from call to call, don't reset set thd->lex->safe_to_cache_query @@ -4503,7 +4557,7 @@ uint Query_cache::filename_2_table_key (char *key, const char *path, DBUG_PRINT("qcache", ("table '%-.*s.%s'", *db_length, dbname, filename)); DBUG_RETURN((uint) (strmake(strmake(key, dbname, - min(*db_length, NAME_LEN)) + 1, + MY_MIN(*db_length, NAME_LEN)) + 1, filename, NAME_LEN) - key) + 1); } diff --git a/sql/sql_cache.h 
b/sql/sql_cache.h index f35ac889b23..15848dabd33 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -190,6 +190,7 @@ struct Query_cache_table Query_cache_table() {} /* Remove gcc warning */ char *tbl; uint32 key_len; + uint8 suffix_len; /* For partitioned tables */ uint8 table_type; /* unique for every engine reference */ qc_engine_callback callback_func; @@ -210,6 +211,8 @@ struct Query_cache_table inline void table(char *table_arg) { tbl= table_arg; } inline uint32 key_length() { return key_len; } inline void key_length(uint32 len) { key_len= len; } + inline uint8 suffix_length() { return suffix_len; } + inline void suffix_length(uint8 len) { suffix_len= len; } inline uint8 type() { return table_type; } inline void type(uint8 t) { table_type= t; } inline qc_engine_callback callback() { return callback_func; } @@ -490,7 +493,8 @@ protected: unsigned pkt_nr); my_bool insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type, + uint32 db_length, uint8 suffix_length_arg, + uint8 cache_type, qc_engine_callback callback, ulonglong engine_data, my_bool hash); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 4fc1769ba1f..c9f07c4d036 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -63,6 +63,7 @@ #include "debug_sync.h" #include "sql_parse.h" // is_update_query #include "sql_callback.h" +#include "lock.h" #include "sql_connect.h" /* @@ -72,6 +73,8 @@ char internal_table_name[2]= "*"; char empty_c_string[1]= {0}; /* used for not defined db */ +LEX_STRING EMPTY_STR= { (char *) "", 0 }; + const char * const THD::DEFAULT_WHERE= "field list"; /**************************************************************************** @@ -128,6 +131,7 @@ Key::Key(const Key &rhs, MEM_ROOT *mem_root) Foreign_key::Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root) :Key(rhs,mem_root), + ref_db(rhs.ref_db), ref_table(rhs.ref_table), ref_columns(rhs.ref_columns,mem_root), delete_opt(rhs.delete_opt), @@ -583,7 +587,7 @@ void 
THD::enter_stage(const PSI_stage_info *new_stage, proc_info= msg; #ifdef HAVE_PSI_THREAD_INTERFACE - PSI_CALL(set_thread_state)(msg); + PSI_THREAD_CALL(set_thread_state)(msg); MYSQL_SET_STAGE(m_current_stage_key, calling_file, calling_line); #endif } @@ -682,7 +686,7 @@ int thd_tx_is_read_only(const THD *thd) extern "C" void thd_inc_row_count(THD *thd) { - thd->warning_info->inc_current_row_for_warning(); + thd->get_stmt_da()->inc_current_row_for_warning(); } @@ -761,7 +765,7 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length, if (max_query_len < 1) len= thd->query_length(); else - len= min(thd->query_length(), max_query_len); + len= MY_MIN(thd->query_length(), max_query_len); str.append('\n'); str.append(thd->query(), len); } @@ -776,7 +780,7 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length, was reallocated to a larger buffer to be able to fit. */ DBUG_ASSERT(buffer != NULL); - length= min(str.length(), length-1); + length= MY_MIN(str.length(), length-1); memcpy(buffer, str.c_ptr_quick(), length); /* Make sure that the new string is null terminated */ buffer[length]= '\0'; @@ -801,9 +805,9 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length, bool Drop_table_error_handler::handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { *cond_hdl= NULL; return ((sql_errno == EE_DELETE && my_errno == ENOENT) || @@ -826,8 +830,6 @@ THD::THD() stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE), m_examined_row_count(0), accessed_rows_and_keys(0), - warning_info(&main_warning_info), - stmt_da(&main_da), m_statement_psi(NULL), m_idle_psi(NULL), m_server_idle(false), @@ -847,7 +849,8 @@ THD::THD() #if defined(ENABLED_DEBUG_SYNC) debug_sync_control(0), #endif /* defined(ENABLED_DEBUG_SYNC) */ - main_warning_info(0, false, false) + 
main_da(0, false, false), + m_stmt_da(&main_da) { ulong tmp; @@ -859,8 +862,8 @@ THD::THD() THD *old_THR_THD= current_thd; set_current_thd(this); status_var.memory_used= 0; + main_da.init(); - main_warning_info.init(); /* Pass nominal parameters to init_alloc_root only to ensure that the destructor works OK in case of an error. The main_mem_root @@ -917,6 +920,7 @@ THD::THD() mysys_var=0; binlog_evt_union.do_union= FALSE; enable_slow_log= 0; + durability_property= HA_REGULAR_DURABILITY; #ifndef DBUG_OFF dbug_sentry=THD_SENTRY_MAGIC; @@ -1026,9 +1030,9 @@ void THD::push_internal_handler(Internal_error_handler *handler) bool THD::handle_condition(uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { if (!m_internal_handler) { @@ -1065,7 +1069,7 @@ void THD::raise_error(uint sql_errno) const char* msg= ER(sql_errno); (void) raise_condition(sql_errno, NULL, - MYSQL_ERROR::WARN_LEVEL_ERROR, + Sql_condition::WARN_LEVEL_ERROR, msg); } @@ -1081,7 +1085,7 @@ void THD::raise_error_printf(uint sql_errno, ...) va_end(args); (void) raise_condition(sql_errno, NULL, - MYSQL_ERROR::WARN_LEVEL_ERROR, + Sql_condition::WARN_LEVEL_ERROR, ebuff); DBUG_VOID_RETURN; } @@ -1091,7 +1095,7 @@ void THD::raise_warning(uint sql_errno) const char* msg= ER(sql_errno); (void) raise_condition(sql_errno, NULL, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, msg); } @@ -1107,7 +1111,7 @@ void THD::raise_warning_printf(uint sql_errno, ...) 
va_end(args); (void) raise_condition(sql_errno, NULL, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ebuff); DBUG_VOID_RETURN; } @@ -1121,7 +1125,7 @@ void THD::raise_note(uint sql_errno) const char* msg= ER(sql_errno); (void) raise_condition(sql_errno, NULL, - MYSQL_ERROR::WARN_LEVEL_NOTE, + Sql_condition::WARN_LEVEL_NOTE, msg); DBUG_VOID_RETURN; } @@ -1140,24 +1144,25 @@ void THD::raise_note_printf(uint sql_errno, ...) va_end(args); (void) raise_condition(sql_errno, NULL, - MYSQL_ERROR::WARN_LEVEL_NOTE, + Sql_condition::WARN_LEVEL_NOTE, ebuff); DBUG_VOID_RETURN; } -MYSQL_ERROR* THD::raise_condition(uint sql_errno, +Sql_condition* THD::raise_condition(uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg) { - MYSQL_ERROR *cond= NULL; + Diagnostics_area *da= get_stmt_da(); + Sql_condition *cond= NULL; DBUG_ENTER("THD::raise_condition"); if (!(variables.option_bits & OPTION_SQL_NOTES) && - (level == MYSQL_ERROR::WARN_LEVEL_NOTE)) + (level == Sql_condition::WARN_LEVEL_NOTE)) DBUG_RETURN(NULL); - warning_info->opt_clear_warning_info(query_id); + da->opt_clear_warning_info(query_id); /* TODO: replace by DBUG_ASSERT(sql_errno != 0) once all bugs similar to @@ -1171,24 +1176,24 @@ MYSQL_ERROR* THD::raise_condition(uint sql_errno, if (sqlstate == NULL) sqlstate= mysql_errno_to_sqlstate(sql_errno); - if ((level == MYSQL_ERROR::WARN_LEVEL_WARN) && + if ((level == Sql_condition::WARN_LEVEL_WARN) && really_abort_on_warning()) { /* FIXME: push_warning and strict SQL_MODE case. 
*/ - level= MYSQL_ERROR::WARN_LEVEL_ERROR; + level= Sql_condition::WARN_LEVEL_ERROR; killed= KILL_BAD_DATA; } switch (level) { - case MYSQL_ERROR::WARN_LEVEL_NOTE: - case MYSQL_ERROR::WARN_LEVEL_WARN: + case Sql_condition::WARN_LEVEL_NOTE: + case Sql_condition::WARN_LEVEL_WARN: got_warning= 1; break; - case MYSQL_ERROR::WARN_LEVEL_ERROR: + case Sql_condition::WARN_LEVEL_ERROR: break; default: DBUG_ASSERT(FALSE); @@ -1197,29 +1202,31 @@ MYSQL_ERROR* THD::raise_condition(uint sql_errno, if (handle_condition(sql_errno, sqlstate, level, msg, &cond)) DBUG_RETURN(cond); - if (level == MYSQL_ERROR::WARN_LEVEL_ERROR) + /* + Avoid pushing a condition for fatal out of memory errors as this will + require memory allocation and therefore might fail. Non fatal out of + memory errors can occur if raised by SIGNAL/RESIGNAL statement. + */ + if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY || + sql_errno == ER_OUTOFMEMORY))) + { + cond= da->push_warning(this, sql_errno, sqlstate, level, msg); + } + + + if (level == Sql_condition::WARN_LEVEL_ERROR) { is_slave_error= 1; // needed to catch query errors during replication - if (! stmt_da->is_error()) + if (!da->is_error()) { set_row_count_func(-1); - stmt_da->set_error_status(this, sql_errno, msg, sqlstate); + da->set_error_status(sql_errno, msg, sqlstate, cond); } } query_cache_abort(&query_cache_tls); - /* - Avoid pushing a condition for fatal out of memory errors as this will - require memory allocation and therefore might fail. Non fatal out of - memory errors can occur if raised by SIGNAL/RESIGNAL statement. 
- */ - if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY || - sql_errno == ER_OUTOFMEMORY))) - { - cond= warning_info->push_warning(this, sql_errno, sqlstate, level, msg); - } DBUG_RETURN(cond); } @@ -1331,6 +1338,7 @@ void THD::init(void) tx_read_only= variables.tx_read_only; update_charset(); reset_current_stmt_binlog_format_row(); + reset_binlog_local_stmt_filter(); set_status_var_init(); bzero((char *) &org_status_var, sizeof(org_status_var)); @@ -1546,7 +1554,6 @@ THD::~THD() mysql_audit_release(this); plugin_thdvar_cleanup(this); - DBUG_PRINT("info", ("freeing security context")); main_security_ctx.destroy(); my_free(db); db= NULL; @@ -1570,12 +1577,14 @@ THD::~THD() #endif free_root(&main_mem_root, MYF(0)); - main_warning_info.free_memory(); + main_da.free_memory(); if (status_var.memory_used != 0) { DBUG_PRINT("error", ("memory_used: %lld", status_var.memory_used)); SAFEMALLOC_REPORT_MEMORY(my_thread_dbug_id()); +#ifdef ENABLE_BEFORE_END_OF_MERGE_QQ DBUG_ASSERT(status_var.memory_used == 0); // Ensure everything is freed +#endif } set_current_thd(orig_thd); @@ -1805,6 +1814,46 @@ void THD::disconnect() } +bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use, + bool needs_thr_lock_abort) +{ + THD *in_use= ctx_in_use->get_thd(); + bool signalled= FALSE; + + if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) && + !in_use->killed) + { + in_use->killed= KILL_CONNECTION; + mysql_mutex_lock(&in_use->mysys_var->mutex); + if (in_use->mysys_var->current_cond) + mysql_cond_broadcast(in_use->mysys_var->current_cond); + mysql_mutex_unlock(&in_use->mysys_var->mutex); + signalled= TRUE; + } + + if (needs_thr_lock_abort) + { + mysql_mutex_lock(&in_use->LOCK_thd_data); + for (TABLE *thd_table= in_use->open_tables; + thd_table ; + thd_table= thd_table->next) + { + /* + Check for TABLE::needs_reopen() is needed since in some places we call + handler::close() for table instance (and set TABLE::db_stat to 0) + and do not remove such instances from the 
THD::open_tables + for some time, during which other thread can see those instances + (e.g. see partitioning code). + */ + if (!thd_table->needs_reopen()) + signalled|= mysql_lock_abort_for_thread(this, thd_table); + } + mysql_mutex_unlock(&in_use->LOCK_thd_data); + } + return signalled; +} + + /* Get error number for killed state Note that the error message can't have any parameters. @@ -1955,6 +2004,14 @@ void THD::cleanup_after_query() auto_inc_intervals_forced.empty(); #endif } + /* + Forget the binlog stmt filter for the next query. + There are some code paths that: + - do not call THD::decide_logging_format() + - do call THD::binlog_query(), + making this reset necessary. + */ + reset_binlog_local_stmt_filter(); if (first_successful_insert_id_in_cur_stmt > 0) { /* set what LAST_INSERT_ID() will return */ @@ -2661,7 +2718,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u) Non-ASCII separator arguments are not fully supported */ - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED, ER(WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED)); } @@ -2692,7 +2749,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u) (exchange->opt_enclosed && non_string_results && field_term_length && strchr(NUMERIC_CHARS, field_term_char))) { - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_AMBIGUOUS_FIELD_TERM, ER(ER_AMBIGUOUS_FIELD_TERM)); is_ambiguous_field_term= TRUE; } @@ -2775,7 +2832,7 @@ int select_export::send_data(List<Item> &items) convert_to_printable(printable_buff, sizeof(printable_buff), error_pos, res->ptr() + res->length() - error_pos, res->charset(), 6); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), "string", printable_buff, @@ -2786,7 +2843,7 @@ int 
select_export::send_data(List<Item> &items) /* result is longer than UINT_MAX32 and doesn't fit into String */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED), item->full_name(), static_cast<long>(row_count)); } @@ -2821,7 +2878,7 @@ int select_export::send_data(List<Item> &items) else { if (fixed_row_size) - used_length=min(res->length(),item->max_length); + used_length=MY_MIN(res->length(),item->max_length); else used_length=res->length(); if ((result_type == STRING_RESULT || is_unsafe_field_sep) && @@ -3562,7 +3619,7 @@ int select_dumpvar::send_data(List<Item> &items) bool select_dumpvar::send_eof() { if (! row_count) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA)); /* Don't send EOF if we're in error condition (which implies we've already @@ -3733,6 +3790,7 @@ void Security_context::init() void Security_context::destroy() { + DBUG_PRINT("info", ("freeing security context")); // If not pointer to constant if (host != my_localhost) { @@ -3966,7 +4024,7 @@ static void thd_send_progress(THD *thd) ulonglong report_time= my_interval_timer(); if (report_time > thd->progress.next_report_time) { - uint seconds_to_next= max(thd->variables.progress_report_time, + uint seconds_to_next= MY_MAX(thd->variables.progress_report_time, global_system_variables.progress_report_time); if (seconds_to_next == 0) // Turned off seconds_to_next= 1; // Check again after 1 second @@ -4152,6 +4210,41 @@ extern "C" bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd) } +extern "C" enum durability_properties thd_get_durability_property(const MYSQL_THD thd) +{ + enum durability_properties ret= HA_REGULAR_DURABILITY; + + if (thd != NULL) + ret= thd->durability_property; + + return ret; +} + +/** Get the auto_increment_offset auto_increment_increment. +Needed by InnoDB. 
+@param thd Thread object +@param off auto_increment_offset +@param inc auto_increment_increment */ +extern "C" void thd_get_autoinc(const MYSQL_THD thd, ulong* off, ulong* inc) +{ + *off = thd->variables.auto_increment_offset; + *inc = thd->variables.auto_increment_increment; +} + + +/** + Is strict sql_mode set. + Needed by InnoDB. + @param thd Thread object + @return True if sql_mode has strict mode (all or trans). + @retval true sql_mode has strict mode (all or trans). + @retval false sql_mode has not strict mode (all or trans). +*/ +extern "C" bool thd_is_strict_mode(const MYSQL_THD thd) +{ + return thd->is_strict_mode(); +} + /* Interface for MySQL Server, plugins and storage engines to report @@ -4387,7 +4480,7 @@ void THD::inc_status_created_tmp_disk_tables() { status_var_increment(status_var.created_tmp_disk_tables_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_created_tmp_disk_tables)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_created_tmp_disk_tables)(m_statement_psi, 1); #endif } @@ -4395,7 +4488,7 @@ void THD::inc_status_created_tmp_tables() { status_var_increment(status_var.created_tmp_tables_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_created_tmp_tables)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_created_tmp_tables)(m_statement_psi, 1); #endif } @@ -4403,7 +4496,7 @@ void THD::inc_status_select_full_join() { status_var_increment(status_var.select_full_join_count_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_select_full_join)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_select_full_join)(m_statement_psi, 1); #endif } @@ -4411,7 +4504,7 @@ void THD::inc_status_select_full_range_join() { status_var_increment(status_var.select_full_range_join_count_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_select_full_range_join)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_select_full_range_join)(m_statement_psi, 1); #endif } @@ -4419,7 +4512,7 @@ 
void THD::inc_status_select_range() { status_var_increment(status_var.select_range_count_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_select_range)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_select_range)(m_statement_psi, 1); #endif } @@ -4427,7 +4520,7 @@ void THD::inc_status_select_range_check() { status_var_increment(status_var.select_range_check_count_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_select_range_check)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_select_range_check)(m_statement_psi, 1); #endif } @@ -4435,7 +4528,7 @@ void THD::inc_status_select_scan() { status_var_increment(status_var.select_scan_count_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_select_scan)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_select_scan)(m_statement_psi, 1); #endif } @@ -4443,7 +4536,7 @@ void THD::inc_status_sort_merge_passes() { status_var_increment(status_var.filesort_merge_passes_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_sort_merge_passes)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_sort_merge_passes)(m_statement_psi, 1); #endif } @@ -4451,7 +4544,7 @@ void THD::inc_status_sort_range() { status_var_increment(status_var.filesort_range_count_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_sort_range)(m_statement_psi, 1); + PSI_STATEMENT_CALL(inc_statement_sort_range)(m_statement_psi, 1); #endif } @@ -4459,7 +4552,7 @@ void THD::inc_status_sort_rows(ha_rows count) { statistic_add(status_var.filesort_rows_, count, &LOCK_status); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_sort_rows)(m_statement_psi, count); + PSI_STATEMENT_CALL(inc_statement_sort_rows)(m_statement_psi, count); #endif } @@ -4467,7 +4560,7 @@ void THD::inc_status_sort_scan() { status_var_increment(status_var.filesort_scan_count_); #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(inc_statement_sort_scan)(m_statement_psi, 1); + 
PSI_STATEMENT_CALL(inc_statement_sort_scan)(m_statement_psi, 1); #endif } @@ -4475,7 +4568,7 @@ void THD::set_status_no_index_used() { server_status|= SERVER_QUERY_NO_INDEX_USED; #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(set_statement_no_index_used)(m_statement_psi); + PSI_STATEMENT_CALL(set_statement_no_index_used)(m_statement_psi); #endif } @@ -4483,7 +4576,7 @@ void THD::set_status_no_good_index_used() { server_status|= SERVER_QUERY_NO_GOOD_INDEX_USED; #ifdef HAVE_PSI_STATEMENT_INTERFACE - PSI_CALL(set_statement_no_good_index_used)(m_statement_psi); + PSI_STATEMENT_CALL(set_statement_no_good_index_used)(m_statement_psi); #endif } @@ -4491,7 +4584,7 @@ void THD::set_command(enum enum_server_command command) { m_command= command; #ifdef HAVE_PSI_THREAD_INTERFACE - PSI_CALL(set_thread_command)(m_command); + PSI_STATEMENT_CALL(set_thread_command)(m_command); #endif } @@ -4504,7 +4597,7 @@ void THD::set_query(const CSET_STRING &string_arg) mysql_mutex_unlock(&LOCK_thd_data); #ifdef HAVE_PSI_THREAD_INTERFACE - PSI_CALL(set_thread_info)(query(), query_length()); + PSI_THREAD_CALL(set_thread_info)(query(), query_length()); #endif } @@ -4817,6 +4910,8 @@ int THD::decide_logging_format(TABLE_LIST *tables) DBUG_PRINT("info", ("lex->get_stmt_unsafe_flags(): 0x%x", lex->get_stmt_unsafe_flags())); + reset_binlog_local_stmt_filter(); + /* We should not decide logging format if the binlog is closed or binlogging is off, or if the statement is filtered out from the @@ -4859,6 +4954,28 @@ int THD::decide_logging_format(TABLE_LIST *tables) A pointer to a previous table that was accessed. */ TABLE* prev_access_table= NULL; + /** + The number of tables used in the current statement, + that should be replicated. + */ + uint replicated_tables_count= 0; + /** + The number of tables written to in the current statement, + that should not be replicated. + A table should not be replicated when it is considered + 'local' to a MySQL instance. 
+ Currently, these tables are: + - mysql.slow_log + - mysql.general_log + - mysql.slave_relay_log_info + - mysql.slave_master_info + - mysql.slave_worker_info + - performance_schema.* + - TODO: information_schema.* + In practice, from this list, only performance_schema.* tables + are written to by user queries. + */ + uint non_replicated_tables_count= 0; #ifndef DBUG_OFF { @@ -4881,14 +4998,38 @@ int THD::decide_logging_format(TABLE_LIST *tables) if (table->placeholder()) continue; - if (table->table->s->table_category == TABLE_CATEGORY_PERFORMANCE || - table->table->s->table_category == TABLE_CATEGORY_LOG) - lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_TABLE); - handler::Table_flags const flags= table->table->file->ha_table_flags(); DBUG_PRINT("info", ("table: %s; ha_table_flags: 0x%llx", table->table_name, flags)); + + if (table->table->no_replicate) + { + /* + The statement uses a table that is not replicated. + The following properties about the table: + - persistent / transient + - transactional / non transactional + - temporary / permanent + - read or write + - multiple engines involved because of this table + are not relevant, as this table is completely ignored. + Because the statement uses a non replicated table, + using STATEMENT format in the binlog is impossible. + Either this statement will be discarded entirely, + or it will be logged (possibly partially) in ROW format. + */ + lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_TABLE); + + if (table->lock_type >= TL_WRITE_ALLOW_WRITE) + { + non_replicated_tables_count++; + continue; + } + } + + replicated_tables_count++; + if (table->lock_type >= TL_WRITE_ALLOW_WRITE) { if (prev_write_table && prev_write_table->file->ht != @@ -5064,6 +5205,30 @@ int THD::decide_logging_format(TABLE_LIST *tables) } } + if (non_replicated_tables_count > 0) + { + if ((replicated_tables_count == 0) || ! 
is_write) + { + DBUG_PRINT("info", ("decision: no logging, no replicated table affected")); + set_binlog_local_stmt_filter(); + } + else + { + if (! is_current_stmt_binlog_format_row()) + { + my_error((error= ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES), MYF(0)); + } + else + { + clear_binlog_local_stmt_filter(); + } + } + } + else + { + clear_binlog_local_stmt_filter(); + } + if (error) { DBUG_PRINT("info", ("decision: no logging since an error was generated")); DBUG_RETURN(-1); @@ -5102,7 +5267,7 @@ int THD::decide_logging_format(TABLE_LIST *tables) Replace the last ',' with '.' for table_names */ table_names.replace(table_names.length()-1, 1, ".", 1); - push_warning_printf(this, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(this, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, "Row events are not logged for %s statements " "that modify BLACKHOLE tables in row format. " @@ -5679,7 +5844,7 @@ void THD::issue_unsafe_warnings() { if ((unsafe_type_flags & (1 << unsafe_type)) != 0) { - push_warning_printf(this, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(this, Sql_condition::WARN_LEVEL_NOTE, ER_BINLOG_UNSAFE_STATEMENT, ER(ER_BINLOG_UNSAFE_STATEMENT), ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type])); @@ -5729,6 +5894,15 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg, show_query_type(qtype), (int) query_len, query_arg)); DBUG_ASSERT(query_arg && mysql_bin_log.is_open()); + if (get_binlog_local_stmt_filter() == BINLOG_FILTER_SET) + { + /* + The current statement is to be ignored, and not written to + the binlog. Do not call issue_unsafe_warnings(). + */ + DBUG_RETURN(0); + } + /* If we are not in prelocked mode, mysql_unlock_tables() will be called after this binlog_query(), so we have to flush the pending diff --git a/sql/sql_class.h b/sql/sql_class.h index 2e5e87fd232..889028ce8e5 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2012, Oracle and/or its affiliates. 
- Copyright (c) 2009, 2013, Monty Program Ab + Copyright (c) 2009, 2013, Monty Program Ab. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -72,7 +72,6 @@ class Rows_log_event; class Sroutine_hash_entry; class user_var_entry; -enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE }; enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME }; enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE }; enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON, @@ -122,6 +121,7 @@ enum enum_filetype { FILETYPE_CSV, FILETYPE_XML }; extern char internal_table_name[2]; extern char empty_c_string[1]; +extern LEX_STRING EMPTY_STR; extern MYSQL_PLUGIN_IMPORT const char **errmesg; extern bool volatile shutdown_in_progress; @@ -232,12 +232,15 @@ public: class Alter_drop :public Sql_alloc { public: - enum drop_type {KEY, COLUMN }; + enum drop_type {KEY, COLUMN, FOREIGN_KEY }; const char *name; enum drop_type type; bool drop_if_exists; Alter_drop(enum drop_type par_type,const char *par_name, bool par_exists) - :name(par_name), type(par_type), drop_if_exists(par_exists) {} + :name(par_name), type(par_type), drop_if_exists(par_exists) + { + DBUG_ASSERT(par_name != NULL); + } /** Used to make a clone of this object for ALTER/CREATE TABLE @sa comment for Key_part_spec::clone @@ -304,7 +307,6 @@ public: { return new (mem_root) Key(*this, mem_root); } }; -class Table_ident; class Foreign_key: public Key { public: @@ -313,20 +315,25 @@ public: enum fk_option { FK_OPTION_UNDEF, FK_OPTION_RESTRICT, FK_OPTION_CASCADE, FK_OPTION_SET_NULL, FK_OPTION_NO_ACTION, FK_OPTION_DEFAULT}; - Table_ident *ref_table; + LEX_STRING ref_db; + LEX_STRING ref_table; List<Key_part_spec> ref_columns; uint delete_opt, update_opt, match_opt; Foreign_key(const LEX_STRING &name_arg, List<Key_part_spec> &cols, - Table_ident *table, List<Key_part_spec> &ref_cols, + const LEX_STRING 
&ref_db_arg, const LEX_STRING &ref_table_arg, + List<Key_part_spec> &ref_cols, uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg, bool if_not_exists_opt) :Key(FOREIGN_KEY, name_arg, &default_key_create_info, 0, cols, NULL, if_not_exists_opt), - ref_table(table), ref_columns(ref_cols), + ref_db(ref_db_arg), ref_table(ref_table_arg), ref_columns(ref_cols), delete_opt(delete_opt_arg), update_opt(update_opt_arg), match_opt(match_opt_arg) - {} - Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root); + { + // We don't check for duplicate FKs. + key_create_info.check_for_duplicate_indexes= false; + } + Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root); /** Used to make a clone of this object for ALTER/CREATE TABLE @sa comment for Key_part_spec::clone @@ -463,6 +470,8 @@ class Time_zone; #define THD_CHECK_SENTRY(thd) DBUG_ASSERT(thd->dbug_sentry == THD_SENTRY_MAGIC) +typedef ulonglong sql_mode_t; + typedef struct system_variables { /* @@ -486,7 +495,7 @@ typedef struct system_variables ulonglong tmp_table_size; ulonglong long_query_time; ulonglong optimizer_switch; - ulonglong sql_mode; ///< which non-standard SQL behaviour should be enabled + sql_mode_t sql_mode; ///< which non-standard SQL behaviour should be enabled ulonglong option_bits; ///< OPTION_xxx constants, e.g. 
OPTION_PROFILING ulonglong join_buff_space_limit; ulonglong log_slow_filter; @@ -1362,9 +1371,9 @@ public: virtual bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl) = 0; + Sql_condition ** cond_hdl) = 0; private: Internal_error_handler *m_prev_internal_handler; @@ -1383,9 +1392,9 @@ public: bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { /* Ignore error */ return TRUE; @@ -1410,9 +1419,9 @@ public: bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl); + Sql_condition ** cond_hdl); private: }; @@ -1584,6 +1593,7 @@ void dbug_serve_apcs(THD *thd, int n_calls); */ class THD :public Statement, + public MDL_context_owner, public Open_tables_state { private: @@ -1864,8 +1874,46 @@ public: return current_stmt_binlog_format == BINLOG_FORMAT_ROW; } + enum binlog_filter_state + { + BINLOG_FILTER_UNKNOWN, + BINLOG_FILTER_CLEAR, + BINLOG_FILTER_SET + }; + + inline void reset_binlog_local_stmt_filter() + { + m_binlog_filter_state= BINLOG_FILTER_UNKNOWN; + } + + inline void clear_binlog_local_stmt_filter() + { + DBUG_ASSERT(m_binlog_filter_state == BINLOG_FILTER_UNKNOWN); + m_binlog_filter_state= BINLOG_FILTER_CLEAR; + } + + inline void set_binlog_local_stmt_filter() + { + DBUG_ASSERT(m_binlog_filter_state == BINLOG_FILTER_UNKNOWN); + m_binlog_filter_state= BINLOG_FILTER_SET; + } + + inline binlog_filter_state get_binlog_local_stmt_filter() + { + return m_binlog_filter_state; + } + private: /** + Indicate if the current statement should be discarded + instead of written to the binlog. 
+ This is used to discard special statements, such as + DML or DDL that affects only 'local' (non replicated) + tables, such as performance_schema.* + */ + binlog_filter_state m_binlog_filter_state; + + /** Indicates the format in which the current statement will be logged. This can only be set from @c decide_logging_format(). */ @@ -2233,8 +2281,6 @@ public: USER_CONN *user_connect; CHARSET_INFO *db_charset; - Warning_info *warning_info; - Diagnostics_area *stmt_da; #if defined(ENABLED_PROFILING) PROFILING profiling; #endif @@ -2312,6 +2358,12 @@ public: MEM_ROOT *user_var_events_alloc; /* Allocate above array elements here */ /* + Define durability properties that engines may check to + improve performance. Not yet used in MariaDB + */ + enum durability_properties durability_property; + + /* If checking this in conjunction with a wait condition, please include a check after enter_cond() if you want to avoid a race condition. For details see the implementation of awake(), @@ -2595,10 +2647,41 @@ public: mysql_mutex_unlock(&mysys_var->mutex); return; } + virtual int is_killed() { return killed; } + virtual THD* get_thd() { return this; } + + /** + A callback to the server internals that is used to address + special cases of the locking protocol. + Invoked when acquiring an exclusive lock, for each thread that + has a conflicting shared metadata lock. + + This function: + - aborts waiting of the thread on a data lock, to make it notice + the pending exclusive lock and back off. + - if the thread is an INSERT DELAYED thread, sends it a KILL + signal to terminate it. + + @note This function does not wait for the thread to give away its + locks. Waiting is done outside for all threads at once. + + @param ctx_in_use The MDL context owner (thread) to wake up. + @param needs_thr_lock_abort Indicates that to wake up thread + this call needs to abort its waiting + on table-level lock. + + @retval TRUE if the thread was woken up + @retval FALSE otherwise. 
+ */ + virtual bool notify_shared_lock(MDL_context_owner *ctx_in_use, + bool needs_thr_lock_abort); + + // End implementation of MDL_context_owner interface. + inline bool is_strict_mode() const { - return variables.sql_mode & (MODE_STRICT_TRANS_TABLES | - MODE_STRICT_ALL_TABLES); + return (bool) (variables.sql_mode & (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)); } inline my_time_t query_start() { query_start_used=1; return start_time; } inline ulong query_start_sec_part() @@ -2609,7 +2692,7 @@ public: start_time= hrtime_to_my_time(hrtime); start_time_sec_part= hrtime_sec_part(hrtime); #ifdef HAVE_PSI_THREAD_INTERFACE - PSI_CALL(set_thread_start_time)(start_time); + PSI_THREAD_CALL(set_thread_start_time)(start_time); #endif } inline void set_start_time() @@ -2619,7 +2702,7 @@ public: start_time= hrtime_to_my_time(user_time); start_time_sec_part= hrtime_sec_part(user_time); #ifdef HAVE_PSI_THREAD_INTERFACE - PSI_CALL(set_thread_start_time)(start_time); + PSI_THREAD_CALL(set_thread_start_time)(start_time); #endif } else @@ -2779,8 +2862,8 @@ public: inline void clear_error() { DBUG_ENTER("clear_error"); - if (stmt_da->is_error()) - stmt_da->reset_diagnostics_area(); + if (get_stmt_da()->is_error()) + get_stmt_da()->reset_diagnostics_area(); is_slave_error= 0; DBUG_VOID_RETURN; } @@ -2807,7 +2890,7 @@ public: */ inline void fatal_error() { - DBUG_ASSERT(stmt_da->is_error() || killed); + DBUG_ASSERT(get_stmt_da()->is_error() || killed); is_fatal_error= 1; DBUG_PRINT("error",("Fatal error set")); } @@ -2824,11 +2907,19 @@ public: To raise this flag, use my_error(). */ - inline bool is_error() const { return stmt_da->is_error(); } + inline bool is_error() const { return m_stmt_da->is_error(); } /// Returns Diagnostics-area for the current statement. Diagnostics_area *get_stmt_da() - { return stmt_da; } + { return m_stmt_da; } + + /// Returns Diagnostics-area for the current statement. 
+ const Diagnostics_area *get_stmt_da() const + { return m_stmt_da; } + + /// Sets Diagnostics-area for the current statement. + void set_stmt_da(Diagnostics_area *da) + { m_stmt_da= da; } inline CHARSET_INFO *charset() { return variables.character_set_client; } void update_charset(); @@ -3069,7 +3160,7 @@ public: mysql_mutex_unlock(&LOCK_thd_data); #ifdef HAVE_PSI_THREAD_INTERFACE if (result) - PSI_CALL(set_thread_db)(new_db, new_db_len); + PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len); #endif return result; } @@ -3094,7 +3185,7 @@ public: db_length= new_db_len; mysql_mutex_unlock(&LOCK_thd_data); #ifdef HAVE_PSI_THREAD_INTERFACE - PSI_CALL(set_thread_db)(new_db, new_db_len); + PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len); #endif } } @@ -3126,6 +3217,7 @@ public: */ void push_internal_handler(Internal_error_handler *handler); +private: /** Handle a sql condition. @param sql_errno the condition error number @@ -3135,12 +3227,13 @@ public: @param[out] cond_hdl the sql condition raised, if any @return true if the condition is handled */ - virtual bool handle_condition(uint sql_errno, - const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, - const char* msg, - MYSQL_ERROR ** cond_hdl); + bool handle_condition(uint sql_errno, + const char* sqlstate, + Sql_condition::enum_warning_level level, + const char* msg, + Sql_condition ** cond_hdl); +public: /** Remove the error handler last pushed. */ @@ -3190,10 +3283,10 @@ private: To raise a SQL condition, the code should use the public raise_error() or raise_warning() methods provided by class THD. 
*/ - friend class Signal_common; - friend class Signal_statement; - friend class Resignal_statement; - friend void push_warning(THD*, MYSQL_ERROR::enum_warning_level, uint, const char*); + friend class Sql_cmd_common_signal; + friend class Sql_cmd_signal; + friend class Sql_cmd_resignal; + friend void push_warning(THD*, Sql_condition::enum_warning_level, uint, const char*); friend void my_message_sql(uint, const char *, myf); /** @@ -3204,10 +3297,10 @@ private: @param msg the condition message text @return The condition raised, or NULL */ - MYSQL_ERROR* + Sql_condition* raise_condition(uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg); public: @@ -3340,8 +3433,8 @@ private: tree itself is reused between executions and thus is stored elsewhere. */ MEM_ROOT main_mem_root; - Warning_info main_warning_info; Diagnostics_area main_da; + Diagnostics_area *m_stmt_da; /** It will be set TURE if CURRENT_USER() is called in account management @@ -3374,24 +3467,24 @@ private: }; -/** A short cut for thd->stmt_da->set_ok_status(). */ +/** A short cut for thd->get_stmt_da()->set_ok_status(). */ inline void my_ok(THD *thd, ulonglong affected_rows= 0, ulonglong id= 0, const char *message= NULL) { thd->set_row_count_func(affected_rows); - thd->stmt_da->set_ok_status(thd, affected_rows, id, message); + thd->get_stmt_da()->set_ok_status(affected_rows, id, message); } -/** A short cut for thd->stmt_da->set_eof_status(). */ +/** A short cut for thd->get_stmt_da()->set_eof_status(). */ inline void my_eof(THD *thd) { thd->set_row_count_func(-1); - thd->stmt_da->set_eof_status(thd); + thd->get_stmt_da()->set_eof_status(thd); } #define tmp_disable_binlog(A) \ @@ -3406,9 +3499,9 @@ my_eof(THD *thd) checking for all date handling. 
*/ -const my_bool strict_date_checking= 0; +const my_bool strict_date_checking= 1; -inline ulonglong sql_mode_for_dates(THD *thd) +inline sql_mode_t sql_mode_for_dates(THD *thd) { if (strict_date_checking) return (thd->variables.sql_mode & @@ -3417,7 +3510,7 @@ inline ulonglong sql_mode_for_dates(THD *thd) return (thd->variables.sql_mode & MODE_INVALID_DATES); } -inline ulonglong sql_mode_for_dates() +inline sql_mode_t sql_mode_for_dates() { return sql_mode_for_dates(current_thd); } diff --git a/sql/sql_client.cc b/sql/sql_client.cc index eb6c039c065..e7c555b5947 100644 --- a/sql/sql_client.cc +++ b/sql/sql_client.cc @@ -36,7 +36,7 @@ void my_net_local_init(NET *net) (uint)global_system_variables.net_write_timeout); net->retry_count= (uint) global_system_variables.net_retry_count; - net->max_packet_size= max(global_system_variables.net_buffer_length, + net->max_packet_size= MY_MAX(global_system_variables.net_buffer_length, global_system_variables.max_allowed_packet); #endif } diff --git a/sql/sql_cmd.h b/sql/sql_cmd.h index 794037a0033..de7ef5fc832 100644 --- a/sql/sql_cmd.h +++ b/sql/sql_cmd.h @@ -88,6 +88,7 @@ enum enum_sql_command { SQLCOM_SHOW_PROFILE, SQLCOM_SHOW_PROFILES, SQLCOM_SIGNAL, SQLCOM_RESIGNAL, SQLCOM_SHOW_RELAYLOG_EVENTS, + SQLCOM_GET_DIAGNOSTICS, SQLCOM_SHOW_USER_STATS, SQLCOM_SHOW_TABLE_STATS, SQLCOM_SHOW_INDEX_STATS, SQLCOM_SHOW_CLIENT_STATS, SQLCOM_SLAVE_ALL_START, SQLCOM_SLAVE_ALL_STOP, diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 19e02cc7dae..f14c43d4c54 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -124,6 +124,7 @@ end: int check_for_max_user_connections(THD *thd, USER_CONN *uc) { int error= 1; + Host_errors errors; DBUG_ENTER("check_for_max_user_connections"); mysql_mutex_lock(&LOCK_user_conn); @@ -135,6 +136,8 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc) !(thd->security_ctx->master_access & SUPER_ACL)) { my_error(ER_TOO_MANY_USER_CONNECTIONS, MYF(0), uc->user); + error=1; + 
errors.m_max_user_connection= 1; goto end; } time_out_user_resource_limits(thd, uc); @@ -144,6 +147,8 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc) my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user, "max_user_connections", (long) uc->user_resources.user_conn); + error= 1; + errors.m_max_user_connection= 1; goto end; } if (uc->user_resources.conn_per_hour && @@ -152,6 +157,8 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc) my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user, "max_connections_per_hour", (long) uc->user_resources.conn_per_hour); + error=1; + errors.m_max_user_connection_per_hour= 1; goto end; } uc->conn_per_hour++; @@ -169,6 +176,10 @@ end: thd->user_connect= NULL; } mysql_mutex_unlock(&LOCK_user_conn); + if (error) + { + inc_host_errors(thd->main_security_ctx.ip, &errors); + } DBUG_RETURN(error); } @@ -431,7 +442,7 @@ void init_user_stats(USER_STATS *user_stats, DBUG_ENTER("init_user_stats"); DBUG_PRINT("enter", ("user: %s priv_user: %s", user, priv_user)); - user_length= min(user_length, sizeof(user_stats->user)-1); + user_length= MY_MIN(user_length, sizeof(user_stats->user)-1); memcpy(user_stats->user, user, user_length); user_stats->user[user_length]= 0; user_stats->user_name_length= user_length; @@ -867,7 +878,10 @@ bool init_new_connection_handler_thread() { pthread_detach_this_thread(); if (my_thread_init()) + { + statistic_increment(connection_errors_internal, &LOCK_status); return 1; + } return 0; } @@ -887,6 +901,7 @@ bool init_new_connection_handler_thread() static int check_connection(THD *thd) { uint connect_errors= 0; + int auth_rc; NET *net= &thd->net; DBUG_PRINT("info", @@ -898,48 +913,116 @@ static int check_connection(THD *thd) if (!thd->main_security_ctx.host) // If TCP/IP connection { + my_bool peer_rc; char ip[NI_MAXHOST]; - if (vio_peer_addr(net->vio, ip, &thd->peer_port, NI_MAXHOST)) - { - my_error(ER_BAD_HOST_ERROR, MYF(0)); - return 1; - } - /* BEGIN : DEBUG */ - DBUG_EXECUTE_IF("addr_fake_ipv4", + 
peer_rc= vio_peer_addr(net->vio, ip, &thd->peer_port, NI_MAXHOST); + + /* + =========================================================================== + DEBUG code only (begin) + Simulate various output from vio_peer_addr(). + =========================================================================== + */ + + DBUG_EXECUTE_IF("vio_peer_addr_error", + { + peer_rc= 1; + } + ); + DBUG_EXECUTE_IF("vio_peer_addr_fake_ipv4", { struct sockaddr *sa= (sockaddr *) &net->vio->remote; sa->sa_family= AF_INET; - struct in_addr *ip4= &((struct sockaddr_in *)sa)->sin_addr; - /* See RFC 5737, 192.0.2.0/23 is reserved */ + struct in_addr *ip4= &((struct sockaddr_in *) sa)->sin_addr; + /* See RFC 5737, 192.0.2.0/24 is reserved. */ const char* fake= "192.0.2.4"; ip4->s_addr= inet_addr(fake); strcpy(ip, fake); - };); - /* END : DEBUG */ + peer_rc= 0; + } + ); +#ifdef HAVE_IPV6 + DBUG_EXECUTE_IF("vio_peer_addr_fake_ipv6", + { + struct sockaddr_in6 *sa= (sockaddr_in6 *) &net->vio->remote; + sa->sin6_family= AF_INET6; + struct in6_addr *ip6= & sa->sin6_addr; + /* See RFC 3849, ipv6 2001:DB8::/32 is reserved. */ + const char* fake= "2001:db8::6:6"; + /* inet_pton(AF_INET6, fake, ip6); not available on Windows XP. 
*/ + ip6->s6_addr[ 0] = 0x20; + ip6->s6_addr[ 1] = 0x01; + ip6->s6_addr[ 2] = 0x0d; + ip6->s6_addr[ 3] = 0xb8; + ip6->s6_addr[ 4] = 0x00; + ip6->s6_addr[ 5] = 0x00; + ip6->s6_addr[ 6] = 0x00; + ip6->s6_addr[ 7] = 0x00; + ip6->s6_addr[ 8] = 0x00; + ip6->s6_addr[ 9] = 0x00; + ip6->s6_addr[10] = 0x00; + ip6->s6_addr[11] = 0x00; + ip6->s6_addr[12] = 0x00; + ip6->s6_addr[13] = 0x06; + ip6->s6_addr[14] = 0x00; + ip6->s6_addr[15] = 0x06; + strcpy(ip, fake); + peer_rc= 0; + } + ); +#endif /* HAVE_IPV6 */ + + /* + =========================================================================== + DEBUG code only (end) + =========================================================================== + */ + + if (peer_rc) + { + /* + Since we can not even get the peer IP address, + there is nothing to show in the host_cache, + so increment the global status variable for peer address errors. + */ + statistic_increment(connection_errors_peer_addr, &LOCK_status); + my_error(ER_BAD_HOST_ERROR, MYF(0)); + return 1; + } if (!(thd->main_security_ctx.ip= my_strdup(ip,MYF(MY_WME)))) + { + /* + No error accounting per IP in host_cache, + this is treated as a global server OOM error. + TODO: remove the need for my_strdup. + */ + statistic_increment(connection_errors_internal, &LOCK_status); return 1; /* The error is set by my_strdup(). 
*/ + } thd->main_security_ctx.host_or_ip= thd->main_security_ctx.ip; if (!(specialflag & SPECIAL_NO_RESOLVE)) { - if (ip_to_hostname(&net->vio->remote, thd->main_security_ctx.ip, - &thd->main_security_ctx.host, &connect_errors)) - { - my_error(ER_BAD_HOST_ERROR, MYF(0)); - return 1; - } + int rc; + + rc= ip_to_hostname(&net->vio->remote, + thd->main_security_ctx.ip, + &thd->main_security_ctx.host, + &connect_errors); /* Cut very long hostnames to avoid possible overflows */ if (thd->main_security_ctx.host) { if (thd->main_security_ctx.host != my_localhost) - thd->main_security_ctx.host[min(strlen(thd->main_security_ctx.host), + thd->main_security_ctx.host[MY_MIN(strlen(thd->main_security_ctx.host), HOSTNAME_LENGTH)]= 0; thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host; } - if (connect_errors > max_connect_errors) + + if (rc == RC_BLOCKED_HOST) { + /* HOST_CACHE stats updated by ip_to_hostname(). */ my_error(ER_HOST_IS_BLOCKED, MYF(0), thd->main_security_ctx.host_or_ip); return 1; } @@ -951,6 +1034,7 @@ static int check_connection(THD *thd) thd->main_security_ctx.ip : "unknown ip"))); if (acl_check_host(thd->main_security_ctx.host, thd->main_security_ctx.ip)) { + /* HOST_CACHE stats updated by acl_check_host(). */ my_error(ER_HOST_NOT_PRIVILEGED, MYF(0), thd->main_security_ctx.host_or_ip); return 1; @@ -967,9 +1051,34 @@ static int check_connection(THD *thd) vio_keepalive(net->vio, TRUE); if (thd->packet.alloc(thd->variables.net_buffer_length)) + { + /* + Important note: + net_buffer_length is a SESSION variable, + so it may be tempting to account OOM conditions per IP in the HOST_CACHE, + in case some clients are more demanding than others ... + However, this session variable is *not* initialized with a per client + value during the initial connection, it is initialized from the + GLOBAL net_buffer_length variable from the server. 
+ Hence, there is no reason to account on OOM conditions per client IP, + we count failures in the global server status instead. + */ + statistic_increment(connection_errors_internal, &LOCK_status); return 1; /* The error is set by alloc(). */ + } + + auth_rc= acl_authenticate(thd, connect_errors, 0); + if (auth_rc == 0 && connect_errors != 0) + { + /* + A client connection from this IP was successful, + after some previous failures. + Reset the connection error counter. + */ + reset_host_connect_errors(thd->main_security_ctx.ip); + } - return acl_authenticate(thd, connect_errors, 0); + return auth_rc; } @@ -1118,9 +1227,10 @@ void prepare_new_connection_state(THD* thd) execute_init_command(thd, &opt_init_connect, &LOCK_sys_init_connect); if (thd->is_error()) { + Host_errors errors; thd->killed= KILL_CONNECTION; thd->print_aborted_warning(0, "init_connect command failed"); - sql_print_warning("%s", thd->stmt_da->message()); + sql_print_warning("%s", thd->get_stmt_da()->message()); /* now let client to send its first command, @@ -1145,6 +1255,8 @@ void prepare_new_connection_state(THD* thd) thd->server_status&= ~SERVER_STATUS_CLEAR_SET; thd->protocol->end_statement(); thd->killed = KILL_CONNECTION; + errors.m_init_connect= 1; + inc_host_errors(thd->main_security_ctx.ip, &errors); return; } diff --git a/sql/sql_const.h b/sql/sql_const.h index d0a7a83f3a1..4ad39bad14a 100644 --- a/sql/sql_const.h +++ b/sql/sql_const.h @@ -68,7 +68,7 @@ #define PSEUDO_TABLE_BITS (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | \ RAND_TABLE_BIT) #define MAX_FIELDS 4096 /* Limit in the .frm file */ -#define MAX_PARTITIONS 1024 +#define MAX_PARTITIONS 8192 #define MAX_SELECT_NESTING (sizeof(nesting_map)*8-1) @@ -77,7 +77,6 @@ /* Some portable defines */ -#define portable_sizeof_char_ptr 8 #define STRING_BUFFER_USUAL_SIZE 80 /* Memory allocated when parsing a statement / saving a statement */ @@ -129,6 +128,13 @@ */ #define TABLE_DEF_CACHE_MIN 400 +/** + Maximum number of connections default 
value. + 151 is larger than Apache's default max children, + to avoid "too many connections" error in a common setup. +*/ +#define MAX_CONNECTIONS_DEFAULT 151 + /* Stack reservation. Feel free to raise this by the smallest amount you can to get the @@ -233,7 +239,7 @@ #define DELAYED_LIMIT 100 /**< pause after xxx inserts */ #define DELAYED_QUEUE_SIZE 1000 #define DELAYED_WAIT_TIMEOUT 5*60 /**< Wait for delayed insert */ -#define MAX_CONNECT_ERRORS 10 ///< errors before disabling host +#define MAX_CONNECT_ERRORS 100 ///< errors before disabling host #define LONG_TIMEOUT ((ulong) 3600L*24L*365L) diff --git a/sql/sql_crypt.h b/sql/sql_crypt.h index 3a12d603601..3df554e9d31 100644 --- a/sql/sql_crypt.h +++ b/sql/sql_crypt.h @@ -22,7 +22,7 @@ #endif #include "sql_list.h" /* Sql_alloc */ -#include "mysql_com.h" /* rand_struct */ +#include "my_rnd.h" /* rand_struct */ class SQL_CRYPT :public Sql_alloc { diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 39c30959fe4..9e30ed4513e 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -32,6 +32,7 @@ #include "log_event.h" // Query_log_event #include "sql_base.h" // lock_table_names, tdc_remove_table #include "sql_handler.h" // mysql_ha_rm_tables +#include "sql_class.h" #include <mysys_err.h> #include "sp_head.h" #include "sp.h" @@ -572,7 +573,7 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, error= -1; goto exit; } - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DB_CREATE_EXISTS, ER(ER_DB_CREATE_EXISTS), db); error= 0; goto not_silent; @@ -780,7 +781,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) } else { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DB_DROP_EXISTS, ER(ER_DB_DROP_EXISTS), db); error= false; goto update_binlog; @@ -809,7 +810,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) /* Lock all tables and stored 
routines about to be dropped. */ if (lock_table_names(thd, tables, NULL, thd->variables.lock_wait_timeout, - MYSQL_OPEN_SKIP_TEMPORARY) || + 0) || lock_db_routines(thd, db)) goto exit; @@ -1502,7 +1503,7 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch) { /* Throw a warning and free new_db_file_name. */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_BAD_DB_ERROR, ER(ER_BAD_DB_ERROR), new_db_file_name.str); diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 2992bb0da6e..d9dd538f96d 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -675,9 +675,9 @@ exit: if (derived->view) { if (thd->is_error() && - (thd->stmt_da->sql_errno() == ER_BAD_FIELD_ERROR || - thd->stmt_da->sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION || - thd->stmt_da->sql_errno() == ER_SP_DOES_NOT_EXIST)) + (thd->get_stmt_da()->sql_errno() == ER_BAD_FIELD_ERROR || + thd->get_stmt_da()->sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION || + thd->get_stmt_da()->sql_errno() == ER_SP_DOES_NOT_EXIST)) { thd->clear_error(); my_error(ER_VIEW_INVALID, MYF(0), derived->db, diff --git a/sql/sql_error.cc b/sql/sql_error.cc index acb61fe68c5..f382f18a983 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -47,12 +47,12 @@ This file contains the implementation of error and warnings related #include "sp_rcontext.h" /* - Design notes about MYSQL_ERROR::m_message_text. + Design notes about Sql_condition::m_message_text. 
- The member MYSQL_ERROR::m_message_text contains the text associated with + The member Sql_condition::m_message_text contains the text associated with an error, warning or note (which are all SQL 'conditions') - Producer of MYSQL_ERROR::m_message_text: + Producer of Sql_condition::m_message_text: ---------------------------------------- (#1) the server implementation itself, when invoking functions like @@ -78,16 +78,16 @@ This file contains the implementation of error and warnings related - a RESIGNAL statement, the message text is provided by the user logic, and is expressed in UTF8. - Storage of MYSQL_ERROR::m_message_text: + Storage of Sql_condition::m_message_text: --------------------------------------- - (#4) The class MYSQL_ERROR is used to hold the message text member. + (#4) The class Sql_condition is used to hold the message text member. This class represents a single SQL condition. (#5) The class Warning_info represents a SQL condition area, and contains a collection of SQL conditions in the Warning_info::m_warn_list - Consumer of MYSQL_ERROR::m_message_text: + Consumer of Sql_condition::m_message_text: ---------------------------------------- (#6) The statements SHOW WARNINGS and SHOW ERRORS display the content of @@ -97,9 +97,9 @@ This file contains the implementation of error and warnings related also read the content of: - the top level statement condition area (when executed in a query), - a sub statement (when executed in a stored program) - and return the data stored in a MYSQL_ERROR. + and return the data stored in a Sql_condition. - (#8) The RESIGNAL statement reads the MYSQL_ERROR caught by an exception + (#8) The RESIGNAL statement reads the Sql_condition caught by an exception handler, to raise a new or modified condition (in #3). 
The big picture @@ -113,7 +113,7 @@ This file contains the implementation of error and warnings related ----------------------------|---------------------------- | | | V | - MYSQL_ERROR(#4) | + Sql_condition(#4) | | | | | V | @@ -151,10 +151,10 @@ This file contains the implementation of error and warnings related As a result, the design choice for (#4) and (#5) is to store data in the 'error_message_charset_info' CHARSET, to minimize impact on the code base. - This is implemented by using 'String MYSQL_ERROR::m_message_text'. + This is implemented by using 'String Sql_condition::m_message_text'. The UTF8 -> error_message_charset_info conversion is implemented in - Signal_common::eval_signal_informations() (for path #B and #C). + Sql_cmd_common_signal::eval_signal_informations() (for path #B and #C). Future work ----------- @@ -164,14 +164,14 @@ This file contains the implementation of error and warnings related - Change (#4 and #5) to store message text in UTF8 natively. In practice, this means changing the type of the message text to - '<UTF8 String 128 class> MYSQL_ERROR::m_message_text', and is a direct + '<UTF8 String 128 class> Sql_condition::m_message_text', and is a direct consequence of WL#751. - Implement (#9) (GET DIAGNOSTICS). 
See WL#2111 (Stored Procedures: Implement GET DIAGNOSTICS) */ -MYSQL_ERROR::MYSQL_ERROR() +Sql_condition::Sql_condition() : Sql_alloc(), m_class_origin((const char*) NULL, 0, & my_charset_utf8_bin), m_subclass_origin((const char*) NULL, 0, & my_charset_utf8_bin), @@ -185,21 +185,20 @@ MYSQL_ERROR::MYSQL_ERROR() m_cursor_name((const char*) NULL, 0, & my_charset_utf8_bin), m_message_text(), m_sql_errno(0), - m_handled(0), - m_level(MYSQL_ERROR::WARN_LEVEL_ERROR), + m_level(Sql_condition::WARN_LEVEL_ERROR), m_mem_root(NULL) { memset(m_returned_sqlstate, 0, sizeof(m_returned_sqlstate)); } -void MYSQL_ERROR::init(MEM_ROOT *mem_root) +void Sql_condition::init(MEM_ROOT *mem_root) { DBUG_ASSERT(mem_root != NULL); DBUG_ASSERT(m_mem_root == NULL); m_mem_root= mem_root; } -void MYSQL_ERROR::clear() +void Sql_condition::clear() { m_class_origin.length(0); m_subclass_origin.length(0); @@ -213,11 +212,10 @@ void MYSQL_ERROR::clear() m_cursor_name.length(0); m_message_text.length(0); m_sql_errno= 0; - m_handled= 0; - m_level= MYSQL_ERROR::WARN_LEVEL_ERROR; + m_level= Sql_condition::WARN_LEVEL_ERROR; } -MYSQL_ERROR::MYSQL_ERROR(MEM_ROOT *mem_root) +Sql_condition::Sql_condition(MEM_ROOT *mem_root) : Sql_alloc(), m_class_origin((const char*) NULL, 0, & my_charset_utf8_bin), m_subclass_origin((const char*) NULL, 0, & my_charset_utf8_bin), @@ -231,8 +229,7 @@ MYSQL_ERROR::MYSQL_ERROR(MEM_ROOT *mem_root) m_cursor_name((const char*) NULL, 0, & my_charset_utf8_bin), m_message_text(), m_sql_errno(0), - m_handled(0), - m_level(MYSQL_ERROR::WARN_LEVEL_ERROR), + m_level(Sql_condition::WARN_LEVEL_ERROR), m_mem_root(mem_root) { DBUG_ASSERT(mem_root != NULL); @@ -257,7 +254,7 @@ static void copy_string(MEM_ROOT *mem_root, String* dst, const String* src) } void -MYSQL_ERROR::copy_opt_attributes(const MYSQL_ERROR *cond) +Sql_condition::copy_opt_attributes(const Sql_condition *cond) { DBUG_ASSERT(this != cond); copy_string(m_mem_root, & m_class_origin, & cond->m_class_origin); @@ -270,12 +267,11 
@@ MYSQL_ERROR::copy_opt_attributes(const MYSQL_ERROR *cond) copy_string(m_mem_root, & m_table_name, & cond->m_table_name); copy_string(m_mem_root, & m_column_name, & cond->m_column_name); copy_string(m_mem_root, & m_cursor_name, & cond->m_cursor_name); - m_handled= cond->m_handled; } void -MYSQL_ERROR::set(uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, const char* msg) +Sql_condition::set(uint sql_errno, const char* sqlstate, + Sql_condition::enum_warning_level level, const char* msg) { DBUG_ASSERT(sql_errno != 0); DBUG_ASSERT(sqlstate != NULL); @@ -290,11 +286,11 @@ MYSQL_ERROR::set(uint sql_errno, const char* sqlstate, } void -MYSQL_ERROR::set_builtin_message_text(const char* str) +Sql_condition::set_builtin_message_text(const char* str) { /* See the comments - "Design notes about MYSQL_ERROR::m_message_text." + "Design notes about Sql_condition::m_message_text." */ const char* copy; @@ -304,24 +300,42 @@ MYSQL_ERROR::set_builtin_message_text(const char* str) } const char* -MYSQL_ERROR::get_message_text() const +Sql_condition::get_message_text() const { return m_message_text.ptr(); } int -MYSQL_ERROR::get_message_octet_length() const +Sql_condition::get_message_octet_length() const { return m_message_text.length(); } void -MYSQL_ERROR::set_sqlstate(const char* sqlstate) +Sql_condition::set_sqlstate(const char* sqlstate) { memcpy(m_returned_sqlstate, sqlstate, SQLSTATE_LENGTH); m_returned_sqlstate[SQLSTATE_LENGTH]= '\0'; } +Diagnostics_area::Diagnostics_area(bool initialize) + : m_main_wi(0, false, initialize) +{ + push_warning_info(&m_main_wi); + + reset_diagnostics_area(); +} + +Diagnostics_area::Diagnostics_area(ulonglong warning_info_id, + bool allow_unlimited_warnings, + bool initialize) + : m_main_wi(warning_info_id, allow_unlimited_warnings, initialize) +{ + push_warning_info(&m_main_wi); + + reset_diagnostics_area(); +} + /** Clear this diagnostics area. 
@@ -333,7 +347,7 @@ Diagnostics_area::reset_diagnostics_area() { DBUG_ENTER("reset_diagnostics_area"); #ifdef DBUG_OFF - can_overwrite_status= FALSE; + m_can_overwrite_status= FALSE; /** Don't take chances in production */ m_message[0]= '\0'; m_sql_errno= 0; @@ -341,7 +355,8 @@ Diagnostics_area::reset_diagnostics_area() m_last_insert_id= 0; m_statement_warn_count= 0; #endif - is_sent= FALSE; + get_warning_info()->clear_error_condition(); + set_is_sent(false); /** Tiny reset in debug mode to see garbage right away */ m_status= DA_EMPTY; DBUG_VOID_RETURN; @@ -354,9 +369,9 @@ Diagnostics_area::reset_diagnostics_area() */ void -Diagnostics_area::set_ok_status(THD *thd, ulonglong affected_rows_arg, - ulonglong last_insert_id_arg, - const char *message_arg) +Diagnostics_area::set_ok_status(ulonglong affected_rows, + ulonglong last_insert_id, + const char *message) { DBUG_ENTER("set_ok_status"); DBUG_ASSERT(! is_set()); @@ -367,11 +382,11 @@ Diagnostics_area::set_ok_status(THD *thd, ulonglong affected_rows_arg, if (is_error() || is_disabled()) return; - m_statement_warn_count= thd->warning_info->statement_warn_count(); - m_affected_rows= affected_rows_arg; - m_last_insert_id= last_insert_id_arg; - if (message_arg) - strmake_buf(m_message, message_arg); + m_statement_warn_count= current_statement_warn_count(); + m_affected_rows= affected_rows; + m_last_insert_id= last_insert_id; + if (message) + strmake_buf(m_message, message); else m_message[0]= '\0'; m_status= DA_OK; @@ -402,20 +417,51 @@ Diagnostics_area::set_eof_status(THD *thd) anyway. */ m_statement_warn_count= (thd->spcont ? - 0 : thd->warning_info->statement_warn_count()); + 0 : + current_statement_warn_count()); m_status= DA_EOF; DBUG_VOID_RETURN; } /** - Set ERROR status. + Set ERROR status in the Diagnostics Area. This function should be used to + report fatal errors (such as out-of-memory errors) when no further + processing is possible. 
+ + @param sql_errno SQL-condition error number */ void -Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg, - const char *message_arg, - const char *sqlstate) +Diagnostics_area::set_error_status(uint sql_errno) +{ + set_error_status(sql_errno, + ER(sql_errno), + mysql_errno_to_sqlstate(sql_errno), + NULL); +} + + +/** + Set ERROR status in the Diagnostics Area. + + @note error_condition may be NULL. It happens if a) OOM error is being + reported; or b) when Warning_info is full. + + @param sql_errno SQL-condition error number + @param message SQL-condition message + @param sqlstate SQL-condition state + @param error_condition SQL-condition object representing the error state + + @note Note, that error_condition may be NULL. It happens if a) OOM error is + being reported; or b) when Warning_info is full. +*/ + +void +Diagnostics_area::set_error_status(uint sql_errno, + const char *message, + const char *sqlstate, + const Sql_condition *error_condition) { DBUG_ENTER("set_error_status"); /* @@ -423,7 +469,14 @@ Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg, The only exception is when we flush the message to the client, an error can happen during the flush. */ - DBUG_ASSERT(! is_set() || can_overwrite_status); + DBUG_ASSERT(! is_set() || m_can_overwrite_status); + + // message must be set properly by the caller. + DBUG_ASSERT(message); + + // sqlstate must be set properly by the caller. 
+ DBUG_ASSERT(sqlstate); + #ifdef DBUG_OFF /* In production, refuse to overwrite a custom response with an @@ -433,19 +486,17 @@ Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg, return; #endif - if (sqlstate == NULL) - sqlstate= mysql_errno_to_sqlstate(sql_errno_arg); - - m_sql_errno= sql_errno_arg; + m_sql_errno= sql_errno; memcpy(m_sqlstate, sqlstate, SQLSTATE_LENGTH); m_sqlstate[SQLSTATE_LENGTH]= '\0'; - strmake_buf(m_message, message_arg); + strmake_buf(m_message, message); + + get_warning_info()->set_error_condition(error_condition); m_status= DA_ERROR; DBUG_VOID_RETURN; } - /** Mark the diagnostics area as 'DISABLED'. @@ -463,15 +514,16 @@ Diagnostics_area::disable_status() Warning_info::Warning_info(ulonglong warn_id_arg, bool allow_unlimited_warnings, bool initialize) - :m_statement_warn_count(0), + :m_current_statement_warn_count(0), m_current_row_for_warning(1), m_warn_id(warn_id_arg), + m_error_condition(NULL), m_allow_unlimited_warnings(allow_unlimited_warnings), initialized(0), m_read_only(FALSE) { m_warn_list.empty(); - bzero((char*) m_warn_count, sizeof(m_warn_count)); + memset(m_warn_count, 0, sizeof(m_warn_count)); if (initialize) init(); } @@ -479,6 +531,7 @@ Warning_info::Warning_info(ulonglong warn_id_arg, void Warning_info::init() { /* Initialize sub structures */ + DBUG_ASSERT(initialized == 0); init_sql_alloc(&m_warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE, MYF(MY_THREAD_SPECIFIC)); initialized= 1; @@ -496,92 +549,164 @@ Warning_info::~Warning_info() } -/** - Reset the warning information of this connection. 
-*/ +bool Warning_info::has_sql_condition(const char *message_str, + ulong message_length) const +{ + Diagnostics_area::Sql_condition_iterator it(m_warn_list); + const Sql_condition *err; -void Warning_info::clear_warning_info(ulonglong warn_id_arg) + while ((err= it++)) + { + if (strncmp(message_str, err->get_message_text(), message_length) == 0) + return true; + } + + return false; +} + + +void Warning_info::clear(ulonglong new_id) { - m_warn_id= warn_id_arg; - free_memory(); - bzero((char*) m_warn_count, sizeof(m_warn_count)); + id(new_id); m_warn_list.empty(); - m_statement_warn_count= 0; + m_marked_sql_conditions.empty(); + free_memory(); + memset(m_warn_count, 0, sizeof(m_warn_count)); + m_current_statement_warn_count= 0; m_current_row_for_warning= 1; /* Start counting from the first row */ + clear_error_condition(); } -/** - Append warnings only if the original contents of the routine - warning info was replaced. -*/ -void Warning_info::merge_with_routine_info(THD *thd, Warning_info *source) +void Warning_info::append_warning_info(THD *thd, const Warning_info *source) { - /* - If a routine body is empty or if a routine did not - generate any warnings (thus m_warn_id didn't change), - do not duplicate our own contents by appending the - contents of the called routine. We know that the called - routine did not change its warning info. - - On the other hand, if the routine body is not empty and - some statement in the routine generates a warning or - uses tables, m_warn_id is guaranteed to have changed. - In this case we know that the routine warning info - contains only new warnings, and thus we perform a copy. 
- */ - if (m_warn_id != source->m_warn_id) + const Sql_condition *err; + Diagnostics_area::Sql_condition_iterator it(source->m_warn_list); + const Sql_condition *src_error_condition = source->get_error_condition(); + + while ((err= it++)) { - /* - If the invocation of the routine was a standalone statement, - rather than a sub-statement, in other words, if it's a CALL - of a procedure, rather than invocation of a function or a - trigger, we need to clear the current contents of the caller's - warning info. - - This is per MySQL rules: if a statement generates a warning, - warnings from the previous statement are flushed. Normally - it's done in push_warning(). However, here we don't use - push_warning() to avoid invocation of condition handlers or - escalation of warnings to errors. - */ - opt_clear_warning_info(thd->query_id); - append_warning_info(thd, source); + // Do not use ::push_warning() to avoid invocation of THD-internal-handlers. + Sql_condition *new_error= Warning_info::push_warning(thd, err); + + if (src_error_condition && src_error_condition == err) + set_error_condition(new_error); + + if (source->is_marked_for_removal(err)) + mark_condition_for_removal(new_error); } } + /** - Add a warning to the list of warnings. Increment the respective - counters. + Copy Sql_conditions that are not WARN_LEVEL_ERROR from the source + Warning_info to the current Warning_info. + + @param thd Thread context. + @param sp_wi Stored-program Warning_info + @param thd Thread context. + @param src_wi Warning_info to copy from. 
*/ -MYSQL_ERROR *Warning_info::push_warning(THD *thd, - uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, - const char *msg) +void Diagnostics_area::copy_non_errors_from_wi(THD *thd, + const Warning_info *src_wi) { - MYSQL_ERROR *cond= NULL; + Sql_condition_iterator it(src_wi->m_warn_list); + const Sql_condition *cond; + Warning_info *wi= get_warning_info(); + + while ((cond= it++)) + { + if (cond->get_level() == Sql_condition::WARN_LEVEL_ERROR) + continue; + + Sql_condition *new_condition= wi->push_warning(thd, cond); + + if (src_wi->is_marked_for_removal(cond)) + wi->mark_condition_for_removal(new_condition); + } +} + + +void Warning_info::mark_sql_conditions_for_removal() +{ + Sql_condition_list::Iterator it(m_warn_list); + Sql_condition *cond; + + while ((cond= it++)) + mark_condition_for_removal(cond); +} + + +void Warning_info::remove_marked_sql_conditions() +{ + List_iterator_fast<Sql_condition> it(m_marked_sql_conditions); + Sql_condition *cond; + + while ((cond= it++)) + { + m_warn_list.remove(cond); + m_warn_count[cond->get_level()]--; + m_current_statement_warn_count--; + if (cond == m_error_condition) + m_error_condition= NULL; + } + + m_marked_sql_conditions.empty(); +} + + +bool Warning_info::is_marked_for_removal(const Sql_condition *cond) const +{ + List_iterator_fast<Sql_condition> it( + const_cast<List<Sql_condition>&> (m_marked_sql_conditions)); + Sql_condition *c; + + while ((c= it++)) + { + if (c == cond) + return true; + } + + return false; +} + + +void Warning_info::reserve_space(THD *thd, uint count) +{ + while (m_warn_list.elements() && + (m_warn_list.elements() + count) > thd->variables.max_error_count) + m_warn_list.remove(m_warn_list.front()); +} + +Sql_condition *Warning_info::push_warning(THD *thd, + uint sql_errno, const char* sqlstate, + Sql_condition::enum_warning_level level, + const char *msg) +{ + Sql_condition *cond= NULL; if (! 
m_read_only) { if (m_allow_unlimited_warnings || - m_warn_list.elements < thd->variables.max_error_count) + m_warn_list.elements() < thd->variables.max_error_count) { - cond= new (& m_warn_root) MYSQL_ERROR(& m_warn_root); + cond= new (& m_warn_root) Sql_condition(& m_warn_root); if (cond) { cond->set(sql_errno, sqlstate, level, msg); - m_warn_list.push_back(cond, &m_warn_root); + m_warn_list.push_back(cond); } } m_warn_count[(uint) level]++; } - m_statement_warn_count++; + m_current_statement_warn_count++; return cond; } -MYSQL_ERROR *Warning_info::push_warning(THD *thd, const MYSQL_ERROR *sql_condition) + +Sql_condition *Warning_info::push_warning(THD *thd, const Sql_condition *sql_condition) { - MYSQL_ERROR *new_condition= push_warning(thd, + Sql_condition *new_condition= push_warning(thd, sql_condition->get_sql_errno(), sql_condition->get_sqlstate(), sql_condition->get_level(), @@ -604,7 +729,7 @@ MYSQL_ERROR *Warning_info::push_warning(THD *thd, const MYSQL_ERROR *sql_conditi msg Clear error message */ -void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, +void push_warning(THD *thd, Sql_condition::enum_warning_level level, uint code, const char *msg) { DBUG_ENTER("push_warning"); @@ -615,15 +740,15 @@ void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, WARN_LEVEL_ERROR *is* a bug. Either use my_printf_error(), my_error(), or WARN_LEVEL_WARN. */ - DBUG_ASSERT(level != MYSQL_ERROR::WARN_LEVEL_ERROR); + DBUG_ASSERT(level != Sql_condition::WARN_LEVEL_ERROR); - if (level == MYSQL_ERROR::WARN_LEVEL_ERROR) - level= MYSQL_ERROR::WARN_LEVEL_WARN; + if (level == Sql_condition::WARN_LEVEL_ERROR) + level= Sql_condition::WARN_LEVEL_WARN; (void) thd->raise_condition(code, NULL, level, msg); /* Make sure we also count warnings pushed after calling set_ok_status(). 
*/ - thd->stmt_da->increment_warning(); + thd->get_stmt_da()->increment_warning(); DBUG_VOID_RETURN; } @@ -640,7 +765,7 @@ void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, msg Clear error message */ -void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level, +void push_warning_printf(THD *thd, Sql_condition::enum_warning_level level, uint code, const char *format, ...) { va_list args; @@ -689,7 +814,7 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show) List<Item> field_list; DBUG_ENTER("mysqld_show_warnings"); - DBUG_ASSERT(thd->warning_info->is_read_only()); + DBUG_ASSERT(thd->get_stmt_da()->is_warning_info_read_only()); field_list.push_back(new Item_empty_string("Level", 7)); field_list.push_back(new Item_return_int("Code",4, MYSQL_TYPE_LONG)); @@ -699,7 +824,7 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show) Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); - MYSQL_ERROR *err; + const Sql_condition *err; SELECT_LEX *sel= &thd->lex->select_lex; SELECT_LEX_UNIT *unit= &thd->lex->unit; ulonglong idx= 0; @@ -707,7 +832,8 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show) unit->set_limit(sel); - List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list()); + Diagnostics_area::Sql_condition_iterator it= + thd->get_stmt_da()->sql_conditions(); while ((err= it++)) { /* Skip levels that the user is not interested in */ @@ -730,7 +856,7 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show) } my_eof(thd); - thd->warning_info->set_read_only(FALSE); + thd->get_stmt_da()->set_warning_info_read_only(FALSE); DBUG_RETURN(FALSE); } @@ -838,7 +964,7 @@ uint32 convert_error_message(char *to, uint32 to_length, CHARSET_INFO *to_cs, if (!to_cs || from_cs == to_cs || to_cs == &my_charset_bin) { - length= min(to_length, from_length); + length= MY_MIN(to_length, from_length); memmove(to, from, length); to[length]= 0; return length; @@ -880,3 +1006,32 @@ uint32 convert_error_message(char *to, 
uint32 to_length, CHARSET_INFO *to_cs, *errors= error_count; return (uint32) (to - to_start); } + + +/** + Sanity check for SQLSTATEs. The function does not check if it's really an + existing SQL-state (there are just too many), it just checks string length and + looks for bad characters. + + @param sqlstate the condition SQLSTATE. + + @retval true if it's ok. + @retval false if it's bad. +*/ + +bool is_sqlstate_valid(const LEX_STRING *sqlstate) +{ + if (sqlstate->length != 5) + return false; + + for (int i= 0 ; i < 5 ; ++i) + { + char c = sqlstate->str[i]; + + if ((c < '0' || '9' < c) && + (c < 'A' || 'Z' < c)) + return false; + } + + return true; +} diff --git a/sql/sql_error.h b/sql/sql_error.h index cd1b92a2bcc..0a75d7a392d 100644 --- a/sql/sql_error.h +++ b/sql/sql_error.h @@ -19,126 +19,13 @@ #include "sql_list.h" /* Sql_alloc, MEM_ROOT */ #include "m_string.h" /* LEX_STRING */ #include "sql_string.h" /* String */ +#include "sql_plist.h" /* I_P_List */ #include "mysql_com.h" /* MYSQL_ERRMSG_SIZE */ #include "my_time.h" /* MYSQL_TIME */ #include "decimal.h" class THD; - -/** - Stores status of the currently executed statement. - Cleared at the beginning of the statement, and then - can hold either OK, ERROR, or EOF status. - Can not be assigned twice per statement. -*/ - -class Diagnostics_area -{ -public: - enum enum_diagnostics_status - { - /** The area is cleared at start of a statement. */ - DA_EMPTY= 0, - /** Set whenever one calls my_ok(). */ - DA_OK, - /** Set whenever one calls my_eof(). */ - DA_EOF, - /** Set whenever one calls my_error() or my_message(). */ - DA_ERROR, - /** Set in case of a custom response, such as one from COM_STMT_PREPARE. */ - DA_DISABLED - }; - /** True if status information is sent to the client. */ - bool is_sent; - /** Set to make set_error_status after set_{ok,eof}_status possible. 
*/ - bool can_overwrite_status; - - void set_ok_status(THD *thd, ulonglong affected_rows_arg, - ulonglong last_insert_id_arg, - const char *message); - void set_eof_status(THD *thd); - void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg, - const char *sqlstate); - - void disable_status(); - - void reset_diagnostics_area(); - - bool is_set() const { return m_status != DA_EMPTY; } - bool is_error() const { return m_status == DA_ERROR; } - bool is_eof() const { return m_status == DA_EOF; } - bool is_ok() const { return m_status == DA_OK; } - bool is_disabled() const { return m_status == DA_DISABLED; } - enum_diagnostics_status status() const { return m_status; } - - const char *message() const - { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; } - - uint sql_errno() const - { DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; } - - const char* get_sqlstate() const - { DBUG_ASSERT(m_status == DA_ERROR); return m_sqlstate; } - - ulonglong affected_rows() const - { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; } - - ulonglong last_insert_id() const - { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; } - - uint statement_warn_count() const - { - DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF); - return m_statement_warn_count; - } - - /* Used to count any warnings pushed after calling set_ok_status(). */ - void increment_warning() - { - if (m_status != DA_EMPTY) - m_statement_warn_count++; - } - - Diagnostics_area() { reset_diagnostics_area(); } - -private: - /** Message buffer. Can be used by OK or ERROR status. */ - char m_message[MYSQL_ERRMSG_SIZE]; - /** - SQL error number. One of ER_ codes from share/errmsg.txt. - Set by set_error_status. - */ - uint m_sql_errno; - - char m_sqlstate[SQLSTATE_LENGTH+1]; - - /** - The number of rows affected by the last statement. This is - semantically close to thd->row_count_func, but has a different - life cycle. 
thd->row_count_func stores the value returned by - function ROW_COUNT() and is cleared only by statements that - update its value, such as INSERT, UPDATE, DELETE and few others. - This member is cleared at the beginning of the next statement. - - We could possibly merge the two, but life cycle of thd->row_count_func - can not be changed. - */ - ulonglong m_affected_rows; - /** - Similarly to the previous member, this is a replacement of - thd->first_successful_insert_id_in_prev_stmt, which is used - to implement LAST_INSERT_ID(). - */ - ulonglong m_last_insert_id; - /** - Number of warnings of this last statement. May differ from - the number of warnings returned by SHOW WARNINGS e.g. in case - the statement doesn't clear the warnings, and doesn't generate - them. - */ - uint m_statement_warn_count; - enum_diagnostics_status m_status; -}; +class my_decimal; /////////////////////////////////////////////////////////////////////////// @@ -146,10 +33,8 @@ private: Representation of a SQL condition. A SQL condition can be a completion condition (note, warning), or an exception condition (error, not found). - @note This class is named MYSQL_ERROR instead of SQL_condition for - historical reasons, to facilitate merging code with previous releases. */ -class MYSQL_ERROR : public Sql_alloc +class Sql_condition : public Sql_alloc { public: /* @@ -160,6 +45,7 @@ public: */ enum enum_warning_level { WARN_LEVEL_NOTE, WARN_LEVEL_WARN, WARN_LEVEL_ERROR, WARN_LEVEL_END}; + /** Get the MESSAGE_TEXT of this condition. @return the message text. @@ -190,30 +76,15 @@ public: Get the error level of this condition. @return the error level condition item. */ - MYSQL_ERROR::enum_warning_level get_level() const + Sql_condition::enum_warning_level get_level() const { return m_level; } - /** Destructor. 
*/ - ~MYSQL_ERROR() - {} - - /** check if condition was handled by a condition handler */ - bool handled() const - { - return m_handled; - } - /** mark that condition was handled */ - void mark_handled() - { - m_handled= 1; - } - private: /* - The interface of MYSQL_ERROR is mostly private, by design, + The interface of Sql_condition is mostly private, by design, so that only the following code: - various raise_error() or raise_warning() methods in class THD, - - the implementation of SIGNAL / RESIGNAL + - the implementation of SIGNAL / RESIGNAL / GET DIAGNOSTICS - catch / re-throw of SQL conditions in stored procedures (sp_rcontext) is allowed to create / modify a SQL condition. Enforcing this policy prevents confusion, since the only public @@ -223,20 +94,21 @@ private: */ friend class THD; friend class Warning_info; - friend class Signal_common; - friend class Signal_statement; - friend class Resignal_statement; + friend class Sql_cmd_common_signal; + friend class Sql_cmd_signal; + friend class Sql_cmd_resignal; friend class sp_rcontext; + friend class Condition_information_item; /** Default constructor. This constructor is usefull when allocating arrays. - Note that the init() method should be called to complete the MYSQL_ERROR. + Note that the init() method should be called to complete the Sql_condition. */ - MYSQL_ERROR(); + Sql_condition(); /** - Complete the MYSQL_ERROR initialisation. + Complete the Sql_condition initialisation. @param mem_root The memory root to use for the condition items of this condition */ @@ -247,15 +119,17 @@ private: @param mem_root The memory root to use for the condition items of this condition */ - MYSQL_ERROR(MEM_ROOT *mem_root); - + Sql_condition(MEM_ROOT *mem_root); + /** Destructor. */ + ~Sql_condition() + {} /** Copy optional condition items attributes. @param cond the condition to copy. 
*/ - void copy_opt_attributes(const MYSQL_ERROR *cond); + void copy_opt_attributes(const Sql_condition *cond); /** Set this condition area with a fixed message text. @@ -266,7 +140,7 @@ private: @param MyFlags additional flags. */ void set(uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg); /** @@ -279,6 +153,12 @@ private: /** Set the SQLSTATE of this condition. */ void set_sqlstate(const char* sqlstate); + /** Set the CLASS_ORIGIN of this condition. */ + void set_class_origin(); + + /** Set the SUBCLASS_ORIGIN of this condition. */ + void set_subclass_origin(); + /** Clear this SQL condition. */ @@ -321,9 +201,6 @@ private: /** MySQL extension, MYSQL_ERRNO condition item. */ uint m_sql_errno; - /** Marker if error/warning was handled by a continue handler */ - bool m_handled; - /** SQL RETURNED_SQLSTATE condition item. This member is always NUL terminated. @@ -331,44 +208,48 @@ private: char m_returned_sqlstate[SQLSTATE_LENGTH+1]; /** Severity (error, warning, note) of this condition. */ - MYSQL_ERROR::enum_warning_level m_level; + Sql_condition::enum_warning_level m_level; + + /** Pointers for participating in the list of conditions. */ + Sql_condition *next_in_wi; + Sql_condition **prev_in_wi; /** Memory root to use to hold condition item values. */ MEM_ROOT *m_mem_root; }; -class Sql_condition : public MYSQL_ERROR -{ - /* - Wrapper class to allow one to use Sql_condition in handlers instead of - MYSQL_ERROR - */ -}; - /////////////////////////////////////////////////////////////////////////// /** Information about warnings of the current connection. */ - class Warning_info { + /** The type of the counted and doubly linked list of conditions. 
*/ + typedef I_P_List<Sql_condition, + I_P_List_adapter<Sql_condition, + &Sql_condition::next_in_wi, + &Sql_condition::prev_in_wi>, + I_P_List_counter, + I_P_List_fast_push_back<Sql_condition> > + Sql_condition_list; + /** A memory root to allocate warnings and errors */ MEM_ROOT m_warn_root; /** List of warnings of all severities (levels). */ - List <MYSQL_ERROR> m_warn_list; + Sql_condition_list m_warn_list; /** A break down of the number of warnings per severity (level). */ - uint m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_END]; + uint m_warn_count[(uint) Sql_condition::WARN_LEVEL_END]; /** The number of warnings of the current statement. Warning_info life cycle differs from statement life cycle -- it may span multiple statements. In that case we get - m_statement_warn_count 0, whereas m_warn_list is not empty. + m_current_statement_warn_count 0, whereas m_warn_list is not empty. */ - uint m_statement_warn_count; + uint m_current_statement_warn_count; /* Row counter, to print in errors and warnings. Not increased in @@ -379,29 +260,67 @@ class Warning_info /** Used to optionally clear warnings only once per statement. */ ulonglong m_warn_id; + /** + A pointer to an element of m_warn_list. It determines SQL-condition + instance which corresponds to the error state in Diagnostics_area. + + This is needed for properly processing SQL-conditions in SQL-handlers. + When an SQL-handler is found for the current error state in Diagnostics_area, + this pointer is needed to remove the corresponding SQL-condition from the + Warning_info list. + + @note m_error_condition might be NULL in the following cases: + - Diagnostics_area set to fatal error state (like OOM); + - Max number of Warning_info elements has been reached (thus, there is + no corresponding SQL-condition object in Warning_info). + */ + const Sql_condition *m_error_condition; + /** Indicates if push_warning() allows unlimited number of warnings. 
*/ bool m_allow_unlimited_warnings; bool initialized; /* Set to 1 if init() has been called */ -private: - Warning_info(const Warning_info &rhs); /* Not implemented */ - Warning_info& operator=(const Warning_info &rhs); /* Not implemented */ -public: + /** Read only status. */ + bool m_read_only; + + /** Pointers for participating in the stack of Warning_info objects. */ + Warning_info *m_next_in_da; + Warning_info **m_prev_in_da; + List<Sql_condition> m_marked_sql_conditions; + +public: Warning_info(ulonglong warn_id_arg, bool allow_unlimited_warnings, - bool initialize=true); + bool initialized); ~Warning_info(); - /* Allocate memory for structures */ void init(); void free_memory(); +private: + Warning_info(const Warning_info &rhs); /* Not implemented */ + Warning_info& operator=(const Warning_info &rhs); /* Not implemented */ + + /** + Checks if Warning_info contains SQL-condition with the given message. + + @param message_str Message string. + @param message_length Length of message string. + + @return true if the Warning_info contains an SQL-condition with the given + message. + */ + bool has_sql_condition(const char *message_str, ulong message_length) const; + /** Reset the warning information. Clear all warnings, the number of warnings, reset current row counter to point to the first row. + + @param new_id new Warning_info id. */ - void clear_warning_info(ulonglong warn_id_arg); + void clear(ulonglong new_id); + /** Only clear warning info if haven't yet done that already for the current query. Allows to be issued at any time @@ -410,46 +329,72 @@ public: @todo: This is a sign of sloppy coding. Instead we need to designate one place in a statement life cycle where we call - clear_warning_info(). + Warning_info::clear(). + + @param query_id Current query id. 
*/ - void opt_clear_warning_info(ulonglong query_id) + void opt_clear(ulonglong query_id) { if (query_id != m_warn_id) - clear_warning_info(query_id); - } - - void append_warning_info(THD *thd, Warning_info *source) - { - append_warnings(thd, & source->warn_list()); + clear(query_id); } /** Concatenate the list of warnings. - It's considered tolerable to lose a warning. - */ - void append_warnings(THD *thd, List<MYSQL_ERROR> *src) - { - MYSQL_ERROR *err; - List_iterator_fast<MYSQL_ERROR> it(*src); - /* - Don't use ::push_warning() to avoid invocation of condition - handlers or escalation of warnings to errors. - */ - while ((err= it++)) - Warning_info::push_warning(thd, err); - } - /** - Conditional merge of related warning information areas. + It's considered tolerable to lose an SQL-condition in case of OOM-error, + or if the number of SQL-conditions in the Warning_info reached top limit. + + @param thd Thread context. + @param source Warning_info object to copy SQL-conditions from. */ - void merge_with_routine_info(THD *thd, Warning_info *source); + void append_warning_info(THD *thd, const Warning_info *source); /** Reset between two COM_ commands. Warnings are preserved between commands, but statement_warn_count indicates the number of warnings of this particular statement only. */ - void reset_for_next_command() { m_statement_warn_count= 0; } + void reset_for_next_command() + { m_current_statement_warn_count= 0; } + + /** + Mark active SQL-conditions for later removal. + This is done to simulate stacked DAs for HANDLER statements. + */ + void mark_sql_conditions_for_removal(); + + /** + Unmark SQL-conditions, which were marked for later removal. + This is done to simulate stacked DAs for HANDLER statements. + */ + void unmark_sql_conditions_from_removal() + { m_marked_sql_conditions.empty(); } + + /** + Remove SQL-conditions that are marked for deletion. + This is done to simulate stacked DAs for HANDLER statements. 
+ */ + void remove_marked_sql_conditions(); + + /** + Check if the given SQL-condition is marked for removal in this Warning_info + instance. + + @param cond the SQL-condition. + + @retval true if the given SQL-condition is marked for removal in this + Warning_info instance. + @retval false otherwise. + */ + bool is_marked_for_removal(const Sql_condition *cond) const; + + /** + Mark a single SQL-condition for removal (add the given SQL-condition to the + removal list of this Warning_info instance). + */ + void mark_condition_for_removal(Sql_condition *cond) + { m_marked_sql_conditions.push_back(cond, &m_warn_root); } /** Used for @@warning_count system variable, which prints @@ -458,52 +403,82 @@ public: ulong warn_count() const { /* - This may be higher than warn_list.elements if we have + This may be higher than warn_list.elements() if we have had more warnings than thd->variables.max_error_count. */ - return (m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_NOTE] + - m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR] + - m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_WARN]); + return (m_warn_count[(uint) Sql_condition::WARN_LEVEL_NOTE] + + m_warn_count[(uint) Sql_condition::WARN_LEVEL_ERROR] + + m_warn_count[(uint) Sql_condition::WARN_LEVEL_WARN]); } /** - This is for iteration purposes. We return a non-constant reference - since List doesn't have constant iterators. - */ - List<MYSQL_ERROR> &warn_list() { return m_warn_list; } - - /** The number of errors, or number of rows returned by SHOW ERRORS, also the value of session variable @@error_count. */ ulong error_count() const + { return m_warn_count[(uint) Sql_condition::WARN_LEVEL_ERROR]; } + + /** + The number of conditions (errors, warnings and notes) in the list. + */ + uint cond_count() const { - return m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR]; + return m_warn_list.elements(); } /** Id of the warning information area. 
*/ - ulonglong warn_id() const { return m_warn_id; } + ulonglong id() const { return m_warn_id; } + + /** Set id of the warning information area. */ + void id(ulonglong id) { m_warn_id= id; } /** Do we have any errors and warnings that we can *show*? */ - bool is_empty() const { return m_warn_list.elements == 0; } + bool is_empty() const { return m_warn_list.is_empty(); } /** Increment the current row counter to point at the next row. */ void inc_current_row_for_warning() { m_current_row_for_warning++; } + /** Reset the current row counter. Start counting from the first row. */ void reset_current_row_for_warning() { m_current_row_for_warning= 1; } + /** Return the current counter value. */ ulong current_row_for_warning() const { return m_current_row_for_warning; } - ulong statement_warn_count() const { return m_statement_warn_count; } + /** Return the number of warnings thrown by the current statement. */ + ulong current_statement_warn_count() const + { return m_current_statement_warn_count; } + + /** Make sure there is room for the given number of conditions. */ + void reserve_space(THD *thd, uint count); + + /** + Add a new SQL-condition to the current list and increment the respective + counters. + + @param thd Thread context. + @param sql_errno SQL-condition error number. + @param sqlstate SQL-condition state. + @param level SQL-condition level. + @param msg SQL-condition message. + + @return a pointer to the added SQL-condition. + */ + Sql_condition *push_warning(THD *thd, + uint sql_errno, + const char* sqlstate, + Sql_condition::enum_warning_level level, + const char* msg); + + /** + Add a new SQL-condition to the current list and increment the respective + counters. - /** Add a new condition to the current list. */ - MYSQL_ERROR *push_warning(THD *thd, - uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, - const char* msg); + @param thd Thread context. + @param sql_condition SQL-condition to copy values from. 
- /** Add a new condition to the current list. */ - MYSQL_ERROR *push_warning(THD *thd, const MYSQL_ERROR *sql_condition); + @return a pointer to the added SQL-condition. + */ + Sql_condition *push_warning(THD *thd, const Sql_condition *sql_condition); /** Set the read only status for this statement area. @@ -514,25 +489,51 @@ public: - SHOW WARNINGS - SHOW ERRORS - GET DIAGNOSTICS - @param read_only the read only property to set + @param read_only the read only property to set. */ void set_read_only(bool read_only) { m_read_only= read_only; } /** Read only status. - @return the read only property + @return the read only property. */ bool is_read_only() const { return m_read_only; } -private: - /** Read only status. */ - bool m_read_only; + /** + @return SQL-condition, which corresponds to the error state in + Diagnostics_area. - friend class Resignal_statement; + @see m_error_condition. + */ + const Sql_condition *get_error_condition() const + { return m_error_condition; } + + /** + Set SQL-condition, which corresponds to the error state in Diagnostics_area. + + @see m_error_condition. + */ + void set_error_condition(const Sql_condition *error_condition) + { m_error_condition= error_condition; } + + /** + Reset SQL-condition, which corresponds to the error state in + Diagnostics_area. + + @see m_error_condition. + */ + void clear_error_condition() + { m_error_condition= NULL; } + + // for: + // - m_next_in_da / m_prev_in_da + // - is_marked_for_removal() + friend class Diagnostics_area; }; + extern char *err_conv(char *buff, uint to_length, const char *from, uint from_length, CHARSET_INFO *from_cs); @@ -606,15 +607,353 @@ public: } }; -void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, +/////////////////////////////////////////////////////////////////////////// + +/** + Stores status of the currently executed statement. + Cleared at the beginning of the statement, and then + can hold either OK, ERROR, or EOF status. 
+ Can not be assigned twice per statement. +*/ + +class Diagnostics_area +{ +private: + /** The type of the counted and doubly linked list of conditions. */ + typedef I_P_List<Warning_info, + I_P_List_adapter<Warning_info, + &Warning_info::m_next_in_da, + &Warning_info::m_prev_in_da>, + I_P_List_counter, + I_P_List_fast_push_back<Warning_info> > + Warning_info_list; + +public: + /** Const iterator used to iterate through the warning list. */ + typedef Warning_info::Sql_condition_list::Const_Iterator + Sql_condition_iterator; + + enum enum_diagnostics_status + { + /** The area is cleared at start of a statement. */ + DA_EMPTY= 0, + /** Set whenever one calls my_ok(). */ + DA_OK, + /** Set whenever one calls my_eof(). */ + DA_EOF, + /** Set whenever one calls my_error() or my_message(). */ + DA_ERROR, + /** Set in case of a custom response, such as one from COM_STMT_PREPARE. */ + DA_DISABLED + }; + + void set_overwrite_status(bool can_overwrite_status) + { m_can_overwrite_status= can_overwrite_status; } + + /** True if status information is sent to the client. 
*/ + bool is_sent() const { return m_is_sent; } + + void set_is_sent(bool is_sent) { m_is_sent= is_sent; } + + void set_ok_status(ulonglong affected_rows, + ulonglong last_insert_id, + const char *message); + + void set_eof_status(THD *thd); + + void set_error_status(uint sql_errno); + + void set_error_status(uint sql_errno, + const char *message, + const char *sqlstate, + const Sql_condition *error_condition); + + void disable_status(); + + void reset_diagnostics_area(); + + bool is_set() const { return m_status != DA_EMPTY; } + + bool is_error() const { return m_status == DA_ERROR; } + + bool is_eof() const { return m_status == DA_EOF; } + + bool is_ok() const { return m_status == DA_OK; } + + bool is_disabled() const { return m_status == DA_DISABLED; } + + enum_diagnostics_status status() const { return m_status; } + + const char *message() const + { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; } + + uint sql_errno() const + { DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; } + + const char* get_sqlstate() const + { DBUG_ASSERT(m_status == DA_ERROR); return m_sqlstate; } + + ulonglong affected_rows() const + { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; } + + ulonglong last_insert_id() const + { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; } + + uint statement_warn_count() const + { + DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF); + return m_statement_warn_count; + } + + /* Used to count any warnings pushed after calling set_ok_status(). 
*/ + void increment_warning() + { + if (m_status != DA_EMPTY) + m_statement_warn_count++; + } + + Diagnostics_area(bool initialize); + Diagnostics_area(ulonglong warning_info_id, bool allow_unlimited_warnings, + bool initialize); + void init() { m_main_wi.init() ; } + void free_memory() { m_main_wi.free_memory() ; } + + void push_warning_info(Warning_info *wi) + { m_wi_stack.push_front(wi); } + + void pop_warning_info() + { + DBUG_ASSERT(m_wi_stack.elements() > 0); + m_wi_stack.remove(m_wi_stack.front()); + } + + void set_warning_info_id(ulonglong id) + { get_warning_info()->id(id); } + + ulonglong warning_info_id() const + { return get_warning_info()->id(); } + + /** + Compare given current warning info and current warning info + and see if they are different. They will be different if + warnings have been generated or statements that use tables + have been executed. This is checked by comparing m_warn_id. + + @param wi Warning info to compare with current Warning info. + + @return false if they are equal, true if they are not. 
+ */ + bool warning_info_changed(const Warning_info *wi) const + { return get_warning_info()->id() != wi->id(); } + + bool is_warning_info_empty() const + { return get_warning_info()->is_empty(); } + + ulong current_statement_warn_count() const + { return get_warning_info()->current_statement_warn_count(); } + + bool has_sql_condition(const char *message_str, ulong message_length) const + { return get_warning_info()->has_sql_condition(message_str, message_length); } + + void reset_for_next_command() + { get_warning_info()->reset_for_next_command(); } + + void clear_warning_info(ulonglong id) + { get_warning_info()->clear(id); } + + void opt_clear_warning_info(ulonglong query_id) + { get_warning_info()->opt_clear(query_id); } + + ulong current_row_for_warning() const + { return get_warning_info()->current_row_for_warning(); } + + void inc_current_row_for_warning() + { get_warning_info()->inc_current_row_for_warning(); } + + void reset_current_row_for_warning() + { get_warning_info()->reset_current_row_for_warning(); } + + bool is_warning_info_read_only() const + { return get_warning_info()->is_read_only(); } + + void set_warning_info_read_only(bool read_only) + { get_warning_info()->set_read_only(read_only); } + + ulong error_count() const + { return get_warning_info()->error_count(); } + + ulong warn_count() const + { return get_warning_info()->warn_count(); } + + uint cond_count() const + { return get_warning_info()->cond_count(); } + + Sql_condition_iterator sql_conditions() const + { return get_warning_info()->m_warn_list; } + + void reserve_space(THD *thd, uint count) + { get_warning_info()->reserve_space(thd, count); } + + Sql_condition *push_warning(THD *thd, const Sql_condition *sql_condition) + { return get_warning_info()->push_warning(thd, sql_condition); } + + Sql_condition *push_warning(THD *thd, + uint sql_errno, + const char* sqlstate, + Sql_condition::enum_warning_level level, + const char* msg) + { + return get_warning_info()->push_warning(thd, + 
sql_errno, sqlstate, level, msg); + } + + void mark_sql_conditions_for_removal() + { get_warning_info()->mark_sql_conditions_for_removal(); } + + void unmark_sql_conditions_from_removal() + { get_warning_info()->unmark_sql_conditions_from_removal(); } + + void remove_marked_sql_conditions() + { get_warning_info()->remove_marked_sql_conditions(); } + + const Sql_condition *get_error_condition() const + { return get_warning_info()->get_error_condition(); } + + void copy_sql_conditions_to_wi(THD *thd, Warning_info *dst_wi) const + { dst_wi->append_warning_info(thd, get_warning_info()); } + + void copy_sql_conditions_from_wi(THD *thd, const Warning_info *src_wi) + { get_warning_info()->append_warning_info(thd, src_wi); } + + void copy_non_errors_from_wi(THD *thd, const Warning_info *src_wi); + +private: + Warning_info *get_warning_info() { return m_wi_stack.front(); } + + const Warning_info *get_warning_info() const { return m_wi_stack.front(); } + +private: + /** True if status information is sent to the client. */ + bool m_is_sent; + + /** Set to make set_error_status after set_{ok,eof}_status possible. */ + bool m_can_overwrite_status; + + /** Message buffer. Can be used by OK or ERROR status. */ + char m_message[MYSQL_ERRMSG_SIZE]; + + /** + SQL error number. One of ER_ codes from share/errmsg.txt. + Set by set_error_status. + */ + uint m_sql_errno; + + char m_sqlstate[SQLSTATE_LENGTH+1]; + + /** + The number of rows affected by the last statement. This is + semantically close to thd->row_count_func, but has a different + life cycle. thd->row_count_func stores the value returned by + function ROW_COUNT() and is cleared only by statements that + update its value, such as INSERT, UPDATE, DELETE and few others. + This member is cleared at the beginning of the next statement. + + We could possibly merge the two, but life cycle of thd->row_count_func + can not be changed. 
+ */ + ulonglong m_affected_rows; + + /** + Similarly to the previous member, this is a replacement of + thd->first_successful_insert_id_in_prev_stmt, which is used + to implement LAST_INSERT_ID(). + */ + + ulonglong m_last_insert_id; + /** + Number of warnings of this last statement. May differ from + the number of warnings returned by SHOW WARNINGS e.g. in case + the statement doesn't clear the warnings, and doesn't generate + them. + */ + uint m_statement_warn_count; + + enum_diagnostics_status m_status; + + Warning_info m_main_wi; + + Warning_info_list m_wi_stack; +}; + +/////////////////////////////////////////////////////////////////////////// + + +void push_warning(THD *thd, Sql_condition::enum_warning_level level, uint code, const char *msg); -void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level, - uint code, const char *format, ...); + +void push_warning_printf(THD *thd, Sql_condition::enum_warning_level level, + uint code, const char *format, ...); + bool mysqld_show_warnings(THD *thd, ulong levels_to_show); -uint32 convert_error_message(char *to, uint32 to_length, CHARSET_INFO *to_cs, + +uint32 convert_error_message(char *to, uint32 to_length, + CHARSET_INFO *to_cs, const char *from, uint32 from_length, CHARSET_INFO *from_cs, uint *errors); extern const LEX_STRING warning_level_names[]; +bool is_sqlstate_valid(const LEX_STRING *sqlstate); +/** + Checks if the specified SQL-state-string defines COMPLETION condition. + This function assumes that the given string contains a valid SQL-state. + + @param s the condition SQLSTATE. + + @retval true if the given string defines COMPLETION condition. + @retval false otherwise. +*/ +inline bool is_sqlstate_completion(const char *s) +{ return s[0] == '0' && s[1] == '0'; } + + +/** + Checks if the specified SQL-state-string defines WARNING condition. + This function assumes that the given string contains a valid SQL-state. + + @param s the condition SQLSTATE. 
+ + @retval true if the given string defines WARNING condition. + @retval false otherwise. +*/ +inline bool is_sqlstate_warning(const char *s) +{ return s[0] == '0' && s[1] == '1'; } + + +/** + Checks if the specified SQL-state-string defines NOT FOUND condition. + This function assumes that the given string contains a valid SQL-state. + + @param s the condition SQLSTATE. + + @retval true if the given string defines NOT FOUND condition. + @retval false otherwise. +*/ +inline bool is_sqlstate_not_found(const char *s) +{ return s[0] == '0' && s[1] == '2'; } + + +/** + Checks if the specified SQL-state-string defines EXCEPTION condition. + This function assumes that the given string contains a valid SQL-state. + + @param s the condition SQLSTATE. + + @retval true if the given string defines EXCEPTION condition. + @retval false otherwise. +*/ +inline bool is_sqlstate_exception(const char *s) +{ return s[0] != '0' || s[1] > '2'; } + + #endif // SQL_ERROR_H diff --git a/sql/sql_get_diagnostics.cc b/sql/sql_get_diagnostics.cc new file mode 100644 index 00000000000..be1e3589cc6 --- /dev/null +++ b/sql/sql_get_diagnostics.cc @@ -0,0 +1,340 @@ +/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */ + +#include "sql_list.h" // Sql_alloc, List, List_iterator +#include "sql_cmd.h" // Sql_cmd +#include "sql_class.h" // Diagnostics_area +#include "sql_get_diagnostics.h" // Sql_cmd_get_diagnostics + +/** + Execute this GET DIAGNOSTICS statement. + + @param thd The current thread. + + @remark Errors or warnings occurring during the execution of the GET + DIAGNOSTICS statement should not affect the diagnostics area + of a previous statement as the diagnostics information there + would be wiped out. Thus, in order to preserve the contents + of the diagnostics area from which information is being + retrieved, the GET DIAGNOSTICS statement is executed under + a separate diagnostics area. If any errors or warnings occur + during the execution of the GET DIAGNOSTICS statement, these + error or warnings (conditions) are appended to the list of + the original diagnostics area. The only exception to this is + fatal errors, which must always cause the statement to fail. + + @retval false on success. + @retval true on error +*/ + +bool +Sql_cmd_get_diagnostics::execute(THD *thd) +{ + bool rv; + Diagnostics_area new_stmt_da(thd->query_id, false, true); + Diagnostics_area *save_stmt_da= thd->get_stmt_da(); + DBUG_ENTER("Sql_cmd_get_diagnostics::execute"); + + /* Disable the unneeded read-only mode of the original DA. */ + save_stmt_da->set_warning_info_read_only(false); + + /* Set new diagnostics area, execute statement and restore. */ + thd->set_stmt_da(&new_stmt_da); + rv= m_info->aggregate(thd, save_stmt_da); + thd->set_stmt_da(save_stmt_da); + + /* Bail out early if statement succeeded. */ + if (! rv) + { + thd->get_stmt_da()->set_ok_status(0, 0, NULL); + DBUG_RETURN(false); + } + + /* Statement failed, retrieve the error information for propagation. 
*/ + uint sql_errno= new_stmt_da.sql_errno(); + const char *message= new_stmt_da.message(); + const char *sqlstate= new_stmt_da.get_sqlstate(); + + /* In case of a fatal error, set it into the original DA.*/ + if (thd->is_fatal_error) + { + save_stmt_da->set_error_status(sql_errno, message, sqlstate, NULL); + DBUG_RETURN(true); + } + + /* Otherwise, just append the new error as a exception condition. */ + save_stmt_da->push_warning(thd, sql_errno, sqlstate, + Sql_condition::WARN_LEVEL_ERROR, + message); + + /* Appending might have failed. */ + if (! (rv= thd->is_error())) + thd->get_stmt_da()->set_ok_status(0, 0, NULL); + + DBUG_RETURN(rv); +} + + +/** + Set a value for this item. + + @param thd The current thread. + @param value The obtained value. + + @retval false on success. + @retval true on error. +*/ + +bool +Diagnostics_information_item::set_value(THD *thd, Item **value) +{ + bool rv; + Settable_routine_parameter *srp; + DBUG_ENTER("Diagnostics_information_item::set_value"); + + /* Get a settable reference to the target. */ + srp= m_target->get_settable_routine_parameter(); + + DBUG_ASSERT(srp); + + /* Set variable/parameter value. */ + rv= srp->set_value(thd, thd->spcont, value); + + DBUG_RETURN(rv); +} + + +/** + Obtain statement information in the context of a given diagnostics area. + + @param thd The current thread. + @param da The diagnostics area. + + @retval false on success. + @retval true on error +*/ + +bool +Statement_information::aggregate(THD *thd, const Diagnostics_area *da) +{ + bool rv= false; + Statement_information_item *stmt_info_item; + List_iterator<Statement_information_item> it(*m_items); + DBUG_ENTER("Statement_information::aggregate"); + + /* + Each specified target gets the value of each given + information item obtained from the diagnostics area. 
+ */ + while ((stmt_info_item= it++)) + { + if ((rv= evaluate(thd, stmt_info_item, da))) + break; + } + + DBUG_RETURN(rv); +} + + +/** + Obtain the value of this statement information item in the context of + a given diagnostics area. + + @param thd The current thread. + @param da The diagnostics area. + + @retval Item representing the value. + @retval NULL on error. +*/ + +Item * +Statement_information_item::get_value(THD *thd, const Diagnostics_area *da) +{ + Item *value= NULL; + DBUG_ENTER("Statement_information_item::get_value"); + + switch (m_name) + { + /* + The number of condition areas that have information. That is, + the number of errors and warnings within the diagnostics area. + */ + case NUMBER: + { + ulong count= da->cond_count(); + value= new (thd->mem_root) Item_uint(count); + break; + } + /* + Number that shows how many rows were directly affected by + a data-change statement (INSERT, UPDATE, DELETE, MERGE, + REPLACE, LOAD). + */ + case ROW_COUNT: + value= new (thd->mem_root) Item_int(thd->get_row_count_func()); + break; + } + + DBUG_RETURN(value); +} + + +/** + Obtain condition information in the context of a given diagnostics area. + + @param thd The current thread. + @param da The diagnostics area. + + @retval false on success. + @retval true on error +*/ + +bool +Condition_information::aggregate(THD *thd, const Diagnostics_area *da) +{ + bool rv= false; + longlong cond_number; + const Sql_condition *cond= NULL; + Condition_information_item *cond_info_item; + Diagnostics_area::Sql_condition_iterator it_conds= da->sql_conditions(); + List_iterator_fast<Condition_information_item> it_items(*m_items); + DBUG_ENTER("Condition_information::aggregate"); + + /* Prepare the expression for evaluation. */ + if (!m_cond_number_expr->fixed && + m_cond_number_expr->fix_fields(thd, &m_cond_number_expr)) + DBUG_RETURN(true); + + cond_number= m_cond_number_expr->val_int(); + + /* + Limit to the number of available conditions. 
Warning_info::warn_count() + is not used because it indicates the number of condition regardless of + @@max_error_count, which prevents conditions from being pushed, but not + counted. + */ + if (cond_number < 1 || (ulonglong) cond_number > da->cond_count()) + { + my_error(ER_DA_INVALID_CONDITION_NUMBER, MYF(0)); + DBUG_RETURN(true); + } + + /* Advance to the requested condition. */ + while (cond_number--) + cond= it_conds++; + + DBUG_ASSERT(cond); + + /* Evaluate the requested information in the context of the condition. */ + while ((cond_info_item= it_items++)) + { + if ((rv= evaluate(thd, cond_info_item, cond))) + break; + } + + DBUG_RETURN(rv); +} + + +/** + Create an UTF-8 string item to represent a condition item string. + + @remark The string might not have a associated charset. For example, + this can be the case if the server does not or fails to process + the error message file. + + @remark See "Design notes about Sql_condition::m_message_text." in sql_error.cc + + @return Pointer to an string item, NULL on failure. +*/ + +Item * +Condition_information_item::make_utf8_string_item(THD *thd, const String *str) +{ + /* Default is utf8 character set and utf8_general_ci collation. */ + CHARSET_INFO *to_cs= &my_charset_utf8_general_ci; + /* If a charset was not set, assume that no conversion is needed. */ + CHARSET_INFO *from_cs= str->charset() ? str->charset() : to_cs; + Item_string *item= new Item_string(str->ptr(), str->length(), from_cs); + /* If necessary, convert the string (ignoring errors), then copy it over. */ + return item ? item->charset_converter(to_cs, false) : NULL; +} + + +/** + Obtain the value of this condition information item in the context of + a given condition. + + @param thd The current thread. + @param da The diagnostics area. + + @retval Item representing the value. + @retval NULL on error. 
+*/ + +Item * +Condition_information_item::get_value(THD *thd, const Sql_condition *cond) +{ + String str; + Item *value= NULL; + DBUG_ENTER("Condition_information_item::get_value"); + + switch (m_name) + { + case CLASS_ORIGIN: + value= make_utf8_string_item(thd, &(cond->m_class_origin)); + break; + case SUBCLASS_ORIGIN: + value= make_utf8_string_item(thd, &(cond->m_subclass_origin)); + break; + case CONSTRAINT_CATALOG: + value= make_utf8_string_item(thd, &(cond->m_constraint_catalog)); + break; + case CONSTRAINT_SCHEMA: + value= make_utf8_string_item(thd, &(cond->m_constraint_schema)); + break; + case CONSTRAINT_NAME: + value= make_utf8_string_item(thd, &(cond->m_constraint_name)); + break; + case CATALOG_NAME: + value= make_utf8_string_item(thd, &(cond->m_catalog_name)); + break; + case SCHEMA_NAME: + value= make_utf8_string_item(thd, &(cond->m_schema_name)); + break; + case TABLE_NAME: + value= make_utf8_string_item(thd, &(cond->m_table_name)); + break; + case COLUMN_NAME: + value= make_utf8_string_item(thd, &(cond->m_column_name)); + break; + case CURSOR_NAME: + value= make_utf8_string_item(thd, &(cond->m_cursor_name)); + break; + case MESSAGE_TEXT: + value= make_utf8_string_item(thd, &(cond->m_message_text)); + break; + case MYSQL_ERRNO: + value= new (thd->mem_root) Item_uint(cond->m_sql_errno); + break; + case RETURNED_SQLSTATE: + str.set_ascii(cond->get_sqlstate(), strlen(cond->get_sqlstate())); + value= make_utf8_string_item(thd, &str); + break; + } + + DBUG_RETURN(value); +} + diff --git a/sql/sql_get_diagnostics.h b/sql/sql_get_diagnostics.h new file mode 100644 index 00000000000..f34820757f5 --- /dev/null +++ b/sql/sql_get_diagnostics.h @@ -0,0 +1,318 @@ +/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */ + +#ifndef SQL_GET_DIAGNOSTICS_H +#define SQL_GET_DIAGNOSTICS_H + +/** Diagnostics information forward reference. */ +class Diagnostics_information; + + +/** + Sql_cmd_get_diagnostics represents a GET DIAGNOSTICS statement. + + The GET DIAGNOSTICS statement retrieves exception or completion + condition information from a diagnostics area, usually pertaining + to the last non-diagnostic SQL statement that was executed. +*/ +class Sql_cmd_get_diagnostics : public Sql_cmd +{ +public: + /** + Constructor, used to represent a GET DIAGNOSTICS statement. + + @param info Diagnostics information to be obtained. + */ + Sql_cmd_get_diagnostics(Diagnostics_information *info) + : m_info(info) + {} + + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_GET_DIAGNOSTICS; + } + + virtual bool execute(THD *thd); + +private: + /** The information to be obtained. */ + Diagnostics_information *m_info; +}; + + +/** + Represents the diagnostics information to be obtained. + + Diagnostic information is made available through statement + information and condition information items. +*/ +class Diagnostics_information : public Sql_alloc +{ +public: + /** + Which diagnostics area to access. + Only CURRENT is supported for now. + */ + enum Which_area + { + /** Access the first diagnostics area. */ + CURRENT_AREA + }; + + /** Set which diagnostics area to access. */ + void set_which_da(Which_area area) + { m_area= area; } + + /** Get which diagnostics area to access. 
*/ + Which_area get_which_da(void) const + { return m_area; } + + /** + Aggregate diagnostics information. + + @param thd The current thread. + @param da The diagnostics area. + + @retval false on success. + @retval true on error + */ + virtual bool aggregate(THD *thd, const Diagnostics_area *da) = 0; + +protected: + /** + Diagnostics_information objects are allocated in thd->mem_root. + Do not rely on the destructor for any cleanup. + */ + virtual ~Diagnostics_information() + { + DBUG_ASSERT(false); + } + + /** + Evaluate a diagnostics information item in a specific context. + + @param thd The current thread. + @param diag_item The diagnostics information item. + @param ctx The context to evaluate the item. + + @retval false on success. + @retval true on error. + */ + template <typename Diag_item, typename Context> + bool evaluate(THD *thd, Diag_item *diag_item, Context ctx) + { + Item *value; + + /* Get this item's value. */ + if (! (value= diag_item->get_value(thd, ctx))) + return true; + + /* Set variable/parameter value. */ + return diag_item->set_value(thd, &value); + } + +private: + /** Which diagnostics area to access. */ + Which_area m_area; +}; + + +/** + A diagnostics information item. Used to associate a specific + diagnostics information item to a target variable. +*/ +class Diagnostics_information_item : public Sql_alloc +{ +public: + /** + Set a value for this item. + + @param thd The current thread. + @param value The obtained value. + + @retval false on success. + @retval true on error. + */ + bool set_value(THD *thd, Item **value); + +protected: + /** + Constructor, used to represent a diagnostics information item. + + @param target A target that gets the value of this item. + */ + Diagnostics_information_item(Item *target) + : m_target(target) + {} + + /** + Diagnostics_information_item objects are allocated in thd->mem_root. + Do not rely on the destructor for any cleanup. 
+ */ + virtual ~Diagnostics_information_item() + { + DBUG_ASSERT(false); + } + +private: + /** The target variable that will receive the value of this item. */ + Item *m_target; +}; + + +/** + A statement information item. +*/ +class Statement_information_item : public Diagnostics_information_item +{ +public: + /** The name of a statement information item. */ + enum Name + { + NUMBER, + ROW_COUNT + }; + + /** + Constructor, used to represent a statement information item. + + @param name The name of this item. + @param target A target that gets the value of this item. + */ + Statement_information_item(Name name, Item *target) + : Diagnostics_information_item(target), m_name(name) + {} + + /** Obtain value of this statement information item. */ + Item *get_value(THD *thd, const Diagnostics_area *da); + +private: + /** The name of this statement information item. */ + Name m_name; +}; + + +/** + Statement information. + + @remark Provides information about the execution of a statement. +*/ +class Statement_information : public Diagnostics_information +{ +public: + /** + Constructor, used to represent the statement information of a + GET DIAGNOSTICS statement. + + @param items List of requested statement information items. + */ + Statement_information(List<Statement_information_item> *items) + : m_items(items) + {} + + /** Obtain statement information in the context of a diagnostics area. */ + bool aggregate(THD *thd, const Diagnostics_area *da); + +private: + /* List of statement information items. */ + List<Statement_information_item> *m_items; +}; + + +/** + A condition information item. +*/ +class Condition_information_item : public Diagnostics_information_item +{ +public: + /** + The name of a condition information item. 
+ */ + enum Name + { + CLASS_ORIGIN, + SUBCLASS_ORIGIN, + CONSTRAINT_CATALOG, + CONSTRAINT_SCHEMA, + CONSTRAINT_NAME, + CATALOG_NAME, + SCHEMA_NAME, + TABLE_NAME, + COLUMN_NAME, + CURSOR_NAME, + MESSAGE_TEXT, + MYSQL_ERRNO, + RETURNED_SQLSTATE + }; + + /** + Constructor, used to represent a condition information item. + + @param name The name of this item. + @param target A target that gets the value of this item. + */ + Condition_information_item(Name name, Item *target) + : Diagnostics_information_item(target), m_name(name) + {} + + /** Obtain value of this condition information item. */ + Item *get_value(THD *thd, const Sql_condition *cond); + +private: + /** The name of this condition information item. */ + Name m_name; + + /** Create an string item to represent a condition item string. */ + Item *make_utf8_string_item(THD *thd, const String *str); +}; + + +/** + Condition information. + + @remark Provides information about conditions raised during the + execution of a statement. +*/ +class Condition_information : public Diagnostics_information +{ +public: + /** + Constructor, used to represent the condition information of a + GET DIAGNOSTICS statement. + + @param cond_number_expr Number that identifies the diagnostic condition. + @param items List of requested condition information items. + */ + Condition_information(Item *cond_number_expr, + List<Condition_information_item> *items) + : m_cond_number_expr(cond_number_expr), m_items(items) + {} + + /** Obtain condition information in the context of a diagnostics area. */ + bool aggregate(THD *thd, const Diagnostics_area *da); + +private: + /** + Number that identifies the diagnostic condition for which + information is to be obtained. + */ + Item *m_cond_number_expr; + + /** List of condition information items. 
*/ + List<Condition_information_item> *m_items; +}; + +#endif + diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 3c4804c523a..4187327d622 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -171,7 +171,7 @@ static void mysql_ha_close_table(SQL_HANDLER *handler) table->file->ha_index_or_rnd_end(); table->open_by_handler= 0; - (void) close_thread_table(thd, &table); + close_thread_table(thd, &table); thd->mdl_context.release_lock(handler->mdl_request.ticket); } else @@ -294,7 +294,8 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen) open_ltable() or open_table() because we would like to be able to open a temporary table. */ - error= open_tables(thd, &tables, &counter, 0); + error= (open_temporary_tables(thd, tables) || + open_tables(thd, &tables, &counter, 0)); if (error) goto err; @@ -502,9 +503,9 @@ public: bool handle_condition(THD *thd, uint sql_errno, const char *sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR **cond_hdl); + Sql_condition **cond_hdl); bool need_reopen() const { return m_need_reopen; }; void init() { m_need_reopen= FALSE; }; @@ -523,9 +524,9 @@ Sql_handler_lock_error_handler:: handle_condition(THD *thd, uint sql_errno, const char *sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR **cond_hdl) + Sql_condition **cond_hdl) { *cond_hdl= NULL; if (sql_errno == ER_LOCK_ABORTED) @@ -640,9 +641,10 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, key_part_map keypart_map; uint key_len; - if (key_expr->elements > keyinfo->key_parts) + if (key_expr->elements > keyinfo->user_defined_key_parts) { - my_error(ER_TOO_MANY_KEY_PARTS, MYF(0), keyinfo->key_parts); + my_error(ER_TOO_MANY_KEY_PARTS, MYF(0), + keyinfo->user_defined_key_parts); return 1; } for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 
2eda80e8b36..b5178f865d1 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -957,7 +957,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, error=write_record(thd, table ,&info); if (error) break; - thd->warning_info->inc_current_row_for_warning(); + thd->get_stmt_da()->inc_current_row_for_warning(); } free_underlaid_joins(thd, &thd->lex->select_lex); @@ -1114,11 +1114,11 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (lock_type == TL_WRITE_DELAYED) ? (ulong) 0 : (ulong) (info.records - info.copied), - (ulong) thd->warning_info->statement_warn_count()); + (long) thd->get_stmt_da()->current_statement_warn_count()); else sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted + updated), - (ulong) thd->warning_info->statement_warn_count()); + (long) thd->get_stmt_da()->current_statement_warn_count()); ::my_ok(thd, info.copied + info.deleted + updated, id, buff); } thd->abort_on_warning= 0; @@ -1203,7 +1203,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view) } Item_field *field; /* simple SELECT list entry (field without expression) */ - if (!(field= trans->item->filed_for_view_update())) + if (!(field= trans->item->field_for_view_update())) { thd->mark_used_columns= save_mark_used_columns; DBUG_RETURN(TRUE); @@ -1639,7 +1639,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) } } key_copy((uchar*) key,table->record[0],table->key_info+key_nr,0); - key_part_map keypart_map= (1 << table->key_info[key_nr].key_parts) - 1; + key_part_map keypart_map= (1 << table->key_info[key_nr].user_defined_key_parts) - 1; if ((error= (table->file->ha_index_read_idx_map(table->record[1], key_nr, (uchar*) key, keypart_map, @@ -1888,7 +1888,7 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry, } if (view) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 
ER_NO_DEFAULT_FOR_VIEW_FIELD, ER(ER_NO_DEFAULT_FOR_VIEW_FIELD), table_list->view_db.str, @@ -1896,7 +1896,7 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry, } else { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_NO_DEFAULT_FOR_FIELD, ER(ER_NO_DEFAULT_FOR_FIELD), (*field)->field_name); @@ -2246,7 +2246,8 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request, want to send "Server shutdown in progress" in the INSERT THREAD. */ - my_message(di->thd.stmt_da->sql_errno(), di->thd.stmt_da->message(), + my_message(di->thd.get_stmt_da()->sql_errno(), + di->thd.get_stmt_da()->message(), MYF(0)); } di->unlock(); @@ -2336,7 +2337,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd) if (!thd.is_error()) my_message(ER_QUERY_INTERRUPTED, ER(ER_QUERY_INTERRUPTED), MYF(0)); else - my_message(thd.stmt_da->sql_errno(), thd.stmt_da->message(), MYF(0)); + my_message(thd.get_stmt_da()->sql_errno(), + thd.get_stmt_da()->message(), MYF(0)); goto error; } } @@ -2758,8 +2760,7 @@ pthread_handler_t handle_delayed_insert(void *arg) if (my_thread_init()) { /* Can't use my_error since store_globals has not yet been called */ - thd->stmt_da->set_error_status(thd, ER_OUT_OF_RESOURCES, - ER(ER_OUT_OF_RESOURCES), NULL); + thd->get_stmt_da()->set_error_status(ER_OUT_OF_RESOURCES); di->handler_thread_initialized= TRUE; } else @@ -2769,8 +2770,7 @@ pthread_handler_t handle_delayed_insert(void *arg) if (init_thr_lock() || thd->store_globals()) { /* Can't use my_error since store_globals has perhaps failed */ - thd->stmt_da->set_error_status(thd, ER_OUT_OF_RESOURCES, - ER(ER_OUT_OF_RESOURCES), NULL); + thd->get_stmt_da()->set_error_status(ER_OUT_OF_RESOURCES); di->handler_thread_initialized= TRUE; thd->fatal_error(); goto err; @@ -3160,7 +3160,7 @@ bool Delayed_insert::handle_inserts(void) { /* This should never happen */ table->file->print_error(error,MYF(0)); - sql_print_error("%s", 
thd.stmt_da->message()); + sql_print_error("%s", thd.get_stmt_da()->message()); DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed in loop")); goto err; } @@ -3206,7 +3206,7 @@ bool Delayed_insert::handle_inserts(void) if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) { // This shouldn't happen table->file->print_error(error,MYF(0)); - sql_print_error("%s", thd.stmt_da->message()); + sql_print_error("%s", thd.get_stmt_da()->message()); DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed after loop")); goto err; } @@ -3644,7 +3644,7 @@ bool select_insert::send_eof() error= (thd->locked_tables_mode <= LTM_LOCK_TABLES ? table->file->ha_end_bulk_insert() : 0); if (!error && thd->is_error()) - error= thd->stmt_da->sql_errno(); + error= thd->get_stmt_da()->sql_errno(); table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); @@ -3697,11 +3697,11 @@ bool select_insert::send_eof() if (info.ignore) sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.records - info.copied), - (ulong) thd->warning_info->statement_warn_count()); + (long) thd->get_stmt_da()->current_statement_warn_count()); else sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted+info.updated), - (ulong) thd->warning_info->statement_warn_count()); + (long) thd->get_stmt_da()->current_statement_warn_count()); row_count= info.copied + info.deleted + ((thd->client_capabilities & CLIENT_FOUND_ROWS) ? 
info.touched : info.updated); @@ -3839,7 +3839,6 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, init_tmp_table_share(thd, &share, "", 0, "", ""); tmp_table.s->db_create_options=0; - tmp_table.s->blob_ptr_size= portable_sizeof_char_ptr; tmp_table.null_row= 0; tmp_table.maybe_null= 0; @@ -3905,7 +3904,7 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, */ if (open_table(thd, create_table, thd->mem_root, &ot_ctx)) { - quick_rm_table(create_info->db_type, create_table->db, + quick_rm_table(thd, create_info->db_type, create_table->db, table_case_name(create_info, create_table->table_name), 0); } @@ -3914,15 +3913,14 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, } else { - Open_table_context ot_ctx(thd, MYSQL_OPEN_TEMPORARY_ONLY); - if (open_table(thd, create_table, thd->mem_root, &ot_ctx)) + if (open_temporary_table(thd, create_table)) { /* This shouldn't happen as creation of temporary table should make - it preparable for open. But let us do close_temporary_table() here - just in case. + it preparable for open. Anyway we can't drop temporary table if + we are unable to find it. */ - drop_temporary_table(thd, create_table, NULL); + DBUG_ASSERT(0); } else table= create_table->table; diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index 9fca8730cb5..0acccfcee48 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -696,7 +696,7 @@ void JOIN_CACHE::set_constants() pack_length_with_blob_ptrs= pack_length + blobs*sizeof(uchar *); min_buff_size= 0; min_records= 1; - buff_size= max(join->thd->variables.join_buff_size, + buff_size= MY_MAX(join->thd->variables.join_buff_size, get_min_join_buffer_size()); size_of_rec_ofs= offset_size(buff_size); size_of_rec_len= blobs ? 
size_of_rec_ofs : offset_size(len); @@ -2739,7 +2739,7 @@ int JOIN_CACHE_HASHED::init_hash_table() key_entries= 0; /* Calculate the minimal possible value of size_of_key_ofs greater than 1 */ - uint max_size_of_key_ofs= max(2, get_size_of_rec_offset()); + uint max_size_of_key_ofs= MY_MAX(2, get_size_of_rec_offset()); for (size_of_key_ofs= 2; size_of_key_ofs <= max_size_of_key_ofs; size_of_key_ofs+= 2) diff --git a/sql/sql_join_cache.h b/sql/sql_join_cache.h index 6953f6881ee..1c56fc9b178 100644 --- a/sql/sql_join_cache.h +++ b/sql/sql_join_cache.h @@ -420,7 +420,7 @@ protected: /* Shall calculate how much space is remaining in the join buffer */ virtual size_t rem_space() { - return max(buff_size-(end_pos-buff)-aux_buff_size,0); + return MY_MAX(buff_size-(end_pos-buff)-aux_buff_size,0); } /* @@ -943,7 +943,7 @@ protected: */ size_t rem_space() { - return max(last_key_entry-end_pos-aux_buff_size,0); + return MY_MAX(last_key_entry-end_pos-aux_buff_size,0); } /* diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 501d846421d..1bf0d49214e 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -494,12 +494,13 @@ void lex_start(THD *thd) lex->select_lex.ftfunc_list= &lex->select_lex.ftfunc_list_alloc; lex->select_lex.group_list.empty(); lex->select_lex.order_list.empty(); + lex->m_sql_cmd= NULL; lex->duplicates= DUP_ERROR; lex->ignore= 0; lex->spname= NULL; lex->sphead= NULL; lex->spcont= NULL; - lex->m_stmt= NULL; + lex->m_sql_cmd= NULL; lex->proc_list.first= 0; lex->escape_used= FALSE; lex->query_tables= 0; @@ -509,6 +510,7 @@ void lex_start(THD *thd) lex->parse_vcol_expr= FALSE; lex->check_exists= FALSE; lex->verbose= 0; + lex->contains_plaintext_password= false; lex->name.str= 0; lex->name.length= 0; @@ -1747,50 +1749,6 @@ int lex_one_token(void *arg, void *yythd) } -/** - Construct a copy of this object to be used for mysql_alter_table - and mysql_create_table. - - Historically, these two functions modify their Alter_info - arguments. 
This behaviour breaks re-execution of prepared - statements and stored procedures and is compensated by always - supplying a copy of Alter_info to these functions. - - @return You need to use check the error in THD for out - of memory condition after calling this function. -*/ - -Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root) - :drop_list(rhs.drop_list, mem_root), - alter_list(rhs.alter_list, mem_root), - key_list(rhs.key_list, mem_root), - create_list(rhs.create_list, mem_root), - flags(rhs.flags), - keys_onoff(rhs.keys_onoff), - tablespace_op(rhs.tablespace_op), - partition_names(rhs.partition_names, mem_root), - num_parts(rhs.num_parts), - change_level(rhs.change_level), - datetime_field(rhs.datetime_field), - error_if_not_empty(rhs.error_if_not_empty) -{ - /* - Make deep copies of used objects. - This is not a fully deep copy - clone() implementations - of Alter_drop, Alter_column, Key, foreign_key, Key_part_spec - do not copy string constants. At the same length the only - reason we make a copy currently is that ALTER/CREATE TABLE - code changes input Alter_info definitions, but string - constants never change. 
- */ - list_copy_and_replace_each_value(drop_list, mem_root); - list_copy_and_replace_each_value(alter_list, mem_root); - list_copy_and_replace_each_value(key_list, mem_root); - list_copy_and_replace_each_value(create_list, mem_root); - /* partition_names are not deeply copied currently */ -} - - void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str) { /* @@ -2200,12 +2158,13 @@ bool st_select_lex_node::inc_in_sum_expr() { return 1; } uint st_select_lex_node::get_in_sum_expr() { return 0; } TABLE_LIST* st_select_lex_node::get_table_list() { return 0; } List<Item>* st_select_lex_node::get_item_list() { return 0; } -TABLE_LIST *st_select_lex_node::add_table_to_list (THD *thd, Table_ident *table, +TABLE_LIST *st_select_lex_node::add_table_to_list(THD *thd, Table_ident *table, LEX_STRING *alias, ulong table_join_options, thr_lock_type flags, enum_mdl_type mdl_type, List<Index_hint> *hints, + List<String> *partition_names, LEX_STRING *option) { return 0; @@ -4323,8 +4282,8 @@ int st_select_lex_unit::print_explain(select_result_sink *output, bool LEX::is_partition_management() const { return (sql_command == SQLCOM_ALTER_TABLE && - (alter_info.flags == ALTER_ADD_PARTITION || - alter_info.flags == ALTER_REORGANIZE_PARTITION)); + (alter_info.flags == Alter_info::ALTER_ADD_PARTITION || + alter_info.flags == Alter_info::ALTER_REORGANIZE_PARTITION)); } #ifdef MYSQL_SERVER diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 0802adc94aa..59f7c122646 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -27,6 +27,7 @@ #include "thr_lock.h" /* thr_lock_type, TL_UNLOCK */ #include "mem_root_array.h" #include "sql_cmd.h" +#include "sql_alter.h" // Alter_info /* YACC and LEX Definitions */ @@ -44,9 +45,6 @@ class Event_parse_data; class set_var_base; class sys_var; class Item_func_match; -class Alter_drop; -class Alter_column; -class Key; class File_parser; class Key_part_spec; @@ -117,6 +115,7 @@ struct sys_var_with_base #include "lex_symbol.h" #if MYSQL_LEX #include "item_func.h" /* 
Cast_target used in sql_yacc.h */ +#include "sql_get_diagnostics.h" /* Types used in sql_yacc.h */ #include "sql_yacc.h" #define LEX_YYSTYPE YYSTYPE * #else @@ -265,11 +264,6 @@ enum olap_type UNSPECIFIED_OLAP_TYPE, CUBE_TYPE, ROLLUP_TYPE }; -enum tablespace_op_type -{ - NO_TABLESPACE_OP, DISCARD_TABLESPACE, IMPORT_TABLESPACE -}; - /* String names used to print a statement with index hints. Keep in sync with index_hint_type. @@ -513,6 +507,7 @@ public: thr_lock_type flags= TL_UNLOCK, enum_mdl_type mdl_type= MDL_SHARED_READ, List<Index_hint> *hints= 0, + List<String> *partition_names= 0, LEX_STRING *option= 0); virtual void set_lock_for_tables(thr_lock_type lock_type) {} @@ -876,6 +871,7 @@ public: thr_lock_type flags= TL_UNLOCK, enum_mdl_type mdl_type= MDL_SHARED_READ, List<Index_hint> *hints= 0, + List<String> *partition_names= 0, LEX_STRING *option= 0); TABLE_LIST* get_table_list(); bool init_nested_join(THD *thd); @@ -1007,110 +1003,6 @@ inline bool st_select_lex_unit::is_union () first_select()->next_select()->linkage == UNION_TYPE; } -#define ALTER_ADD_COLUMN (1L << 0) -#define ALTER_DROP_COLUMN (1L << 1) -#define ALTER_CHANGE_COLUMN (1L << 2) -#define ALTER_ADD_INDEX (1L << 3) -#define ALTER_DROP_INDEX (1L << 4) -#define ALTER_RENAME (1L << 5) -#define ALTER_ORDER (1L << 6) -#define ALTER_OPTIONS (1L << 7) -#define ALTER_CHANGE_COLUMN_DEFAULT (1L << 8) -#define ALTER_KEYS_ONOFF (1L << 9) -#define ALTER_CONVERT (1L << 10) -#define ALTER_RECREATE (1L << 11) -#define ALTER_ADD_PARTITION (1L << 12) -#define ALTER_DROP_PARTITION (1L << 13) -#define ALTER_COALESCE_PARTITION (1L << 14) -#define ALTER_REORGANIZE_PARTITION (1L << 15) -#define ALTER_PARTITION (1L << 16) -#define ALTER_ADMIN_PARTITION (1L << 17) -#define ALTER_TABLE_REORG (1L << 18) -#define ALTER_REBUILD_PARTITION (1L << 19) -#define ALTER_ALL_PARTITION (1L << 20) -#define ALTER_REMOVE_PARTITIONING (1L << 21) -#define ALTER_FOREIGN_KEY (1L << 22) -#define ALTER_TRUNCATE_PARTITION (1L << 23) - -enum 
enum_alter_table_change_level -{ - ALTER_TABLE_METADATA_ONLY= 0, - ALTER_TABLE_DATA_CHANGED= 1, - ALTER_TABLE_INDEX_CHANGED= 2 -}; - - -/** - Temporary hack to enable a class bound forward declaration - of the enum_alter_table_change_level enumeration. To be - removed once Alter_info is moved to the sql_alter.h - header. -*/ -class Alter_table_change_level -{ -private: - typedef enum enum_alter_table_change_level enum_type; - enum_type value; -public: - void operator = (enum_type v) { value = v; } - operator enum_type () { return value; } -}; - - -/** - @brief Parsing data for CREATE or ALTER TABLE. - - This structure contains a list of columns or indexes to be created, - altered or dropped. -*/ - -class Alter_info -{ -public: - List<Alter_drop> drop_list; - List<Alter_column> alter_list; - List<Key> key_list; - List<Create_field> create_list; - uint flags; - enum enum_enable_or_disable keys_onoff; - enum tablespace_op_type tablespace_op; - List<char> partition_names; - uint num_parts; - enum_alter_table_change_level change_level; - Create_field *datetime_field; - bool error_if_not_empty; - - - Alter_info() : - flags(0), - keys_onoff(LEAVE_AS_IS), - tablespace_op(NO_TABLESPACE_OP), - num_parts(0), - change_level(ALTER_TABLE_METADATA_ONLY), - datetime_field(NULL), - error_if_not_empty(FALSE) - {} - - void reset() - { - drop_list.empty(); - alter_list.empty(); - key_list.empty(); - create_list.empty(); - flags= 0; - keys_onoff= LEAVE_AS_IS; - tablespace_op= NO_TABLESPACE_OP; - num_parts= 0; - partition_names.empty(); - change_level= ALTER_TABLE_METADATA_ONLY; - datetime_field= 0; - error_if_not_empty= FALSE; - } - Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root); -private: - Alter_info &operator=(const Alter_info &rhs); // not implemented - Alter_info(const Alter_info &rhs); // not implemented -}; struct st_sp_chistics { @@ -1193,7 +1085,38 @@ public: Sroutine_hash_entry **sroutines_list_own_last; uint sroutines_list_own_elements; - /* + /** + Locking state of 
tables in this particular statement. + + If we under LOCK TABLES or in prelocked mode we consider tables + for the statement to be "locked" if there was a call to lock_tables() + (which called handler::start_stmt()) for tables of this statement + and there was no matching close_thread_tables() call. + + As result this state may differ significantly from one represented + by Open_tables_state::lock/locked_tables_mode more, which are always + "on" under LOCK TABLES or in prelocked mode. + */ + enum enum_lock_tables_state { + LTS_NOT_LOCKED = 0, + LTS_LOCKED + }; + enum_lock_tables_state lock_tables_state; + bool is_query_tables_locked() + { + return (lock_tables_state == LTS_LOCKED); + } + + /** + Number of tables which were open by open_tables() and to be locked + by lock_tables(). + Note that we set this member only in some cases, when this value + needs to be passed from open_tables() to lock_tables() which are + separated by some amount of code. + */ + uint table_count; + + /* These constructor and destructor serve for creation/destruction of Query_tables_list instances which are used as backup storage. 
*/ @@ -2393,7 +2316,7 @@ struct LEX: public Query_tables_list */ nesting_map allow_sum_func; - Sql_statement *m_stmt; + Sql_cmd *m_sql_cmd; /* Usually `expr` rule of yacc is quite reused but some commands better @@ -2456,7 +2379,7 @@ struct LEX: public Query_tables_list enum enum_yes_no_unknown tx_chain, tx_release; bool safe_to_cache_query; - bool subqueries, ignore, online; + bool subqueries, ignore; st_parsing_options parsing_options; Alter_info alter_info; /* @@ -2481,6 +2404,8 @@ struct LEX: public Query_tables_list bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */ bool all_privileges; bool proxy_priv; + bool contains_plaintext_password; + sp_pcontext *spcont; st_sp_chistics sp_chistics; diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 78e88e3ede2..0d0efb0c21f 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -218,7 +218,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, !field_term->is_ascii() || !ex->line_term->is_ascii() || !ex->line_start->is_ascii()) { - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED, ER(WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED)); } @@ -588,7 +588,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, } sprintf(name, ER(ER_LOAD_INFO), (ulong) info.records, (ulong) info.deleted, (ulong) (info.records - info.copied), - (ulong) thd->warning_info->statement_warn_count()); + (long) thd->get_stmt_da()->current_statement_warn_count()); if (thd->transaction.stmt.modified_non_trans_table) thd->transaction.all.modified_non_trans_table= TRUE; @@ -829,10 +829,10 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (pos == read_info.row_end) { thd->cuted_fields++; /* Not enough fields */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_TOO_FEW_RECORDS, ER(ER_WARN_TOO_FEW_RECORDS), - 
thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); /* Timestamp fields that are NOT NULL are autoupdated if there is no corresponding value in the data file. @@ -859,10 +859,10 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (pos != read_info.row_end) { thd->cuted_fields++; /* To long row */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); } if (thd->killed || @@ -895,12 +895,12 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (read_info.line_cuted) { thd->cuted_fields++; /* To long row */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); } - thd->warning_info->inc_current_row_for_warning(); + thd->get_stmt_da()->inc_current_row_for_warning(); continue_loop:; } DBUG_RETURN(test(read_info.error)); @@ -980,7 +980,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (field->reset()) { my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0), field->field_name, - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); DBUG_RETURN(1); } field->set_null(); @@ -993,7 +993,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (field->type() == MYSQL_TYPE_TIMESTAMP) field->set_time(); else if (field != table->next_number_field) - field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + field->set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_NULL_TO_NOTNULL, 1); } /* Do not auto-update this field. 
*/ @@ -1059,7 +1059,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (field->reset()) { my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0),field->field_name, - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); DBUG_RETURN(1); } if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP) @@ -1072,10 +1072,10 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, in the end ?) */ thd->cuted_fields++; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_TOO_FEW_RECORDS, ER(ER_WARN_TOO_FEW_RECORDS), - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); } else if (item->type() == Item::STRING_ITEM) { @@ -1119,13 +1119,13 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (read_info.line_cuted) { thd->cuted_fields++; /* To long row */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); if (thd->killed) DBUG_RETURN(1); } - thd->warning_info->inc_current_row_for_warning(); + thd->get_stmt_da()->inc_current_row_for_warning(); continue_loop:; } DBUG_RETURN(test(read_info.error)); @@ -1206,7 +1206,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, if (field->type() == FIELD_TYPE_TIMESTAMP) field->set_time(); else if (field != table->next_number_field) - field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + field->set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_NULL_TO_NOTNULL, 1); } /* Do not auto-update this field. */ @@ -1259,10 +1259,10 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, in the end ?) 
*/ thd->cuted_fields++; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_TOO_FEW_RECORDS, ER(ER_WARN_TOO_FEW_RECORDS), - thd->warning_info->current_row_for_warning()); + thd->get_stmt_da()->current_row_for_warning()); } else ((Item_user_var_as_out_param *)item)->set_null_value(cs); @@ -1293,7 +1293,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, its default value at the beginning of each loop iteration. */ thd->transaction.stmt.modified_non_trans_table= no_trans_update_stmt; - thd->warning_info->inc_current_row_for_warning(); + thd->get_stmt_da()->inc_current_row_for_warning(); continue_loop:; } DBUG_RETURN(test(read_info.error) || thd->is_error()); @@ -1364,7 +1364,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, line_term_char= line_term_length ? (uchar) line_term_ptr[0] : INT_MAX; /* Set of a stack for unget if long terminators */ - uint length= max(cs->mbmaxlen, max(field_term_length, line_term_length)) + 1; + uint length= MY_MAX(cs->mbmaxlen, MY_MAX(field_term_length, line_term_length)) + 1; set_if_bigger(length,line_start.length()); stack=stack_pos=(int*) sql_alloc(sizeof(int)*length); diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc index 13e00c99f19..b2b112ed4ba 100644 --- a/sql/sql_locale.cc +++ b/sql/sql_locale.cc @@ -3422,7 +3422,7 @@ MY_LOCALE *my_locale_by_name(const char *name) if (thd) { // Send a warning to the client - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), name, locale->name); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index f9554c79305..7d057f4e91a 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -24,7 +24,7 @@ // set_handler_table_locks, // lock_global_read_lock, // make_global_read_lock_block_commit -#include "sql_base.h" // find_temporary_tablesx +#include "sql_base.h" // 
find_temporary_table #include "sql_cache.h" // QUERY_CACHE_FLAGS_SIZE, query_cache_* #include "sql_show.h" // mysqld_list_*, mysqld_show_*, // calc_sum_of_all_status @@ -44,7 +44,6 @@ #include "sql_table.h" // mysql_create_like_table, // mysql_create_table, // mysql_alter_table, - // mysql_recreate_table, // mysql_backup_table, // mysql_restore_table #include "sql_reload.h" // reload_acl_and_cache @@ -499,6 +498,7 @@ void init_update_queries(void) sql_command_flags[SQLCOM_SELECT]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_SET_OPTION]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_DO]|= CF_PREOPEN_TMP_TABLES; + sql_command_flags[SQLCOM_HA_OPEN]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_CALL]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_CHECKSUM]|= CF_PREOPEN_TMP_TABLES; sql_command_flags[SQLCOM_ANALYZE]|= CF_PREOPEN_TMP_TABLES; @@ -512,7 +512,7 @@ void init_update_queries(void) DDL statements that should start with closing opened handlers. We use this flag only for statements for which open HANDLERs - have to be closed before emporary tables are pre-opened. + have to be closed before temporary tables are pre-opened. */ sql_command_flags[SQLCOM_CREATE_TABLE]|= CF_HA_CLOSE; sql_command_flags[SQLCOM_DROP_TABLE]|= CF_HA_CLOSE; @@ -888,7 +888,7 @@ bool do_command(THD *thd) Consider moving to init_connect() instead. 
*/ thd->clear_error(); // Clear error message - thd->stmt_da->reset_diagnostics_area(); + thd->get_stmt_da()->reset_diagnostics_area(); net_new_transaction(net); @@ -1158,7 +1158,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif case COM_CHANGE_USER: { - bool rc; + int auth_rc; status_var_increment(thd->status_var.com_other); thd->change_user(); @@ -1189,13 +1189,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (thd->failed_com_change_user >= 3) { my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); - rc= 1; + auth_rc= 1; } else - rc= acl_authenticate(thd, 0, packet_length); + auth_rc= acl_authenticate(thd, 0, packet_length); mysql_audit_notify_connection_change_user(thd); - if (rc) + if (auth_rc) { /* Free user if allocated by acl_authenticate */ my_free(thd->security_ctx->user); @@ -1294,8 +1294,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, query_cache_end_of_result(thd); mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS, - thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() - : 0, command_name[command].str); + thd->get_stmt_da()->is_error() + ? 
thd->get_stmt_da()->sql_errno() + : 0, + command_name[command].str); ulong length= (ulong)(packet_end - beginning_of_next_stmt); @@ -1331,10 +1333,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, (char *) thd->security_ctx->host_or_ip); /* PSI begin */ - thd->m_statement_psi= - MYSQL_START_STATEMENT(&thd->m_statement_state, - com_statement_info[command].m_key, - thd->db, thd->db_length); + thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state, + com_statement_info[command].m_key, + thd->db, thd->db_length, + thd->charset()); THD_STAGE_INFO(thd, stage_init); MYSQL_SET_STATEMENT_TEXT(thd->m_statement_psi, beginning_of_next_stmt, length); @@ -1427,6 +1429,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->set_query(fields, query_length); general_log_print(thd, command, "%s %s", table_list.table_name, fields); + if (open_temporary_tables(thd, &table_list)) + break; + if (check_table_access(thd, SELECT_ACL, &table_list, TRUE, UINT_MAX, FALSE)) break; @@ -1451,7 +1456,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, /* We don't calculate statistics for this command */ general_log_print(thd, command, NullS); net->error=0; // Don't give 'abort' message - thd->stmt_da->disable_status(); // Don't send anything back + thd->get_stmt_da()->disable_status(); // Don't send anything back error=TRUE; // End server break; #ifndef EMBEDDED_LIBRARY @@ -1605,7 +1610,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #else (void) my_net_write(net, (uchar*) buff, length); (void) net_flush(net); - thd->stmt_da->disable_status(); + thd->get_stmt_da()->disable_status(); #endif break; } @@ -1681,7 +1686,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_RESULT, 0, 0); mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS, - thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() : 0, + thd->get_stmt_da()->is_error() ? 
+ thd->get_stmt_da()->sql_errno() : 0, command_name[command].str); thd->update_all_stats(); @@ -2057,7 +2063,7 @@ bool sp_process_definer(THD *thd) if (!is_acl_user(lex->definer->host.str, lex->definer->user.str)) { push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_NOTE, + Sql_condition::WARN_LEVEL_NOTE, ER_NO_SUCH_USER, ER(ER_NO_SUCH_USER), lex->definer->user.str, @@ -2204,12 +2210,12 @@ mysql_execute_command(THD *thd) variables, but for now this is probably good enough. */ if ((sql_command_flags[lex->sql_command] & CF_DIAGNOSTIC_STMT) != 0) - thd->warning_info->set_read_only(TRUE); + thd->get_stmt_da()->set_warning_info_read_only(TRUE); else { - thd->warning_info->set_read_only(FALSE); + thd->get_stmt_da()->set_warning_info_read_only(FALSE); if (all_tables) - thd->warning_info->opt_clear_warning_info(thd->query_id); + thd->get_stmt_da()->opt_clear_warning_info(thd->query_id); } #ifdef HAVE_REPLICATION @@ -2405,6 +2411,31 @@ mysql_execute_command(THD *thd) goto error; } + /* + Close tables open by HANDLERs before executing DDL statement + which is going to affect those tables. + + This should happen before temporary tables are pre-opened as + otherwise we will get errors about attempt to re-open tables + if table to be changed is open through HANDLER. + + Note that even although this is done before any privilege + checks there is no security problem here as closing open + HANDLER doesn't require any privileges anyway. + */ + if (sql_command_flags[lex->sql_command] & CF_HA_CLOSE) + mysql_ha_rm_tables(thd, all_tables); + + /* + Pre-open temporary tables to simplify privilege checking + for statements which need this. 
+ */ + if (sql_command_flags[lex->sql_command] & CF_PREOPEN_TMP_TABLES) + { + if (open_temporary_tables(thd, all_tables)) + goto error; + } + switch (lex->sql_command) { case SQLCOM_SHOW_EVENTS: @@ -2412,13 +2443,6 @@ mysql_execute_command(THD *thd) my_error(ER_NOT_SUPPORTED_YET, MYF(0), "embedded server"); break; #endif - case SQLCOM_SHOW_STATUS_PROC: - case SQLCOM_SHOW_STATUS_FUNC: - if ((res= check_table_access(thd, SELECT_ACL, all_tables, FALSE, - UINT_MAX, FALSE))) - goto error; - res= execute_sqlcom_select(thd, all_tables); - break; case SQLCOM_SHOW_STATUS: { execute_show_status(thd, all_tables); @@ -2451,6 +2475,8 @@ mysql_execute_command(THD *thd) } /* no break; fall through */ } + case SQLCOM_SHOW_STATUS_PROC: + case SQLCOM_SHOW_STATUS_FUNC: case SQLCOM_SHOW_DATABASES: case SQLCOM_SHOW_TABLES: case SQLCOM_SHOW_TRIGGERS: @@ -2555,16 +2581,16 @@ case SQLCOM_PREPARE: case SQLCOM_SHOW_WARNS: { res= mysqld_show_warnings(thd, (ulong) - ((1L << (uint) MYSQL_ERROR::WARN_LEVEL_NOTE) | - (1L << (uint) MYSQL_ERROR::WARN_LEVEL_WARN) | - (1L << (uint) MYSQL_ERROR::WARN_LEVEL_ERROR) + ((1L << (uint) Sql_condition::WARN_LEVEL_NOTE) | + (1L << (uint) Sql_condition::WARN_LEVEL_WARN) | + (1L << (uint) Sql_condition::WARN_LEVEL_ERROR) )); break; } case SQLCOM_SHOW_ERRORS: { res= mysqld_show_warnings(thd, (ulong) - (1L << (uint) MYSQL_ERROR::WARN_LEVEL_ERROR)); + (1L << (uint) Sql_condition::WARN_LEVEL_ERROR)); break; } case SQLCOM_SHOW_PROFILES: @@ -2634,7 +2660,7 @@ case SQLCOM_PREPARE: mysql_mutex_lock(&LOCK_active_mi); mi= master_info_index->get_master_info(&lex_mi->connection_name, - MYSQL_ERROR::WARN_LEVEL_NOTE); + Sql_condition::WARN_LEVEL_NOTE); if (mi == NULL) { @@ -2687,7 +2713,7 @@ case SQLCOM_PREPARE: LEX_MASTER_INFO *lex_mi= &thd->lex->mi; Master_info *mi; mi= master_info_index->get_master_info(&lex_mi->connection_name, - MYSQL_ERROR::WARN_LEVEL_ERROR); + Sql_condition::WARN_LEVEL_ERROR); if (mi != NULL) { res= show_master_info(thd, mi, 0); @@ -2795,9 +2821,6 @@ 
case SQLCOM_PREPARE: } #endif - /* Close any open handlers for the table. */ - mysql_ha_rm_tables(thd, create_table); - if (select_lex->item_list.elements) // With select { select_result *result; @@ -2845,7 +2868,7 @@ case SQLCOM_PREPARE: */ if (splocal_refs != thd->query_name_consts) push_warning(thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, "Invoked routine ran a statement that may cause problems with " "binary log, see 'NAME_CONST issues' in 'Binary Logging of Stored Programs' " @@ -2882,7 +2905,7 @@ case SQLCOM_PREPARE: { if (create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR), create_info.alias); @@ -2992,7 +3015,7 @@ end_with_restore_list: res= mysql_alter_table(thd, first_table->db, first_table->table_name, &create_info, first_table, &alter_info, - 0, (ORDER*) 0, 0, 0); + 0, (ORDER*) 0, 0); break; } #ifdef HAVE_REPLICATION @@ -3008,7 +3031,7 @@ end_with_restore_list: if ((mi= (master_info_index-> get_master_info(&lex_mi->connection_name, - MYSQL_ERROR::WARN_LEVEL_ERROR)))) + Sql_condition::WARN_LEVEL_ERROR)))) { if (load_error) { @@ -3061,7 +3084,7 @@ end_with_restore_list: mysql_mutex_lock(&LOCK_active_mi); if ((mi= (master_info_index-> get_master_info(&lex_mi->connection_name, - MYSQL_ERROR::WARN_LEVEL_ERROR)))) + Sql_condition::WARN_LEVEL_ERROR)))) if (!stop_slave(thd, mi, 1/* net report*/)) my_ok(thd); mysql_mutex_unlock(&LOCK_active_mi); @@ -3152,6 +3175,13 @@ end_with_restore_list: else { /* + Temporary tables should be opened for SHOW CREATE TABLE, but not + for SHOW CREATE VIEW. + */ + if (open_temporary_tables(thd, all_tables)) + goto error; + + /* The fact that check_some_access() returned FALSE does not mean that access is granted. We need to check if first_table->grant.privilege contains any table-specific privilege. 
@@ -3329,6 +3359,18 @@ end_with_restore_list: case SQLCOM_INSERT: { DBUG_ASSERT(first_table == all_tables && first_table != 0); + + /* + Since INSERT DELAYED doesn't support temporary tables, we could + not pre-open temporary tables for SQLCOM_INSERT / SQLCOM_REPLACE. + Open them here instead. + */ + if (first_table->lock_type != TL_WRITE_DELAYED) + { + if ((res= open_temporary_tables(thd, all_tables))) + break; + } + if ((res= insert_precheck(thd, all_tables))) break; @@ -3668,6 +3710,19 @@ end_with_restore_list: thd->mdl_context.release_transactional_locks(); if (res) goto error; + + /* + Here we have to pre-open temporary tables for LOCK TABLES. + + CF_PREOPEN_TMP_TABLES is not set for this SQL statement simply + because LOCK TABLES calls close_thread_tables() as a first thing + (it's called from unlock_locked_tables() above). So even if + CF_PREOPEN_TMP_TABLES was set and the tables would be pre-opened + in a usual way, they would have been closed. + */ + if (open_temporary_tables(thd, all_tables)) + goto error; + if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE)) goto error; @@ -3970,7 +4025,7 @@ end_with_restore_list: goto error; if (specialflag & SPECIAL_NO_RESOLVE && hostname_requires_resolving(user->host.str)) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_HOSTNAME_WONT_WORK, ER(ER_WARN_HOSTNAME_WONT_WORK)); // Are we trying to change a password of another user @@ -4169,6 +4224,9 @@ end_with_restore_list: DBUG_ASSERT(first_table == all_tables && first_table != 0); if (check_table_access(thd, SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE)) goto error; + /* Close temporary tables which were pre-opened for privilege checking. 
*/ + close_thread_tables(thd); + all_tables->table= NULL; res= mysql_ha_open(thd, first_table, 0); break; case SQLCOM_HA_CLOSE: @@ -4376,7 +4434,7 @@ end_with_restore_list: { if (sp_grant_privileges(thd, lex->sphead->m_db.str, name, lex->sql_command == SQLCOM_CREATE_PROCEDURE)) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_PROC_AUTO_GRANT_FAIL, ER(ER_PROC_AUTO_GRANT_FAIL)); thd->clear_error(); } @@ -4581,7 +4639,7 @@ create_sp_error: { if (lex->check_exists) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), "FUNCTION (UDF)", lex->spname->m_name.str); res= FALSE; @@ -4634,7 +4692,7 @@ create_sp_error: sp_revoke_privileges(thd, db, name, lex->sql_command == SQLCOM_DROP_PROCEDURE)) { - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_PROC_AUTO_REVOKE_FAIL, ER(ER_PROC_AUTO_REVOKE_FAIL)); /* If this happens, an error should have been reported. */ @@ -4651,7 +4709,7 @@ create_sp_error: if (lex->check_exists) { res= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), SP_COM_STRING(lex), lex->spname->m_qname.str); if (!res) @@ -4889,10 +4947,12 @@ create_sp_error: /* fall through */ case SQLCOM_SIGNAL: case SQLCOM_RESIGNAL: - DBUG_ASSERT(lex->m_stmt != NULL); - res= lex->m_stmt->execute(thd); + case SQLCOM_GET_DIAGNOSTICS: + DBUG_ASSERT(lex->m_sql_cmd != NULL); + res= lex->m_sql_cmd->execute(thd); break; default: + #ifndef EMBEDDED_LIBRARY DBUG_ASSERT(0); /* Impossible */ #endif @@ -4934,7 +4994,7 @@ finish: if (thd->killed_errno()) { /* If we already sent 'ok', we can ignore any kill query statements */ - if (! thd->stmt_da->is_set()) + if (! 
thd->get_stmt_da()->is_set()) thd->send_kill_message(); } if (thd->killed < KILL_CONNECTION) @@ -4948,9 +5008,9 @@ finish: else { /* If commit fails, we should be able to reset the OK status. */ - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); trans_commit_stmt(thd); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); } #ifdef WITH_ARIA_STORAGE_ENGINE ha_maria::implicit_commit(thd, FALSE); @@ -4977,10 +5037,10 @@ finish: /* No transaction control allowed in sub-statements. */ DBUG_ASSERT(! thd->in_sub_stmt); /* If commit fails, we should be able to reset the OK status. */ - thd->stmt_da->can_overwrite_status= TRUE; + thd->get_stmt_da()->set_overwrite_status(true); /* Commit the normal transaction if one is active. */ trans_commit_implicit(thd); - thd->stmt_da->can_overwrite_status= FALSE; + thd->get_stmt_da()->set_overwrite_status(false); thd->mdl_context.release_transactional_locks(); } else if (! thd->in_sub_stmt && ! thd->in_multi_stmt_transaction_mode()) @@ -5046,7 +5106,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) mysqld_show_warnings(). */ thd->lex->unit.print(&str, QT_TO_SYSTEM_CHARSET); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_YES, str.c_ptr_safe()); } if (res) @@ -5504,6 +5564,12 @@ static bool check_show_access(THD *thd, TABLE_LIST *table) DBUG_ASSERT(dst_table); + /* + Open temporary tables to be able to detect them during privilege check. 
+ */ + if (open_temporary_tables(thd, dst_table)) + return TRUE; + if (check_access(thd, SELECT_ACL, dst_table->db, &dst_table->grant.privilege, &dst_table->grant.m_internal, @@ -5517,6 +5583,9 @@ static bool check_show_access(THD *thd, TABLE_LIST *table) if (check_grant(thd, SELECT_ACL, dst_table, TRUE, UINT_MAX, FALSE)) return TRUE; /* Access denied */ + close_thread_tables(thd); + dst_table->table= NULL; + /* Access granted */ return FALSE; } @@ -5602,10 +5671,10 @@ check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables, DBUG_PRINT("info", ("derived: %d view: %d", tables->derived != 0, tables->view != 0)); - if (tables->is_anonymous_derived_table() || - (tables->table && tables->table->s && - (int)tables->table->s->tmp_table)) + + if (tables->is_anonymous_derived_table()) continue; + thd->security_ctx= sctx; if (check_access(thd, want_access, tables->get_db_name(), @@ -5811,7 +5880,7 @@ bool check_stack_overrun(THD *thd, long margin, return 1; } #ifndef DBUG_OFF - max_stack_used= max(max_stack_used, stack_used); + max_stack_used= MY_MAX(max_stack_used, stack_used); #endif return 0; } @@ -5879,7 +5948,6 @@ void THD::reset_for_next_command(bool calculate_userstat) DBUG_ENTER("mysql_reset_thd_for_next_command"); DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */ DBUG_ASSERT(! 
thd->in_sub_stmt); - DBUG_ASSERT(thd->transaction.on); thd->free_list= 0; thd->select_number= 1; /* @@ -5916,8 +5984,8 @@ void THD::reset_for_next_command(bool calculate_userstat) thd->user_var_events_alloc= thd->mem_root; } thd->clear_error(); - thd->stmt_da->reset_diagnostics_area(); - thd->warning_info->reset_for_next_command(); + thd->get_stmt_da()->reset_diagnostics_area(); + thd->get_stmt_da()->reset_for_next_command(); thd->rand_used= 0; thd->m_sent_row_count= thd->m_examined_row_count= 0; thd->accessed_rows_and_keys= 0; @@ -6144,7 +6212,7 @@ void mysql_parse(THD *thd, char *rawbuf, uint length, { LEX *lex= thd->lex; - bool err= parse_sql(thd, parser_state, NULL); + bool err= parse_sql(thd, parser_state, NULL, true); if (!err) { @@ -6252,7 +6320,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *rawbuf, uint length) lex_start(thd); mysql_reset_thd_for_next_command(thd, opt_userstat_running); - if (!parse_sql(thd, & parser_state, NULL) && + if (!parse_sql(thd, & parser_state, NULL, true) && all_tables_not_ok(thd, lex->select_lex.table_list.first)) error= 1; /* Ignore question */ thd->end_statement(); @@ -6442,6 +6510,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, thr_lock_type lock_type, enum_mdl_type mdl_type, List<Index_hint> *index_hints_arg, + List<String> *partition_names, LEX_STRING *option) { register TABLE_LIST *ptr; @@ -6586,6 +6655,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, */ table_list.link_in_list(ptr, &ptr->next_local); ptr->next_name_resolution_table= NULL; +#ifdef WITH_PARTITION_STORAGE_ENGINE + ptr->partition_names= partition_names; +#endif /* WITH_PARTITION_STORAGE_ENGINE */ /* Link table in global list (all used tables) */ lex->add_to_query_tables(ptr); @@ -7291,7 +7363,7 @@ bool check_simple_select() char command[80]; Lex_input_stream *lip= & thd->m_parser_state->m_lip; strmake(command, lip->yylval->symbol.str, - min(lip->yylval->symbol.length, sizeof(command)-1)); + MY_MIN(lip->yylval->symbol.length, 
sizeof(command)-1)); my_error(ER_CANT_USE_OPTION_HERE, MYF(0), command); return 1; } @@ -7464,6 +7536,19 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables) TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last; DBUG_ENTER("multi_delete_precheck"); + /* + Temporary tables are pre-opened in 'tables' list only. Here we need to + initialize TABLE instances in 'aux_tables' list. + */ + for (TABLE_LIST *tl= aux_tables; tl; tl= tl->next_global) + { + if (tl->table) + continue; + + if (tl->correspondent_table) + tl->table= tl->correspondent_table->table; + } + /* sql_yacc guarantees that tables and aux_tables are not zero */ DBUG_ASSERT(aux_tables != 0); if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)) @@ -7732,9 +7817,9 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables, CREATE TABLE ... SELECT, also require INSERT. */ - want_priv= ((lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ? - CREATE_TMP_ACL : CREATE_ACL) | - (select_lex->item_list.elements ? INSERT_ACL : 0); + want_priv= (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ? + CREATE_TMP_ACL : + (CREATE_ACL | (select_lex->item_list.elements ? INSERT_ACL : 0)); if (check_access(thd, want_priv, create_table->db, &create_table->grant.privilege, @@ -7743,11 +7828,48 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables, goto err; /* If it is a merge table, check privileges for merge children. */ - if (lex->create_info.merge_list.first && - check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL, - lex->create_info.merge_list.first, - FALSE, UINT_MAX, FALSE)) - goto err; + if (lex->create_info.merge_list.first) + { + /* + The user must have (SELECT_ACL | UPDATE_ACL | DELETE_ACL) on the + underlying base tables, even if there are temporary tables with the same + names. + + From user's point of view, it might look as if the user must have these + privileges on temporary tables to create a merge table over them. 
This is + one of two cases when a set of privileges is required for operations on + temporary tables (see also CREATE TABLE). + + The reason for this behavior stems from the following facts: + + - For merge tables, the underlying table privileges are checked only + at CREATE TABLE / ALTER TABLE time. + + In other words, once a merge table is created, the privileges of + the underlying tables can be revoked, but the user will still have + access to the merge table (provided that the user has privileges on + the merge table itself). + + - Temporary tables shadow base tables. + + I.e. there might be temporary and base tables with the same name, and + the temporary table takes the precedence in all operations. + + - For temporary MERGE tables we do not track if their child tables are + base or temporary. As result we can't guarantee that privilege check + which was done in presence of temporary child will stay relevant later + as this temporary table might be removed. + + If SELECT_ACL | UPDATE_ACL | DELETE_ACL privileges were not checked for + the underlying *base* tables, it would create a security breach as in + Bug#12771903. + */ + + if (check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL, + lex->create_info.merge_list.first, + FALSE, UINT_MAX, FALSE)) + goto err; + } if (want_priv != CREATE_TMP_ACL && check_grant(thd, want_priv, create_table, FALSE, 1, FALSE)) @@ -8105,14 +8227,13 @@ extern int MYSQLparse(void *thd); // from sql_yacc.cc @retval TRUE on parsing error. */ -bool parse_sql(THD *thd, - Parser_state *parser_state, - Object_creation_ctx *creation_ctx) +bool parse_sql(THD *thd, Parser_state *parser_state, + Object_creation_ctx *creation_ctx, bool do_pfs_digest) { bool ret_value; DBUG_ENTER("parse_sql"); DBUG_ASSERT(thd->m_parser_state == NULL); - DBUG_ASSERT(thd->lex->m_stmt == NULL); + DBUG_ASSERT(thd->lex->m_sql_cmd == NULL); MYSQL_QUERY_PARSE_START(thd->query()); /* Backup creation context. 
*/ @@ -8129,7 +8250,7 @@ bool parse_sql(THD *thd, #ifdef HAVE_PSI_STATEMENT_DIGEST_INTERFACE /* Start Digest */ thd->m_parser_state->m_lip.m_digest_psi= - MYSQL_DIGEST_START(thd->m_statement_psi); + MYSQL_DIGEST_START(do_pfs_digest ? thd->m_statement_psi : NULL); #endif /* Parse the query. */ diff --git a/sql/sql_parse.h b/sql/sql_parse.h index 346a3c8899b..84256aa2256 100644 --- a/sql/sql_parse.h +++ b/sql/sql_parse.h @@ -47,9 +47,8 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables); bool create_table_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *create_table); -bool parse_sql(THD *thd, - Parser_state *parser_state, - Object_creation_ctx *creation_ctx); +bool parse_sql(THD *thd, Parser_state *parser_state, + Object_creation_ctx *creation_ctx, bool do_pfs_digest=false); void free_items(Item *item); void cleanup_items(Item *item); diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index f51bba83b75..9e4c48b47ff 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -59,6 +59,7 @@ #include <m_ctype.h> #include "my_md5.h" #include "transaction.h" +#include "debug_sync.h" #include "sql_base.h" // close_all_tables_for_name #include "sql_table.h" // build_table_filename, @@ -67,6 +68,11 @@ // mysql_*_alter_copy_data #include "opt_range.h" // store_key_image_to_rec #include "sql_analyse.h" // append_escaped +#include "sql_alter.h" // Alter_table_ctx + +#include <algorithm> +using std::max; +using std::min; #ifdef WITH_PARTITION_STORAGE_ENGINE #include "ha_partition.h" @@ -87,9 +93,7 @@ const LEX_STRING partition_keywords[]= { C_STRING_WITH_LEN("KEY") }, { C_STRING_WITH_LEN("MAXVALUE") }, { C_STRING_WITH_LEN("LINEAR ") }, - { C_STRING_WITH_LEN(" COLUMNS") }, - { C_STRING_WITH_LEN("ALGORITHM") } - + { C_STRING_WITH_LEN(" COLUMNS") } }; static const char *part_str= "PARTITION"; static const char *sub_str= "SUB"; @@ -189,7 +193,7 @@ static int cmp_rec_and_tuple_prune(part_column_list_val *val, item New converted item */ -Item* 
convert_charset_partition_constant(Item *item, CHARSET_INFO *cs) +Item* convert_charset_partition_constant(Item *item, const CHARSET_INFO *cs) { THD *thd= current_thd; Name_resolution_context *context= &thd->lex->current_select->context; @@ -207,21 +211,18 @@ Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs) } -/* - A support function to check if a name is in a list of strings +/** + A support function to check if a name is in a list of strings. - SYNOPSIS - is_name_in_list() - name String searched for - list_names A list of names searched in + @param name String searched for + @param list_names A list of names searched in - RETURN VALUES - TRUE String found - FALSE String not found + @return True if if the name is in the list. + @retval true String found + @retval false String not found */ -bool is_name_in_list(char *name, - List<char> list_names) +static bool is_name_in_list(char *name, List<char> list_names) { List_iterator<char> names_it(list_names); uint num_names= list_names.elements; @@ -288,61 +289,6 @@ bool partition_default_handling(TABLE *table, partition_info *part_info, /* - Check that the reorganized table will not have duplicate partitions. - - SYNOPSIS - check_reorganise_list() - new_part_info New partition info - old_part_info Old partition info - list_part_names The list of partition names that will go away and - can be reused in the new table. - - RETURN VALUES - TRUE Inacceptable name conflict detected. - FALSE New names are OK. - - DESCRIPTION - Can handle that the 'new_part_info' and 'old_part_info' the same - in which case it checks that the list of names in the partitions - doesn't contain any duplicated names. 
-*/ - -bool check_reorganise_list(partition_info *new_part_info, - partition_info *old_part_info, - List<char> list_part_names) -{ - uint new_count, old_count; - uint num_new_parts= new_part_info->partitions.elements; - uint num_old_parts= old_part_info->partitions.elements; - List_iterator<partition_element> new_parts_it(new_part_info->partitions); - bool same_part_info= (new_part_info == old_part_info); - DBUG_ENTER("check_reorganise_list"); - - new_count= 0; - do - { - List_iterator<partition_element> old_parts_it(old_part_info->partitions); - char *new_name= (new_parts_it++)->partition_name; - new_count++; - old_count= 0; - do - { - char *old_name= (old_parts_it++)->partition_name; - old_count++; - if (same_part_info && old_count == new_count) - break; - if (!(my_strcasecmp(system_charset_info, old_name, new_name))) - { - if (!is_name_in_list(old_name, list_part_names)) - DBUG_RETURN(TRUE); - } - } while (old_count < num_old_parts); - } while (new_count < num_new_parts); - DBUG_RETURN(FALSE); -} - - -/* A useful routine used by update_row for partition handlers to calculate the partition ids of the old and the new record. 
@@ -370,7 +316,7 @@ int get_parts_for_update(const uchar *old_data, uchar *new_data, longlong old_func_value; DBUG_ENTER("get_parts_for_update"); - DBUG_ASSERT(new_data == rec0); // table->record[0] + DBUG_ASSERT(new_data == rec0); set_field_ptr(part_field_array, old_data, rec0); error= part_info->get_partition_id(part_info, old_part_id, &old_func_value); @@ -528,12 +474,12 @@ static bool set_up_field_array(TABLE *table, } if (num_fields > MAX_REF_PARTS) { - char *err_str; + char *ptr; if (is_sub_part) - err_str= (char*)"subpartition function"; + ptr= (char*)"subpartition function"; else - err_str= (char*)"partition function"; - my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), err_str); + ptr= (char*)"partition function"; + my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), ptr); DBUG_RETURN(TRUE); } if (num_fields == 0) @@ -577,7 +523,13 @@ static bool set_up_field_array(TABLE *table, } while (++inx < num_fields); if (inx == num_fields) { - mem_alloc_error(1); + /* + Should not occur since it should already been checked in either + add_column_list_values, handle_list_of_fields, + check_partition_info etc. 
+ */ + DBUG_ASSERT(0); + my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0)); result= TRUE; continue; } @@ -741,7 +693,7 @@ end: static void clear_indicator_in_key_fields(KEY *key_info) { KEY_PART_INFO *key_part; - uint key_parts= key_info->key_parts, i; + uint key_parts= key_info->user_defined_key_parts, i; for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++) key_part->field->flags&= (~GET_FIXED_FIELDS_FLAG); } @@ -761,7 +713,7 @@ static void clear_indicator_in_key_fields(KEY *key_info) static void set_indicator_in_key_fields(KEY *key_info) { KEY_PART_INFO *key_part; - uint key_parts= key_info->key_parts, i; + uint key_parts= key_info->user_defined_key_parts, i; for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++) key_part->field->flags|= GET_FIXED_FIELDS_FLAG; } @@ -881,7 +833,7 @@ static bool handle_list_of_fields(List_iterator<char> it, uint primary_key= table->s->primary_key; if (primary_key != MAX_KEY) { - uint num_key_parts= table->key_info[primary_key].key_parts, i; + uint num_key_parts= table->key_info[primary_key].user_defined_key_parts, i; /* In the case of an empty list we use primary key as partition key. 
*/ @@ -1074,7 +1026,7 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table, goto end; } else - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR, ER(ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR)); } @@ -1243,39 +1195,44 @@ void check_range_capable_PF(TABLE *table) } -/* - Set up partition bitmap +/** + Set up partition bitmaps - SYNOPSIS - set_up_partition_bitmap() - thd Thread object - part_info Reference to partitioning data structure + @param thd Thread object + @param part_info Reference to partitioning data structure - RETURN VALUE - TRUE Memory allocation failure - FALSE Success + @return Operation status + @retval TRUE Memory allocation failure + @retval FALSE Success - DESCRIPTION - Allocate memory for bitmap of the partitioned table + Allocate memory for bitmaps of the partitioned table and initialise it. */ -static bool set_up_partition_bitmap(THD *thd, partition_info *part_info) +static bool set_up_partition_bitmaps(THD *thd, partition_info *part_info) { uint32 *bitmap_buf; uint bitmap_bits= part_info->num_subparts? 
(part_info->num_subparts* part_info->num_parts): part_info->num_parts; uint bitmap_bytes= bitmap_buffer_size(bitmap_bits); - DBUG_ENTER("set_up_partition_bitmap"); + DBUG_ENTER("set_up_partition_bitmaps"); + + DBUG_ASSERT(!part_info->bitmaps_are_initialized); - if (!(bitmap_buf= (uint32*)thd->alloc(bitmap_bytes))) + /* Allocate for both read and lock_partitions */ + if (!(bitmap_buf= (uint32*) alloc_root(&part_info->table->mem_root, + bitmap_bytes * 2))) { - mem_alloc_error(bitmap_bytes); + mem_alloc_error(bitmap_bytes * 2); DBUG_RETURN(TRUE); } - bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE); - bitmap_set_all(&part_info->used_partitions); + bitmap_init(&part_info->read_partitions, bitmap_buf, bitmap_bits, FALSE); + /* Use the second half of the allocated buffer for lock_partitions */ + bitmap_init(&part_info->lock_partitions, bitmap_buf + (bitmap_bytes / 4), + bitmap_bits, FALSE); + part_info->bitmaps_are_initialized= TRUE; + part_info->set_partition_bitmaps(NULL); DBUG_RETURN(FALSE); } @@ -1795,7 +1752,7 @@ bool fix_partition_func(THD *thd, TABLE *table, (table->s->db_type()->partition_flags() & HA_CAN_PARTITION_UNIQUE))) && check_unique_keys(table))) goto end; - if (unlikely(set_up_partition_bitmap(thd, part_info))) + if (unlikely(set_up_partition_bitmaps(thd, part_info))) goto end; if (unlikely(part_info->set_up_charset_field_preps())) { @@ -1811,6 +1768,7 @@ bool fix_partition_func(THD *thd, TABLE *table, set_up_partition_key_maps(table, part_info); set_up_partition_func_pointers(part_info); set_up_range_analysis_info(part_info); + table->file->set_part_info(part_info); result= FALSE; end: thd->mark_used_columns= save_mark_used_columns; @@ -1982,8 +1940,85 @@ static int add_quoted_string(File fptr, const char *quotestr) return err + add_string(fptr, "'"); } +/** + @brief Truncate the partition file name from a path it it exists. + + @note A partition file name will contian one or more '#' characters. 
+One of the occurances of '#' will be either "#P#" or "#p#" depending +on whether the storage engine has converted the filename to lower case. +*/ +void truncate_partition_filename(char *path) +{ + if (path) + { + char* last_slash= strrchr(path, FN_LIBCHAR); + + if (!last_slash) + last_slash= strrchr(path, FN_LIBCHAR2); + + if (last_slash) + { + /* Look for a partition-type filename */ + for (char* pound= strchr(last_slash, '#'); + pound; pound = strchr(pound + 1, '#')) + { + if ((pound[1] == 'P' || pound[1] == 'p') && pound[2] == '#') + { + last_slash[0] = '\0'; /* truncate the file name */ + break; + } + } + } + } +} + + +/** + @brief Output a filepath. Similar to add_keyword_string except it +also converts \ to / on Windows and skips the partition file name at +the end if found. + + @note When Mysql sends a DATA DIRECTORY from SQL for partitions it does +not use a file name, but it does for DATA DIRECTORY on a non-partitioned +table. So when the storage engine is asked for the DATA DIRECTORY string +after a restart through Handler::update_create_options(), the storage +engine may include the filename. +*/ +static int add_keyword_path(File fptr, const char *keyword, + const char *path) +{ + int err= add_string(fptr, keyword); + + err+= add_space(fptr); + err+= add_equal(fptr); + err+= add_space(fptr); + + char temp_path[FN_REFLEN]; + strcpy(temp_path, path); +#ifdef __WIN__ + /* Convert \ to / to be able to create table on unix */ + char *pos, *end; + uint length= strlen(temp_path); + for (pos= temp_path, end= pos+length ; pos < end ; pos++) + { + if (*pos == '\\') + *pos = '/'; + } +#endif + + /* + If the partition file name with its "#P#" identifier + is found after the last slash, truncate that filename. 
+ */ + truncate_partition_filename(temp_path); + + err+= add_quoted_string(fptr, temp_path); + + return err + add_space(fptr); +} + static int add_keyword_string(File fptr, const char *keyword, - bool should_use_quotes, + bool should_use_quotes, const char *keystr) { int err= add_string(fptr, keyword); @@ -2034,11 +2069,9 @@ static int add_partition_options(File fptr, partition_element *p_elem) if (!(current_thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE)) { if (p_elem->data_file_name) - err+= add_keyword_string(fptr, "DATA DIRECTORY", TRUE, - p_elem->data_file_name); + err+= add_keyword_path(fptr, "DATA DIRECTORY", p_elem->data_file_name); if (p_elem->index_file_name) - err+= add_keyword_string(fptr, "INDEX DIRECTORY", TRUE, - p_elem->index_file_name); + err+= add_keyword_path(fptr, "INDEX DIRECTORY", p_elem->index_file_name); } if (p_elem->part_comment) err+= add_keyword_string(fptr, "COMMENT", TRUE, p_elem->part_comment); @@ -2188,7 +2221,7 @@ static int add_column_list_values(File fptr, partition_info *part_info, else { String *res; - CHARSET_INFO *field_cs; + const CHARSET_INFO *field_cs; bool need_cs_check= FALSE; Item_result result_type= STRING_RESULT; @@ -2344,58 +2377,6 @@ end: return err; } - -/** - Add 'KEY' word, with optional 'ALGORTIHM = N'. - - @param fptr File to write to. - @param part_info partition_info holding the used key_algorithm - @param current_comment_start NULL, or comment string encapsulating the - PARTITION BY clause. - - @return Operation status. - @retval 0 Success - @retval != 0 Failure -*/ - -static int add_key_with_algorithm(File fptr, partition_info *part_info, - const char *current_comment_start) -{ - int err= 0; - err+= add_part_key_word(fptr, partition_keywords[PKW_KEY].str); - - /* - current_comment_start is given when called from SHOW CREATE TABLE, - Then only add ALGORITHM = 1, not the default 2 or non-set 0! - For .frm current_comment_start is NULL, then add ALGORITHM if != 0. 
- */ - if (part_info->key_algorithm == partition_info::KEY_ALGORITHM_51 || // SHOW - (!current_comment_start && // .frm - (part_info->key_algorithm != partition_info::KEY_ALGORITHM_NONE))) - { - /* If we already are within a comment, end that comment first. */ - if (current_comment_start) - err+= add_string(fptr, "*/ "); - err+= add_string(fptr, "/*!50531 "); - err+= add_part_key_word(fptr, partition_keywords[PKW_ALGORITHM].str); - err+= add_equal(fptr); - err+= add_space(fptr); - err+= add_int(fptr, part_info->key_algorithm); - err+= add_space(fptr); - err+= add_string(fptr, "*/ "); - if (current_comment_start) - { - /* Skip new line. */ - if (current_comment_start[0] == '\n') - current_comment_start++; - err+= add_string(fptr, current_comment_start); - err+= add_space(fptr); - } - } - return err; -} - - /* Generate the partition syntax from the partition data structure. Useful for support of generating defaults, SHOW CREATE TABLES @@ -2440,8 +2421,7 @@ char *generate_partition_syntax(partition_info *part_info, bool use_sql_alloc, bool show_partition_options, HA_CREATE_INFO *create_info, - Alter_info *alter_info, - const char *current_comment_start) + Alter_info *alter_info) { uint i,j, tot_num_parts, num_subparts; partition_element *part_elem; @@ -2475,8 +2455,7 @@ char *generate_partition_syntax(partition_info *part_info, err+= add_string(fptr, partition_keywords[PKW_LINEAR].str); if (part_info->list_of_part_fields) { - err+= add_key_with_algorithm(fptr, part_info, - current_comment_start); + err+= add_part_key_word(fptr, partition_keywords[PKW_KEY].str); err+= add_part_field_list(fptr, part_info->part_field_list); } else @@ -2516,9 +2495,8 @@ char *generate_partition_syntax(partition_info *part_info, err+= add_string(fptr, partition_keywords[PKW_LINEAR].str); if (part_info->list_of_subpart_fields) { - err+= add_key_with_algorithm(fptr, part_info, - current_comment_start); - err+= add_part_field_list(fptr, part_info->subpart_field_list); + 
add_part_key_word(fptr, partition_keywords[PKW_KEY].str); + add_part_field_list(fptr, part_info->subpart_field_list); } else err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str); @@ -2702,114 +2680,13 @@ static inline int part_val_int(Item *item_expr, longlong *result) We have a set of support functions for these 14 variants. There are 4 variants of hash functions and there is a function for each. The KEY - partitioning uses the function calculate_key_value to calculate the hash + partitioning uses the function calculate_key_hash_value to calculate the hash value based on an array of fields. The linear hash variants uses the method get_part_id_from_linear_hash to get the partition id using the hash value and some parameters calculated from the number of partitions. */ /* - Calculate hash value for KEY partitioning using an array of fields. - - SYNOPSIS - calculate_key_value() - field_array An array of the fields in KEY partitioning - - RETURN VALUE - hash_value calculated - - DESCRIPTION - Uses the hash function on the character set of the field. Integer and - floating point fields use the binary character set by default. -*/ - -static uint32 calculate_key_value(Field **field_array) -{ - ulong nr1= 1; - ulong nr2= 4; - bool use_51_hash; - use_51_hash= test((*field_array)->table->part_info->key_algorithm == - partition_info::KEY_ALGORITHM_51); - - do - { - Field *field= *field_array; - if (use_51_hash) - { - switch (field->real_type()) { - case MYSQL_TYPE_TINY: - case MYSQL_TYPE_SHORT: - case MYSQL_TYPE_LONG: - case MYSQL_TYPE_FLOAT: - case MYSQL_TYPE_DOUBLE: - case MYSQL_TYPE_NEWDECIMAL: - case MYSQL_TYPE_TIMESTAMP: - case MYSQL_TYPE_LONGLONG: - case MYSQL_TYPE_INT24: - case MYSQL_TYPE_TIME: - case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_YEAR: - case MYSQL_TYPE_NEWDATE: - { - if (field->is_null()) - { - nr1^= (nr1 << 1) | 1; - continue; - } - /* Force this to my_hash_sort_bin, which was used in 5.1! 
*/ - uint len= field->pack_length(); - my_charset_bin.coll->hash_sort(&my_charset_bin, field->ptr, len, - &nr1, &nr2); - /* Done with this field, continue with next one. */ - continue; - } - case MYSQL_TYPE_STRING: - case MYSQL_TYPE_VARCHAR: - case MYSQL_TYPE_BIT: - /* Not affected, same in 5.1 and 5.5 */ - break; - /* - ENUM/SET uses my_hash_sort_simple in 5.1 (i.e. my_charset_latin1) - and my_hash_sort_bin in 5.5! - */ - case MYSQL_TYPE_ENUM: - case MYSQL_TYPE_SET: - { - if (field->is_null()) - { - nr1^= (nr1 << 1) | 1; - continue; - } - /* Force this to my_hash_sort_bin, which was used in 5.1! */ - uint len= field->pack_length(); - my_charset_latin1.coll->hash_sort(&my_charset_latin1, field->ptr, - len, &nr1, &nr2); - continue; - } - /* These types should not be allowed for partitioning! */ - case MYSQL_TYPE_NULL: - case MYSQL_TYPE_DECIMAL: - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_GEOMETRY: - /* fall through. */ - default: - DBUG_ASSERT(0); // New type? - /* Fall through for default hashing (5.5). */ - } - /* fall through, use collation based hashing. */ - } - field->hash(&nr1, &nr2); - } while (*(++field_array)); - return (uint32) nr1; -} - - -/* A simple support function to calculate part_id given local part and sub part. 
@@ -2896,25 +2773,25 @@ static int get_part_id_linear_hash(partition_info *part_info, } -/* +/** Calculate part_id for (SUB)PARTITION BY KEY - SYNOPSIS - get_part_id_key() - field_array Array of fields for PARTTION KEY - num_parts Number of KEY partitions + @param file Handler to storage engine + @param field_array Array of fields for PARTTION KEY + @param num_parts Number of KEY partitions + @param func_value[out] Returns calculated hash value - RETURN VALUE - Calculated partition id + @return Calculated partition id */ inline -static uint32 get_part_id_key(Field **field_array, +static uint32 get_part_id_key(handler *file, + Field **field_array, uint num_parts, longlong *func_value) { DBUG_ENTER("get_part_id_key"); - *func_value= calculate_key_value(field_array); + *func_value= ha_partition::calculate_key_hash_value(field_array); DBUG_RETURN((uint32) (*func_value % num_parts)); } @@ -2941,7 +2818,7 @@ static uint32 get_part_id_linear_key(partition_info *part_info, { DBUG_ENTER("get_part_id_linear_key"); - *func_value= calculate_key_value(field_array); + *func_value= ha_partition::calculate_key_hash_value(field_array); DBUG_RETURN(get_part_id_from_linear_hash(*func_value, part_info->linear_hash_mask, num_parts)); @@ -3629,7 +3506,8 @@ int get_partition_id_key_nosub(partition_info *part_info, uint32 *part_id, longlong *func_value) { - *part_id= get_part_id_key(part_info->part_field_array, + *part_id= get_part_id_key(part_info->table->file, + part_info->part_field_array, part_info->num_parts, func_value); return 0; } @@ -3719,7 +3597,8 @@ int get_partition_id_key_sub(partition_info *part_info, uint32 *part_id) { longlong func_value; - *part_id= get_part_id_key(part_info->subpart_field_array, + *part_id= get_part_id_key(part_info->table->file, + part_info->subpart_field_array, part_info->num_subparts, &func_value); return FALSE; } @@ -3956,6 +3835,92 @@ void get_full_part_id_from_key(const TABLE *table, uchar *buf, DBUG_VOID_RETURN; } + +/** + @brief Verify that all 
rows in a table is in the given partition + + @param table Table which contains the data that will be checked if + it is matching the partition definition. + @param part_table Partitioned table containing the partition to check. + @param part_id Which partition to match with. + + @return Operation status + @retval TRUE Not all rows match the given partition + @retval FALSE OK +*/ +bool verify_data_with_partition(TABLE *table, TABLE *part_table, + uint32 part_id) +{ + uint32 found_part_id; + longlong func_value; /* Unused */ + handler *file; + int error; + uchar *old_rec; + partition_info *part_info; + DBUG_ENTER("verify_data_with_partition"); + DBUG_ASSERT(table && table->file && part_table && part_table->part_info && + part_table->file); + + /* + Verify all table rows. + First implementation uses full scan + evaluates partition functions for + every row. TODO: add optimization to use index if possible, see WL#5397. + + 1) Open both tables (already done) and set the row buffers to use + the same buffer (to avoid copy). + 2) Init rnd on table. + 3) loop over all rows. + 3.1) verify that partition_id on the row is correct. Break if error. 
+ */ + file= table->file; + part_info= part_table->part_info; + bitmap_union(table->read_set, &part_info->full_part_field_set); + old_rec= part_table->record[0]; + part_table->record[0]= table->record[0]; + set_field_ptr(part_info->full_part_field_array, table->record[0], old_rec); + if ((error= file->ha_rnd_init(TRUE))) + { + file->print_error(error, MYF(0)); + goto err; + } + + do + { + if ((error= file->ha_rnd_next(table->record[0]))) + { + if (error == HA_ERR_RECORD_DELETED) + continue; + if (error == HA_ERR_END_OF_FILE) + error= 0; + else + file->print_error(error, MYF(0)); + break; + } + if ((error= part_info->get_partition_id(part_info, &found_part_id, + &func_value))) + { + part_table->file->print_error(error, MYF(0)); + break; + } + DEBUG_SYNC(current_thd, "swap_partition_first_row_read"); + if (found_part_id != part_id) + { + my_error(ER_ROW_DOES_NOT_MATCH_PARTITION, MYF(0)); + error= 1; + break; + } + } while (TRUE); + (void) file->ha_rnd_end(); +err: + set_field_ptr(part_info->full_part_field_array, old_rec, + table->record[0]); + part_table->record[0]= old_rec; + if (error) + DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); +} + + /* Prune the set of partitions to use in query @@ -3966,7 +3931,7 @@ void get_full_part_id_from_key(const TABLE *table, uchar *buf, DESCRIPTION This function is called to prune the range of partitions to scan by - checking the used_partitions bitmap. + checking the read_partitions bitmap. If start_part > end_part at return it means no partition needs to be scanned. If start_part == end_part it always means a single partition needs to be scanned. 
@@ -3983,7 +3948,7 @@ void prune_partition_set(const TABLE *table, part_id_range *part_spec) DBUG_ENTER("prune_partition_set"); for (i= part_spec->start_part; i <= part_spec->end_part; i++) { - if (bitmap_is_set(&(part_info->used_partitions), i)) + if (bitmap_is_set(&(part_info->read_partitions), i)) { DBUG_PRINT("info", ("Partition %d is set", i)); if (last_partition == -1) @@ -4065,7 +4030,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, */ get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec); /* - Check if range can be adjusted by looking in used_partitions + Check if range can be adjusted by looking in read_partitions */ prune_partition_set(table, part_spec); DBUG_VOID_RETURN; @@ -4117,7 +4082,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec); clear_indicator_in_key_fields(key_info); /* - Check if range can be adjusted by looking in used_partitions + Check if range can be adjusted by looking in read_partitions */ prune_partition_set(table, part_spec); DBUG_VOID_RETURN; @@ -4187,7 +4152,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index, if (found_part_field) clear_indicator_in_key_fields(key_info); /* - Check if range can be adjusted by looking in used_partitions + Check if range can be adjusted by looking in read_partitions */ prune_partition_set(table, part_spec); DBUG_VOID_RETURN; @@ -4258,9 +4223,11 @@ bool mysql_unpack_partition(THD *thd, { bool result= TRUE; partition_info *part_info; - CHARSET_INFO *old_character_set_client= thd->variables.character_set_client; + const CHARSET_INFO *old_character_set_client= + thd->variables.character_set_client; LEX *old_lex= thd->lex; LEX lex; + PSI_statement_locker *parent_locker= thd->m_statement_psi; DBUG_ENTER("mysql_unpack_partition"); thd->variables.character_set_client= system_charset_info; @@ -4290,12 +4257,16 @@ bool mysql_unpack_partition(THD *thd, } 
part_info= lex.part_info; DBUG_PRINT("info", ("Parse: %s", part_buf)); + + thd->m_statement_psi= NULL; if (parse_sql(thd, & parser_state, NULL) || part_info->fix_parser_data(thd)) { thd->free_items(); + thd->m_statement_psi= parent_locker; goto end; } + thd->m_statement_psi= parent_locker; /* The parsed syntax residing in the frm file can still contain defaults. The reason is that the frm file is sometimes saved outside of this @@ -4335,6 +4306,7 @@ bool mysql_unpack_partition(THD *thd, *work_part_info_used= true; } table->part_info= part_info; + part_info->table= table; table->file->set_part_info(part_info); if (!part_info->default_engine_type) part_info->default_engine_type= default_db_type; @@ -4552,7 +4524,7 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info, do { partition_element *part_elem= part_it++; - if ((alter_info->flags & ALTER_ALL_PARTITION) || + if ((alter_info->flags & Alter_info::ALTER_ALL_PARTITION) || (is_name_in_list(part_elem->partition_name, alter_info->partition_names))) { @@ -4571,7 +4543,7 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info, } while (++part_count < tab_part_info->num_parts); if (num_parts_found != alter_info->partition_names.elements && - !(alter_info->flags & ALTER_ALL_PARTITION)) + !(alter_info->flags & Alter_info::ALTER_ALL_PARTITION)) { /* Not all given partitions found, revert and return failure */ part_it.rewind(); @@ -4588,16 +4560,60 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info, /** + @brief Check if partition is exchangable with table by checking table options + + @param table_create_info Table options from table. + @param part_elem All the info of the partition. + + @retval FALSE if they are equal, otherwise TRUE. + + @note Any differens that would cause a change in the frm file is prohibited. + Such options as data_file_name, index_file_name, min_rows, max_rows etc. are + not allowed to differ. But comment is allowed to differ. 
+*/ +bool compare_partition_options(HA_CREATE_INFO *table_create_info, + partition_element *part_elem) +{ +#define MAX_COMPARE_PARTITION_OPTION_ERRORS 5 + const char *option_diffs[MAX_COMPARE_PARTITION_OPTION_ERRORS + 1]; + int i, errors= 0; + DBUG_ENTER("compare_partition_options"); + DBUG_ASSERT(!part_elem->tablespace_name && + !table_create_info->tablespace); + + /* + Note that there are not yet any engine supporting tablespace together + with partitioning. TODO: when there are, add compare. + */ + if (part_elem->tablespace_name || table_create_info->tablespace) + option_diffs[errors++]= "TABLESPACE"; + if (part_elem->part_max_rows != table_create_info->max_rows) + option_diffs[errors++]= "MAX_ROWS"; + if (part_elem->part_min_rows != table_create_info->min_rows) + option_diffs[errors++]= "MIN_ROWS"; + if (part_elem->data_file_name || table_create_info->data_file_name) + option_diffs[errors++]= "DATA DIRECTORY"; + if (part_elem->index_file_name || table_create_info->index_file_name) + option_diffs[errors++]= "INDEX DIRECTORY"; + + for (i= 0; i < errors; i++) + my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0), + option_diffs[i]); + DBUG_RETURN(errors != 0); +} + + +/* Prepare for ALTER TABLE of partition structure @param[in] thd Thread object @param[in] table Table object @param[in,out] alter_info Alter information @param[in,out] create_info Create info for CREATE TABLE - @param[in] old_db_type Old engine type + @param[in] alter_ctx ALTER TABLE runtime context @param[out] partition_changed Boolean indicating whether partition changed - @param[out] fast_alter_table Internal temporary table allowing fast - partition change or NULL if not possible + @param[out] fast_alter_table Boolean indicating if fast partition alter is + possible. 
@return Operation status @retval TRUE Error @@ -4615,22 +4631,26 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info, uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, HA_CREATE_INFO *create_info, - handlerton *old_db_type, + Alter_table_ctx *alter_ctx, bool *partition_changed, - char *db, - const char *table_name, - const char *path, - TABLE **fast_alter_table) + bool *fast_alter_table) { - TABLE *new_table= NULL; DBUG_ENTER("prep_alter_part_table"); /* Foreign keys on partitioned tables are not supported, waits for WL#148 */ - if (table->part_info && (alter_info->flags & ALTER_FOREIGN_KEY)) + if (table->part_info && (alter_info->flags & Alter_info::ADD_FOREIGN_KEY || + alter_info->flags & Alter_info::DROP_FOREIGN_KEY)) { my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0)); DBUG_RETURN(TRUE); } + /* Remove partitioning on a not partitioned table is not possible */ + if (!table->part_info && (alter_info->flags & + Alter_info::ALTER_REMOVE_PARTITIONING)) + { + my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); + DBUG_RETURN(TRUE); + } thd->work_part_info= thd->lex->part_info; @@ -4639,12 +4659,15 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, DBUG_RETURN(TRUE); /* ALTER_ADMIN_PARTITION is handled in mysql_admin_table */ - DBUG_ASSERT(!(alter_info->flags & ALTER_ADMIN_PARTITION)); + DBUG_ASSERT(!(alter_info->flags & Alter_info::ALTER_ADMIN_PARTITION)); if (alter_info->flags & - (ALTER_ADD_PARTITION | ALTER_DROP_PARTITION | - ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION | - ALTER_TABLE_REORG | ALTER_REBUILD_PARTITION)) + (Alter_info::ALTER_ADD_PARTITION | + Alter_info::ALTER_DROP_PARTITION | + Alter_info::ALTER_COALESCE_PARTITION | + Alter_info::ALTER_REORGANIZE_PARTITION | + Alter_info::ALTER_TABLE_REORG | + Alter_info::ALTER_REBUILD_PARTITION)) { partition_info *tab_part_info; partition_info *alt_part_info= thd->work_part_info; @@ -4666,30 +4689,31 @@ uint 
prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, Open it as a copy of the original table, and modify its partition_info object to allow fast_alter_partition_table to perform the changes. */ - DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name, + DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, + alter_ctx->db, + alter_ctx->table_name, MDL_INTENTION_EXCLUSIVE)); - new_table= open_table_uncached(thd, old_db_type, path, db, table_name, 0); - if (!new_table) - DBUG_RETURN(TRUE); - /* - This table may be used for copy rows between partitions - and also read/write columns when fixing the partition_info struct. - */ - new_table->use_all_columns(); - - tab_part_info= new_table->part_info; + tab_part_info= table->part_info; - if (alter_info->flags & ALTER_TABLE_REORG) + if (alter_info->flags & Alter_info::ALTER_TABLE_REORG) { uint new_part_no, curr_part_no; + /* + 'ALTER TABLE t REORG PARTITION' only allowed with auto partition + if default partitioning is used. + */ + if (tab_part_info->part_type != HASH_PARTITION || - tab_part_info->use_default_num_partitions) + ((table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) && + !tab_part_info->use_default_num_partitions) || + ((!(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)) && + tab_part_info->use_default_num_partitions)) { my_error(ER_REORG_NO_PARAM_ERROR, MYF(0)); goto err; } - new_part_no= new_table->file->get_default_no_partitions(create_info); + new_part_no= table->file->get_default_no_partitions(create_info); curr_part_no= tab_part_info->num_parts; if (new_part_no == curr_part_no) { @@ -4698,7 +4722,23 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, after the change as before. Thus we can reply ok immediately without any changes at all. 
*/ - *fast_alter_table= new_table; + flags= table->file->alter_table_flags(alter_info->flags); + if (flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) + { + *fast_alter_table= true; + /* Force table re-open for consistency with the main case. */ + table->m_needs_reopen= true; + } + else + { + /* + Create copy of partition_info to avoid modifying original + TABLE::part_info, to keep it safe for later use. + */ + if (!(tab_part_info= tab_part_info->get_clone())) + DBUG_RETURN(TRUE); + } + thd->work_part_info= tab_part_info; DBUG_RETURN(FALSE); } @@ -4708,7 +4748,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, We will add more partitions, we use the ADD PARTITION without setting the flag for no default number of partitions */ - alter_info->flags|= ALTER_ADD_PARTITION; + alter_info->flags|= Alter_info::ALTER_ADD_PARTITION; thd->work_part_info->num_parts= new_part_no - curr_part_no; } else @@ -4717,21 +4757,41 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, We will remove hash partitions, we use the COALESCE PARTITION without setting the flag for no default number of partitions */ - alter_info->flags|= ALTER_COALESCE_PARTITION; + alter_info->flags|= Alter_info::ALTER_COALESCE_PARTITION; alter_info->num_parts= curr_part_no - new_part_no; } } - if (!(flags= new_table->file->alter_table_flags(alter_info->flags))) + if (!(flags= table->file->alter_table_flags(alter_info->flags))) { my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0)); goto err; } if ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0) - *fast_alter_table= new_table; - DBUG_PRINT("info", ("*fast_alter_table: %p flags: 0x%x", - *fast_alter_table, flags)); - if ((alter_info->flags & ALTER_ADD_PARTITION) || - (alter_info->flags & ALTER_REORGANIZE_PARTITION)) + { + /* + "Fast" change of partitioning is supported in this case. 
+ We will change TABLE::part_info (as this is how we pass + information to storage engine in this case), so the table + must be reopened. + */ + *fast_alter_table= true; + table->m_needs_reopen= true; + } + else + { + /* + "Fast" changing of partitioning is not supported. Create + a copy of TABLE::part_info object, so we can modify it safely. + Modifying original TABLE::part_info will cause problems when + we read data from old version of table using this TABLE object + while copying them to new version of table. + */ + if (!(tab_part_info= tab_part_info->get_clone())) + DBUG_RETURN(TRUE); + } + DBUG_PRINT("info", ("*fast_alter_table flags: 0x%x", flags)); + if ((alter_info->flags & Alter_info::ALTER_ADD_PARTITION) || + (alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION)) { if (thd->work_part_info->part_type != tab_part_info->part_type) { @@ -4798,7 +4858,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, goto err; } } - if (alter_info->flags & ALTER_ADD_PARTITION) + if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION) { /* We start by moving the new partitions to the list of temporary @@ -4849,7 +4909,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, } alt_part_info->part_type= tab_part_info->part_type; alt_part_info->subpart_type= tab_part_info->subpart_type; - if (alt_part_info->set_up_defaults_for_partitioning(new_table->file, 0, + if (alt_part_info->set_up_defaults_for_partitioning(table->file, 0, tab_part_info->num_parts)) { goto err; @@ -5037,7 +5097,7 @@ that are reorganised. of partitions anymore. We use this code also for Table reorganisations and here we don't set any default flags to FALSE. */ - if (!(alter_info->flags & ALTER_TABLE_REORG)) + if (!(alter_info->flags & Alter_info::ALTER_TABLE_REORG)) { if (!alt_part_info->use_default_partitions) { @@ -5048,7 +5108,7 @@ that are reorganised. 
tab_part_info->is_auto_partitioned= FALSE; } } - else if (alter_info->flags & ALTER_DROP_PARTITION) + else if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION) { /* Drop a partition from a range partition and list partitioning is @@ -5092,14 +5152,14 @@ that are reorganised. my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP"); goto err; } - if (new_table->file->is_fk_defined_on_table_or_index(MAX_KEY)) + if (table->file->is_fk_defined_on_table_or_index(MAX_KEY)) { my_error(ER_ROW_IS_REFERENCED, MYF(0)); goto err; } tab_part_info->num_parts-= num_parts_dropped; } - else if (alter_info->flags & ALTER_REBUILD_PARTITION) + else if (alter_info->flags & Alter_info::ALTER_REBUILD_PARTITION) { if (set_part_state(alter_info, tab_part_info, PART_CHANGED)) { @@ -5108,11 +5168,11 @@ that are reorganised. } if (!(*fast_alter_table)) { - new_table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0)); + table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0)); goto err; } } - else if (alter_info->flags & ALTER_COALESCE_PARTITION) + else if (alter_info->flags & Alter_info::ALTER_COALESCE_PARTITION) { uint num_parts_coalesced= alter_info->num_parts; uint num_parts_remain= tab_part_info->num_parts - num_parts_coalesced; @@ -5210,13 +5270,13 @@ state of p1. } while (part_count < tab_part_info->num_parts); tab_part_info->num_parts= num_parts_remain; } - if (!(alter_info->flags & ALTER_TABLE_REORG)) + if (!(alter_info->flags & Alter_info::ALTER_TABLE_REORG)) { tab_part_info->use_default_num_partitions= FALSE; tab_part_info->is_auto_partitioned= FALSE; } } - else if (alter_info->flags & ALTER_REORGANIZE_PARTITION) + else if (alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION) { /* Reorganise partitions takes a number of partitions that are next @@ -5264,8 +5324,9 @@ state of p1. 
alt_part_info->subpart_type= tab_part_info->subpart_type; alt_part_info->num_subparts= tab_part_info->num_subparts; DBUG_ASSERT(!alt_part_info->use_default_partitions); - if (alt_part_info->set_up_defaults_for_partitioning(new_table->file, - 0, 0)) + /* We specified partitions explicitly so don't use defaults anymore. */ + tab_part_info->use_default_partitions= FALSE; + if (alt_part_info->set_up_defaults_for_partitioning(table->file, 0, 0)) { goto err; } @@ -5388,8 +5449,8 @@ the generated partition syntax in a correct manner. } *partition_changed= TRUE; thd->work_part_info= tab_part_info; - if (alter_info->flags & ALTER_ADD_PARTITION || - alter_info->flags & ALTER_REORGANIZE_PARTITION) + if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION || + alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION) { if (tab_part_info->use_default_subpartitions && !alt_part_info->use_default_subpartitions) @@ -5398,7 +5459,7 @@ the generated partition syntax in a correct manner. tab_part_info->use_default_num_subpartitions= FALSE; } if (tab_part_info->check_partition_info(thd, (handlerton**)NULL, - new_table->file, 0, TRUE)) + table->file, 0, TRUE)) { goto err; } @@ -5407,7 +5468,7 @@ the generated partition syntax in a correct manner. since this function "fixes" the item trees of the new partitions to reorganize into */ - if (alter_info->flags == ALTER_REORGANIZE_PARTITION && + if (alter_info->flags == Alter_info::ALTER_REORGANIZE_PARTITION && tab_part_info->part_type == RANGE_PARTITION && ((is_last_partition_reorged && (tab_part_info->column_list ? @@ -5486,15 +5547,17 @@ the generated partition syntax in a correct manner. There was no partitioning before and no partitioning defined. Obviously no work needed. 
*/ - if (table->part_info) + partition_info *tab_part_info= table->part_info; + + if (tab_part_info) { - if (alter_info->flags & ALTER_REMOVE_PARTITIONING) + if (alter_info->flags & Alter_info::ALTER_REMOVE_PARTITIONING) { DBUG_PRINT("info", ("Remove partitioning")); if (!(create_info->used_fields & HA_CREATE_USED_ENGINE)) { DBUG_PRINT("info", ("No explicit engine used")); - create_info->db_type= table->part_info->default_engine_type; + create_info->db_type= tab_part_info->default_engine_type; } DBUG_PRINT("info", ("New engine type: %s", ha_resolve_storage_engine_name(create_info->db_type))); @@ -5506,16 +5569,20 @@ the generated partition syntax in a correct manner. /* Retain partitioning but possibly with a new storage engine beneath. + + Create a copy of TABLE::part_info to be able to modify it freely. */ - thd->work_part_info= table->part_info; + if (!(tab_part_info= tab_part_info->get_clone())) + DBUG_RETURN(TRUE); + thd->work_part_info= tab_part_info; if (create_info->used_fields & HA_CREATE_USED_ENGINE && - create_info->db_type != table->part_info->default_engine_type) + create_info->db_type != tab_part_info->default_engine_type) { /* Make sure change of engine happens to all partitions. */ DBUG_PRINT("info", ("partition changed")); - if (table->part_info->is_auto_partitioned) + if (tab_part_info->is_auto_partitioned) { /* If the user originally didn't specify partitioning to be @@ -5543,25 +5610,14 @@ the generated partition syntax in a correct manner. Need to cater for engine types that can handle partition without using the partition handler. */ - if (part_info != table->part_info) + if (part_info != tab_part_info) { - if (part_info->fix_parser_data(thd)) + DBUG_PRINT("info", ("partition changed")); + *partition_changed= TRUE; + if (thd->work_part_info->fix_parser_data(thd)) { goto err; } - /* - Compare the old and new part_info. If only key_algorithm - change is done, don't consider it as changed partitioning (to avoid - rebuild). 
This is to handle KEY (numeric_cols) partitioned tables - created in 5.1. For more info, see bug#14521864. - */ - if (alter_info->flags != ALTER_PARTITION || - !table->part_info || - !table->part_info->has_same_partitioning(part_info)) - { - DBUG_PRINT("info", ("partition changed")); - *partition_changed= true; - } } /* Set up partition default_engine_type either from the create_info @@ -5571,8 +5627,8 @@ the generated partition syntax in a correct manner. part_info->default_engine_type= create_info->db_type; else { - if (table->part_info) - part_info->default_engine_type= table->part_info->default_engine_type; + if (tab_part_info) + part_info->default_engine_type= tab_part_info->default_engine_type; else part_info->default_engine_type= create_info->db_type; } @@ -5592,15 +5648,7 @@ the generated partition syntax in a correct manner. } DBUG_RETURN(FALSE); err: - if (new_table) - { - /* - Only remove the intermediate table object and its share object, - do not remove the .frm file, since it is the original one. 
- */ - close_temporary(new_table, 1, 0); - } - *fast_alter_table= NULL; + *fast_alter_table= false; DBUG_RETURN(TRUE); } @@ -5641,12 +5689,7 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) build_table_filename(path, sizeof(path) - 1, lpt->db, lpt->table_name, "", 0); - /* First lock the original tables */ - if (file->ha_external_lock(thd, F_WRLCK)) - DBUG_RETURN(TRUE); - - /* Disable transactions for all new tables */ - if (mysql_trans_prepare_alter_copy_data(thd)) + if(mysql_trans_prepare_alter_copy_data(thd)) DBUG_RETURN(TRUE); /* TODO: test if bulk_insert would increase the performance */ @@ -5661,9 +5704,6 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) if (mysql_trans_commit_alter_copy_data(thd)) error= 1; /* The error has been reported */ - if (file->ha_external_lock(thd, F_UNLCK)) - error= 1; - DBUG_RETURN(test(error)); } @@ -5734,6 +5774,11 @@ static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) int error; DBUG_ENTER("mysql_drop_partitions"); + DBUG_ASSERT(lpt->thd->mdl_context.is_lock_owner(MDL_key::TABLE, + lpt->table->s->db.str, + lpt->table->s->table_name.str, + MDL_EXCLUSIVE)); + build_table_filename(path, sizeof(path) - 1, lpt->db, lpt->table_name, "", 0); if ((error= lpt->table->file->ha_drop_partitions(path))) { @@ -6315,7 +6360,8 @@ static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt) if (write_log_changed_partitions(lpt, &next_entry, (const char*)path)) goto error; if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path, - lpt->alter_info->flags & ALTER_REORGANIZE_PARTITION)) + lpt->alter_info->flags & + Alter_info::ALTER_REORGANIZE_PARTITION)) goto error; if (write_log_replace_delete_frm(lpt, next_entry, shadow_path, path, TRUE)) goto error; @@ -6412,47 +6458,54 @@ static void alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt) { THD *thd= lpt->thd; - if (lpt->old_table) - close_all_tables_for_name(thd, lpt->old_table->s, 
HA_EXTRA_NOT_USED); if (lpt->table) { /* - Only remove the intermediate table object and its share object, - do not remove the .frm file, since it is the original one. + Remove all instances of the table and its locks and other resources. */ - close_temporary(lpt->table, 1, 0); + close_all_tables_for_name(thd, lpt->table->s, HA_EXTRA_NOT_USED, NULL); } lpt->table= 0; - lpt->old_table= 0; lpt->table_list->table= 0; - if (thd->locked_tables_list.reopen_tables(thd)) - sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE"); + if (thd->locked_tables_mode) + { + Diagnostics_area *stmt_da= NULL; + Diagnostics_area tmp_stmt_da(true); + + if (thd->is_error()) + { + /* reopen might fail if we have a previous error, use a temporary da. */ + stmt_da= thd->get_stmt_da(); + thd->set_stmt_da(&tmp_stmt_da); + } + + if (thd->locked_tables_list.reopen_tables(thd)) + sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE"); + + if (stmt_da) + thd->set_stmt_da(stmt_da); + } } -/* - Unlock and close table before renaming and dropping partitions - SYNOPSIS - alter_close_tables() - lpt Struct carrying parameters - close_old Close original table too - RETURN VALUES - 0 +/** + Unlock and close table before renaming and dropping partitions. + + @param lpt Struct carrying parameters + + @return Always 0. 
*/ -static int alter_close_tables(ALTER_PARTITION_PARAM_TYPE *lpt, bool close_old) +static int alter_close_table(ALTER_PARTITION_PARAM_TYPE *lpt) { - DBUG_ENTER("alter_close_tables"); + DBUG_ENTER("alter_close_table"); + if (lpt->table->db_stat) { + mysql_lock_remove(lpt->thd, lpt->thd->lock, lpt->table); lpt->table->file->ha_close(); lpt->table->db_stat= 0; // Mark file closed } - if (close_old && lpt->old_table) - { - close_all_tables_for_name(lpt->thd, lpt->old_table->s, HA_EXTRA_NOT_USED); - lpt->old_table= 0; - } DBUG_RETURN(0); } @@ -6474,23 +6527,54 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt, bool close_table) { partition_info *part_info= lpt->part_info; + THD *thd= lpt->thd; + TABLE *table= lpt->table; DBUG_ENTER("handle_alter_part_error"); + DBUG_ASSERT(table->m_needs_reopen); if (close_table) { /* - Since the error handling (ddl_log) needs to drop newly created - partitions they must be closed first to not issue errors. - But we still need some information from the part_info object, - so we clone it first to have a copy. + All instances of this table needs to be closed. + Better to do that here, than leave the cleaning up to others. + Aquire EXCLUSIVE mdl lock if not already aquired. */ + if (!thd->mdl_context.is_lock_owner(MDL_key::TABLE, lpt->db, + lpt->table_name, + MDL_EXCLUSIVE)) + { + if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN)) + { + /* At least remove this instance on failure */ + goto err_exclusive_lock; + } + } + /* Ensure the share is destroyed and reopened. */ part_info= lpt->part_info->get_clone(); - alter_close_tables(lpt, action_completed); + close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL); + } + else + { +err_exclusive_lock: + /* + Temporarily remove it from the locked table list, so that it will get + reopened. 
+ */ + thd->locked_tables_list.unlink_from_list(thd, + table->pos_in_locked_tables, + false); + /* + Make sure that the table is unlocked, closed and removed from + the table cache. + */ + mysql_lock_remove(thd, thd->lock, table); + part_info= lpt->part_info->get_clone(); + close_thread_table(thd, &thd->open_tables); + lpt->table_list->table= NULL; } if (part_info->first_log_entry && - execute_ddl_log_entry(lpt->thd, - part_info->first_log_entry->entry_pos)) + execute_ddl_log_entry(thd, part_info->first_log_entry->entry_pos)) { /* We couldn't recover from error, most likely manual interaction @@ -6503,14 +6587,14 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt, if (drop_partition) { /* Table is still ok, but we left a shadow frm file behind. */ - push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1, "%s %s", "Operation was unsuccessful, table is still intact,", "but it is possible that a shadow frm file was left behind"); } else { - push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1, "%s %s %s %s", "Operation was unsuccessful, table is still intact,", "but it is possible that a shadow frm file was left behind.", @@ -6526,7 +6610,7 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt, Failed during install of shadow frm file, table isn't intact and dropped partitions are still there */ - push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1, "%s %s %s", "Failed during alter of partitions, table is no longer intact.", "The frm file is in an unknown state, and a backup", @@ -6540,7 +6624,7 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt, ask the user to perform the action manually. We remove the log records and ask the user to perform the action manually. 
*/ - push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1, "%s %s", "Failed during drop of partitions, table is intact.", "Manual drop of remaining partitions is required"); @@ -6552,7 +6636,7 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt, certainly in a very bad state so we give user warning and disable the table by writing an ancient frm version into it. */ - push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1, "%s %s %s", "Failed during renaming of partitions. We are now in a position", "where table is not reusable", @@ -6581,11 +6665,31 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt, even though we reported an error the operation was successfully completed. */ - push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,"%s %s", + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1,"%s %s", "Operation was successfully completed by failure handling,", "after failure of normal operation"); } } + + if (thd->locked_tables_mode) + { + Diagnostics_area *stmt_da= NULL; + Diagnostics_area tmp_stmt_da(true); + + if (thd->is_error()) + { + /* reopen might fail if we have a previous error, use a temporary da. */ + stmt_da= thd->get_stmt_da(); + thd->set_stmt_da(&tmp_stmt_da); + } + + if (thd->locked_tables_list.reopen_tables(thd)) + sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE"); + + if (stmt_da) + thd->set_stmt_da(stmt_da); + } + DBUG_VOID_RETURN; } @@ -6602,7 +6706,7 @@ static void downgrade_mdl_if_lock_tables_mode(THD *thd, MDL_ticket *ticket, enum_mdl_type type) { if (thd->locked_tables_mode) - ticket->downgrade_exclusive_lock(type); + ticket->downgrade_lock(type); } @@ -6611,13 +6715,12 @@ static void downgrade_mdl_if_lock_tables_mode(THD *thd, MDL_ticket *ticket, previously prepared. 
@param thd Thread object - @param table Original table object + @param table Original table object with new part_info @param alter_info ALTER TABLE info @param create_info Create info for CREATE TABLE @param table_list List of the table involved @param db Database name of new table @param table_name Table name of new table - @param fast_alter_table Prepared table object @return Operation status @retval TRUE Error @@ -6633,8 +6736,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, HA_CREATE_INFO *create_info, TABLE_LIST *table_list, char *db, - const char *table_name, - TABLE *fast_alter_table) + const char *table_name) { /* Set-up struct used to write frm files */ partition_info *part_info; @@ -6644,10 +6746,10 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, bool close_table_on_failure= FALSE; bool frm_install= FALSE; MDL_ticket *mdl_ticket= table->mdl_ticket; - DBUG_ASSERT(fast_alter_table); DBUG_ENTER("fast_alter_partition_table"); + DBUG_ASSERT(table->m_needs_reopen); - part_info= fast_alter_table->part_info; + part_info= table->part_info; lpt->thd= thd; lpt->table_list= table_list; lpt->part_info= part_info; @@ -6656,8 +6758,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, lpt->db_options= create_info->table_options; if (create_info->row_type == ROW_TYPE_DYNAMIC) lpt->db_options|= HA_OPTION_PACK_RECORD; - lpt->table= fast_alter_table; - lpt->old_table= table; + lpt->table= table; lpt->key_info_buffer= 0; lpt->key_count= 0; lpt->db= db; @@ -6715,7 +6816,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, goto err; } } - else if (alter_info->flags & ALTER_DROP_PARTITION) + else if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION) { /* Now after all checks and setting state on dropped partitions we can @@ -6750,9 +6851,9 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, 3) Write the ddl log to ensure that the operation is completed even in the presence of a MySQL Server crash (the log is executed before any 
other threads are started, so there are no locking issues). - 4) Close all tables that have already been opened but didn't stumble on + 4) Close the table that have already been opened but didn't stumble on the abort locked previously. This is done as part of the - alter_close_tables call. + alter_close_table call. 5) Write the bin log Unfortunately the writing of the binlog is not synchronised with other logging activities. So no matter in which order the binlog @@ -6788,7 +6889,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, (action_completed= TRUE, FALSE) || ERROR_INJECT_CRASH("crash_drop_partition_4") || ERROR_INJECT_ERROR("fail_drop_partition_4") || - alter_close_tables(lpt, action_completed) || + alter_close_table(lpt) || (close_table_on_failure= FALSE, FALSE) || ERROR_INJECT_CRASH("crash_drop_partition_5") || ERROR_INJECT_ERROR("fail_drop_partition_5") || @@ -6815,7 +6916,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, goto err; } } - else if ((alter_info->flags & ALTER_ADD_PARTITION) && + else if ((alter_info->flags & Alter_info::ALTER_ADD_PARTITION) && (part_info->part_type == RANGE_PARTITION || part_info->part_type == LIST_PARTITION)) { @@ -6865,7 +6966,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, ERROR_INJECT_CRASH("crash_add_partition_5") || ERROR_INJECT_ERROR("fail_add_partition_5") || (close_table_on_failure= FALSE, FALSE) || - alter_close_tables(lpt, action_completed) || + alter_close_table(lpt) || ERROR_INJECT_CRASH("crash_add_partition_6") || ERROR_INJECT_ERROR("fail_add_partition_6") || ((!thd->lex->no_write_to_binlog) && @@ -6925,27 +7026,27 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, use a lower lock level. This can be handled inside store_lock in the respective handler. - 0) Write an entry that removes the shadow frm file if crash occurs - 1) Write the shadow frm file of new partitioning + 0) Write an entry that removes the shadow frm file if crash occurs. 
+ 1) Write the shadow frm file of new partitioning. 2) Log such that temporary partitions added in change phase are - removed in a crash situation - 3) Add the new partitions - Copy from the reorganised partitions to the new partitions + removed in a crash situation. + 3) Add the new partitions. + Copy from the reorganised partitions to the new partitions. 4) Get an exclusive metadata lock on the table (waits for all active transactions using this table). This ensures that we can release all other locks on the table and since no one can open the table, there can be no new threads accessing the table. They will be hanging on this exclusive lock. - 5) Log that operation is completed and log all complete actions - needed to complete operation from here - 6) Write bin log - 7) Close all instances of the table and remove them from the table cache. - 8) Prepare handlers for rename and delete of partitions + 5) Close the table. + 6) Log that operation is completed and log all complete actions + needed to complete operation from here. + 7) Write bin log. + 8) Prepare handlers for rename and delete of partitions. 9) Rename and drop the reorged partitions such that they are no longer used and rename those added to their real new names. - 10) Install the shadow frm file - 11) Reopen the table if under lock tables - 12) Complete query + 10) Install the shadow frm file. + 11) Reopen the table if under lock tables. + 12) Complete query. 
*/ if (write_log_drop_shadow_frm(lpt) || ERROR_INJECT_CRASH("crash_change_partition_1") || @@ -6963,22 +7064,22 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED) || ERROR_INJECT_CRASH("crash_change_partition_5") || ERROR_INJECT_ERROR("fail_change_partition_5") || - write_log_final_change_partition(lpt) || - (action_completed= TRUE, FALSE) || + alter_close_table(lpt) || + (close_table_on_failure= FALSE, FALSE) || ERROR_INJECT_CRASH("crash_change_partition_6") || ERROR_INJECT_ERROR("fail_change_partition_6") || + write_log_final_change_partition(lpt) || + (action_completed= TRUE, FALSE) || + ERROR_INJECT_CRASH("crash_change_partition_7") || + ERROR_INJECT_ERROR("fail_change_partition_7") || ((!thd->lex->no_write_to_binlog) && (write_bin_log(thd, FALSE, thd->query(), thd->query_length()), FALSE)) || - ERROR_INJECT_CRASH("crash_change_partition_7") || - ERROR_INJECT_ERROR("fail_change_partition_7") || + ERROR_INJECT_CRASH("crash_change_partition_8") || + ERROR_INJECT_ERROR("fail_change_partition_8") || ((frm_install= TRUE), FALSE) || mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) || (frm_install= FALSE, FALSE) || - ERROR_INJECT_CRASH("crash_change_partition_8") || - ERROR_INJECT_ERROR("fail_change_partition_8") || - alter_close_tables(lpt, action_completed) || - (close_table_on_failure= FALSE, FALSE) || ERROR_INJECT_CRASH("crash_change_partition_9") || ERROR_INJECT_ERROR("fail_change_partition_9") || mysql_drop_partitions(lpt) || @@ -7004,22 +7105,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, */ DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted, table_list)); err: - if (action_completed) - { - /* - Although error occurred, the action was forced to retry for completion. - Therefore we must close+reopen all instances of the table. 
- */ - (void) alter_partition_lock_handling(lpt); - } - else - { - /* - The failed action was reverted, leave the original table as is and - close/destroy the intermediate table object and its share. - */ - close_temporary(lpt->table, 1, 0); - } downgrade_mdl_if_lock_tables_mode(thd, mdl_ticket, MDL_SHARED_NO_READ_WRITE); DBUG_RETURN(TRUE); } @@ -7082,7 +7167,7 @@ void set_key_field_ptr(KEY *key_info, const uchar *new_buf, const uchar *old_buf) { KEY_PART_INFO *key_part= key_info->key_part; - uint key_parts= key_info->key_parts; + uint key_parts= key_info->user_defined_key_parts; uint i= 0; my_ptrdiff_t diff= (new_buf - old_buf); DBUG_ENTER("set_key_field_ptr"); @@ -7118,20 +7203,19 @@ void mem_alloc_error(size_t size) } #ifdef WITH_PARTITION_STORAGE_ENGINE -/* - Return comma-separated list of used partitions in the provided given string +/** + Return comma-separated list of used partitions in the provided given string. - SYNOPSIS - make_used_partitions_str() - part_info IN Partitioning info - parts_str OUT The string to fill + @param part_info Partitioning info + @param[out] parts The resulting list of string to fill - DESCRIPTION - Generate a list of used partitions (from bits in part_info->used_partitions - bitmap), asd store it into the provided String object. + Generate a list of used partitions (from bits in part_info->read_partitions + bitmap), and store it into the provided String object. - NOTE + @note The produced string must not be longer then MAX_PARTITIONS * (1 + FN_LEN). + In case of UPDATE, only the partitions read is given, not the partitions + that was written or locked. 
*/ void make_used_partitions_str(partition_info *part_info, String *parts_str) @@ -7149,7 +7233,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str) List_iterator<partition_element> it2(head_pe->subpartitions); while ((pe= it2++)) { - if (bitmap_is_set(&part_info->used_partitions, partition_id)) + if (bitmap_is_set(&part_info->read_partitions, partition_id)) { if (parts_str->length()) parts_str->append(','); @@ -7169,7 +7253,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str) { while ((pe= it++)) { - if (bitmap_is_set(&part_info->used_partitions, partition_id)) + if (bitmap_is_set(&part_info->read_partitions, partition_id)) { if (parts_str->length()) parts_str->append(','); @@ -8010,8 +8094,7 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter) while (part_iter->field_vals.cur != part_iter->field_vals.end) { longlong dummy; - field->store(part_iter->field_vals.cur++, - ((Field_num*)field)->unsigned_flag); + field->store(part_iter->field_vals.cur++, field->flags & UNSIGNED_FLAG); if ((part_iter->part_info->is_sub_partitioned() && !part_iter->part_info->get_part_partition_id(part_iter->part_info, &part_id, &dummy)) || @@ -8035,12 +8118,11 @@ static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter) part_iter->field_vals.cur= part_iter->field_vals.start; return NOT_A_PARTITION_ID; } - field->store(part_iter->field_vals.cur++, FALSE); + field->store(part_iter->field_vals.cur++, field->flags & UNSIGNED_FLAG); if (part_iter->part_info->get_subpartition_id(part_iter->part_info, &res)) return NOT_A_PARTITION_ID; return res; - } diff --git a/sql/sql_partition.h b/sql/sql_partition.h index cf532c45c66..7f39ddd7a3f 100644 --- a/sql/sql_partition.h +++ b/sql/sql_partition.h @@ -24,6 +24,7 @@ #include "table.h" /* TABLE_LIST */ class Alter_info; +class Alter_table_ctx; class Field; class String; class handler; @@ -53,7 +54,6 @@ typedef struct st_lock_param_type HA_CREATE_INFO 
*create_info; Alter_info *alter_info; TABLE *table; - TABLE *old_table; KEY *key_info_buffer; const char *db; const char *table_name; @@ -75,7 +75,7 @@ typedef struct { } part_id_range; struct st_partition_iter; -#define NOT_A_PARTITION_ID ((uint32)-1) +#define NOT_A_PARTITION_ID UINT_MAX32 bool is_partition_in_list(char *part_name, List<char> list_part_names); char *are_partitions_in_table(partition_info *new_part_info, @@ -125,6 +125,7 @@ bool check_part_func_fields(Field **ptr, bool ok_with_charsets); bool field_is_partition_charset(Field *field); Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs); void mem_alloc_error(size_t size); +void truncate_partition_filename(char *path); /* A "Get next" function for partition iterator. @@ -250,24 +251,23 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, HA_CREATE_INFO *create_info, TABLE_LIST *table_list, char *db, - const char *table_name, - TABLE *fast_alter_table); + const char *table_name); bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info, enum partition_state part_state); uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info, HA_CREATE_INFO *create_info, - handlerton *old_db_type, + Alter_table_ctx *alter_ctx, bool *partition_changed, - char *db, - const char *table_name, - const char *path, - TABLE **fast_alter_table); + bool *fast_alter_table); char *generate_partition_syntax(partition_info *part_info, uint *buf_length, bool use_sql_alloc, bool show_partition_options, HA_CREATE_INFO *create_info, - Alter_info *alter_info, - const char *current_comment_start); + Alter_info *alter_info); +bool verify_data_with_partition(TABLE *table, TABLE *part_table, + uint32 part_id); +bool compare_partition_options(HA_CREATE_INFO *table_create_info, + partition_element *part_elem); bool partition_key_modified(TABLE *table, const MY_BITMAP *fields); #else #define partition_key_modified(X,Y) 0 diff --git a/sql/sql_partition_admin.cc 
b/sql/sql_partition_admin.cc index b9bf3dbc217..1a82413bb07 100644 --- a/sql/sql_partition_admin.cc +++ b/sql/sql_partition_admin.cc @@ -14,8 +14,15 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "sql_parse.h" // check_one_table_access + // check_merge_table_access + // check_one_table_access #include "sql_table.h" // mysql_alter_table, etc. -#include "sql_lex.h" // Sql_statement +#include "sql_cmd.h" // Sql_cmd +#include "sql_alter.h" // Sql_cmd_alter_table +#include "sql_partition.h" // struct partition_info, etc. +#include "debug_sync.h" // DEBUG_SYNC +#include "sql_truncate.h" // mysql_truncate_table, + // Sql_cmd_truncate_table #include "sql_admin.h" // Analyze/Check/.._table_statement #include "sql_partition_admin.h" // Alter_table_*_partition #ifdef WITH_PARTITION_STORAGE_ENGINE @@ -36,41 +43,665 @@ bool Partition_statement_unsupported::execute(THD *) #else -bool Alter_table_analyze_partition_statement::execute(THD *thd) +bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd) +{ + /* Moved from mysql_execute_command */ + LEX *lex= thd->lex; + /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */ + SELECT_LEX *select_lex= &lex->select_lex; + /* first table of first SELECT_LEX */ + TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first; + /* + Code in mysql_alter_table() may modify its HA_CREATE_INFO argument, + so we have to use a copy of this structure to make execution + prepared statement- safe. A shallow copy is enough as no memory + referenced from this structure will be modified. + @todo move these into constructor... 
+ */ + HA_CREATE_INFO create_info(lex->create_info); + Alter_info alter_info(lex->alter_info, thd->mem_root); + ulong priv_needed= ALTER_ACL | DROP_ACL | INSERT_ACL | CREATE_ACL; + + DBUG_ENTER("Sql_cmd_alter_table_exchange_partition::execute"); + + if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */ + DBUG_RETURN(TRUE); + + /* Must be set in the parser */ + DBUG_ASSERT(select_lex->db); + /* also check the table to be exchanged with the partition */ + DBUG_ASSERT(alter_info.flags & Alter_info::ALTER_EXCHANGE_PARTITION); + + if (check_access(thd, priv_needed, first_table->db, + &first_table->grant.privilege, + &first_table->grant.m_internal, + 0, 0) || + check_access(thd, priv_needed, first_table->next_local->db, + &first_table->next_local->grant.privilege, + &first_table->next_local->grant.m_internal, + 0, 0)) + DBUG_RETURN(TRUE); + + if (check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX, FALSE)) + DBUG_RETURN(TRUE); + + /* Not allowed with EXCHANGE PARTITION */ + DBUG_ASSERT(!create_info.data_file_name && !create_info.index_file_name); + + thd->enable_slow_log= opt_log_slow_admin_statements; + DBUG_RETURN(exchange_partition(thd, first_table, &alter_info)); +} + + +/** + @brief Checks that the tables will be able to be used for EXCHANGE PARTITION. + @param table Non partitioned table. + @param part_table Partitioned table. + + @retval FALSE if OK, otherwise error is reported and TRUE is returned. 
+*/ +static bool check_exchange_partition(TABLE *table, TABLE *part_table) +{ + DBUG_ENTER("check_exchange_partition"); + + /* Both tables must exist */ + if (!part_table || !table) + { + my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0)); + DBUG_RETURN(TRUE); + } + + /* The first table must be partitioned, and the second must not */ + if (!part_table->part_info) + { + my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); + DBUG_RETURN(TRUE); + } + if (table->part_info) + { + my_error(ER_PARTITION_EXCHANGE_PART_TABLE, MYF(0), + table->s->table_name.str); + DBUG_RETURN(TRUE); + } + + if (part_table->file->ht != partition_hton) + { + /* + Only allowed on partitioned tables throught the generic ha_partition + handler, i.e not yet for native partitioning (NDB). + */ + my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); + DBUG_RETURN(TRUE); + } + + if (table->file->ht != part_table->part_info->default_engine_type) + { + my_error(ER_MIX_HANDLER_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + + /* Verify that table is not tmp table, partitioned tables cannot be tmp. */ + if (table->s->tmp_table != NO_TMP_TABLE) + { + my_error(ER_PARTITION_EXCHANGE_TEMP_TABLE, MYF(0), + table->s->table_name.str); + DBUG_RETURN(TRUE); + } + + /* The table cannot have foreign keys constraints or be referenced */ + if(!table->file->can_switch_engines()) + { + my_error(ER_PARTITION_EXCHANGE_FOREIGN_KEY, MYF(0), + table->s->table_name.str); + DBUG_RETURN(TRUE); + } + DBUG_RETURN(FALSE); +} + + +/** + @brief Compare table structure/options between a non partitioned table + and a specific partition of a partitioned table. + + @param thd Thread object. + @param table Non partitioned table. + @param part_table Partitioned table. + @param part_elem Partition element to use for partition specific compare. 
+*/ +static bool compare_table_with_partition(THD *thd, TABLE *table, + TABLE *part_table, + partition_element *part_elem) +{ + HA_CREATE_INFO table_create_info, part_create_info; + Alter_info part_alter_info; + Alter_table_ctx part_alter_ctx; // Not used + DBUG_ENTER("compare_table_with_partition"); + + bool metadata_equal= false; + memset(&part_create_info, 0, sizeof(HA_CREATE_INFO)); + memset(&table_create_info, 0, sizeof(HA_CREATE_INFO)); + + update_create_info_from_table(&table_create_info, table); + /* get the current auto_increment value */ + table->file->update_create_info(&table_create_info); + /* mark all columns used, since they are used when preparing the new table */ + part_table->use_all_columns(); + table->use_all_columns(); + if (mysql_prepare_alter_table(thd, part_table, &part_create_info, + &part_alter_info, &part_alter_ctx)) + { + my_error(ER_TABLES_DIFFERENT_METADATA, MYF(0)); + DBUG_RETURN(TRUE); + } + /* db_type is not set in prepare_alter_table */ + part_create_info.db_type= part_table->part_info->default_engine_type; + /* + Since we exchange the partition with the table, allow exchanging + auto_increment value as well. + */ + part_create_info.auto_increment_value= + table_create_info.auto_increment_value; + + /* Check compatible row_types and set create_info accordingly. */ + { + enum row_type part_row_type= part_table->file->get_row_type(); + enum row_type table_row_type= table->file->get_row_type(); + if (part_row_type != table_row_type) + { + my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0), + "ROW_FORMAT"); + DBUG_RETURN(true); + } + part_create_info.row_type= table->s->row_type; + } + + /* + NOTE: ha_blackhole does not support check_if_compatible_data, + so this always fail for blackhole tables. + ha_myisam compares pointers to verify that DATA/INDEX DIRECTORY is + the same, so any table using data/index_file_name will fail. 
+ */ + if (mysql_compare_tables(table, &part_alter_info, &part_create_info, + &metadata_equal)) + { + my_error(ER_TABLES_DIFFERENT_METADATA, MYF(0)); + DBUG_RETURN(TRUE); + } + + DEBUG_SYNC(thd, "swap_partition_after_compare_tables"); + if (!metadata_equal) + { + my_error(ER_TABLES_DIFFERENT_METADATA, MYF(0)); + DBUG_RETURN(TRUE); + } + DBUG_ASSERT(table->s->db_create_options == + part_table->s->db_create_options); + DBUG_ASSERT(table->s->db_options_in_use == + part_table->s->db_options_in_use); + + if (table_create_info.avg_row_length != part_create_info.avg_row_length) + { + my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0), + "AVG_ROW_LENGTH"); + DBUG_RETURN(TRUE); + } + + if (table_create_info.table_options != part_create_info.table_options) + { + my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0), + "TABLE OPTION"); + DBUG_RETURN(TRUE); + } + + if (table->s->table_charset != part_table->s->table_charset) + { + my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0), + "CHARACTER SET"); + DBUG_RETURN(TRUE); + } + + /* + NOTE: We do not support update of frm-file, i.e. change + max/min_rows, data/index_file_name etc. + The workaround is to use REORGANIZE PARTITION to rewrite + the frm file and then use EXCHANGE PARTITION when they are the same. + */ + if (compare_partition_options(&table_create_info, part_elem)) + DBUG_RETURN(TRUE); + + DBUG_RETURN(FALSE); +} + + +/** + @brief Exchange partition/table with ddl log. + + @details How to handle a crash in the middle of the rename (break on error): + 1) register in ddl_log that we are going to exchange swap_table with part. + 2) do the first rename (swap_table -> tmp-name) and sync the ddl_log. + 3) do the second rename (part -> swap_table) and sync the ddl_log. + 4) do the last rename (tmp-name -> part). + 5) mark the entry done. + + Recover by: + 5) is done, All completed. Nothing to recover. + 4) is done see 3). (No mark or sync in the ddl_log...) 
+ 3) is done -> try rename part -> tmp-name (ignore failure) goto 2). + 2) is done -> try rename swap_table -> part (ignore failure) goto 1). + 1) is done -> try rename tmp-name -> swap_table (ignore failure). + before 1) Nothing to recover... + + @param thd Thread handle + @param name name of table/partition 1 (to be exchanged with 2) + @param from_name name of table/partition 2 (to be exchanged with 1) + @param tmp_name temporary name to use while exchaning + @param ht handlerton of the table/partitions + + @return Operation status + @retval TRUE Error + @retval FALSE Success + + @note ha_heap always succeeds in rename (since it is created upon usage). + This is OK when to recover from a crash since all heap are empty and the + recover is done early in the startup of the server (right before + read_init_file which can populate the tables). + + And if no crash we can trust the syncs in the ddl_log. + + What about if the rename is put into a background thread? That will cause + corruption and is avoided by the exlusive metadata lock. 
+*/ +static bool exchange_name_with_ddl_log(THD *thd, + const char *name, + const char *from_name, + const char *tmp_name, + handlerton *ht) +{ + DDL_LOG_ENTRY exchange_entry; + DDL_LOG_MEMORY_ENTRY *log_entry= NULL; + DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL; + bool error= TRUE; + bool error_set= FALSE; + handler *file= NULL; + DBUG_ENTER("exchange_name_with_ddl_log"); + + if (!(file= get_new_handler(NULL, thd->mem_root, ht))) + { + mem_alloc_error(sizeof(handler)); + DBUG_RETURN(TRUE); + } + + /* prepare the action entry */ + exchange_entry.entry_type= DDL_LOG_ENTRY_CODE; + exchange_entry.action_type= DDL_LOG_EXCHANGE_ACTION; + exchange_entry.next_entry= 0; + exchange_entry.name= name; + exchange_entry.from_name= from_name; + exchange_entry.tmp_name= tmp_name; + exchange_entry.handler_name= ha_resolve_storage_engine_name(ht); + exchange_entry.phase= EXCH_PHASE_NAME_TO_TEMP; + + mysql_mutex_lock(&LOCK_gdl); + /* + write to the ddl log what to do by: + 1) write the action entry (i.e. which names to be exchanged) + 2) write the execution entry with a link to the action entry + */ + DBUG_EXECUTE_IF("exchange_partition_fail_1", goto err_no_action_written;); + DBUG_EXECUTE_IF("exchange_partition_abort_1", DBUG_SUICIDE();); + if (write_ddl_log_entry(&exchange_entry, &log_entry)) + goto err_no_action_written; + + DBUG_EXECUTE_IF("exchange_partition_fail_2", goto err_no_execute_written;); + DBUG_EXECUTE_IF("exchange_partition_abort_2", DBUG_SUICIDE();); + if (write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, &exec_log_entry)) + goto err_no_execute_written; + /* ddl_log is written and synced */ + + mysql_mutex_unlock(&LOCK_gdl); + /* + Execute the name exchange. + Do one rename, increase the phase, update the action entry and sync. + In case of errors in the ddl_log we must fail and let the ddl_log try + to revert the changes, since otherwise it could revert the command after + we sent OK to the client. 
+ */ + /* call rename table from table to tmp-name */ + DBUG_EXECUTE_IF("exchange_partition_fail_3", + my_error(ER_ERROR_ON_RENAME, MYF(0), + name, tmp_name, 0, "n/a"); + error_set= TRUE; + goto err_rename;); + DBUG_EXECUTE_IF("exchange_partition_abort_3", DBUG_SUICIDE();); + if (file->ha_rename_table(name, tmp_name)) + { + char errbuf[MYSYS_STRERROR_SIZE]; + my_strerror(errbuf, sizeof(errbuf), my_errno); + my_error(ER_ERROR_ON_RENAME, MYF(0), name, tmp_name, + my_errno, errbuf); + error_set= TRUE; + goto err_rename; + } + DBUG_EXECUTE_IF("exchange_partition_fail_4", goto err_rename;); + DBUG_EXECUTE_IF("exchange_partition_abort_4", DBUG_SUICIDE();); + if (deactivate_ddl_log_entry(log_entry->entry_pos)) + goto err_rename; + + /* call rename table from partition to table */ + DBUG_EXECUTE_IF("exchange_partition_fail_5", + my_error(ER_ERROR_ON_RENAME, MYF(0), + from_name, name, 0, "n/a"); + error_set= TRUE; + goto err_rename;); + DBUG_EXECUTE_IF("exchange_partition_abort_5", DBUG_SUICIDE();); + if (file->ha_rename_table(from_name, name)) + { + char errbuf[MYSYS_STRERROR_SIZE]; + my_strerror(errbuf, sizeof(errbuf), my_errno); + my_error(ER_ERROR_ON_RENAME, MYF(0), from_name, name, + my_errno, errbuf); + error_set= TRUE; + goto err_rename; + } + DBUG_EXECUTE_IF("exchange_partition_fail_6", goto err_rename;); + DBUG_EXECUTE_IF("exchange_partition_abort_6", DBUG_SUICIDE();); + if (deactivate_ddl_log_entry(log_entry->entry_pos)) + goto err_rename; + + /* call rename table from tmp-nam to partition */ + DBUG_EXECUTE_IF("exchange_partition_fail_7", + my_error(ER_ERROR_ON_RENAME, MYF(0), + tmp_name, from_name, 0, "n/a"); + error_set= TRUE; + goto err_rename;); + DBUG_EXECUTE_IF("exchange_partition_abort_7", DBUG_SUICIDE();); + if (file->ha_rename_table(tmp_name, from_name)) + { + char errbuf[MYSYS_STRERROR_SIZE]; + my_strerror(errbuf, sizeof(errbuf), my_errno); + my_error(ER_ERROR_ON_RENAME, MYF(0), tmp_name, from_name, + my_errno, errbuf); + error_set= TRUE; + goto 
err_rename; + } + DBUG_EXECUTE_IF("exchange_partition_fail_8", goto err_rename;); + DBUG_EXECUTE_IF("exchange_partition_abort_8", DBUG_SUICIDE();); + if (deactivate_ddl_log_entry(log_entry->entry_pos)) + goto err_rename; + + /* The exchange is complete and ddl_log is deactivated */ + DBUG_EXECUTE_IF("exchange_partition_fail_9", goto err_rename;); + DBUG_EXECUTE_IF("exchange_partition_abort_9", DBUG_SUICIDE();); + /* all OK */ + error= FALSE; + delete file; + DBUG_RETURN(error); +err_rename: + /* + Nothing to do if any of these commands fails :( the commands itselfs + will log to the error log about the failures... + */ + /* execute the ddl log entry to revert the renames */ + (void) execute_ddl_log_entry(current_thd, log_entry->entry_pos); + mysql_mutex_lock(&LOCK_gdl); + /* mark the execute log entry done */ + (void) write_execute_ddl_log_entry(0, TRUE, &exec_log_entry); + /* release the execute log entry */ + (void) release_ddl_log_memory_entry(exec_log_entry); +err_no_execute_written: + /* release the action log entry */ + (void) release_ddl_log_memory_entry(log_entry); +err_no_action_written: + mysql_mutex_unlock(&LOCK_gdl); + delete file; + if (!error_set) + my_error(ER_DDL_LOG_ERROR, MYF(0)); + DBUG_RETURN(error); +} + + +/** + @brief Swap places between a partition and a table. + + @details Verify that the tables are compatible (same engine, definition etc), + verify that all rows in the table will fit in the partition, + if all OK, rename table to tmp name, rename partition to table + and finally rename tmp name to partition. 
+ + 1) Take upgradable mdl, open tables and then lock them (inited in parse) + 2) Verify that metadata matches + 3) verify data + 4) Upgrade to exclusive mdl for both tables + 5) Rename table <-> partition + 6) Rely on close_thread_tables to release mdl and table locks + + @param thd Thread handle + @param table_list Table where the partition exists as first table, + Table to swap with the partition as second table + @param alter_info Contains partition name to swap + + @note This is a DDL operation so triggers will not be used. +*/ +bool Sql_cmd_alter_table_exchange_partition:: + exchange_partition(THD *thd, TABLE_LIST *table_list, Alter_info *alter_info) +{ + TABLE *part_table, *swap_table; + TABLE_LIST *swap_table_list; + handlerton *table_hton; + partition_element *part_elem; + char *partition_name; + char temp_name[FN_REFLEN+1]; + char part_file_name[FN_REFLEN+1]; + char swap_file_name[FN_REFLEN+1]; + char temp_file_name[FN_REFLEN+1]; + uint swap_part_id; + uint part_file_name_len; + Alter_table_prelocking_strategy alter_prelocking_strategy; + MDL_ticket *swap_table_mdl_ticket= NULL; + MDL_ticket *part_table_mdl_ticket= NULL; + uint table_counter; + bool error= TRUE; + DBUG_ENTER("mysql_exchange_partition"); + DBUG_ASSERT(alter_info->flags & Alter_info::ALTER_EXCHANGE_PARTITION); + + /* Don't allow to exchange with log table */ + swap_table_list= table_list->next_local; + if (check_if_log_table(swap_table_list->db_length, swap_table_list->db, + swap_table_list->table_name_length, + swap_table_list->table_name, 0)) + { + my_error(ER_WRONG_USAGE, MYF(0), "PARTITION", "log table"); + DBUG_RETURN(TRUE); + } + + /* + Currently no MDL lock that allows both read and write and is upgradeable + to exclusive, so leave the lock type to TL_WRITE_ALLOW_READ also on the + partitioned table. + + TODO: add MDL lock that allows both read and write and is upgradable to + exclusive lock. 
This would allow to continue using the partitioned table + also with update/insert/delete while the verification of the swap table + is running. + */ + + /* + NOTE: It is not possible to exchange a crashed partition/table since + we need some info from the engine, which we can only access after open, + to be able to verify the structure/metadata. + */ + table_list->mdl_request.set_type(MDL_SHARED_NO_WRITE); + if (open_tables(thd, &table_list, &table_counter, 0, + &alter_prelocking_strategy)) + DBUG_RETURN(true); + + part_table= table_list->table; + swap_table= swap_table_list->table; + + if (check_exchange_partition(swap_table, part_table)) + DBUG_RETURN(TRUE); + + /* set lock pruning on first table */ + partition_name= alter_info->partition_names.head(); + if (table_list->table->part_info-> + set_named_partition_bitmap(partition_name, strlen(partition_name))) + DBUG_RETURN(true); + + if (lock_tables(thd, table_list, table_counter, 0)) + DBUG_RETURN(true); + + + table_hton= swap_table->file->ht; + + THD_STAGE_INFO(thd, stage_verifying_table); + + /* Will append the partition name later in part_info->get_part_elem() */ + part_file_name_len= build_table_filename(part_file_name, + sizeof(part_file_name), + table_list->db, + table_list->table_name, + "", 0); + build_table_filename(swap_file_name, + sizeof(swap_file_name), + swap_table_list->db, + swap_table_list->table_name, + "", 0); + /* create a unique temp name #sqlx-nnnn_nnnn, x for eXchange */ + my_snprintf(temp_name, sizeof(temp_name), "%sx-%lx_%lx", + tmp_file_prefix, current_pid, thd->thread_id); + if (lower_case_table_names) + my_casedn_str(files_charset_info, temp_name); + build_table_filename(temp_file_name, sizeof(temp_file_name), + table_list->next_local->db, + temp_name, "", FN_IS_TMP); + + if (!(part_elem= part_table->part_info->get_part_elem(partition_name, + part_file_name + + part_file_name_len, + &swap_part_id))) + { + // my_error(ER_UNKNOWN_PARTITION, MYF(0), partition_name, + // 
part_table->alias); + DBUG_RETURN(TRUE); + } + + if (swap_part_id == NOT_A_PARTITION_ID) + { + DBUG_ASSERT(part_table->part_info->is_sub_partitioned()); + my_error(ER_PARTITION_INSTEAD_OF_SUBPARTITION, MYF(0)); + DBUG_RETURN(TRUE); + } + + if (compare_table_with_partition(thd, swap_table, part_table, part_elem)) + DBUG_RETURN(TRUE); + + /* Table and partition has same structure/options, OK to exchange */ + + thd_proc_info(thd, "verifying data with partition"); + + if (verify_data_with_partition(swap_table, part_table, swap_part_id)) + DBUG_RETURN(TRUE); + + /* + Get exclusive mdl lock on both tables, alway the non partitioned table + first. Remember the tickets for downgrading locks later. + */ + swap_table_mdl_ticket= swap_table->mdl_ticket; + part_table_mdl_ticket= part_table->mdl_ticket; + + /* + No need to set used_partitions to only propagate + HA_EXTRA_PREPARE_FOR_RENAME to one part since no built in engine uses + that flag. And the action would probably be to force close all other + instances which is what we are doing any way. + */ + if (wait_while_table_is_used(thd, swap_table, HA_EXTRA_PREPARE_FOR_RENAME) || + wait_while_table_is_used(thd, part_table, HA_EXTRA_PREPARE_FOR_RENAME)) + goto err; + + DEBUG_SYNC(thd, "swap_partition_after_wait"); + + close_all_tables_for_name(thd, swap_table->s, HA_EXTRA_NOT_USED, NULL); + close_all_tables_for_name(thd, part_table->s, HA_EXTRA_NOT_USED, NULL); + + DEBUG_SYNC(thd, "swap_partition_before_rename"); + + if (exchange_name_with_ddl_log(thd, swap_file_name, part_file_name, + temp_file_name, table_hton)) + goto err; + + /* + Reopen tables under LOCK TABLES. Ignore the return value for now. It's + better to keep master/slave in consistent state. Alternative would be to + try to revert the exchange operation and issue error. + */ + (void) thd->locked_tables_list.reopen_tables(thd); + + if ((error= write_bin_log(thd, TRUE, thd->query(), thd->query_length()))) + { + /* + The error is reported in write_bin_log(). 
+ We try to revert to make it easier to keep the master/slave in sync. + */ + (void) exchange_name_with_ddl_log(thd, part_file_name, swap_file_name, + temp_file_name, table_hton); + } + +err: + if (thd->locked_tables_mode) + { + if (swap_table_mdl_ticket) + swap_table_mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); + if (part_table_mdl_ticket) + part_table_mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); + } + + if (!error) + my_ok(thd); + + // For query cache + table_list->table= NULL; + table_list->next_local->table= NULL; + query_cache_invalidate3(thd, table_list, FALSE); + + DBUG_RETURN(error); +} + +bool Sql_cmd_alter_table_analyze_partition::execute(THD *thd) { bool res; - DBUG_ENTER("Alter_table_analyze_partition_statement::execute"); + DBUG_ENTER("Sql_cmd_alter_table_analyze_partition::execute"); /* Flag that it is an ALTER command which administrates partitions, used by ha_partition */ - m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION; - - res= Analyze_table_statement::execute(thd); + thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION; + res= Sql_cmd_analyze_table::execute(thd); + DBUG_RETURN(res); } -bool Alter_table_check_partition_statement::execute(THD *thd) +bool Sql_cmd_alter_table_check_partition::execute(THD *thd) { bool res; - DBUG_ENTER("Alter_table_check_partition_statement::execute"); + DBUG_ENTER("Sql_cmd_alter_table_check_partition::execute"); /* Flag that it is an ALTER command which administrates partitions, used by ha_partition */ - m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION; + thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION; - res= Check_table_statement::execute(thd); + res= Sql_cmd_check_table::execute(thd); DBUG_RETURN(res); } -bool Alter_table_optimize_partition_statement::execute(THD *thd) +bool Sql_cmd_alter_table_optimize_partition::execute(THD *thd) { bool res; DBUG_ENTER("Alter_table_optimize_partition_statement::execute"); @@ -79,46 +710,49 @@ bool 
Alter_table_optimize_partition_statement::execute(THD *thd) Flag that it is an ALTER command which administrates partitions, used by ha_partition */ - m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION; + thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION; - res= Optimize_table_statement::execute(thd); + res= Sql_cmd_optimize_table::execute(thd); DBUG_RETURN(res); } -bool Alter_table_repair_partition_statement::execute(THD *thd) +bool Sql_cmd_alter_table_repair_partition::execute(THD *thd) { bool res; - DBUG_ENTER("Alter_table_repair_partition_statement::execute"); + DBUG_ENTER("Sql_cmd_alter_table_repair_partition::execute"); /* Flag that it is an ALTER command which administrates partitions, used by ha_partition */ - m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION; + thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION; - res= Repair_table_statement::execute(thd); + res= Sql_cmd_repair_table::execute(thd); DBUG_RETURN(res); } -bool Alter_table_truncate_partition_statement::execute(THD *thd) +bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd) { int error; ha_partition *partition; ulong timeout= thd->variables.lock_wait_timeout; TABLE_LIST *first_table= thd->lex->select_lex.table_list.first; + Alter_info *alter_info= &thd->lex->alter_info; + uint table_counter, i; + List<String> partition_names_list; bool binlog_stmt; - DBUG_ENTER("Alter_table_truncate_partition_statement::execute"); + DBUG_ENTER("Sql_cmd_alter_table_truncate_partition::execute"); /* Flag that it is an ALTER command which administrates partitions, used by ha_partition. */ - m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION | - ALTER_TRUNCATE_PARTITION; + thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION | + Alter_info::ALTER_TRUNCATE_PARTITION; /* Fix the lock types (not the same as ordinary ALTER TABLE). 
*/ first_table->lock_type= TL_WRITE; @@ -134,8 +768,8 @@ bool Alter_table_truncate_partition_statement::execute(THD *thd) if (check_one_table_access(thd, DROP_ACL, first_table)) DBUG_RETURN(TRUE); - if (open_and_lock_tables(thd, first_table, FALSE, 0)) - DBUG_RETURN(TRUE); + if (open_tables(thd, &first_table, &table_counter, 0)) + DBUG_RETURN(true); /* TODO: Add support for TRUNCATE PARTITION for NDB and other @@ -149,24 +783,45 @@ bool Alter_table_truncate_partition_statement::execute(THD *thd) DBUG_RETURN(TRUE); } + + /* + Prune all, but named partitions, + to avoid excessive calls to external_lock(). + */ + List_iterator<char> partition_names_it(alter_info->partition_names); + uint num_names= alter_info->partition_names.elements; + for (i= 0; i < num_names; i++) + { + char *partition_name= partition_names_it++; + String *str_partition_name= new (thd->mem_root) + String(partition_name, system_charset_info); + if (!str_partition_name) + DBUG_RETURN(true); + partition_names_list.push_back(str_partition_name); + } + first_table->partition_names= &partition_names_list; + if (first_table->table->part_info->set_partition_bitmaps(first_table)) + DBUG_RETURN(true); + + if (lock_tables(thd, first_table, table_counter, 0)) + DBUG_RETURN(true); + /* Under locked table modes this might still not be an exclusive lock. Hence, upgrade the lock since the handler truncate method mandates an exclusive metadata lock. */ MDL_ticket *ticket= first_table->table->mdl_ticket; - if (thd->mdl_context.upgrade_shared_lock_to_exclusive(ticket, timeout)) + if (thd->mdl_context.upgrade_shared_lock(ticket, MDL_EXCLUSIVE, timeout)) DBUG_RETURN(TRUE); tdc_remove_table(thd, TDC_RT_REMOVE_NOT_OWN, first_table->db, first_table->table_name, FALSE); - partition= (ha_partition *) first_table->table->file; - + partition= (ha_partition*) first_table->table->file; /* Invoke the handler method responsible for truncating the partition. 
*/ - if ((error= partition->truncate_partition(&thd->lex->alter_info, - &binlog_stmt))) - first_table->table->file->print_error(error, MYF(0)); + if ((error= partition->truncate_partition(alter_info, &binlog_stmt))) + partition->print_error(error, MYF(0)); /* All effects of a truncate operation are committed even if the @@ -184,11 +839,15 @@ bool Alter_table_truncate_partition_statement::execute(THD *thd) to a shared one. */ if (thd->locked_tables_mode) - ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE); + ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); if (! error) my_ok(thd); + // Invalidate query cache + DBUG_ASSERT(!first_table->next_local); + query_cache_invalidate3(thd, first_table, FALSE); + DBUG_RETURN(error); } diff --git a/sql/sql_partition_admin.h b/sql/sql_partition_admin.h index 479371c3b4d..9c53744d9bc 100644 --- a/sql/sql_partition_admin.h +++ b/sql/sql_partition_admin.h @@ -22,214 +22,247 @@ Stub class that returns a error if the partition storage engine is not supported. 
*/ -class Partition_statement_unsupported : public Sql_statement +class Sql_cmd_partition_unsupported : public Sql_cmd { public: - Partition_statement_unsupported(LEX *lex) - : Sql_statement(lex) + Sql_cmd_partition_unsupported() {} - ~Partition_statement_unsupported() + ~Sql_cmd_partition_unsupported() {} + /* Override SQLCOM_*, since it is an ALTER command */ + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_ALTER_TABLE; + } + bool execute(THD *thd); }; -class Alter_table_analyze_partition_statement : - public Partition_statement_unsupported +class Sql_cmd_alter_table_exchange_partition : + public Sql_cmd_partition_unsupported { public: - Alter_table_analyze_partition_statement(LEX *lex) - : Partition_statement_unsupported(lex) + Sql_cmd_alter_table_exchange_partition() {} - ~Alter_table_analyze_partition_statement() + ~Sql_cmd_alter_table_exchange_partition() {} }; -class Alter_table_check_partition_statement : - public Partition_statement_unsupported +class Sql_cmd_alter_table_analyze_partition : + public Sql_cmd_partition_unsupported { public: - Alter_table_check_partition_statement(LEX *lex) - : Partition_statement_unsupported(lex) + Sql_cmd_alter_table_analyze_partition() {} - ~Alter_table_check_partition_statement() + ~Sql_cmd_alter_table_analyze_partition() {} }; -class Alter_table_optimize_partition_statement : - public Partition_statement_unsupported +class Sql_cmd_alter_table_check_partition : + public Sql_cmd_partition_unsupported { public: - Alter_table_optimize_partition_statement(LEX *lex) - : Partition_statement_unsupported(lex) + Sql_cmd_alter_table_check_partition() {} - ~Alter_table_optimize_partition_statement() + ~Sql_cmd_alter_table_check_partition() {} }; -class Alter_table_repair_partition_statement : - public Partition_statement_unsupported +class Sql_cmd_alter_table_optimize_partition : + public Sql_cmd_partition_unsupported { public: - Alter_table_repair_partition_statement(LEX *lex) - : 
Partition_statement_unsupported(lex) + Sql_cmd_alter_table_optimize_partition() {} - ~Alter_table_repair_partition_statement() + ~Sql_cmd_alter_table_optimize_partition() {} }; -class Alter_table_truncate_partition_statement : - public Partition_statement_unsupported +class Sql_cmd_alter_table_repair_partition : + public Sql_cmd_partition_unsupported { public: - Alter_table_truncate_partition_statement(LEX *lex) - : Partition_statement_unsupported(lex) + Sql_cmd_alter_table_repair_partition() {} - ~Alter_table_truncate_partition_statement() + ~Sql_cmd_alter_table_repair_partition() {} }; +class Sql_cmd_alter_table_truncate_partition : + public Sql_cmd_partition_unsupported +{ +public: + Sql_cmd_alter_table_truncate_partition() + {} + + ~Sql_cmd_alter_table_truncate_partition() + {} +}; + #else /** Class that represents the ALTER TABLE t1 ANALYZE PARTITION p statement. */ -class Alter_table_analyze_partition_statement : public Analyze_table_statement +class Sql_cmd_alter_table_exchange_partition : public Sql_cmd_common_alter_table { public: /** - Constructor, used to represent a ALTER TABLE ANALYZE PARTITION statement. - @param lex the LEX structure for this statement. + Constructor, used to represent a ALTER TABLE EXCHANGE PARTITION statement. */ - Alter_table_analyze_partition_statement(LEX *lex) - : Analyze_table_statement(lex) + Sql_cmd_alter_table_exchange_partition() + : Sql_cmd_common_alter_table() {} - ~Alter_table_analyze_partition_statement() + ~Sql_cmd_alter_table_exchange_partition() {} + bool execute(THD *thd); + +private: + bool exchange_partition(THD *thd, TABLE_LIST *, Alter_info *); +}; + + +/** + Class that represents the ALTER TABLE t1 ANALYZE PARTITION p statement. +*/ +class Sql_cmd_alter_table_analyze_partition : public Sql_cmd_analyze_table +{ +public: /** - Execute a ALTER TABLE ANALYZE PARTITION statement at runtime. - @param thd the current thread. - @return false on success. 
+ Constructor, used to represent a ALTER TABLE ANALYZE PARTITION statement. */ + Sql_cmd_alter_table_analyze_partition() + : Sql_cmd_analyze_table() + {} + + ~Sql_cmd_alter_table_analyze_partition() + {} + bool execute(THD *thd); + + /* Override SQLCOM_ANALYZE, since it is an ALTER command */ + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_ALTER_TABLE; + } }; /** Class that represents the ALTER TABLE t1 CHECK PARTITION p statement. */ -class Alter_table_check_partition_statement : public Check_table_statement +class Sql_cmd_alter_table_check_partition : public Sql_cmd_check_table { public: /** Constructor, used to represent a ALTER TABLE CHECK PARTITION statement. - @param lex the LEX structure for this statement. */ - Alter_table_check_partition_statement(LEX *lex) - : Check_table_statement(lex) + Sql_cmd_alter_table_check_partition() + : Sql_cmd_check_table() {} - ~Alter_table_check_partition_statement() + ~Sql_cmd_alter_table_check_partition() {} - /** - Execute a ALTER TABLE CHECK PARTITION statement at runtime. - @param thd the current thread. - @return false on success. - */ bool execute(THD *thd); + + /* Override SQLCOM_CHECK, since it is an ALTER command */ + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_ALTER_TABLE; + } }; /** Class that represents the ALTER TABLE t1 OPTIMIZE PARTITION p statement. */ -class Alter_table_optimize_partition_statement : public Optimize_table_statement +class Sql_cmd_alter_table_optimize_partition : public Sql_cmd_optimize_table { public: /** Constructor, used to represent a ALTER TABLE OPTIMIZE PARTITION statement. - @param lex the LEX structure for this statement. 
*/ - Alter_table_optimize_partition_statement(LEX *lex) - : Optimize_table_statement(lex) + Sql_cmd_alter_table_optimize_partition() + : Sql_cmd_optimize_table() {} - ~Alter_table_optimize_partition_statement() + ~Sql_cmd_alter_table_optimize_partition() {} - /** - Execute a ALTER TABLE OPTIMIZE PARTITION statement at runtime. - @param thd the current thread. - @return false on success. - */ bool execute(THD *thd); + + /* Override SQLCOM_OPTIMIZE, since it is an ALTER command */ + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_ALTER_TABLE; + } }; /** Class that represents the ALTER TABLE t1 REPAIR PARTITION p statement. */ -class Alter_table_repair_partition_statement : public Repair_table_statement +class Sql_cmd_alter_table_repair_partition : public Sql_cmd_repair_table { public: /** Constructor, used to represent a ALTER TABLE REPAIR PARTITION statement. - @param lex the LEX structure for this statement. */ - Alter_table_repair_partition_statement(LEX *lex) - : Repair_table_statement(lex) + Sql_cmd_alter_table_repair_partition() + : Sql_cmd_repair_table() {} - ~Alter_table_repair_partition_statement() + ~Sql_cmd_alter_table_repair_partition() {} - /** - Execute a ALTER TABLE REPAIR PARTITION statement at runtime. - @param thd the current thread. - @return false on success. - */ bool execute(THD *thd); + + /* Override SQLCOM_REPAIR, since it is an ALTER command */ + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_ALTER_TABLE; + } }; /** Class that represents the ALTER TABLE t1 TRUNCATE PARTITION p statement. */ -class Alter_table_truncate_partition_statement : public Sql_statement +class Sql_cmd_alter_table_truncate_partition : public Sql_cmd_truncate_table { public: /** Constructor, used to represent a ALTER TABLE TRUNCATE PARTITION statement. - @param lex the LEX structure for this statement. 
*/ - Alter_table_truncate_partition_statement(LEX *lex) - : Sql_statement(lex) + Sql_cmd_alter_table_truncate_partition() {} - virtual ~Alter_table_truncate_partition_statement() + virtual ~Sql_cmd_alter_table_truncate_partition() {} - /** - Execute a ALTER TABLE TRUNCATE PARTITION statement at runtime. - @param thd the current thread. - @return false on success. - */ bool execute(THD *thd); + + /* Override SQLCOM_TRUNCATE, since it is an ALTER command */ + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_ALTER_TABLE; + } }; #endif /* WITH_PARTITION_STORAGE_ENGINE */ diff --git a/sql/sql_plist.h b/sql/sql_plist.h index 2b6f1067321..8e8c7fcaefb 100644 --- a/sql/sql_plist.h +++ b/sql/sql_plist.h @@ -18,7 +18,7 @@ #include <my_global.h> -template <typename T, typename B, typename C, typename I> +template <typename T, typename L> class I_P_List_iterator; class I_P_List_null_counter; template <typename T> class I_P_List_no_push_back; @@ -151,10 +151,14 @@ public: I::set_last(&rhs.m_first); C::swap(rhs); } + typedef B Adapter; + typedef I_P_List<T, B, C, I> Base; + typedef I_P_List_iterator<T, Base> Iterator; + typedef I_P_List_iterator<const T, Base> Const_Iterator; #ifndef _lint - friend class I_P_List_iterator<T, B, C, I>; + friend class I_P_List_iterator<T, Base>; + friend class I_P_List_iterator<const T, Base>; #endif - typedef I_P_List_iterator<T, B, C, I> Iterator; }; @@ -162,33 +166,33 @@ public: Iterator for I_P_List. 
*/ -template <typename T, typename B, - typename C = I_P_List_null_counter, - typename I = I_P_List_no_push_back<T> > +template <typename T, typename L> class I_P_List_iterator { - const I_P_List<T, B, C, I> *list; + const L *list; T *current; public: - I_P_List_iterator(const I_P_List<T, B, C, I> &a) + I_P_List_iterator(const L &a) : list(&a), current(a.m_first) {} - I_P_List_iterator(const I_P_List<T, B, C, I> &a, T* current_arg) + I_P_List_iterator(const L &a, T* current_arg) : list(&a), current(current_arg) {} - inline void init(const I_P_List<T, B, C, I> &a) + inline void init(const L &a) { list= &a; current= a.m_first; } + /* Operator for it++ */ inline T* operator++(int) { T *result= current; if (result) - current= *B::next_ptr(current); + current= *L::Adapter::next_ptr(current); return result; } + /* Operator for ++it */ inline T* operator++() { - current= *B::next_ptr(current); + current= *L::Adapter::next_ptr(current); return current; } inline void rewind() @@ -207,7 +211,7 @@ template <typename T, T* T::*next, T** T::*prev> struct I_P_List_adapter { static inline T **next_ptr(T *el) { return &(el->*next); } - + static inline const T* const* next_ptr(const T *el) { return &(el->*next); } static inline T ***prev_ptr(T *el) { return &(el->*prev); } }; diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 1260fb8cb3a..e89054ac849 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -686,7 +686,7 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl, for (i=0; (old= (struct st_maria_plugin *)(ptr + i * sizeof_st_plugin))->info; i++) - memcpy(cur + i, old, min(sizeof(cur[i]), sizeof_st_plugin)); + memcpy(cur + i, old, MY_MIN(sizeof(cur[i]), sizeof_st_plugin)); sym= cur; plugin_dl->allocated= true; @@ -2009,7 +2009,7 @@ static bool finalize_install(THD *thd, TABLE *table, const LEX_STRING *name) if (tmp->state == PLUGIN_IS_DISABLED) { if (global_system_variables.log_warnings) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + 
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_CANT_INITIALIZE_UDF, ER(ER_CANT_INITIALIZE_UDF), name->str, "Plugin is disabled"); } @@ -2170,7 +2170,7 @@ static bool do_uninstall(THD *thd, TABLE *table, const LEX_STRING *name) plugin->state= PLUGIN_IS_DELETED; if (plugin->ref_count) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, WARN_PLUGIN_BUSY, ER(WARN_PLUGIN_BUSY)); else reap_needed= true; diff --git a/sql/sql_plugin_services.h b/sql/sql_plugin_services.h index 838b994b6e8..6b70048345a 100644 --- a/sql/sql_plugin_services.h +++ b/sql/sql_plugin_services.h @@ -59,6 +59,11 @@ static struct thd_timezone_service_st thd_timezone_handler= { thd_gmt_sec_to_TIME }; +static struct my_sha1_service_st my_sha1_handler = { + my_sha1, + my_sha1_multi +}; + static struct st_service_ref list_of_services[]= { { "my_snprintf_service", VERSION_my_snprintf, &my_snprintf_handler }, @@ -68,5 +73,6 @@ static struct st_service_ref list_of_services[]= { "debug_sync_service", VERSION_debug_sync, 0 }, // updated in plugin_init() { "thd_kill_statement_service", VERSION_kill_statement, &thd_kill_statement_handler }, { "thd_timezone_service", VERSION_thd_timezone, &thd_timezone_handler }, + { "my_sha1_service", VERSION_my_sha1, &my_sha1_handler} }; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 220a2a16db6..120cfc3e350 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -106,6 +106,7 @@ When one supplies long data for a placeholder: #include "sp_head.h" #include "sp.h" #include "sp_cache.h" +#include "sql_handler.h" // mysql_ha_rm_tables #include "probes_mysql.h" #ifdef EMBEDDED_LIBRARY /* include MYSQL_BIND headers */ @@ -343,7 +344,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) int2store(buff+5, columns); int2store(buff+7, stmt->param_count); buff[9]= 0; // Guard against a 4.1 client - tmp= min(stmt->thd->warning_info->statement_warn_count(), 65535); + tmp= 
MY_MIN(stmt->thd->get_stmt_da()->current_statement_warn_count(), 65535); int2store(buff+10, tmp); /* @@ -360,7 +361,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) if (!error) /* Flag that a response has already been sent */ - thd->stmt_da->disable_status(); + thd->get_stmt_da()->disable_status(); DBUG_RETURN(error); } @@ -373,7 +374,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, thd->client_stmt_id= stmt->id; thd->client_param_count= stmt->param_count; thd->clear_error(); - thd->stmt_da->disable_status(); + thd->get_stmt_da()->disable_status(); return 0; } @@ -1253,6 +1254,17 @@ static bool mysql_test_insert(Prepared_statement *stmt, List_item *values; DBUG_ENTER("mysql_test_insert"); + /* + Since INSERT DELAYED doesn't support temporary tables, we could + not pre-open temporary tables for SQLCOM_INSERT / SQLCOM_REPLACE. + Open them here instead. + */ + if (table_list->lock_type != TL_WRITE_DELAYED) + { + if (open_temporary_tables(thd, table_list)) + goto error; + } + if (insert_precheck(thd, table_list)) goto error; @@ -1820,6 +1832,13 @@ static bool mysql_test_create_view(Prepared_statement *stmt) if (create_view_precheck(thd, tables, view, lex->create_view_mode)) goto err; + /* + Since we can't pre-open temporary tables for SQLCOM_CREATE_VIEW, + (see mysql_create_view) we have to do it here instead. + */ + if (open_temporary_tables(thd, tables)) + goto err; + if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL, DT_PREPARE)) goto err; @@ -2055,7 +2074,20 @@ static bool check_prepared_statement(Prepared_statement *stmt) /* Reset warning count for each query that uses tables */ if (tables) - thd->warning_info->opt_clear_warning_info(thd->query_id); + thd->get_stmt_da()->opt_clear_warning_info(thd->query_id); + + if (sql_command_flags[sql_command] & CF_HA_CLOSE) + mysql_ha_rm_tables(thd, tables); + + /* + Open temporary tables that are known now. 
Temporary tables added by + prelocking will be opened afterwards (during open_tables()). + */ + if (sql_command_flags[sql_command] & CF_PREOPEN_TMP_TABLES) + { + if (open_temporary_tables(thd, tables)) + goto error; + } switch (sql_command) { case SQLCOM_REPLACE: @@ -2859,7 +2891,7 @@ void mysqld_stmt_close(THD *thd, char *packet) Prepared_statement *stmt; DBUG_ENTER("mysqld_stmt_close"); - thd->stmt_da->disable_status(); + thd->get_stmt_da()->disable_status(); if (!(stmt= find_prepared_statement(thd, stmt_id))) DBUG_VOID_RETURN; @@ -2935,7 +2967,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) status_var_increment(thd->status_var.com_stmt_send_long_data); - thd->stmt_da->disable_status(); + thd->get_stmt_da()->disable_status(); #ifndef EMBEDDED_LIBRARY /* Minimal size of long data packet is 6 bytes */ if (packet_length < MYSQL_LONG_DATA_HEADER) @@ -2964,26 +2996,23 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) param= stmt->param_array[param_number]; - Diagnostics_area new_stmt_da, *save_stmt_da= thd->stmt_da; - Warning_info new_warnning_info(thd->query_id, false); - Warning_info *save_warinig_info= thd->warning_info; + Diagnostics_area new_stmt_da(thd->query_id, false, true); + Diagnostics_area *save_stmt_da= thd->get_stmt_da(); - thd->stmt_da= &new_stmt_da; - thd->warning_info= &new_warnning_info; + thd->set_stmt_da(&new_stmt_da); #ifndef EMBEDDED_LIBRARY param->set_longdata(packet, (ulong) (packet_end - packet)); #else param->set_longdata(thd->extra_data, thd->extra_length); #endif - if (thd->stmt_da->is_error()) + if (thd->get_stmt_da()->is_error()) { stmt->state= Query_arena::STMT_ERROR; - stmt->last_errno= thd->stmt_da->sql_errno(); - strncpy(stmt->last_error, thd->stmt_da->message(), MYSQL_ERRMSG_SIZE); + stmt->last_errno= thd->get_stmt_da()->sql_errno(); + strncpy(stmt->last_error, thd->get_stmt_da()->message(), MYSQL_ERRMSG_SIZE); } - thd->stmt_da= save_stmt_da; - thd->warning_info= 
save_warinig_info; + thd->set_stmt_da(save_stmt_da); general_log_print(thd, thd->get_command(), NullS); @@ -3059,8 +3088,7 @@ Reprepare_observer::report_error(THD *thd) that this thread execution stops and returns to the caller, backtracking all the way to Prepared_statement::execute_loop(). */ - thd->stmt_da->set_error_status(thd, ER_NEED_REPREPARE, - ER(ER_NEED_REPREPARE), "HY000"); + thd->get_stmt_da()->set_error_status(ER_NEED_REPREPARE); m_invalidated= TRUE; return TRUE; @@ -3526,7 +3554,6 @@ Prepared_statement::execute_loop(String *expanded_query, Reprepare_observer reprepare_observer; bool error; int reprepare_attempt= 0; - bool need_set_parameters= true; /* Check if we got an error when sending long data */ if (state == Query_arena::STMT_ERROR) @@ -3535,20 +3562,19 @@ Prepared_statement::execute_loop(String *expanded_query, return TRUE; } -reexecute: - if (need_set_parameters && - set_parameters(expanded_query, packet, packet_end)) + if (set_parameters(expanded_query, packet, packet_end)) return TRUE; - /* - if set_parameters() has generated warnings, - we need to repeat it when reexecuting, to recreate these - warnings. - */ - need_set_parameters= thd->warning_info->statement_warn_count(); - - reprepare_observer.reset_reprepare_observer(); +#ifdef NOT_YET_FROM_MYSQL_5_6 + if (unlikely(thd->security_ctx->password_expired && + !lex->is_change_password)) + { + my_error(ER_MUST_CHANGE_PASSWORD, MYF(0)); + return true; + } +#endif +reexecute: /* If the free_list is not empty, we'll wrongly free some externally allocated items when cleaning up after validation of the prepared @@ -3562,22 +3588,24 @@ reexecute: the observer method will be invoked to push an error into the error stack. 
*/ - if (sql_command_flags[lex->sql_command] & - CF_REEXECUTION_FRAGILE) + + if (sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) { + reprepare_observer.reset_reprepare_observer(); DBUG_ASSERT(thd->m_reprepare_observer == NULL); - thd->m_reprepare_observer = &reprepare_observer; + thd->m_reprepare_observer= &reprepare_observer; } error= execute(expanded_query, open_cursor) || thd->is_error(); thd->m_reprepare_observer= NULL; - if (error && !thd->is_fatal_error && !thd->killed && + if ((sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) && + error && !thd->is_fatal_error && !thd->killed && reprepare_observer.is_invalidated() && reprepare_attempt++ < MAX_REPREPARE_ATTEMPTS) { - DBUG_ASSERT(thd->stmt_da->sql_errno() == ER_NEED_REPREPARE); + DBUG_ASSERT(thd->get_stmt_da()->sql_errno() == ER_NEED_REPREPARE); thd->clear_error(); error= reprepare(); @@ -3679,7 +3707,7 @@ Prepared_statement::reprepare() Sic: we can't simply silence warnings during reprepare, because if it's failed, we need to return all the warnings to the user. 
*/ - thd->warning_info->clear_warning_info(thd->query_id); + thd->get_stmt_da()->clear_warning_info(thd->query_id); } return error; } @@ -4041,7 +4069,7 @@ Ed_result_set::Ed_result_set(List<Ed_row> *rows_arg, */ Ed_connection::Ed_connection(THD *thd) - :m_warning_info(thd->query_id, false, true), + :m_diagnostics_area(thd->query_id, false, true), m_thd(thd), m_rsets(0), m_current_rset(0) @@ -4067,7 +4095,7 @@ Ed_connection::free_old_result() } m_current_rset= m_rsets; m_diagnostics_area.reset_diagnostics_area(); - m_warning_info.clear_warning_info(m_thd->query_id); + m_diagnostics_area.clear_warning_info(m_thd->query_id); } @@ -4104,23 +4132,20 @@ bool Ed_connection::execute_direct(Server_runnable *server_runnable) Protocol_local protocol_local(m_thd, this); Prepared_statement stmt(m_thd); Protocol *save_protocol= m_thd->protocol; - Diagnostics_area *save_diagnostics_area= m_thd->stmt_da; - Warning_info *save_warning_info= m_thd->warning_info; + Diagnostics_area *save_diagnostics_area= m_thd->get_stmt_da(); DBUG_ENTER("Ed_connection::execute_direct"); free_old_result(); /* Delete all data from previous execution, if any */ m_thd->protocol= &protocol_local; - m_thd->stmt_da= &m_diagnostics_area; - m_thd->warning_info= &m_warning_info; + m_thd->set_stmt_da(&m_diagnostics_area); rc= stmt.execute_server_runnable(server_runnable); m_thd->protocol->end_statement(); m_thd->protocol= save_protocol; - m_thd->stmt_da= save_diagnostics_area; - m_thd->warning_info= save_warning_info; + m_thd->set_stmt_da(save_diagnostics_area); /* Protocol_local makes use of m_current_rset to keep track of the last result set, while adding result sets to the end. diff --git a/sql/sql_prepare.h b/sql/sql_prepare.h index e0891bbd188..ea5ebddb561 100644 --- a/sql/sql_prepare.h +++ b/sql/sql_prepare.h @@ -253,16 +253,9 @@ public: */ ulong get_warn_count() const { - return m_warning_info.warn_count(); + return m_diagnostics_area.warn_count(); } - /** - Get the server warnings as a result set. 
- The result set has fixed metadata: - The first column is the level. - The second is a numeric code. - The third is warning text. - */ - List<MYSQL_ERROR> *get_warn_list() { return &m_warning_info.warn_list(); } + /** The following members are only valid if execute_direct() or move_to_next_result() returned an error. @@ -311,7 +304,6 @@ public: ~Ed_connection() { free_old_result(); } private: Diagnostics_area m_diagnostics_area; - Warning_info m_warning_info; /** Execute direct interface does not support multi-statements, only multi-results. So we never have a situation when we have diff --git a/sql/sql_priv.h b/sql/sql_priv.h index 9891cf1b24e..383888bac30 100644 --- a/sql/sql_priv.h +++ b/sql/sql_priv.h @@ -50,7 +50,7 @@ do { \ compile_time_assert(MYSQL_VERSION_ID < VerHi * 10000 + VerLo * 100); \ if (((THD *) Thd) != NULL) \ - push_warning_printf(((THD *) Thd), MYSQL_ERROR::WARN_LEVEL_WARN, \ + push_warning_printf(((THD *) Thd), Sql_condition::WARN_LEVEL_WARN, \ ER_WARN_DEPRECATED_SYNTAX, \ ER(ER_WARN_DEPRECATED_SYNTAX), \ (Old), (New)); \ diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc index feb7810fa28..dc7aacb3d94 100644 --- a/sql/sql_profile.cc +++ b/sql/sql_profile.cc @@ -288,7 +288,7 @@ void QUERY_PROFILE::set_query_source(char *query_source_arg, uint query_length_arg) { /* Truncate to avoid DoS attacks. 
*/ - uint length= min(MAX_QUERY_LENGTH, query_length_arg); + uint length= MY_MIN(MAX_QUERY_LENGTH, query_length_arg); DBUG_ASSERT(query_source == NULL); /* we don't leak memory */ if (query_source_arg != NULL) diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc index f430c1b3a5d..f3eab6b84cf 100644 --- a/sql/sql_reload.cc +++ b/sql/sql_reload.cc @@ -176,7 +176,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, mysql_mutex_lock(&LOCK_active_mi); if (!(mi= (master_info_index-> get_master_info(&connection_name, - MYSQL_ERROR::WARN_LEVEL_ERROR)))) + Sql_condition::WARN_LEVEL_ERROR)))) { result= 1; } @@ -349,7 +349,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options, if (!(mi= (master_info_index-> get_master_info(&lex_mi->connection_name, - MYSQL_ERROR::WARN_LEVEL_ERROR)))) + Sql_condition::WARN_LEVEL_ERROR)))) { result= 1; } diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index c957076ac4f..78acb4a519f 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -39,8 +39,8 @@ static bool do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list); /* - Every second entry in the table_list is the original name and every - second entry is the new name. + Every two entries in the table_list form a pair of original name and + the new name. 
*/ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent) @@ -144,7 +144,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent) } if (lock_table_names(thd, table_list, 0, thd->variables.lock_wait_timeout, - MYSQL_OPEN_SKIP_TEMPORARY)) + 0)) goto err; error=0; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 802161a09a9..5a93f3b819a 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -2783,7 +2783,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) { /* Issuing warning then started without --skip-slave-start */ if (!opt_skip_slave_start) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_MISSING_SKIP_SLAVE, ER(ER_MISSING_SKIP_SLAVE)); } @@ -2791,7 +2791,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) mysql_mutex_unlock(&mi->rli.data_lock); } else if (thd->lex->mi.pos || thd->lex->mi.relay_log_pos) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNTIL_COND_IGNORED, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNTIL_COND_IGNORED, ER(ER_UNTIL_COND_IGNORED)); if (!slave_errno) @@ -2808,7 +2808,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report) else { /* no error if all threads are already started, only a warning */ - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING, ER(ER_SLAVE_WAS_RUNNING)); } @@ -2874,7 +2874,7 @@ int stop_slave(THD* thd, Master_info* mi, bool net_report ) { //no error if both threads are already stopped, only a warning slave_errno= 0; - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_NOT_RUNNING, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SLAVE_WAS_NOT_RUNNING, ER(ER_SLAVE_WAS_NOT_RUNNING)); } unlock_slave_threads(mi); @@ -3134,7 +3134,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) /* if new Master_info doesn't exists, add it */ if 
(!master_info_index->get_master_info(&mi->connection_name, - MYSQL_ERROR::WARN_LEVEL_NOTE)) + Sql_condition::WARN_LEVEL_NOTE)) { if (master_info_index->add_master_info(mi, TRUE)) { @@ -3216,7 +3216,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) if (lex_mi->heartbeat_opt != LEX_MASTER_INFO::LEX_MI_UNCHANGED) mi->heartbeat_period = lex_mi->heartbeat_period; else - mi->heartbeat_period= (float) min(SLAVE_MAX_HEARTBEAT_PERIOD, + mi->heartbeat_period= (float) MY_MIN(SLAVE_MAX_HEARTBEAT_PERIOD, (slave_net_timeout/2.0)); mi->received_heartbeats= 0; // counter lives until master is CHANGEd /* @@ -3273,7 +3273,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath || lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key || lex_mi->ssl_verify_server_cert || lex_mi->ssl_crl || lex_mi->ssl_crlpath) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SLAVE_IGNORED_SSL_PARAMS, ER(ER_SLAVE_IGNORED_SSL_PARAMS)); #endif @@ -3321,12 +3321,12 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) { /* Sometimes mi->rli.master_log_pos == 0 (it happens when the SQL thread is - not initialized), so we use a max(). + not initialized), so we use a MY_MAX(). What happens to mi->rli.master_log_pos during the initialization stages of replication is not 100% clear, so we guard against problems using - max(). + MY_MAX(). 
*/ - mi->master_log_pos = max(BIN_LOG_HEADER_SIZE, + mi->master_log_pos = MY_MAX(BIN_LOG_HEADER_SIZE, mi->rli.group_master_log_pos); strmake_buf(mi->master_log_name, mi->rli.group_master_log_name); } @@ -3502,7 +3502,7 @@ bool mysql_show_binlog_events(THD* thd) mysql_mutex_lock(&LOCK_active_mi); if (!(mi= master_info_index-> get_master_info(&thd->variables.default_master_connection, - MYSQL_ERROR::WARN_LEVEL_ERROR))) + Sql_condition::WARN_LEVEL_ERROR))) { mysql_mutex_unlock(&LOCK_active_mi); DBUG_RETURN(TRUE); @@ -3515,7 +3515,7 @@ bool mysql_show_binlog_events(THD* thd) LEX_MASTER_INFO *lex_mi= &thd->lex->mi; SELECT_LEX_UNIT *unit= &thd->lex->unit; ha_rows event_count, limit_start, limit_end; - my_off_t pos = max(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly + my_off_t pos = MY_MAX(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly char search_file_name[FN_REFLEN], *name; const char *log_file_name = lex_mi->log_file_name; mysql_mutex_t *log_lock = binary_log->get_log_lock(); @@ -3805,14 +3805,14 @@ int log_loaded_block(IO_CACHE* file) DBUG_RETURN(0); for (block_len= (uint) (my_b_get_bytes_in_buffer(file)); block_len > 0; - buffer += min(block_len, max_event_size), - block_len -= min(block_len, max_event_size)) + buffer += MY_MIN(block_len, max_event_size), + block_len -= MY_MIN(block_len, max_event_size)) { lf_info->last_pos_in_file= my_b_get_pos_in_file(file); if (lf_info->wrote_create_file) { Append_block_log_event a(lf_info->thd, lf_info->thd->db, buffer, - min(block_len, max_event_size), + MY_MIN(block_len, max_event_size), lf_info->log_delayed); if (mysql_bin_log.write(&a)) DBUG_RETURN(1); @@ -3821,7 +3821,7 @@ int log_loaded_block(IO_CACHE* file) { Begin_load_query_log_event b(lf_info->thd, lf_info->thd->db, buffer, - min(block_len, max_event_size), + MY_MIN(block_len, max_event_size), lf_info->log_delayed); if (mysql_bin_log.write(&b)) DBUG_RETURN(1); @@ -3965,7 +3965,7 @@ rpl_gtid_pos_check(THD *thd, char *str, size_t len) } else if 
(!gave_missing_warning) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_MASTER_GTID_POS_MISSING_DOMAIN, ER(ER_MASTER_GTID_POS_MISSING_DOMAIN), binlog_gtid->domain_id, binlog_gtid->domain_id, @@ -3985,7 +3985,7 @@ rpl_gtid_pos_check(THD *thd, char *str, size_t len) } else if (!gave_conflict_warning) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG, ER(ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG), slave_gtid->domain_id, slave_gtid->server_id, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 2d80767a141..f5baaad5655 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -381,7 +381,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, If LIMIT ROWS EXAMINED interrupted query execution, issue a warning, continue with normal processing and produce an incomplete query result. */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT, ER(ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT), thd->accessed_rows_and_keys, @@ -1217,15 +1217,16 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S if (!tbl->embedding) { Item *prune_cond= tbl->on_expr? tbl->on_expr : conds; - tbl->table->no_partitions_used= prune_partitions(thd, tbl->table, - prune_cond); - } + tbl->table->all_partitions_pruned_away= prune_partitions(thd, + tbl->table, + prune_cond); + } } } #endif /* - Try to optimize count(*), min() and max() to const fields if + Try to optimize count(*), MY_MIN() and MY_MAX() to const fields if there is implicit grouping (aggregate functions but no group_list). In this case, the result set shall only contain one row. 
@@ -3352,9 +3353,9 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list, bitmap_clear_all(&table->cond_set); #ifdef WITH_PARTITION_STORAGE_ENGINE - const bool no_partitions_used= table->no_partitions_used; + const bool all_partitions_pruned_away= table->all_partitions_pruned_away; #else - const bool no_partitions_used= FALSE; + const bool all_partitions_pruned_away= FALSE; #endif DBUG_EXECUTE_IF("bug11747970_raise_error", @@ -3391,7 +3392,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list, if (!table->is_filled_at_execution() && ((!table->file->stats.records && (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT)) || - no_partitions_used) && !embedding) + all_partitions_pruned_away) && !embedding) { // Empty table s->dependent= 0; // Ignore LEFT JOIN depend. no_rows_const_tables |= table->map; @@ -3435,7 +3436,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list, (table->s->system || (table->file->stats.records <= 1 && (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT)) || - no_partitions_used) && + all_partitions_pruned_away) && !s->dependent && !table->fulltext_searched && !join->no_const_tables) { @@ -3685,7 +3686,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list, (!embedding || (embedding->sj_on_expr && !embedding->embedding))) { key_map base_part, base_const_ref, base_eq_part; - base_part.set_prefix(keyinfo->key_parts); + base_part.set_prefix(keyinfo->user_defined_key_parts); base_const_ref= const_ref; base_const_ref.intersect(base_part); base_eq_part= eq_part; @@ -3778,7 +3779,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list, This is can't be to high as otherwise we are likely to use table scan. 
*/ - s->worst_seeks= min((double) s->found_records / 10, + s->worst_seeks= MY_MIN((double) s->found_records / 10, (double) s->read_time*3); if (s->worst_seeks < 2.0) // Fix for small tables s->worst_seeks=2.0; @@ -4977,7 +4978,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, uint and_level,i; KEY_FIELD *key_fields, *end, *field; uint sz; - uint m= max(select_lex->max_equal_elems,1); + uint m= MY_MAX(select_lex->max_equal_elems,1); /* We use the same piece of memory to store both KEY_FIELD @@ -5000,7 +5001,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, can be not more than select_lex->max_equal_elems such substitutions. */ - sz= max(sizeof(KEY_FIELD),sizeof(SARGABLE_PARAM))* + sz= MY_MAX(sizeof(KEY_FIELD),sizeof(SARGABLE_PARAM))* (((thd->lex->current_select->cond_count+1)*2 + thd->lex->current_select->between_count)*m+1); if (!(key_fields=(KEY_FIELD*) thd->alloc(sz))) @@ -5184,7 +5185,7 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) DBUG_ASSERT(tablenr != Table_map_iterator::BITMAP_END); TABLE *tmp_table=join->table[tablenr]; if (tmp_table) // already created - keyuse->ref_table_rows= max(tmp_table->file->stats.records, 100); + keyuse->ref_table_rows= MY_MAX(tmp_table->file->stats.records, 100); } } /* @@ -5663,7 +5664,7 @@ best_access_path(JOIN *join, tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); else tmp= table->file->read_time(key, 1, - (ha_rows) min(tmp,s->worst_seeks)); + (ha_rows) MY_MIN(tmp,s->worst_seeks)); tmp*= record_count; } } @@ -5676,7 +5677,7 @@ best_access_path(JOIN *join, */ if ((found_part & 1) && (!(table->file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) || - found_part == PREV_BITS(uint,keyinfo->key_parts))) + found_part == PREV_BITS(uint,keyinfo->user_defined_key_parts))) { max_key_part= max_part_bit(found_part); /* @@ -5770,7 +5771,7 @@ best_access_path(JOIN *join, */ double rec_per_key; if (!(rec_per_key=(double) - 
keyinfo->rec_per_key[keyinfo->key_parts-1])) + keyinfo->rec_per_key[keyinfo->user_defined_key_parts-1])) rec_per_key=(double) s->records/rec+1; if (!s->records) @@ -5780,10 +5781,10 @@ best_access_path(JOIN *join, else { double a=s->records*0.01; - if (keyinfo->key_parts > 1) + if (keyinfo->user_defined_key_parts > 1) tmp= (max_key_part * (rec_per_key - a) + - a*keyinfo->key_parts - rec_per_key)/ - (keyinfo->key_parts-1); + a*keyinfo->user_defined_key_parts - rec_per_key)/ + (keyinfo->user_defined_key_parts-1); else tmp= a; set_if_bigger(tmp,1.0); @@ -5828,7 +5829,7 @@ best_access_path(JOIN *join, tmp= table->file->keyread_time(key, 1, (ha_rows) tmp); else tmp= table->file->read_time(key, 1, - (ha_rows) min(tmp,s->worst_seeks)); + (ha_rows) MY_MIN(tmp,s->worst_seeks)); tmp*= record_count; } else @@ -8202,8 +8203,8 @@ static bool create_hj_key_for_table(JOIN *join, JOIN_TAB *join_tab, !(key_part_info = (KEY_PART_INFO *) thd->alloc(sizeof(KEY_PART_INFO)* key_parts))) DBUG_RETURN(TRUE); - keyinfo->usable_key_parts= keyinfo->key_parts = key_parts; - keyinfo->ext_key_parts= keyinfo->key_parts; + keyinfo->usable_key_parts= keyinfo->user_defined_key_parts = key_parts; + keyinfo->ext_key_parts= keyinfo->user_defined_key_parts; keyinfo->key_part= key_part_info; keyinfo->key_length=0; keyinfo->algorithm= HA_KEY_ALG_UNDEF; @@ -8249,7 +8250,7 @@ static bool create_hj_key_for_table(JOIN *join, JOIN_TAB *join_tab, keyuse++; } while (keyuse->table == table && keyuse->is_for_hash_join()); - keyinfo->ext_key_parts= keyinfo->key_parts; + keyinfo->ext_key_parts= keyinfo->user_defined_key_parts; keyinfo->ext_key_flags= keyinfo->flags; keyinfo->ext_key_part_map= 0; @@ -8461,9 +8462,9 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, ulong key_flags= j->table->actual_key_flags(keyinfo); if (j->type == JT_CONST) j->table->const_table= 1; - else if (!((keyparts == keyinfo->key_parts && + else if (!((keyparts == keyinfo->user_defined_key_parts && ((key_flags & (HA_NOSAME | 
HA_NULL_PART_KEY)) == HA_NOSAME)) || - (keyparts > keyinfo->key_parts && // true only for extended keys + (keyparts > keyinfo->user_defined_key_parts && // true only for extended keys test(key_flags & HA_EXT_NOSAME) && keyparts == keyinfo->ext_key_parts)) || null_ref_key) @@ -10905,7 +10906,7 @@ bool TABLE_REF::tmp_table_index_lookup_init(THD *thd, bool value, uint skip) { - uint tmp_key_parts= tmp_key->key_parts; + uint tmp_key_parts= tmp_key->user_defined_key_parts; uint i; DBUG_ENTER("TABLE_REF::tmp_table_index_lookup_init"); @@ -11002,7 +11003,7 @@ bool TABLE_REF::is_access_triggered() a correlated subquery itself, but has subqueries, we can free it fully and also free JOINs of all its subqueries. The exception is a subquery in SELECT list, e.g: @n - SELECT a, (select max(b) from t1) group by c @n + SELECT a, (select MY_MAX(b) from t1) group by c @n This subquery will not be evaluated at first sweep and its value will not be inserted into the temporary table. Instead, it's evaluated when selecting from the temporary table. 
Therefore, it can't be freed @@ -12472,7 +12473,7 @@ static int compare_fields_by_table_order(Item *field1, if (!cmp) { KEY *key_info= tab->table->key_info + keyno; - for (uint i= 0; i < key_info->key_parts; i++) + for (uint i= 0; i < key_info->user_defined_key_parts; i++) { Field *fld= key_info->key_part[i].field; if (fld->eq(f2->field)) @@ -14966,7 +14967,6 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields, table->s= share; init_tmp_table_share(thd, share, "", 0, tmpname, tmpname); share->blob_field= blob_field; - share->blob_ptr_size= portable_sizeof_char_ptr; share->table_charset= param->table_charset; share->primary_key= MAX_KEY; // Indicate no primary key share->keys_for_keyread.init(); @@ -15207,6 +15207,12 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields, if (!table->file) goto err; + if (table->file->set_ha_share_ref(&share->ha_share)) + { + delete table->file; + goto err; + } + if (!using_unique_constraint) reclength+= group_null_items; // null flag is stored separately @@ -15368,7 +15374,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields, share->max_rows= ~(ha_rows) 0; else share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ? 
- min(thd->variables.tmp_table_size, + MY_MIN(thd->variables.tmp_table_size, thd->variables.max_heap_table_size) : thd->variables.tmp_table_size) / share->reclength); @@ -15395,8 +15401,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields, keyinfo->key_part=key_part_info; keyinfo->flags=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY; keyinfo->ext_key_flags= keyinfo->flags; - keyinfo->usable_key_parts=keyinfo->key_parts= param->group_parts; - keyinfo->ext_key_parts= keyinfo->key_parts; + keyinfo->usable_key_parts=keyinfo->user_defined_key_parts= param->group_parts; + keyinfo->ext_key_parts= keyinfo->user_defined_key_parts; keyinfo->key_length=0; keyinfo->rec_per_key=NULL; keyinfo->read_stats= NULL; @@ -15496,16 +15502,17 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields, share->uniques= 1; } null_pack_length-=hidden_null_pack_length; - keyinfo->key_parts= ((field_count-param->hidden_field_count)+ - (share->uniques ? test(null_pack_length) : 0)); - keyinfo->ext_key_parts= keyinfo->key_parts; + keyinfo->user_defined_key_parts= + ((field_count-param->hidden_field_count)+ + (share->uniques ? 
test(null_pack_length) : 0)); + keyinfo->ext_key_parts= keyinfo->user_defined_key_parts; table->distinct= 1; share->keys= 1; if (!(key_part_info= (KEY_PART_INFO*) alloc_root(&table->mem_root, - keyinfo->key_parts * sizeof(KEY_PART_INFO)))) + keyinfo->user_defined_key_parts * sizeof(KEY_PART_INFO)))) goto err; - bzero((void*) key_part_info, keyinfo->key_parts * sizeof(KEY_PART_INFO)); + bzero((void*) key_part_info, keyinfo->user_defined_key_parts * sizeof(KEY_PART_INFO)); table->keys_in_use_for_query.set_bit(0); share->keys_in_use.set_bit(0); table->key_info= table->s->key_info= keyinfo; @@ -15685,7 +15692,6 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list) table->temp_pool_slot= MY_BIT_NONE; share->blob_field= blob_field; share->fields= field_count; - share->blob_ptr_size= portable_sizeof_char_ptr; setup_tmp_table_column_bitmaps(table, bitmaps); /* Create all fields and calculate the total length of record */ @@ -15841,13 +15847,13 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, { // Get keys for ni_create bool using_unique_constraint=0; HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root, - sizeof(*seg) * keyinfo->key_parts); + sizeof(*seg) * keyinfo->user_defined_key_parts); if (!seg) goto err; - bzero(seg, sizeof(*seg) * keyinfo->key_parts); + bzero(seg, sizeof(*seg) * keyinfo->user_defined_key_parts); if (keyinfo->key_length >= table->file->max_key_length() || - keyinfo->key_parts > table->file->max_key_parts() || + keyinfo->user_defined_key_parts > table->file->max_key_parts() || share->uniques) { if (!share->uniques && !(keyinfo->flags & HA_NOSAME)) @@ -15862,7 +15868,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, share->uniques= 1; using_unique_constraint=1; bzero((char*) &uniquedef,sizeof(uniquedef)); - uniquedef.keysegs=keyinfo->key_parts; + uniquedef.keysegs=keyinfo->user_defined_key_parts; uniquedef.seg=seg; uniquedef.null_are_equal=1; @@ -15878,10 +15884,10 @@ bool 
create_internal_tmp_table(TABLE *table, KEY *keyinfo, /* Create a key */ bzero((char*) &keydef,sizeof(keydef)); keydef.flag= keyinfo->flags & HA_NOSAME; - keydef.keysegs= keyinfo->key_parts; + keydef.keysegs= keyinfo->user_defined_key_parts; keydef.seg= seg; } - for (uint i=0; i < keyinfo->key_parts ; i++,seg++) + for (uint i=0; i < keyinfo->user_defined_key_parts ; i++,seg++) { Field *field=keyinfo->key_part[i].field; seg->flag= 0; @@ -15893,7 +15899,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2); - seg->bit_start= (uint8)(field->pack_length() - share->blob_ptr_size); + seg->bit_start= (uint8)(field->pack_length() - + portable_sizeof_char_ptr); seg->flag= HA_BLOB_PART; seg->length=0; // Whole blob in unique constraint } @@ -15947,7 +15954,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, start_recinfo, share->uniques, &uniquedef, &create_info, - HA_CREATE_TMP_TABLE))) + HA_CREATE_TMP_TABLE | HA_CREATE_INTERNAL_TABLE))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ table->db_stat=0; @@ -16010,13 +16017,13 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, { // Get keys for ni_create bool using_unique_constraint=0; HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root, - sizeof(*seg) * keyinfo->key_parts); + sizeof(*seg) * keyinfo->user_defined_key_parts); if (!seg) goto err; - bzero(seg, sizeof(*seg) * keyinfo->key_parts); + bzero(seg, sizeof(*seg) * keyinfo->user_defined_key_parts); if (keyinfo->key_length >= table->file->max_key_length() || - keyinfo->key_parts > table->file->max_key_parts() || + keyinfo->user_defined_key_parts > table->file->max_key_parts() || share->uniques) { /* Can't create a key; Make a unique constraint instead of a key */ @@ -16024,7 +16031,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, share->uniques= 1; using_unique_constraint=1; bzero((char*) 
&uniquedef,sizeof(uniquedef)); - uniquedef.keysegs=keyinfo->key_parts; + uniquedef.keysegs=keyinfo->user_defined_key_parts; uniquedef.seg=seg; uniquedef.null_are_equal=1; @@ -16041,10 +16048,10 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, bzero((char*) &keydef,sizeof(keydef)); keydef.flag= ((keyinfo->flags & HA_NOSAME) | HA_BINARY_PACK_KEY | HA_PACK_KEY); - keydef.keysegs= keyinfo->key_parts; + keydef.keysegs= keyinfo->user_defined_key_parts; keydef.seg= seg; } - for (uint i=0; i < keyinfo->key_parts ; i++,seg++) + for (uint i=0; i < keyinfo->user_defined_key_parts ; i++,seg++) { Field *field=keyinfo->key_part[i].field; seg->flag= 0; @@ -16056,7 +16063,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2); - seg->bit_start= (uint8)(field->pack_length() - share->blob_ptr_size); + seg->bit_start= (uint8)(field->pack_length() - portable_sizeof_char_ptr); seg->flag= HA_BLOB_PART; seg->length=0; // Whole blob in unique constraint } @@ -16093,7 +16100,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo, start_recinfo, share->uniques, &uniquedef, &create_info, - HA_CREATE_TMP_TABLE))) + HA_CREATE_TMP_TABLE | HA_CREATE_INTERNAL_TABLE))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ table->db_stat=0; @@ -16149,6 +16156,12 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table, new_table.s->db_type()))) DBUG_RETURN(1); // End of memory + if (new_table.file->set_ha_share_ref(&share.ha_share)) + { + delete new_table.file; + DBUG_RETURN(1); + } + save_proc_info=thd->proc_info; THD_STAGE_INFO(thd, stage_converting_heap_to_myisam); @@ -16779,7 +16792,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) if (join_tab->on_precond && !join_tab->on_precond->val_int()) rc= NESTED_LOOP_NO_MORE_ROWS; } - join->thd->warning_info->reset_current_row_for_warning(); + 
join->thd->get_stmt_da()->reset_current_row_for_warning(); if (rc != NESTED_LOOP_NO_MORE_ROWS && (rc= join_tab_execution_startup(join_tab)) < 0) @@ -17016,7 +17029,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, enum enum_nested_loop_state rc; /* A match from join_tab is found for the current partial join. */ rc= (*join_tab->next_select)(join, join_tab+1, 0); - join->thd->warning_info->inc_current_row_for_warning(); + join->thd->get_stmt_da()->inc_current_row_for_warning(); if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS) DBUG_RETURN(rc); if (return_tab < join->return_tab) @@ -17034,7 +17047,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, } else { - join->thd->warning_info->inc_current_row_for_warning(); + join->thd->get_stmt_da()->inc_current_row_for_warning(); join_tab->read_record.unlock_row(join_tab); } } @@ -17045,7 +17058,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, with the beginning coinciding with the current partial join. */ join->examined_rows++; - join->thd->warning_info->inc_current_row_for_warning(); + join->thd->get_stmt_da()->inc_current_row_for_warning(); join_tab->read_record.unlock_row(join_tab); } DBUG_RETURN(NESTED_LOOP_OK); @@ -17157,13 +17170,8 @@ int report_error(TABLE *table, int error) */ if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT && !table->in_use->killed) - { - push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, error, - "Got error %d when reading table %`s.%`s", - error, table->s->db.str, table->s->table_name.str); sql_print_error("Got error %d when reading table '%s'", error, table->s->path.str); - } table->file->print_error(error,MYF(0)); return 1; } @@ -18837,7 +18845,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, { KEY_PART_INFO *key_part,*key_part_end; key_part=table->key_info[idx].key_part; - key_part_end=key_part+table->key_info[idx].key_parts; + key_part_end=key_part+table->key_info[idx].user_defined_key_parts; 
key_part_map const_key_parts=table->const_key_parts[idx]; int reverse=0; uint key_parts; @@ -18879,7 +18887,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, (we have to stop as first not continous primary key part) */ for (key_part_end= key_part, - end= key_part+table->key_info[table->s->primary_key].key_parts; + end= key_part+table->key_info[table->s->primary_key].user_defined_key_parts; key_part_end < end; key_part_end++, pk_part_idx++) { /* Found hole in the pk_parts; Abort */ @@ -18896,7 +18904,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, Test if the primary key parts were all const (i.e. there's one row). The sorting doesn't matter. */ - if (key_part == start+table->key_info[table->s->primary_key].key_parts && + if (key_part == start+table->key_info[table->s->primary_key].user_defined_key_parts && reverse == 0) { key_parts= 0; @@ -18922,7 +18930,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, } if (on_pk_suffix) { - uint used_key_parts_secondary= table->key_info[idx].key_parts; + uint used_key_parts_secondary= table->key_info[idx].user_defined_key_parts; uint used_key_parts_pk= (uint) (key_part - table->key_info[table->s->primary_key].key_part); key_parts= used_key_parts_pk + used_key_parts_secondary; @@ -19033,7 +19041,7 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts, { if (usable_keys->is_set(nr) && table->key_info[nr].key_length < min_length && - table->key_info[nr].key_parts >= ref_key_parts && + table->key_info[nr].user_defined_key_parts >= ref_key_parts && is_subkey(table->key_info[nr].key_part, ref_key_part, ref_key_part_end) && test_if_order_by_key(order, table, nr)) @@ -19091,7 +19099,7 @@ list_contains_unique_index(TABLE *table, KEY_PART_INFO *key_part, *key_part_end; for (key_part=keyinfo->key_part, - key_part_end=key_part+ keyinfo->key_parts; + key_part_end=key_part+ keyinfo->user_defined_key_parts; key_part < key_part_end; key_part++) { 
@@ -19400,7 +19408,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, order_direction= best_key_direction; /* saved_best_key_parts is actual number of used keyparts found by the - test_if_order_by_key function. It could differ from keyinfo->key_parts, + test_if_order_by_key function. It could differ from keyinfo->user_defined_key_parts, thus we have to restore it in case of desc order as it affects QUICK_SELECT_DESC behaviour. */ @@ -20131,7 +20139,7 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length, count++; if (!sortorder) sortorder= (SORT_FIELD*) sql_alloc(sizeof(SORT_FIELD) * - (max(count, *length) + 1)); + (MY_MAX(count, *length) + 1)); pos= sort= sortorder; if (!pos) @@ -20365,7 +20373,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, warning so the user knows that the field from the FROM clause overshadows the column reference from the SELECT list. */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), ((Item_ident*) order_item)->field_name, current_thd->where); @@ -23750,12 +23758,12 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table, if (group) { /* - Used_key_parts can be larger than keyinfo->key_parts + Used_key_parts can be larger than keyinfo->user_defined_key_parts when using a secondary index clustered with a primary key (e.g. as in Innodb). See Bug #28591 for details. */ - uint used_index_parts= keyinfo->key_parts; + uint used_index_parts= keyinfo->user_defined_key_parts; uint used_pk_parts= 0; if (used_key_parts > used_index_parts) used_pk_parts= used_key_parts-used_index_parts; @@ -23770,7 +23778,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table, of the primary key are considered unknown we assume they are equal to 1. 
*/ - if (used_key_parts == pkinfo->key_parts || + if (used_key_parts == pkinfo->user_defined_key_parts || pkinfo->rec_per_key[0] == 0) rec_per_key= 1; if (rec_per_key > 1) @@ -23837,7 +23845,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table, select_limit= (ha_rows) (select_limit * (double) table_records / table->quick_condition_rows); - rec_per_key= keyinfo->actual_rec_per_key(keyinfo->key_parts-1); + rec_per_key= keyinfo->actual_rec_per_key(keyinfo->user_defined_key_parts-1); set_if_bigger(rec_per_key, 1); /* Here we take into account the fact that rows are @@ -23851,7 +23859,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table, index entry. */ index_scan_time= select_limit/rec_per_key * - min(rec_per_key, table->file->scan_time()); + MY_MIN(rec_per_key, table->file->scan_time()); if ((ref_key < 0 && (group || table->force_index || is_covering)) || index_scan_time < read_time) { @@ -23862,13 +23870,13 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table, if (table->quick_keys.is_set(nr)) quick_records= table->quick_rows[nr]; if (best_key < 0 || - (select_limit <= min(quick_records,best_records) ? - keyinfo->key_parts < best_key_parts : + (select_limit <= MY_MIN(quick_records,best_records) ? + keyinfo->user_defined_key_parts < best_key_parts : quick_records < best_records) || (!is_best_covering && is_covering)) { best_key= nr; - best_key_parts= keyinfo->key_parts; + best_key_parts= keyinfo->user_defined_key_parts; if (saved_best_key_parts) *saved_best_key_parts= used_key_parts; best_records= quick_records; diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index b5b7f9866c5..cf96297391c 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -265,9 +265,9 @@ bool servers_reload(THD *thd) Execution might have been interrupted; only print the error message if an error condition has been raised. 
*/ - if (thd->stmt_da->is_error()) + if (thd->get_stmt_da()->is_error()) sql_print_error("Can't open and lock privilege tables: %s", - thd->stmt_da->message()); + thd->get_stmt_da()->message()); return_val= FALSE; goto end; } @@ -631,7 +631,7 @@ int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options) if (close_cached_connection_tables(thd, &name)) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, "Server connection in use"); } @@ -1060,7 +1060,7 @@ int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options) if (close_cached_connection_tables(thd, &name)) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, "Server connection in use"); } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index d5a52ed0b52..587d4b6ebdb 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -910,8 +910,8 @@ public: } bool handle_condition(THD *thd, uint sql_errno, const char * /* sqlstate */, - MYSQL_ERROR::enum_warning_level level, - const char *message, MYSQL_ERROR ** /* cond_hdl */) + Sql_condition::enum_warning_level level, + const char *message, Sql_condition ** /* cond_hdl */) { /* The handler does not handle the errors raised by itself. @@ -942,7 +942,7 @@ public: case ER_NO_SUCH_TABLE: case ER_NO_SUCH_TABLE_IN_ENGINE: /* Established behavior: warn if underlying tables are missing. */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_VIEW_INVALID, ER(ER_VIEW_INVALID), m_top_view->get_db_name(), @@ -952,7 +952,7 @@ public: case ER_SP_DOES_NOT_EXIST: /* Established behavior: warn if underlying functions are missing. 
*/ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_VIEW_INVALID, ER(ER_VIEW_INVALID), m_top_view->get_db_name(), @@ -1046,7 +1046,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) { field_list.push_back(new Item_empty_string("View",NAME_CHAR_LEN)); field_list.push_back(new Item_empty_string("Create View", - max(buffer.length(),1024))); + MY_MAX(buffer.length(),1024))); field_list.push_back(new Item_empty_string("character_set_client", MY_CS_NAME_SIZE)); field_list.push_back(new Item_empty_string("collation_connection", @@ -1057,7 +1057,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) field_list.push_back(new Item_empty_string("Table",NAME_CHAR_LEN)); // 1024 is for not to confuse old clients field_list.push_back(new Item_empty_string("Create Table", - max(buffer.length(),1024))); + MY_MAX(buffer.length(),1024))); } if (protocol->send_result_set_metadata(&field_list, @@ -1739,7 +1739,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(STRING_WITH_LEN(" (")); - for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) + for (uint j=0 ; j < key_info->user_defined_key_parts ; j++,key_part++) { if (j) packet->append(','); @@ -1881,6 +1881,22 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, packet->append(STRING_WITH_LEN(" PACK_KEYS=1")); if (create_info.options & HA_OPTION_NO_PACK_KEYS) packet->append(STRING_WITH_LEN(" PACK_KEYS=0")); + if (share->db_create_options & HA_OPTION_STATS_PERSISTENT) + packet->append(STRING_WITH_LEN(" STATS_PERSISTENT=1")); + if (share->db_create_options & HA_OPTION_NO_STATS_PERSISTENT) + packet->append(STRING_WITH_LEN(" STATS_PERSISTENT=0")); + if (share->stats_auto_recalc == HA_STATS_AUTO_RECALC_ON) + packet->append(STRING_WITH_LEN(" STATS_AUTO_RECALC=1")); + else if (share->stats_auto_recalc == HA_STATS_AUTO_RECALC_OFF) + packet->append(STRING_WITH_LEN(" STATS_AUTO_RECALC=0")); + if 
(share->stats_sample_pages != 0) + { + char *end; + packet->append(STRING_WITH_LEN(" STATS_SAMPLE_PAGES=")); + end= longlong10_to_str(share->stats_sample_pages, buff, 10); + packet->append(buff, (uint) (end - buff)); + } + /* We use CHECKSUM, instead of TABLE_CHECKSUM, for backward compability */ if (create_info.options & HA_OPTION_CHECKSUM) packet->append(STRING_WITH_LEN(" CHECKSUM=1")); @@ -1940,8 +1956,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, &part_syntax_len, FALSE, show_table_options, - NULL, NULL, - comment_start.c_ptr()))) + NULL, NULL))) { packet->append(comment_start); if (packet->append(part_syntax, part_syntax_len) || @@ -2257,7 +2272,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) /* Lock THD mutex that protects its data when looking at it. */ if (tmp->query()) { - uint length= min(max_query_length, tmp->query_length()); + uint length= MY_MIN(max_query_length, tmp->query_length()); char *q= thd->strmake(tmp->query(),length); /* Safety: in case strmake failed, we set length to 0. 
*/ thd_info->query_string= @@ -2270,7 +2285,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) */ if (tmp->progress.max_counter) { - uint max_stage= max(tmp->progress.max_stage, 1); + uint max_stage= MY_MAX(tmp->progress.max_stage, 1); thd_info->progress= (((tmp->progress.stage / (double) max_stage) + ((tmp->progress.counter / (double) tmp->progress.max_counter) / @@ -2479,7 +2494,7 @@ int fill_show_explain(THD *thd, TABLE_LIST *table, COND *cond) else warning_text= explain_req.query_str.c_ptr_safe(); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_YES, warning_text); } DBUG_RETURN(bres); @@ -2583,7 +2598,7 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond) if (tmp->query()) { table->field[7]->store(tmp->query(), - min(PROCESS_LIST_INFO_WIDTH, + MY_MIN(PROCESS_LIST_INFO_WIDTH, tmp->query_length()), cs); table->field[7]->set_notnull(); } @@ -3030,7 +3045,7 @@ static int aggregate_user_stats(HASH *all_user_stats, HASH *agg_user_stats) { DBUG_ENTER("aggregate_user_stats"); if (my_hash_init(agg_user_stats, system_charset_info, - max(all_user_stats->records, 1), + MY_MAX(all_user_stats->records, 1), 0, 0, (my_hash_get_key)get_key_user_stats, (my_hash_free_key)free_user_stats, 0)) { @@ -4069,12 +4084,13 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys, 'only_view_structure()'. */ lex->sql_command= SQLCOM_SHOW_FIELDS; - result= open_normal_and_derived_tables(thd, table_list, - (MYSQL_OPEN_IGNORE_FLUSH | - MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL | - (can_deadlock ? - MYSQL_OPEN_FAIL_ON_MDL_CONFLICT : 0)), - DT_PREPARE | DT_CREATE); + result= (open_temporary_tables(thd, table_list) || + open_normal_and_derived_tables(thd, table_list, + (MYSQL_OPEN_IGNORE_FLUSH | + MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL | + (can_deadlock ? 
+ MYSQL_OPEN_FAIL_ON_MDL_CONFLICT : 0)), + DT_PREPARE | DT_CREATE)); /* Restore old value of sql_command back as it is being looked at in process_table() function. @@ -4095,8 +4111,8 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys, of backward compatibility. */ if (!is_show_fields_or_keys && result && thd->is_error() && - (thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE || - thd->stmt_da->sql_errno() == ER_WRONG_OBJECT)) + (thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE || + thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT)) { /* Hide error for a non-existing table. @@ -4183,7 +4199,7 @@ static int fill_schema_table_names(THD *thd, TABLE_LIST *tables, else table->field[3]->store(STRING_WITH_LEN("ERROR"), cs); - if (thd->is_error() && thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE) + if (thd->is_error() && thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE) { thd->clear_error(); return 0; @@ -4227,7 +4243,7 @@ uint get_table_open_method(TABLE_LIST *tables, for (ptr=tables->table->field; (field= *ptr) ; ptr++) { star_table_open_method= - min(star_table_open_method, + MY_MIN(star_table_open_method, schema_table->fields_info[field_indx].open_method); if (bitmap_is_set(tables->table->read_set, field->field_index)) { @@ -4386,7 +4402,7 @@ static int fill_schema_table_from_frm(THD *thd, TABLE_LIST *tables, */ DBUG_ASSERT(can_deadlock); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_I_S_SKIPPED_TABLE, ER(ER_WARN_I_S_SKIPPED_TABLE), table_list.db, table_list.table_name); @@ -4514,9 +4530,9 @@ public: bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* msg, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { if (sql_errno == ER_PARSE_ERROR || sql_errno == ER_TRG_NO_DEFINER || @@ -4892,7 +4908,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST 
*tables, } else { - char option_buff[350]; + char option_buff[512]; String str(option_buff,sizeof(option_buff), system_charset_info); TABLE *show_table= tables->table; TABLE_SHARE *share= show_table->s; @@ -4957,6 +4973,23 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables, if (share->db_create_options & HA_OPTION_NO_PACK_KEYS) str.qs_append(STRING_WITH_LEN(" pack_keys=0")); + if (share->db_create_options & HA_OPTION_STATS_PERSISTENT) + str.qs_append(STRING_WITH_LEN(" stats_persistent=1")); + + if (share->db_create_options & HA_OPTION_NO_STATS_PERSISTENT) + str.qs_append(STRING_WITH_LEN(" stats_persistent=0")); + + if (share->stats_auto_recalc == HA_STATS_AUTO_RECALC_ON) + str.qs_append(STRING_WITH_LEN(" stats_auto_recalc=1")); + else if (share->stats_auto_recalc == HA_STATS_AUTO_RECALC_OFF) + str.qs_append(STRING_WITH_LEN(" stats_auto_recalc=0")); + + if (share->stats_sample_pages != 0) + { + str.qs_append(STRING_WITH_LEN(" stats_sample_pages=")); + str.qs_append(share->stats_sample_pages); + } + /* We use CHECKSUM, instead of TABLE_CHECKSUM, for backward compability */ if (share->db_create_options & HA_OPTION_CHECKSUM) str.qs_append(STRING_WITH_LEN(" checksum=1")); @@ -5104,13 +5137,14 @@ err: column with the error text, and clear the error so that the operation can continue. */ - const char *error= thd->is_error() ? thd->stmt_da->message() : ""; + const char *error= thd->is_error() ? 
thd->get_stmt_da()->message() : ""; table->field[20]->store(error, strlen(error), cs); if (thd->is_error()) { - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->clear_error(); } } @@ -5274,8 +5308,9 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables, rather than in SHOW COLUMNS */ if (thd->is_error()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->clear_error(); res= 0; } @@ -5671,16 +5706,16 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table, for (uint i= 0 ; i < params ; i++) { const char *tmp_buff; - sp_variable_t *spvar= spcont->find_variable(i); + sp_variable *spvar= spcont->find_variable(i); field_def= &spvar->field_def; switch (spvar->mode) { - case sp_param_in: + case sp_variable::MODE_IN: tmp_buff= "IN"; break; - case sp_param_out: + case sp_variable::MODE_OUT: tmp_buff= "OUT"; break; - case sp_param_inout: + case sp_variable::MODE_INOUT: tmp_buff= "INOUT"; break; default: @@ -5945,8 +5980,9 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, rather than in SHOW KEYS */ if (thd->is_error()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->clear_error(); res= 0; } @@ -5967,7 +6003,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables, { KEY_PART_INFO *key_part= key_info->key_part; const char *str; - for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) + for (uint j=0 ; j < key_info->user_defined_key_parts ; j++,key_part++) { restore_record(table, 
s->default_values); table->field[0]->store(STRING_WITH_LEN("def"), cs); @@ -6130,7 +6166,7 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables, */ while ((item= it++)) { - if ((field= item->filed_for_view_update()) && field->field && + if ((field= item->field_for_view_update()) && field->field && !field->field->table->pos_in_table_list->schema_table) { updatable_view= 1; @@ -6167,8 +6203,9 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables, if (schema_table_store_record(thd, table)) DBUG_RETURN(1); if (res && thd->is_error()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); } if (res) thd->clear_error(); @@ -6201,8 +6238,9 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables, if (res) { if (thd->is_error()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->clear_error(); DBUG_RETURN(0); } @@ -6307,8 +6345,9 @@ static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables, if (res) { if (thd->is_error()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->clear_error(); DBUG_RETURN(0); } @@ -6388,8 +6427,9 @@ static int get_schema_key_column_usage_record(THD *thd, if (res) { if (thd->is_error()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->clear_error(); DBUG_RETURN(0); } @@ -6408,7 +6448,7 @@ static 
int get_schema_key_column_usage_record(THD *thd, continue; uint f_idx= 0; KEY_PART_INFO *key_part= key_info->key_part; - for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) + for (uint j=0 ; j < key_info->user_defined_key_parts ; j++,key_part++) { if (key_part->field) { @@ -6603,7 +6643,7 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table, strlen(part_elem->tablespace_name), cs); else { - char *ts= showing_table->file->get_tablespace_name(thd,0,0); + char *ts= showing_table->s->tablespace; if(ts) table->field[24]->store(ts, strlen(ts), cs); else @@ -6678,8 +6718,9 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables, if (res) { if (thd->is_error()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->clear_error(); DBUG_RETURN(0); } @@ -7210,8 +7251,9 @@ get_referential_constraints_record(THD *thd, TABLE_LIST *tables, if (res) { if (thd->is_error()) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - thd->stmt_da->sql_errno(), thd->stmt_da->message()); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + thd->get_stmt_da()->sql_errno(), + thd->get_stmt_da()->message()); thd->clear_error(); DBUG_RETURN(0); } @@ -7919,41 +7961,32 @@ static bool do_fill_table(THD *thd, // Warning_info, so "useful warnings" get rejected. In order to avoid // that problem we create a Warning_info instance, which is capable of // storing "unlimited" number of warnings. 
- Warning_info wi(thd->query_id, true); - Warning_info *wi_saved= thd->warning_info; + Diagnostics_area *da= thd->get_stmt_da(); + Warning_info wi_tmp(thd->query_id, true, true); - thd->warning_info= &wi; + da->push_warning_info(&wi_tmp); bool res= table_list->schema_table->fill_table( thd, table_list, join_table->select_cond); - thd->warning_info= wi_saved; + da->pop_warning_info(); // Pass an error if any. - if (thd->stmt_da->is_error()) + if (da->is_error()) { - thd->warning_info->push_warning(thd, - thd->stmt_da->sql_errno(), - thd->stmt_da->get_sqlstate(), - MYSQL_ERROR::WARN_LEVEL_ERROR, - thd->stmt_da->message()); + da->push_warning(thd, + da->sql_errno(), + da->get_sqlstate(), + Sql_condition::WARN_LEVEL_ERROR, + da->message()); } // Pass warnings (if any). // // Filter out warnings with WARN_LEVEL_ERROR level, because they // correspond to the errors which were filtered out in fill_table(). - - - List_iterator_fast<MYSQL_ERROR> it(wi.warn_list()); - MYSQL_ERROR *err; - - while ((err= it++)) - { - if (err->get_level() != MYSQL_ERROR::WARN_LEVEL_ERROR) - thd->warning_info->push_warning(thd, err); - } + da->copy_non_errors_from_wi(thd, &wi_tmp); return res; } @@ -9110,7 +9143,7 @@ static bool show_create_trigger_impl(THD *thd, Item_empty_string *stmt_fld= new Item_empty_string("SQL Original Statement", - max(trg_sql_original_stmt.length, 1024)); + MY_MAX(trg_sql_original_stmt.length, 1024)); stmt_fld->maybe_null= TRUE; diff --git a/sql/sql_signal.cc b/sql/sql_signal.cc index ed4d2c23d53..a0a47b77591 100644 --- a/sql/sql_signal.cc +++ b/sql/sql_signal.cc @@ -88,9 +88,10 @@ void Set_signal_information::clear() memset(m_item, 0, sizeof(m_item)); } -void Signal_common::assign_defaults(MYSQL_ERROR *cond, +void Sql_cmd_common_signal::assign_defaults( + Sql_condition *cond, bool set_level_code, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, int sqlcode) { if (set_level_code) @@ -102,7 +103,7 @@ void 
Signal_common::assign_defaults(MYSQL_ERROR *cond, cond->set_builtin_message_text(ER(sqlcode)); } -void Signal_common::eval_defaults(THD *thd, MYSQL_ERROR *cond) +void Sql_cmd_common_signal::eval_defaults(THD *thd, Sql_condition *cond) { DBUG_ASSERT(cond); @@ -114,8 +115,8 @@ void Signal_common::eval_defaults(THD *thd, MYSQL_ERROR *cond) /* SIGNAL is restricted in sql_yacc.yy to only signal SQLSTATE conditions. */ - DBUG_ASSERT(m_cond->type == sp_cond_type::state); - sqlstate= m_cond->sqlstate; + DBUG_ASSERT(m_cond->type == sp_condition_value::SQLSTATE); + sqlstate= m_cond->sql_state; cond->set_sqlstate(sqlstate); } else @@ -129,19 +130,19 @@ void Signal_common::eval_defaults(THD *thd, MYSQL_ERROR *cond) { /* SQLSTATE class "01": warning. */ assign_defaults(cond, set_defaults, - MYSQL_ERROR::WARN_LEVEL_WARN, ER_SIGNAL_WARN); + Sql_condition::WARN_LEVEL_WARN, ER_SIGNAL_WARN); } else if ((sqlstate[0] == '0') && (sqlstate[1] == '2')) { /* SQLSTATE class "02": not found. */ assign_defaults(cond, set_defaults, - MYSQL_ERROR::WARN_LEVEL_ERROR, ER_SIGNAL_NOT_FOUND); + Sql_condition::WARN_LEVEL_ERROR, ER_SIGNAL_NOT_FOUND); } else { /* other SQLSTATE classes : error. 
*/ assign_defaults(cond, set_defaults, - MYSQL_ERROR::WARN_LEVEL_ERROR, ER_SIGNAL_EXCEPTION); + Sql_condition::WARN_LEVEL_ERROR, ER_SIGNAL_EXCEPTION); } } @@ -256,26 +257,26 @@ static int assign_condition_item(MEM_ROOT *mem_root, const char* name, THD *thd, } -int Signal_common::eval_signal_informations(THD *thd, MYSQL_ERROR *cond) +int Sql_cmd_common_signal::eval_signal_informations(THD *thd, Sql_condition *cond) { struct cond_item_map { enum enum_diag_condition_item_name m_item; - String MYSQL_ERROR::*m_member; + String Sql_condition::*m_member; }; static cond_item_map map[]= { - { DIAG_CLASS_ORIGIN, & MYSQL_ERROR::m_class_origin }, - { DIAG_SUBCLASS_ORIGIN, & MYSQL_ERROR::m_subclass_origin }, - { DIAG_CONSTRAINT_CATALOG, & MYSQL_ERROR::m_constraint_catalog }, - { DIAG_CONSTRAINT_SCHEMA, & MYSQL_ERROR::m_constraint_schema }, - { DIAG_CONSTRAINT_NAME, & MYSQL_ERROR::m_constraint_name }, - { DIAG_CATALOG_NAME, & MYSQL_ERROR::m_catalog_name }, - { DIAG_SCHEMA_NAME, & MYSQL_ERROR::m_schema_name }, - { DIAG_TABLE_NAME, & MYSQL_ERROR::m_table_name }, - { DIAG_COLUMN_NAME, & MYSQL_ERROR::m_column_name }, - { DIAG_CURSOR_NAME, & MYSQL_ERROR::m_cursor_name } + { DIAG_CLASS_ORIGIN, & Sql_condition::m_class_origin }, + { DIAG_SUBCLASS_ORIGIN, & Sql_condition::m_subclass_origin }, + { DIAG_CONSTRAINT_CATALOG, & Sql_condition::m_constraint_catalog }, + { DIAG_CONSTRAINT_SCHEMA, & Sql_condition::m_constraint_schema }, + { DIAG_CONSTRAINT_NAME, & Sql_condition::m_constraint_name }, + { DIAG_CATALOG_NAME, & Sql_condition::m_catalog_name }, + { DIAG_SCHEMA_NAME, & Sql_condition::m_schema_name }, + { DIAG_TABLE_NAME, & Sql_condition::m_table_name }, + { DIAG_COLUMN_NAME, & Sql_condition::m_column_name }, + { DIAG_CURSOR_NAME, & Sql_condition::m_cursor_name } }; Item *set; @@ -288,7 +289,7 @@ int Signal_common::eval_signal_informations(THD *thd, MYSQL_ERROR *cond) String *member; const LEX_STRING *name; - DBUG_ENTER("Signal_common::eval_signal_informations"); + 
DBUG_ENTER("Sql_cmd_common_signal::eval_signal_informations"); for (i= FIRST_DIAG_SET_PROPERTY; i <= LAST_DIAG_SET_PROPERTY; @@ -360,7 +361,7 @@ int Signal_common::eval_signal_informations(THD *thd, MYSQL_ERROR *cond) /* See the comments - "Design notes about MYSQL_ERROR::m_message_text." + "Design notes about Sql_condition::m_message_text." in file sql_error.cc */ String converted_text; @@ -413,23 +414,23 @@ end: DBUG_RETURN(result); } -bool Signal_common::raise_condition(THD *thd, MYSQL_ERROR *cond) +bool Sql_cmd_common_signal::raise_condition(THD *thd, Sql_condition *cond) { bool result= TRUE; - DBUG_ENTER("Signal_common::raise_condition"); + DBUG_ENTER("Sql_cmd_common_signal::raise_condition"); - DBUG_ASSERT(m_lex->query_tables == NULL); + DBUG_ASSERT(thd->lex->query_tables == NULL); eval_defaults(thd, cond); if (eval_signal_informations(thd, cond)) DBUG_RETURN(result); /* SIGNAL should not signal WARN_LEVEL_NOTE */ - DBUG_ASSERT((cond->m_level == MYSQL_ERROR::WARN_LEVEL_WARN) || - (cond->m_level == MYSQL_ERROR::WARN_LEVEL_ERROR)); + DBUG_ASSERT((cond->m_level == Sql_condition::WARN_LEVEL_WARN) || + (cond->m_level == Sql_condition::WARN_LEVEL_ERROR)); - MYSQL_ERROR *raised= NULL; + Sql_condition *raised= NULL; raised= thd->raise_condition(cond->get_sql_errno(), cond->get_sqlstate(), cond->get_level(), @@ -437,7 +438,7 @@ bool Signal_common::raise_condition(THD *thd, MYSQL_ERROR *cond) if (raised) raised->copy_opt_attributes(cond); - if (cond->m_level == MYSQL_ERROR::WARN_LEVEL_WARN) + if (cond->m_level == Sql_condition::WARN_LEVEL_WARN) { my_ok(thd); result= FALSE; @@ -446,12 +447,12 @@ bool Signal_common::raise_condition(THD *thd, MYSQL_ERROR *cond) DBUG_RETURN(result); } -bool Signal_statement::execute(THD *thd) +bool Sql_cmd_signal::execute(THD *thd) { bool result= TRUE; - MYSQL_ERROR cond(thd->mem_root); + Sql_condition cond(thd->mem_root); - DBUG_ENTER("Signal_statement::execute"); + DBUG_ENTER("Sql_cmd_signal::execute"); /* WL#2110 SIGNAL specification 
says: @@ -465,9 +466,9 @@ bool Signal_statement::execute(THD *thd) This has roots in the SQL standard specification for SIGNAL. */ - thd->stmt_da->reset_diagnostics_area(); + thd->get_stmt_da()->reset_diagnostics_area(); thd->set_row_count_func(0); - thd->warning_info->clear_warning_info(thd->query_id); + thd->get_stmt_da()->clear_warning_info(thd->query_id); result= raise_condition(thd, &cond); @@ -475,14 +476,27 @@ bool Signal_statement::execute(THD *thd) } -bool Resignal_statement::execute(THD *thd) +/** + Execute RESIGNAL SQL-statement. + + @param thd Thread context. + + @return Error status + @retval true in case of error + @retval false on success +*/ + +bool Sql_cmd_resignal::execute(THD *thd) { - Sql_condition_info *signaled; + Diagnostics_area *da= thd->get_stmt_da(); + const sp_rcontext::Sql_condition_info *signaled; int result= TRUE; DBUG_ENTER("Resignal_statement::execute"); - thd->warning_info->m_warn_id= thd->query_id; + // This is a way to force sql_conditions from the current Warning_info to be + // passed to the caller's Warning_info. + da->set_warning_info_id(thd->query_id); if (! thd->spcont || ! (signaled= thd->spcont->raised_condition())) { @@ -490,22 +504,38 @@ bool Resignal_statement::execute(THD *thd) DBUG_RETURN(result); } - MYSQL_ERROR signaled_err(thd->mem_root); - signaled_err.set(signaled->m_sql_errno, - signaled->m_sql_state, - signaled->m_level, - signaled->m_message); + Sql_condition signaled_err(thd->mem_root); + signaled_err.set(signaled->sql_errno, + signaled->sql_state, + signaled->level, + signaled->message); - if (m_cond == NULL) + if (m_cond) { - /* RESIGNAL without signal_value */ - result= raise_condition(thd, &signaled_err); - DBUG_RETURN(result); + query_cache_abort(&thd->query_cache_tls); + + /* Keep handled conditions. */ + da->unmark_sql_conditions_from_removal(); + + /* Check if the old condition still exists. 
*/ + if (da->has_sql_condition(signaled->message, strlen(signaled->message))) + { + /* Make room for the new RESIGNAL condition. */ + da->reserve_space(thd, 1); + } + else + { + /* Make room for old condition + the new RESIGNAL condition. */ + da->reserve_space(thd, 2); + + da->push_warning(thd, &signaled_err); + } } /* RESIGNAL with signal_value */ result= raise_condition(thd, &signaled_err); DBUG_RETURN(result); + } diff --git a/sql/sql_signal.h b/sql/sql_signal.h index 058457a3639..2a508eed5bf 100644 --- a/sql/sql_signal.h +++ b/sql/sql_signal.h @@ -18,27 +18,25 @@ #define SQL_SIGNAL_H /** - Signal_common represents the common properties of the SIGNAL and RESIGNAL - statements. + Sql_cmd_common_signal represents the common properties of the + SIGNAL and RESIGNAL statements. */ -class Signal_common : public Sql_statement +class Sql_cmd_common_signal : public Sql_cmd { protected: /** Constructor. - @param lex the LEX structure for this statement. @param cond the condition signaled if any, or NULL. @param set collection of signal condition item assignments. */ - Signal_common(LEX *lex, - const sp_cond_type_t *cond, - const Set_signal_information& set) - : Sql_statement(lex), + Sql_cmd_common_signal(const sp_condition_value *cond, + const Set_signal_information& set) + : Sql_cmd(), m_cond(cond), m_set_signal_information(set) {} - virtual ~Signal_common() + virtual ~Sql_cmd_common_signal() {} /** @@ -49,9 +47,9 @@ protected: @param level the level to assign @param sqlcode the sql code to assign */ - static void assign_defaults(MYSQL_ERROR *cond, + static void assign_defaults(Sql_condition *cond, bool set_level_code, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, int sqlcode); /** @@ -60,7 +58,7 @@ protected: @param thd the current thread. @param cond the condition to update. 
*/ - void eval_defaults(THD *thd, MYSQL_ERROR *cond); + void eval_defaults(THD *thd, Sql_condition *cond); /** Evaluate each signal condition items for this statement. @@ -68,7 +66,7 @@ protected: @param cond the condition to update. @return 0 on success. */ - int eval_signal_informations(THD *thd, MYSQL_ERROR *cond); + int eval_signal_informations(THD *thd, Sql_condition *cond); /** Raise a SQL condition. @@ -76,13 +74,13 @@ protected: @param cond the condition to raise. @return false on success. */ - bool raise_condition(THD *thd, MYSQL_ERROR *cond); + bool raise_condition(THD *thd, Sql_condition *cond); /** The condition to signal or resignal. This member is optional and can be NULL (RESIGNAL). */ - const sp_cond_type_t *m_cond; + const sp_condition_value *m_cond; /** Collection of 'SET item = value' assignments in the @@ -92,60 +90,56 @@ protected: }; /** - Signal_statement represents a SIGNAL statement. + Sql_cmd_signal represents a SIGNAL statement. */ -class Signal_statement : public Signal_common +class Sql_cmd_signal : public Sql_cmd_common_signal { public: /** Constructor, used to represent a SIGNAL statement. - @param lex the LEX structure for this statement. @param cond the SQL condition to signal (required). @param set the collection of signal informations to signal. */ - Signal_statement(LEX *lex, - const sp_cond_type_t *cond, - const Set_signal_information& set) - : Signal_common(lex, cond, set) + Sql_cmd_signal(const sp_condition_value *cond, + const Set_signal_information& set) + : Sql_cmd_common_signal(cond, set) {} - virtual ~Signal_statement() + virtual ~Sql_cmd_signal() {} - /** - Execute a SIGNAL statement at runtime. - @param thd the current thread. - @return false on success. - */ + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_SIGNAL; + } + virtual bool execute(THD *thd); }; /** - Resignal_statement represents a RESIGNAL statement. + Sql_cmd_resignal represents a RESIGNAL statement. 
*/ -class Resignal_statement : public Signal_common +class Sql_cmd_resignal : public Sql_cmd_common_signal { public: /** Constructor, used to represent a RESIGNAL statement. - @param lex the LEX structure for this statement. @param cond the SQL condition to resignal (optional, may be NULL). @param set the collection of signal informations to resignal. */ - Resignal_statement(LEX *lex, - const sp_cond_type_t *cond, - const Set_signal_information& set) - : Signal_common(lex, cond, set) + Sql_cmd_resignal(const sp_condition_value *cond, + const Set_signal_information& set) + : Sql_cmd_common_signal(cond, set) {} - virtual ~Resignal_statement() + virtual ~Sql_cmd_resignal() {} - /** - Execute a RESIGNAL statement at runtime. - @param thd the current thread. - @return 0 on success. - */ + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_RESIGNAL; + } + virtual bool execute(THD *thd); }; diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 2e2886a1d3f..94cbf3b946a 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -1548,7 +1548,7 @@ public: is_single_comp_pk= FALSE; uint pk= table->s->primary_key; if ((uint) (table->key_info - key_info) == pk && - table->key_info[pk].key_parts == 1) + table->key_info[pk].user_defined_key_parts == 1) { prefixes= 1; is_single_comp_pk= TRUE; @@ -1990,12 +1990,12 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share, DBUG_RETURN(1); if (!is_safe) - mysql_mutex_lock(&table_share->LOCK_ha_data); + mysql_mutex_lock(&table_share->LOCK_share); if (stats_cb->stats_can_be_read) { if (!is_safe) - mysql_mutex_unlock(&table_share->LOCK_ha_data); + mysql_mutex_unlock(&table_share->LOCK_share); DBUG_RETURN(0); } @@ -2007,7 +2007,7 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share, if (!table_stats) { if (!is_safe) - mysql_mutex_unlock(&table_share->LOCK_ha_data); + mysql_mutex_unlock(&table_share->LOCK_share); DBUG_RETURN(1); } memset(table_stats, 0, 
sizeof(Table_statistics)); @@ -2080,7 +2080,7 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share, stats_cb->stats_can_be_read= TRUE; if (!is_safe) - mysql_mutex_unlock(&table_share->LOCK_ha_data); + mysql_mutex_unlock(&table_share->LOCK_share); DBUG_RETURN(0); } @@ -2124,12 +2124,12 @@ int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share, DBUG_ENTER("alloc_histograms_for_table_share"); if (!is_safe) - mysql_mutex_lock(&table_share->LOCK_ha_data); + mysql_mutex_lock(&table_share->LOCK_share); if (stats_cb->histograms_can_be_read) { if (!is_safe) - mysql_mutex_unlock(&table_share->LOCK_ha_data); + mysql_mutex_unlock(&table_share->LOCK_share); DBUG_RETURN(0); } @@ -2143,7 +2143,7 @@ int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share, if (!histograms) { if (!is_safe) - mysql_mutex_unlock(&table_share->LOCK_ha_data); + mysql_mutex_unlock(&table_share->LOCK_share); DBUG_RETURN(1); } memset(histograms, 0, total_hist_size); @@ -2152,7 +2152,7 @@ int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share, } if (!is_safe) - mysql_mutex_unlock(&table_share->LOCK_ha_data); + mysql_mutex_unlock(&table_share->LOCK_share); DBUG_RETURN(0); @@ -2177,7 +2177,7 @@ void Column_statistics_collected::init(THD *thd, Field *table_field) is_single_pk_col= FALSE; - if (pk != MAX_KEY && table->key_info[pk].key_parts == 1 && + if (pk != MAX_KEY && table->key_info[pk].user_defined_key_parts == 1 && table->key_info[pk].key_part[0].fieldnr == table_field->field_index + 1) is_single_pk_col= TRUE; @@ -2727,12 +2727,12 @@ int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables) } key_part_map ext_key_part_map= key_info->ext_key_part_map; - if (key_info->key_parts != key_info->ext_key_parts && - key_info->read_stats->get_avg_frequency(key_info->key_parts) == 0) + if (key_info->user_defined_key_parts != key_info->ext_key_parts && + 
key_info->read_stats->get_avg_frequency(key_info->user_defined_key_parts) == 0) { KEY *pk_key_info= table_share->key_info + table_share->primary_key; - uint k= key_info->key_parts; - uint pk_parts= pk_key_info->key_parts; + uint k= key_info->user_defined_key_parts; + uint pk_parts= pk_key_info->user_defined_key_parts; ha_rows n_rows= read_stats->cardinality; double k_dist= n_rows / key_info->read_stats->get_avg_frequency(k-1); uint m= 0; @@ -3193,7 +3193,7 @@ int delete_statistics_for_index(THD *thd, TABLE *tab, KEY *key_info, } else { - for (uint i= key_info->key_parts; i < key_info->ext_key_parts; i++) + for (uint i= key_info->user_defined_key_parts; i < key_info->ext_key_parts; i++) { index_stat.set_key_fields(key_info, i+1); if (index_stat.find_next_stat_for_prefix(4)) @@ -3341,7 +3341,10 @@ int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col, int rc= 0; DBUG_ENTER("rename_column_in_stat_tables"); - + + if (tab->s->tmp_table != NO_TMP_TABLE) + DBUG_RETURN(0); + if (open_single_stat_table(thd, &tables, &stat_table_name[1], &open_tables_backup, TRUE)) { diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h index c6a72478c34..c1c80921861 100644 --- a/sql/sql_statistics.h +++ b/sql/sql_statistics.h @@ -258,6 +258,17 @@ public: class Columns_statistics; class Index_statistics; +static inline +int rename_table_in_stat_tables(THD *thd, const char *db, const char *tab, + const char *new_db, const char *new_tab) +{ + LEX_STRING od= { const_cast<char*>(db), strlen(db) }; + LEX_STRING ot= { const_cast<char*>(tab), strlen(tab) }; + LEX_STRING nd= { const_cast<char*>(new_db), strlen(new_db) }; + LEX_STRING nt= { const_cast<char*>(new_tab), strlen(new_tab) }; + return rename_table_in_stat_tables(thd, &od, &ot, &nd, &nt); +} + /* Statistical data on a table */ diff --git a/sql/sql_string.cc b/sql/sql_string.cc index f1cb5e07eca..ddac315f80f 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -661,7 +661,7 @@ int String::reserve(uint32 space_needed, 
uint32 grow_by) { if (Alloced_length < str_length + space_needed) { - if (realloc(Alloced_length + max(space_needed, grow_by) - 1)) + if (realloc(Alloced_length + MY_MAX(space_needed, grow_by) - 1)) return TRUE; } return FALSE; @@ -748,7 +748,7 @@ int sortcmp(const String *s,const String *t, CHARSET_INFO *cs) int stringcmp(const String *s,const String *t) { - uint32 s_len=s->length(),t_len=t->length(),len=min(s_len,t_len); + uint32 s_len=s->length(),t_len=t->length(),len=MY_MIN(s_len,t_len); int cmp= memcmp(s->ptr(), t->ptr(), len); return (cmp) ? cmp : (int) (s_len - t_len); } @@ -765,7 +765,7 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length) } if (to->realloc(from_length)) return from; // Actually an error - if ((to->str_length=min(from->str_length,from_length))) + if ((to->str_length=MY_MIN(from->str_length,from_length))) memcpy(to->Ptr,from->Ptr,to->str_length); to->str_charset=from->str_charset; return to; @@ -776,67 +776,6 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length) Help functions ****************************************************************************/ - - -/* - Optimized for quick copying of ASCII characters in the range 0x00..0x7F. -*/ -uint32 -copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs, - const char *from, uint32 from_length, CHARSET_INFO *from_cs, - uint *errors) -{ - /* - If any of the character sets is not ASCII compatible, - immediately switch to slow mb_wc->wc_mb method. - */ - if ((to_cs->state | from_cs->state) & MY_CS_NONASCII) - return copy_and_convert_extended(to, to_length, to_cs, - from, from_length, from_cs, errors); - - uint32 length= min(to_length, from_length), length2= length; - -#if defined(__i386__) || defined(__x86_64__) - /* - Special loop for i386, it allows to refer to a - non-aligned memory block as UINT32, which makes - it possible to copy four bytes at once. This - gives about 10% performance improvement comparing - to byte-by-byte loop. 
- */ - for ( ; length >= 4; length-= 4, from+= 4, to+= 4) - { - if ((*(uint32*)from) & 0x80808080) - break; - *((uint32*) to)= *((const uint32*) from); - } -#endif - - for (; ; *to++= *from++, length--) - { - if (!length) - { - *errors= 0; - return length2; - } - if (*((unsigned char*) from) > 0x7F) /* A non-ASCII character */ - { - uint32 copied_length= length2 - length; - to_length-= copied_length; - from_length-= copied_length; - return copied_length + copy_and_convert_extended(to, to_length, - to_cs, - from, from_length, - from_cs, - errors); - } - } - - DBUG_ASSERT(FALSE); // Should never get to here - return 0; // Make compiler happy -} - - /** Copy string with HEX-encoding of "bad" characters. @@ -954,7 +893,7 @@ well_formed_copy_nchars(CHARSET_INFO *to_cs, if (to_cs == &my_charset_bin) { - res= min(min(nchars, to_length), from_length); + res= MY_MIN(MY_MIN(nchars, to_length), from_length); memmove(to, from, res); *from_end_pos= from + res; *well_formed_error_pos= NULL; @@ -1155,7 +1094,7 @@ uint convert_to_printable(char *to, size_t to_len, char *t= to; char *t_end= to + to_len - 1; // '- 1' is for the '\0' at the end const char *f= from; - const char *f_end= from + (nbytes ? min(from_len, nbytes) : from_len); + const char *f_end= from + (nbytes ? MY_MIN(from_len, nbytes) : from_len); char *dots= to; // last safe place to append '...' 
if (!f || t == t_end) diff --git a/sql/sql_string.h b/sql/sql_string.h index 1979ac6e4af..352dfbe9fa3 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -34,9 +34,13 @@ typedef struct st_mem_root MEM_ROOT; int sortcmp(const String *a,const String *b, CHARSET_INFO *cs); String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); -uint32 copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs, - const char *from, uint32 from_length, - CHARSET_INFO *from_cs, uint *errors); +inline uint32 copy_and_convert(char *to, uint32 to_length, + const CHARSET_INFO *to_cs, + const char *from, uint32 from_length, + const CHARSET_INFO *from_cs, uint *errors) +{ + return my_convert(to, to_length, to_cs, from, from_length, from_cs, errors); +} uint32 well_formed_copy_nchars(CHARSET_INFO *to_cs, char *to, uint to_length, CHARSET_INFO *from_cs, diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 35a486a960a..79c6d4cbaf9 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -32,6 +32,7 @@ #include "sql_partition.h" // mem_alloc_error, // generate_partition_syntax, // partition_info + // NOT_A_PARTITION_ID #include "sql_db.h" // load_db_opt_by_name #include "sql_time.h" // make_truncated_value_warning #include "records.h" // init_read_record, end_read_record @@ -63,10 +64,12 @@ const char *primary_key_name="PRIMARY"; static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end); static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end); -static int copy_data_between_tables(THD *thd, TABLE *,TABLE *, - List<Create_field> &, bool, - uint, ORDER *, ha_rows *,ha_rows *, - enum enum_enable_or_disable, bool); +static int copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, + List<Create_field> &create, bool ignore, + uint order_num, ORDER *order, + ha_rows *copied,ha_rows *deleted, + Alter_info::enum_enable_or_disable keys_onoff, + Alter_table_ctx *alter_ctx); static bool prepare_blob_field(THD *thd, Create_field *sql_field); 
static bool check_engine(THD *, const char *, const char *, HA_CREATE_INFO *); @@ -100,7 +103,8 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p, tmp_name[name_len]= 0; conv_name= tmp_name; } - res= strconvert(&my_charset_filename, conv_name, system_charset_info, + res= strconvert(&my_charset_filename, conv_name, name_len, + system_charset_info, conv_string, FN_REFLEN, &errors); if (!res || errors) { @@ -376,7 +380,7 @@ uint filename_to_tablename(const char *from, char *to, uint to_length DBUG_ENTER("filename_to_tablename"); DBUG_PRINT("enter", ("from '%s'", from)); - res= strconvert(&my_charset_filename, from, + res= strconvert(&my_charset_filename, from, FN_REFLEN, system_charset_info, to, to_length, &errors); if (errors) // Old 5.0 name { @@ -467,7 +471,7 @@ uint tablename_to_filename(const char *from, char *to, uint to_length) } DBUG_RETURN(length); } - length= strconvert(system_charset_info, from, + length= strconvert(system_charset_info, from, FN_REFLEN, &my_charset_filename, to, to_length, &errors); if (check_if_legal_tablename(to) && length + 4 < to_length) @@ -523,7 +527,7 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db, db, table_name, ext, flags)); if (flags & FN_IS_TMP) // FN_FROM_IS_TMP | FN_TO_IS_TMP - strnmov(tbbuff, table_name, sizeof(tbbuff)); + strmake(tbbuff, table_name, sizeof(tbbuff)-1); else (void) tablename_to_filename(table_name, tbbuff, sizeof(tbbuff)); @@ -538,8 +542,11 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db, pos= strnmov(pos, FN_ROOTDIR, end - pos); pos= strxnmov(pos, end - pos, dbbuff, FN_ROOTDIR, NullS); #ifdef USE_SYMDIR - unpack_dirname(buff, buff); - pos= strend(buff); + if (!(flags & SKIP_SYMDIR_ACCESS)) + { + unpack_dirname(buff, buff); + pos= strend(buff); + } #endif pos= strxnmov(pos, end - pos, tbbuff, ext, NullS); @@ -548,22 +555,19 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db, } -/* - Creates path to a file: 
mysql_tmpdir/#sql1234_12_1.ext - - SYNOPSIS - build_tmptable_filename() - thd The thread handle. - buff Where to write result in my_charset_filename. - bufflen buff size +/** + Create path to a temporary table mysql_tmpdir/#sql1234_12_1 + (i.e. to its .FRM file but without an extension). - NOTES + @param thd The thread handle. + @param buff Where to write result in my_charset_filename. + @param bufflen buff size + @note Uses current_pid, thread_id, and tmp_table counter to create a file name in mysql_tmpdir. - RETURN - path length + @return Path length. */ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen) @@ -571,9 +575,9 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen) DBUG_ENTER("build_tmptable_filename"); char *p= strnmov(buff, mysql_tmpdir, bufflen); - my_snprintf(p, bufflen - (p - buff), "/%s%lx_%lx_%x%s", + my_snprintf(p, bufflen - (p - buff), "/%s%lx_%lx_%x", tmp_file_prefix, current_pid, - thd->thread_id, thd->tmp_table++, reg_ext); + thd->thread_id, thd->tmp_table++); if (lower_case_table_names) { @@ -612,9 +616,15 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen) -------------------------------------------------------------------------- */ - struct st_global_ddl_log { + /* + We need to adjust buffer size to be able to handle downgrades/upgrades + where IO_SIZE has changed. We'll set the buffer size such that we can + handle that the buffer size was upto 4 times bigger in the version + that wrote the DDL log. + */ + char file_entry_buf[4*IO_SIZE]; char file_name_str[FN_REFLEN]; char *file_name; DDL_LOG_MEMORY_ENTRY *first_free; @@ -642,31 +652,28 @@ mysql_mutex_t LOCK_gdl; #define DDL_LOG_NUM_ENTRY_POS 0 #define DDL_LOG_NAME_LEN_POS 4 #define DDL_LOG_IO_SIZE_POS 8 -#define DDL_LOG_HEADER_SIZE 12 /** Read one entry from ddl log file. 
- @param[out] file_entry_buf Buffer to read into - @param entry_no Entry number to read - @param size Number of bytes of the entry to read + + @param entry_no Entry number to read @return Operation status @retval true Error @retval false Success */ -static bool read_ddl_log_file_entry(uchar *file_entry_buf, - uint entry_no, - uint size) +static bool read_ddl_log_file_entry(uint entry_no) { bool error= FALSE; File file_id= global_ddl_log.file_id; + uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; uint io_size= global_ddl_log.io_size; DBUG_ENTER("read_ddl_log_file_entry"); - DBUG_ASSERT(io_size >= size); - if (mysql_file_pread(file_id, file_entry_buf, size, io_size * entry_no, - MYF(MY_WME)) != size) + mysql_mutex_assert_owner(&LOCK_gdl); + if (mysql_file_pread(file_id, file_entry_buf, io_size, io_size * entry_no, + MYF(MY_WME)) != io_size) error= TRUE; DBUG_RETURN(error); } @@ -675,75 +682,77 @@ static bool read_ddl_log_file_entry(uchar *file_entry_buf, /** Write one entry to ddl log file. 
- @param file_entry_buf Buffer to write - @param entry_no Entry number to write - @param size Number of bytes of the entry to write + @param entry_no Entry number to write @return Operation status @retval true Error @retval false Success */ -static bool write_ddl_log_file_entry(uchar *file_entry_buf, - uint entry_no, - uint size) +static bool write_ddl_log_file_entry(uint entry_no) { bool error= FALSE; File file_id= global_ddl_log.file_id; - uint io_size= global_ddl_log.io_size; + uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; DBUG_ENTER("write_ddl_log_file_entry"); - DBUG_ASSERT(io_size >= size); - if (mysql_file_pwrite(file_id, file_entry_buf, size, - io_size * entry_no, MYF(MY_WME)) != size) + mysql_mutex_assert_owner(&LOCK_gdl); + if (mysql_file_pwrite(file_id, file_entry_buf, + IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE) error= TRUE; DBUG_RETURN(error); } -/* - Write ddl log header - SYNOPSIS - write_ddl_log_header() - RETURN VALUES - TRUE Error - FALSE Success +/** + Sync the ddl log file. + + @return Operation status + @retval FALSE Success + @retval TRUE Error +*/ + + +static bool sync_ddl_log_file() +{ + DBUG_ENTER("sync_ddl_log_file"); + DBUG_RETURN(mysql_file_sync(global_ddl_log.file_id, MYF(MY_WME))); +} + + +/** + Write ddl log header. 
+ + @return Operation status + @retval TRUE Error + @retval FALSE Success */ static bool write_ddl_log_header() { uint16 const_var; - bool error= FALSE; - uchar file_entry_buf[DDL_LOG_HEADER_SIZE]; DBUG_ENTER("write_ddl_log_header"); - DBUG_ASSERT((DDL_LOG_NAME_POS + 3 * global_ddl_log.name_len) - <= global_ddl_log.io_size); - int4store(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS], + int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS], global_ddl_log.num_entries); - const_var= global_ddl_log.name_len; - int4store(&file_entry_buf[DDL_LOG_NAME_LEN_POS], + const_var= FN_REFLEN; + int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS], (ulong) const_var); - const_var= global_ddl_log.io_size; - int4store(&file_entry_buf[DDL_LOG_IO_SIZE_POS], + const_var= IO_SIZE; + int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS], (ulong) const_var); - if (write_ddl_log_file_entry(file_entry_buf, 0UL, DDL_LOG_HEADER_SIZE)) + if (write_ddl_log_file_entry(0UL)) { sql_print_error("Error writing ddl log header"); DBUG_RETURN(TRUE); } - (void) sync_ddl_log(); - DBUG_RETURN(error); + DBUG_RETURN(sync_ddl_log_file()); } -/* - Create ddl log file name - SYNOPSIS - create_ddl_log_file_name() - file_name Filename setup - RETURN VALUES - NONE +/** + Create ddl log file name. + @param file_name Filename setup */ static inline void create_ddl_log_file_name(char *file_name) @@ -752,35 +761,32 @@ static inline void create_ddl_log_file_name(char *file_name) } -/* - Read header of ddl log file - SYNOPSIS - read_ddl_log_header() - RETURN VALUES - > 0 Last entry in ddl log - 0 No entries in ddl log - DESCRIPTION - When we read the ddl log header we get information about maximum sizes - of names in the ddl log and we also get information about the number - of entries in the ddl log. +/** + Read header of ddl log file. 
+ + When we read the ddl log header we get information about maximum sizes + of names in the ddl log and we also get information about the number + of entries in the ddl log. + + @return Last entry in ddl log (0 if no entries) */ static uint read_ddl_log_header() { - char file_entry_buf[DDL_LOG_HEADER_SIZE]; + uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; char file_name[FN_REFLEN]; uint entry_no; bool successful_open= FALSE; DBUG_ENTER("read_ddl_log_header"); - DBUG_ASSERT(global_ddl_log.io_size <= IO_SIZE); + mysql_mutex_init(key_LOCK_gdl, &LOCK_gdl, MY_MUTEX_INIT_SLOW); + mysql_mutex_lock(&LOCK_gdl); create_ddl_log_file_name(file_name); if ((global_ddl_log.file_id= mysql_file_open(key_file_global_ddl_log, file_name, O_RDWR | O_BINARY, MYF(0))) >= 0) { - if (read_ddl_log_file_entry((uchar *) file_entry_buf, 0UL, - DDL_LOG_HEADER_SIZE)) + if (read_ddl_log_file_entry(0UL)) { /* Write message into error log */ sql_print_error("Failed to read ddl log file in recovery"); @@ -793,6 +799,8 @@ static uint read_ddl_log_header() entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]); global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]); global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]); + DBUG_ASSERT(global_ddl_log.io_size <= + sizeof(global_ddl_log.file_entry_buf)); } else { @@ -801,28 +809,72 @@ static uint read_ddl_log_header() global_ddl_log.first_free= NULL; global_ddl_log.first_used= NULL; global_ddl_log.num_entries= 0; - mysql_mutex_init(key_LOCK_gdl, &LOCK_gdl, MY_MUTEX_INIT_FAST); global_ddl_log.do_release= true; + mysql_mutex_unlock(&LOCK_gdl); DBUG_RETURN(entry_no); } /** - Set ddl log entry struct from buffer - @param read_entry Entry number - @param file_entry_buf Buffer to use - @param ddl_log_entry Entry to be set + Convert from ddl_log_entry struct to file_entry_buf binary blob. - @note Pointers in ddl_log_entry will point into file_entry_buf! 
+ @param ddl_log_entry filled in ddl_log_entry struct. */ -static void set_ddl_log_entry_from_buf(uint read_entry, - uchar *file_entry_buf, - DDL_LOG_ENTRY *ddl_log_entry) +static void set_global_from_ddl_log_entry(const DDL_LOG_ENTRY *ddl_log_entry) { + mysql_mutex_assert_owner(&LOCK_gdl); + global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= + (char)DDL_LOG_ENTRY_CODE; + global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= + (char)ddl_log_entry->action_type; + global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0; + int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], + ddl_log_entry->next_entry); + DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_REFLEN); + strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS], + ddl_log_entry->name, FN_REFLEN - 1); + if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION || + ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION || + ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION) + { + DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_REFLEN); + strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN], + ddl_log_entry->from_name, FN_REFLEN - 1); + } + else + global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN]= 0; + DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_REFLEN); + strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_REFLEN)], + ddl_log_entry->handler_name, FN_REFLEN - 1); + if (ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION) + { + DBUG_ASSERT(strlen(ddl_log_entry->tmp_name) < FN_REFLEN); + strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)], + ddl_log_entry->tmp_name, FN_REFLEN - 1); + } + else + global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)]= 0; +} + + +/** + Convert from file_entry_buf binary blob to ddl_log_entry struct. + + @param[out] ddl_log_entry struct to fill in. + + @note Strings (names) are pointing to the global_ddl_log structure, + so LOCK_gdl needs to be hold until they are read or copied. 
+*/ + +static void set_ddl_log_entry_from_global(DDL_LOG_ENTRY *ddl_log_entry, + const uint read_entry) +{ + char *file_entry_buf= (char*) global_ddl_log.file_entry_buf; uint inx; uchar single_char; - DBUG_ENTER("set_ddl_log_entry_from_buf"); + + mysql_mutex_assert_owner(&LOCK_gdl); ddl_log_entry->entry_pos= read_entry; single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]; ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char; @@ -830,27 +882,56 @@ static void set_ddl_log_entry_from_buf(uint read_entry, ddl_log_entry->action_type= (enum ddl_log_action_code)single_char; ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS]; ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]); - ddl_log_entry->name= (char*) &file_entry_buf[DDL_LOG_NAME_POS]; + ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS]; inx= DDL_LOG_NAME_POS + global_ddl_log.name_len; - ddl_log_entry->from_name= (char*) &file_entry_buf[inx]; + ddl_log_entry->from_name= &file_entry_buf[inx]; inx+= global_ddl_log.name_len; - ddl_log_entry->handler_name= (char*) &file_entry_buf[inx]; - DBUG_VOID_RETURN; + ddl_log_entry->handler_name= &file_entry_buf[inx]; + if (ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION) + { + inx+= global_ddl_log.name_len; + ddl_log_entry->tmp_name= &file_entry_buf[inx]; + } + else + ddl_log_entry->tmp_name= NULL; } - -/* - Initialise ddl log - SYNOPSIS - init_ddl_log() - DESCRIPTION - Write the header of the ddl log file and length of names. Also set - number of entries to zero. +/** + Read a ddl log entry. - RETURN VALUES - TRUE Error - FALSE Success + Read a specified entry in the ddl log. 
+ + @param read_entry Number of entry to read + @param[out] entry_info Information from entry + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry) +{ + DBUG_ENTER("read_ddl_log_entry"); + + if (read_ddl_log_file_entry(read_entry)) + { + DBUG_RETURN(TRUE); + } + set_ddl_log_entry_from_global(ddl_log_entry, read_entry); + DBUG_RETURN(FALSE); +} + + +/** + Initialise ddl log. + + Write the header of the ddl log file and length of names. Also set + number of entries to zero. + + @return Operation status + @retval TRUE Error + @retval FALSE Success */ static bool init_ddl_log() @@ -862,7 +943,7 @@ static bool init_ddl_log() goto end; global_ddl_log.io_size= IO_SIZE; - global_ddl_log.name_len= FN_LEN; + global_ddl_log.name_len= FN_REFLEN; create_ddl_log_file_name(file_name); if ((global_ddl_log.file_id= mysql_file_create(key_file_global_ddl_log, file_name, CREATE_MODE, @@ -886,14 +967,116 @@ end: } -/* +/** + Sync ddl log file. + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool sync_ddl_log_no_lock() +{ + DBUG_ENTER("sync_ddl_log_no_lock"); + + mysql_mutex_assert_owner(&LOCK_gdl); + if ((!global_ddl_log.recovery_phase) && + init_ddl_log()) + { + DBUG_RETURN(TRUE); + } + DBUG_RETURN(sync_ddl_log_file()); +} + + +/** + @brief Deactivate an individual entry. + + @details For complex rename operations we need to deactivate individual + entries. + + During replace operations where we start with an existing table called + t1 and a replacement table called t1#temp or something else and where + we want to delete t1 and rename t1#temp to t1 this is not possible to + do in a safe manner unless the ddl log is informed of the phases in + the change. + + Delete actions are 1-phase actions that can be ignored immediately after + being executed. 
+ Rename actions from x to y is also a 1-phase action since there is no + interaction with any other handlers named x and y. + Replace action where drop y and x -> y happens needs to be a two-phase + action. Thus the first phase will drop y and the second phase will + rename x -> y. + + @param entry_no Entry position of record to change + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool deactivate_ddl_log_entry_no_lock(uint entry_no) +{ + uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf; + DBUG_ENTER("deactivate_ddl_log_entry_no_lock"); + + mysql_mutex_assert_owner(&LOCK_gdl); + if (!read_ddl_log_file_entry(entry_no)) + { + if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE) + { + /* + Log entry, if complete mark it done (IGNORE). + Otherwise increase the phase by one. + */ + if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION || + file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION || + (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION && + file_entry_buf[DDL_LOG_PHASE_POS] == 1) || + (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_EXCHANGE_ACTION && + file_entry_buf[DDL_LOG_PHASE_POS] >= EXCH_PHASE_TEMP_TO_FROM)) + file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE; + else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION) + { + DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0); + file_entry_buf[DDL_LOG_PHASE_POS]= 1; + } + else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_EXCHANGE_ACTION) + { + DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] <= + EXCH_PHASE_FROM_TO_NAME); + file_entry_buf[DDL_LOG_PHASE_POS]++; + } + else + { + DBUG_ASSERT(0); + } + if (write_ddl_log_file_entry(entry_no)) + { + sql_print_error("Error in deactivating log entry. 
Position = %u", + entry_no); + DBUG_RETURN(TRUE); + } + } + } + else + { + sql_print_error("Failed in reading entry before deactivating it"); + DBUG_RETURN(TRUE); + } + DBUG_RETURN(FALSE); +} + + +/** Execute one action in a ddl log entry - SYNOPSIS - execute_ddl_log_action() - ddl_log_entry Information in action entry to execute - RETURN VALUES - TRUE Error - FALSE Success + + @param ddl_log_entry Information in action entry to execute + + @return Operation status + @retval TRUE Error + @retval FALSE Success */ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) @@ -911,17 +1094,20 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) handlerton *hton; DBUG_ENTER("execute_ddl_log_action"); + mysql_mutex_assert_owner(&LOCK_gdl); if (ddl_log_entry->entry_type == DDL_IGNORE_LOG_ENTRY_CODE) { DBUG_RETURN(FALSE); } DBUG_PRINT("ddl_log", - ("execute type %c next %u name '%s' from_name '%s' handler '%s'", + ("execute type %c next %u name '%s' from_name '%s' handler '%s'" + " tmp_name '%s'", ddl_log_entry->action_type, ddl_log_entry->next_entry, ddl_log_entry->name, ddl_log_entry->from_name, - ddl_log_entry->handler_name)); + ddl_log_entry->handler_name, + ddl_log_entry->tmp_name)); handler_name.str= (char*)ddl_log_entry->handler_name; handler_name.length= strlen(ddl_log_entry->handler_name); init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC)); @@ -935,7 +1121,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), ddl_log_entry->handler_name); goto error; } - hton= plugin_hton(plugin); + hton= plugin_data(plugin, handlerton*); file= get_new_handler((TABLE_SHARE*)0, &mem_root, hton); if (!file) { @@ -971,9 +1157,9 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) break; } } - if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos))) + if ((deactivate_ddl_log_entry_no_lock(ddl_log_entry->entry_pos))) break; - 
(void) sync_ddl_log(); + (void) sync_ddl_log_no_lock(); error= FALSE; if (ddl_log_entry->action_type == DDL_LOG_DELETE_ACTION) break; @@ -1006,12 +1192,64 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry) ddl_log_entry->name)) break; } - if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos))) + if ((deactivate_ddl_log_entry_no_lock(ddl_log_entry->entry_pos))) break; - (void) sync_ddl_log(); + (void) sync_ddl_log_no_lock(); error= FALSE; break; } + case DDL_LOG_EXCHANGE_ACTION: + { + /* We hold LOCK_gdl, so we can alter global_ddl_log.file_entry_buf */ + char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf; + /* not yet implemented for frm */ + DBUG_ASSERT(!frm_action); + /* + Using a case-switch here to revert all currently done phases, + since it will fall through until the first phase is undone. + */ + switch (ddl_log_entry->phase) { + case EXCH_PHASE_TEMP_TO_FROM: + /* tmp_name -> from_name possibly done */ + (void) file->ha_rename_table(ddl_log_entry->from_name, + ddl_log_entry->tmp_name); + /* decrease the phase and sync */ + file_entry_buf[DDL_LOG_PHASE_POS]--; + if (write_ddl_log_file_entry(ddl_log_entry->entry_pos)) + break; + if (sync_ddl_log_no_lock()) + break; + /* fall through */ + case EXCH_PHASE_FROM_TO_NAME: + /* from_name -> name possibly done */ + (void) file->ha_rename_table(ddl_log_entry->name, + ddl_log_entry->from_name); + /* decrease the phase and sync */ + file_entry_buf[DDL_LOG_PHASE_POS]--; + if (write_ddl_log_file_entry(ddl_log_entry->entry_pos)) + break; + if (sync_ddl_log_no_lock()) + break; + /* fall through */ + case EXCH_PHASE_NAME_TO_TEMP: + /* name -> tmp_name possibly done */ + (void) file->ha_rename_table(ddl_log_entry->tmp_name, + ddl_log_entry->name); + /* disable the entry and sync */ + file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE; + if (write_ddl_log_file_entry(ddl_log_entry->entry_pos)) + break; + if (sync_ddl_log_no_lock()) + break; + error= FALSE; + break; + default: 
+ DBUG_ASSERT(0); + break; + } + + break; + } default: DBUG_ASSERT(0); break; @@ -1023,14 +1261,14 @@ error: } -/* +/** Get a free entry in the ddl log - SYNOPSIS - get_free_ddl_log_entry() - out:active_entry A ddl log memory entry returned - RETURN VALUES - TRUE Error - FALSE Success + + @param[out] active_entry A ddl log memory entry returned + + @return Operation status + @retval TRUE Error + @retval FALSE Success */ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry, @@ -1040,7 +1278,6 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry, DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used; DBUG_ENTER("get_free_ddl_log_entry"); - mysql_mutex_assert_owner(&LOCK_gdl); if (global_ddl_log.first_free == NULL) { if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc( @@ -1074,76 +1311,99 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry, } +/** + Execute one entry in the ddl log. + + Executing an entry means executing a linked list of actions. 
+ + @param first_entry Reference to first action in entry + + @return Operation status + @retval TRUE Error + @retval FALSE Success +*/ + +static bool execute_ddl_log_entry_no_lock(THD *thd, uint first_entry) +{ + DDL_LOG_ENTRY ddl_log_entry; + uint read_entry= first_entry; + DBUG_ENTER("execute_ddl_log_entry_no_lock"); + + mysql_mutex_assert_owner(&LOCK_gdl); + do + { + if (read_ddl_log_entry(read_entry, &ddl_log_entry)) + { + /* Write to error log and continue with next log entry */ + sql_print_error("Failed to read entry = %u from ddl log", + read_entry); + break; + } + DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE || + ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE); + + if (execute_ddl_log_action(thd, &ddl_log_entry)) + { + /* Write to error log and continue with next log entry */ + sql_print_error("Failed to execute action for entry = %u from ddl log", + read_entry); + break; + } + read_entry= ddl_log_entry.next_entry; + } while (read_entry); + DBUG_RETURN(FALSE); +} + + /* External interface methods for the DDL log Module --------------------------------------------------- */ -/* - SYNOPSIS - write_ddl_log_entry() - ddl_log_entry Information about log entry - out:entry_written Entry information written into +/** + Write a ddl log entry. - RETURN VALUES - TRUE Error - FALSE Success + A careful write of the ddl log is performed to ensure that we can + handle crashes occurring during CREATE and ALTER TABLE processing. - DESCRIPTION - A careful write of the ddl log is performed to ensure that we can - handle crashes occurring during CREATE and ALTER TABLE processing. 
+ @param ddl_log_entry Information about log entry + @param[out] entry_written Entry information written into + + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, DDL_LOG_MEMORY_ENTRY **active_entry) { bool error, write_header; - char file_entry_buf[IO_SIZE]; DBUG_ENTER("write_ddl_log_entry"); + mysql_mutex_assert_owner(&LOCK_gdl); if (init_ddl_log()) { DBUG_RETURN(TRUE); } - memset(file_entry_buf, 0, sizeof(file_entry_buf)); - file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= - (char)DDL_LOG_ENTRY_CODE; - file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= - (char)ddl_log_entry->action_type; - file_entry_buf[DDL_LOG_PHASE_POS]= 0; - int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], - ddl_log_entry->next_entry); - DBUG_ASSERT(strlen(ddl_log_entry->name) < global_ddl_log.name_len); - strmake(&file_entry_buf[DDL_LOG_NAME_POS], ddl_log_entry->name, - global_ddl_log.name_len - 1); - if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION || - ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION) - { - DBUG_ASSERT(strlen(ddl_log_entry->from_name) < global_ddl_log.name_len); - strmake(&file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len], - ddl_log_entry->from_name, global_ddl_log.name_len - 1); - } - else - file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len]= 0; - DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < global_ddl_log.name_len); - strmake(&file_entry_buf[DDL_LOG_NAME_POS + (2*global_ddl_log.name_len)], - ddl_log_entry->handler_name, global_ddl_log.name_len - 1); + set_global_from_ddl_log_entry(ddl_log_entry); if (get_free_ddl_log_entry(active_entry, &write_header)) { DBUG_RETURN(TRUE); } error= FALSE; DBUG_PRINT("ddl_log", - ("write type %c next %u name '%s' from_name '%s' handler '%s'", - (char) file_entry_buf[DDL_LOG_ACTION_TYPE_POS], + ("write type %c next %u name '%s' from_name '%s' handler '%s'" + " tmp_name '%s'", + (char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS], 
ddl_log_entry->next_entry, - (char*) &file_entry_buf[DDL_LOG_NAME_POS], - (char*) &file_entry_buf[DDL_LOG_NAME_POS + - global_ddl_log.name_len], - (char*) &file_entry_buf[DDL_LOG_NAME_POS + - (2*global_ddl_log.name_len)])); - if (write_ddl_log_file_entry((uchar*) file_entry_buf, - (*active_entry)->entry_pos, IO_SIZE)) + (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS], + (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + + FN_REFLEN], + (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + + (2*FN_REFLEN)], + (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + + (3*FN_REFLEN)])); + if (write_ddl_log_file_entry((*active_entry)->entry_pos)) { error= TRUE; sql_print_error("Failed to write entry_no = %u", @@ -1151,7 +1411,7 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, } if (write_header && !error) { - (void) sync_ddl_log(); + (void) sync_ddl_log_no_lock(); if (write_ddl_log_header()) error= TRUE; } @@ -1161,31 +1421,30 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry, } -/* - Write final entry in the ddl log - SYNOPSIS - write_execute_ddl_log_entry() - first_entry First entry in linked list of entries +/** + @brief Write final entry in the ddl log. + + @details This is the last write in the ddl log. The previous log entries + have already been written but not yet synched to disk. + We write a couple of log entries that describes action to perform. + This entries are set-up in a linked list, however only when a first + execute entry is put as the first entry these will be executed. + This routine writes this first. + + @param first_entry First entry in linked list of entries to execute, if 0 = NULL it means that the entry is removed and the entries are put into the free list. 
- complete Flag indicating we are simply writing + @param complete Flag indicating we are simply writing info about that entry has been completed - in:out:active_entry Entry to execute, 0 = NULL if the entry + @param[in,out] active_entry Entry to execute, 0 = NULL if the entry is written first time and needs to be returned. In this case the entry written is returned in this parameter - RETURN VALUES - TRUE Error - FALSE Success - DESCRIPTION - This is the last write in the ddl log. The previous log entries have - already been written but not yet synched to disk. - We write a couple of log entries that describes action to perform. - This entries are set-up in a linked list, however only when a first - execute entry is put as the first entry these will be executed. - This routine writes this first + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool write_execute_ddl_log_entry(uint first_entry, @@ -1193,14 +1452,14 @@ bool write_execute_ddl_log_entry(uint first_entry, DDL_LOG_MEMORY_ENTRY **active_entry) { bool write_header= FALSE; - char file_entry_buf[IO_SIZE]; + char *file_entry_buf= (char*)global_ddl_log.file_entry_buf; DBUG_ENTER("write_execute_ddl_log_entry"); + mysql_mutex_assert_owner(&LOCK_gdl); if (init_ddl_log()) { DBUG_RETURN(TRUE); } - memset(file_entry_buf, 0, sizeof(file_entry_buf)); if (!complete) { /* @@ -1209,28 +1468,32 @@ bool write_execute_ddl_log_entry(uint first_entry, any log entries before, we are only here to write the execute entry to indicate it is done. 
*/ - (void) sync_ddl_log(); + (void) sync_ddl_log_no_lock(); file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_EXECUTE_CODE; } else file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE; + file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */ + file_entry_buf[DDL_LOG_PHASE_POS]= 0; int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry); + file_entry_buf[DDL_LOG_NAME_POS]= 0; + file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN]= 0; + file_entry_buf[DDL_LOG_NAME_POS + 2*FN_REFLEN]= 0; if (!(*active_entry)) { if (get_free_ddl_log_entry(active_entry, &write_header)) { DBUG_RETURN(TRUE); } + write_header= TRUE; } - if (write_ddl_log_file_entry((uchar*) file_entry_buf, - (*active_entry)->entry_pos, - IO_SIZE)) + if (write_ddl_log_file_entry((*active_entry)->entry_pos)) { sql_print_error("Error writing execute entry in ddl log"); release_ddl_log_memory_entry(*active_entry); DBUG_RETURN(TRUE); } - (void) sync_ddl_log(); + (void) sync_ddl_log_no_lock(); if (write_header) { if (write_ddl_log_header()) @@ -1243,112 +1506,54 @@ bool write_execute_ddl_log_entry(uint first_entry, } -/* - For complex rename operations we need to deactivate individual entries. - SYNOPSIS - deactivate_ddl_log_entry() - entry_no Entry position of record to change - RETURN VALUES - TRUE Error - FALSE Success - DESCRIPTION - During replace operations where we start with an existing table called - t1 and a replacement table called t1#temp or something else and where - we want to delete t1 and rename t1#temp to t1 this is not possible to - do in a safe manner unless the ddl log is informed of the phases in - the change. - - Delete actions are 1-phase actions that can be ignored immediately after - being executed. - Rename actions from x to y is also a 1-phase action since there is no - interaction with any other handlers named x and y. - Replace action where drop y and x -> y happens needs to be a two-phase - action. 
Thus the first phase will drop y and the second phase will - rename x -> y. +/** + Deactivate an individual entry. + + @details see deactivate_ddl_log_entry_no_lock. + + @param entry_no Entry position of record to change + + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool deactivate_ddl_log_entry(uint entry_no) { - uchar file_entry_buf[DDL_LOG_NAME_POS]; + bool error; DBUG_ENTER("deactivate_ddl_log_entry"); - - /* - Only need to read and write the first bytes of the entry, where - ENTRY_TYPE, ACTION_TYPE and PHASE reside. Using DDL_LOG_NAME_POS - to include all info except for the names. - */ - if (!read_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS)) - { - if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE) - { - if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION || - file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION || - (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION && - file_entry_buf[DDL_LOG_PHASE_POS] == 1)) - file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE; - else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION) - { - DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0); - file_entry_buf[DDL_LOG_PHASE_POS]= 1; - } - else - { - DBUG_ASSERT(0); - } - if (write_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS)) - { - sql_print_error("Error in deactivating log entry. Position = %u", - entry_no); - DBUG_RETURN(TRUE); - } - } - } - else - { - sql_print_error("Failed in reading entry before deactivating it"); - DBUG_RETURN(TRUE); - } - DBUG_RETURN(FALSE); + mysql_mutex_lock(&LOCK_gdl); + error= deactivate_ddl_log_entry_no_lock(entry_no); + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(error); } -/* - Sync ddl log file - SYNOPSIS - sync_ddl_log() - RETURN VALUES - TRUE Error - FALSE Success +/** + Sync ddl log file. 
+ + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool sync_ddl_log() { - bool error= FALSE; + bool error; DBUG_ENTER("sync_ddl_log"); - if ((!global_ddl_log.recovery_phase) && - init_ddl_log()) - { - DBUG_RETURN(TRUE); - } - if (mysql_file_sync(global_ddl_log.file_id, MYF(0))) - { - /* Write to error log */ - sql_print_error("Failed to sync ddl log"); - error= TRUE; - } + mysql_mutex_lock(&LOCK_gdl); + error= sync_ddl_log_no_lock(); + mysql_mutex_unlock(&LOCK_gdl); + DBUG_RETURN(error); } -/* - Release a log memory entry - SYNOPSIS - release_ddl_log_memory_entry() - log_memory_entry Log memory entry to release - RETURN VALUES - NONE +/** + Release a log memory entry. + @param log_memory_entry Log memory entry to release */ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry) @@ -1357,8 +1562,8 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry) DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry; DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry; DBUG_ENTER("release_ddl_log_memory_entry"); - mysql_mutex_assert_owner(&LOCK_gdl); + mysql_mutex_assert_owner(&LOCK_gdl); global_ddl_log.first_free= log_entry; log_entry->next_log_entry= first_free; @@ -1372,58 +1577,32 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry) } -/* - Execute one entry in the ddl log. Executing an entry means executing - a linked list of actions. - SYNOPSIS - execute_ddl_log_entry() - first_entry Reference to first action in entry - RETURN VALUES - TRUE Error - FALSE Success +/** + Execute one entry in the ddl log. + + Executing an entry means executing a linked list of actions. 
+ + @param first_entry Reference to first action in entry + + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool execute_ddl_log_entry(THD *thd, uint first_entry) { - DDL_LOG_ENTRY ddl_log_entry; - uint read_entry= first_entry; - uchar file_entry_buf[IO_SIZE]; + bool error; DBUG_ENTER("execute_ddl_log_entry"); mysql_mutex_lock(&LOCK_gdl); - do - { - if (read_ddl_log_file_entry(file_entry_buf, read_entry, IO_SIZE)) - { - /* Print the error to the log and continue with next log entry */ - sql_print_error("Failed to read entry = %u from ddl log", - read_entry); - break; - } - set_ddl_log_entry_from_buf(read_entry, file_entry_buf, &ddl_log_entry); - DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE || - ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE); - - if (execute_ddl_log_action(thd, &ddl_log_entry)) - { - /* Print the error to the log and continue with next log entry */ - sql_print_error("Failed to execute action for entry = %u from ddl log", - read_entry); - break; - } - read_entry= ddl_log_entry.next_entry; - } while (read_entry); + error= execute_ddl_log_entry_no_lock(thd, first_entry); mysql_mutex_unlock(&LOCK_gdl); - DBUG_RETURN(FALSE); + DBUG_RETURN(error); } -/* - Close the ddl log - SYNOPSIS - close_ddl_log() - RETURN VALUES - NONE +/** + Close the ddl log. */ static void close_ddl_log() @@ -1438,12 +1617,8 @@ static void close_ddl_log() } -/* - Execute the ddl log at recovery of MySQL Server - SYNOPSIS - execute_ddl_log_recovery() - RETURN VALUES - NONE +/** + Execute the ddl log at recovery of MySQL Server. 
*/ void execute_ddl_log_recovery() @@ -1451,8 +1626,6 @@ void execute_ddl_log_recovery() uint num_entries, i; THD *thd; DDL_LOG_ENTRY ddl_log_entry; - uchar *file_entry_buf; - uint io_size; char file_name[FN_REFLEN]; static char recover_query_string[]= "INTERNAL DDL LOG RECOVER IN PROGRESS"; DBUG_ENTER("execute_ddl_log_recovery"); @@ -1460,6 +1633,7 @@ void execute_ddl_log_recovery() /* Initialise global_ddl_log struct */ + bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf)); global_ddl_log.inited= FALSE; global_ddl_log.recovery_phase= TRUE; global_ddl_log.io_size= IO_SIZE; @@ -1477,26 +1651,18 @@ void execute_ddl_log_recovery() /* this also initialize LOCK_gdl */ num_entries= read_ddl_log_header(); - io_size= global_ddl_log.io_size; - file_entry_buf= (uchar*) my_malloc(io_size, MYF(0)); - if (!file_entry_buf) - { - sql_print_error("Failed to allocate buffer for recover ddl log"); - DBUG_VOID_RETURN; - } + mysql_mutex_lock(&LOCK_gdl); for (i= 1; i < num_entries + 1; i++) { - if (read_ddl_log_file_entry(file_entry_buf, i, io_size)) + if (read_ddl_log_entry(i, &ddl_log_entry)) { sql_print_error("Failed to read entry no = %u from ddl log", i); continue; } - - set_ddl_log_entry_from_buf(i, file_entry_buf, &ddl_log_entry); if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE) { - if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry)) + if (execute_ddl_log_entry_no_lock(thd, ddl_log_entry.next_entry)) { /* Real unpleasant scenario but we continue anyways. 
*/ continue; @@ -1507,20 +1673,16 @@ void execute_ddl_log_recovery() create_ddl_log_file_name(file_name); (void) mysql_file_delete(key_file_global_ddl_log, file_name, MYF(0)); global_ddl_log.recovery_phase= FALSE; + mysql_mutex_unlock(&LOCK_gdl); delete thd; - my_free(file_entry_buf); /* Remember that we don't have a THD */ set_current_thd(0); DBUG_VOID_RETURN; } -/* - Release all memory allocated to the ddl log - SYNOPSIS - release_ddl_log() - RETURN VALUES - NONE +/** + Release all memory allocated to the ddl log. */ void release_ddl_log() @@ -1659,8 +1821,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) &syntax_len, TRUE, TRUE, lpt->create_info, - lpt->alter_info, - NULL))) + lpt->alter_info))) { DBUG_RETURN(TRUE); } @@ -1763,8 +1924,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) &syntax_len, TRUE, TRUE, lpt->create_info, - lpt->alter_info, - NULL))) + lpt->alter_info))) { error= 1; goto err; @@ -1896,22 +2056,20 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, (void) delete_statistics_for_table(thd, &db_name, &table_name); } } - - mysql_ha_rm_tables(thd, tables); if (!drop_temporary) { if (!thd->locked_tables_mode) { - if (lock_table_names(thd, tables, NULL, thd->variables.lock_wait_timeout, - MYSQL_OPEN_SKIP_TEMPORARY)) + if (lock_table_names(thd, tables, NULL, + thd->variables.lock_wait_timeout, 0)) DBUG_RETURN(true); } else { for (table= tables; table; table= table->next_local) - if (table->open_type != OT_BASE_ONLY && - find_temporary_table(thd, table)) + { + if (is_temporary_table(table)) { /* A temporary table. @@ -1943,6 +2101,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, DBUG_RETURN(true); table->mdl_request.ticket= table->table->mdl_ticket; } + } } } @@ -2022,6 +2181,9 @@ static uint32 comment_length(THD *thd, uint32 comment_pos, @note This function assumes that metadata locks have already been taken. 
It is also assumed that the tables have been removed from TDC. + @note This function assumes that temporary tables to be dropped have + been pre-opened using corresponding table list elements. + @todo When logging to the binary log, we should log tmp_tables and transactional tables as separate statements if we are in a transaction; This is needed to get these tables into the @@ -2036,9 +2198,10 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, bool dont_log_query) { TABLE_LIST *table; - char path[FN_REFLEN + 1], *alias= NULL; + char path[FN_REFLEN + 1], wrong_tables_buff[160], *alias= NULL; + String wrong_tables(wrong_tables_buff, sizeof(wrong_tables_buff)-1, + system_charset_info); uint path_length= 0; - String wrong_tables; int error= 0; int non_temp_tables_count= 0; bool foreign_key_error=0; @@ -2049,6 +2212,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, String built_trans_tmp_query, built_non_trans_tmp_query; DBUG_ENTER("mysql_rm_table_no_locks"); + wrong_tables.length(0); /* Prepares the drop statements that will be written into the binary log as follows: @@ -2247,9 +2411,17 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, . ./sql/datadict.cc +32 /Alfranio - TODO: We need to test this. */ if (if_exists) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, - ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), - table->table_name); + { + char buff[FN_REFLEN]; + String tbl_name(buff, sizeof(buff), system_charset_info); + tbl_name.length(0); + tbl_name.append(db); + tbl_name.append('.'); + tbl_name.append(table->table_name); + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), + tbl_name.c_ptr_safe()); + } else { non_tmp_error = (drop_temporary ? 
non_tmp_error : TRUE); @@ -2282,7 +2454,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, } /* the following internally does TDC_RT_REMOVE_ALL */ close_all_tables_for_name(thd, table->table->s, - HA_EXTRA_PREPARE_FOR_DROP); + HA_EXTRA_PREPARE_FOR_DROP, NULL); table->table= 0; } else @@ -2335,7 +2507,9 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, { if (wrong_tables.length()) wrong_tables.append(','); - wrong_tables.append(String(table->table_name,system_charset_info)); + wrong_tables.append(db); + wrong_tables.append('.'); + wrong_tables.append(table->table_name); } else { @@ -2451,22 +2625,20 @@ end: } -/* +/** Quickly remove a table. - SYNOPSIS - quick_rm_table() - base The handlerton handle. - db The database name. - table_name The table name. - flags flags for build_table_filename(). + @param thd Thread context. + @param base The handlerton handle. + @param db The database name. + @param table_name The table name. + @param flags Flags for build_table_filename() as well as describing + if handler files / .FRM should be deleted as well. - RETURN - 0 OK - != 0 Error + @return False in case of success, True otherwise. 
*/ -bool quick_rm_table(handlerton *base,const char *db, +bool quick_rm_table(THD *thd, handlerton *base, const char *db, const char *table_name, uint flags) { char path[FN_REFLEN + 1]; @@ -2478,7 +2650,15 @@ bool quick_rm_table(handlerton *base,const char *db, if (mysql_file_delete(key_file_frm, path, MYF(0))) error= 1; /* purecov: inspected */ path[path_length - reg_ext_length]= '\0'; // Remove reg_ext - if (!(flags & FRM_ONLY)) + if (flags & NO_HA_TABLE) + { + handler *file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base); + if (!file) + DBUG_RETURN(true); + (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG); + delete file; + } + if (!(flags & (FRM_ONLY|NO_HA_TABLE))) error|= ha_delete_table(current_thd, base, path, db, table_name, 0); if (likely(error == 0)) @@ -2490,6 +2670,7 @@ bool quick_rm_table(handlerton *base,const char *db, DBUG_RETURN(error); } + /* Sort keys in the following order: - PRIMARY KEY @@ -2583,7 +2764,7 @@ bool check_duplicates_in_interval(const char *set_or_name, name, err.ptr(), set_or_name); return 1; } - push_warning_printf(thd,MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd,Sql_condition::WARN_LEVEL_NOTE, ER_DUPLICATED_VALUE_IN_TYPE, ER(ER_DUPLICATED_VALUE_IN_TYPE), name, err.ptr(), set_or_name); @@ -2651,7 +2832,7 @@ int prepare_create_field(Create_field *sql_field, longlong table_flags) { unsigned int dup_val_count; - DBUG_ENTER("prepare_field"); + DBUG_ENTER("prepare_create_field"); /* This code came from mysql_prepare_create_table. 
@@ -2828,21 +3009,6 @@ CHARSET_INFO* get_sql_field_charset(Create_field *sql_field, } -bool check_duplicate_warning(THD *thd, char *msg, ulong length) -{ - List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list()); - MYSQL_ERROR *err; - while ((err= it++)) - { - if (strncmp(msg, err->get_message_text(), length) == 0) - { - return true; - } - } - return false; -} - - /** Modifies the first column definition whose SQL type is TIMESTAMP by adding the features DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP. @@ -2939,6 +3105,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, executing a prepared statement for the second time. */ sql_field->length= sql_field->char_length; + /* Set field charset. */ save_cs= sql_field->charset= get_sql_field_charset(sql_field, create_info); if ((sql_field->flags & BINCMP_FLAG) && @@ -3405,7 +3572,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, if (key->generated) key_info->flags|= HA_GENERATED_KEY; - key_info->key_parts=(uint8) key->columns.elements; + key_info->user_defined_key_parts=(uint8) key->columns.elements; key_info->key_part=key_part_info; key_info->usable_key_parts= key_number; key_info->algorithm= key->key_create_info.algorithm; @@ -3440,7 +3607,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, my_error(ER_TABLE_CANT_HANDLE_SPKEYS, MYF(0), file->table_type()); DBUG_RETURN(TRUE); } - if (key_info->key_parts != 1) + if (key_info->user_defined_key_parts != 1) { my_error(ER_WRONG_ARGUMENTS, MYF(0), "SPATIAL INDEX"); DBUG_RETURN(TRUE); @@ -3449,7 +3616,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, else if (key_info->algorithm == HA_KEY_ALG_RTREE) { #ifdef HAVE_RTREE_KEYS - if ((key_info->key_parts & 1) == 1) + if ((key_info->user_defined_key_parts & 1) == 1) { my_error(ER_WRONG_ARGUMENTS, MYF(0), "RTREE INDEX"); DBUG_RETURN(TRUE); @@ -3633,14 +3800,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, if 
((length=column->length) > max_key_length || length > file->max_key_part_length()) { - length=min(max_key_length, file->max_key_part_length()); + length=MY_MIN(max_key_length, file->max_key_part_length()); if (key->type == Key::MULTIPLE) { /* not a critical problem */ char warn_buff[MYSQL_ERRMSG_SIZE]; my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY), length); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_TOO_LONG_KEY, warn_buff); /* Align key length to multibyte char boundary */ length-= length % sql_field->charset->mbmaxlen; @@ -3688,7 +3855,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, char warn_buff[MYSQL_ERRMSG_SIZE]; my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY), length); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_TOO_LONG_KEY, warn_buff); /* Align key length to multibyte char boundary */ length-= length % sql_field->charset->mbmaxlen; @@ -3778,8 +3945,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_INDEX_COMMENT), key_info->name, static_cast<ulong>(INDEX_COMMENT_MAXLEN)); /* do not push duplicate warnings */ - if (!check_duplicate_warning(thd, warn_buff, strlen(warn_buff))) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + if (!thd->get_stmt_da()->has_sql_condition(warn_buff, strlen(warn_buff))) + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_TOO_LONG_INDEX_COMMENT, warn_buff); key->key_create_info.comment.length= tmp_len; @@ -3842,7 +4009,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, } if (create_info->tmp_table()) - create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE; + create_info->options|=HA_CREATE_DELAY_KEY_WRITE; /* Give warnings for not supported table options */ #if defined(WITH_ARIA_STORAGE_ENGINE) @@ -3850,7 +4017,7 @@ mysql_prepare_create_table(THD *thd, 
HA_CREATE_INFO *create_info, if (file->ht != maria_hton) #endif if (create_info->transactional) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, ER(ER_ILLEGAL_HA_CREATE_OPTION), file->engine_name()->str, @@ -3933,7 +4100,7 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field) my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_AUTO_CONVERT), sql_field->field_name, (sql_field->charset == &my_charset_bin) ? "VARBINARY" : "VARCHAR", (sql_field->charset == &my_charset_bin) ? "BLOB" : "TEXT"); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_AUTO_CONVERT, warn_buff); } @@ -4004,56 +4171,15 @@ void sp_prepare_create_field(THD *thd, Create_field *sql_field) } -#ifdef WITH_PARTITION_STORAGE_ENGINE -/** - Auxiliary function which allows to check if freshly created .FRM - file for table can be opened. - - @retval FALSE - Success. - @retval TRUE - Failure. -*/ - -static bool check_if_created_table_can_be_opened(THD *thd, - const char *path, - const char *db, - const char *table_name, - HA_CREATE_INFO *create_info, - handler *file) -{ - TABLE table; - TABLE_SHARE share; - bool result; - - /* - It is impossible to open definition of partitioned table without .par file. - */ - if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG)) - return TRUE; - - init_tmp_table_share(thd, &share, db, 0, table_name, path); - share.db_plugin= ha_lock_engine(thd, file->ht); - - result= (open_table_def(thd, &share) || - open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, - 0, &table, TRUE)); - if (! 
result) - (void) closefrm(&table, 0); - - free_table_share(&share); - (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG); - return result; -} -#endif - - handler *mysql_create_frm_image(THD *thd, const char *db, const char *table_name, HA_CREATE_INFO *create_info, Alter_info *alter_info, int create_table_mode, + KEY **key_info, + uint *key_count, LEX_CUSTRING *frm) { - uint db_options, key_count; - KEY *key_info_buffer; + uint db_options; handler *file; DBUG_ENTER("mysql_create_frm_image"); @@ -4073,6 +4199,7 @@ handler *mysql_create_frm_image(THD *thd, create_info->row_type != ROW_TYPE_FIXED && create_info->row_type != ROW_TYPE_DEFAULT) db_options|= HA_OPTION_PACK_RECORD; + if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, create_info->db_type))) { @@ -4109,12 +4236,7 @@ handler *mysql_create_frm_image(THD *thd, partitions also in the call to check_partition_info. We transport this information in the default_db_type variable, it is either DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command. - - Check that we don't use foreign keys in the table since it won't - work even with InnoDB beneath it. 
*/ - List_iterator<Key> key_iterator(alter_info->key_list); - Key *key; handlerton *part_engine_type= create_info->db_type; char *part_syntax_buf; uint syntax_len; @@ -4124,15 +4246,6 @@ handler *mysql_create_frm_image(THD *thd, my_error(ER_PARTITION_NO_TEMPORARY, MYF(0)); goto err; } - while ((key= key_iterator++)) - { - if (key->type == Key::FOREIGN_KEY && - !part_info->is_auto_partitioned) - { - my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0)); - goto err; - } - } if ((part_engine_type == partition_hton) && part_info->default_engine_type) { @@ -4174,8 +4287,7 @@ handler *mysql_create_frm_image(THD *thd, &syntax_len, TRUE, TRUE, create_info, - alter_info, - NULL))) + alter_info))) goto err; part_info->part_info_string= part_syntax_buf; part_info->part_info_len= syntax_len; @@ -4239,17 +4351,37 @@ handler *mysql_create_frm_image(THD *thd, } } } + /* + Unless table's storage engine supports partitioning natively + don't allow foreign keys on partitioned tables (they won't + work work even with InnoDB beneath of partitioning engine). + If storage engine handles partitioning natively (like NDB) + foreign keys support is possible, so we let the engine decide. 
+ */ + if (create_info->db_type == partition_hton) + { + List_iterator_fast<Key> key_iterator(alter_info->key_list); + Key *key; + while ((key= key_iterator++)) + { + if (key->type == Key::FOREIGN_KEY) + { + my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0)); + goto err; + } + } + } #endif if (mysql_prepare_create_table(thd, create_info, alter_info, &db_options, - file, &key_info_buffer, &key_count, + file, key_info, key_count, create_table_mode)) goto err; create_info->table_options=db_options; *frm= build_frm_image(thd, table_name, create_info, - alter_info->create_list, key_count, - key_info_buffer, file); + alter_info->create_list, *key_count, + *key_info, file); if (frm->str) DBUG_RETURN(file); @@ -4260,52 +4392,52 @@ err: } -/* +/** Create a table - SYNOPSIS - mysql_create_table_no_lock() - thd Thread object - db Database - table_name Table name - create_info Create information (like MAX_ROWS) - fields List of fields to create - keys List of keys to create - is_trans identifies the type of engine where the table - was created: either trans or non-trans. - create_table_mode C_ORDINARY_CREATE, C_ALTER_TABLE, C_ASSISTED_DISCOVERY - or any positive number (for C_CREATE_SELECT). - - DESCRIPTION - If one creates a temporary table, this is automatically opened - - Note that this function assumes that caller already have taken - exclusive metadata lock on table being created or used some other - way to ensure that concurrent operations won't intervene. - mysql_create_table() is a wrapper that can be used for this. - - select_field_count is also used for CREATE ... SELECT, - and must be zero for standard create of table. - - RETURN VALUES - FALSE OK - TRUE error + @param thd Thread object + @param db Database + @param table_name Table name + @param path Path to table (i.e. to its .FRM file without + the extension). 
+ @param create_info Create information (like MAX_ROWS) + @param alter_info Description of fields and keys for new table + @param create_table_mode C_ORDINARY_CREATE, C_ALTER_TABLE, C_ASSISTED_DISCOVERY + or any positive number (for C_CREATE_SELECT). + @param[out] is_trans Identifies the type of engine where the table + was created: either trans or non-trans. + @param[out] key_info Array of KEY objects describing keys in table + which was created. + @param[out] key_count Number of keys in table which was created. + + If one creates a temporary table, this is automatically opened + + Note that this function assumes that caller already have taken + exclusive metadata lock on table being created or used some other + way to ensure that concurrent operations won't intervene. + mysql_create_table() is a wrapper that can be used for this. + + @retval false OK + @retval true error */ -bool mysql_create_table_no_lock(THD *thd, - const char *db, const char *table_name, - HA_CREATE_INFO *create_info, - Alter_info *alter_info, bool *is_trans, - int create_table_mode) +static +bool create_table_impl(THD *thd, + const char *db, const char *table_name, + const char *path, + HA_CREATE_INFO *create_info, + Alter_info *alter_info, + int create_table_mode, + bool *is_trans, + KEY **key_info, + uint *key_count, + LEX_CUSTRING *frm) { - char path[FN_REFLEN + 1]; - uint path_length; const char *alias; handler *file= 0; - LEX_CUSTRING frm= {0,0}; bool error= TRUE; - bool internal_tmp_table= create_table_mode == C_ALTER_TABLE || - create_table_mode == C_ALTER_TABLE_FRM_ONLY; + bool frm_only= create_table_mode == C_ALTER_TABLE_FRM_ONLY; + bool internal_tmp_table= create_table_mode == C_ALTER_TABLE || frm_only; DBUG_ENTER("mysql_create_table_no_lock"); DBUG_PRINT("enter", ("db: '%s' table: '%s' tmp: %d", db, table_name, internal_tmp_table)); @@ -4313,11 +4445,11 @@ bool mysql_create_table_no_lock(THD *thd, if (!my_use_symdir || (thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE)) { if 
(create_info->data_file_name) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED), "DATA DIRECTORY"); if (create_info->index_file_name) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED), "INDEX DIRECTORY"); create_info->data_file_name= create_info->index_file_name= 0; @@ -4333,9 +4465,6 @@ bool mysql_create_table_no_lock(THD *thd, /* Check if table exists */ if (create_info->tmp_table()) { - path_length= build_tmptable_filename(thd, path, sizeof(path)); - path[path_length - reg_ext_length]= '\0'; // Remove .frm extension - if (find_temporary_table(thd, db, table_name)) { if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS) @@ -4346,9 +4475,6 @@ bool mysql_create_table_no_lock(THD *thd, } else { - path_length= build_table_filename(path, sizeof(path) - 1, db, alias, "", - internal_tmp_table ? FN_IS_TMP : 0); - if (!internal_tmp_table && ha_table_exists(thd, db, table_name)) { if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS) @@ -4410,15 +4536,15 @@ bool mysql_create_table_no_lock(THD *thd, else { file= mysql_create_frm_image(thd, db, table_name, create_info, alter_info, - create_table_mode, &frm); + create_table_mode, key_info, key_count, frm); if (!file) goto err; - if (rea_create_table(thd, &frm, path, db, table_name, create_info, - create_table_mode == C_ALTER_TABLE_FRM_ONLY ? 
0 : file)) + if (rea_create_table(thd, frm, path, db, table_name, create_info, + file, frm_only)) goto err; } - if (create_info->tmp_table()) + if (!frm_only && create_info->tmp_table()) { /* Open a table (skipping table cache) and add it into @@ -4426,7 +4552,7 @@ bool mysql_create_table_no_lock(THD *thd, */ TABLE *table= open_table_uncached(thd, create_info->db_type, path, - db, table_name, TRUE); + db, table_name, true, true); if (!table) { @@ -4440,7 +4566,7 @@ bool mysql_create_table_no_lock(THD *thd, thd->thread_specific_used= TRUE; } #ifdef WITH_PARTITION_STORAGE_ENGINE - else if (thd->work_part_info && create_table_mode == C_ALTER_TABLE_FRM_ONLY) + else if (thd->work_part_info && frm_only) { /* For partitioned tables we can't find some problems with table @@ -4452,12 +4578,25 @@ bool mysql_create_table_no_lock(THD *thd, In cases when we create .FRM without SE part we have to open table explicitly. */ - if (check_if_created_table_can_be_opened(thd, path, db, table_name, - create_info, file)) + TABLE table; + TABLE_SHARE share; + + init_tmp_table_share(thd, &share, db, 0, table_name, path); + + bool result= (open_table_def(thd, &share, GTS_TABLE) || + open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, + 0, &table, true)); + if (!result) + (void) closefrm(&table, 0); + + free_table_share(&share); + + if (result) { char frm_name[FN_REFLEN]; strxnmov(frm_name, sizeof(frm_name), path, reg_ext, NullS); (void) mysql_file_delete(key_file_frm, frm_name, MYF(0)); + (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG); goto err; } } @@ -4466,19 +4605,56 @@ bool mysql_create_table_no_lock(THD *thd, error= FALSE; err: THD_STAGE_INFO(thd, stage_after_create); - my_free(const_cast<uchar*>(frm.str)); delete file; DBUG_RETURN(error); warn: error= FALSE; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR), alias); goto err; } /** + 
Simple wrapper around create_table_impl() to be used + in various version of CREATE TABLE statement. +*/ +bool mysql_create_table_no_lock(THD *thd, + const char *db, const char *table_name, + HA_CREATE_INFO *create_info, + Alter_info *alter_info, bool *is_trans, + int create_table_mode) +{ + KEY *not_used_1; + uint not_used_2; + char path[FN_REFLEN + 1]; + LEX_CUSTRING frm= {0,0}; + + if (create_info->tmp_table()) + build_tmptable_filename(thd, path, sizeof(path)); + else + { + int length; + const char *alias= table_case_name(create_info, table_name); + length= build_table_filename(path, sizeof(path) - 1, db, alias, + "", 0); + // Check if we hit FN_REFLEN bytes along with file extension. + if (length+reg_ext_length > FN_REFLEN) + { + my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), sizeof(path)-1, path); + return true; + } + } + + bool res= create_table_impl(thd, db, table_name, path, create_info, + alter_info, create_table_mode, is_trans, + ¬_used_1, ¬_used_2, &frm); + my_free(const_cast<uchar*>(frm.str)); + return res; +} + +/** Implementation of SQLCOM_CREATE_TABLE. Take the metadata locks (including a shared lock on the affected @@ -4572,25 +4748,23 @@ make_unique_key_name(const char *field_name,KEY *start,KEY *end) ****************************************************************************/ -/* +/** Rename a table. - SYNOPSIS - mysql_rename_table() - base The handlerton handle. - old_db The old database name. - old_name The old table name. - new_db The new database name. - new_name The new table name. - flags flags for build_table_filename(). - FN_FROM_IS_TMP old_name is temporary. - FN_TO_IS_TMP new_name is temporary. - NO_FRM_RENAME Don't rename the FRM file - but only the table in the storage engine. - - RETURN - FALSE OK - TRUE Error + @param base The handlerton handle. + @param old_db The old database name. + @param old_name The old table name. + @param new_db The new database name. + @param new_name The new table name. 
+ @param flags flags + FN_FROM_IS_TMP old_name is temporary. + FN_TO_IS_TMP new_name is temporary. + NO_FRM_RENAME Don't rename the FRM file + but only the table in the storage engine. + NO_HA_TABLE Don't rename table in engine. + + @return false OK + @return true Error */ bool @@ -4605,6 +4779,7 @@ mysql_rename_table(handlerton *base, const char *old_db, char tmp_name[SAFE_NAME_LEN+1]; handler *file; int error=0; + int length; DBUG_ENTER("mysql_rename_table"); DBUG_PRINT("enter", ("old: '%s'.'%s' new: '%s'.'%s'", old_db, old_name, new_db, new_name)); @@ -4614,8 +4789,14 @@ mysql_rename_table(handlerton *base, const char *old_db, build_table_filename(from, sizeof(from) - 1, old_db, old_name, "", flags & FN_FROM_IS_TMP); - build_table_filename(to, sizeof(to) - 1, new_db, new_name, "", - flags & FN_TO_IS_TMP); + length= build_table_filename(to, sizeof(to) - 1, new_db, new_name, "", + flags & FN_TO_IS_TMP); + // Check if we hit FN_REFLEN bytes along with file extension. + if (length+reg_ext_length > FN_REFLEN) + { + my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), sizeof(to)-1, to); + DBUG_RETURN(TRUE); + } /* If lower_case_table_names == 2 (case-preserving but case-insensitive @@ -4638,7 +4819,13 @@ mysql_rename_table(handlerton *base, const char *old_db, to_base= lc_to; } - if (!file || !(error=file->ha_rename_table(from_base, to_base))) + if (flags & NO_HA_TABLE) + { + if (rename_file_ext(from,to,reg_ext)) + error= my_errno; + (void) file->ha_create_partitioning_metadata(to, from, CHF_RENAME_FLAG); + } + else if (!file || !(error=file->ha_rename_table(from_base, to_base))) { if (!(flags & NO_FRM_RENAME) && rename_file_ext(from,to,reg_ext)) { @@ -4695,6 +4882,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table, { HA_CREATE_INFO local_create_info; Alter_info local_alter_info; + Alter_table_ctx local_alter_ctx; // Not used bool res= TRUE; bool is_trans= FALSE; uint not_used; @@ -4726,7 +4914,7 @@ bool mysql_create_like_table(THD* thd, 
TABLE_LIST* table, TABLE_LIST* src_table, local_create_info.db_type= src_table->table->s->db_type(); local_create_info.row_type= src_table->table->s->row_type; if (mysql_prepare_alter_table(thd, src_table->table, &local_create_info, - &local_alter_info)) + &local_alter_info, &local_alter_ctx)) goto err; #ifdef WITH_PARTITION_STORAGE_ENGINE /* Partition info is not handled by mysql_prepare_alter_table() call. */ @@ -4800,6 +4988,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table, String query(buf, sizeof(buf), system_charset_info); query.length(0); // Have to zero it since constructor doesn't Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN); + bool new_table= FALSE; // Whether newly created table is open. /* The condition avoids a crash as described in BUG#48506. Other @@ -4808,14 +4997,21 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table, */ if (!table->view) { - /* - Here we open the destination table, on which we already have - exclusive metadata lock. This is needed for store_create_info() - to work. The table will be closed by close_thread_table() at - the end of this branch. - */ - if (open_table(thd, table, thd->mem_root, &ot_ctx)) - goto err; + if (!table->table) + { + + /* + In order for store_create_info() to work we need to open + destination table if it is not already open (i.e. if it + has not existed before). We don't need acquire metadata + lock in order to do this as we already hold exclusive + lock on this table. The table will be closed by + close_thread_table() at the end of this branch. 
+ */ + if (open_table(thd, table, thd->mem_root, &ot_ctx)) + goto err; + new_table= TRUE; + } int result __attribute__((unused))= store_create_info(thd, table, &query, @@ -4825,13 +5021,16 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table, if (write_bin_log(thd, TRUE, query.ptr(), query.length())) goto err; - DBUG_ASSERT(thd->open_tables == table->table); - /* - When opening the table, we ignored the locked tables - (MYSQL_OPEN_GET_NEW_TABLE). Now we can close the table without - risking to close some locked table. - */ - close_thread_table(thd, &thd->open_tables); + if (new_table) + { + DBUG_ASSERT(thd->open_tables == table->table); + /* + When opening the table, we ignored the locked tables + (MYSQL_OPEN_GET_NEW_TABLE). Now we can close the table + without risking to close some locked table. + */ + close_thread_table(thd, &thd->open_tables); + } } } else // Case 1 @@ -4851,16 +5050,16 @@ err: /* table_list should contain just one table */ -static int -mysql_discard_or_import_tablespace(THD *thd, - TABLE_LIST *table_list, - enum tablespace_op_type tablespace_op) +int mysql_discard_or_import_tablespace(THD *thd, + TABLE_LIST *table_list, + bool discard) { - TABLE *table; - my_bool discard; + Alter_table_prelocking_strategy alter_prelocking_strategy; int error; DBUG_ENTER("mysql_discard_or_import_tablespace"); + mysql_audit_alter_table(thd, table_list); + /* Note that DISCARD/IMPORT TABLESPACE always is the only operation in an ALTER TABLE @@ -4868,21 +5067,28 @@ mysql_discard_or_import_tablespace(THD *thd, THD_STAGE_INFO(thd, stage_discard_or_import_tablespace); - discard= test(tablespace_op == DISCARD_TABLESPACE); - /* We set this flag so that ha_innobase::open and ::external_lock() do not complain when we lock the table */ thd->tablespace_op= TRUE; - table_list->mdl_request.set_type(MDL_SHARED_WRITE); - if (!(table=open_ltable(thd, table_list, TL_WRITE, 0))) + /* + Adjust values of table-level and metadata which was set in parser 
+ for the case general ALTER TABLE. + */ + table_list->mdl_request.set_type(MDL_EXCLUSIVE); + table_list->lock_type= TL_WRITE; + /* Do not open views. */ + table_list->required_type= FRMTYPE_TABLE; + + if (open_and_lock_tables(thd, table_list, FALSE, 0, + &alter_prelocking_strategy)) { thd->tablespace_op=FALSE; DBUG_RETURN(-1); } - error= table->file->ha_discard_or_import_tablespace(discard); + error= table_list->table->file->ha_discard_or_import_tablespace(discard); THD_STAGE_INFO(thd, stage_end); @@ -4913,48 +5119,31 @@ err: DBUG_RETURN(0); } - table->file->print_error(error, MYF(0)); + table_list->table->file->print_error(error, MYF(0)); DBUG_RETURN(-1); } + /** - @brief Check if both DROP and CREATE are present for an index in ALTER TABLE - - @details Checks if any index is being modified (present as both DROP INDEX - and ADD INDEX) in the current ALTER TABLE statement. Needed for disabling - in-place ALTER TABLE. - - @param table The table being altered - @param alter_info The ALTER TABLE structure - @return presence of index being altered - @retval FALSE No such index - @retval TRUE Have at least 1 index modified + Check if key is a candidate key, i.e. a unique index with no index + fields partial or nullable. 
*/ -static bool -is_index_maintenance_unique (TABLE *table, Alter_info *alter_info) +static bool is_candidate_key(KEY *key) { - List_iterator<Key> key_it(alter_info->key_list); - List_iterator<Alter_drop> drop_it(alter_info->drop_list); - Key *key; + KEY_PART_INFO *key_part; + KEY_PART_INFO *key_part_end= key->key_part + key->user_defined_key_parts; - while ((key= key_it++)) - { - if (key->name.str) - { - Alter_drop *drop; + if (!(key->flags & HA_NOSAME) || (key->flags & HA_NULL_PART_KEY)) + return false; - drop_it.rewind(); - while ((drop= drop_it++)) - { - if (drop->type == Alter_drop::KEY && - !my_strcasecmp(system_charset_info, key->name.str, drop->name)) - return TRUE; - } - } + for (key_part= key->key_part; key_part < key_part_end; key_part++) + { + if (key_part->key_part_flag & HA_PART_KEY_SEG) + return false; } - return FALSE; + return true; } @@ -4999,15 +5188,15 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) if (my_strcasecmp(system_charset_info, sql_field->field_name, (*f_ptr)->field_name) == 0) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DUP_FIELDNAME, ER(ER_DUP_FIELDNAME), sql_field->field_name); it.remove(); if (alter_info->create_list.is_empty()) { - alter_info->flags&= ~ALTER_ADD_COLUMN; + alter_info->flags&= ~Alter_info::ALTER_ADD_COLUMN; if (alter_info->key_list.is_empty()) - alter_info->flags&= ~ALTER_ADD_INDEX; + alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX; } break; } @@ -5038,15 +5227,16 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) } if (*f_ptr == NULL) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), sql_field->change, table->s->table_name.str); it.remove(); if (alter_info->create_list.is_empty()) { - alter_info->flags&= ~(ALTER_ADD_COLUMN | ALTER_CHANGE_COLUMN); + alter_info->flags&= 
~(Alter_info::ALTER_ADD_COLUMN | + Alter_info::ALTER_CHANGE_COLUMN); if (alter_info->key_list.is_empty()) - alter_info->flags&= ~ALTER_ADD_INDEX; + alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX; } } } @@ -5093,12 +5283,13 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) } if (remove_drop) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_CANT_DROP_FIELD_OR_KEY, ER(ER_CANT_DROP_FIELD_OR_KEY), drop->name); drop_it.remove(); if (alter_info->drop_list.is_empty()) - alter_info->flags&= ~(ALTER_DROP_COLUMN | ALTER_DROP_INDEX); + alter_info->flags&= ~(Alter_info::ALTER_DROP_COLUMN | + Alter_info::ALTER_DROP_INDEX); } } } @@ -5118,7 +5309,7 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) if (my_strcasecmp(system_charset_info, key->name.str, table->key_info[n_key].name) == 0) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DUP_KEYNAME, ER(ER_DUP_KEYNAME), key->name.str); key_it.remove(); if (key->type == Key::FOREIGN_KEY) @@ -5127,7 +5318,7 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) key_it.remove(); } if (alter_info->key_list.is_empty()) - alter_info->flags&= ~ALTER_ADD_INDEX; + alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX; break; } } @@ -5139,7 +5330,7 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) if (tab_part_info && thd->lex->check_exists) { /* ALTER TABLE ADD PARTITION IF NOT EXISTS */ - if (alter_info->flags & ALTER_ADD_PARTITION) + if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION) { partition_info *alt_part_info= thd->lex->part_info; if (alt_part_info) @@ -5150,10 +5341,10 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) { if (!tab_part_info->has_unique_name(pe)) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, 
Sql_condition::WARN_LEVEL_NOTE, ER_SAME_NAME_PARTITION, ER(ER_SAME_NAME_PARTITION), pe->partition_name); - alter_info->flags&= ~ALTER_ADD_PARTITION; + alter_info->flags&= ~Alter_info::ALTER_ADD_PARTITION; thd->lex->part_info= NULL; break; } @@ -5161,7 +5352,7 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) } } /* ALTER TABLE DROP PARTITION IF EXISTS */ - if (alter_info->flags & ALTER_DROP_PARTITION) + if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION) { List_iterator<char> names_it(alter_info->partition_names); char *name; @@ -5178,293 +5369,332 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info) } if (!part_elem) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_DROP_PARTITION_NON_EXISTENT, ER(ER_DROP_PARTITION_NON_EXISTENT), "DROP"); names_it.remove(); } } if (alter_info->partition_names.elements == 0) - alter_info->flags&= ~ALTER_DROP_PARTITION; + alter_info->flags&= ~Alter_info::ALTER_DROP_PARTITION; } } #endif /*WITH_PARTITION_STORAGE_ENGINE*/ - /* Clear the ALTER_FOREIGN_KEY flag if nothing other than that set. */ - if (alter_info->flags == ALTER_FOREIGN_KEY) - alter_info->flags= 0; - DBUG_VOID_RETURN; } -/* - SYNOPSIS - mysql_compare_tables() - table The original table. - alter_info Alter options, fields and keys for the new - table. - create_info Create options for the new table. - order_num Number of order list elements. - need_copy_table OUT Result of the comparison. Undefined if error. - Otherwise is one of: - ALTER_TABLE_METADATA_ONLY No copy needed - ALTER_TABLE_DATA_CHANGED Data changes, - copy needed - ALTER_TABLE_INDEX_CHANGED Index changes, - copy might be needed - key_info_buffer OUT An array of KEY structs for new indexes - index_drop_buffer OUT An array of offsets into table->key_info. - index_drop_count OUT The number of elements in the array. - index_add_buffer OUT An array of offsets into key_info_buffer. 
- index_add_count OUT The number of elements in the array. - candidate_key_count OUT The number of candidate keys in original table. +/** + Get Create_field object for newly created table by field index. - DESCRIPTION - 'table' (first argument) contains information of the original - table, which includes all corresponding parts that the new - table has in arguments create_list, key_list and create_info. + @param alter_info Alter_info describing newly created table. + @param idx Field index. +*/ - By comparing the changes between the original and new table - we can determine how much it has changed after ALTER TABLE - and whether we need to make a copy of the table, or just change - the .frm file. +static Create_field *get_field_by_index(Alter_info *alter_info, uint idx) +{ + List_iterator_fast<Create_field> field_it(alter_info->create_list); + uint field_idx= 0; + Create_field *field; - If there are no data changes, but index changes, 'index_drop_buffer' - and/or 'index_add_buffer' are populated with offsets into - table->key_info or key_info_buffer respectively for the indexes - that need to be dropped and/or (re-)created. + while ((field= field_it++) && field_idx < idx) + { field_idx++; } - RETURN VALUES - TRUE The tables are not compatible; We have to do a full alter table - FALSE The tables are compatible; We only have to modify the .frm + return field; +} + + +static int compare_uint(const uint *s, const uint *t) +{ + return (*s < *t) ? -1 : ((*s > *t) ? 1 : 0); +} + + +/** + Compare original and new versions of a table and fill Alter_inplace_info + describing differences between those versions. + + @param thd Thread + @param table The original table. + @param varchar Indicates that new definition has new + VARCHAR column. 
+ @param[in/out] ha_alter_info Data structure which already contains + basic information about create options, + field and keys for the new version of + table and which should be completed with + more detailed information needed for + in-place ALTER. + + First argument 'table' contains information of the original + table, which includes all corresponding parts that the new + table has in arguments create_list, key_list and create_info. + + Compare the changes between the original and new table definitions. + The result of this comparison is then passed to SE which determines + whether it can carry out these changes in-place. + + Mark any changes detected in the ha_alter_flags. + We generally try to specify handler flags only if there are real + changes. But in cases when it is cumbersome to determine if some + attribute has really changed we might choose to set flag + pessimistically, for example, relying on parser output only. + + If there are no data changes, but index changes, 'index_drop_buffer' + and/or 'index_add_buffer' are populated with offsets into + table->key_info or key_info_buffer respectively for the indexes + that need to be dropped and/or (re-)created. + + Note that this function assumes that it is OK to change Alter_info + and HA_CREATE_INFO which it gets. It is caller who is responsible + for creating copies for this structures if he needs them unchanged. 
+ + @retval true error + @retval false success */ -bool -mysql_compare_tables(TABLE *table, - Alter_info *alter_info, - HA_CREATE_INFO *create_info, - uint order_num, - enum_alter_table_change_level *need_copy_table, - KEY **key_info_buffer, - uint **index_drop_buffer, uint *index_drop_count, - uint **index_add_buffer, uint *index_add_count, - uint *candidate_key_count) +static bool fill_alter_inplace_info(THD *thd, + TABLE *table, + bool varchar, + Alter_inplace_info *ha_alter_info) { Field **f_ptr, *field; - uint changes= 0, tmp; - uint key_count; - List_iterator_fast<Create_field> new_field_it, tmp_new_field_it; - Create_field *new_field, *tmp_new_field; - KEY_PART_INFO *key_part; + List_iterator_fast<Create_field> new_field_it; + Create_field *new_field; + KEY_PART_INFO *key_part, *new_part; KEY_PART_INFO *end; - THD *thd= table->in_use; - uint i; + uint candidate_key_count= 0; + Alter_info *alter_info= ha_alter_info->alter_info; + DBUG_ENTER("fill_alter_inplace_info"); + + /* Allocate result buffers. */ + if (! (ha_alter_info->index_drop_buffer= + (KEY**) thd->alloc(sizeof(KEY*) * table->s->keys)) || + ! (ha_alter_info->index_add_buffer= + (uint*) thd->alloc(sizeof(uint) * + alter_info->key_list.elements))) + DBUG_RETURN(true); + + /* First we setup ha_alter_flags based on what was detected by parser. */ + if (alter_info->flags & Alter_info::ALTER_ADD_COLUMN) + ha_alter_info->handler_flags|= Alter_inplace_info::ADD_COLUMN; + if (alter_info->flags & Alter_info::ALTER_DROP_COLUMN) + ha_alter_info->handler_flags|= Alter_inplace_info::DROP_COLUMN; /* - Remember if the new definition has new VARCHAR column; - create_info->varchar will be reset in mysql_prepare_create_table. + Comparing new and old default values of column is cumbersome. + So instead of using such a comparison for detecting if default + has really changed we rely on flags set by parser to get an + approximate value for storage engine flag. 
*/ - bool varchar= create_info->varchar; - bool not_nullable= true; - DBUG_ENTER("mysql_compare_tables"); + if (alter_info->flags & (Alter_info::ALTER_CHANGE_COLUMN | + Alter_info::ALTER_CHANGE_COLUMN_DEFAULT)) + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_DEFAULT; + if (alter_info->flags & Alter_info::ADD_FOREIGN_KEY) + ha_alter_info->handler_flags|= Alter_inplace_info::ADD_FOREIGN_KEY; + if (alter_info->flags & Alter_info::DROP_FOREIGN_KEY) + ha_alter_info->handler_flags|= Alter_inplace_info::DROP_FOREIGN_KEY; + if (alter_info->flags & Alter_info::ALTER_OPTIONS) + ha_alter_info->handler_flags|= Alter_inplace_info::CHANGE_CREATE_OPTION; + if (alter_info->flags & Alter_info::ALTER_RENAME) + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_RENAME; + /* Check partition changes */ + if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION) + ha_alter_info->handler_flags|= Alter_inplace_info::ADD_PARTITION; + if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION) + ha_alter_info->handler_flags|= Alter_inplace_info::DROP_PARTITION; + if (alter_info->flags & Alter_info::ALTER_PARTITION) + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_PARTITION; + if (alter_info->flags & Alter_info::ALTER_COALESCE_PARTITION) + ha_alter_info->handler_flags|= Alter_inplace_info::COALESCE_PARTITION; + if (alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION) + ha_alter_info->handler_flags|= Alter_inplace_info::REORGANIZE_PARTITION; + if (alter_info->flags & Alter_info::ALTER_TABLE_REORG) + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_TABLE_REORG; + if (alter_info->flags & Alter_info::ALTER_REMOVE_PARTITIONING) + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_REMOVE_PARTITIONING; + if (alter_info->flags & Alter_info::ALTER_ALL_PARTITION) + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_ALL_PARTITION; /* - Create a copy of alter_info. 
- To compare the new and old table definitions, we need to "prepare" - the new definition - transform it from parser output to a format - that describes the final table layout (all column defaults are - initialized, duplicate columns are removed). This is done by - mysql_prepare_create_table. Unfortunately, - mysql_prepare_create_table performs its transformations - "in-place", that is, modifies the argument. Since we would - like to keep mysql_compare_tables() idempotent (not altering any - of the arguments) we create a copy of alter_info here and - pass it to mysql_prepare_create_table, then use the result - to evaluate possibility of in-place ALTER TABLE, and then - destroy the copy. + If we altering table with old VARCHAR fields we will be automatically + upgrading VARCHAR column types. */ - Alter_info tmp_alter_info(*alter_info, thd->mem_root); - uint db_options= 0; /* not used */ - - /* Set default value for return value (to ensure it's always set) */ - *need_copy_table= ALTER_TABLE_DATA_CHANGED; + if (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar) + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE; - /* Create the prepared information. */ - int create_table_mode= table->s->tmp_table == NO_TMP_TABLE ? - C_ORDINARY_CREATE : C_ALTER_TABLE; - if (mysql_prepare_create_table(thd, create_info, &tmp_alter_info, - &db_options, table->file, key_info_buffer, - &key_count, create_table_mode)) - DBUG_RETURN(1); - /* Allocate result buffers. */ - if (! (*index_drop_buffer= - (uint*) thd->alloc(sizeof(uint) * table->s->keys)) || - ! (*index_add_buffer= - (uint*) thd->alloc(sizeof(uint) * tmp_alter_info.key_list.elements))) - DBUG_RETURN(1); - /* - Some very basic checks. If number of fields changes, or the - handler, we need to run full ALTER TABLE. In the future - new fields can be added and old dropped without copy, but - not yet. + Go through fields in old version of table and detect changes to them. 
+ We don't want to rely solely on Alter_info flags for this since: + a) new definition of column can be fully identical to the old one + despite the fact that this column is mentioned in MODIFY clause. + b) even if new column type differs from its old column from metadata + point of view, it might be identical from storage engine point + of view (e.g. when ENUM('a','b') is changed to ENUM('a','b',c')). + c) flags passed to storage engine contain more detailed information + about nature of changes than those provided from parser. + */ + for (f_ptr= table->field; (field= *f_ptr); f_ptr++) + { + /* Clear marker for renamed or dropped field + which we are going to set later. */ + field->flags&= ~(FIELD_IS_RENAMED | FIELD_IS_DROPPED); - Test also that engine was not given during ALTER TABLE, or - we are force to run regular alter table (copy). - E.g. ALTER TABLE tbl_name ENGINE=MyISAM. + /* Use transformed info to evaluate flags for storage engine. */ + uint new_field_index= 0; + new_field_it.init(alter_info->create_list); + while ((new_field= new_field_it++)) + { + if (new_field->field == field) + break; + new_field_index++; + } - For the following ones we also want to run regular alter table: - ALTER TABLE tbl_name ORDER BY .. - ALTER TABLE tbl_name CONVERT TO CHARACTER SET .. + if (new_field) + { + ha_alter_info->create_info->fields_option_struct[f_ptr - table->field]= + new_field->option_struct; - At the moment we can't handle altering temporary tables without a copy. - We also test if OPTIMIZE TABLE was given and was mapped to alter table. - In that case we always do full copy. + /* Field is not dropped. Evaluate changes bitmap for it. */ - There was a bug prior to mysql-4.0.25. Number of null fields was - calculated incorrectly. As a result frm and data files gets out of - sync after in-place alter table. 
There is no way to determine by which - mysql version (in 4.0 and 4.1 branches) table was created, thus we - disable in-place alter table for all tables created by mysql versions - prior to 5.0 branch. - See BUG#6236. - */ - if (table->s->fields != alter_info->create_list.elements || - table->s->db_type() != create_info->db_type || - table->s->tmp_table || - create_info->used_fields & HA_CREATE_USED_ENGINE || - create_info->used_fields & HA_CREATE_USED_CHARSET || - create_info->used_fields & HA_CREATE_USED_DEFAULT_CHARSET || - (table->s->row_type != create_info->row_type) || - create_info->used_fields & HA_CREATE_USED_PAGE_CHECKSUM || - create_info->used_fields & HA_CREATE_USED_TRANSACTIONAL || - create_info->used_fields & HA_CREATE_USED_PACK_KEYS || - create_info->used_fields & HA_CREATE_USED_MAX_ROWS || - (alter_info->flags & (ALTER_RECREATE | ALTER_FOREIGN_KEY)) || - order_num || - !table->s->mysql_version || - (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar)) - { - DBUG_PRINT("info", ("Basic checks -> ALTER_TABLE_DATA_CHANGED")); - DBUG_RETURN(0); - } + /* + Check if type of column has changed to some incompatible type. + */ + switch (field->is_equal(new_field)) + { + case IS_EQUAL_NO: + /* New column type is incompatible with old one. 
*/ + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE; + if (table->s->tmp_table == NO_TMP_TABLE) + { + delete_statistics_for_column(thd, table, field); + KEY *key_info= table->key_info; + for (uint i=0; i < table->s->keys; i++, key_info++) + { + if (field->part_of_key.is_set(i)) + { + uint key_parts= table->actual_n_key_parts(key_info); + for (uint j= 0; j < key_parts; j++) + { + if (key_info->key_part[j].fieldnr-1 == field->field_index) + { + delete_statistics_for_index(thd, table, key_info, + j >= key_info->user_defined_key_parts); + break; + } + } + } + } + } + break; + case IS_EQUAL_YES: + /* + New column is the same as the old one or the fully compatible with + it (for example, ENUM('a','b') was changed to ENUM('a','b','c')). + Such a change if any can ALWAYS be carried out by simply updating + data-dictionary without even informing storage engine. + No flag is set in this case. + */ + break; + case IS_EQUAL_PACK_LENGTH: + /* + New column type differs from the old one, but has compatible packed + data representation. Depending on storage engine, such a change can + be carried out by simply updating data dictionary without changing + actual data (for example, VARCHAR(300) is changed to VARCHAR(400)). + */ + ha_alter_info->handler_flags|= Alter_inplace_info:: + ALTER_COLUMN_EQUAL_PACK_LENGTH; + break; + default: + DBUG_ASSERT(0); + /* Safety. */ + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE; + } - if ((create_info->fields_option_struct= (ha_field_option_struct**) - thd->calloc(sizeof(void*) * table->s->fields)) == NULL || - (create_info->indexes_option_struct= (ha_index_option_struct**) - thd->calloc(sizeof(void*) * table->s->keys)) == NULL) - DBUG_RETURN(1); + /* + Check if the altered column is computed and either + is stored or is used in the partitioning expression. + TODO: Mark such a column with an alter flag only if + the defining expression has changed. 
+ */ + if (field->vcol_info && + (field->stored_in_db || field->vcol_info->is_in_partitioning_expr())) + { + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL; + } - tmp_new_field_it.init(tmp_alter_info.create_list); - for (i= 0, f_ptr= table->field, tmp_new_field= tmp_new_field_it++; - (field= *f_ptr); - i++, f_ptr++, tmp_new_field= tmp_new_field_it++) - { - if (field->is_equal(tmp_new_field) == IS_EQUAL_NO && - table->s->tmp_table == NO_TMP_TABLE) - (void) delete_statistics_for_column(thd, table, field); - else if (my_strcasecmp(system_charset_info, - field->field_name, - tmp_new_field->field_name)) - (void) rename_column_in_stat_tables(thd, table, field, - tmp_new_field->field_name); - } + /* Check if field was renamed */ + if (my_strcasecmp(system_charset_info, field->field_name, + new_field->field_name)) + { + field->flags|= FIELD_IS_RENAMED; + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_NAME; + rename_column_in_stat_tables(thd, table, field, + new_field->field_name); + } - /* - Use transformed info to evaluate possibility of in-place ALTER TABLE - but use the preserved field to persist modifications. - */ - new_field_it.init(alter_info->create_list); - tmp_new_field_it.init(tmp_alter_info.create_list); + /* Check that NULL behavior is same for old and new fields */ + if ((new_field->flags & NOT_NULL_FLAG) != + (uint) (field->flags & NOT_NULL_FLAG)) + { + if (new_field->flags & NOT_NULL_FLAG) + ha_alter_info->handler_flags|= + Alter_inplace_info::ALTER_COLUMN_NOT_NULLABLE; + else + ha_alter_info->handler_flags|= + Alter_inplace_info::ALTER_COLUMN_NULLABLE; + } - /* - Go through fields and check if the original ones are compatible - with new table. 
- */ - for (i= 0, f_ptr= table->field, new_field= new_field_it++, - tmp_new_field= tmp_new_field_it++; - (field= *f_ptr); - i++, f_ptr++, new_field= new_field_it++, - tmp_new_field= tmp_new_field_it++) - { - DBUG_ASSERT(i < table->s->fields); - create_info->fields_option_struct[i]= tmp_new_field->option_struct; + /* + We do not detect changes to default values in this loop. + See comment above for more details. + */ - /* reset common markers of how field changed */ - field->flags&= ~(FIELD_IS_RENAMED | FIELD_IN_ADD_INDEX); + /* + Detect changes in column order. + */ + if (field->field_index != new_field_index) + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_ORDER; - /* Make sure we have at least the default charset in use. */ - if (!new_field->charset) - new_field->charset= create_info->default_table_charset; + /* Detect changes in storage type of column */ + if (new_field->field_storage_type() != field->field_storage_type()) + ha_alter_info->handler_flags|= + Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE; - /* Check that NULL behavior is same for old and new fields */ - if ((tmp_new_field->flags & NOT_NULL_FLAG) != - (uint) (field->flags & NOT_NULL_FLAG)) - { - DBUG_PRINT("info", ("NULL behaviour difference in field '%s' -> " - "ALTER_TABLE_DATA_CHANGED", new_field->field_name)); - DBUG_RETURN(0); + /* Detect changes in column format of column */ + if (new_field->column_format() != field->column_format()) + ha_alter_info->handler_flags|= + Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT; } - - /* - Check if the altered column is computed and either - is stored or is used in the partitioning expression. - TODO: Mark such a column with an alter flag only if - the defining expression has changed. - */ - if (field->vcol_info && - (field->stored_in_db || field->vcol_info->is_in_partitioning_expr())) + else { - *need_copy_table= ALTER_TABLE_DATA_CHANGED; - DBUG_RETURN(0); + /* + Field is not present in new version of table and therefore was dropped. 
+ Corresponding storage engine flag should be already set. + */ + DBUG_ASSERT(ha_alter_info->handler_flags & Alter_inplace_info::DROP_COLUMN); + field->flags|= FIELD_IS_DROPPED; } + } - /* Don't pack rows in old tables if the user has requested this. */ - if (create_info->row_type == ROW_TYPE_DYNAMIC || - (tmp_new_field->flags & BLOB_FLAG) || - (tmp_new_field->sql_type == MYSQL_TYPE_VARCHAR && - create_info->row_type != ROW_TYPE_FIXED)) - create_info->table_options|= HA_OPTION_PACK_RECORD; - - /* Check if field was renamed */ - if (my_strcasecmp(system_charset_info, - field->field_name, - tmp_new_field->field_name)) + new_field_it.init(alter_info->create_list); + while ((new_field= new_field_it++)) + { + if (! new_field->field) { - field->flags|= FIELD_IS_RENAMED; - if (table->s->tmp_table == NO_TMP_TABLE) - rename_column_in_stat_tables(thd, table, field, - tmp_new_field->field_name); - } + /* + Field is not present in old version of table and therefore was added. + Again corresponding storage engine flag should be already set. 
+ */ + DBUG_ASSERT(ha_alter_info->handler_flags & Alter_inplace_info::ADD_COLUMN); - /* Evaluate changes bitmap and send to check_if_incompatible_data() */ - if (!(tmp= field->is_equal(tmp_new_field))) - { - if (table->s->tmp_table == NO_TMP_TABLE) + if (new_field->vcol_info && + (new_field->stored_in_db || new_field->vcol_info->is_in_partitioning_expr())) { - KEY *key_info= table->key_info; - for (uint i=0; i < table->s->keys; i++, key_info++) - { - if (field->part_of_key.is_set(i)) - { - uint key_parts= table->actual_n_key_parts(key_info); - for (uint j= 0; j < key_parts; j++) - { - if (key_info->key_part[j].fieldnr-1 == field->field_index) - { - (void) delete_statistics_for_index(thd, table, key_info, - j >= key_info->key_parts); - break; - } - } - } - } + ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL; } - DBUG_PRINT("info", ("!field_is_equal('%s') -> ALTER_TABLE_DATA_CHANGED", - new_field->field_name)); - DBUG_RETURN(0); + break; } - changes|= tmp; } /* @@ -5474,105 +5704,86 @@ mysql_compare_tables(TABLE *table, KEY *table_key; KEY *table_key_end= table->key_info + table->s->keys; KEY *new_key; - KEY *new_key_end= *key_info_buffer + key_count; + KEY *new_key_end= + ha_alter_info->key_info_buffer + ha_alter_info->key_count; DBUG_PRINT("info", ("index count old: %d new: %d", - table->s->keys, key_count)); + table->s->keys, ha_alter_info->key_count)); + /* Step through all keys of the old table and search matching new keys. */ - *index_drop_count= 0; - *index_add_count= 0; - *candidate_key_count= 0; + ha_alter_info->index_drop_count= 0; + ha_alter_info->index_add_count= 0; for (table_key= table->key_info; table_key < table_key_end; table_key++) { - KEY_PART_INFO *table_part; - KEY_PART_INFO *table_part_end= table_key->key_part + table_key->key_parts; - KEY_PART_INFO *new_part; - - /* - Check if key is a candidate key, i.e. 
a unique index with no index - fields nullable, then key is either already primary key or could - be promoted to primary key if the original primary key is dropped. - Count all candidate keys. - */ - not_nullable= true; - for (table_part= table_key->key_part; - table_part < table_part_end; - table_part++) - { - not_nullable= not_nullable && (! table_part->field->maybe_null()); - } - if ((table_key->flags & HA_NOSAME) && not_nullable) - (*candidate_key_count)++; - /* Search a new key with the same name. */ - for (new_key= *key_info_buffer; new_key < new_key_end; new_key++) + for (new_key= ha_alter_info->key_info_buffer; + new_key < new_key_end; + new_key++) { if (! strcmp(table_key->name, new_key->name)) break; } if (new_key >= new_key_end) { - /* Key not found. Add the offset of the key to the drop buffer. */ - (*index_drop_buffer)[(*index_drop_count)++]= table_key - table->key_info; + /* Key not found. Add the key to the drop buffer. */ + ha_alter_info->index_drop_buffer + [ha_alter_info->index_drop_count++]= + table_key; DBUG_PRINT("info", ("index dropped: '%s'", table_key->name)); continue; } /* Check that the key types are compatible between old and new tables. */ if ((table_key->algorithm != new_key->algorithm) || - ((table_key->flags & HA_KEYFLAG_MASK) != + ((table_key->flags & HA_KEYFLAG_MASK) != (new_key->flags & HA_KEYFLAG_MASK)) || - (table_key->key_parts != new_key->key_parts)) + (table_key->user_defined_key_parts != + new_key->user_defined_key_parts)) goto index_changed; /* Check that the key parts remain compatible between the old and new tables. 
*/ - for (table_part= table_key->key_part, new_part= new_key->key_part; - table_part < table_part_end; - table_part++, new_part++) + end= table_key->key_part + table_key->user_defined_key_parts; + for (key_part= table_key->key_part, new_part= new_key->key_part; + key_part < end; + key_part++, new_part++) { /* - Key definition has changed if we are using a different field or - if the used key part length is different. We know that the fields - did not change. Comparing field numbers is sufficient. + Key definition has changed if we are using a different field or + if the used key part length is different. It makes sense to + check lengths first as in case when fields differ it is likely + that lengths differ too and checking fields is more expensive + in general case. */ - if ((table_part->length != new_part->length) || - (table_part->fieldnr - 1 != new_part->fieldnr)) - goto index_changed; + if (key_part->length != new_part->length) + goto index_changed; + + new_field= get_field_by_index(alter_info, new_part->fieldnr); + + /* + For prefix keys KEY_PART_INFO::field points to cloned Field + object with adjusted length. So below we have to check field + indexes instead of simply comparing pointers to Field objects. + */ + if (! new_field->field || + new_field->field->field_index != key_part->fieldnr - 1) + goto index_changed; } continue; index_changed: - /* Key modified. Add the offset of the key to both buffers. 
*/ - (*index_drop_buffer)[(*index_drop_count)++]= table_key - table->key_info; - (*index_add_buffer)[(*index_add_count)++]= new_key - *key_info_buffer; - key_part= new_key->key_part; - end= key_part + new_key->key_parts; - for(; key_part != end; key_part++) - { - // Mark field to be part of new key - field= table->field[key_part->fieldnr]; - field->flags|= FIELD_IN_ADD_INDEX; - } - if (table->s->tmp_table == NO_TMP_TABLE) - { - (void) delete_statistics_for_index(thd, table, table_key, FALSE); - if ((uint) (table_key - table->key_info) == table->s->primary_key) - { - KEY *tab_key_info= table->key_info; - for (uint j=0; j < table->s->keys; j++, tab_key_info++) - { - if (tab_key_info->key_parts != tab_key_info->ext_key_parts) - (void) delete_statistics_for_index(thd, table, tab_key_info, - TRUE); - } - } - } - + /* Key modified. Add the key / key offset to both buffers. */ + ha_alter_info->index_drop_buffer + [ha_alter_info->index_drop_count++]= + table_key; + ha_alter_info->index_add_buffer + [ha_alter_info->index_add_count++]= + new_key - ha_alter_info->key_info_buffer; + /* Mark all old fields which are used in newly created index. */ DBUG_PRINT("info", ("index changed: '%s'", table_key->name)); } /*end of for (; table_key < table_key_end;) */ @@ -5580,12 +5791,12 @@ mysql_compare_tables(TABLE *table, /* Step through all keys of the new table and find matching old keys. */ - for (new_key= *key_info_buffer; new_key < new_key_end; new_key++) + for (new_key= ha_alter_info->key_info_buffer; + new_key < new_key_end; + new_key++) { /* Search an old key with the same name. */ - for (i= 0, table_key= table->key_info; - table_key < table_key_end; - i++, table_key++) + for (table_key= table->key_info; table_key < table_key_end; table_key++) { if (! strcmp(table_key->name, new_key->name)) break; @@ -5593,44 +5804,309 @@ mysql_compare_tables(TABLE *table, if (table_key >= table_key_end) { /* Key not found. Add the offset of the key to the add buffer. 
*/ - (*index_add_buffer)[(*index_add_count)++]= new_key - *key_info_buffer; - key_part= new_key->key_part; - end= key_part + new_key->key_parts; - for(; key_part != end; key_part++) + ha_alter_info->index_add_buffer + [ha_alter_info->index_add_count++]= + new_key - ha_alter_info->key_info_buffer; + DBUG_PRINT("info", ("index added: '%s'", new_key->name)); + } + else + ha_alter_info->create_info->indexes_option_struct[table_key - table->key_info]= + new_key->option_struct; + } + + /* + Sort index_add_buffer according to how key_info_buffer is sorted. + I.e. with primary keys first - see sort_keys(). + */ + my_qsort(ha_alter_info->index_add_buffer, + ha_alter_info->index_add_count, + sizeof(uint), (qsort_cmp) compare_uint); + + /* Now let us calculate flags for storage engine API. */ + + /* Count all existing candidate keys. */ + for (table_key= table->key_info; table_key < table_key_end; table_key++) + { + /* + Check if key is a candidate key, This key is either already primary key + or could be promoted to primary key if the original primary key is + dropped. + In MySQL one is allowed to create primary key with partial fields (i.e. + primary key which is not considered candidate). For simplicity we count + such key as a candidate key here. + */ + if (((uint) (table_key - table->key_info) == table->s->primary_key) || + is_candidate_key(table_key)) + candidate_key_count++; + } + + /* Figure out what kind of indexes we are dropping. */ + KEY **dropped_key; + KEY **dropped_key_end= ha_alter_info->index_drop_buffer + + ha_alter_info->index_drop_count; + + for (dropped_key= ha_alter_info->index_drop_buffer; + dropped_key < dropped_key_end; dropped_key++) + { + table_key= *dropped_key; + + if (table_key->flags & HA_NOSAME) + { + /* + Unique key. Check for PRIMARY KEY. Also see comment about primary + and candidate keys above. 
+ */ + if ((uint) (table_key - table->key_info) == table->s->primary_key) { - // Mark field to be part of new key - field= table->field[key_part->fieldnr]; - field->flags|= FIELD_IN_ADD_INDEX; + ha_alter_info->handler_flags|= Alter_inplace_info::DROP_PK_INDEX; + candidate_key_count--; + } + else + { + ha_alter_info->handler_flags|= Alter_inplace_info::DROP_UNIQUE_INDEX; + if (is_candidate_key(table_key)) + candidate_key_count--; } - DBUG_PRINT("info", ("index added: '%s'", new_key->name)); } else + ha_alter_info->handler_flags|= Alter_inplace_info::DROP_INDEX; + } + + /* Now figure out what kind of indexes we are adding. */ + for (uint add_key_idx= 0; add_key_idx < ha_alter_info->index_add_count; add_key_idx++) + { + new_key= ha_alter_info->key_info_buffer + ha_alter_info->index_add_buffer[add_key_idx]; + + if (new_key->flags & HA_NOSAME) { - DBUG_ASSERT(i < table->s->keys); - create_info->indexes_option_struct[i]= new_key->option_struct; + bool is_pk= !my_strcasecmp(system_charset_info, new_key->name, primary_key_name); + + if ((!(new_key->flags & HA_KEY_HAS_PART_KEY_SEG) && + !(new_key->flags & HA_NULL_PART_KEY)) || + is_pk) + { + /* Candidate key or primary key! */ + if (candidate_key_count == 0 || is_pk) + ha_alter_info->handler_flags|= Alter_inplace_info::ADD_PK_INDEX; + else + ha_alter_info->handler_flags|= Alter_inplace_info::ADD_UNIQUE_INDEX; + candidate_key_count++; + } + else + { + ha_alter_info->handler_flags|= Alter_inplace_info::ADD_UNIQUE_INDEX; + } } + else + ha_alter_info->handler_flags|= Alter_inplace_info::ADD_INDEX; } - /* Check if changes are compatible with current handler without a copy */ + DBUG_RETURN(false); +} + + +/** + Mark fields participating in newly added indexes in TABLE object which + corresponds to new version of altered table. + + @param ha_alter_info Alter_inplace_info describing in-place ALTER. + @param altered_table TABLE object for new version of TABLE in which + fields should be marked. 
+*/ + +static void update_altered_table(const Alter_inplace_info &ha_alter_info, + TABLE *altered_table) +{ + uint field_idx, add_key_idx; + KEY *key; + KEY_PART_INFO *end, *key_part; + + /* + Clear marker for all fields, as we are going to set it only + for fields which participate in new indexes. + */ + for (field_idx= 0; field_idx < altered_table->s->fields; ++field_idx) + altered_table->field[field_idx]->flags&= ~FIELD_IN_ADD_INDEX; + + /* + Go through array of newly added indexes and mark fields + participating in them. + */ + for (add_key_idx= 0; add_key_idx < ha_alter_info.index_add_count; + add_key_idx++) + { + key= ha_alter_info.key_info_buffer + + ha_alter_info.index_add_buffer[add_key_idx]; + + end= key->key_part + key->user_defined_key_parts; + for (key_part= key->key_part; key_part < end; key_part++) + altered_table->field[key_part->fieldnr]->flags|= FIELD_IN_ADD_INDEX; + } +} + + +/** + Compare two tables to see if their metadata are compatible. + One table specified by a TABLE instance, the other using Alter_info + and HA_CREATE_INFO. + + @param[in] table The first table. + @param[in] alter_info Alter options, fields and keys for the + second table. + @param[in] create_info Create options for the second table. + @param[out] metadata_equal Result of comparison. + + @retval true error + @retval false success +*/ + +bool mysql_compare_tables(TABLE *table, + Alter_info *alter_info, + HA_CREATE_INFO *create_info, + bool *metadata_equal) +{ + DBUG_ENTER("mysql_compare_tables"); + + uint changes= IS_EQUAL_NO; + uint key_count; + List_iterator_fast<Create_field> tmp_new_field_it; + THD *thd= table->in_use; + *metadata_equal= false; + + /* + Create a copy of alter_info. + To compare definitions, we need to "prepare" the definition - transform it + from parser output to a format that describes the table layout (all column + defaults are initialized, duplicate columns are removed). This is done by + mysql_prepare_create_table. 
Unfortunately, mysql_prepare_create_table + performs its transformations "in-place", that is, modifies the argument. + Since we would like to keep mysql_compare_tables() idempotent (not altering + any of the arguments) we create a copy of alter_info here and pass it to + mysql_prepare_create_table, then use the result to compare the tables, and + then destroy the copy. + */ + Alter_info tmp_alter_info(*alter_info, thd->mem_root); + uint db_options= 0; /* not used */ + KEY *key_info_buffer= NULL; + + /* Create the prepared information. */ + int create_table_mode= table->s->tmp_table == NO_TMP_TABLE ? + C_ORDINARY_CREATE : C_ALTER_TABLE; + if (mysql_prepare_create_table(thd, create_info, &tmp_alter_info, + &db_options, table->file, &key_info_buffer, + &key_count, create_table_mode)) + DBUG_RETURN(1); + + /* Some very basic checks. */ + if (table->s->fields != alter_info->create_list.elements || + table->s->db_type() != create_info->db_type || + table->s->tmp_table || + (table->s->row_type != create_info->row_type)) + DBUG_RETURN(false); + + /* Go through fields and check if they are compatible. */ + tmp_new_field_it.init(tmp_alter_info.create_list); + for (Field **f_ptr= table->field; *f_ptr; f_ptr++) + { + Field *field= *f_ptr; + Create_field *tmp_new_field= tmp_new_field_it++; + + /* Check that NULL behavior is the same. */ + if ((tmp_new_field->flags & NOT_NULL_FLAG) != + (uint) (field->flags & NOT_NULL_FLAG)) + DBUG_RETURN(false); + + /* + mysql_prepare_alter_table() clears HA_OPTION_PACK_RECORD bit when + preparing description of existing table. In ALTER TABLE it is later + updated to correct value by create_table_impl() call. + So to get correct value of this bit in this function we have to + mimic behavior of create_table_impl(). 
+ */ + if (create_info->row_type == ROW_TYPE_DYNAMIC || + (tmp_new_field->flags & BLOB_FLAG) || + (tmp_new_field->sql_type == MYSQL_TYPE_VARCHAR && + create_info->row_type != ROW_TYPE_FIXED)) + create_info->table_options|= HA_OPTION_PACK_RECORD; + + /* Check if field was renamed */ + if (my_strcasecmp(system_charset_info, + field->field_name, + tmp_new_field->field_name)) + DBUG_RETURN(false); + + /* Evaluate changes bitmap and send to check_if_incompatible_data() */ + uint field_changes= field->is_equal(tmp_new_field); + if (field_changes != IS_EQUAL_YES) + DBUG_RETURN(false); + + changes|= field_changes; + } + + /* Check if changes are compatible with current handler. */ if (table->file->check_if_incompatible_data(create_info, changes)) + DBUG_RETURN(false); + + /* Go through keys and check if they are compatible. */ + KEY *table_key; + KEY *table_key_end= table->key_info + table->s->keys; + KEY *new_key; + KEY *new_key_end= key_info_buffer + key_count; + + /* Step through all keys of the first table and search matching keys. */ + for (table_key= table->key_info; table_key < table_key_end; table_key++) { - DBUG_PRINT("info", ("check_if_incompatible_data() -> " - "ALTER_TABLE_DATA_CHANGED")); - DBUG_RETURN(0); + /* Search a key with the same name. */ + for (new_key= key_info_buffer; new_key < new_key_end; new_key++) + { + if (! strcmp(table_key->name, new_key->name)) + break; + } + if (new_key >= new_key_end) + DBUG_RETURN(false); + + /* Check that the key types are compatible. */ + if ((table_key->algorithm != new_key->algorithm) || + ((table_key->flags & HA_KEYFLAG_MASK) != + (new_key->flags & HA_KEYFLAG_MASK)) || + (table_key->user_defined_key_parts != + new_key->user_defined_key_parts)) + DBUG_RETURN(false); + + /* Check that the key parts remain compatible. 
*/ + KEY_PART_INFO *table_part; + KEY_PART_INFO *table_part_end= table_key->key_part + table_key->user_defined_key_parts; + KEY_PART_INFO *new_part; + for (table_part= table_key->key_part, new_part= new_key->key_part; + table_part < table_part_end; + table_part++, new_part++) + { + /* + Key definition is different if we are using a different field or + if the used key part length is different. We know that the fields + are equal. Comparing field numbers is sufficient. + */ + if ((table_part->length != new_part->length) || + (table_part->fieldnr - 1 != new_part->fieldnr)) + DBUG_RETURN(false); + } } - if (*index_drop_count || *index_add_count) + /* Step through all keys of the second table and find matching keys. */ + for (new_key= key_info_buffer; new_key < new_key_end; new_key++) { - DBUG_PRINT("info", ("Index dropped=%u added=%u -> " - "ALTER_TABLE_INDEX_CHANGED", - *index_drop_count, *index_add_count)); - *need_copy_table= ALTER_TABLE_INDEX_CHANGED; - DBUG_RETURN(0); + /* Search a key with the same name. */ + for (table_key= table->key_info; table_key < table_key_end; table_key++) + { + if (! 
strcmp(table_key->name, new_key->name)) + break; + } + if (table_key >= table_key_end) + DBUG_RETURN(false); } - DBUG_PRINT("info", (" -> ALTER_TABLE_METADATA_ONLY")); - *need_copy_table= ALTER_TABLE_METADATA_ONLY; // Tables are compatible - DBUG_RETURN(0); + *metadata_equal= true; // Tables are compatible + DBUG_RETURN(false); } @@ -5651,7 +6127,7 @@ mysql_compare_tables(TABLE *table, static bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled, - enum enum_enable_or_disable keys_onoff) + Alter_info::enum_enable_or_disable keys_onoff) { int error= 0; DBUG_ENTER("alter_table_manage_keys"); @@ -5659,21 +6135,21 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled, table, indexes_were_disabled, keys_onoff)); switch (keys_onoff) { - case ENABLE: + case Alter_info::ENABLE: DEBUG_SYNC(table->in_use, "alter_table_enable_indexes"); error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); break; - case LEAVE_AS_IS: + case Alter_info::LEAVE_AS_IS: if (!indexes_were_disabled) break; /* fall-through: disabled indexes */ - case DISABLE: + case Alter_info::DISABLE: error= table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); } if (error == HA_ERR_WRONG_COMMAND) { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_NOTE, ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), table->file->table_type(), table->s->db.str, table->s->table_name.str); @@ -5684,6 +6160,401 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled, DBUG_RETURN(error); } + +/** + Check if the pending ALTER TABLE operations support the in-place + algorithm based on restrictions in the SQL layer or given the + nature of the operations themselves. If in-place isn't supported, + it won't be necessary to check with the storage engine. + + @param table The original TABLE. + @param create_info Information from the parsing phase about new + table properties. 
+ @param alter_info Data related to detected changes. + + @return false In-place is possible, check with storage engine. + @return true Incompatible operations, must use table copy. +*/ + +static bool is_inplace_alter_impossible(TABLE *table, + HA_CREATE_INFO *create_info, + const Alter_info *alter_info) +{ + DBUG_ENTER("is_inplace_alter_impossible"); + + /* At the moment we can't handle altering temporary tables without a copy. */ + if (table->s->tmp_table) + DBUG_RETURN(true); + + + /* + We also test if OPTIMIZE TABLE was given and was mapped to alter table. + In that case we always do full copy (ALTER_RECREATE is set in this case). + + For the ALTER TABLE tbl_name ORDER BY ... we also always use copy + algorithm. In theory, this operation can be done in-place by some + engine, but since a) no current engine does this and b) our current + API lacks infrastructure for passing information about table ordering + to storage engine we simply always do copy now. + + ENABLE/DISABLE KEYS is a MyISAM/Heap specific operation that is + not supported for in-place in combination with other operations. + Alone, it will be done by simple_rename_or_index_change(). + */ + if (alter_info->flags & (Alter_info::ALTER_RECREATE | + Alter_info::ALTER_ORDER | + Alter_info::ALTER_KEYS_ONOFF)) + DBUG_RETURN(true); + + /* + Test also that engine was not given during ALTER TABLE, or + we are force to run regular alter table (copy). + E.g. ALTER TABLE tbl_name ENGINE=MyISAM. + Note that in addition to checking flag in HA_CREATE_INFO we + also check HA_CREATE_INFO::db_type value. This is done + to cover cases in which engine is changed implicitly + (e.g. when non-partitioned table becomes partitioned). + + Note that we do copy even if the table is already using the + given engine. Many users and tools depend on using ENGINE + to force a table rebuild. 
+ */ + if (create_info->db_type != table->s->db_type() || + create_info->used_fields & HA_CREATE_USED_ENGINE) + DBUG_RETURN(true); + + /* + There was a bug prior to mysql-4.0.25. Number of null fields was + calculated incorrectly. As a result frm and data files gets out of + sync after fast alter table. There is no way to determine by which + mysql version (in 4.0 and 4.1 branches) table was created, thus we + disable fast alter table for all tables created by mysql versions + prior to 5.0 branch. + See BUG#6236. + */ + if (!table->s->mysql_version) + DBUG_RETURN(true); + + DBUG_RETURN(false); +} + + +/** + Perform in-place alter table. + + @param thd Thread handle. + @param table_list TABLE_LIST for the table to change. + @param table The original TABLE. + @param altered_table TABLE object for new version of the table. + @param ha_alter_info Structure describing ALTER TABLE to be carried + out and serving as a storage place for data + used during different phases. + @param inplace_supported Enum describing the locking requirements. + @param target_mdl_request Metadata request/lock on the target table name. + @param alter_ctx ALTER TABLE runtime context. + + @retval true Error + @retval false Success + + @note + If mysql_alter_table does not need to copy the table, it is + either an alter table where the storage engine does not + need to know about the change, only the frm will change, + or the storage engine supports performing the alter table + operation directly, in-place without mysql having to copy + the table. + + @note This function frees the TABLE object associated with the new version of + the table and removes the .FRM file for it in case of both success and + failure. 
+*/ + +static bool mysql_inplace_alter_table(THD *thd, + TABLE_LIST *table_list, + TABLE *table, + TABLE *altered_table, + Alter_inplace_info *ha_alter_info, + enum_alter_inplace_result inplace_supported, + MDL_request *target_mdl_request, + Alter_table_ctx *alter_ctx) +{ + Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN); + handlerton *db_type= table->s->db_type(); + MDL_ticket *mdl_ticket= table->mdl_ticket; + HA_CREATE_INFO *create_info= ha_alter_info->create_info; + Alter_info *alter_info= ha_alter_info->alter_info; + bool reopen_tables= false; + + DBUG_ENTER("mysql_inplace_alter_table"); + + /* + Upgrade to EXCLUSIVE lock if: + - This is requested by the storage engine + - Or the storage engine needs exclusive lock for just the prepare + phase + - Or requested by the user + + Note that we handle situation when storage engine needs exclusive + lock for prepare phase under LOCK TABLES in the same way as when + exclusive lock is required for duration of the whole statement. + */ + if (inplace_supported == HA_ALTER_INPLACE_EXCLUSIVE_LOCK || + ((inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE || + inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) && + (thd->locked_tables_mode == LTM_LOCK_TABLES || + thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)) || + alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE) + { + if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN)) + goto cleanup; + /* + Get rid of all TABLE instances belonging to this thread + except one to be used for in-place ALTER TABLE. + + This is mostly needed to satisfy InnoDB assumptions/asserts. + */ + close_all_tables_for_name(thd, table->s, + alter_ctx->is_table_renamed() ? + HA_EXTRA_PREPARE_FOR_RENAME : + HA_EXTRA_NOT_USED, + table); + /* + If we are under LOCK TABLES we will need to reopen tables which we + just have closed in case of error. 
+ */ + reopen_tables= true; + } + else if (inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE || + inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) + { + /* + Storage engine has requested exclusive lock only for prepare phase + and we are not under LOCK TABLES. + Don't mark TABLE_SHARE as old in this case, as this won't allow opening + of table by other threads during main phase of in-place ALTER TABLE. + */ + if (thd->mdl_context.upgrade_shared_lock(table->mdl_ticket, MDL_EXCLUSIVE, + thd->variables.lock_wait_timeout)) + goto cleanup; + + tdc_remove_table(thd, TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE, + table->s->db.str, table->s->table_name.str, + false); + } + + /* + Upgrade to SHARED_NO_WRITE lock if: + - The storage engine needs writes blocked for the whole duration + - Or this is requested by the user + Note that under LOCK TABLES, we will already have SHARED_NO_READ_WRITE. + */ + if ((inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK || + alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_SHARED) && + thd->mdl_context.upgrade_shared_lock(table->mdl_ticket, + MDL_SHARED_NO_WRITE, + thd->variables.lock_wait_timeout)) + { + goto cleanup; + } + + // It's now safe to take the table level lock. 
+ if (lock_tables(thd, table_list, alter_ctx->tables_opened, 0)) + goto cleanup; + + DEBUG_SYNC(thd, "alter_table_inplace_after_lock_upgrade"); + THD_STAGE_INFO(thd, stage_alter_inplace_prepare); + + switch (inplace_supported) { + case HA_ALTER_ERROR: + case HA_ALTER_INPLACE_NOT_SUPPORTED: + DBUG_ASSERT(0); + // fall through + case HA_ALTER_INPLACE_NO_LOCK: + case HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE: + switch (alter_info->requested_lock) { + case Alter_info::ALTER_TABLE_LOCK_DEFAULT: + case Alter_info::ALTER_TABLE_LOCK_NONE: + ha_alter_info->online= true; + break; + case Alter_info::ALTER_TABLE_LOCK_SHARED: + case Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE: + break; + } + break; + case HA_ALTER_INPLACE_EXCLUSIVE_LOCK: + case HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE: + case HA_ALTER_INPLACE_SHARED_LOCK: + break; + } + + if (table->file->ha_prepare_inplace_alter_table(altered_table, + ha_alter_info)) + { + goto rollback; + } + + /* + Downgrade the lock if storage engine has told us that exclusive lock was + necessary only for prepare phase (unless we are not under LOCK TABLES) and + user has not explicitly requested exclusive lock. + */ + if ((inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE || + inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) && + !(thd->locked_tables_mode == LTM_LOCK_TABLES || + thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES) && + (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE)) + { + /* If storage engine or user requested shared lock downgrade to SNW. 
*/ + if (inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE || + alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_SHARED) + table->mdl_ticket->downgrade_lock(MDL_SHARED_NO_WRITE); + else + { + DBUG_ASSERT(inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE); + table->mdl_ticket->downgrade_lock(MDL_SHARED_UPGRADABLE); + } + } + + DEBUG_SYNC(thd, "alter_table_inplace_after_lock_downgrade"); + THD_STAGE_INFO(thd, stage_alter_inplace); + + if (table->file->ha_inplace_alter_table(altered_table, + ha_alter_info)) + { + goto rollback; + } + + // Upgrade to EXCLUSIVE before commit. + if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME)) + goto rollback; + + /* + If we are killed after this point, we should ignore and continue. + We have mostly completed the operation at this point, there should + be no long waits left. + */ + + DBUG_EXECUTE_IF("alter_table_rollback_new_index", { + table->file->ha_commit_inplace_alter_table(altered_table, + ha_alter_info, + false); + my_error(ER_UNKNOWN_ERROR, MYF(0)); + goto cleanup; + }); + + DEBUG_SYNC(thd, "alter_table_inplace_before_commit"); + THD_STAGE_INFO(thd, stage_alter_inplace_commit); + + if (table->file->ha_commit_inplace_alter_table(altered_table, + ha_alter_info, + true)) + { + goto rollback; + } + + close_all_tables_for_name(thd, table->s, + alter_ctx->is_table_renamed() ? + HA_EXTRA_PREPARE_FOR_RENAME : + HA_EXTRA_NOT_USED, + NULL); + table_list->table= table= NULL; + close_temporary_table(thd, altered_table, true, false); + + /* + Replace the old .FRM with the new .FRM, but keep the old name for now. + Rename to the new name (if needed) will be handled separately below. + */ + if (mysql_rename_table(db_type, alter_ctx->new_db, alter_ctx->tmp_name, + alter_ctx->db, alter_ctx->alias, + FN_FROM_IS_TMP | NO_HA_TABLE)) + { + // Since changes were done in-place, we can't revert them. 
+ (void) quick_rm_table(thd, db_type, + alter_ctx->new_db, alter_ctx->tmp_name, + FN_IS_TMP | NO_HA_TABLE); + DBUG_RETURN(true); + } + + table_list->mdl_request.ticket= mdl_ticket; + if (open_table(thd, table_list, thd->mem_root, &ot_ctx)) + DBUG_RETURN(true); + + /* + Tell the handler that the changed frm is on disk and table + has been re-opened + */ + table_list->table->file->ha_notify_table_changed(); + + /* + We might be going to reopen table down on the road, so we have to + restore state of the TABLE object which we used for obtaining of + handler object to make it usable for later reopening. + */ + close_thread_table(thd, &thd->open_tables); + table_list->table= NULL; + + // Rename altered table if requested. + if (alter_ctx->is_table_renamed()) + { + // Remove TABLE and TABLE_SHARE for old name from TDC. + tdc_remove_table(thd, TDC_RT_REMOVE_ALL, + alter_ctx->db, alter_ctx->table_name, false); + + if (mysql_rename_table(db_type, alter_ctx->db, alter_ctx->table_name, + alter_ctx->new_db, alter_ctx->new_alias, 0)) + { + /* + If the rename fails we will still have a working table + with the old name, but with other changes applied. + */ + DBUG_RETURN(true); + } + if (Table_triggers_list::change_table_name(thd, + alter_ctx->db, + alter_ctx->alias, + alter_ctx->table_name, + alter_ctx->new_db, + alter_ctx->new_alias)) + { + /* + If the rename of trigger files fails, try to rename the table + back so we at least have matching table and trigger files. + */ + (void) mysql_rename_table(db_type, + alter_ctx->new_db, alter_ctx->new_alias, + alter_ctx->db, alter_ctx->alias, 0); + DBUG_RETURN(true); + } + rename_table_in_stat_tables(thd, alter_ctx->db,alter_ctx->alias, + alter_ctx->new_db, alter_ctx->new_alias); + } + + DBUG_RETURN(false); + + rollback: + table->file->ha_commit_inplace_alter_table(altered_table, + ha_alter_info, + false); + cleanup: + if (reopen_tables) + { + /* Close the only table instance which is still around. 
*/ + close_all_tables_for_name(thd, table->s, + alter_ctx->is_table_renamed() ? + HA_EXTRA_PREPARE_FOR_RENAME : + HA_EXTRA_NOT_USED, + NULL); + if (thd->locked_tables_list.reopen_tables(thd)) + thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); + /* QQ; do something about metadata locks ? */ + } + close_temporary_table(thd, altered_table, true, false); + // Delete temporary .frm/.par + (void) quick_rm_table(thd, create_info->db_type, alter_ctx->new_db, + alter_ctx->tmp_name, FN_IS_TMP | NO_HA_TABLE); + DBUG_RETURN(true); +} + /** maximum possible length for certain blob types. @@ -5742,6 +6613,7 @@ blob_length_by_type(enum_field_types type) But since ALTER might end-up doing CREATE, this distinction is gone and we just carry around two structures. + @param[in,out] alter_ctx Runtime context for ALTER TABLE. @return Fills various create_info members based on information retrieved @@ -5758,7 +6630,8 @@ blob_length_by_type(enum_field_types type) bool mysql_prepare_alter_table(THD *thd, TABLE *table, HA_CREATE_INFO *create_info, - Alter_info *alter_info) + Alter_info *alter_info, + Alter_table_ctx *alter_ctx) { /* New column definitions are added here */ List<Create_field> new_create_list; @@ -5797,15 +6670,30 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, table->file->info(HA_STATUS_AUTO); create_info->auto_increment_value= table->file->stats.auto_increment_value; } + if (!(used_fields & HA_CREATE_USED_KEY_BLOCK_SIZE)) create_info->key_block_size= table->s->key_block_size; + + if (!(used_fields & HA_CREATE_USED_STATS_SAMPLE_PAGES)) + create_info->stats_sample_pages= table->s->stats_sample_pages; + + if (!(used_fields & HA_CREATE_USED_STATS_AUTO_RECALC)) + create_info->stats_auto_recalc= table->s->stats_auto_recalc; + if (!(used_fields & HA_CREATE_USED_TRANSACTIONAL)) create_info->transactional= table->s->transactional; restore_record(table, s->default_values); // Empty record for DEFAULT + if ((create_info->fields_option_struct= 
(ha_field_option_struct**) + thd->calloc(sizeof(void*) * table->s->fields)) == NULL || + (create_info->indexes_option_struct= (ha_index_option_struct**) + thd->calloc(sizeof(void*) * table->s->keys)) == NULL) + DBUG_RETURN(1); + create_info->option_list= merge_engine_table_options(table->s->option_list, create_info->option_list, thd->mem_root); + /* First collect all fields from table which isn't in drop_list */ @@ -5836,12 +6724,6 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, if (table->s->tmp_table == NO_TMP_TABLE) (void) delete_statistics_for_column(thd, table, field); drop_it.remove(); - /* - ALTER TABLE DROP COLUMN always changes table data even in cases - when new version of the table has the same structure as the old - one. - */ - alter_info->change_level= ALTER_TABLE_DATA_CHANGED; continue; } /* Check if field is changed */ @@ -5855,6 +6737,12 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, if (def) { // Field is changed def->field=field; + /* + Add column being updated to the list of new columns. + Note that columns with AFTER clauses are added to the end + of the list for now. Their positions will be corrected later. + */ + new_create_list.push_back(def); if (field->stored_in_db != def->stored_in_db) { my_error(ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN, MYF(0)); @@ -5862,7 +6750,13 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, } if (!def->after) { - new_create_list.push_back(def); + /* + If this ALTER TABLE doesn't have an AFTER clause for the modified + column then remove this column from the list of columns to be + processed. So later we can iterate over the columns remaining + in this list and process modified columns with AFTER clause or + add new columns. 
+ */ def_it.remove(); } } @@ -5916,45 +6810,58 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, def->sql_type == MYSQL_TYPE_NEWDATE || def->sql_type == MYSQL_TYPE_DATETIME || def->sql_type == MYSQL_TYPE_DATETIME2) && - !alter_info->datetime_field && + !alter_ctx->datetime_field && !(~def->flags & (NO_DEFAULT_VALUE_FLAG | NOT_NULL_FLAG)) && thd->variables.sql_mode & MODE_NO_ZERO_DATE) { - alter_info->datetime_field= def; - alter_info->error_if_not_empty= TRUE; + alter_ctx->datetime_field= def; + alter_ctx->error_if_not_empty= TRUE; } if (!def->after) new_create_list.push_back(def); - else if (def->after == first_keyword) - { - new_create_list.push_front(def); - /* - Re-ordering columns in table can't be done using in-place algorithm - as it always changes table data. - */ - alter_info->change_level= ALTER_TABLE_DATA_CHANGED; - } else { Create_field *find; - find_it.rewind(); - while ((find=find_it++)) // Add new columns + if (def->change) { - if (!my_strcasecmp(system_charset_info,def->after, find->field_name)) - break; + find_it.rewind(); + /* + For columns being modified with AFTER clause we should first remove + these columns from the list and then add them back at their correct + positions. + */ + while ((find=find_it++)) + { + /* + Create_fields representing changed columns are added directly + from Alter_info::create_list to new_create_list. We can therefore + safely use pointer equality rather than name matching here. + This prevents removing the wrong column in case of column rename. 
+ */ + if (find == def) + { + find_it.remove(); + break; + } + } } - if (!find) + if (def->after == first_keyword) + new_create_list.push_front(def); + else { - my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after, - table->s->table_name.str); - goto err; + find_it.rewind(); + while ((find=find_it++)) + { + if (!my_strcasecmp(system_charset_info, def->after, find->field_name)) + break; + } + if (!find) + { + my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after, table->s->table_name.str); + goto err; + } + find_it.after(def); // Put column after this } - find_it.after(def); // Put element after this - /* - Re-ordering columns in table can't be done using in-place algorithm - as it always changes table data. - */ - alter_info->change_level= ALTER_TABLE_DATA_CHANGED; } } if (alter_info->alter_list.elements) @@ -5996,7 +6903,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, KEY *tab_key_info= table->key_info; for (uint j=0; j < table->s->keys; j++, tab_key_info++) { - if (tab_key_info->key_parts != tab_key_info->ext_key_parts) + if (tab_key_info->user_defined_key_parts != + tab_key_info->ext_key_parts) (void) delete_statistics_for_index(thd, table, tab_key_info, TRUE); } @@ -6009,7 +6917,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, KEY_PART_INFO *key_part= key_info->key_part; key_parts.empty(); bool delete_index_stat= FALSE; - for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) + for (uint j=0 ; j < key_info->user_defined_key_parts ; j++,key_part++) { if (!key_part->field) continue; // Wrong field (from UNIREG) @@ -6083,7 +6991,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, if (delete_index_stat) (void) delete_statistics_for_index(thd, table, key_info, FALSE); else if (modified_primary_key && - key_info->key_parts != key_info->ext_key_parts) + key_info->user_defined_key_parts != key_info->ext_key_parts) (void) delete_statistics_for_index(thd, table, key_info, TRUE); } @@ -6130,8 +7038,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, if 
(key->type == Key::FOREIGN_KEY && ((Foreign_key *)key)->validate(new_create_list)) goto err; - if (key->type != Key::FOREIGN_KEY) - new_key_list.push_back(key); + new_key_list.push_back(key); if (key->name.str && !my_strcasecmp(system_charset_info, key->name.str, primary_key_name)) { @@ -6143,9 +7050,20 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, if (alter_info->drop_list.elements) { - my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), - alter_info->drop_list.head()->name); - goto err; + Alter_drop *drop; + drop_it.rewind(); + while ((drop=drop_it++)) { + switch (drop->type) { + case Alter_drop::KEY: + case Alter_drop::COLUMN: + my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), + alter_info->drop_list.head()->name); + goto err; + case Alter_drop::FOREIGN_KEY: + // Leave the DROP FOREIGN KEY names in the alter_info->drop_list. + break; + } + } } if (alter_info->alter_list.elements) { @@ -6165,6 +7083,11 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, (HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS)) || (used_fields & HA_CREATE_USED_PACK_KEYS)) db_create_options&= ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS); + if ((create_info->table_options & + (HA_OPTION_STATS_PERSISTENT | HA_OPTION_NO_STATS_PERSISTENT)) || + (used_fields & HA_CREATE_USED_STATS_PERSISTENT)) + db_create_options&= ~(HA_OPTION_STATS_PERSISTENT | HA_OPTION_NO_STATS_PERSISTENT); + if (create_info->table_options & (HA_OPTION_CHECKSUM | HA_OPTION_NO_CHECKSUM)) db_create_options&= ~(HA_OPTION_CHECKSUM | HA_OPTION_NO_CHECKSUM); @@ -6185,88 +7108,495 @@ err: } -/* - Alter table +/** + Get Create_field object for newly created table by its name + in the old version of table. - SYNOPSIS - mysql_alter_table() - thd Thread handle - new_db If there is a RENAME clause - new_name If there is a RENAME clause - create_info Information from the parsing phase about new - table properties. - table_list The table to change. - alter_info Lists of fields, keys to be changed, added - or dropped. 
- order_num How many ORDER BY fields has been specified. - order List of fields to ORDER BY. - ignore Whether we have ALTER IGNORE TABLE - require_online Give an error if we can't do operation online + @param alter_info Alter_info describing newly created table. + @param old_name Name of field in old table. - DESCRIPTION - This is a veery long function and is everything but the kitchen sink :) - It is used to alter a table and not only by ALTER TABLE but also - CREATE|DROP INDEX are mapped on this function. - - When the ALTER TABLE statement just does a RENAME or ENABLE|DISABLE KEYS, - or both, then this function short cuts its operation by renaming - the table and/or enabling/disabling the keys. In this case, the FRM is - not changed, directly by mysql_alter_table. However, if there is a - RENAME + change of a field, or an index, the short cut is not used. - See how `create_list` is used to generate the new FRM regarding the - structure of the fields. The same is done for the indices of the table. - - Important is the fact, that this function tries to do as little work as - possible, by finding out whether a intermediate table is needed to copy - data into and when finishing the altering to use it as the original table. - For this reason the function mysql_compare_tables() is called, which decides - based on all kind of data how similar are the new and the original - tables. + @returns Pointer to Create_field object, NULL - if field is + not present in new version of table. 
+*/ - RETURN VALUES - FALSE OK - TRUE Error +static Create_field *get_field_by_old_name(Alter_info *alter_info, + const char *old_name) +{ + List_iterator_fast<Create_field> new_field_it(alter_info->create_list); + Create_field *new_field; + + while ((new_field= new_field_it++)) + { + if (new_field->field && + (my_strcasecmp(system_charset_info, + new_field->field->field_name, + old_name) == 0)) + break; + } + return new_field; +} + + +/** Type of change to foreign key column, */ + +enum fk_column_change_type +{ + FK_COLUMN_NO_CHANGE, FK_COLUMN_DATA_CHANGE, + FK_COLUMN_RENAMED, FK_COLUMN_DROPPED +}; + +/** + Check that ALTER TABLE's changes on columns of a foreign key are allowed. + + @param[in] thd Thread context. + @param[in] alter_info Alter_info describing changes to be done + by ALTER TABLE. + @param[in] fk_columns List of columns of the foreign key to check. + @param[out] bad_column_name Name of field on which ALTER TABLE tries to + do prohibited operation. + + @note This function takes into account value of @@foreign_key_checks + setting. + + @retval FK_COLUMN_NO_CHANGE No significant changes are to be done on + foreign key columns. + @retval FK_COLUMN_DATA_CHANGE ALTER TABLE might result in value + change in foreign key column (and + foreign_key_checks is on). + @retval FK_COLUMN_RENAMED Foreign key column is renamed. + @retval FK_COLUMN_DROPPED Foreign key column is dropped. 
+*/ + +static enum fk_column_change_type +fk_check_column_changes(THD *thd, Alter_info *alter_info, + List<LEX_STRING> &fk_columns, + const char **bad_column_name) +{ + List_iterator_fast<LEX_STRING> column_it(fk_columns); + LEX_STRING *column; + + *bad_column_name= NULL; + + while ((column= column_it++)) + { + Create_field *new_field= get_field_by_old_name(alter_info, column->str); + + if (new_field) + { + Field *old_field= new_field->field; + + if (my_strcasecmp(system_charset_info, old_field->field_name, + new_field->field_name)) + { + /* + Copy algorithm doesn't support proper renaming of columns in + the foreign key yet. At the moment we lack API which will tell + SE that foreign keys should be updated to use new name of column + like it happens in case of in-place algorithm. + */ + *bad_column_name= column->str; + return FK_COLUMN_RENAMED; + } + + if ((old_field->is_equal(new_field) == IS_EQUAL_NO) || + ((new_field->flags & NOT_NULL_FLAG) && + !(old_field->flags & NOT_NULL_FLAG))) + { + if (!(thd->variables.option_bits & OPTION_NO_FOREIGN_KEY_CHECKS)) + { + /* + Column in a FK has changed significantly. Unless + foreign_key_checks are off we prohibit this since this + means values in this column might be changed by ALTER + and thus referential integrity might be broken, + */ + *bad_column_name= column->str; + return FK_COLUMN_DATA_CHANGE; + } + } + } + else + { + /* + Column in FK was dropped. Most likely this will break + integrity constraints of InnoDB data-dictionary (and thus + InnoDB will emit an error), so we prohibit this right away + even if foreign_key_checks are off. + This also includes a rare case when another field replaces + field being dropped since it is easy to break referential + integrity in this case. 
+ */ + *bad_column_name= column->str; + return FK_COLUMN_DROPPED; + } + } + + return FK_COLUMN_NO_CHANGE; +} + + +/** + Check if ALTER TABLE we are about to execute using COPY algorithm + is not supported as it might break referential integrity. + + @note If foreign_key_checks is disabled (=0), we allow to break + referential integrity. But we still disallow some operations + like dropping or renaming columns in foreign key since they + are likely to break consistency of InnoDB data-dictionary + and thus will end-up in error anyway. + + @param[in] thd Thread context. + @param[in] table Table to be altered. + @param[in] alter_info Lists of fields, keys to be changed, added + or dropped. + @param[out] alter_ctx ALTER TABLE runtime context. + Alter_table_ctx::fk_error_if_delete flag + is set if deletion during alter can break + foreign key integrity. + + @retval false Success. + @retval true Error, ALTER - tries to do change which is not compatible + with foreign key definitions on the table. +*/ + +static bool fk_prepare_copy_alter_table(THD *thd, TABLE *table, + Alter_info *alter_info, + Alter_table_ctx *alter_ctx) +{ + List <FOREIGN_KEY_INFO> fk_parent_key_list; + List <FOREIGN_KEY_INFO> fk_child_key_list; + FOREIGN_KEY_INFO *f_key; + + DBUG_ENTER("fk_prepare_copy_alter_table"); + + table->file->get_parent_foreign_key_list(thd, &fk_parent_key_list); + + /* OOM when building list. */ + if (thd->is_error()) + DBUG_RETURN(true); + + /* + Remove from the list all foreign keys in which table participates as + parent which are to be dropped by this ALTER TABLE. This is possible + when a foreign key has the same table as child and parent. + */ + List_iterator<FOREIGN_KEY_INFO> fk_parent_key_it(fk_parent_key_list); + + while ((f_key= fk_parent_key_it++)) + { + Alter_drop *drop; + List_iterator_fast<Alter_drop> drop_it(alter_info->drop_list); + + while ((drop= drop_it++)) + { + /* + InnoDB treats foreign key names in case-insensitive fashion. + So we do it here too. 
For database and table name type of + comparison used depends on lower-case-table-names setting. + For l_c_t_n = 0 we use case-sensitive comparison, for + l_c_t_n > 0 modes case-insensitive comparison is used. + */ + if ((drop->type == Alter_drop::FOREIGN_KEY) && + (my_strcasecmp(system_charset_info, f_key->foreign_id->str, + drop->name) == 0) && + (my_strcasecmp(table_alias_charset, f_key->foreign_db->str, + table->s->db.str) == 0) && + (my_strcasecmp(table_alias_charset, f_key->foreign_table->str, + table->s->table_name.str) == 0)) + fk_parent_key_it.remove(); + } + } + + /* + If there are FKs in which this table is parent which were not + dropped we need to prevent ALTER deleting rows from the table, + as it might break referential integrity. OTOH it is OK to do + so if foreign_key_checks are disabled. + */ + if (!fk_parent_key_list.is_empty() && + !(thd->variables.option_bits & OPTION_NO_FOREIGN_KEY_CHECKS)) + alter_ctx->set_fk_error_if_delete_row(fk_parent_key_list.head()); + + fk_parent_key_it.rewind(); + while ((f_key= fk_parent_key_it++)) + { + enum fk_column_change_type changes; + const char *bad_column_name; + + changes= fk_check_column_changes(thd, alter_info, + f_key->referenced_fields, + &bad_column_name); + + switch(changes) + { + case FK_COLUMN_NO_CHANGE: + /* No significant changes. We can proceed with ALTER! 
*/ + break; + case FK_COLUMN_DATA_CHANGE: + { + char buff[NAME_LEN*2+2]; + strxnmov(buff, sizeof(buff)-1, f_key->foreign_db->str, ".", + f_key->foreign_table->str, NullS); + my_error(ER_FK_COLUMN_CANNOT_CHANGE_CHILD, MYF(0), bad_column_name, + f_key->foreign_id->str, buff); + DBUG_RETURN(true); + } + case FK_COLUMN_RENAMED: + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), + "ALGORITHM=COPY", + ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME), + "ALGORITHM=INPLACE"); + DBUG_RETURN(true); + case FK_COLUMN_DROPPED: + { + char buff[NAME_LEN*2+2]; + strxnmov(buff, sizeof(buff)-1, f_key->foreign_db->str, ".", + f_key->foreign_table->str, NullS); + my_error(ER_FK_COLUMN_CANNOT_DROP_CHILD, MYF(0), bad_column_name, + f_key->foreign_id->str, buff); + DBUG_RETURN(true); + } + default: + DBUG_ASSERT(0); + } + } + + table->file->get_foreign_key_list(thd, &fk_child_key_list); + + /* OOM when building list. */ + if (thd->is_error()) + DBUG_RETURN(true); + + /* + Remove from the list all foreign keys which are to be dropped + by this ALTER TABLE. + */ + List_iterator<FOREIGN_KEY_INFO> fk_key_it(fk_child_key_list); + + while ((f_key= fk_key_it++)) + { + Alter_drop *drop; + List_iterator_fast<Alter_drop> drop_it(alter_info->drop_list); + + while ((drop= drop_it++)) + { + /* Names of foreign keys in InnoDB are case-insensitive. */ + if ((drop->type == Alter_drop::FOREIGN_KEY) && + (my_strcasecmp(system_charset_info, f_key->foreign_id->str, + drop->name) == 0)) + fk_key_it.remove(); + } + } + + fk_key_it.rewind(); + while ((f_key= fk_key_it++)) + { + enum fk_column_change_type changes; + const char *bad_column_name; + + changes= fk_check_column_changes(thd, alter_info, + f_key->foreign_fields, + &bad_column_name); + + switch(changes) + { + case FK_COLUMN_NO_CHANGE: + /* No significant changes. We can proceed with ALTER! 
*/ + break; + case FK_COLUMN_DATA_CHANGE: + my_error(ER_FK_COLUMN_CANNOT_CHANGE, MYF(0), bad_column_name, + f_key->foreign_id->str); + DBUG_RETURN(true); + case FK_COLUMN_RENAMED: + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), + "ALGORITHM=COPY", + ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME), + "ALGORITHM=INPLACE"); + DBUG_RETURN(true); + case FK_COLUMN_DROPPED: + my_error(ER_FK_COLUMN_CANNOT_DROP, MYF(0), bad_column_name, + f_key->foreign_id->str); + DBUG_RETURN(true); + default: + DBUG_ASSERT(0); + } + } + + DBUG_RETURN(false); +} + + +/** + Rename table and/or turn indexes on/off without touching .FRM + + @param thd Thread handler + @param table_list TABLE_LIST for the table to change + @param keys_onoff ENABLE or DISABLE KEYS? + @param alter_ctx ALTER TABLE runtime context. + + @return Operation status + @retval false Success + @retval true Failure +*/ + +static bool +simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list, + Alter_info::enum_enable_or_disable keys_onoff, + Alter_table_ctx *alter_ctx) +{ + TABLE *table= table_list->table; + MDL_ticket *mdl_ticket= table->mdl_ticket; + int error= 0; + enum ha_extra_function extra_func= thd->locked_tables_mode + ? HA_EXTRA_NOT_USED + : HA_EXTRA_FORCE_REOPEN; + DBUG_ENTER("simple_rename_or_index_change"); + + if (keys_onoff != Alter_info::LEAVE_AS_IS) + { + if (wait_while_table_is_used(thd, table, extra_func)) + DBUG_RETURN(true); + + // It's now safe to take the table level lock. 
+ if (lock_tables(thd, table_list, alter_ctx->tables_opened, 0)) + DBUG_RETURN(true); + + if (keys_onoff == Alter_info::ENABLE) + { + DEBUG_SYNC(thd,"alter_table_enable_indexes"); + DBUG_EXECUTE_IF("sleep_alter_enable_indexes", my_sleep(6000000);); + error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + } + else if (keys_onoff == Alter_info::DISABLE) + error=table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + + if (error == HA_ERR_WRONG_COMMAND) + { + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), + table->file->table_type(), + table->s->db.str, table->s->table_name.str); + error= 0; + } + else if (error > 0) + { + table->file->print_error(error, MYF(0)); + error= -1; + } + } + + if (!error && alter_ctx->is_table_renamed()) + { + THD_STAGE_INFO(thd, stage_rename); + handlerton *old_db_type= table->s->db_type(); + /* + Then do a 'simple' rename of the table. First we need to close all + instances of 'source' table. + Note that if wait_while_table_is_used() returns error here (i.e. if + this thread was killed) then it must be that previous step of + simple rename did nothing and therefore we can safely return + without additional clean-up. 
+ */ + if (wait_while_table_is_used(thd, table, extra_func)) + DBUG_RETURN(true); + close_all_tables_for_name(thd, table->s, HA_EXTRA_PREPARE_FOR_RENAME, NULL); + + LEX_STRING old_db_name= { alter_ctx->db, strlen(alter_ctx->db) }; + LEX_STRING old_table_name= + { alter_ctx->table_name, strlen(alter_ctx->table_name) }; + LEX_STRING new_db_name= { alter_ctx->new_db, strlen(alter_ctx->new_db) }; + LEX_STRING new_table_name= + { alter_ctx->new_alias, strlen(alter_ctx->new_alias) }; + (void) rename_table_in_stat_tables(thd, &old_db_name, &old_table_name, + &new_db_name, &new_table_name); + + if (mysql_rename_table(old_db_type, alter_ctx->db, alter_ctx->table_name, + alter_ctx->new_db, alter_ctx->new_alias, 0)) + error= -1; + else if (Table_triggers_list::change_table_name(thd, + alter_ctx->db, + alter_ctx->alias, + alter_ctx->table_name, + alter_ctx->new_db, + alter_ctx->new_alias)) + { + (void) mysql_rename_table(old_db_type, + alter_ctx->new_db, alter_ctx->new_alias, + alter_ctx->db, alter_ctx->table_name, 0); + error= -1; + } + } + + if (!error) + { + error= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); + if (!error) + my_ok(thd); + } + table_list->table= NULL; // For query cache + query_cache_invalidate3(thd, table_list, 0); + + if ((thd->locked_tables_mode == LTM_LOCK_TABLES || + thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)) + { + /* + Under LOCK TABLES we should adjust meta-data locks before finishing + statement. Otherwise we can rely on them being released + along with the implicit commit. + */ + if (alter_ctx->is_table_renamed()) + thd->mdl_context.release_all_locks_for_name(mdl_ticket); + else + mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); + } + DBUG_RETURN(error != 0); +} + + +/** + Alter table + + @param thd Thread handle + @param new_db If there is a RENAME clause + @param new_name If there is a RENAME clause + @param create_info Information from the parsing phase about new + table properties. 
+ @param table_list The table to change. + @param alter_info Lists of fields, keys to be changed, added + or dropped. + @param order_num How many ORDER BY fields has been specified. + @param order List of fields to ORDER BY. + @param ignore Whether we have ALTER IGNORE TABLE + + @retval true Error + @retval false Success + + This is a veery long function and is everything but the kitchen sink :) + It is used to alter a table and not only by ALTER TABLE but also + CREATE|DROP INDEX are mapped on this function. + + When the ALTER TABLE statement just does a RENAME or ENABLE|DISABLE KEYS, + or both, then this function short cuts its operation by renaming + the table and/or enabling/disabling the keys. In this case, the FRM is + not changed, directly by mysql_alter_table. However, if there is a + RENAME + change of a field, or an index, the short cut is not used. + See how `create_list` is used to generate the new FRM regarding the + structure of the fields. The same is done for the indices of the table. + + Altering a table can be done in two ways. The table can be modified + directly using an in-place algorithm, or the changes can be done using + an intermediate temporary table (copy). In-place is the preferred + algorithm as it avoids copying table data. The storage engine + selects which algorithm to use in check_if_supported_inplace_alter() + based on information about the table changes from fill_alter_inplace_info(). 
*/ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, HA_CREATE_INFO *create_info, TABLE_LIST *table_list, Alter_info *alter_info, - uint order_num, ORDER *order, bool ignore, - bool require_online) + uint order_num, ORDER *order, bool ignore) { - TABLE *table, *new_table= 0; - MDL_ticket *mdl_ticket; - MDL_request target_mdl_request; - int error= 0, create_table_mode= C_ALTER_TABLE; - char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN + 1]; - char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias; - char index_file[FN_REFLEN], data_file[FN_REFLEN]; - char path[FN_REFLEN + 1]; - ha_rows copied,deleted; - handlerton *old_db_type, *new_db_type, *save_old_db_type; - enum_alter_table_change_level need_copy_table= ALTER_TABLE_METADATA_ONLY; -#ifdef WITH_PARTITION_STORAGE_ENGINE - TABLE *table_for_fast_alter_partition= NULL; - bool partition_changed= FALSE; -#endif - bool need_lock_for_indexes __attribute__((unused)) = TRUE; - KEY *key_info_buffer; - uint index_drop_count= 0; - uint *index_drop_buffer= NULL; - uint index_add_count= 0; - handler_add_index *add= NULL; - bool pending_inplace_add_index= false; - uint *index_add_buffer= NULL; - uint candidate_key_count= 0; - bool no_pk; - ulong explicit_used_fields= 0; - enum ha_extra_function extra_func= thd->locked_tables_mode - ? HA_EXTRA_NOT_USED - : HA_EXTRA_FORCE_REOPEN; - LEX_STRING old_db_name= { table_list->db, table_list->db_length }; - LEX_STRING old_table_name= { table_list->table_name, - table_list->table_name_length }; DBUG_ENTER("mysql_alter_table"); /* @@ -6275,68 +7605,39 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, it is the case. TODO: this design is obsolete and will be removed. 
*/ - if (table_list && table_list->db && table_list->table_name) - { - int table_kind= 0; + int table_kind= check_if_log_table(table_list->db_length, table_list->db, + table_list->table_name_length, + table_list->table_name, false); - table_kind= check_if_log_table(table_list->db_length, table_list->db, - table_list->table_name_length, - table_list->table_name, 0); - - if (table_kind) + if (table_kind) + { + /* Disable alter of enabled log tables */ + if (logger.is_log_table_enabled(table_kind)) { - /* Disable alter of enabled log tables */ - if (logger.is_log_table_enabled(table_kind)) - { - my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER"); - DBUG_RETURN(TRUE); - } + my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER"); + DBUG_RETURN(true); + } - /* Disable alter of log tables to unsupported engine */ - if ((create_info->used_fields & HA_CREATE_USED_ENGINE) && - (!create_info->db_type || /* unknown engine */ - !(create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES))) - { - my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0), - hton_name(create_info->db_type)->str); - DBUG_RETURN(TRUE); - } + /* Disable alter of log tables to unsupported engine */ + if ((create_info->used_fields & HA_CREATE_USED_ENGINE) && + (!create_info->db_type || /* unknown engine */ + !(create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES))) + { + my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0), + hton_name(create_info->db_type)->str); + DBUG_RETURN(true); + } #ifdef WITH_PARTITION_STORAGE_ENGINE - if (alter_info->flags & ALTER_PARTITION) - { - my_error(ER_WRONG_USAGE, MYF(0), "PARTITION", "log table"); - DBUG_RETURN(TRUE); - } -#endif + if (alter_info->flags & Alter_info::ALTER_PARTITION) + { + my_error(ER_WRONG_USAGE, MYF(0), "PARTITION", "log table"); + DBUG_RETURN(true); } +#endif } - /* - Assign variables table_name, new_name, db, new_db, path, - to simplify further comparisions: we want to see if it's a RENAME - later just by comparing the pointers, avoiding the need for strcmp. 
- */ THD_STAGE_INFO(thd, stage_init); - table_name=table_list->table_name; - alias= (lower_case_table_names == 2) ? table_list->alias : table_name; - db=table_list->db; - if (!new_db || !my_strcasecmp(table_alias_charset, new_db, db)) - new_db= db; - build_table_filename(path, sizeof(path) - 1, db, table_name, "", 0); - - mysql_ha_rm_tables(thd, table_list); - - /* DISCARD/IMPORT TABLESPACE is always alone in an ALTER TABLE */ - if (alter_info->tablespace_op != NO_TABLESPACE_OP) - { - mysql_audit_alter_table(thd, table_list); - - /* Conditionally writes to binlog. */ - bool ret= mysql_discard_or_import_tablespace(thd,table_list, - alter_info->tablespace_op); - DBUG_RETURN(ret); - } /* Code below can handle only base tables so ensure that we won't open a view. @@ -6345,20 +7646,21 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, */ table_list->required_type= FRMTYPE_TABLE; - Alter_table_prelocking_strategy alter_prelocking_strategy(alter_info); + Alter_table_prelocking_strategy alter_prelocking_strategy; DEBUG_SYNC(thd, "alter_table_before_open_tables"); - error= open_and_lock_tables(thd, table_list, FALSE, 0, - &alter_prelocking_strategy); + uint tables_opened; + bool error= open_tables(thd, &table_list, &tables_opened, 0, + &alter_prelocking_strategy); + + DEBUG_SYNC(thd, "alter_opened_table"); if (error) - { - DBUG_RETURN(TRUE); - } + DBUG_RETURN(true); - table= table_list->table; + TABLE *table= table_list->table; table->use_all_columns(); - mdl_ticket= table->mdl_ticket; + MDL_ticket *mdl_ticket= table->mdl_ticket; /* Prohibit changing of the UNION list of a non-temporary MERGE table @@ -6372,100 +7674,73 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, (table->s->tmp_table == NO_TMP_TABLE)) { my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0)); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } + Alter_table_ctx alter_ctx(thd, table_list, tables_opened, new_db, new_name); + + MDL_request target_mdl_request; + /* Check that we are not 
trying to rename to an existing table */ - if (new_name) + if (alter_ctx.is_table_renamed()) { - DBUG_PRINT("info", ("new_db.new_name: '%s'.'%s'", new_db, new_name)); - strmov(new_name_buff,new_name); - strmov(new_alias= new_alias_buff, new_name); - if (lower_case_table_names) + if (table->s->tmp_table != NO_TMP_TABLE) { - if (lower_case_table_names != 2) + if (find_temporary_table(thd, alter_ctx.new_db, alter_ctx.new_name)) { - my_casedn_str(files_charset_info, new_name_buff); - new_alias= new_name; // Create lower case table name + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alter_ctx.new_alias); + DBUG_RETURN(true); } - my_casedn_str(files_charset_info, new_name); } - if (new_db == db && - !my_strcasecmp(table_alias_charset, new_name_buff, table_name)) + else { + MDL_request_list mdl_requests; + MDL_request target_db_mdl_request; + + target_mdl_request.init(MDL_key::TABLE, + alter_ctx.new_db, alter_ctx.new_name, + MDL_EXCLUSIVE, MDL_TRANSACTION); + mdl_requests.push_front(&target_mdl_request); + /* - Source and destination table names are equal: make later check - easier. + If we are moving the table to a different database, we also + need IX lock on the database name so that the target database + is protected by MDL while the table is moved. 
*/ - new_alias= new_name= table_name; - } - else - { - if (table->s->tmp_table != NO_TMP_TABLE) + if (alter_ctx.is_database_changed()) { - if (find_temporary_table(thd,new_db,new_name_buff)) - { - my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name_buff); - DBUG_RETURN(TRUE); - } + target_db_mdl_request.init(MDL_key::SCHEMA, alter_ctx.new_db, "", + MDL_INTENTION_EXCLUSIVE, + MDL_TRANSACTION); + mdl_requests.push_front(&target_db_mdl_request); } - else - { - MDL_request_list mdl_requests; - MDL_request target_db_mdl_request; - - target_mdl_request.init(MDL_key::TABLE, new_db, new_name, - MDL_EXCLUSIVE, MDL_TRANSACTION); - mdl_requests.push_front(&target_mdl_request); - - /* - If we are moving the table to a different database, we also - need IX lock on the database name so that the target database - is protected by MDL while the table is moved. - */ - if (new_db != db) - { - target_db_mdl_request.init(MDL_key::SCHEMA, new_db, "", - MDL_INTENTION_EXCLUSIVE, - MDL_TRANSACTION); - mdl_requests.push_front(&target_db_mdl_request); - } - /* - Global intention exclusive lock must have been already acquired when - table to be altered was open, so there is no need to do it here. - */ - DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, - "", "", - MDL_INTENTION_EXCLUSIVE)); + /* + Global intention exclusive lock must have been already acquired when + table to be altered was open, so there is no need to do it here. + */ + DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, + "", "", + MDL_INTENTION_EXCLUSIVE)); - if (thd->mdl_context.acquire_locks(&mdl_requests, - thd->variables.lock_wait_timeout)) - DBUG_RETURN(TRUE); + if (thd->mdl_context.acquire_locks(&mdl_requests, + thd->variables.lock_wait_timeout)) + DBUG_RETURN(true); - DEBUG_SYNC(thd, "locked_table_name"); - /* - Table maybe does not exist, but we got an exclusive lock - on the name, now we can safely try to find out for sure. 
- */ - build_table_filename(new_name_buff, sizeof(new_name_buff) - 1, - new_db, new_name_buff, reg_ext, 0); - if (!access(new_name_buff, F_OK)) - { - /* Table will be closed in do_command() */ - my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias); - goto err; - } + DEBUG_SYNC(thd, "locked_table_name"); + /* + Table maybe does not exist, but we got an exclusive lock + on the name, now we can safely try to find out for sure. + */ + if (!access(alter_ctx.get_new_filename(), F_OK)) + { + /* Table will be closed in do_command() */ + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alter_ctx.new_alias); + DBUG_RETURN(true); } } } - else - { - new_alias= (lower_case_table_names == 2) ? alias : table_name; - new_name= table_name; - } - old_db_type= table->s->db_type(); if (!create_info->db_type) { #ifdef WITH_PARTITION_STORAGE_ENGINE @@ -6483,161 +7758,78 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, } else #endif - create_info->db_type= old_db_type; + create_info->db_type= table->s->db_type(); } - if (check_engine(thd, new_db, new_name, create_info)) - goto err; - new_db_type= create_info->db_type; + if (check_engine(thd, alter_ctx.new_db, alter_ctx.new_name, create_info)) + DBUG_RETURN(true); - if ((new_db_type != old_db_type || - alter_info->flags & ALTER_PARTITION) && + if ((create_info->db_type != table->s->db_type() || + alter_info->flags & Alter_info::ALTER_PARTITION) && !table->file->can_switch_engines()) { my_error(ER_ROW_IS_REFERENCED, MYF(0)); - goto err; + DBUG_RETURN(true); } /* - If this is an ALTER TABLE and no explicit row type specified reuse - the table's row type. - Note: this is the same as if the row type was specified explicitly and - we must thus set HA_CREATE_USED_ROW_FORMAT! + If this is an ALTER TABLE and no explicit row type specified reuse + the table's row type. + Note : this is the same as if the row type was specified explicitly. 
*/ if (create_info->row_type == ROW_TYPE_NOT_USED) { /* ALTER TABLE without explicit row type */ create_info->row_type= table->s->row_type; - /* - We have to mark the row type as used, as otherwise the engine may - change the row format in update_create_info(). - */ - create_info->used_fields|= HA_CREATE_USED_ROW_FORMAT; - explicit_used_fields|= HA_CREATE_USED_ROW_FORMAT; + } + else + { + /* ALTER TABLE with specific row type */ + create_info->used_fields |= HA_CREATE_USED_ROW_FORMAT; } DBUG_PRINT("info", ("old type: %s new type: %s", - ha_resolve_storage_engine_name(old_db_type), - ha_resolve_storage_engine_name(new_db_type))); - if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED)) + ha_resolve_storage_engine_name(table->s->db_type()), + ha_resolve_storage_engine_name(create_info->db_type))); + if (ha_check_storage_engine_flag(table->s->db_type(), HTON_ALTER_NOT_SUPPORTED)) { DBUG_PRINT("info", ("doesn't support alter")); - my_error(ER_ILLEGAL_HA, MYF(0), hton_name(old_db_type)->str, - db, table_name); - goto err; + my_error(ER_ILLEGAL_HA, MYF(0), hton_name(table->s->db_type())->str, + alter_ctx.db, alter_ctx.table_name); + DBUG_RETURN(true); } - if (ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED)) + if (ha_check_storage_engine_flag(create_info->db_type, + HTON_ALTER_NOT_SUPPORTED)) { DBUG_PRINT("info", ("doesn't support alter")); - my_error(ER_ILLEGAL_HA, MYF(0), hton_name(new_db_type)->str, - new_db, new_name); - goto err; + my_error(ER_ILLEGAL_HA, MYF(0), hton_name(create_info->db_type)->str, + alter_ctx.new_db, alter_ctx.new_name); + DBUG_RETURN(true); } if (table->s->tmp_table == NO_TMP_TABLE) mysql_audit_alter_table(thd, table_list); - + THD_STAGE_INFO(thd, stage_setup); - if (!(alter_info->flags & ~(ALTER_RENAME | ALTER_KEYS_ONOFF)) && + if (!(alter_info->flags & ~(Alter_info::ALTER_RENAME | + Alter_info::ALTER_KEYS_ONOFF)) && + alter_info->requested_algorithm != + Alter_info::ALTER_TABLE_ALGORITHM_COPY && 
!table->s->tmp_table) // no need to touch frm { - if (alter_info->keys_onoff != LEAVE_AS_IS) - { - if (wait_while_table_is_used(thd, table, extra_func, - TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) - goto err; - error= alter_table_manage_keys(table, 0, alter_info->keys_onoff); - table->s->allow_access_to_protected_table(); - } - - if (!error && (new_name != table_name || new_db != db)) - { - THD_STAGE_INFO(thd, stage_rename); - /* - Then do a 'simple' rename of the table. First we need to close all - instances of 'source' table. - Note that if wait_while_table_is_used() returns error here (i.e. if - this thread was killed) then it must be that previous step of - simple rename did nothing and therefore we can safely return - without additional clean-up. - */ - if (wait_while_table_is_used(thd, table, extra_func, - TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) - goto err; - close_all_tables_for_name(thd, table->s, HA_EXTRA_PREPARE_FOR_RENAME); - /* - Then, we want check once again that target table does not exist. - Actually the order of these two steps does not matter since - earlier we took exclusive metadata lock on the target table, so - we do them in this particular order only to be consistent with 5.0, - in which we don't take this lock and where this order really matters. - TODO: Investigate if we need this access() check at all. 
- */ - if (!access(new_name_buff,F_OK)) - { - my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name); - error= -1; - } - else - { - *fn_ext(new_name)=0; - - LEX_STRING new_db_name= { new_db, strlen(new_db) }; - LEX_STRING new_table_name= { new_alias, strlen(new_alias) }; - (void) rename_table_in_stat_tables(thd, &old_db_name, &old_table_name, - &new_db_name, &new_table_name); - - if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias, 0)) - error= -1; - else if (Table_triggers_list::change_table_name(thd, db, - alias, table_name, - new_db, new_alias)) - { - (void) mysql_rename_table(old_db_type, new_db, new_alias, db, - table_name, 0); - error= -1; - } - } - } - - if (error == HA_ERR_WRONG_COMMAND) + // This requires X-lock, no other lock levels supported. + if (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_DEFAULT && + alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE) { - error= 0; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, - ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), - table->file->table_type(), - table->s->db.str, table->s->table_name.str); - } - - if (!error) - { - error= write_bin_log(thd, TRUE, thd->query(), thd->query_length()); - if (!error) - my_ok(thd); - } - else if (error > 0) - { - table->file->print_error(error, MYF(0)); - error= -1; - } - table_list->table= NULL; // For query cache - query_cache_invalidate3(thd, table_list, 0); - - if ((thd->locked_tables_mode == LTM_LOCK_TABLES || - thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)) - { - /* - Under LOCK TABLES we should adjust meta-data locks before finishing - statement. Otherwise we can rely on them being released - along with the implicit commit. 
- */ - if (new_name != table_name || new_db != db) - thd->mdl_context.release_all_locks_for_name(mdl_ticket); - else - mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE); + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0), + "LOCK=NONE/SHARED", "LOCK=EXCLUSIVE"); + DBUG_RETURN(true); } - DBUG_RETURN(error); + bool res= simple_rename_or_index_change(thd, table_list, + alter_info->keys_onoff, + &alter_ctx); + DBUG_RETURN(res); } handle_if_exists_options(thd, table, alter_info); @@ -6647,297 +7839,127 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, /* the IF (NOT) EXISTS options. */ if (alter_info->flags == 0) { - copied= deleted= 0; - goto end_temporary; + my_snprintf(alter_ctx.tmp_name, sizeof(alter_ctx.tmp_name), + ER(ER_INSERT_INFO), 0L, 0L, 0L); + my_ok(thd, 0L, 0L, alter_ctx.tmp_name); + DBUG_RETURN(false); } /* We have to do full alter table. */ #ifdef WITH_PARTITION_STORAGE_ENGINE - if (prep_alter_part_table(thd, table, alter_info, create_info, old_db_type, - &partition_changed, - db, table_name, path, - &table_for_fast_alter_partition)) - goto err; + bool partition_changed= false; + bool fast_alter_partition= false; + { + if (prep_alter_part_table(thd, table, alter_info, create_info, + &alter_ctx, &partition_changed, + &fast_alter_partition)) + { + DBUG_RETURN(true); + } + } #endif - /* - If the old table had partitions and we are doing ALTER TABLE ... - engine= <new_engine>, the new table must preserve the original - partitioning. That means that the new engine is still the - partitioning engine, not the engine specified in the parser. - This is discovered in prep_alter_part_table, which in such case - updates create_info->db_type. - Now we need to update the stack copy of create_info->db_type, - as otherwise we won't be able to correctly move the files of the - temporary table to the result table files. 
- */ - new_db_type= create_info->db_type; - - if (is_index_maintenance_unique (table, alter_info)) - need_copy_table= ALTER_TABLE_DATA_CHANGED; - - if (mysql_prepare_alter_table(thd, table, create_info, alter_info)) - goto err; - - /* Remove markers set for update_create_info */ - create_info->used_fields&= ~explicit_used_fields; - if (need_copy_table == ALTER_TABLE_METADATA_ONLY) - need_copy_table= alter_info->change_level; + if (mysql_prepare_alter_table(thd, table, create_info, alter_info, + &alter_ctx)) + { + DBUG_RETURN(true); + } - set_table_default_charset(thd, create_info, db); + set_table_default_charset(thd, create_info, alter_ctx.db); promote_first_timestamp_column(&alter_info->create_list); - if (thd->variables.old_alter_table - || (table->s->db_type() != create_info->db_type) #ifdef WITH_PARTITION_STORAGE_ENGINE - || partition_changed -#endif - ) - need_copy_table= ALTER_TABLE_DATA_CHANGED; - else + if (fast_alter_partition) { - enum_alter_table_change_level need_copy_table_res; - /* Check how much the tables differ. */ - if (mysql_compare_tables(table, alter_info, - create_info, order_num, - &need_copy_table_res, - &key_info_buffer, - &index_drop_buffer, &index_drop_count, - &index_add_buffer, &index_add_count, - &candidate_key_count)) - goto err; - - DBUG_EXECUTE_IF("alter_table_only_metadata_change", { - if (need_copy_table_res != ALTER_TABLE_METADATA_ONLY) - goto err; }); - DBUG_EXECUTE_IF("alter_table_only_index_change", { - if (need_copy_table_res != ALTER_TABLE_INDEX_CHANGED) - goto err; }); - - if (need_copy_table == ALTER_TABLE_METADATA_ONLY) - need_copy_table= need_copy_table_res; - } - - /* - If there are index changes only, try to do them in-place. "Index - changes only" means also that the handler for the table does not - change. The table is open and locked. The handler can be accessed. 
- */ - if (need_copy_table == ALTER_TABLE_INDEX_CHANGED) - { - int pk_changed= 0; - ulong alter_flags= 0; - ulong needed_inplace_with_read_flags= 0; - ulong needed_inplace_flags= 0; - KEY *key; - uint *idx_p; - uint *idx_end_p; - - alter_flags= table->file->alter_table_flags(alter_info->flags); - DBUG_PRINT("info", ("alter_flags: %lu", alter_flags)); - /* Check dropped indexes. */ - for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count; - idx_p < idx_end_p; - idx_p++) - { - key= table->key_info + *idx_p; - DBUG_PRINT("info", ("index dropped: '%s'", key->name)); - if (key->flags & HA_NOSAME) - { - /* - Unique key. Check for "PRIMARY". - or if dropping last unique key - */ - if ((uint) (key - table->key_info) == table->s->primary_key) - { - DBUG_PRINT("info", ("Dropping primary key")); - /* Primary key. */ - needed_inplace_with_read_flags|= HA_INPLACE_DROP_PK_INDEX_NO_WRITE; - needed_inplace_flags|= HA_INPLACE_DROP_PK_INDEX_NO_READ_WRITE; - pk_changed++; - candidate_key_count--; - } - else - { - KEY_PART_INFO *part_end= key->key_part + key->key_parts; - bool is_candidate_key= true; - - /* Non-primary unique key. */ - needed_inplace_with_read_flags|= - HA_INPLACE_DROP_UNIQUE_INDEX_NO_WRITE; - needed_inplace_flags|= HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE; - - /* - Check if all fields in key are declared - NOT NULL and adjust candidate_key_count - */ - for (KEY_PART_INFO *key_part= key->key_part; - key_part < part_end; - key_part++) - is_candidate_key= - (is_candidate_key && - (! table->field[key_part->fieldnr-1]->maybe_null())); - if (is_candidate_key) - candidate_key_count--; - } - } - else - { - /* Non-unique key. */ - needed_inplace_with_read_flags|= HA_INPLACE_DROP_INDEX_NO_WRITE; - needed_inplace_flags|= HA_INPLACE_DROP_INDEX_NO_READ_WRITE; - } - } - no_pk= ((table->s->primary_key == MAX_KEY) || - (needed_inplace_with_read_flags & - HA_INPLACE_DROP_PK_INDEX_NO_WRITE)); - /* Check added indexes. 
*/ - for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count; - idx_p < idx_end_p; - idx_p++) - { - key= key_info_buffer + *idx_p; - DBUG_PRINT("info", ("index added: '%s'", key->name)); - if (key->flags & HA_NOSAME) - { - /* Unique key */ - - KEY_PART_INFO *part_end= key->key_part + key->key_parts; - bool is_candidate_key= true; - - /* - Check if all fields in key are declared - NOT NULL - */ - for (KEY_PART_INFO *key_part= key->key_part; - key_part < part_end; - key_part++) - is_candidate_key= - (is_candidate_key && - (! table->field[key_part->fieldnr]->maybe_null())); - - /* - Check for "PRIMARY" - or if adding first unique key - defined on non-nullable fields - */ - - if ((!my_strcasecmp(system_charset_info, - key->name, primary_key_name)) || - (no_pk && candidate_key_count == 0 && is_candidate_key)) - { - DBUG_PRINT("info", ("Adding primary key")); - /* Primary key. */ - needed_inplace_with_read_flags|= HA_INPLACE_ADD_PK_INDEX_NO_WRITE; - needed_inplace_flags|= HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE; - pk_changed++; - no_pk= false; - } - else - { - /* Non-primary unique key. */ - needed_inplace_with_read_flags|= HA_INPLACE_ADD_UNIQUE_INDEX_NO_WRITE; - needed_inplace_flags|= HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE; - if (ignore) - { - /* - If ignore is used, we have to remove all duplicate rows, - which require a full table copy. - */ - need_copy_table= ALTER_TABLE_DATA_CHANGED; - pk_changed= 2; // Don't change need_copy_table - break; - } - } - } - else - { - /* Non-unique key. */ - needed_inplace_with_read_flags|= HA_INPLACE_ADD_INDEX_NO_WRITE; - needed_inplace_flags|= HA_INPLACE_ADD_INDEX_NO_READ_WRITE; - } + /* + ALGORITHM and LOCK clauses are generally not allowed by the + parser for operations related to partitioning. + The exceptions are ALTER_PARTITION and ALTER_REMOVE_PARTITIONING. + For consistency, we report ER_ALTER_OPERATION_NOT_SUPPORTED here. 
+ */ + if (alter_info->requested_lock != + Alter_info::ALTER_TABLE_LOCK_DEFAULT) + { + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), + "LOCK=NONE/SHARED/EXCLUSIVE", + ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION), + "LOCK=DEFAULT"); + DBUG_RETURN(true); } - - if ((candidate_key_count > 0) && - (needed_inplace_with_read_flags & HA_INPLACE_DROP_PK_INDEX_NO_WRITE)) + else if (alter_info->requested_algorithm != + Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT) { - /* - Dropped primary key when there is some other unique - not null key that should be converted to primary key - */ - needed_inplace_with_read_flags|= HA_INPLACE_ADD_PK_INDEX_NO_WRITE; - needed_inplace_flags|= HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE; - pk_changed= 2; + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), + "ALGORITHM=COPY/INPLACE", + ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION), + "ALGORITHM=DEFAULT"); + DBUG_RETURN(true); } - DBUG_PRINT("info", - ("needed_inplace_with_read_flags: 0x%lx, needed_inplace_flags: 0x%lx", - needed_inplace_with_read_flags, needed_inplace_flags)); /* - In-place add/drop index is possible only if - the primary key is not added and dropped in the same statement. - Otherwise we have to recreate the table. - need_copy_table is no-zero at this place. - - Also, in-place is not possible if we add a primary key - and drop another key in the same statement. If the drop fails, - we will not be able to revert adding of primary key. + Upgrade from MDL_SHARED_UPGRADABLE to MDL_SHARED_NO_WRITE. + Afterwards it's safe to take the table level lock. */ - if ( pk_changed < 2 ) + if (thd->mdl_context.upgrade_shared_lock(mdl_ticket, MDL_SHARED_NO_WRITE, + thd->variables.lock_wait_timeout) + || lock_tables(thd, table_list, alter_ctx.tables_opened, 0)) { - if ((needed_inplace_with_read_flags & HA_INPLACE_ADD_PK_INDEX_NO_WRITE) && - index_drop_count > 0) - { - /* - Do copy, not in-place ALTER. - Avoid setting ALTER_TABLE_METADATA_ONLY. 
- */ - } - else if ((alter_flags & needed_inplace_with_read_flags) == - needed_inplace_with_read_flags) - { - /* All required in-place flags to allow concurrent reads are present. */ - need_copy_table= ALTER_TABLE_METADATA_ONLY; - need_lock_for_indexes= FALSE; - } - else if ((alter_flags & needed_inplace_flags) == needed_inplace_flags) - { - /* All required in-place flags are present. */ - need_copy_table= ALTER_TABLE_METADATA_ONLY; - } + DBUG_RETURN(true); } - DBUG_PRINT("info", ("need_copy_table: %u need_lock: %d", - need_copy_table, need_lock_for_indexes)); - } - if (need_copy_table == ALTER_TABLE_METADATA_ONLY) - { - char frm_name[FN_REFLEN+1]; - strxnmov(frm_name, sizeof(frm_name), path, reg_ext, NullS); - /* - C_ALTER_TABLE_FRM_ONLY can only be used if old frm exists. - discovering frm-less engines cannot enjoy this optimization. - */ - if (!my_access(frm_name, F_OK)) - create_table_mode= C_ALTER_TABLE_FRM_ONLY; + // In-place execution of ALTER TABLE for partitioning. + DBUG_RETURN(fast_alter_partition_table(thd, table, alter_info, + create_info, table_list, + alter_ctx.db, + alter_ctx.table_name)); } +#endif + /* + Use copy algorithm if: + - old_alter_table system variable is set without in-place requested using + the ALGORITHM clause. + - Or if in-place is impossible for given operation. + - Changes to partitioning which were not handled by fast_alter_part_table() + needs to be handled using table copying algorithm unless the engine + supports auto-partitioning as such engines can do some changes + using in-place API. 
+ */ + if ((thd->variables.old_alter_table && + alter_info->requested_algorithm != + Alter_info::ALTER_TABLE_ALGORITHM_INPLACE) + || is_inplace_alter_impossible(table, create_info, alter_info) #ifdef WITH_PARTITION_STORAGE_ENGINE - if (table_for_fast_alter_partition) + || (partition_changed && + !(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)) +#endif + ) { - DBUG_RETURN(fast_alter_partition_table(thd, table, alter_info, - create_info, table_list, - db, table_name, - table_for_fast_alter_partition)); + if (alter_info->requested_algorithm == + Alter_info::ALTER_TABLE_ALGORITHM_INPLACE) + { + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0), + "ALGORITHM=INPLACE", "ALGORITHM=COPY"); + DBUG_RETURN(true); + } + alter_info->requested_algorithm= Alter_info::ALTER_TABLE_ALGORITHM_COPY; } -#endif - my_snprintf(tmp_name, sizeof(tmp_name), "%s-%lx_%lx", tmp_file_prefix, - current_pid, thd->thread_id); - /* Safety fix for innodb */ - if (lower_case_table_names) - my_casedn_str(files_charset_info, tmp_name); + /* + If the old table had partitions and we are doing ALTER TABLE ... + engine= <new_engine>, the new table must preserve the original + partitioning. This means that the new engine is still the + partitioning engine, not the engine specified in the parser. + This is discovered in prep_alter_part_table, which in such case + updates create_info->db_type. + It's therefore important that the assignment below is done + after prep_alter_part_table. + */ + handlerton *new_db_type= create_info->db_type; + handlerton *old_db_type= table->s->db_type(); + TABLE *new_table= NULL; + ha_rows copied=0,deleted=0; /* Handling of symlinked tables: @@ -6963,292 +7985,346 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, Copy data. Remove old table and symlinks. 
*/ - if (!strcmp(db, new_db)) // Ignore symlink if db changed + char index_file[FN_REFLEN], data_file[FN_REFLEN]; + + if (!alter_ctx.is_database_changed()) { if (create_info->index_file_name) { /* Fix index_file_name to have 'tmp_name' as basename */ - strmov(index_file, tmp_name); + strmov(index_file, alter_ctx.tmp_name); create_info->index_file_name=fn_same(index_file, - create_info->index_file_name, - 1); + create_info->index_file_name, + 1); } if (create_info->data_file_name) { /* Fix data_file_name to have 'tmp_name' as basename */ - strmov(data_file, tmp_name); + strmov(data_file, alter_ctx.tmp_name); create_info->data_file_name=fn_same(data_file, - create_info->data_file_name, - 1); + create_info->data_file_name, + 1); } } else + { + /* Ignore symlink if db is changed. */ create_info->data_file_name=create_info->index_file_name=0; + } DEBUG_SYNC(thd, "alter_table_before_create_table_no_lock"); + /* We can abort alter table for any table type */ + thd->abort_on_warning= !ignore && thd->is_strict_mode(); + /* - Create a table with a temporary name. - With C_ALTER_TABLE_FRM_ONLY this creates a .frm file only and - we keep the original row format. + Create .FRM for new version of table with a temporary name. We don't log the statement, it will be logged later. + + Keep information about keys in newly created table as it + will be used later to construct Alter_inplace_info object + and by fill_alter_inplace_info() call. */ - if (need_copy_table == ALTER_TABLE_METADATA_ONLY) - { - DBUG_ASSERT(create_table_mode == C_ALTER_TABLE_FRM_ONLY); - /* Ensure we keep the original table format */ - create_info->table_options= ((create_info->table_options & - ~HA_OPTION_PACK_RECORD) | - (table->s->db_create_options & - HA_OPTION_PACK_RECORD)); - } + KEY *key_info; + uint key_count; + /* + Remember if the new definition has new VARCHAR column; + create_info->varchar will be reset in create_table_impl()/ + mysql_prepare_create_table(). 
+ */ + bool varchar= create_info->varchar; + LEX_CUSTRING frm= {0,0}; + tmp_disable_binlog(thd); create_info->options|=HA_CREATE_TMP_ALTER; - error= mysql_create_table_no_lock(thd, new_db, tmp_name, create_info, - alter_info, NULL, create_table_mode); + error= create_table_impl(thd, alter_ctx.new_db, alter_ctx.tmp_name, + alter_ctx.get_tmp_path(), + create_info, alter_info, + C_ALTER_TABLE_FRM_ONLY, NULL, + &key_info, &key_count, &frm); reenable_binlog(thd); + thd->abort_on_warning= false; if (error) - goto err; + { + my_free(const_cast<uchar*>(frm.str)); + DBUG_RETURN(true); + } + + /* Remember that we have not created table in storage engine yet. */ + bool no_ha_table= true; - /* Open the table if we need to copy the data. */ - DBUG_PRINT("info", ("need_copy_table: %u", need_copy_table)); - if (need_copy_table != ALTER_TABLE_METADATA_ONLY) + if (alter_info->requested_algorithm != Alter_info::ALTER_TABLE_ALGORITHM_COPY) { - if (table->s->tmp_table) + Alter_inplace_info ha_alter_info(create_info, alter_info, + key_info, key_count, +#ifdef WITH_PARTITION_STORAGE_ENGINE + thd->work_part_info, +#else + NULL, +#endif + ignore); + TABLE *altered_table= NULL; + bool use_inplace= true; + + /* Fill the Alter_inplace_info structure. */ + if (fill_alter_inplace_info(thd, table, varchar, &ha_alter_info)) + goto err_new_table_cleanup; + + if (ha_alter_info.handler_flags == 0) { - Open_table_context ot_ctx(thd, (MYSQL_OPEN_IGNORE_FLUSH | - MYSQL_OPEN_FOR_REPAIR | - MYSQL_LOCK_IGNORE_TIMEOUT)); - TABLE_LIST tbl; - bzero((void*) &tbl, sizeof(tbl)); - tbl.db= new_db; - tbl.table_name= tbl.alias= tmp_name; - /* Table is in thd->temporary_tables */ - (void) open_table(thd, &tbl, thd->mem_root, &ot_ctx); - new_table= tbl.table; + /* + No-op ALTER, no need to call handler API functions. + + If this code path is entered for an ALTER statement that + should not be a real no-op, new handler flags should be added + and fill_alter_inplace_info() adjusted. 
+ + Note that we can end up here if an ALTER statement has clauses + that cancel each other out (e.g. ADD/DROP identically index). + + Also note that we ignore the LOCK clause here. + + TODO don't create the frm in the first place + */ + deletefrm(alter_ctx.get_tmp_path()); + my_free(const_cast<uchar*>(frm.str)); + goto end_inplace; + } + + // We assume that the table is non-temporary. + DBUG_ASSERT(!table->s->tmp_table); + + if (!(altered_table= open_table_uncached(thd, new_db_type, + alter_ctx.get_tmp_path(), + alter_ctx.new_db, + alter_ctx.tmp_name, + true, false))) + goto err_new_table_cleanup; + + /* Set markers for fields in TABLE object for altered table. */ + update_altered_table(ha_alter_info, altered_table); + + /* + Mark all columns in 'altered_table' as used to allow usage + of its record[0] buffer and Field objects during in-place + ALTER TABLE. + */ + altered_table->column_bitmaps_set_no_signal(&altered_table->s->all_set, + &altered_table->s->all_set); + + // Ask storage engine whether to use copy or in-place + enum_alter_inplace_result inplace_supported= + table->file->check_if_supported_inplace_alter(altered_table, + &ha_alter_info); + + switch (inplace_supported) { + case HA_ALTER_INPLACE_EXCLUSIVE_LOCK: + // If SHARED lock and no particular algorithm was requested, use COPY. + if (alter_info->requested_lock == + Alter_info::ALTER_TABLE_LOCK_SHARED && + alter_info->requested_algorithm == + Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT) + { + use_inplace= false; + } + // Otherwise, if weaker lock was requested, report errror. 
+ else if (alter_info->requested_lock == + Alter_info::ALTER_TABLE_LOCK_NONE || + alter_info->requested_lock == + Alter_info::ALTER_TABLE_LOCK_SHARED) + { + ha_alter_info.report_unsupported_error("LOCK=NONE/SHARED", + "LOCK=EXCLUSIVE"); + close_temporary_table(thd, altered_table, true, false); + goto err_new_table_cleanup; + } + break; + case HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE: + case HA_ALTER_INPLACE_SHARED_LOCK: + // If weaker lock was requested, report errror. + if (alter_info->requested_lock == + Alter_info::ALTER_TABLE_LOCK_NONE) + { + ha_alter_info.report_unsupported_error("LOCK=NONE", "LOCK=SHARED"); + close_temporary_table(thd, altered_table, true, false); + goto err_new_table_cleanup; + } + break; + case HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE: + case HA_ALTER_INPLACE_NO_LOCK: + break; + case HA_ALTER_INPLACE_NOT_SUPPORTED: + // If INPLACE was requested, report error. + if (alter_info->requested_algorithm == + Alter_info::ALTER_TABLE_ALGORITHM_INPLACE) + { + ha_alter_info.report_unsupported_error("ALGORITHM=INPLACE", + "ALGORITHM=COPY"); + close_temporary_table(thd, altered_table, true, false); + goto err_new_table_cleanup; + } + // COPY with LOCK=NONE is not supported, no point in trying. 
+ if (alter_info->requested_lock == + Alter_info::ALTER_TABLE_LOCK_NONE) + { + ha_alter_info.report_unsupported_error("LOCK=NONE", "LOCK=SHARED"); + close_temporary_table(thd, altered_table, true, false); + goto err_new_table_cleanup; + } + // Otherwise use COPY + use_inplace= false; + break; + case HA_ALTER_ERROR: + default: + close_temporary_table(thd, altered_table, true, false); + goto err_new_table_cleanup; + } + + if (use_inplace) + { + my_free(const_cast<uchar*>(frm.str)); + if (mysql_inplace_alter_table(thd, table_list, table, + altered_table, + &ha_alter_info, + inplace_supported, &target_mdl_request, + &alter_ctx)) + { + DBUG_RETURN(true); + } + + goto end_inplace; } else { - char path[FN_REFLEN + 1]; - /* table is a normal table: Create temporary table in same directory */ - build_table_filename(path, sizeof(path) - 1, new_db, tmp_name, "", - FN_IS_TMP); - /* Open our intermediate table. */ - new_table= open_table_uncached(thd, new_db_type, path, - new_db, tmp_name, TRUE); + close_temporary_table(thd, altered_table, true, false); + } + } + + /* ALTER TABLE using copy algorithm. */ + + /* Check if ALTER TABLE is compatible with foreign key definitions. */ + if (fk_prepare_copy_alter_table(thd, table, alter_info, &alter_ctx)) + goto err_new_table_cleanup; + + if (!table->s->tmp_table) + { + // COPY algorithm doesn't work with concurrent writes. + if (alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_NONE) + { + my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0), + "LOCK=NONE", + ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY), + "LOCK=SHARED"); + goto err_new_table_cleanup; } - if (!new_table) + + // If EXCLUSIVE lock is requested, upgrade already. + if (alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE && + wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN)) goto err_new_table_cleanup; + /* - Note: In case of MERGE table, we do not attach children. We do not - copy data for MERGE tables. 
Only the children have data. + Otherwise upgrade to SHARED_NO_WRITE. + Note that under LOCK TABLES, we will already have SHARED_NO_READ_WRITE. */ + if (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE && + thd->mdl_context.upgrade_shared_lock(mdl_ticket, MDL_SHARED_NO_WRITE, + thd->variables.lock_wait_timeout)) + goto err_new_table_cleanup; + + DEBUG_SYNC(thd, "alter_table_copy_after_lock_upgrade"); } - /* Check if we can do the ALTER TABLE as online */ - if (require_online) + // It's now safe to take the table level lock. + if (lock_tables(thd, table_list, alter_ctx.tables_opened, 0)) + goto err_new_table_cleanup; + { - if (index_add_count || index_drop_count || - (new_table && - !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER))) - { - my_error(ER_CANT_DO_ONLINE, MYF(0), "ALTER"); + if (ha_create_table(thd, alter_ctx.get_tmp_path(), + alter_ctx.new_db, alter_ctx.tmp_name, + create_info, &frm)) goto err_new_table_cleanup; + + /* Mark that we have created table in storage engine. */ + no_ha_table= false; + + if (create_info->tmp_table()) + { + if (!open_table_uncached(thd, new_db_type, + alter_ctx.get_tmp_path(), + alter_ctx.new_db, alter_ctx.tmp_name, + true, true)) + goto err_new_table_cleanup; } } + + /* Open the table since we need to copy the data. */ + if (table->s->tmp_table != NO_TMP_TABLE) + { + TABLE_LIST tbl; + tbl.init_one_table(alter_ctx.new_db, strlen(alter_ctx.new_db), + alter_ctx.tmp_name, strlen(alter_ctx.tmp_name), + alter_ctx.tmp_name, TL_READ_NO_INSERT); + /* Table is in thd->temporary_tables */ + (void) open_temporary_table(thd, &tbl); + new_table= tbl.table; + } + else + { + /* table is a normal table: Create temporary table in same directory */ + /* Open our intermediate table. 
*/ + new_table= open_table_uncached(thd, new_db_type, alter_ctx.get_tmp_path(), + alter_ctx.new_db, alter_ctx.tmp_name, + true, true); + } + if (!new_table) + goto err_new_table_cleanup; + /* + Note: In case of MERGE table, we do not attach children. We do not + copy data for MERGE tables. Only the children have data. + */ + /* Copy the data if necessary. */ thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields thd->cuted_fields=0L; - copied=deleted=0; /* We do not copy data for MERGE tables. Only the children have data. MERGE tables have HA_NO_COPY_ON_ALTER set. */ - if (new_table && !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER)) + if (!(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER)) { new_table->next_number_field=new_table->found_next_number_field; + THD_STAGE_INFO(thd, stage_copy_to_tmp_table); DBUG_EXECUTE_IF("abort_copy_table", { my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0)); goto err_new_table_cleanup; }); - error= copy_data_between_tables(thd, table, new_table, - alter_info->create_list, ignore, - order_num, order, &copied, &deleted, - alter_info->keys_onoff, - alter_info->error_if_not_empty); + if (copy_data_between_tables(thd, table, new_table, + alter_info->create_list, ignore, + order_num, order, &copied, &deleted, + alter_info->keys_onoff, + &alter_ctx)) + goto err_new_table_cleanup; } else { - /* - Ensure that we will upgrade the metadata lock if - handler::enable/disable_indexes() will be called. 
- */ - if (alter_info->keys_onoff != LEAVE_AS_IS || - table->file->indexes_are_disabled()) - need_lock_for_indexes= true; - if (!table->s->tmp_table && need_lock_for_indexes && - wait_while_table_is_used(thd, table, extra_func, - TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) + /* Should be MERGE only */ + DBUG_ASSERT(new_table->file->ht->db_type == DB_TYPE_MRG_MYISAM); + if (!table->s->tmp_table && + wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN)) goto err_new_table_cleanup; THD_STAGE_INFO(thd, stage_manage_keys); DEBUG_SYNC(thd, "alter_table_manage_keys"); alter_table_manage_keys(table, table->file->indexes_are_disabled(), alter_info->keys_onoff); - error= trans_commit_stmt(thd); - if (trans_commit_implicit(thd)) - error= 1; - /* - If the table was locked, allow one to still run SHOW commands against it - */ - if (table->s->protected_against_usage()) - table->s->allow_access_to_protected_table(); - } - thd->count_cuted_fields= CHECK_FIELD_IGNORE; - - if (error) - goto err_new_table_cleanup; - - /* If we did not need to copy, we might still need to add/drop indexes. */ - if (! new_table) - { - uint *key_numbers; - uint *keyno_p; - KEY *key_info; - KEY *key; - uint *idx_p; - uint *idx_end_p; - KEY_PART_INFO *key_part; - KEY_PART_INFO *part_end; - DBUG_PRINT("info", ("No new_table, checking add/drop index")); - - table->file->ha_prepare_for_alter(); - if (index_add_count) - { - /* The add_index() method takes an array of KEY structs. */ - key_info= (KEY*) thd->alloc(sizeof(KEY) * index_add_count); - key= key_info; - for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count; - idx_p < idx_end_p; - idx_p++, key++) - { - /* Copy the KEY struct. */ - *key= key_info_buffer[*idx_p]; - /* Fix the key parts. */ - part_end= key->key_part + key->key_parts; - for (key_part= key->key_part; key_part < part_end; key_part++) - key_part->field= table->field[key_part->fieldnr]; - } - /* Add the indexes. 
*/ - if ((error= table->file->add_index(table, key_info, index_add_count, - &add))) - { - /* Only report error if handler has not already reported an error */ - if (!thd->is_error()) - { - /* - Exchange the key_info for the error message. If we exchange - key number by key name in the message later, we need correct info. - */ - KEY *save_key_info= table->key_info; - table->key_info= key_info; - table->file->print_error(error, MYF(0)); - table->key_info= save_key_info; - } - goto err_new_table_cleanup; - } - pending_inplace_add_index= true; - } - /*end of if (index_add_count)*/ - - if (index_drop_count) - { - /* Currently we must finalize add index if we also drop indexes */ - if (pending_inplace_add_index) - { - /* Committing index changes needs exclusive metadata lock. */ - DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, - table_list->db, - table_list->table_name, - MDL_EXCLUSIVE)); - if ((error= table->file->final_add_index(add, true))) - { - table->file->print_error(error, MYF(0)); - goto err_new_table_cleanup; - } - pending_inplace_add_index= false; - } - /* The prepare_drop_index() method takes an array of key numbers. */ - key_numbers= (uint*) thd->alloc(sizeof(uint) * index_drop_count); - keyno_p= key_numbers; - /* Get the number of each key. */ - for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count; - idx_p < idx_end_p; - idx_p++, keyno_p++) - *keyno_p= *idx_p; - /* - Tell the handler to prepare for drop indexes. - This re-numbers the indexes to get rid of gaps. - */ - error= table->file->prepare_drop_index(table, key_numbers, - index_drop_count); - if (!error) - { - /* Tell the handler to finally drop the indexes. */ - error= table->file->final_drop_index(table); - } - - if (error) - { - table->file->print_error(error, MYF(0)); - if (index_add_count) // Drop any new indexes added. - { - /* - Temporarily set table-key_info to include information about the - indexes added above that we now need to drop. 
- */ - KEY *save_key_info= table->key_info; - table->key_info= key_info_buffer; - if ((error= table->file->prepare_drop_index(table, index_add_buffer, - index_add_count))) - table->file->print_error(error, MYF(0)); - else if ((error= table->file->final_drop_index(table))) - table->file->print_error(error, MYF(0)); - table->key_info= save_key_info; - } - - /* - Mark this TABLE instance as stale to avoid - out-of-sync index information. - */ - table->m_needs_reopen= true; - goto err_new_table_cleanup; - } - } - /*end of if (index_drop_count)*/ - - /* - The final .frm file is already created as a temporary file - and will be renamed to the original table name later. - */ - - /* Need to commit before a table is unlocked (NDB requirement). */ - DBUG_PRINT("info", ("Committing before unlocking table")); if (trans_commit_stmt(thd) || trans_commit_implicit(thd)) goto err_new_table_cleanup; } - /*end of if (! new_table) for add/drop index*/ - - DBUG_ASSERT(error == 0); + thd->count_cuted_fields= CHECK_FIELD_IGNORE; if (table->s->tmp_table != NO_TMP_TABLE) { - /* - In-place operations are not supported for temporary tables, so - we don't have to call final_add_index() in this case. The assert - verifies that in-place add index has not been done. 
- */ - DBUG_ASSERT(!pending_inplace_add_index); /* Close lock if this is a transactional table */ if (thd->lock) { @@ -7256,7 +8332,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, thd->locked_tables_mode != LTM_PRELOCKED_UNDER_LOCK_TABLES) { mysql_unlock_tables(thd, thd->lock); - thd->lock=0; + thd->lock= NULL; } else { @@ -7268,14 +8344,16 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, } } /* Remove link to old table and rename the new one */ - close_temporary_table(thd, table, 1, 1); + close_temporary_table(thd, table, true, true); /* Should pass the 'new_name' as we store table name in the cache */ - if (rename_temporary_table(thd, new_table, new_db, new_name)) + if (rename_temporary_table(thd, new_table, + alter_ctx.new_db, alter_ctx.new_name)) goto err_new_table_cleanup; /* We don't replicate alter table statement on temporary tables */ if (!thd->is_current_stmt_binlog_format_row() && - write_bin_log(thd, TRUE, thd->query(), thd->query_length())) - DBUG_RETURN(TRUE); + write_bin_log(thd, true, thd->query(), thd->query_length())) + DBUG_RETURN(true); + my_free(const_cast<uchar*>(frm.str)); goto end_temporary; } @@ -7284,11 +8362,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, not delete it! Even altough MERGE tables do not have their children attached here it is safe to call close_temporary_table(). 
*/ - if (new_table) - { - close_temporary_table(thd, new_table, 1, 0); - new_table= 0; - } + close_temporary_table(thd, new_table, true, false); + new_table= NULL; + DEBUG_SYNC(thd, "alter_table_before_rename_result_table"); /* @@ -7309,243 +8385,164 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, */ THD_STAGE_INFO(thd, stage_rename_result_table); - my_snprintf(old_name, sizeof(old_name), "%s2-%lx-%lx", tmp_file_prefix, - current_pid, thd->thread_id); - if (lower_case_table_names) - my_casedn_str(files_charset_info, old_name); - if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME, - TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)) - { - if (pending_inplace_add_index) - { - pending_inplace_add_index= false; - table->file->final_add_index(add, false); - } - // Mark this TABLE instance as stale to avoid out-of-sync index information. - table->m_needs_reopen= true; + if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME)) goto err_new_table_cleanup; - } - if (pending_inplace_add_index) - { - pending_inplace_add_index= false; - DBUG_EXECUTE_IF("alter_table_rollback_new_index", { - table->file->final_add_index(add, false); - my_error(ER_UNKNOWN_ERROR, MYF(0)); - goto err_new_table_cleanup; - }); - if ((error= table->file->final_add_index(add, true))) - { - table->file->print_error(error, MYF(0)); - goto err_new_table_cleanup; - } - } close_all_tables_for_name(thd, table->s, - new_name != table_name || new_db != db ? - HA_EXTRA_PREPARE_FOR_RENAME : - HA_EXTRA_NOT_USED); - - error=0; - table_list->table= table= 0; /* Safety */ - save_old_db_type= old_db_type; + alter_ctx.is_table_renamed() ? + HA_EXTRA_PREPARE_FOR_RENAME: + HA_EXTRA_NOT_USED, + NULL); + table_list->table= table= NULL; /* Safety */ + my_free(const_cast<uchar*>(frm.str)); /* - This leads to the storage engine (SE) not being notified for renames in - mysql_rename_table(), because we just juggle with the FRM and nothing - more. 
If we have an intermediate table, then we notify the SE that - it should become the actual table. Later, we will recycle the old table. - However, in case of ALTER TABLE RENAME there might be no intermediate - table. This is when the old and new tables are compatible, according to - mysql_compare_table(). Then, we need one additional call to - mysql_rename_table() with flag NO_FRM_RENAME, which does nothing else but - actual rename in the SE and the FRM is not touched. Note that, if the - table is renamed and the SE is also changed, then an intermediate table - is created and the additional call will not take place. + Rename the old table to temporary name to have a backup in case + anything goes wrong while renaming the new table. */ - - if (new_name != table_name || new_db != db) + char backup_name[32]; + my_snprintf(backup_name, sizeof(backup_name), "%s2-%lx-%lx", tmp_file_prefix, + current_pid, thd->thread_id); + if (lower_case_table_names) + my_casedn_str(files_charset_info, backup_name); + if (mysql_rename_table(old_db_type, alter_ctx.db, alter_ctx.table_name, + alter_ctx.db, backup_name, FN_TO_IS_TMP)) { - LEX_STRING new_db_name= { new_db, strlen(new_db) }; - LEX_STRING new_table_name= { new_name, strlen(new_name) }; - (void) rename_table_in_stat_tables(thd, &old_db_name, &old_table_name, - &new_db_name, &new_table_name); + // Rename to temporary name failed, delete the new table, abort ALTER. + (void) quick_rm_table(thd, new_db_type, alter_ctx.new_db, + alter_ctx.tmp_name, FN_IS_TMP); + goto err_with_mdl; } - if (need_copy_table == ALTER_TABLE_METADATA_ONLY) - { - DBUG_ASSERT(new_db_type == old_db_type); - /* This type cannot happen in regular ALTER. 
*/ - new_db_type= old_db_type= NULL; - } - if (mysql_rename_table(old_db_type, db, table_name, db, old_name, - FN_TO_IS_TMP)) - { - error=1; - (void) quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP); - } - else if (mysql_rename_table(new_db_type, new_db, tmp_name, new_db, - new_alias, FN_FROM_IS_TMP)) + // Rename the new table to the correct name. + if (mysql_rename_table(new_db_type, alter_ctx.new_db, alter_ctx.tmp_name, + alter_ctx.new_db, alter_ctx.new_alias, + FN_FROM_IS_TMP)) { - /* Try to get everything back. */ - error= 1; - (void) quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP); - (void) mysql_rename_table(old_db_type, db, old_name, db, alias, - FN_FROM_IS_TMP); + // Rename failed, delete the temporary table. + (void) quick_rm_table(thd, new_db_type, alter_ctx.new_db, + alter_ctx.tmp_name, FN_IS_TMP); + // Restore the backup of the original table to the old name. + (void) mysql_rename_table(old_db_type, alter_ctx.db, backup_name, + alter_ctx.db, alter_ctx.alias, FN_FROM_IS_TMP); + goto err_with_mdl; } - else if (new_name != table_name || new_db != db) + + // Check if we renamed the table and if so update trigger files. + if (alter_ctx.is_table_renamed()) { - if (need_copy_table == ALTER_TABLE_METADATA_ONLY && - mysql_rename_table(save_old_db_type, db, table_name, new_db, - new_alias, NO_FRM_RENAME)) + if (Table_triggers_list::change_table_name(thd, + alter_ctx.db, + alter_ctx.alias, + alter_ctx.table_name, + alter_ctx.new_db, + alter_ctx.new_alias)) { - /* Try to get everything back. */ - error= 1; - (void) quick_rm_table(new_db_type, new_db, new_alias, 0); - (void) mysql_rename_table(old_db_type, db, old_name, db, alias, - FN_FROM_IS_TMP); - } - else if (Table_triggers_list::change_table_name(thd, db, alias, - table_name, new_db, - new_alias)) - { - /* Try to get everything back. 
*/ - error= 1; - (void) quick_rm_table(new_db_type, new_db, new_alias, 0); - (void) mysql_rename_table(old_db_type, db, old_name, db, - alias, FN_FROM_IS_TMP); - /* - If we were performing "fast"/in-place ALTER TABLE we also need - to restore old name of table in storage engine as a separate - step, as the above rename affects .FRM only. - */ - if (need_copy_table == ALTER_TABLE_METADATA_ONLY) - { - (void) mysql_rename_table(save_old_db_type, new_db, new_alias, - db, table_name, NO_FRM_RENAME); - } + // Rename succeeded, delete the new table. + (void) quick_rm_table(thd, new_db_type, + alter_ctx.new_db, alter_ctx.new_alias, 0); + // Restore the backup of the original table to the old name. + (void) mysql_rename_table(old_db_type, alter_ctx.db, backup_name, + alter_ctx.db, alter_ctx.alias, FN_FROM_IS_TMP); + goto err_with_mdl; } + rename_table_in_stat_tables(thd, alter_ctx.db,alter_ctx.alias, + alter_ctx.new_db, alter_ctx.new_alias); } - if (! error) - (void) quick_rm_table(old_db_type, db, old_name, FN_IS_TMP); - - if (error) - { - /* This shouldn't happen. But let us play it safe. */ - goto err_with_mdl; - } - - if (need_copy_table == ALTER_TABLE_METADATA_ONLY) + // ALTER TABLE succeeded, delete the backup of the old table. + if (quick_rm_table(thd, old_db_type, alter_ctx.db, backup_name, FN_IS_TMP)) { /* - Now we have to inform handler that new .FRM file is in place. - To do this we need to obtain a handler object for it. - NO need to tamper with MERGE tables. The real open is done later. + The fact that deletion of the backup failed is not critical + error, but still worth reporting as it might indicate serious + problem with server. 
*/ - Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN); - TABLE_LIST temp_table_list; - TABLE_LIST *t_table_list; - if (new_name != table_name || new_db != db) - { - temp_table_list.init_one_table(new_db, strlen(new_db), - new_name, strlen(new_name), - new_name, TL_READ_NO_INSERT); - temp_table_list.mdl_request.ticket= target_mdl_request.ticket; - t_table_list= &temp_table_list; - } - else - { - /* - Under LOCK TABLES, we have a different mdl_lock_ticket - points to a different instance than the one set initially - to request the lock. - */ - table_list->mdl_request.ticket= mdl_ticket; - t_table_list= table_list; - } - if (open_table(thd, t_table_list, thd->mem_root, &ot_ctx)) - { - goto err_with_mdl; - } - - /* Tell the handler that a new frm file is in place. */ - error= t_table_list->table->file->ha_create_partitioning_metadata(path, NULL, - CHF_INDEX_FLAG); + goto err_with_mdl; + } - DBUG_ASSERT(thd->open_tables == t_table_list->table); - close_thread_table(thd, &thd->open_tables); - t_table_list->table= NULL; +end_inplace: - if (error) - goto err_with_mdl; - } if (thd->locked_tables_list.reopen_tables(thd)) goto err_with_mdl; THD_STAGE_INFO(thd, stage_end); - DBUG_EXECUTE_IF("sleep_alter_before_main_binlog", my_sleep(6000000);); DEBUG_SYNC(thd, "alter_table_before_main_binlog"); ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE, thd->query(), thd->query_length(), - db, table_name); + alter_ctx.db, alter_ctx.table_name); DBUG_ASSERT(!(mysql_bin_log.is_open() && thd->is_current_stmt_binlog_format_row() && (create_info->tmp_table()))); - if (write_bin_log(thd, TRUE, thd->query(), thd->query_length())) - DBUG_RETURN(TRUE); + if (write_bin_log(thd, true, thd->query(), thd->query_length())) + DBUG_RETURN(true); - table_list->table=0; // For query cache - query_cache_invalidate3(thd, table_list, 0); + if (ha_check_storage_engine_flag(old_db_type, HTON_FLUSH_AFTER_RENAME)) + { + /* + For the alter table to be properly flushed to the logs, we + have to 
open the new table. If not, we get a problem on server + shutdown. But we do not need to attach MERGE children. + */ + TABLE *t_table; + t_table= open_table_uncached(thd, new_db_type, alter_ctx.get_new_path(), + alter_ctx.new_db, alter_ctx.new_name, + false, true); + if (t_table) + intern_close_table(t_table); + else + sql_print_warning("Could not open table %s.%s after rename\n", + alter_ctx.new_db, alter_ctx.table_name); + ha_flush_logs(old_db_type); + } + table_list->table= NULL; // For query cache + query_cache_invalidate3(thd, table_list, false); if (thd->locked_tables_mode == LTM_LOCK_TABLES || thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES) { - if ((new_name != table_name || new_db != db)) + if (alter_ctx.is_table_renamed()) thd->mdl_context.release_all_locks_for_name(mdl_ticket); else - mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE); + mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); } end_temporary: - my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO), + my_snprintf(alter_ctx.tmp_name, sizeof(alter_ctx.tmp_name), + ER(ER_INSERT_INFO), (ulong) (copied + deleted), (ulong) deleted, - (ulong) thd->warning_info->statement_warn_count()); - my_ok(thd, copied + deleted, 0L, tmp_name); - DBUG_RETURN(FALSE); + (ulong) thd->get_stmt_da()->current_statement_warn_count()); + my_ok(thd, copied + deleted, 0L, alter_ctx.tmp_name); + DBUG_RETURN(false); err_new_table_cleanup: + my_free(const_cast<uchar*>(frm.str)); if (new_table) { /* close_temporary_table() frees the new_table pointer. */ - close_temporary_table(thd, new_table, 1, 1); + close_temporary_table(thd, new_table, true, true); } else - (void) quick_rm_table(new_db_type, new_db, tmp_name, - create_table_mode == C_ALTER_TABLE_FRM_ONLY ? - FN_IS_TMP | FRM_ONLY : FN_IS_TMP); + (void) quick_rm_table(thd, new_db_type, + alter_ctx.new_db, alter_ctx.tmp_name, + (FN_IS_TMP | (no_ha_table ? 
NO_HA_TABLE : 0))); -err: -#ifdef WITH_PARTITION_STORAGE_ENGINE - /* If prep_alter_part_table created an intermediate table, destroy it. */ - if (table_for_fast_alter_partition) - close_temporary(table_for_fast_alter_partition, 1, 0); -#endif /* WITH_PARTITION_STORAGE_ENGINE */ /* No default value was provided for a DATE/DATETIME field, the current sql_mode doesn't allow the '0000-00-00' value and the table to be altered isn't empty. Report error here. */ - if (alter_info->error_if_not_empty && - thd->warning_info->current_row_for_warning()) + if (alter_ctx.error_if_not_empty && + thd->get_stmt_da()->current_row_for_warning()) { const char *f_val= 0; enum enum_mysql_timestamp_type t_type= MYSQL_TIMESTAMP_DATE; - switch (alter_info->datetime_field->sql_type) + switch (alter_ctx.datetime_field->sql_type) { case MYSQL_TYPE_DATE: case MYSQL_TYPE_NEWDATE: @@ -7562,14 +8559,14 @@ err: DBUG_ASSERT(0); } bool save_abort_on_warning= thd->abort_on_warning; - thd->abort_on_warning= TRUE; - make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->abort_on_warning= true; + make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN, f_val, strlength(f_val), t_type, - alter_info->datetime_field->field_name); + alter_ctx.datetime_field->field_name); thd->abort_on_warning= save_abort_on_warning; } - DBUG_RETURN(TRUE); + DBUG_RETURN(true); err_with_mdl: /* @@ -7580,13 +8577,10 @@ err_with_mdl: */ thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0); thd->mdl_context.release_all_locks_for_name(mdl_ticket); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } -/* Copy all rows from one table to another */ - - /** Prepare the transaction for the alter table's copy phase. 
@@ -7635,16 +8629,14 @@ bool mysql_trans_commit_alter_copy_data(THD *thd) static int -copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, - List<Create_field> &create, - bool ignore, +copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, + List<Create_field> &create, bool ignore, uint order_num, ORDER *order, - ha_rows *copied, - ha_rows *deleted, - enum enum_enable_or_disable keys_onoff, - bool error_if_not_empty) + ha_rows *copied, ha_rows *deleted, + Alter_info::enum_enable_or_disable keys_onoff, + Alter_table_ctx *alter_ctx) { - int error= 1, errpos= 0; + int error= 1; Copy_field *copy= NULL, *copy_end; ha_rows found_count= 0, delete_count= 0; uint length= 0; @@ -7658,8 +8650,6 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, bool auto_increment_field_copied= 0; ulonglong save_sql_mode= thd->variables.sql_mode; ulonglong prev_insert_id, time_to_report_progress; - List_iterator<Create_field> it(create); - Create_field *def; Field **dfield_ptr= to->default_field; DBUG_ENTER("copy_data_between_tables"); @@ -7667,16 +8657,14 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, thd_progress_init(thd, 2 + test(order)); if (mysql_trans_prepare_alter_copy_data(thd)) - goto err; - errpos=1; + DBUG_RETURN(-1); if (!(copy= new Copy_field[to->s->fields])) - goto err; /* purecov: inspected */ + DBUG_RETURN(-1); /* purecov: inspected */ /* We need external lock before we can disable/enable keys */ if (to->file->ha_external_lock(thd, F_WRLCK)) - goto err; - errpos= 2; + DBUG_RETURN(-1); alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff); @@ -7686,8 +8674,9 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, from->file->info(HA_STATUS_VARIABLE); to->file->ha_start_bulk_insert(from->file->stats.records, ignore ? 
0 : HA_CREATE_UNIQUE_INDEX_BY_SORT); - errpos= 3; + List_iterator<Create_field> it(create); + Create_field *def; copy_end=copy; to->s->default_fields= 0; for (Field **ptr=to->field ; *ptr ; ptr++) @@ -7736,7 +8725,7 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, my_snprintf(warn_buff, sizeof(warn_buff), "ORDER BY ignored as there is a user-defined clustered index" " in the table '%-.192s'", from->s->table_name.str); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, warn_buff); } else @@ -7769,11 +8758,13 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, to->use_all_columns(); to->mark_virtual_columns_for_write(TRUE); if (init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE)) + { + error= 1; goto err; - errpos= 4; - if (ignore) + } + if (ignore && !alter_ctx->fk_error_if_delete_row) to->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - thd->warning_info->reset_current_row_for_warning(); + thd->get_stmt_da()->reset_current_row_for_warning(); restore_record(to, s->default_values); // Create empty record thd->progress.max_counter= from->file->records(); @@ -7797,7 +8788,7 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, } /* Return error if source table isn't empty. 
*/ - if (error_if_not_empty) + if (alter_ctx->error_if_not_empty) { error= 1; break; @@ -7831,38 +8822,62 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to, to->auto_increment_field_not_null= FALSE; if (error) { - if (!ignore || - to->file->is_fatal_error(error, HA_CHECK_DUP)) + if (to->file->is_fatal_error(error, HA_CHECK_DUP)) { - if (!to->file->is_fatal_error(error, HA_CHECK_DUP)) - { - uint key_nr= to->file->get_dup_key(error); - if ((int) key_nr >= 0) - { - const char *err_msg= ER(ER_DUP_ENTRY_WITH_KEY_NAME); - if (key_nr == 0 && - (to->key_info[0].key_part[0].field->flags & - AUTO_INCREMENT_FLAG)) - err_msg= ER(ER_DUP_ENTRY_AUTOINCREMENT_CASE); - to->file->print_keydup_error(key_nr, err_msg, MYF(0)); - break; - } - } - - to->file->print_error(error,MYF(0)); + /* Not a duplicate key error. */ + to->file->print_error(error, MYF(0)); break; } - to->file->restore_auto_increment(prev_insert_id); - delete_count++; + else + { + /* Duplicate key error. */ + if (alter_ctx->fk_error_if_delete_row) + { + /* + We are trying to omit a row from the table which serves as parent + in a foreign key. This might have broken referential integrity so + emit an error. Note that we can't ignore this error even if we are + executing ALTER IGNORE TABLE. IGNORE allows to skip rows, but + doesn't allow to break unique or foreign key constraints, + */ + my_error(ER_FK_CANNOT_DELETE_PARENT, MYF(0), + alter_ctx->fk_error_id, + alter_ctx->fk_error_table); + break; + } + + if (ignore) + { + /* This ALTER IGNORE TABLE. Simply skip row and continue. */ + to->file->restore_auto_increment(prev_insert_id); + delete_count++; + } + else + { + /* Ordinary ALTER TABLE. Report duplicate key error. 
*/ + uint key_nr= to->file->get_dup_key(error); + if ((int) key_nr >= 0) + { + const char *err_msg= ER(ER_DUP_ENTRY_WITH_KEY_NAME); + if (key_nr == 0 && + (to->key_info[0].key_part[0].field->flags & + AUTO_INCREMENT_FLAG)) + err_msg= ER(ER_DUP_ENTRY_AUTOINCREMENT_CASE); + print_keydup_error(to, key_nr == MAX_KEY ? NULL : + &to->key_info[key_nr], + err_msg, MYF(0)); + } + else + to->file->print_error(error, MYF(0)); + break; + } + } } else found_count++; - thd->warning_info->inc_current_row_for_warning(); + thd->get_stmt_da()->inc_current_row_for_warning(); } - -err: - if (errpos >= 4) - end_read_record(&info); + end_read_record(&info); free_io_cache(from); delete [] copy; @@ -7874,22 +8889,23 @@ err: /* We are going to drop the temporary table */ to->file->extra(HA_EXTRA_PREPARE_FOR_DROP); } - if (errpos >= 3 && to->file->ha_end_bulk_insert() && error <= 0) + if (to->file->ha_end_bulk_insert() && error <= 0) { to->file->print_error(my_errno,MYF(0)); error= 1; } to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); - if (errpos >= 1 && mysql_trans_commit_alter_copy_data(thd)) + if (mysql_trans_commit_alter_copy_data(thd)) error= 1; + err: thd->variables.sql_mode= save_sql_mode; thd->abort_on_warning= 0; *copied= found_count; *deleted=delete_count; to->file->ha_release_auto_increment(); - if (errpos >= 2 && to->file->ha_external_lock(thd,F_UNLCK)) + if (to->file->ha_external_lock(thd,F_UNLCK)) error=1; if (error < 0 && to->file->extra(HA_EXTRA_PREPARE_FOR_RENAME)) error= 1; @@ -7909,20 +8925,14 @@ err: RETURN Like mysql_alter_table(). */ + bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list) { HA_CREATE_INFO create_info; Alter_info alter_info; - DBUG_ENTER("mysql_recreate_table"); DBUG_ASSERT(!table_list->next_global); - /* - table_list->table has been closed and freed. Do not reference - uninitialized data. open_tables() could fail. - */ - table_list->table= NULL; - /* Same applies to MDL ticket. 
*/ - table_list->mdl_request.ticket= NULL; + /* Set lock type which is appropriate for ALTER TABLE. */ table_list->lock_type= TL_READ_NO_INSERT; /* Same applies to MDL request. */ @@ -7932,10 +8942,11 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list) create_info.row_type=ROW_TYPE_NOT_USED; create_info.default_table_charset=default_charset_info; /* Force alter table to recreate table */ - alter_info.flags= (ALTER_CHANGE_COLUMN | ALTER_RECREATE); + alter_info.flags= (Alter_info::ALTER_CHANGE_COLUMN | + Alter_info::ALTER_RECREATE); DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info, table_list, &alter_info, 0, - (ORDER *) 0, 0, 0)); + (ORDER *) 0, 0)); } @@ -7950,23 +8961,48 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); item->maybe_null= 1; - field_list.push_back(item= new Item_int("Checksum", (longlong) 1, + field_list.push_back(item= new Item_int("Checksum", + (longlong) 1, MY_INT64_NUM_DECIMAL_DIGITS)); item->maybe_null= 1; if (protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); + /* + Close all temporary tables which were pre-open to simplify + privilege checking. Clear all references to closed tables. + */ + close_thread_tables(thd); + for (table= tables; table; table= table->next_local) + table->table= NULL; + /* Open one table after the other to keep lock time as short as possible. */ for (table= tables; table; table= table->next_local) { char table_name[SAFE_NAME_LEN*2+2]; TABLE *t; + TABLE_LIST *save_next_global; strxmov(table_name, table->db ,".", table->table_name, NullS); - t= table->table= open_n_lock_single_table(thd, table, TL_READ, 0); - thd->clear_error(); // these errors shouldn't get client + /* Remember old 'next' pointer and break the list. */ + save_next_global= table->next_global; + table->next_global= NULL; + table->lock_type= TL_READ; + /* Allow to open real tables only. 
*/ + table->required_type= FRMTYPE_TABLE; + + if (open_temporary_tables(thd, table) || + open_and_lock_tables(thd, table, FALSE, 0)) + { + t= NULL; + thd->clear_error(); // these errors shouldn't get client + } + else + t= table->table; + + table->next_global= save_next_global; protocol->prepare_for_resend(); protocol->store(table_name, system_charset_info); @@ -8067,11 +9103,6 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, if (! thd->in_sub_stmt) trans_rollback_stmt(thd); close_thread_tables(thd); - /* - Don't release metadata locks, this will be done at - statement end. - */ - table->table=0; // For query cache } if (protocol->write()) goto err; @@ -8113,7 +9144,7 @@ static bool check_engine(THD *thd, const char *db_name, if (req_engine && req_engine != *new_engine) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_WARN_USING_OTHER_HANDLER, ER(ER_WARN_USING_OTHER_HANDLER), ha_resolve_storage_engine_name(*new_engine), diff --git a/sql/sql_table.h b/sql/sql_table.h index 8bc6865decd..6bd111cae6d 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -21,6 +21,7 @@ #include "my_sys.h" // pthread_mutex_t class Alter_info; +class Alter_table_ctx; class Create_field; struct TABLE_LIST; class THD; @@ -33,7 +34,6 @@ typedef struct st_key KEY; typedef struct st_key_cache KEY_CACHE; typedef struct st_lock_param_type ALTER_PARTITION_PARAM_TYPE; typedef struct st_order ORDER; -class Alter_table_change_level; enum ddl_log_entry_code { @@ -65,10 +65,19 @@ enum ddl_log_action_code DDL_LOG_REPLACE_ACTION: Rename an entity after removing the previous entry with the new name, that is replace this entry. + DDL_LOG_EXCHANGE_ACTION: + Exchange two entities by renaming them a -> tmp, b -> a, tmp -> b. 
*/ DDL_LOG_DELETE_ACTION = 'd', DDL_LOG_RENAME_ACTION = 'r', - DDL_LOG_REPLACE_ACTION = 's' + DDL_LOG_REPLACE_ACTION = 's', + DDL_LOG_EXCHANGE_ACTION = 'e' +}; + +enum enum_ddl_log_exchange_phase { + EXCH_PHASE_NAME_TO_TEMP= 0, + EXCH_PHASE_FROM_TO_NAME= 1, + EXCH_PHASE_TEMP_TO_FROM= 2 }; @@ -77,6 +86,7 @@ typedef struct st_ddl_log_entry const char *name; const char *from_name; const char *handler_name; + const char *tmp_name; uint next_entry; uint entry_pos; enum ddl_log_entry_code entry_type; @@ -115,11 +125,15 @@ enum enum_explain_filename_mode #define WFRM_KEEP_SHARE 8 /* Flags for conversion functions. */ -#define FN_FROM_IS_TMP (1 << 0) -#define FN_TO_IS_TMP (1 << 1) -#define FN_IS_TMP (FN_FROM_IS_TMP | FN_TO_IS_TMP) -#define NO_FRM_RENAME (1 << 2) -#define FRM_ONLY (1 << 3) +static const uint FN_FROM_IS_TMP= 1 << 0; +static const uint FN_TO_IS_TMP= 1 << 1; +static const uint FN_IS_TMP= FN_FROM_IS_TMP | FN_TO_IS_TMP; +static const uint NO_FRM_RENAME= 1 << 2; +static const uint FRM_ONLY= 1 << 3; +/** Don't remove table in engine. Remove only .FRM and maybe .PAR files. */ +static const uint NO_HA_TABLE= 1 << 4; +/** Don't resolve MySQL's fake "foo.sym" symbolic directory names. 
*/ +static const uint SKIP_SYMDIR_ACCESS= 1 << 5; uint filename_to_tablename(const char *from, char *to, uint to_length #ifndef DBUG_OFF @@ -133,6 +147,7 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db, const char *table, const char *ext, uint flags); uint build_table_shadow_filename(char *buff, size_t bufflen, ALTER_PARTITION_PARAM_TYPE *lpt); +uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen); bool mysql_create_table(THD *thd, TABLE_LIST *create_table, HA_CREATE_INFO *create_info, Alter_info *alter_info); @@ -183,27 +198,30 @@ handler *mysql_create_frm_image(THD *thd, const char *db, const char *table_name, HA_CREATE_INFO *create_info, Alter_info *alter_info, - int create_table_mode, LEX_CUSTRING *frm); + int create_table_mode, + KEY **key_info, + uint *key_count, + LEX_CUSTRING *frm); + +int mysql_discard_or_import_tablespace(THD *thd, + TABLE_LIST *table_list, + bool discard); + bool mysql_prepare_alter_table(THD *thd, TABLE *table, HA_CREATE_INFO *create_info, - Alter_info *alter_info); + Alter_info *alter_info, + Alter_table_ctx *alter_ctx); bool mysql_trans_prepare_alter_copy_data(THD *thd); bool mysql_trans_commit_alter_copy_data(THD *thd); bool mysql_alter_table(THD *thd, char *new_db, char *new_name, HA_CREATE_INFO *create_info, TABLE_LIST *table_list, Alter_info *alter_info, - uint order_num, ORDER *order, bool ignore, - bool require_online); + uint order_num, ORDER *order, bool ignore); bool mysql_compare_tables(TABLE *table, Alter_info *alter_info, HA_CREATE_INFO *create_info, - uint order_num, - Alter_table_change_level *need_copy_table, - KEY **key_info_buffer, - uint **index_drop_buffer, uint *index_drop_count, - uint **index_add_buffer, uint *index_add_count, - uint *candidate_key_count); + bool *metadata_equal); bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list); bool mysql_create_like_table(THD *thd, TABLE_LIST *table, TABLE_LIST *src_table, @@ -222,7 +240,7 @@ bool mysql_rm_table(THD 
*thd,TABLE_LIST *tables, my_bool if_exists, int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists, bool drop_temporary, bool drop_view, bool log_query); -bool quick_rm_table(handlerton *base,const char *db, +bool quick_rm_table(THD *thd, handlerton *base, const char *db, const char *table_name, uint flags); void close_cached_table(THD *thd, TABLE *table); void sp_prepare_create_field(THD *thd, Create_field *sql_field); @@ -246,6 +264,9 @@ bool sync_ddl_log(); void release_ddl_log(); void execute_ddl_log_recovery(); bool execute_ddl_log_entry(THD *thd, uint first_entry); +bool validate_comment_length(THD *thd, const char *comment_str, + size_t *comment_len, uint max_len, + uint err_code, const char *comment_name); bool check_duplicate_warning(THD *thd, char *msg, ulong length); template<typename T> class List; diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc index 3f6daf7a9ec..48eeb94f7c9 100644 --- a/sql/sql_tablespace.cc +++ b/sql/sql_tablespace.cc @@ -35,7 +35,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info) { hton= ha_default_handlerton(thd); if (ts_info->storage_engine != 0) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_USING_OTHER_HANDLER, ER(ER_WARN_USING_OTHER_HANDLER), hton_name(hton)->str, @@ -65,7 +65,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info) } else { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, ER(ER_ILLEGAL_HA_CREATE_OPTION), hton_name(hton)->str, diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 93b35b4918f..867d49808e1 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -92,7 +92,7 @@ static void print_cached_tables(void) { share= (TABLE_SHARE*) my_hash_element(&table_def_cache, idx); - I_P_List_iterator<TABLE, TABLE_share> it(share->used_tables); + TABLE_SHARE::TABLE_list::Iterator 
it(share->used_tables); while ((entry= it++)) { printf("%-14.14s %-32s%6ld%8ld%6d %s\n", diff --git a/sql/sql_time.cc b/sql/sql_time.cc index a67768d4c34..f4612ec517e 100644 --- a/sql/sql_time.cc +++ b/sql/sql_time.cc @@ -222,7 +222,7 @@ check_date_with_warn(const MYSQL_TIME *ltime, ulonglong fuzzy_date, if (check_date(ltime, fuzzy_date, &unused)) { ErrConvTime str(ltime); - make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, &str, ts_type, 0); return true; } @@ -309,15 +309,13 @@ str_to_datetime_with_warn(CHARSET_INFO *cs, { MYSQL_TIME_STATUS status; THD *thd= current_thd; - bool ret_val= str_to_datetime(cs, str, length, l_time, - (flags | (sql_mode_for_dates(thd))), - &status); + bool ret_val= str_to_datetime(cs, str, length, l_time, flags, &status); if (ret_val || status.warnings) - make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN, str, length, flags & TIME_TIME_ONLY ? MYSQL_TIMESTAMP_TIME : l_time->time_type, NullS); DBUG_EXECUTE_IF("str_to_datetime_warn", - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_YES, str);); return ret_val; } @@ -360,7 +358,7 @@ static bool number_to_time_with_warn(bool neg, ulonglong nr, ulong sec_part, if (res < 0 || (was_cut && (fuzzydate & TIME_NO_ZERO_IN_DATE))) { make_truncated_value_warning(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, str, + Sql_condition::WARN_LEVEL_WARN, str, res < 0 ? 
MYSQL_TIMESTAMP_ERROR : mysql_type_to_time_type(f_type), field_name); @@ -814,7 +812,7 @@ const char *get_date_time_format_str(KNOWN_DATE_TIME_FORMAT *format, } void make_truncated_value_warning(THD *thd, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const ErrConv *sval, timestamp_type time_type, const char *field_name) @@ -839,7 +837,7 @@ void make_truncated_value_warning(THD *thd, cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff), ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), type_str, sval->ptr(), field_name, - (ulong) thd->warning_info->current_row_for_warning()); + (ulong) thd->get_stmt_da()->current_row_for_warning()); else { if (time_type > MYSQL_TIMESTAMP_ERROR) @@ -968,7 +966,7 @@ bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type, return 0; // Ok invalid_date: - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_DATETIME_FUNCTION_OVERFLOW, ER(ER_DATETIME_FUNCTION_OVERFLOW), ltime->time_type == MYSQL_TIMESTAMP_TIME ? 
diff --git a/sql/sql_time.h b/sql/sql_time.h index cf029f143b3..47b300d51cc 100644 --- a/sql/sql_time.h +++ b/sql/sql_time.h @@ -19,7 +19,7 @@ #include "my_global.h" /* ulong */ #include "my_time.h" #include "mysql_time.h" /* timestamp_type */ -#include "sql_error.h" /* MYSQL_ERROR */ +#include "sql_error.h" /* Sql_condition */ #include "structs.h" /* INTERVAL */ typedef enum enum_mysql_timestamp_type timestamp_type; @@ -48,13 +48,14 @@ bool int_to_datetime_with_warn(longlong value, MYSQL_TIME *ltime, ulonglong fuzzydate, const char *name); -void make_truncated_value_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, +void make_truncated_value_warning(THD *thd, + Sql_condition::enum_warning_level level, const ErrConv *str_val, timestamp_type time_type, const char *field_name); static inline void make_truncated_value_warning(THD *thd, - MYSQL_ERROR::enum_warning_level level, const char *str_val, + Sql_condition::enum_warning_level level, const char *str_val, uint str_length, timestamp_type time_type, const char *field_name) { diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 022c4ff4ea5..bc4986bebee 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -157,7 +157,7 @@ Trigger_creation_ctx::create(THD *thd, if (invalid_creation_ctx) { push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_WARN, + Sql_condition::WARN_LEVEL_WARN, ER_TRG_INVALID_CREATION_CTX, ER(ER_TRG_INVALID_CREATION_CTX), (const char *) db_name, @@ -329,9 +329,9 @@ public: virtual bool handle_condition(THD *thd, uint sql_errno, const char* sqlstate, - MYSQL_ERROR::enum_warning_level level, + Sql_condition::enum_warning_level level, const char* message, - MYSQL_ERROR ** cond_hdl) + Sql_condition ** cond_hdl) { if (sql_errno != EE_OUTOFMEMORY && sql_errno != ER_OUT_OF_RESOURCES) @@ -561,7 +561,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) if (result) goto end; - close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED); + 
close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL); /* Reopen the table if we were under LOCK TABLES. Ignore the return value for now. It's better to @@ -588,7 +588,7 @@ end: with the implicit commit. */ if (thd->locked_tables_mode && tables && lock_upgrade_done) - mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE); + mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE); /* Restore the query table list. Used only for drop trigger. */ if (!create) @@ -799,7 +799,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, lex->definer->user.str)) { push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_NOTE, + Sql_condition::WARN_LEVEL_NOTE, ER_NO_SUCH_USER, ER(ER_NO_SUCH_USER), lex->definer->user.str, @@ -1274,7 +1274,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, DBUG_RETURN(1); // EOM } - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_TRG_NO_CREATION_CTX, ER(ER_TRG_NO_CREATION_CTX), (const char*) db, @@ -1458,7 +1458,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, warning here. */ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_TRG_NO_DEFINER, ER(ER_TRG_NO_DEFINER), (const char*) db, (const char*) sp->m_name.str); @@ -1731,7 +1731,7 @@ bool add_table_for_trigger(THD *thd, if (if_exists) { push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_NOTE, + Sql_condition::WARN_LEVEL_NOTE, ER_TRG_DOES_NOT_EXIST, ER(ER_TRG_DOES_NOT_EXIST)); @@ -2211,6 +2211,37 @@ add_tables_and_routines_for_triggers(THD *thd, /** + Check if any of the marked fields are used in the trigger. 
+ + @param used_fields Bitmap over fields to check + @param event_type Type of event triggers for which we are going to inspect + @param action_time Type of trigger action time we are going to inspect +*/ + +bool Table_triggers_list::is_fields_updated_in_trigger(MY_BITMAP *used_fields, + trg_event_type event_type, + trg_action_time_type action_time) +{ + Item_trigger_field *trg_field; + sp_head *sp= bodies[event_type][action_time]; + DBUG_ASSERT(used_fields->n_bits == trigger_table->s->fields); + + for (trg_field= sp->m_trg_table_fields.first; trg_field; + trg_field= trg_field->next_trg_field) + { + /* We cannot check fields which does not present in table. */ + if (trg_field->field_idx != (uint)-1) + { + if (bitmap_is_set(used_fields, trg_field->field_idx) && + trg_field->get_settable_routine_parameter()) + return true; + } + } + return false; +} + + +/** Mark fields of subject table which we read/set in its triggers as such. @@ -2302,7 +2333,7 @@ Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key, DBUG_PRINT("info", ("sql_modes affected by BUG#14090 detected")); push_warning_printf(current_thd, - MYSQL_ERROR::WARN_LEVEL_NOTE, + Sql_condition::WARN_LEVEL_NOTE, ER_OLD_FILE_FORMAT, ER(ER_OLD_FILE_FORMAT), (char *)path, "TRIGGER"); @@ -2343,7 +2374,7 @@ process_unknown_string(char *&unknown_key, uchar* base, MEM_ROOT *mem_root, DBUG_PRINT("info", ("trigger_table affected by BUG#15921 detected")); push_warning_printf(current_thd, - MYSQL_ERROR::WARN_LEVEL_NOTE, + Sql_condition::WARN_LEVEL_NOTE, ER_OLD_FILE_FORMAT, ER(ER_OLD_FILE_FORMAT), (char *)path, "TRIGGER"); diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h index 47b1d19ae54..52892550d35 100644 --- a/sql/sql_trigger.h +++ b/sql/sql_trigger.h @@ -207,6 +207,10 @@ public: Query_tables_list *prelocking_ctx, TABLE_LIST *table_list); + bool is_fields_updated_in_trigger(MY_BITMAP *used_fields, + trg_event_type event_type, + trg_action_time_type action_time); + private: bool 
prepare_record1_accessors(TABLE *table); LEX_STRING* change_table_name_in_trignames(const char *old_db_name, diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc index 19ce553f5ce..9cd984a6663 100644 --- a/sql/sql_truncate.cc +++ b/sql/sql_truncate.cc @@ -18,9 +18,8 @@ #include "sql_class.h" // THD #include "sql_base.h" // open_and_lock_tables #include "sql_table.h" // write_bin_log -#include "sql_handler.h" // mysql_ha_rm_tables #include "datadict.h" // dd_recreate_table() -#include "lock.h" // MYSQL_OPEN_TEMPORARY_ONLY +#include "lock.h" // MYSQL_OPEN_* flags #include "sql_acl.h" // DROP_ACL #include "sql_parse.h" // check_one_table_access() #include "sql_truncate.h" @@ -186,12 +185,12 @@ fk_truncate_illegal_if_parent(THD *thd, TABLE *table) @retval > 0 Error code. */ -int Truncate_statement::handler_truncate(THD *thd, TABLE_LIST *table_ref, - bool is_tmp_table) +int Sql_cmd_truncate_table::handler_truncate(THD *thd, TABLE_LIST *table_ref, + bool is_tmp_table) { int error= 0; uint flags; - DBUG_ENTER("Truncate_statement::handler_truncate"); + DBUG_ENTER("Sql_cmd_truncate_table::handler_truncate"); /* Can't recreate, the engine must mechanically delete all rows @@ -199,9 +198,7 @@ int Truncate_statement::handler_truncate(THD *thd, TABLE_LIST *table_ref, */ /* If it is a temporary table, no need to take locks. */ - if (is_tmp_table) - flags= MYSQL_OPEN_TEMPORARY_ONLY; - else + if (!is_tmp_table) { /* We don't need to load triggers. */ DBUG_ASSERT(table_ref->trg_event_map == 0); @@ -216,7 +213,7 @@ int Truncate_statement::handler_truncate(THD *thd, TABLE_LIST *table_ref, the MDL lock taken above and otherwise there is no way to wait for FLUSH TABLES in deadlock-free fashion. 
*/ - flags= MYSQL_OPEN_IGNORE_FLUSH | MYSQL_OPEN_SKIP_TEMPORARY; + flags= MYSQL_OPEN_IGNORE_FLUSH; /* Even though we have an MDL lock on the table here, we don't pass MYSQL_OPEN_HAS_MDL_LOCK to open_and_lock_tables @@ -270,7 +267,7 @@ static bool recreate_temporary_table(THD *thd, TABLE *table) share->normalized_path.str); if (open_table_uncached(thd, table_type, share->path.str, share->db.str, - share->table_name.str, TRUE)) + share->table_name.str, true, true)) { error= FALSE; thd->thread_specific_used= TRUE; @@ -298,11 +295,11 @@ static bool recreate_temporary_table(THD *thd, TABLE *table) @retval TRUE Error. */ -bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref, - bool *hton_can_recreate) +bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref, + bool *hton_can_recreate) { TABLE *table= NULL; - DBUG_ENTER("Truncate_statement::lock_table"); + DBUG_ENTER("Sql_cmd_truncate_table::lock_table"); /* Lock types are set in the parser. */ DBUG_ASSERT(table_ref->lock_type == TL_WRITE); @@ -337,8 +334,7 @@ bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref, /* Acquire an exclusive lock. */ DBUG_ASSERT(table_ref->next_global == NULL); if (lock_table_names(thd, table_ref, NULL, - thd->variables.lock_wait_timeout, - MYSQL_OPEN_SKIP_TEMPORARY)) + thd->variables.lock_wait_timeout, 0)) DBUG_RETURN(TRUE); handlerton *hton; @@ -379,7 +375,7 @@ bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref, m_ticket_downgrade= table->mdl_ticket; /* Close if table is going to be recreated. */ if (*hton_can_recreate) - close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED); + close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL); } else { @@ -406,29 +402,31 @@ bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref, @retval TRUE Error. 
*/ -bool Truncate_statement::truncate_table(THD *thd, TABLE_LIST *table_ref) +bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref) { int error; - TABLE *table; bool binlog_stmt; - DBUG_ENTER("Truncate_statement::truncate_table"); + DBUG_ENTER("Sql_cmd_truncate_table::truncate_table"); + + DBUG_ASSERT((!table_ref->table) || + (table_ref->table && table_ref->table->s)); /* Initialize, or reinitialize in case of reexecution (SP). */ m_ticket_downgrade= NULL; - /* Remove table from the HANDLER's hash. */ - mysql_ha_rm_tables(thd, table_ref); - /* If it is a temporary table, no need to take locks. */ - if ((table= find_temporary_table(thd, table_ref))) + if (is_temporary_table(table_ref)) { + TABLE *tmp_table= table_ref->table; + /* In RBR, the statement is not binlogged if the table is temporary. */ binlog_stmt= !thd->is_current_stmt_binlog_format_row(); /* Note that a temporary table cannot be partitioned. */ - if (ha_check_storage_engine_flag(table->s->db_type(), HTON_CAN_RECREATE)) + if (ha_check_storage_engine_flag(tmp_table->s->db_type(), + HTON_CAN_RECREATE)) { - if ((error= recreate_temporary_table(thd, table))) + if ((error= recreate_temporary_table(thd, tmp_table))) binlog_stmt= FALSE; /* No need to binlog failed truncate-by-recreate. */ DBUG_ASSERT(! thd->transaction.stmt.modified_non_trans_table); @@ -508,7 +506,7 @@ bool Truncate_statement::truncate_table(THD *thd, TABLE_LIST *table_ref) to a shared one. */ if (m_ticket_downgrade) - m_ticket_downgrade->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE); + m_ticket_downgrade->downgrade_lock(MDL_SHARED_NO_READ_WRITE); DBUG_RETURN(error); } @@ -522,11 +520,11 @@ bool Truncate_statement::truncate_table(THD *thd, TABLE_LIST *table_ref) @return FALSE on success. 
*/ -bool Truncate_statement::execute(THD *thd) +bool Sql_cmd_truncate_table::execute(THD *thd) { bool res= TRUE; TABLE_LIST *first_table= thd->lex->select_lex.table_list.first; - DBUG_ENTER("Truncate_statement::execute"); + DBUG_ENTER("Sql_cmd_truncate_table::execute"); if (check_one_table_access(thd, DROP_ACL, first_table)) DBUG_RETURN(res); diff --git a/sql/sql_truncate.h b/sql/sql_truncate.h index 95a2f35df4f..061c561b8ea 100644 --- a/sql/sql_truncate.h +++ b/sql/sql_truncate.h @@ -19,9 +19,9 @@ class THD; struct TABLE_LIST; /** - Truncate_statement represents the TRUNCATE statement. + Sql_cmd_truncate_table represents the TRUNCATE statement. */ -class Truncate_statement : public Sql_statement +class Sql_cmd_truncate_table : public Sql_cmd { private: /* Set if a lock must be downgraded after truncate is done. */ @@ -29,14 +29,12 @@ private: public: /** - Constructor, used to represent a ALTER TABLE statement. - @param lex the LEX structure for this statement. + Constructor, used to represent a TRUNCATE statement. */ - Truncate_statement(LEX *lex) - : Sql_statement(lex) + Sql_cmd_truncate_table() {} - virtual ~Truncate_statement() + virtual ~Sql_cmd_truncate_table() {} /** @@ -46,6 +44,11 @@ public: */ bool execute(THD *thd); + virtual enum_sql_command sql_command_code() const + { + return SQLCOM_TRUNCATE; + } + protected: /** Handle locking a base table for truncate. */ bool lock_table(THD *, TABLE_LIST *, bool *); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 3cf7f576cbf..a835c182c86 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -714,7 +714,7 @@ bool st_select_lex_unit::exec() Stop execution of the remaining queries in the UNIONS, and produce the current result. 
*/ - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT, ER(ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT), thd->accessed_rows_and_keys, diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 5edccd4e937..b91215bcedd 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -138,7 +138,7 @@ static bool check_fields(THD *thd, List<Item> &items) while ((item= it++)) { - if (!(field= item->filed_for_view_update())) + if (!(field= item->field_for_view_update())) { /* item has name, because it comes from VIEW SELECT list */ my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name); @@ -841,7 +841,7 @@ int mysql_update(THD *thd, error= 1; break; } - thd->warning_info->inc_current_row_for_warning(); + thd->get_stmt_da()->inc_current_row_for_warning(); if (thd->is_error()) { error= 1; @@ -949,7 +949,7 @@ int mysql_update(THD *thd, char buff[MYSQL_ERRMSG_SIZE]; my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, - (ulong) thd->warning_info->statement_warn_count()); + (ulong) thd->get_stmt_da()->current_statement_warn_count()); my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? 
found : updated, id, buff); DBUG_PRINT("info",("%ld records updated", (long) updated)); @@ -1134,7 +1134,7 @@ bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update) // The primary key can cover multiple columns KEY key_info= table1->key_info[table1->s->primary_key]; KEY_PART_INFO *key_part= key_info.key_part; - KEY_PART_INFO *key_part_end= key_part + key_info.key_parts; + KEY_PART_INFO *key_part_end= key_part + key_info.user_defined_key_parts; for (;key_part != key_part_end; ++key_part) { diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 01bbd5ecb9f..e0a567420ba 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -350,7 +350,7 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view, while ((item= it++)) { Item_field *field; - if ((field= item->filed_for_view_update())) + if ((field= item->field_for_view_update())) { /* any_privileges may be reset later by the Item_field::set_field @@ -432,7 +432,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, lex->link_first_table_back(view, link_to_local); view->open_type= OT_BASE_ONLY; - if (open_and_lock_tables(thd, lex->query_tables, TRUE, 0)) + if (open_temporary_tables(thd, lex->query_tables) || + open_and_lock_tables(thd, lex->query_tables, TRUE, 0)) { view= lex->unlink_first_table(&link_to_local); res= TRUE; @@ -511,7 +512,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, if (!is_acl_user(lex->definer->host.str, lex->definer->user.str)) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_NO_SUCH_USER, ER(ER_NO_SUCH_USER), lex->definer->user.str, @@ -636,7 +637,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, Item *item; while ((item= it++)) { - Item_field *fld= item->filed_for_view_update(); + Item_field *fld= item->field_for_view_update(); uint priv= (get_column_grant(thd, &view->grant, view->db, view->table_name, item->name) & VIEW_ANY_ACL); @@ -887,7 +888,7 @@ static int 
mysql_register_view(THD *thd, TABLE_LIST *view, if (lex->create_view_algorithm == VIEW_ALGORITHM_MERGE && !lex->can_be_merged()) { - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_VIEW_MERGE, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_VIEW_MERGE, ER(ER_WARN_VIEW_MERGE)); lex->create_view_algorithm= DTYPE_ALGORITHM_UNDEFINED; } @@ -1165,7 +1166,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, DBUG_ASSERT(!table->definer.host.str && !table->definer.user.length && !table->definer.host.length); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_VIEW_FRM_NO_USER, ER(ER_VIEW_FRM_NO_USER), table->db, table->table_name); get_default_definer(thd, &table->definer); @@ -1566,7 +1567,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, lex->select_lex.order_list.elements && !table->select_lex->master_unit()->is_union()) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_VIEW_ORDERBY_IGNORED, ER(ER_VIEW_ORDERBY_IGNORED), table->db, table->table_name); @@ -1664,8 +1665,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) DBUG_RETURN(TRUE); } - if (lock_table_names(thd, views, 0, thd->variables.lock_wait_timeout, - MYSQL_OPEN_SKIP_TEMPORARY)) + if (lock_table_names(thd, views, 0, thd->variables.lock_wait_timeout, 0)) DBUG_RETURN(TRUE); for (view= views; view; view= view->next_local) @@ -1680,7 +1680,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) my_snprintf(name, sizeof(name), "%s.%s", view->db, view->table_name); if (thd->lex->check_exists) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), name); continue; @@ -1815,7 +1815,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view) if ((key_info->flags 
& (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) { KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *key_part_end= key_part + key_info->key_parts; + KEY_PART_INFO *key_part_end= key_part + key_info->user_defined_key_parts; /* check that all key parts are used */ for (;;) @@ -1824,7 +1824,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view) for (k= trans; k < end_of_trans; k++) { Item_field *field; - if ((field= k->item->filed_for_view_update()) && + if ((field= k->item->field_for_view_update()) && field->field == key_part->field) break; } @@ -1846,7 +1846,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view) for (fld= trans; fld < end_of_trans; fld++) { Item_field *field; - if ((field= fld->item->filed_for_view_update()) && + if ((field= fld->item->field_for_view_update()) && field->field == *field_ptr) break; } @@ -1860,7 +1860,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view) if (thd->variables.updatable_views_with_limit) { /* update allowed, but issue warning */ - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_WARN_VIEW_WITHOUT_KEY, ER(ER_WARN_VIEW_WITHOUT_KEY)); DBUG_RETURN(FALSE); } @@ -1900,7 +1900,7 @@ bool insert_view_fields(THD *thd, List<Item> *list, TABLE_LIST *view) for (Field_translator *entry= trans; entry < trans_end; entry++) { Item_field *fld; - if ((fld= entry->item->filed_for_view_update())) + if ((fld= entry->item->field_for_view_update())) list->push_back(fld); else { diff --git a/sql/sql_view.h b/sql/sql_view.h index 2e9c77252e8..abe95c63e6e 100644 --- a/sql/sql_view.h +++ b/sql/sql_view.h @@ -37,6 +37,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *view, bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, uint flags); + bool mysql_drop_view(THD *thd, TABLE_LIST *view, enum_drop_mode drop_mode); bool check_key_in_view(THD *thd, TABLE_LIST * view); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index da8750a7ba4..07666822acf 100644 --- 
a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -55,11 +55,13 @@ #include "sp_pcontext.h" #include "sp_rcontext.h" #include "sp.h" -#include "sql_alter.h" // Alter_table*_statement -#include "sql_truncate.h" // Truncate_statement -#include "sql_admin.h" // Analyze/Check..._table_stmt -#include "sql_partition_admin.h" // Alter_table_*_partition_stmt +#include "sql_alter.h" // Sql_cmd_alter_table* +#include "sql_truncate.h" // Sql_cmd_truncate_table +#include "sql_admin.h" // Sql_cmd_analyze/Check..._table +#include "sql_partition_admin.h" // Sql_cmd_alter_table_*_part. +#include "sql_handler.h" // Sql_cmd_handler_* #include "sql_signal.h" +#include "sql_get_diagnostics.h" // Sql_cmd_get_diagnostics #include "event_parse_data.h" #include "create_options.h" #include <myisam.h> @@ -283,7 +285,7 @@ void case_stmt_action_case(LEX *lex) (Instruction 12 in the example) */ - lex->spcont->push_label((char *)"", lex->sphead->instructions()); + lex->spcont->push_label(current_thd, EMPTY_STR, lex->sphead->instructions()); } /** @@ -352,7 +354,7 @@ int case_stmt_action_when(LEX *lex, Item *when, bool simple) */ return !test(i) || - sp->push_backpatch(i, ctx->push_label((char *)"", 0)) || + sp->push_backpatch(i, ctx->push_label(current_thd, EMPTY_STR, 0)) || sp->add_cont_backpatch(i) || sp->add_instr(i); } @@ -468,7 +470,7 @@ set_system_variable(THD *thd, struct sys_var_with_base *tmp, */ static bool -set_local_variable(THD *thd, sp_variable_t *spv, Item *val) +set_local_variable(THD *thd, sp_variable *spv, Item *val) { Item *it; LEX *lex= thd->lex; @@ -476,8 +478,8 @@ set_local_variable(THD *thd, sp_variable_t *spv, Item *val) if (val) it= val; - else if (spv->dflt) - it= spv->dflt; + else if (spv->default_value) + it= spv->default_value; else { it= new (thd->mem_root) Item_null(); @@ -543,6 +545,57 @@ set_trigger_new_row(THD *thd, LEX_STRING *name, Item *val) /** + Create an object to represent a SP variable in the Item-hierarchy. + + @param thd The current thread. 
+ @param name The SP variable name. + @param spvar The SP variable (optional). + @param start_in_q Start position of the SP variable name in the query. + @param end_in_q End position of the SP variable name in the query. + + @remark If spvar is not specified, the name is used to search for the + variable in the parse-time context. If the variable does not + exist, a error is set and NULL is returned to the caller. + + @return An Item_splocal object representing the SP variable, or NULL on error. +*/ +static Item_splocal* +create_item_for_sp_var(THD *thd, LEX_STRING name, sp_variable *spvar, + const char *start_in_q, const char *end_in_q) +{ + Item_splocal *item; + LEX *lex= thd->lex; + uint pos_in_q, len_in_q; + sp_pcontext *spc = lex->spcont; + + /* If necessary, look for the variable. */ + if (spc && !spvar) + spvar= spc->find_variable(name, false); + + if (!spvar) + { + my_error(ER_SP_UNDECLARED_VAR, MYF(0), name.str); + return NULL; + } + + DBUG_ASSERT(spc && spvar); + + /* Position and length of the SP variable name in the query. */ + pos_in_q= start_in_q - lex->sphead->m_tmp_query; + len_in_q= end_in_q - start_in_q; + + item= new (thd->mem_root) + Item_splocal(name, spvar->offset, spvar->type, pos_in_q, len_in_q); + +#ifndef DBUG_OFF + if (item) + item->m_sp= lex->sphead; +#endif + + return item; +} + +/** Helper to resolve the SQL:2003 Syntax exception 1) in <in predicate>. See SQL:2003, Part 2, section 8.4 <in predicate>, Note 184, page 383. 
This function returns the proper item for the SQL expression @@ -707,10 +760,10 @@ static bool add_create_index_prepare (LEX *lex, Table_ident *table) if (!lex->current_select->add_table_to_list(lex->thd, table, NULL, TL_OPTION_UPDATING, TL_READ_NO_INSERT, - MDL_SHARED_NO_WRITE)) + MDL_SHARED_UPGRADABLE)) return TRUE; lex->alter_info.reset(); - lex->alter_info.flags= ALTER_ADD_INDEX; + lex->alter_info.flags= Alter_info::ALTER_ADD_INDEX; lex->col_list.empty(); lex->change= NullS; lex->option_list= NULL; @@ -879,7 +932,7 @@ static bool sp_create_assignment_instr(THD *thd, bool no_lookahead) timestamp_type date_time_type; st_select_lex *select_lex; chooser_compare_func_creator boolfunc2creator; - struct sp_cond_type *spcondtype; + class sp_condition_value *spcondvalue; struct { int vars, conds, hndlrs, curs; } spblock; sp_name *spname; LEX *lex; @@ -890,6 +943,14 @@ static bool sp_create_assignment_instr(THD *thd, bool no_lookahead) enum Foreign_key::fk_option m_fk_option; enum enum_yes_no_unknown m_yes_no_unk; Diag_condition_item_name diag_condition_item_name; + Diagnostics_information::Which_area diag_area; + Diagnostics_information *diag_info; + Statement_information_item *stmt_info_item; + Statement_information_item::Name stmt_info_item_name; + List<Statement_information_item> *stmt_info_list; + Condition_information_item *cond_info_item; + Condition_information_item::Name cond_info_item_name; + List<Condition_information_item> *cond_info_list; DYNCALL_CREATE_DEF *dyncol_def; List<DYNCALL_CREATE_DEF> *dyncol_def_list; bool is_not_empty; @@ -901,10 +962,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %pure_parser /* We have threads */ /* - Currently there are 189 shift/reduce conflicts. + Currently there are 185 shift/reduce conflicts. We should not introduce new conflicts any more. */ -%expect 189 +%expect 185 /* Comments for TOKENS. 
@@ -1027,6 +1088,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token CROSS /* SQL-2003-R */ %token CUBE_SYM /* SQL-2003-R */ %token CURDATE /* MYSQL-FUNC */ +%token CURRENT_SYM /* SQL-2003-R */ %token CURRENT_USER /* SQL-2003-R */ %token CURRENT_POS_SYM %token CURSOR_SYM /* SQL-2003-R */ @@ -1058,6 +1120,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token DESCRIBE /* SQL-2003-R */ %token DES_KEY_FILE %token DETERMINISTIC_SYM /* SQL-2003-R */ +%token DIAGNOSTICS_SYM /* SQL-2003-N */ %token DIRECTORY_SYM %token DISABLE_SYM %token DISCARD @@ -1091,6 +1154,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token EVENTS_SYM %token EVENT_SYM %token EVERY_SYM /* SQL-2003-N */ +%token EXCHANGE_SYM %token EXAMINED_SYM %token EXECUTE_SYM /* SQL-2003-R */ %token EXISTS /* SQL-2003-R */ @@ -1123,6 +1187,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token GEOMETRYCOLLECTION %token GEOMETRY_SYM %token GET_FORMAT /* MYSQL-FUNC */ +%token GET_SYM /* SQL-2003-R */ %token GLOBAL_SYM /* SQL-2003-R */ %token GRANT /* SQL-2003-R */ %token GRANTS @@ -1283,6 +1348,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token NO_WRITE_TO_BINLOG %token NULL_SYM /* SQL-2003-R */ %token NUM +%token NUMBER_SYM /* SQL-2003-N */ %token NUMERIC_SYM /* SQL-2003-R */ %token NVARCHAR_SYM %token OFFSET_SYM @@ -1311,9 +1377,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token PARSER_SYM %token PARSE_VCOL_EXPR_SYM %token PARTIAL /* SQL-2003-N */ -%token PARTITIONING_SYM -%token PARTITIONS_SYM %token PARTITION_SYM /* SQL-2003-R */ +%token PARTITIONS_SYM +%token PARTITIONING_SYM %token PASSWORD %token PERSISTENT_SYM %token PHASE_SYM @@ -1374,6 +1440,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token RESTORE_SYM %token RESTRICT %token RESUME_SYM +%token RETURNED_SQLSTATE_SYM /* SQL-2003-N */ %token RETURNS_SYM /* SQL-2003-R */ %token RETURN_SYM /* 
SQL-2003-R */ %token REVOKE /* SQL-2003-R */ @@ -1384,6 +1451,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token ROWS_SYM /* SQL-2003-R */ %token ROW_FORMAT_SYM %token ROW_SYM /* SQL-2003-R */ +%token ROW_COUNT_SYM /* SQL-2003-N */ %token RTREE_SYM %token SAVEPOINT_SYM /* SQL-2003-R */ %token SCHEDULE_SYM @@ -1437,6 +1505,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token STARTING %token STARTS_SYM %token START_SYM /* SQL-2003-R */ +%token STATS_AUTO_RECALC_SYM +%token STATS_PERSISTENT_SYM +%token STATS_SAMPLE_PAGES_SYM %token STATUS_SYM %token STDDEV_SAMP_SYM /* SQL-2003-N */ %token STD_SYM @@ -1647,6 +1718,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); function_call_generic function_call_conflict kill_expr signal_allowed_expr + simple_target_specification + condition_number %type <item_num> NUM_literal @@ -1665,7 +1738,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); btree_or_rtree %type <string_list> - using_list + using_list opt_use_partition use_partition %type <key_part> key_part @@ -1777,7 +1850,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); key_using_alg part_column_list server_def server_options_list server_option - definer_opt no_definer definer + definer_opt no_definer definer get_diagnostics parse_vcol_expr vcol_opt_specifier vcol_opt_attribute vcol_opt_attribute_list vcol_attribute END_OF_INPUT @@ -1793,7 +1866,7 @@ END_OF_INPUT %type <NONE> case_stmt_specification simple_case_stmt searched_case_stmt %type <num> sp_decl_idents sp_opt_inout sp_handler_type sp_hcond_list -%type <spcondtype> sp_cond sp_hcond sqlstate signal_value opt_signal_value +%type <spcondvalue> sp_cond sp_hcond sqlstate signal_value opt_signal_value %type <spblock> sp_decls sp_decl %type <lex> sp_cursor_stmt %type <spname> sp_name @@ -1804,6 +1877,15 @@ END_OF_INPUT %type <NONE> signal_stmt resignal_stmt %type <diag_condition_item_name> 
signal_condition_information_item_name +%type <diag_area> which_area; +%type <diag_info> diagnostics_information; +%type <stmt_info_item> statement_information_item; +%type <stmt_info_item_name> statement_information_item_name; +%type <stmt_info_list> statement_information; +%type <cond_info_item> condition_information_item; +%type <cond_info_item_name> condition_information_item_name; +%type <cond_info_list> condition_information; + %type <NONE> '-' '+' '*' '/' '%' '(' ')' ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM @@ -1908,6 +1990,7 @@ statement: | drop | execute | flush + | get_diagnostics | grant | handler | help @@ -2069,6 +2152,7 @@ master_def: | MASTER_PASSWORD_SYM EQ TEXT_STRING_sys { Lex->mi.password = $3.str; + Lex->contains_plaintext_password= true; } | MASTER_PORT_SYM EQ ulong_num { @@ -2131,7 +2215,7 @@ master_def: } if (Lex->mi.heartbeat_period > slave_net_timeout) { - push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX, ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX)); } @@ -2139,7 +2223,7 @@ master_def: { if (Lex->mi.heartbeat_period != 0.0) { - push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN, ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN)); Lex->mi.heartbeat_period= 0.0; @@ -2187,7 +2271,7 @@ master_file_def: from 0" (4 in fact), unspecified means "don't change the position (keep the preceding value)"). 
*/ - Lex->mi.pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.pos); + Lex->mi.pos= MY_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.pos); } | RELAY_LOG_FILE_SYM EQ TEXT_STRING_sys { @@ -2197,7 +2281,7 @@ master_file_def: { Lex->mi.relay_log_pos = $3; /* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */ - Lex->mi.relay_log_pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos); + Lex->mi.relay_log_pos= MY_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos); } | MASTER_USE_GTID_SYM EQ CURRENT_POS_SYM { @@ -2289,7 +2373,7 @@ create: !lex->create_info.db_type) { lex->create_info.db_type= ha_default_handlerton(YYTHD); - push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN, ER_WARN_USING_OTHER_HANDLER, ER(ER_WARN_USING_OTHER_HANDLER), hton_name(lex->create_info.db_type)->str, @@ -2307,6 +2391,7 @@ create: if (add_create_index(Lex, $2, $5)) MYSQL_YYABORT; } + opt_index_lock_algorithm { } | CREATE fulltext INDEX_SYM opt_if_not_exists ident init_key_options ON table_ident { @@ -2318,6 +2403,7 @@ create: if (add_create_index(Lex, $2, $5)) MYSQL_YYABORT; } + opt_index_lock_algorithm { } | CREATE spatial INDEX_SYM opt_if_not_exists ident init_key_options ON table_ident { @@ -2329,6 +2415,7 @@ create: if (add_create_index(Lex, $2, $5)) MYSQL_YYABORT; } + opt_index_lock_algorithm { } | CREATE DATABASE opt_if_not_exists ident { Lex->create_info.default_table_charset= NULL; @@ -2405,6 +2492,7 @@ server_option: | PASSWORD TEXT_STRING_sys { Lex->server_options.password= $2.str; + Lex->contains_plaintext_password= true; } | SOCKET_SYM TEXT_STRING_sys { @@ -2777,14 +2865,16 @@ sp_fdparam: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_variable(&$1, TRUE)) + if (spc->find_variable($1, TRUE)) { my_error(ER_SP_DUP_PARAM, MYF(0), $1.str); MYSQL_YYABORT; } - sp_variable_t *spvar= spc->push_variable(&$1, - (enum enum_field_types)$3, - sp_param_in); + + sp_variable *spvar= spc->add_variable(YYTHD, + $1, + (enum 
enum_field_types) $3, + sp_variable::MODE_IN); if (lex->sphead->fill_field_definition(YYTHD, lex, (enum enum_field_types) $3, @@ -2814,14 +2904,15 @@ sp_pdparam: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_variable(&$3, TRUE)) + if (spc->find_variable($3, TRUE)) { my_error(ER_SP_DUP_PARAM, MYF(0), $3.str); MYSQL_YYABORT; } - sp_variable_t *spvar= spc->push_variable(&$3, - (enum enum_field_types)$4, - (sp_param_mode_t)$1); + sp_variable *spvar= spc->add_variable(YYTHD, + $3, + (enum enum_field_types) $4, + (sp_variable::enum_mode) $1); if (lex->sphead->fill_field_definition(YYTHD, lex, (enum enum_field_types) $4, @@ -2835,10 +2926,10 @@ sp_pdparam: ; sp_opt_inout: - /* Empty */ { $$= sp_param_in; } - | IN_SYM { $$= sp_param_in; } - | OUT_SYM { $$= sp_param_out; } - | INOUT_SYM { $$= sp_param_inout; } + /* Empty */ { $$= sp_variable::MODE_IN; } + | IN_SYM { $$= sp_variable::MODE_IN; } + | OUT_SYM { $$= sp_variable::MODE_OUT; } + | INOUT_SYM { $$= sp_variable::MODE_INOUT; } ; sp_proc_stmts: @@ -2910,13 +3001,13 @@ sp_decl: for (uint i = num_vars-$2 ; i < num_vars ; i++) { uint var_idx= pctx->var_context2runtime(i); - sp_variable_t *spvar= pctx->find_variable(var_idx); + sp_variable *spvar= pctx->find_variable(var_idx); if (!spvar) MYSQL_YYABORT; spvar->type= var_type; - spvar->dflt= dflt_value_item; + spvar->default_value= dflt_value_item; if (lex->sphead->fill_field_definition(YYTHD, lex, var_type, &spvar->field_def)) @@ -2952,36 +3043,41 @@ sp_decl: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_cond(&$2, TRUE)) + if (spc->find_condition($2, TRUE)) { my_error(ER_SP_DUP_COND, MYF(0), $2.str); MYSQL_YYABORT; } - if(YYTHD->lex->spcont->push_cond(&$2, $5)) + if(spc->add_condition(YYTHD, $2, $5)) MYSQL_YYABORT; $$.vars= $$.hndlrs= $$.curs= 0; $$.conds= 1; } | DECLARE_SYM sp_handler_type HANDLER_SYM FOR_SYM { + THD *thd= YYTHD; LEX *lex= Lex; sp_head *sp= lex->sphead; - lex->spcont= lex->spcont->push_context(LABEL_HANDLER_SCOPE); + 
sp_handler *h= lex->spcont->add_handler(thd, + (sp_handler::enum_type) $2); + + lex->spcont= lex->spcont->push_context(thd, + sp_pcontext::HANDLER_SCOPE); sp_pcontext *ctx= lex->spcont; sp_instr_hpush_jump *i= - new sp_instr_hpush_jump(sp->instructions(), ctx, $2, - ctx->current_var_count()); + new sp_instr_hpush_jump(sp->instructions(), ctx, h); + if (i == NULL || sp->add_instr(i)) MYSQL_YYABORT; /* For continue handlers, mark end of handler scope. */ - if ($2 == SP_HANDLER_CONTINUE && + if ($2 == sp_handler::CONTINUE && sp->push_backpatch(i, ctx->last_label())) MYSQL_YYABORT; - if (sp->push_backpatch(i, ctx->push_label(empty_c_string, 0))) + if (sp->push_backpatch(i, ctx->push_label(thd, EMPTY_STR, 0))) MYSQL_YYABORT; } sp_hcond_list sp_proc_stmt @@ -2989,20 +3085,19 @@ sp_decl: LEX *lex= Lex; sp_head *sp= lex->sphead; sp_pcontext *ctx= lex->spcont; - sp_label_t *hlab= lex->spcont->pop_label(); /* After this hdlr */ + sp_label *hlab= lex->spcont->pop_label(); /* After this hdlr */ sp_instr_hreturn *i; - if ($2 == SP_HANDLER_CONTINUE) + if ($2 == sp_handler::CONTINUE) { - i= new sp_instr_hreturn(sp->instructions(), ctx, - ctx->current_var_count()); + i= new sp_instr_hreturn(sp->instructions(), ctx); if (i == NULL || sp->add_instr(i)) MYSQL_YYABORT; } else { /* EXIT or UNDO handler, just jump to the end of the block */ - i= new sp_instr_hreturn(sp->instructions(), ctx, 0); + i= new sp_instr_hreturn(sp->instructions(), ctx); if (i == NULL || sp->add_instr(i) || sp->push_backpatch(i, lex->spcont->last_label())) /* Block end */ @@ -3013,8 +3108,7 @@ sp_decl: lex->spcont= ctx->pop_context(); $$.vars= $$.conds= $$.curs= 0; - $$.hndlrs= $6; - lex->spcont->add_handlers($6); + $$.hndlrs= 1; } | DECLARE_SYM ident CURSOR_SYM FOR_SYM sp_cursor_stmt { @@ -3024,7 +3118,7 @@ sp_decl: uint offp; sp_instr_cpush *i; - if (ctx->find_cursor(&$2, &offp, TRUE)) + if (ctx->find_cursor($2, &offp, TRUE)) { my_error(ER_SP_DUP_CURS, MYF(0), $2.str); delete $5; @@ -3034,7 +3128,7 @@ sp_decl: 
ctx->current_cursor_count()); if (i == NULL || sp->add_instr(i) || - ctx->push_cursor(&$2)) + ctx->add_cursor($2)) MYSQL_YYABORT; $$.vars= $$.conds= $$.hndlrs= 0; $$.curs= 1; @@ -3065,9 +3159,9 @@ sp_cursor_stmt: ; sp_handler_type: - EXIT_SYM { $$= SP_HANDLER_EXIT; } - | CONTINUE_SYM { $$= SP_HANDLER_CONTINUE; } - /*| UNDO_SYM { QQ No yet } */ + EXIT_SYM { $$= sp_handler::EXIT; } + | CONTINUE_SYM { $$= sp_handler::CONTINUE; } + /*| UNDO_SYM { QQ No yet } */ ; sp_hcond_list: @@ -3084,7 +3178,7 @@ sp_hcond_element: sp_head *sp= lex->sphead; sp_pcontext *ctx= lex->spcont->parent_context(); - if (ctx->find_handler($1)) + if (ctx->check_duplicate_handler($1)) { my_message(ER_SP_DUP_HANDLER, ER(ER_SP_DUP_HANDLER), MYF(0)); MYSQL_YYABORT; @@ -3095,7 +3189,6 @@ sp_hcond_element: (sp_instr_hpush_jump *)sp->last_instruction(); i->add_condition($1); - ctx->push_handler($1); } } ; @@ -3108,11 +3201,9 @@ sp_cond: my_error(ER_WRONG_VALUE, MYF(0), "CONDITION", "0"); MYSQL_YYABORT; } - $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$= new (YYTHD->mem_root) sp_condition_value($1); if ($$ == NULL) MYSQL_YYABORT; - $$->type= sp_cond_type_t::number; - $$->mysqlerr= $1; } | sqlstate ; @@ -3120,17 +3211,22 @@ sp_cond: sqlstate: SQLSTATE_SYM opt_value TEXT_STRING_literal { /* SQLSTATE */ - if (!sp_cond_check(&$3)) + + /* + An error is triggered: + - if the specified string is not a valid SQLSTATE, + - or if it represents the completion condition -- it is not + allowed to SIGNAL, or declare a handler for the completion + condition. 
+ */ + if (!is_sqlstate_valid(&$3) || is_sqlstate_completion($3.str)) { my_error(ER_SP_BAD_SQLSTATE, MYF(0), $3.str); MYSQL_YYABORT; } - $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$= new (YYTHD->mem_root) sp_condition_value($3.str); if ($$ == NULL) MYSQL_YYABORT; - $$->type= sp_cond_type_t::state; - memcpy($$->sqlstate, $3.str, SQLSTATE_LENGTH); - $$->sqlstate[SQLSTATE_LENGTH]= '\0'; } ; @@ -3146,7 +3242,7 @@ sp_hcond: } | ident /* CONDITION name */ { - $$= Lex->spcont->find_cond(&$1); + $$= Lex->spcont->find_condition($1, false); if ($$ == NULL) { my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str); @@ -3155,24 +3251,22 @@ sp_hcond: } | SQLWARNING_SYM /* SQLSTATEs 01??? */ { - $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$= new (YYTHD->mem_root) sp_condition_value(sp_condition_value::WARNING); if ($$ == NULL) MYSQL_YYABORT; - $$->type= sp_cond_type_t::warning; } | not FOUND_SYM /* SQLSTATEs 02??? */ { - $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$= new (YYTHD->mem_root) sp_condition_value(sp_condition_value::NOT_FOUND); if ($$ == NULL) MYSQL_YYABORT; - $$->type= sp_cond_type_t::notfound; } | SQLEXCEPTION_SYM /* All other SQLSTATEs */ { - $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$= (sp_condition_value *)YYTHD->alloc(sizeof(sp_condition_value)); + $$= new (YYTHD->mem_root) sp_condition_value(sp_condition_value::EXCEPTION); if ($$ == NULL) MYSQL_YYABORT; - $$->type= sp_cond_type_t::exception; } ; @@ -3184,9 +3278,9 @@ signal_stmt: Yacc_state *state= & thd->m_parser_state->m_yacc; lex->sql_command= SQLCOM_SIGNAL; - lex->m_stmt= new (thd->mem_root) Signal_statement(lex, $2, - state->m_set_signal_info); - if (lex->m_stmt == NULL) + lex->m_sql_cmd= + new (thd->mem_root) Sql_cmd_signal($2, state->m_set_signal_info); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } ; @@ -3195,20 +3289,20 @@ signal_value: ident { LEX *lex= Lex; - sp_cond_type_t *cond; + sp_condition_value *cond; if (lex->spcont == NULL) 
{ /* SIGNAL foo cannot be used outside of stored programs */ my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str); MYSQL_YYABORT; } - cond= lex->spcont->find_cond(&$1); + cond= lex->spcont->find_condition($1, false); if (cond == NULL) { my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str); MYSQL_YYABORT; } - if (cond->type != sp_cond_type_t::state) + if (cond->type != sp_condition_value::SQLSTATE) { my_error(ER_SIGNAL_BAD_CONDITION_TYPE, MYF(0)); MYSQL_YYABORT; @@ -3323,13 +3417,160 @@ resignal_stmt: Yacc_state *state= & thd->m_parser_state->m_yacc; lex->sql_command= SQLCOM_RESIGNAL; - lex->m_stmt= new (thd->mem_root) Resignal_statement(lex, $2, - state->m_set_signal_info); - if (lex->m_stmt == NULL) + lex->m_sql_cmd= + new (thd->mem_root) Sql_cmd_resignal($2, + state->m_set_signal_info); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } ; +get_diagnostics: + GET_SYM which_area DIAGNOSTICS_SYM diagnostics_information + { + Diagnostics_information *info= $4; + + info->set_which_da($2); + + Lex->sql_command= SQLCOM_GET_DIAGNOSTICS; + Lex->m_sql_cmd= new (YYTHD->mem_root) Sql_cmd_get_diagnostics(info); + + if (Lex->m_sql_cmd == NULL) + MYSQL_YYABORT; + } + ; + +which_area: + /* If <which area> is not specified, then CURRENT is implicit. 
*/ + { $$= Diagnostics_information::CURRENT_AREA; } + | CURRENT_SYM + { $$= Diagnostics_information::CURRENT_AREA; } + ; + +diagnostics_information: + statement_information + { + $$= new (YYTHD->mem_root) Statement_information($1); + if ($$ == NULL) + MYSQL_YYABORT; + } + | CONDITION_SYM condition_number condition_information + { + $$= new (YYTHD->mem_root) Condition_information($2, $3); + if ($$ == NULL) + MYSQL_YYABORT; + } + ; + +statement_information: + statement_information_item + { + $$= new (YYTHD->mem_root) List<Statement_information_item>; + if ($$ == NULL || $$->push_back($1)) + MYSQL_YYABORT; + } + | statement_information ',' statement_information_item + { + if ($1->push_back($3)) + MYSQL_YYABORT; + $$= $1; + } + ; + +statement_information_item: + simple_target_specification EQ statement_information_item_name + { + $$= new (YYTHD->mem_root) Statement_information_item($3, $1); + if ($$ == NULL) + MYSQL_YYABORT; + } + +simple_target_specification: + ident + { + Lex_input_stream *lip= &YYTHD->m_parser_state->m_lip; + $$= create_item_for_sp_var(YYTHD, $1, NULL, + lip->get_tok_start(), lip->get_ptr()); + + if ($$ == NULL) + MYSQL_YYABORT; + } + | '@' ident_or_text + { + $$= new (YYTHD->mem_root) Item_func_get_user_var($2); + if ($$ == NULL) + MYSQL_YYABORT; + } + ; + +statement_information_item_name: + NUMBER_SYM + { $$= Statement_information_item::NUMBER; } + | ROW_COUNT_SYM + { $$= Statement_information_item::ROW_COUNT; } + ; + +/* + Only a limited subset of <expr> are allowed in GET DIAGNOSTICS + <condition number>, same subset as for SIGNAL/RESIGNAL. 
+*/ +condition_number: + signal_allowed_expr + { $$= $1; } + ; + +condition_information: + condition_information_item + { + $$= new (YYTHD->mem_root) List<Condition_information_item>; + if ($$ == NULL || $$->push_back($1)) + MYSQL_YYABORT; + } + | condition_information ',' condition_information_item + { + if ($1->push_back($3)) + MYSQL_YYABORT; + $$= $1; + } + ; + +condition_information_item: + simple_target_specification EQ condition_information_item_name + { + $$= new (YYTHD->mem_root) Condition_information_item($3, $1); + if ($$ == NULL) + MYSQL_YYABORT; + } + +condition_information_item_name: + CLASS_ORIGIN_SYM + { $$= Condition_information_item::CLASS_ORIGIN; } + | SUBCLASS_ORIGIN_SYM + { $$= Condition_information_item::SUBCLASS_ORIGIN; } + | CONSTRAINT_CATALOG_SYM + { $$= Condition_information_item::CONSTRAINT_CATALOG; } + | CONSTRAINT_SCHEMA_SYM + { $$= Condition_information_item::CONSTRAINT_SCHEMA; } + | CONSTRAINT_NAME_SYM + { $$= Condition_information_item::CONSTRAINT_NAME; } + | CATALOG_NAME_SYM + { $$= Condition_information_item::CATALOG_NAME; } + | SCHEMA_NAME_SYM + { $$= Condition_information_item::SCHEMA_NAME; } + | TABLE_NAME_SYM + { $$= Condition_information_item::TABLE_NAME; } + | COLUMN_NAME_SYM + { $$= Condition_information_item::COLUMN_NAME; } + | CURSOR_NAME_SYM + { $$= Condition_information_item::CURSOR_NAME; } + | MESSAGE_TEXT_SYM + { $$= Condition_information_item::MESSAGE_TEXT; } + | MYSQL_ERRNO_SYM + { $$= Condition_information_item::MYSQL_ERRNO; } + | RETURNED_SQLSTATE_SYM + { $$= Condition_information_item::RETURNED_SQLSTATE; } + ; + sp_decl_idents: ident { @@ -3338,12 +3579,15 @@ sp_decl_idents: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_variable(&$1, TRUE)) + if (spc->find_variable($1, TRUE)) { my_error(ER_SP_DUP_VAR, MYF(0), $1.str); MYSQL_YYABORT; } - spc->push_variable(&$1, (enum_field_types)0, sp_param_in); + spc->add_variable(YYTHD, + $1, + MYSQL_TYPE_DECIMAL, + sp_variable::MODE_IN); $$= 1; } | sp_decl_idents 
',' ident @@ -3353,12 +3597,15 @@ sp_decl_idents: LEX *lex= Lex; sp_pcontext *spc= lex->spcont; - if (spc->find_variable(&$3, TRUE)) + if (spc->find_variable($3, TRUE)) { my_error(ER_SP_DUP_VAR, MYF(0), $3.str); MYSQL_YYABORT; } - spc->push_variable(&$3, (enum_field_types)0, sp_param_in); + spc->add_variable(YYTHD, + $3, + MYSQL_TYPE_DECIMAL, + sp_variable::MODE_IN); $$= $1 + 1; } ; @@ -3480,7 +3727,9 @@ sp_proc_stmt_unlabeled: { /* Unlabeled controls get a secret label. */ LEX *lex= Lex; - lex->spcont->push_label((char *)"", lex->sphead->instructions()); + lex->spcont->push_label(YYTHD, + EMPTY_STR, + lex->sphead->instructions()); } sp_unlabeled_control { @@ -3496,7 +3745,7 @@ sp_proc_stmt_leave: LEX *lex= Lex; sp_head *sp = lex->sphead; sp_pcontext *ctx= lex->spcont; - sp_label_t *lab= ctx->find_label($2.str); + sp_label *lab= ctx->find_label($2); if (! lab) { @@ -3516,7 +3765,7 @@ sp_proc_stmt_leave: there are no hpop/cpop at the jump destination, so we should include the block context here for cleanup. */ - bool exclusive= (lab->type == SP_LAB_BEGIN); + bool exclusive= (lab->type == sp_label::BEGIN); n= ctx->diff_handlers(lab->ctx, exclusive); if (n) @@ -3549,9 +3798,9 @@ sp_proc_stmt_iterate: LEX *lex= Lex; sp_head *sp= lex->sphead; sp_pcontext *ctx= lex->spcont; - sp_label_t *lab= ctx->find_label($2.str); + sp_label *lab= ctx->find_label($2); - if (! lab || lab->type != SP_LAB_ITER) + if (! lab || lab->type != sp_label::ITERATION) { my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "ITERATE", $2.str); MYSQL_YYABORT; @@ -3594,7 +3843,7 @@ sp_proc_stmt_open: uint offset; sp_instr_copen *i; - if (! lex->spcont->find_cursor(&$2, &offset)) + if (! lex->spcont->find_cursor($2, &offset, false)) { my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str); MYSQL_YYABORT; @@ -3614,7 +3863,7 @@ sp_proc_stmt_fetch: uint offset; sp_instr_cfetch *i; - if (! lex->spcont->find_cursor(&$3, &offset)) + if (! 
lex->spcont->find_cursor($3, &offset, false)) { my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $3.str); MYSQL_YYABORT; @@ -3636,7 +3885,7 @@ sp_proc_stmt_close: uint offset; sp_instr_cclose *i; - if (! lex->spcont->find_cursor(&$2, &offset)) + if (! lex->spcont->find_cursor($2, &offset, false)) { my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str); MYSQL_YYABORT; @@ -3660,9 +3909,9 @@ sp_fetch_list: LEX *lex= Lex; sp_head *sp= lex->sphead; sp_pcontext *spc= lex->spcont; - sp_variable_t *spv; + sp_variable *spv; - if (!spc || !(spv = spc->find_variable(&$1))) + if (!spc || !(spv = spc->find_variable($1, false))) { my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str); MYSQL_YYABORT; @@ -3680,9 +3929,9 @@ sp_fetch_list: LEX *lex= Lex; sp_head *sp= lex->sphead; sp_pcontext *spc= lex->spcont; - sp_variable_t *spv; + sp_variable *spv; - if (!spc || !(spv = spc->find_variable(&$3))) + if (!spc || !(spv = spc->find_variable($3, false))) { my_error(ER_SP_UNDECLARED_VAR, MYF(0), $3.str); MYSQL_YYABORT; @@ -3708,7 +3957,7 @@ sp_if: sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, ctx, $2, lex); if (i == NULL || - sp->push_backpatch(i, ctx->push_label((char *)"", 0)) || + sp->push_backpatch(i, ctx->push_label(YYTHD, EMPTY_STR, 0)) || sp->add_cont_backpatch(i) || sp->add_instr(i)) MYSQL_YYABORT; @@ -3725,7 +3974,7 @@ sp_if: sp->add_instr(i)) MYSQL_YYABORT; sp->backpatch(ctx->pop_label()); - sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + sp->push_backpatch(i, ctx->push_label(YYTHD, EMPTY_STR, 0)); } sp_elseifs { @@ -3867,7 +4116,7 @@ sp_labeled_control: { LEX *lex= Lex; sp_pcontext *ctx= lex->spcont; - sp_label_t *lab= ctx->find_label($1.str); + sp_label *lab= ctx->find_label($1); if (lab) { @@ -3876,19 +4125,18 @@ sp_labeled_control: } else { - lab= lex->spcont->push_label($1.str, - lex->sphead->instructions()); - lab->type= SP_LAB_ITER; + lab= lex->spcont->push_label(YYTHD, $1, lex->sphead->instructions()); + lab->type= sp_label::ITERATION; } } sp_unlabeled_control sp_opt_label 
{ LEX *lex= Lex; - sp_label_t *lab= lex->spcont->pop_label(); + sp_label *lab= lex->spcont->pop_label(); if ($5.str) { - if (my_strcasecmp(system_charset_info, $5.str, lab->name) != 0) + if (my_strcasecmp(system_charset_info, $5.str, lab->name.str) != 0) { my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str); MYSQL_YYABORT; @@ -3908,7 +4156,7 @@ sp_labeled_block: { LEX *lex= Lex; sp_pcontext *ctx= lex->spcont; - sp_label_t *lab= ctx->find_label($1.str); + sp_label *lab= ctx->find_label($1); if (lab) { @@ -3916,18 +4164,17 @@ sp_labeled_block: MYSQL_YYABORT; } - lab= lex->spcont->push_label($1.str, - lex->sphead->instructions()); - lab->type= SP_LAB_BEGIN; + lab= lex->spcont->push_label(YYTHD, $1, lex->sphead->instructions()); + lab->type= sp_label::BEGIN; } sp_block_content sp_opt_label { LEX *lex= Lex; - sp_label_t *lab= lex->spcont->pop_label(); + sp_label *lab= lex->spcont->pop_label(); if ($5.str) { - if (my_strcasecmp(system_charset_info, $5.str, lab->name) != 0) + if (my_strcasecmp(system_charset_info, $5.str, lab->name.str) != 0) { my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str); MYSQL_YYABORT; @@ -3940,8 +4187,8 @@ sp_unlabeled_block: { /* Unlabeled blocks get a secret label. */ LEX *lex= Lex; uint ip= lex->sphead->instructions(); - sp_label_t *lab= lex->spcont->push_label((char *)"", ip); - lab->type= SP_LAB_BEGIN; + sp_label *lab= lex->spcont->push_label(YYTHD, EMPTY_STR, ip); + lab->type= sp_label::BEGIN; } sp_block_content { @@ -3956,7 +4203,8 @@ sp_block_content: together. No [[NOT] ATOMIC] yet, and we need to figure out how make it coexist with the existing BEGIN COMMIT/ROLLBACK. 
*/ LEX *lex= Lex; - lex->spcont= lex->spcont->push_context(LABEL_DEFAULT_SCOPE); + lex->spcont= lex->spcont->push_context(YYTHD, + sp_pcontext::REGULAR_SCOPE); } sp_decls sp_proc_stmts @@ -3992,7 +4240,7 @@ sp_unlabeled_control: { LEX *lex= Lex; uint ip= lex->sphead->instructions(); - sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_label *lab= lex->spcont->last_label(); /* Jumping back */ sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip); if (i == NULL || lex->sphead->add_instr(i)) @@ -4020,7 +4268,7 @@ sp_unlabeled_control: { LEX *lex= Lex; uint ip= lex->sphead->instructions(); - sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_label *lab= lex->spcont->last_label(); /* Jumping back */ sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip); if (i == NULL || lex->sphead->add_instr(i)) @@ -4033,7 +4281,7 @@ sp_unlabeled_control: { LEX *lex= Lex; uint ip= lex->sphead->instructions(); - sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_label *lab= lex->spcont->last_label(); /* Jumping back */ sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont, $5, lab->ip, lex); @@ -4549,7 +4797,7 @@ partitioning: } if (lex->sql_command == SQLCOM_ALTER_TABLE) { - lex->alter_info.flags|= ALTER_PARTITION; + lex->alter_info.flags|= Alter_info::ALTER_PARTITION; } } partition @@ -5349,6 +5597,70 @@ create_table_option: ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS); Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS; } + | STATS_AUTO_RECALC_SYM opt_equal ulong_num + { + switch($3) { + case 0: + Lex->create_info.stats_auto_recalc= HA_STATS_AUTO_RECALC_OFF; + break; + case 1: + Lex->create_info.stats_auto_recalc= HA_STATS_AUTO_RECALC_ON; + break; + default: + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + Lex->create_info.used_fields|= HA_CREATE_USED_STATS_AUTO_RECALC; + } + | STATS_AUTO_RECALC_SYM opt_equal DEFAULT + { + Lex->create_info.stats_auto_recalc= 
HA_STATS_AUTO_RECALC_DEFAULT; + Lex->create_info.used_fields|= HA_CREATE_USED_STATS_AUTO_RECALC; + } + | STATS_PERSISTENT_SYM opt_equal ulong_num + { + switch($3) { + case 0: + Lex->create_info.table_options|= HA_OPTION_NO_STATS_PERSISTENT; + break; + case 1: + Lex->create_info.table_options|= HA_OPTION_STATS_PERSISTENT; + break; + default: + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + Lex->create_info.used_fields|= HA_CREATE_USED_STATS_PERSISTENT; + } + | STATS_PERSISTENT_SYM opt_equal DEFAULT + { + Lex->create_info.table_options&= + ~(HA_OPTION_STATS_PERSISTENT | HA_OPTION_NO_STATS_PERSISTENT); + Lex->create_info.used_fields|= HA_CREATE_USED_STATS_PERSISTENT; + } + | STATS_SAMPLE_PAGES_SYM opt_equal ulong_num + { + /* From user point of view STATS_SAMPLE_PAGES can be specified as + STATS_SAMPLE_PAGES=N (where 0<N<=65535, it does not make sense to + scan 0 pages) or STATS_SAMPLE_PAGES=default. Internally we record + =default as 0. See create_frm() in sql/table.cc, we use only two + bytes for stats_sample_pages and this is why we do not allow + larger values. 65535 pages, 16kb each means to sample 1GB, which + is impractical. If at some point this needs to be extended, then + we can store the higher bits from stats_sample_pages in .frm too. */ + if ($3 == 0 || $3 > 0xffff) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + Lex->create_info.stats_sample_pages=$3; + Lex->create_info.used_fields|= HA_CREATE_USED_STATS_SAMPLE_PAGES; + } + | STATS_SAMPLE_PAGES_SYM opt_equal DEFAULT + { + Lex->create_info.stats_sample_pages=0; + Lex->create_info.used_fields|= HA_CREATE_USED_STATS_SAMPLE_PAGES; + } | CHECKSUM_SYM opt_equal ulong_num { Lex->create_info.table_options|= $3 ? 
HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; @@ -5515,7 +5827,7 @@ storage_engines: MYSQL_YYABORT; } $$= 0; - push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_STORAGE_ENGINE, ER(ER_UNKNOWN_STORAGE_ENGINE), $1.str); @@ -5627,7 +5939,8 @@ key_def: { LEX *lex=Lex; Key *key= new Foreign_key($4.str ? $4 : $1, lex->col_list, - $8, + $8->db, + $8->table, lex->ref_list, lex->fk_delete_opt, lex->fk_update_opt, @@ -5641,7 +5954,7 @@ key_def: &default_key_create_info, 1)) MYSQL_YYABORT; /* Only used for ALTER TABLE. Ignored otherwise. */ - lex->alter_info.flags|= ALTER_FOREIGN_KEY; + lex->alter_info.flags|= Alter_info::ADD_FOREIGN_KEY; } | opt_constraint check_constraint { @@ -5739,13 +6052,13 @@ vcol_attribute: { LEX *lex=Lex; lex->type|= UNIQUE_FLAG; - lex->alter_info.flags|= ALTER_ADD_INDEX; + lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX; } | UNIQUE_SYM KEY_SYM { LEX *lex=Lex; lex->type|= UNIQUE_KEY_FLAG; - lex->alter_info.flags|= ALTER_ADD_INDEX; + lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX; } | COMMENT_SYM TEXT_STRING_sys { Lex->comment= $2; } ; @@ -5860,7 +6173,7 @@ type: { char buff[sizeof("YEAR()") + MY_INT64_NUM_DECIMAL_DIGITS + 1]; my_snprintf(buff, sizeof(buff), "YEAR(%lu)", length); - push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_NOTE, ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), buff, "YEAR(4)"); @@ -6091,25 +6404,25 @@ attribute: { LEX *lex=Lex; lex->type|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNIQUE_FLAG; - lex->alter_info.flags|= ALTER_ADD_INDEX; + lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX; } | opt_primary KEY_SYM { LEX *lex=Lex; lex->type|= PRI_KEY_FLAG | NOT_NULL_FLAG; - lex->alter_info.flags|= ALTER_ADD_INDEX; + lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX; } | UNIQUE_SYM { LEX *lex=Lex; lex->type|= UNIQUE_FLAG; - lex->alter_info.flags|= ALTER_ADD_INDEX; + 
lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX; } | UNIQUE_SYM KEY_SYM { LEX *lex=Lex; lex->type|= UNIQUE_KEY_FLAG; - lex->alter_info.flags|= ALTER_ADD_INDEX; + lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX; } | COMMENT_SYM TEXT_STRING_sys { Lex->comment= $2; } | COLLATE_SYM collation_name @@ -6323,7 +6636,6 @@ opt_bin_mod: | BINARY { Lex->type|= BINCMP_FLAG; } ; - opt_primary: /* empty */ | PRIMARY_SYM @@ -6632,41 +6944,40 @@ string_list: */ alter: - ALTER alter_options TABLE_SYM table_ident + ALTER + { + Lex->name.str= 0; + Lex->name.length= 0; + Lex->sql_command= SQLCOM_ALTER_TABLE; + Lex->duplicates= DUP_ERROR; + Lex->col_list.empty(); + Lex->select_lex.init_order(); + bzero(&Lex->create_info, sizeof(Lex->create_info)); + Lex->create_info.db_type= 0; + Lex->create_info.default_table_charset= NULL; + Lex->create_info.row_type= ROW_TYPE_NOT_USED; + Lex->alter_info.reset(); + Lex->no_write_to_binlog= 0; + Lex->create_info.storage_media= HA_SM_DEFAULT; + DBUG_ASSERT(!Lex->m_sql_cmd); + } + alter_options TABLE_SYM table_ident { - THD *thd= YYTHD; - LEX *lex= thd->lex; - lex->name.str= 0; - lex->name.length= 0; - lex->sql_command= SQLCOM_ALTER_TABLE; - lex->duplicates= DUP_ERROR; - if (!lex->select_lex.add_table_to_list(thd, $4, NULL, + if (!Lex->select_lex.add_table_to_list(YYTHD, $5, NULL, TL_OPTION_UPDATING, TL_READ_NO_INSERT, - MDL_SHARED_NO_WRITE)) + MDL_SHARED_UPGRADABLE)) MYSQL_YYABORT; - lex->col_list.empty(); - lex->select_lex.init_order(); - lex->select_lex.db= (lex->select_lex.table_list.first)->db; - bzero((char*) &lex->create_info,sizeof(lex->create_info)); - lex->create_info.db_type= 0; - lex->create_info.default_table_charset= NULL; - lex->create_info.row_type= ROW_TYPE_NOT_USED; - lex->alter_info.reset(); - lex->no_write_to_binlog= 0; - lex->create_info.storage_media= HA_SM_DEFAULT; - lex->create_last_non_select_table= lex->last_table(); - DBUG_ASSERT(!lex->m_stmt); + Lex->select_lex.db= (Lex->select_lex.table_list.first)->db; + 
Lex->create_last_non_select_table= Lex->last_table(); } alter_commands { - THD *thd= YYTHD; - LEX *lex= thd->lex; - if (!lex->m_stmt) + if (!Lex->m_sql_cmd) { /* Create a generic ALTER TABLE statment. */ - lex->m_stmt= new (thd->mem_root) Alter_table_statement(lex); - if (lex->m_stmt == NULL) + Lex->m_sql_cmd= new (YYTHD->mem_root) Sql_cmd_alter_table(); + if (Lex->m_sql_cmd == NULL) MYSQL_YYABORT; } } @@ -6857,8 +7168,22 @@ ident_or_empty: alter_commands: /* empty */ - | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; } - | IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; } + | DISCARD TABLESPACE + { + Lex->m_sql_cmd= new (YYTHD->mem_root) + Sql_cmd_discard_import_tablespace( + Sql_cmd_discard_import_tablespace::DISCARD_TABLESPACE); + if (Lex->m_sql_cmd == NULL) + MYSQL_YYABORT; + } + | IMPORT TABLESPACE + { + Lex->m_sql_cmd= new (YYTHD->mem_root) + Sql_cmd_discard_import_tablespace( + Sql_cmd_discard_import_tablespace::IMPORT_TABLESPACE); + if (Lex->m_sql_cmd == NULL) + MYSQL_YYABORT; + } | alter_list opt_partitioning | alter_list @@ -6870,19 +7195,18 @@ alter_commands: From here we insert a number of commands to manage the partitions of a partitioned table such as adding partitions, dropping partitions, reorganising partitions in various manners. In future releases the list - will be longer and also include moving partitions to a - new table and so forth. + will be longer. 
*/ | add_partition_rule | DROP PARTITION_SYM opt_if_exists alt_part_name_list { - Lex->alter_info.flags|= ALTER_DROP_PARTITION; + Lex->alter_info.flags|= Alter_info::ALTER_DROP_PARTITION; } | REBUILD_SYM PARTITION_SYM opt_no_write_to_binlog all_or_alt_part_name_list { LEX *lex= Lex; - lex->alter_info.flags|= ALTER_REBUILD_PARTITION; + lex->alter_info.flags|= Alter_info::ALTER_REBUILD_PARTITION; lex->no_write_to_binlog= $3; } | OPTIMIZE PARTITION_SYM opt_no_write_to_binlog @@ -6892,10 +7216,10 @@ alter_commands: LEX *lex= thd->lex; lex->no_write_to_binlog= $3; lex->check_opt.init(); - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) - Alter_table_optimize_partition_statement(lex); - if (lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) + Sql_cmd_alter_table_optimize_partition(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } opt_no_write_to_binlog @@ -6906,21 +7230,21 @@ alter_commands: LEX *lex= thd->lex; lex->no_write_to_binlog= $3; lex->check_opt.init(); - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) - Alter_table_analyze_partition_statement(lex); - if (lex->m_stmt == NULL) - MYSQL_YYABORT; + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) + Sql_cmd_alter_table_analyze_partition(); + if (lex->m_sql_cmd == NULL) + MYSQL_YYABORT; } | CHECK_SYM PARTITION_SYM all_or_alt_part_name_list { THD *thd= YYTHD; LEX *lex= thd->lex; lex->check_opt.init(); - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) - Alter_table_check_partition_statement(lex); - if (lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) + Sql_cmd_alter_table_check_partition(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } opt_mi_check_type @@ -6931,17 +7255,17 @@ alter_commands: LEX *lex= thd->lex; lex->no_write_to_binlog= $3; lex->check_opt.init(); - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) - Alter_table_repair_partition_statement(lex); - if 
(lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) + Sql_cmd_alter_table_repair_partition(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } opt_mi_repair_type | COALESCE PARTITION_SYM opt_no_write_to_binlog real_ulong_num { LEX *lex= Lex; - lex->alter_info.flags|= ALTER_COALESCE_PARTITION; + lex->alter_info.flags|= Alter_info::ALTER_COALESCE_PARTITION; lex->no_write_to_binlog= $3; lex->alter_info.num_parts= $4; } @@ -6950,26 +7274,51 @@ alter_commands: THD *thd= YYTHD; LEX *lex= thd->lex; lex->check_opt.init(); - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) - Alter_table_truncate_partition_statement(lex); - if (lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) + Sql_cmd_alter_table_truncate_partition(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } | reorg_partition_rule + | EXCHANGE_SYM PARTITION_SYM alt_part_name_item + WITH TABLE_SYM table_ident have_partitioning + { + THD *thd= YYTHD; + LEX *lex= thd->lex; + size_t dummy; + lex->select_lex.db=$6->db.str; + if (lex->select_lex.db == NULL && + lex->copy_db_to(&lex->select_lex.db, &dummy)) + { + MYSQL_YYABORT; + } + lex->name= $6->table; + lex->alter_info.flags|= Alter_info::ALTER_EXCHANGE_PARTITION; + if (!lex->select_lex.add_table_to_list(thd, $6, NULL, + TL_OPTION_UPDATING, + TL_READ_NO_INSERT, + MDL_SHARED_NO_WRITE)) + MYSQL_YYABORT; + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) + Sql_cmd_alter_table_exchange_partition(); + if (lex->m_sql_cmd == NULL) + MYSQL_YYABORT; + } ; remove_partitioning: REMOVE_SYM PARTITIONING_SYM { - Lex->alter_info.flags|= ALTER_REMOVE_PARTITIONING; + Lex->alter_info.flags|= Alter_info::ALTER_REMOVE_PARTITIONING; } ; all_or_alt_part_name_list: ALL { - Lex->alter_info.flags|= ALTER_ALL_PARTITION; + Lex->alter_info.flags|= Alter_info::ALTER_ALL_PARTITION; } | alt_part_name_list ; @@ -6984,7 +7333,7 @@ add_partition_rule: mem_alloc_error(sizeof(partition_info)); 
MYSQL_YYABORT; } - lex->alter_info.flags|= ALTER_ADD_PARTITION; + lex->alter_info.flags|= Alter_info::ALTER_ADD_PARTITION; lex->no_write_to_binlog= $4; } add_part_extra @@ -7022,11 +7371,11 @@ reorg_partition_rule: reorg_parts_rule: /* empty */ { - Lex->alter_info.flags|= ALTER_TABLE_REORG; + Lex->alter_info.flags|= Alter_info::ALTER_TABLE_REORG; } | alt_part_name_list { - Lex->alter_info.flags|= ALTER_REORGANIZE_PARTITION; + Lex->alter_info.flags|= Alter_info::ALTER_REORGANIZE_PARTITION; } INTO '(' part_def_list ')' { @@ -7065,7 +7414,7 @@ add_column: { LEX *lex=Lex; lex->change=0; - lex->alter_info.flags|= ALTER_ADD_COLUMN; + lex->alter_info.flags|= Alter_info::ALTER_ADD_COLUMN; } ; @@ -7077,17 +7426,18 @@ alter_list_item: | ADD key_def { Lex->create_last_non_select_table= Lex->last_table(); - Lex->alter_info.flags|= ALTER_ADD_INDEX; + Lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX; } | add_column '(' create_field_list ')' { - Lex->alter_info.flags|= ALTER_ADD_COLUMN | ALTER_ADD_INDEX; + Lex->alter_info.flags|= Alter_info::ALTER_ADD_COLUMN | + Alter_info::ALTER_ADD_INDEX; } | CHANGE opt_column opt_if_exists field_ident { LEX *lex=Lex; lex->change= $4.str; - lex->alter_info.flags|= ALTER_CHANGE_COLUMN; + lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN; lex->option_list= NULL; } field_spec opt_place @@ -7101,7 +7451,7 @@ alter_list_item: lex->default_value= lex->on_update_value= 0; lex->comment=null_lex_str; lex->charset= NULL; - lex->alter_info.flags|= ALTER_CHANGE_COLUMN; + lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN; lex->vcol_info= 0; lex->option_list= NULL; } @@ -7129,11 +7479,16 @@ alter_list_item: if (ad == NULL) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad); - lex->alter_info.flags|= ALTER_DROP_COLUMN; + lex->alter_info.flags|= Alter_info::ALTER_DROP_COLUMN; } - | DROP FOREIGN KEY_SYM opt_if_exists opt_ident + | DROP FOREIGN KEY_SYM opt_if_exists field_ident { - Lex->alter_info.flags|= ALTER_DROP_INDEX | ALTER_FOREIGN_KEY; 
+ LEX *lex=Lex; + Alter_drop *ad= new Alter_drop(Alter_drop::FOREIGN_KEY, $5.str, $4); + if (ad == NULL) + MYSQL_YYABORT; + lex->alter_info.drop_list.push_back(ad); + lex->alter_info.flags|= Alter_info::DROP_FOREIGN_KEY; } | DROP PRIMARY_SYM KEY_SYM { @@ -7143,7 +7498,7 @@ alter_list_item: if (ad == NULL) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad); - lex->alter_info.flags|= ALTER_DROP_INDEX; + lex->alter_info.flags|= Alter_info::ALTER_DROP_INDEX; } | DROP key_or_index opt_if_exists field_ident { @@ -7152,19 +7507,19 @@ alter_list_item: if (ad == NULL) MYSQL_YYABORT; lex->alter_info.drop_list.push_back(ad); - lex->alter_info.flags|= ALTER_DROP_INDEX; + lex->alter_info.flags|= Alter_info::ALTER_DROP_INDEX; } | DISABLE_SYM KEYS { LEX *lex=Lex; - lex->alter_info.keys_onoff= DISABLE; - lex->alter_info.flags|= ALTER_KEYS_ONOFF; + lex->alter_info.keys_onoff= Alter_info::DISABLE; + lex->alter_info.flags|= Alter_info::ALTER_KEYS_ONOFF; } | ENABLE_SYM KEYS { LEX *lex=Lex; - lex->alter_info.keys_onoff= ENABLE; - lex->alter_info.flags|= ALTER_KEYS_ONOFF; + lex->alter_info.keys_onoff= Alter_info::ENABLE; + lex->alter_info.flags|= Alter_info::ALTER_KEYS_ONOFF; } | ALTER opt_column field_ident SET DEFAULT signed_literal { @@ -7173,7 +7528,7 @@ alter_list_item: if (ac == NULL) MYSQL_YYABORT; lex->alter_info.alter_list.push_back(ac); - lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT; + lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN_DEFAULT; } | ALTER opt_column field_ident DROP DEFAULT { @@ -7182,7 +7537,7 @@ alter_list_item: if (ac == NULL) MYSQL_YYABORT; lex->alter_info.alter_list.push_back(ac); - lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT; + lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN_DEFAULT; } | RENAME opt_to table_ident { @@ -7201,7 +7556,7 @@ alter_list_item: MYSQL_YYABORT; } lex->name= $3->table; - lex->alter_info.flags|= ALTER_RENAME; + lex->alter_info.flags|= Alter_info::ALTER_RENAME; } | CONVERT_SYM TO_SYM charset 
charset_name_or_default opt_collate { @@ -7222,12 +7577,12 @@ alter_list_item: lex->create_info.default_table_charset= $5; lex->create_info.used_fields|= (HA_CREATE_USED_CHARSET | HA_CREATE_USED_DEFAULT_CHARSET); - lex->alter_info.flags|= ALTER_CONVERT; + lex->alter_info.flags|= Alter_info::ALTER_CONVERT; } | create_table_options_space_separated { LEX *lex=Lex; - lex->alter_info.flags|= ALTER_OPTIONS; + lex->alter_info.flags|= Alter_info::ALTER_OPTIONS; if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) && !lex->create_info.db_type) { @@ -7236,12 +7591,53 @@ alter_list_item: } | FORCE_SYM { - Lex->alter_info.flags|= ALTER_RECREATE; + Lex->alter_info.flags|= Alter_info::ALTER_RECREATE; } | alter_order_clause { LEX *lex=Lex; - lex->alter_info.flags|= ALTER_ORDER; + lex->alter_info.flags|= Alter_info::ALTER_ORDER; + } + | alter_algorithm_option + | alter_lock_option + ; + +opt_index_lock_algorithm: + /* empty */ + | alter_lock_option + | alter_algorithm_option + | alter_lock_option alter_algorithm_option + | alter_algorithm_option alter_lock_option + +alter_algorithm_option: + ALGORITHM_SYM opt_equal DEFAULT + { + Lex->alter_info.requested_algorithm= + Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT; + } + | ALGORITHM_SYM opt_equal ident + { + if (Lex->alter_info.set_requested_algorithm(&$3)) + { + my_error(ER_UNKNOWN_ALTER_ALGORITHM, MYF(0), $3.str); + MYSQL_YYABORT; + } + } + ; + +alter_lock_option: + LOCK_SYM opt_equal DEFAULT + { + Lex->alter_info.requested_lock= + Alter_info::ALTER_TABLE_LOCK_DEFAULT; + } + | LOCK_SYM opt_equal ident + { + if (Lex->alter_info.set_requested_lock(&$3)) + { + my_error(ER_UNKNOWN_ALTER_LOCK, MYF(0), $3.str); + MYSQL_YYABORT; + } } ; @@ -7256,7 +7652,7 @@ opt_ignore: ; alter_options: - { Lex->ignore= Lex->online= 0;} alter_options_part2 + { Lex->ignore= 0;} alter_options_part2 ; alter_options_part2: @@ -7271,7 +7667,11 @@ alter_option_list: alter_option: IGNORE_SYM { Lex->ignore= 1;} - | ONLINE_SYM { Lex->online= 1;} + | ONLINE_SYM 
+ { + Lex->alter_info.requested_algorithm= + Alter_info::ALTER_TABLE_ALGORITHM_INPLACE; + } opt_restrict: @@ -7282,8 +7682,16 @@ opt_restrict: opt_place: /* empty */ {} - | AFTER_SYM ident { store_position_for_column($2.str); } - | FIRST_SYM { store_position_for_column(first_keyword); } + | AFTER_SYM ident + { + store_position_for_column($2.str); + Lex->alter_info.flags |= Alter_info::ALTER_COLUMN_ORDER; + } + | FIRST_SYM + { + store_position_for_column(first_keyword); + Lex->alter_info.flags |= Alter_info::ALTER_COLUMN_ORDER; + } ; opt_to: @@ -7473,9 +7881,9 @@ repair: { THD *thd= YYTHD; LEX* lex= thd->lex; - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) Repair_table_statement(lex); - if (lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_repair_table(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } ; @@ -7511,9 +7919,9 @@ analyze: { THD *thd= YYTHD; LEX* lex= thd->lex; - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) Analyze_table_statement(lex); - if (lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_analyze_table(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } ; @@ -7636,9 +8044,9 @@ check: { THD *thd= YYTHD; LEX* lex= thd->lex; - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) Check_table_statement(lex); - if (lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_check_table(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } ; @@ -7677,9 +8085,9 @@ optimize: { THD *thd= YYTHD; LEX* lex= thd->lex; - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) Optimize_table_statement(lex); - if (lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_optimize_table(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } ; @@ -7826,7 +8234,7 @@ preload_keys_parts: adm_partition: PARTITION_SYM have_partitioning { - Lex->alter_info.flags|= 
ALTER_ADMIN_PARTITION; + Lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION; } '(' all_or_alt_part_name_list ')' ; @@ -9320,6 +9728,7 @@ function_call_conflict: | OLD_PASSWORD '(' expr ')' { $$= new (YYTHD->mem_root) Item_func_old_password($3); + Lex->contains_plaintext_password= true; if ($$ == NULL) MYSQL_YYABORT; } @@ -9327,7 +9736,8 @@ function_call_conflict: { THD *thd= YYTHD; Item* i1; - if (thd->variables.old_passwords) + Lex->contains_plaintext_password= true; + if (thd->variables.old_passwords == 1) i1= new (thd->mem_root) Item_func_old_password($3); else i1= new (thd->mem_root) Item_func_password($3); @@ -9353,6 +9763,14 @@ function_call_conflict: if ($$ == NULL) MYSQL_YYABORT; } + | ROW_COUNT_SYM '(' ')' + { + $$= new (YYTHD->mem_root) Item_func_row_count(); + if ($$ == NULL) + MYSQL_YYABORT; + Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION); + Lex->safe_to_cache_query= 0; + } | TRUNCATE_SYM '(' expr ',' expr ')' { $$= new (YYTHD->mem_root) Item_func_round($3,$5,1); @@ -10163,6 +10581,22 @@ normal_join: | CROSS JOIN_SYM { $$ = 0; } ; +/* + table PARTITION (list of partitions), reusing using_list instead of creating + a new rule for partition_list. 
+*/ +opt_use_partition: + /* empty */ { $$= 0;} + | use_partition + ; + +use_partition: + PARTITION_SYM '(' using_list ')' have_partitioning + { + $$= $3; + } + ; + /* This is a flattening of the rules <table factor> and <table primary> in the SQL:2003 standard, since we don't have <sample clause> @@ -10176,13 +10610,14 @@ table_factor: SELECT_LEX *sel= Select; sel->table_join_options= 0; } - table_ident opt_table_alias opt_key_definition + table_ident opt_use_partition opt_table_alias opt_key_definition { - if (!($$= Select->add_table_to_list(YYTHD, $2, $3, + if (!($$= Select->add_table_to_list(YYTHD, $2, $4, Select->get_table_join_options(), YYPS->m_lock_type, YYPS->m_mdl_type, - Select->pop_index_hints()))) + Select->pop_index_hints(), + $3))) MYSQL_YYABORT; Select->add_joined_table($$); } @@ -10252,7 +10687,7 @@ table_factor: if (ti == NULL) MYSQL_YYABORT; if (!($$= sel->add_table_to_list(lex->thd, - new Table_ident(unit), $5, 0, + ti, $5, 0, TL_READ, MDL_SHARED_READ))) MYSQL_YYABORT; @@ -10844,9 +11279,9 @@ limit_option: THD *thd= YYTHD; LEX *lex= thd->lex; Lex_input_stream *lip= & thd->m_parser_state->m_lip; - sp_variable_t *spv; + sp_variable *spv; sp_pcontext *spc = lex->spcont; - if (spc && (spv = spc->find_variable(&$1))) + if (spc && (spv = spc->find_variable($1, false))) { splocal= new (thd->mem_root) Item_splocal($1, spv->offset, spv->type, @@ -11066,9 +11501,9 @@ select_var_ident: | ident_or_text { LEX *lex=Lex; - sp_variable_t *t; + sp_variable *t; - if (!lex->spcont || !(t=lex->spcont->find_variable(&$1))) + if (!lex->spcont || !(t=lex->spcont->find_variable($1, false))) { my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str); MYSQL_YYABORT; @@ -11177,12 +11612,12 @@ drop: MYSQL_YYABORT; lex->sql_command= SQLCOM_DROP_INDEX; lex->alter_info.reset(); - lex->alter_info.flags= ALTER_DROP_INDEX; + lex->alter_info.flags= Alter_info::ALTER_DROP_INDEX; lex->alter_info.drop_list.push_back(ad); if (!lex->current_select->add_table_to_list(lex->thd, $6, NULL, 
TL_OPTION_UPDATING, TL_READ_NO_INSERT, - MDL_SHARED_NO_WRITE)) + MDL_SHARED_UPGRADABLE)) MYSQL_YYABORT; } | DROP DATABASE opt_if_exists ident @@ -11310,6 +11745,19 @@ table_name: } ; +table_name_with_opt_use_partition: + table_ident opt_use_partition + { + if (!Select->add_table_to_list(YYTHD, $1, NULL, + TL_OPTION_UPDATING, + YYPS->m_lock_type, + YYPS->m_mdl_type, + NULL, + $2)) + MYSQL_YYABORT; + } + ; + table_alias_ref_list: table_alias_ref | table_alias_ref_list ',' table_alias_ref @@ -11422,7 +11870,7 @@ insert2: ; insert_table: - table_name + table_name_with_opt_use_partition { LEX *lex=Lex; lex->field_list.empty(); @@ -11622,11 +12070,13 @@ delete: ; single_multi: - FROM table_ident + FROM table_ident opt_use_partition { if (!Select->add_table_to_list(YYTHD, $2, NULL, TL_OPTION_UPDATING, YYPS->m_lock_type, - YYPS->m_mdl_type)) + YYPS->m_mdl_type, + NULL, + $3)) MYSQL_YYABORT; YYPS->m_lock_type= TL_READ_DEFAULT; YYPS->m_mdl_type= MDL_SHARED_READ; @@ -11723,9 +12173,9 @@ truncate: { THD *thd= YYTHD; LEX* lex= thd->lex; - DBUG_ASSERT(!lex->m_stmt); - lex->m_stmt= new (thd->mem_root) Truncate_statement(lex); - if (lex->m_stmt == NULL) + DBUG_ASSERT(!lex->m_sql_cmd); + lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_truncate_table(); + if (lex->m_sql_cmd == NULL) MYSQL_YYABORT; } ; @@ -11930,7 +12380,7 @@ show_param: { LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_AUTHORS; - push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN, ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT, ER(ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT), "SHOW AUTHORS"); @@ -11939,7 +12389,7 @@ show_param: { LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_CONTRIBUTORS; - push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN, ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT, ER(ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT), "SHOW CONTRIBUTORS"); @@ -12520,18 +12970,18 @@ load: if (!(lex->exchange= new 
sql_exchange($7.str, 0, $2))) MYSQL_YYABORT; } - opt_duplicate INTO TABLE_SYM table_ident + opt_duplicate INTO TABLE_SYM table_ident opt_use_partition { LEX *lex=Lex; if (!Select->add_table_to_list(YYTHD, $12, NULL, TL_OPTION_UPDATING, - $4, MDL_SHARED_WRITE)) + $4, MDL_SHARED_WRITE, NULL, $13)) MYSQL_YYABORT; lex->field_list.empty(); lex->update_list.empty(); lex->value_list.empty(); } opt_load_data_charset - { Lex->exchange->cs= $14; } + { Lex->exchange->cs= $15; } opt_xml_rows_identified_by opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec opt_load_data_set_spec @@ -13046,9 +13496,9 @@ simple_ident: THD *thd= YYTHD; LEX *lex= thd->lex; Lex_input_stream *lip= YYLIP; - sp_variable_t *spv; + sp_variable *spv; sp_pcontext *spc = lex->spcont; - if (spc && (spv = spc->find_variable(&$1))) + if (spc && (spv = spc->find_variable($1, false))) { /* We're compiling a stored procedure and found a variable */ if (! lex->parsing_options.allows_variable) @@ -13526,7 +13976,6 @@ keyword: | OPTIONS_SYM {} | OWNER_SYM {} | PARSER_SYM {} - | PARTITION_SYM {} | PORT_SYM {} | PREPARE_SYM {} | REMOVE_SYM {} @@ -13608,6 +14057,11 @@ keyword_sp: | CURRENT_POS_SYM {} | CPU_SYM {} | CUBE_SYM {} + /* + Although a reserved keyword in SQL:2003 (and :2008), + not reserved in MySQL per WL#2111 specification. 
+ */ + | CURRENT_SYM {} | CURSOR_NAME_SYM {} | DATA_SYM {} | DATAFILE_SYM {} @@ -13617,6 +14071,7 @@ keyword_sp: | DEFINER_SYM {} | DELAY_KEY_WRITE_SYM {} | DES_KEY_FILE {} + | DIAGNOSTICS_SYM {} | DIRECTORY_SYM {} | DISABLE_SYM {} | DISCARD {} @@ -13634,6 +14089,7 @@ keyword_sp: | EVENT_SYM {} | EVENTS_SYM {} | EVERY_SYM {} + | EXCHANGE_SYM {} | EXPANSION_SYM {} | EXTENDED_SYM {} | EXTENT_SIZE_SYM {} @@ -13732,6 +14188,7 @@ keyword_sp: | NO_WAIT_SYM {} | NODEGROUP_SYM {} | NONE_SYM {} + | NUMBER_SYM {} | NVARCHAR_SYM {} | OFFSET_SYM {} | OLD_PASSWORD {} @@ -13778,10 +14235,12 @@ keyword_sp: | REPLICATION {} | RESOURCES {} | RESUME_SYM {} + | RETURNED_SQLSTATE_SYM {} | RETURNS_SYM {} | ROLLUP_SYM {} | ROUTINE_SYM {} | ROWS_SYM {} + | ROW_COUNT_SYM {} | ROW_FORMAT_SYM {} | ROW_SYM {} | RTREE_SYM {} @@ -14012,12 +14471,11 @@ option_value_no_option_type: { THD *thd= YYTHD; LEX *lex= Lex; - LEX_STRING *name= &$1.base_name; if ($1.var == trg_new_row_fake_var) { /* We are in trigger and assigning value to field of new row */ - if (set_trigger_new_row(YYTHD, name, $3)) + if (set_trigger_new_row(YYTHD, &$1.base_name, $3)) MYSQL_YYABORT; } else if ($1.var) @@ -14029,7 +14487,7 @@ option_value_no_option_type: else { sp_pcontext *spc= lex->spcont; - sp_variable *spv= spc->find_variable(name, false); + sp_variable *spv= spc->find_variable($1.base_name, false); /* It is a local variable. 
*/ if (set_local_variable(thd, spv, $3)) @@ -14082,7 +14540,7 @@ option_value_no_option_type: names.str= (char *)"names"; names.length= 5; - if (spc && spc->find_variable(&names, false)) + if (spc && spc->find_variable(names, false)) my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), names.str); else my_parse_error(ER(ER_SYNTAX_ERROR)); @@ -14118,7 +14576,7 @@ option_value_no_option_type: pw.str= (char *)"password"; pw.length= 8; - if (spc && spc->find_variable(&pw, false)) + if (spc && spc->find_variable(pw, false)) { my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), pw.str); MYSQL_YYABORT; @@ -14153,10 +14611,10 @@ internal_variable_name: { THD *thd= YYTHD; sp_pcontext *spc= thd->lex->spcont; - sp_variable_t *spv; + sp_variable *spv; /* Best effort lookup for system variable. */ - if (!spc || !(spv = spc->find_variable(&$1))) + if (!spc || !(spv = spc->find_variable($1, false))) { struct sys_var_with_base tmp= {NULL, $1}; @@ -14291,24 +14749,33 @@ text_or_password: TEXT_STRING { $$=$1.str;} | PASSWORD '(' TEXT_STRING ')' { - $$= $3.length ? YYTHD->variables.old_passwords ? - Item_func_old_password::alloc(YYTHD, $3.str, $3.length) : - Item_func_password::alloc(YYTHD, $3.str, $3.length) : - $3.str; + if ($3.length == 0) + $$= $3.str; + else + switch (YYTHD->variables.old_passwords) { + case 1: $$= Item_func_old_password:: + alloc(YYTHD, $3.str, $3.length); + break; + case 0: + case 2: $$= Item_func_password:: + create_password_hash_buffer(YYTHD, $3.str, $3.length); + break; + } if ($$ == NULL) MYSQL_YYABORT; + Lex->contains_plaintext_password= true; } | OLD_PASSWORD '(' TEXT_STRING ')' { - $$= $3.length ? Item_func_old_password::alloc(YYTHD, $3.str, - $3.length) : + $$= $3.length ? 
Item_func_old_password:: + alloc(YYTHD, $3.str, $3.length) : $3.str; if ($$ == NULL) MYSQL_YYABORT; + Lex->contains_plaintext_password= true; } ; - set_expr_or_default: expr { $$=$1; } | DEFAULT { $$=0; } @@ -14789,9 +15256,11 @@ grant_user: user IDENTIFIED_SYM BY TEXT_STRING { $$=$1; $1->password=$4; + if (Lex->sql_command == SQLCOM_REVOKE) + MYSQL_YYABORT; if ($4.length) { - if (YYTHD->variables.old_passwords) + if (YYTHD->variables.old_passwords == 1) { char *buff= (char *) YYTHD->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1); @@ -14807,7 +15276,7 @@ grant_user: (char *) YYTHD->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1); if (buff == NULL) MYSQL_YYABORT; - my_make_scrambled_password(buff, $4.str, $4.length); + my_make_scrambled_password_sha1(buff, $4.str, $4.length); $1->password.str= buff; $1->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH; } @@ -15637,7 +16106,7 @@ sf_tail: If a collision exists, it should not be silenced but fixed. */ push_warning_printf(thd, - MYSQL_ERROR::WARN_LEVEL_NOTE, + Sql_condition::WARN_LEVEL_NOTE, ER_NATIVE_FCT_NAME_COLLISION, ER(ER_NATIVE_FCT_NAME_COLLISION), sp->m_name.str); diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 48c77c7c99f..a5a64c065ce 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -265,27 +265,22 @@ uint check_word(TYPELIB *lib, const char *val, const char *end, */ -uint strconvert(CHARSET_INFO *from_cs, const char *from, +uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length, CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors) { int cnvres; my_wc_t wc; char *to_start= to; uchar *to_end= (uchar*) to + to_length - 1; + const uchar *from_end= (const uchar*) from + from_length; my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc; my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb; uint error_count= 0; while (1) { - /* - Using 'from + 10' is safe: - - it is enough to scan a single character in any character set. 
- - if remaining string is shorter than 10, then mb_wc will return - with error because of unexpected '\0' character. - */ if ((cnvres= (*mb_wc)(from_cs, &wc, - (uchar*) from, (uchar*) from + 10)) > 0) + (uchar*) from, from_end)) > 0) { if (!wc) break; diff --git a/sql/strfunc.h b/sql/strfunc.h index 57c5427fcd0..7b031710c76 100644 --- a/sql/strfunc.h +++ b/sql/strfunc.h @@ -43,7 +43,7 @@ char *set_to_string(THD *thd, LEX_STRING *result, ulonglong set, /* These functions were protected by INNODB_COMPATIBILITY_HOOKS */ -uint strconvert(CHARSET_INFO *from_cs, const char *from, +uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length, CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors); #endif /* STRFUNC_INCLUDED */ diff --git a/sql/structs.h b/sql/structs.h index a3a54c524e6..e5e65e01064 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -89,8 +89,8 @@ struct ha_index_option_struct; typedef struct st_key { uint key_length; /* Tot length of key */ ulong flags; /* dupp key and pack flags */ - uint key_parts; /* How many key_parts */ - uint usable_key_parts; /* Should normally be = key_parts */ + uint user_defined_key_parts; /* How many key_parts */ + uint usable_key_parts; /* Should normally be = user_defined_key_parts */ uint ext_key_parts; /* Number of key parts in extended key */ ulong ext_key_flags; /* Flags for extended key */ key_part_map ext_key_part_map; /* Bitmap of pk key parts in extension */ @@ -256,10 +256,10 @@ typedef struct user_conn { typedef struct st_user_stats { - char user[max(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1]; + char user[MY_MAX(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1]; // Account name the user is mapped to when this is a user from mapped_user. // Otherwise, the same value as user. 
- char priv_user[max(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1]; + char priv_user[MY_MAX(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1]; uint user_name_length; uint total_connections; uint concurrent_connections; diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 34ff98f3e78..457636629a1 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -48,6 +48,7 @@ // mysql_user_table_is_in_short_password_format #include "derror.h" // read_texts #include "sql_base.h" // close_cached_tables +#include "hostname.h" // host_cache_size #include <myisam.h> #include "log_slow.h" #include "debug_sync.h" // DEBUG_SYNC @@ -75,22 +76,24 @@ static Sys_var_mybool Sys_pfs_enabled( "performance_schema", "Enable the performance schema.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_enabled), - CMD_LINE(OPT_ARG), DEFAULT(FALSE)); + CMD_LINE(OPT_ARG), DEFAULT(TRUE)); -static Sys_var_ulong Sys_pfs_events_waits_history_long_size( +static Sys_var_long Sys_pfs_events_waits_history_long_size( "performance_schema_events_waits_history_long_size", - "Number of rows in EVENTS_WAITS_HISTORY_LONG.", + "Number of rows in EVENTS_WAITS_HISTORY_LONG." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_waits_history_long_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_WAITS_HISTORY_LONG_SIZE), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_events_waits_history_size( +static Sys_var_long Sys_pfs_events_waits_history_size( "performance_schema_events_waits_history_size", - "Number of rows per thread in EVENTS_WAITS_HISTORY.", + "Number of rows per thread in EVENTS_WAITS_HISTORY." 
+ " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_waits_history_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024), - DEFAULT(PFS_WAITS_HISTORY_SIZE), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024), + DEFAULT(-1), BLOCK_SIZE(1)); static Sys_var_ulong Sys_pfs_max_cond_classes( "performance_schema_max_cond_classes", @@ -99,12 +102,13 @@ static Sys_var_ulong Sys_pfs_max_cond_classes( CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256), DEFAULT(PFS_MAX_COND_CLASS), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_max_cond_instances( +static Sys_var_long Sys_pfs_max_cond_instances( "performance_schema_max_cond_instances", - "Maximum number of instrumented condition objects.", + "Maximum number of instrumented condition objects." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_cond_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_COND), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); static Sys_var_ulong Sys_pfs_max_file_classes( "performance_schema_max_file_classes", @@ -120,19 +124,21 @@ static Sys_var_ulong Sys_pfs_max_file_handles( CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), DEFAULT(PFS_MAX_FILE_HANDLE), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_max_file_instances( +static Sys_var_long Sys_pfs_max_file_instances( "performance_schema_max_file_instances", - "Maximum number of instrumented files.", + "Maximum number of instrumented files." 
+ " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_file_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_FILE), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_max_sockets( +static Sys_var_long Sys_pfs_max_sockets( "performance_schema_max_socket_instances", - "Maximum number of opened instrumented sockets.", + "Maximum number of opened instrumented sockets." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_socket_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_SOCKETS), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); static Sys_var_ulong Sys_pfs_max_socket_classes( @@ -150,12 +156,13 @@ static Sys_var_ulong Sys_pfs_max_mutex_classes( CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256), DEFAULT(PFS_MAX_MUTEX_CLASS), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_max_mutex_instances( +static Sys_var_long Sys_pfs_max_mutex_instances( "performance_schema_max_mutex_instances", - "Maximum number of instrumented MUTEX objects.", + "Maximum number of instrumented MUTEX objects." 
+ " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_mutex_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 100*1024*1024), - DEFAULT(PFS_MAX_MUTEX), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 100*1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); static Sys_var_ulong Sys_pfs_max_rwlock_classes( "performance_schema_max_rwlock_classes", @@ -164,26 +171,29 @@ static Sys_var_ulong Sys_pfs_max_rwlock_classes( CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256), DEFAULT(PFS_MAX_RWLOCK_CLASS), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_max_rwlock_instances( +static Sys_var_long Sys_pfs_max_rwlock_instances( "performance_schema_max_rwlock_instances", - "Maximum number of instrumented RWLOCK objects.", + "Maximum number of instrumented RWLOCK objects." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_rwlock_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 100*1024*1024), - DEFAULT(PFS_MAX_RWLOCK), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 100*1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_max_table_handles( +static Sys_var_long Sys_pfs_max_table_handles( "performance_schema_max_table_handles", - "Maximum number of opened instrumented tables.", + "Maximum number of opened instrumented tables." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_table_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_TABLE), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_max_table_instances( +static Sys_var_long Sys_pfs_max_table_instances( "performance_schema_max_table_instances", - "Maximum number of instrumented tables.", + "Maximum number of instrumented tables." 
+ " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_table_share_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_TABLE_SHARE), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); static Sys_var_ulong Sys_pfs_max_thread_classes( "performance_schema_max_thread_classes", @@ -192,12 +202,13 @@ static Sys_var_ulong Sys_pfs_max_thread_classes( CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256), DEFAULT(PFS_MAX_THREAD_CLASS), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_max_thread_instances( +static Sys_var_long Sys_pfs_max_thread_instances( "performance_schema_max_thread_instances", - "Maximum number of instrumented threads.", + "Maximum number of instrumented threads." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_thread_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_THREAD), BLOCK_SIZE(1)); + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); static Sys_var_ulong Sys_pfs_setup_actors_size( "performance_schema_setup_actors_size", @@ -215,28 +226,31 @@ static Sys_var_ulong Sys_pfs_setup_objects_size( DEFAULT(PFS_MAX_SETUP_OBJECT), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_accounts_size( +static Sys_var_long Sys_pfs_accounts_size( "performance_schema_accounts_size", - "Maximum number of instrumented user@host accounts.", + "Maximum number of instrumented user@host accounts." 
+ " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_account_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_ACCOUNT), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_hosts_size( +static Sys_var_long Sys_pfs_hosts_size( "performance_schema_hosts_size", - "Maximum number of instrumented hosts.", + "Maximum number of instrumented hosts." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_host_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_HOST), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_users_size( +static Sys_var_long Sys_pfs_users_size( "performance_schema_users_size", - "Maximum number of instrumented users.", + "Maximum number of instrumented users." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_user_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_MAX_USER), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); static Sys_var_ulong Sys_pfs_max_stage_classes( @@ -247,20 +261,22 @@ static Sys_var_ulong Sys_pfs_max_stage_classes( DEFAULT(PFS_MAX_STAGE_CLASS), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_events_stages_history_long_size( +static Sys_var_long Sys_pfs_events_stages_history_long_size( "performance_schema_events_stages_history_long_size", - "Number of rows in EVENTS_STAGES_HISTORY_LONG.", + "Number of rows in EVENTS_STAGES_HISTORY_LONG." 
+ " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_stages_history_long_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_STAGES_HISTORY_LONG_SIZE), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_events_stages_history_size( +static Sys_var_long Sys_pfs_events_stages_history_size( "performance_schema_events_stages_history_size", - "Number of rows per thread in EVENTS_STAGES_HISTORY.", + "Number of rows per thread in EVENTS_STAGES_HISTORY." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_stages_history_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024), - DEFAULT(PFS_STAGES_HISTORY_SIZE), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024), + DEFAULT(-1), BLOCK_SIZE(1)); /** @@ -280,28 +296,41 @@ static Sys_var_ulong Sys_pfs_max_statement_classes( DEFAULT((ulong) SQLCOM_END + (ulong) COM_END + 3), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_events_statements_history_long_size( +static Sys_var_long Sys_pfs_events_statements_history_long_size( "performance_schema_events_statements_history_long_size", - "Number of rows in EVENTS_STATEMENTS_HISTORY_LONG.", + "Number of rows in EVENTS_STATEMENTS_HISTORY_LONG." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_statements_history_long_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024), - DEFAULT(PFS_STATEMENTS_HISTORY_LONG_SIZE), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_events_statements_history_size( +static Sys_var_long Sys_pfs_events_statements_history_size( "performance_schema_events_statements_history_size", - "Number of rows per thread in EVENTS_STATEMENTS_HISTORY.", + "Number of rows per thread in EVENTS_STATEMENTS_HISTORY." 
+ " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_statements_history_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024), - DEFAULT(PFS_STATEMENTS_HISTORY_SIZE), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024), + DEFAULT(-1), BLOCK_SIZE(1)); -static Sys_var_ulong Sys_pfs_digest_size( +static Sys_var_long Sys_pfs_digest_size( "performance_schema_digests_size", - "Size of the statement digest.", + "Size of the statement digest." + " Use 0 to disable, -1 for automated sizing.", PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_digest_sizing), - CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 200), - DEFAULT(PFS_DIGEST_SIZE), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 200), + DEFAULT(-1), + BLOCK_SIZE(1)); + +static Sys_var_long Sys_pfs_connect_attrs_size( + "performance_schema_session_connect_attrs_size", + "Size of session attribute string buffer per thread." + " Use 0 to disable, -1 for automated sizing.", + PARSED_EARLY READ_ONLY + GLOBAL_VAR(pfs_param.m_session_connect_attrs_sizing), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024 * 1024), + DEFAULT(-1), BLOCK_SIZE(1)); #endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */ @@ -1195,7 +1224,7 @@ static bool check_max_allowed_packet(sys_var *self, THD *thd, set_var *var) val= var->save_result.ulonglong_value; if (val < (longlong) global_system_variables.net_buffer_length) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_OPTION_BELOW_LIMIT, ER(WARN_OPTION_BELOW_LIMIT), "max_allowed_packet", "net_buffer_length"); } @@ -1262,8 +1291,9 @@ static bool fix_max_connections(sys_var *self, THD *thd, enum_var_type type) // children, to avoid "too many connections" error in a common setup static Sys_var_ulong Sys_max_connections( "max_connections", "The number of simultaneous clients allowed", - GLOBAL_VAR(max_connections), CMD_LINE(REQUIRED_ARG), - VALID_RANGE(1, 100000), DEFAULT(151), BLOCK_SIZE(1), NO_MUTEX_GUARD, + 
PARSED_EARLY GLOBAL_VAR(max_connections), CMD_LINE(REQUIRED_ARG), + VALID_RANGE(1, 100000), + DEFAULT(MAX_CONNECTIONS_DEFAULT), BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(fix_max_connections)); static Sys_var_ulong Sys_max_connect_errors( @@ -1320,6 +1350,12 @@ static Sys_var_ulong Sys_metadata_locks_cache_size( VALID_RANGE(1, 1024*1024), DEFAULT(MDL_LOCKS_CACHE_SIZE_DEFAULT), BLOCK_SIZE(1)); +static Sys_var_ulong Sys_metadata_locks_hash_instances( + "metadata_locks_hash_instances", "Number of metadata locks hash instances", + READ_ONLY GLOBAL_VAR(mdl_locks_hash_partitions), CMD_LINE(REQUIRED_ARG), + VALID_RANGE(1, 1024), DEFAULT(MDL_LOCKS_HASH_PARTITIONS_DEFAULT), + BLOCK_SIZE(1)); + /* "pseudo_thread_id" variable used in the test suite to detect 32/64bit systems. If you change it to something else then ulong then fix the tests @@ -1695,7 +1731,7 @@ static bool check_net_buffer_length(sys_var *self, THD *thd, set_var *var) val= var->save_result.ulonglong_value; if (val > (longlong) global_system_variables.max_allowed_packet) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, WARN_OPTION_BELOW_LIMIT, ER(WARN_OPTION_BELOW_LIMIT), "max_allowed_packet", "net_buffer_length"); } @@ -2258,7 +2294,7 @@ static bool fix_query_cache_size(sys_var *self, THD *thd, enum_var_type type) requested cache size. 
See also query_cache_size_arg */ if (query_cache_size != new_cache_size) - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_QC_RESIZE, ER(ER_WARN_QC_RESIZE), query_cache_size, new_cache_size); @@ -2976,7 +3012,7 @@ static bool fix_autocommit(sys_var *self, THD *thd, enum_var_type type) Don't close thread tables or release metadata locks: if we do so, we risk releasing locks/closing tables of expressions used to assign other variables, as in: - set @var=my_stored_function1(), @@autocommit=1, @var2=(select max(a) + set @var=my_stored_function1(), @@autocommit=1, @var2=(select MY_MAX(a) from my_table), ... The locks will be released at statement end anyway, as SET statement that assigns autocommit is marked to commit @@ -3315,7 +3351,7 @@ static Sys_var_session_special Sys_rand_seed2( static ulonglong read_error_count(THD *thd) { - return thd->warning_info->error_count(); + return thd->get_stmt_da()->error_count(); } // this really belongs to the SHOW STATUS static Sys_var_session_special Sys_error_count( @@ -3327,7 +3363,7 @@ static Sys_var_session_special Sys_error_count( static ulonglong read_warning_count(THD *thd) { - return thd->warning_info->warn_count(); + return thd->get_stmt_da()->warn_count(); } // this really belongs to the SHOW STATUS static Sys_var_session_special Sys_warning_count( @@ -3424,6 +3460,14 @@ static bool check_log_path(sys_var *self, THD *thd, set_var *var) if (!path_length) return true; + if (!is_filename_allowed(var->save_result.string_value.str, + var->save_result.string_value.length, TRUE)) + { + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), + self->name.str, var->save_result.string_value.str); + return true; + } + MY_STAT f_stat; if (my_stat(path, &f_stat, MYF(0))) @@ -3703,13 +3747,13 @@ bool Sys_var_rpl_filter::global_update(THD *thd, set_var *var) { mi= master_info_index-> get_master_info(&thd->variables.default_master_connection, - 
MYSQL_ERROR::WARN_LEVEL_ERROR); + Sql_condition::WARN_LEVEL_ERROR); } else // has base name { mi= master_info_index-> get_master_info(&var->base, - MYSQL_ERROR::WARN_LEVEL_WARN); + Sql_condition::WARN_LEVEL_WARN); } if (mi) @@ -3775,13 +3819,13 @@ uchar *Sys_var_rpl_filter::global_value_ptr(THD *thd, LEX_STRING *base) { mi= master_info_index-> get_master_info(&thd->variables.default_master_connection, - MYSQL_ERROR::WARN_LEVEL_ERROR); + Sql_condition::WARN_LEVEL_ERROR); } else // has base name { mi= master_info_index-> get_master_info(base, - MYSQL_ERROR::WARN_LEVEL_WARN); + Sql_condition::WARN_LEVEL_WARN); } mysql_mutex_lock(&LOCK_global_system_variables); @@ -3889,7 +3933,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset) mysql_mutex_lock(&LOCK_active_mi); mi= master_info_index-> get_master_info(&thd->variables.default_master_connection, - MYSQL_ERROR::WARN_LEVEL_WARN); + Sql_condition::WARN_LEVEL_WARN); if (mi) { mysql_mutex_lock(&mi->rli.data_lock); @@ -3914,7 +3958,7 @@ bool update_multi_source_variable(sys_var *self_var, THD *thd, mysql_mutex_lock(&LOCK_active_mi); mi= master_info_index-> get_master_info(&thd->variables.default_master_connection, - MYSQL_ERROR::WARN_LEVEL_ERROR); + Sql_condition::WARN_LEVEL_ERROR); if (mi) { mysql_mutex_lock(&mi->rli.run_lock); @@ -4063,7 +4107,7 @@ static bool check_locale(sys_var *self, THD *thd, set_var *var) mysql_mutex_unlock(&LOCK_error_messages); if (res) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, "Can't process error message file for locale '%s'", locale->name); return true; @@ -4091,6 +4135,22 @@ static Sys_var_tz Sys_time_zone( SESSION_VAR(time_zone), NO_CMD_LINE, DEFAULT(&default_tz), NO_MUTEX_GUARD, IN_BINLOG); +static bool fix_host_cache_size(sys_var *, THD *, enum_var_type) +{ + hostname_cache_resize((uint) host_cache_size); + return false; +} + +static Sys_var_ulong Sys_host_cache_size( + 
"host_cache_size", + "How many host names should be cached to avoid resolving.", + GLOBAL_VAR(host_cache_size), + CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 65536), + DEFAULT(HOST_CACHE_SIZE), + BLOCK_SIZE(1), + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), + ON_UPDATE(fix_host_cache_size)); + static Sys_var_charptr Sys_ignore_db_dirs( "ignore_db_dirs", "Specifies a directory to add to the ignore list when collecting " @@ -4354,7 +4414,7 @@ static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var) else if (previous_val && val) goto ineffective; else if (!previous_val && val) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_VALUE_FOR_VAR, "'pseudo_slave_mode' is already ON."); } @@ -4363,7 +4423,7 @@ static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var) if (!previous_val && !val) goto ineffective; else if (previous_val && !val) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_VALUE_FOR_VAR, "Slave applier execution mode not active, " "statement ineffective."); @@ -4371,7 +4431,7 @@ static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var) goto end; ineffective: - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_VALUE_FOR_VAR, "'pseudo_slave_mode' change was ineffective."); diff --git a/sql/sys_vars.h b/sql/sys_vars.h index bf17040e65c..179069040ff 100644 --- a/sql/sys_vars.h +++ b/sql/sys_vars.h @@ -224,6 +224,8 @@ typedef Sys_var_integer<uint, GET_UINT, SHOW_UINT> Sys_var_uint; typedef Sys_var_integer<ulong, GET_ULONG, SHOW_ULONG> Sys_var_ulong; typedef Sys_var_integer<ha_rows, GET_HA_ROWS, SHOW_HA_ROWS> Sys_var_harows; typedef Sys_var_integer<ulonglong, GET_ULL, SHOW_ULONGLONG> Sys_var_ulonglong; +typedef Sys_var_integer<long, GET_LONG, SHOW_LONG> Sys_var_long; + /** Helper class for variables that take values from a TYPELIB diff 
--git a/sql/table.cc b/sql/table.cc index 0be93aaec65..266749d98a2 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -148,7 +148,7 @@ View_creation_ctx * View_creation_ctx::create(THD *thd, if (!view->view_client_cs_name.str || !view->view_connection_cl_name.str) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_VIEW_NO_CREATION_CTX, ER(ER_VIEW_NO_CREATION_CTX), (const char *) view->db, @@ -182,7 +182,7 @@ View_creation_ctx * View_creation_ctx::create(THD *thd, (const char *) view->view_client_cs_name.str, (const char *) view->view_connection_cl_name.str); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_VIEW_INVALID_CREATION_CTX, ER(ER_VIEW_INVALID_CREATION_CTX), (const char *) view->db, @@ -273,7 +273,7 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db, const LEX_STRING *name) /* - Allocate a setup TABLE_SHARE structure + Allocate and setup a TABLE_SHARE structure SYNOPSIS alloc_table_share() @@ -287,7 +287,7 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db, const LEX_STRING *name) */ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name, - char *key, uint key_length) + const char *key, uint key_length) { MEM_ROOT mem_root; TABLE_SHARE *share; @@ -336,6 +336,8 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name, init_sql_alloc(&share->stats_cb.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0)); memcpy((char*) &share->mem_root, (char*) &mem_root, sizeof(mem_root)); + mysql_mutex_init(key_TABLE_SHARE_LOCK_share, + &share->LOCK_share, MY_MUTEX_INIT_SLOW); mysql_mutex_init(key_TABLE_SHARE_LOCK_ha_data, &share->LOCK_ha_data, MY_MUTEX_INIT_FAST); } @@ -419,20 +421,27 @@ void TABLE_SHARE::destroy() { uint idx; KEY *info_it; + DBUG_ENTER("TABLE_SHARE::destroy"); + DBUG_PRINT("info", ("db: %s table: %s", db.str, table_name.str)); + + if (ha_share) + { + delete ha_share; + ha_share= NULL; 
// Safety + } - if (tmp_table == NO_TMP_TABLE) - mysql_mutex_lock(&LOCK_ha_data); free_root(&stats_cb.mem_root, MYF(0)); stats_cb.stats_can_be_read= FALSE; stats_cb.stats_is_read= FALSE; stats_cb.histograms_can_be_read= FALSE; stats_cb.histograms_are_read= FALSE; - if (tmp_table == NO_TMP_TABLE) - mysql_mutex_unlock(&LOCK_ha_data); - /* The mutex is initialized only for shares that are part of the TDC */ + /* The mutexes are initialized only for shares that are part of the TDC */ if (tmp_table == NO_TMP_TABLE) + { + mysql_mutex_destroy(&LOCK_share); mysql_mutex_destroy(&LOCK_ha_data); + } my_hash_free(&name_hash); plugin_unlock(NULL, db_plugin); @@ -448,25 +457,20 @@ void TABLE_SHARE::destroy() info_it->flags= 0; } } - if (ha_data_destroy) - { - ha_data_destroy(ha_data); - ha_data_destroy= NULL; - } + #ifdef WITH_PARTITION_STORAGE_ENGINE plugin_unlock(NULL, default_part_plugin); - if (ha_part_data_destroy) - { - ha_part_data_destroy(ha_part_data); - ha_part_data_destroy= NULL; - } #endif /* WITH_PARTITION_STORAGE_ENGINE */ + + PSI_CALL_release_table_share(m_psi); + /* Make a copy since the share is allocated in its own root, and free_root() updates its argument after freeing the memory. 
*/ MEM_ROOT own_root= mem_root; free_root(&own_root, MYF(0)); + DBUG_VOID_RETURN; } /* @@ -876,8 +880,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, share->page_checksum= (ha_choice) ((frm_image[39] >> 2) & 3); share->row_type= (enum row_type) frm_image[40]; share->table_charset= get_charset((((uint) frm_image[41]) << 8) + - (uint) frm_image[38],MYF(0)); + (uint) frm_image[38], MYF(0)); share->null_field_first= 1; + share->stats_sample_pages= uint2korr(frm_image+42); + share->stats_auto_recalc= (enum_stats_auto_recalc)(frm_image[44]); } if (!share->table_charset) { @@ -893,8 +899,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, share->table_charset= default_charset_info; } share->db_record_offset= 1; - if (db_create_options & HA_OPTION_LONG_BLOB_PTR) - share->blob_ptr_size= portable_sizeof_char_ptr; share->max_rows= uint4korr(frm_image+18); share->min_rows= uint4korr(frm_image+22); @@ -957,7 +961,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, goto err; keyinfo->flags= (uint) uint2korr(strpos) ^ HA_NOSAME; keyinfo->key_length= (uint) uint2korr(strpos+2); - keyinfo->key_parts= (uint) strpos[4]; + keyinfo->user_defined_key_parts= (uint) strpos[4]; keyinfo->algorithm= (enum ha_key_alg) strpos[5]; keyinfo->block_size= uint2korr(strpos+6); strpos+=8; @@ -968,7 +972,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, goto err; keyinfo->flags= ((uint) strpos[0]) ^ HA_NOSAME; keyinfo->key_length= (uint) uint2korr(strpos+1); - keyinfo->key_parts= (uint) strpos[3]; + keyinfo->user_defined_key_parts= (uint) strpos[3]; keyinfo->algorithm= HA_KEY_ALG_UNDEF; strpos+=4; } @@ -976,7 +980,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (i == 0) { ext_key_parts= key_parts + - (share->use_ext_keys ? first_keyinfo.key_parts*(keys-1) : 0); + (share->use_ext_keys ? 
first_keyinfo.user_defined_key_parts*(keys-1) : 0); n_length=keys * sizeof(KEY) + ext_key_parts * sizeof(KEY_PART_INFO); if (!(keyinfo= (KEY*) alloc_root(&share->mem_root, @@ -990,10 +994,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, sizeof(ulong) * ext_key_parts))) goto err; first_key_part= key_part; - first_key_parts= first_keyinfo.key_parts; + first_key_parts= first_keyinfo.user_defined_key_parts; keyinfo->flags= first_keyinfo.flags; keyinfo->key_length= first_keyinfo.key_length; - keyinfo->key_parts= first_keyinfo.key_parts; + keyinfo->user_defined_key_parts= first_keyinfo.user_defined_key_parts; keyinfo->algorithm= first_keyinfo.algorithm; if (new_frm_ver >= 3) keyinfo->block_size= first_keyinfo.block_size; @@ -1001,7 +1005,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, keyinfo->key_part= key_part; keyinfo->rec_per_key= rec_per_key; - for (j=keyinfo->key_parts ; j-- ; key_part++) + for (j=keyinfo->user_defined_key_parts ; j-- ; key_part++) { if (strpos + (new_frm_ver >= 1 ? 9 : 7) >= frm_image_end) goto err; @@ -1029,17 +1033,22 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, } key_part->store_length=key_part->length; } - keyinfo->ext_key_parts= keyinfo->key_parts; + + /* + Add primary key to end of extended keys for non unique keys for + storage engines that supports it. 
+ */ + keyinfo->ext_key_parts= keyinfo->user_defined_key_parts; keyinfo->ext_key_flags= keyinfo->flags; keyinfo->ext_key_part_map= 0; - if (share->use_ext_keys && i) + if (share->use_ext_keys && i && !(keyinfo->flags & HA_NOSAME)) { keyinfo->ext_key_part_map= 0; for (j= 0; j < first_key_parts && keyinfo->ext_key_parts < MAX_REF_PARTS; j++) { - uint key_parts= keyinfo->key_parts; + uint key_parts= keyinfo->user_defined_key_parts; KEY_PART_INFO* curr_key_part= keyinfo->key_part; KEY_PART_INFO* curr_key_part_end= curr_key_part+key_parts; for ( ; curr_key_part < curr_key_part_end; curr_key_part++) @@ -1359,6 +1368,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, plugin_hton(se_plugin)))) goto err; + if (handler_file->set_ha_share_ref(&share->ha_share)) + goto err; + record= share->default_values-1; /* Fieldstart = 1 */ null_bits_are_used= share->null_fields != 0; if (share->null_field_first) @@ -1556,7 +1568,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, "Please do \"ALTER TABLE '%s' FORCE\" to fix it!", share->fieldnames.type_names[i], share->table_name.str, share->table_name.str); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_CRASHED_ON_USAGE, "Found incompatible DECIMAL field '%s' in %s; " "Please do \"ALTER TABLE '%s' FORCE\" to fix it!", @@ -1687,7 +1699,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, Do not extend the key that contains a component defined over the beginning of a field. 
*/ - for (i= 0; i < keyinfo->key_parts; i++) + for (i= 0; i < keyinfo->user_defined_key_parts; i++) { uint fieldnr= keyinfo->key_part[i].fieldnr; if (share->field[fieldnr-1]->key_length() != @@ -1698,11 +1710,11 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, } } - if (add_first_key_parts < keyinfo->ext_key_parts-keyinfo->key_parts) + if (add_first_key_parts < keyinfo->ext_key_parts-keyinfo->user_defined_key_parts) { share->ext_key_parts-= keyinfo->ext_key_parts; key_part_map ext_key_part_map= keyinfo->ext_key_part_map; - keyinfo->ext_key_parts= keyinfo->key_parts; + keyinfo->ext_key_parts= keyinfo->user_defined_key_parts; keyinfo->ext_key_flags= keyinfo->flags; keyinfo->ext_key_part_map= 0; for (i= 0; i < add_first_key_parts; i++) @@ -1735,7 +1747,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, */ primary_key=key; key_part= keyinfo->key_part; - for (i=0 ; i < keyinfo->key_parts ;i++) + for (i=0 ; i < keyinfo->user_defined_key_parts ;i++) { uint fieldnr= key_part[i].fieldnr; if (!fieldnr || @@ -1751,7 +1763,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, key_part= keyinfo->key_part; uint key_parts= share->use_ext_keys ? keyinfo->ext_key_parts : - keyinfo->key_parts; + keyinfo->user_defined_key_parts; for (i=0; i < key_parts; key_part++, i++) { Field *field; @@ -1791,7 +1803,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (i == 0 && key != primary_key) field->flags |= (((keyinfo->flags & HA_NOSAME) && - (keyinfo->key_parts == 1)) ? + (keyinfo->user_defined_key_parts == 1)) ? 
UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG); if (i == 0) field->key_start.set_bit(key); @@ -1802,7 +1814,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, { share->keys_for_keyread.set_bit(key); field->part_of_key.set_bit(key); - if (i < keyinfo->key_parts) + if (i < keyinfo->user_defined_key_parts) field->part_of_key_not_clustered.set_bit(key); } if (handler_file->index_flags(key, i, 1) & HA_READ_ORDER) @@ -1848,7 +1860,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, "Please do \"ALTER TABLE '%s' FORCE \" to fix it!", share->table_name.str, share->table_name.str); - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_CRASHED_ON_USAGE, "Found wrong key definition in %s; " "Please do \"ALTER TABLE '%s' FORCE\" to fix " @@ -1876,7 +1888,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, keyinfo->usable_key_parts= usable_parts; // Filesort set_if_bigger(share->max_key_length,keyinfo->key_length+ - keyinfo->key_parts); + keyinfo->user_defined_key_parts); share->total_key_length+= keyinfo->key_length; /* MERGE tables do not have unique indexes. 
But every key could be @@ -1894,7 +1906,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, If we are using an integer as the primary key then allow the user to refer to it as '_rowid' */ - if (share->key_info[primary_key].key_parts == 1) + if (share->key_info[primary_key].user_defined_key_parts == 1) { Field *field= share->key_info[primary_key].key_part[0].field; if (field && field->result_type() == INT_RESULT) @@ -1993,18 +2005,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, delete handler_file; plugin_unlock(0, se_plugin); my_hash_free(&share->name_hash); - if (share->ha_data_destroy) - { - share->ha_data_destroy(share->ha_data); - share->ha_data_destroy= NULL; - } -#ifdef WITH_PARTITION_STORAGE_ENGINE - if (share->ha_part_data_destroy) - { - share->ha_part_data_destroy(share->ha_part_data); - share->ha_data_destroy= NULL; - } -#endif /* WITH_PARTITION_STORAGE_ENGINE */ if (!thd->is_error()) open_table_error(share, OPEN_FRM_CORRUPTED, share->open_errno); @@ -2064,6 +2064,8 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write, LEX *old_lex; Query_arena *arena, backup; LEX tmp_lex; + KEY *unused1; + uint unused2; LEX_CUSTRING frm= {0,0}; DBUG_ENTER("TABLE_SHARE::init_from_sql_statement_string"); @@ -2105,7 +2107,7 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write, file= mysql_create_frm_image(thd, db.str, table_name.str, &thd->lex->create_info, &thd->lex->alter_info, - C_ORDINARY_CREATE, &frm); + C_ORDINARY_CREATE, &unused1, &unused2, &frm); error|= file == 0; delete file; @@ -2516,6 +2518,9 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, if (!(outparam->file= get_new_handler(share, &outparam->mem_root, share->db_type()))) goto err; + + if (outparam->file->set_ha_share_ref(&share->ha_share)) + goto err; } else { @@ -2615,7 +2620,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, key_info->key_part= key_part; key_part_end= key_part 
+ (share->use_ext_keys ? key_info->ext_key_parts : - key_info->key_parts) ; + key_info->user_defined_key_parts) ; for ( ; key_part < key_part_end; key_part++) { Field *field= key_part->field= outparam->field[key_part->fieldnr - 1]; @@ -2633,7 +2638,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, } } if (!share->use_ext_keys) - key_part+= key_info->ext_key_parts - key_info->key_parts; + key_part+= key_info->ext_key_parts - key_info->user_defined_key_parts; } } @@ -2726,8 +2731,9 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share, } outparam->part_info->is_auto_partitioned= share->auto_partitioned; DBUG_PRINT("info", ("autopartitioned: %u", share->auto_partitioned)); - /* we should perform the fix_partition_func in either local or - caller's arena depending on work_part_info_used value + /* + We should perform the fix_partition_func in either local or + caller's arena depending on work_part_info_used value. */ if (!work_part_info_used) tmp= fix_partition_func(thd, outparam, is_create_table); @@ -2836,9 +2842,22 @@ partititon_err: bzero((char*) bitmaps, bitmap_size*3); #endif - outparam->no_replicate= outparam->file && - test(outparam->file->ha_table_flags() & - HA_HAS_OWN_BINLOGGING); + if (share->table_category == TABLE_CATEGORY_LOG) + { + outparam->no_replicate= TRUE; + } + else if (outparam->file) + { + handler::Table_flags flags= outparam->file->ha_table_flags(); + outparam->no_replicate= ! 
test(flags & (HA_BINLOG_STMT_CAPABLE + | HA_BINLOG_ROW_CAPABLE)) + || test(flags & HA_HAS_OWN_BINLOGGING); + } + else + { + outparam->no_replicate= FALSE; + } + thd->status_var.opened_tables++; thd->lex->context_analysis_only= save_context_analysis_only; @@ -2898,6 +2917,7 @@ int closefrm(register TABLE *table, bool free_share) #ifdef WITH_PARTITION_STORAGE_ENGINE if (table->part_info) { + /* Allocated through table->mem_root, freed below */ free_items(table->part_info->item_free_list); table->part_info->item_free_list= 0; table->part_info= 0; @@ -3236,11 +3256,10 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo, fileinfo[39]= (uchar) ((uint) create_info->transactional | ((uint) create_info->page_checksum << 2)); fileinfo[40]= (uchar) create_info->row_type; - /* Next few bytes where for RAID support */ + /* Bytes 41-46 were for RAID support; now reused for other purposes */ fileinfo[41]= (uchar) (csid >> 8); - fileinfo[42]= 0; - fileinfo[43]= 0; - fileinfo[44]= 0; + int2store(fileinfo+42, create_info->stats_sample_pages & 0xffff); + fileinfo[44]= (uchar) create_info->stats_auto_recalc; fileinfo[45]= 0; fileinfo[46]= 0; int4store(fileinfo+47, key_length); @@ -3665,18 +3684,18 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def) else { KEY *pk= &table->s->key_info[table->s->primary_key]; - if (pk->key_parts != table_def->primary_key_parts) + if (pk->user_defined_key_parts != table_def->primary_key_parts) { report_error(0, "Incorrect definition of table %s.%s: " "Expected primary key to have %u columns, but instead " "found %u columns.", table->s->db.str, table->alias.c_ptr(), table_def->primary_key_parts, - pk->key_parts); + pk->user_defined_key_parts); error= TRUE; } else { - for (i= 0; i < pk->key_parts; ++i) + for (i= 0; i < pk->user_defined_key_parts; ++i) { if (table_def->primary_key_columns[i] + 1 != pk->key_part[i].fieldnr) { @@ -3750,7 +3769,7 @@ bool TABLE_SHARE::visit_subgraph(Wait_for_flush *wait_for_flush, if 
(gvisitor->m_lock_open_count++ == 0) mysql_mutex_lock(&LOCK_open); - I_P_List_iterator <TABLE, TABLE_share> tables_it(used_tables); + TABLE_list::Iterator tables_it(used_tables); /* In case of multiple searches running in parallel, avoid going @@ -4418,27 +4437,32 @@ void TABLE_LIST::hide_view_error(THD *thd) return; /* Hide "Unknown column" or "Unknown function" error */ DBUG_ASSERT(thd->is_error()); + switch (thd->get_stmt_da()->sql_errno()) { + case ER_BAD_FIELD_ERROR: + case ER_SP_DOES_NOT_EXIST: + case ER_FUNC_INEXISTENT_NAME_COLLISION: + case ER_PROCACCESS_DENIED_ERROR: + case ER_COLUMNACCESS_DENIED_ERROR: + case ER_TABLEACCESS_DENIED_ERROR: + case ER_TABLE_NOT_LOCKED: + case ER_NO_SUCH_TABLE: + { + TABLE_LIST *top= top_table(); + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), + top->view_db.str, top->view_name.str); + break; + } - if (thd->stmt_da->sql_errno() == ER_BAD_FIELD_ERROR || - thd->stmt_da->sql_errno() == ER_SP_DOES_NOT_EXIST || - thd->stmt_da->sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION || - thd->stmt_da->sql_errno() == ER_PROCACCESS_DENIED_ERROR || - thd->stmt_da->sql_errno() == ER_COLUMNACCESS_DENIED_ERROR || - thd->stmt_da->sql_errno() == ER_TABLEACCESS_DENIED_ERROR || - thd->stmt_da->sql_errno() == ER_TABLE_NOT_LOCKED || - thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE) - { - TABLE_LIST *top= top_table(); - thd->clear_error(); - my_error(ER_VIEW_INVALID, MYF(0), top->view_db.str, top->view_name.str); - } - else if (thd->stmt_da->sql_errno() == ER_NO_DEFAULT_FOR_FIELD) - { - TABLE_LIST *top= top_table(); - thd->clear_error(); - // TODO: make correct error message - my_error(ER_NO_DEFAULT_FOR_VIEW_FIELD, MYF(0), - top->view_db.str, top->view_name.str); + case ER_NO_DEFAULT_FOR_FIELD: + { + TABLE_LIST *top= top_table(); + thd->clear_error(); + // TODO: make correct error message + my_error(ER_NO_DEFAULT_FOR_VIEW_FIELD, MYF(0), + top->view_db.str, top->view_name.str); + break; + } } } @@ -4514,7 +4538,7 @@ int 
TABLE_LIST::view_check_option(THD *thd, bool ignore_failure) TABLE_LIST *main_view= top_table(); if (ignore_failure) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_VIEW_CHECK_FAILED, ER(ER_VIEW_CHECK_FAILED), main_view->view_db.str, main_view->view_name.str); return(VIEW_CHECK_SKIP); @@ -4807,7 +4831,7 @@ bool TABLE_LIST::prepare_view_security_context(THD *thd) if ((thd->lex->sql_command == SQLCOM_SHOW_CREATE) || (thd->lex->sql_command == SQLCOM_SHOW_FIELDS)) { - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_NO_SUCH_USER, ER(ER_NO_SUCH_USER), definer.user.str, definer.host.str); @@ -4836,6 +4860,7 @@ bool TABLE_LIST::prepare_view_security_context(THD *thd) } } DBUG_RETURN(FALSE); + } #endif @@ -5602,7 +5627,7 @@ void TABLE::mark_columns_used_by_index_no_reset(uint index, { KEY_PART_INFO *key_part= key_info[index].key_part; KEY_PART_INFO *key_part_end= (key_part + - key_info[index].key_parts); + key_info[index].user_defined_key_parts); for (;key_part != key_part_end; key_part++) { bitmap_set_bit(bitmap, key_part->fieldnr-1); @@ -6032,8 +6057,8 @@ bool TABLE::add_tmp_key(uint key, uint key_parts, return TRUE; keyinfo= key_info + key; keyinfo->key_part= key_part_info; - keyinfo->usable_key_parts= keyinfo->key_parts = key_parts; - keyinfo->ext_key_parts= keyinfo->key_parts; + keyinfo->usable_key_parts= keyinfo->user_defined_key_parts = key_parts; + keyinfo->ext_key_parts= keyinfo->user_defined_key_parts; keyinfo->key_length=0; keyinfo->algorithm= HA_KEY_ALG_UNDEF; keyinfo->flags= HA_GENERATED_KEY; @@ -6132,7 +6157,7 @@ bool TABLE::is_filled_at_execution() uint TABLE::actual_n_key_parts(KEY *keyinfo) { return optimizer_flag(in_use, OPTIMIZER_SWITCH_EXTENDED_KEYS) ? 
- keyinfo->ext_key_parts : keyinfo->key_parts; + keyinfo->ext_key_parts : keyinfo->user_defined_key_parts; } @@ -6449,7 +6474,7 @@ bool TABLE::update_const_key_parts(COND *conds) for (uint index= 0; index < s->keys; index++) { KEY_PART_INFO *keyinfo= key_info[index].key_part; - KEY_PART_INFO *keyinfo_end= keyinfo + key_info[index].key_parts; + KEY_PART_INFO *keyinfo_end= keyinfo + key_info[index].user_defined_key_parts; for (key_part_map part_map= (key_part_map)1; keyinfo < keyinfo_end; diff --git a/sql/table.h b/sql/table.h index c7282cee093..9079d6fa847 100644 --- a/sql/table.h +++ b/sql/table.h @@ -498,19 +498,6 @@ typedef struct st_table_field_def } TABLE_FIELD_DEF; -#ifdef WITH_PARTITION_STORAGE_ENGINE -/** - Partition specific ha_data struct. -*/ -typedef struct st_ha_data_partition -{ - bool auto_inc_initialized; - mysql_mutex_t LOCK_auto_inc; /**< protecting auto_inc val */ - ulonglong next_auto_inc_val; /**< first non reserved value */ -} HA_DATA_PARTITION; -#endif - - class Table_check_intact { protected: @@ -611,14 +598,16 @@ struct TABLE_SHARE TYPELIB fieldnames; /* Pointer to fieldnames */ TYPELIB *intervals; /* pointer to interval info */ mysql_mutex_t LOCK_ha_data; /* To protect access to ha_data */ + mysql_mutex_t LOCK_share; /* To protect TABLE_SHARE */ TABLE_SHARE *next, **prev; /* Link to unused shares */ /* Doubly-linked (back-linked) lists of used and unused TABLE objects for this share. */ - I_P_List <TABLE, TABLE_share> used_tables; - I_P_List <TABLE, TABLE_share> free_tables; + typedef I_P_List <TABLE, TABLE_share> TABLE_list; + TABLE_list used_tables; + TABLE_list free_tables; LEX_CUSTRING tabledef_version; @@ -663,7 +652,8 @@ struct TABLE_SHARE key_map keys_for_keyread; ha_rows min_rows, max_rows; /* create information */ ulong avg_row_length; /* create information */ - ulong version, mysql_version; + ulong version; + ulong mysql_version; /* 0 if .frm is created before 5.0 */ ulong reclength; /* Recordlength */ /* Stored record length. 
No generated-only virtual fields are included */ ulong stored_rec_length; @@ -683,8 +673,10 @@ struct TABLE_SHARE enum ha_choice page_checksum; uint ref_count; /* How many TABLE objects uses this */ - uint blob_ptr_size; /* 4 or 8 */ uint key_block_size; /* create key_block_size, if used */ + uint stats_sample_pages; /* number of pages to sample during + stats estimation, if used, otherwise 0. */ + enum_stats_auto_recalc stats_auto_recalc; /* Automatic recalc of stats. */ uint null_bytes, last_null_bit_pos; /* Same as null_bytes, except that if there is only a 'delete-marker' in @@ -735,6 +727,9 @@ struct TABLE_SHARE */ int cached_row_logging_check; + /* Name of the tablespace used for this table */ + char *tablespace; + #ifdef WITH_PARTITION_STORAGE_ENGINE /* filled in when reading from frm */ bool auto_partitioned; @@ -756,16 +751,8 @@ struct TABLE_SHARE */ const TABLE_FIELD_DEF *table_field_def_cache; - /** place to store storage engine specific data */ - void *ha_data; - void (*ha_data_destroy)(void *); /* An optional destructor for ha_data */ - -#ifdef WITH_PARTITION_STORAGE_ENGINE - /** place to store partition specific data, LOCK_ha_data hold while init. */ - HA_DATA_PARTITION *ha_part_data; - /* Destructor for ha_part_data */ - void (*ha_part_data_destroy)(HA_DATA_PARTITION *); -#endif + /** Main handler's share */ + Handler_share *ha_share; /** Instrumentation for this table share. */ PSI_table_share *m_psi; @@ -1248,6 +1235,9 @@ public: */ bool key_read; bool no_keyread; + /** + If set, indicate that the table is not replicated by the server. 
+ */ bool locked_by_logger; bool no_replicate; bool locked_by_name; @@ -1281,7 +1271,8 @@ public: Query_arena *expr_arena; #ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *part_info; /* Partition related information */ - bool no_partitions_used; /* If true, all partitions have been pruned away */ + /* If true, all partitions have been pruned away */ + bool all_partitions_pruned_away; #endif uint max_keys; /* Size of allocated key_info array. */ bool stats_is_read; /* Persistent statistics is read for the table */ @@ -2051,6 +2042,11 @@ struct TABLE_LIST MDL_request mdl_request; +#ifdef WITH_PARTITION_STORAGE_ENGINE + /* List to carry partition names from PARTITION (...) clause in statement */ + List<String> *partition_names; +#endif /* WITH_PARTITION_STORAGE_ENGINE */ + void calc_md5(char *buffer); int view_check_option(THD *thd, bool ignore_failure); bool create_field_translation(THD *thd); @@ -2206,7 +2202,7 @@ struct TABLE_LIST @brief Returns the name of the database that the referenced table belongs to. */ - char *get_db_name() { return view != NULL ? view_db.str : db; } + char *get_db_name() const { return view != NULL ? view_db.str : db; } /** @brief Returns the name of the table that this TABLE_LIST represents. @@ -2214,7 +2210,7 @@ struct TABLE_LIST @details The unqualified table name or view name for a table or view, respectively. */ - char *get_table_name() { return view != NULL ? view_name.str : table_name; } + char *get_table_name() const { return view != NULL ? 
view_name.str : table_name; } bool is_active_sjm(); bool is_jtbm() { return test(jtbm_subselect!=NULL); } st_select_lex_unit *get_unit(); @@ -2511,7 +2507,7 @@ bool unpack_vcol_info_from_frm(THD *thd, MEM_ROOT *mem_root, TABLE *table, Field *field, LEX_STRING *vcol_expr, bool *error_reported); TABLE_SHARE *alloc_table_share(const char *db, const char *table_name, - char *key, uint key_length); + const char *key, uint key_length); void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key, uint key_length, const char *table_name, const char *path); diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc index 8c7db0673ac..a14ed36837b 100644 --- a/sql/thr_malloc.cc +++ b/sql/thr_malloc.cc @@ -46,10 +46,7 @@ extern "C" { returned in the error packet. - SHOW ERROR/SHOW WARNINGS may be empty. */ - thd->stmt_da->set_error_status(thd, - ER_OUT_OF_RESOURCES, - ER(ER_OUT_OF_RESOURCES), - NULL); + thd->get_stmt_da()->set_error_status(ER_OUT_OF_RESOURCES); } } @@ -134,7 +131,7 @@ char *sql_strmake_with_convert(const char *str, size_t arg_length, if ((from_cs == &my_charset_bin) || (to_cs == &my_charset_bin)) { // Safety if to_cs->mbmaxlen > 0 - new_length= min(arg_length, max_res_length); + new_length= MY_MIN(arg_length, max_res_length); memcpy(pos, str, new_length); } else diff --git a/sql/transaction.cc b/sql/transaction.cc index 09de480e236..239fdef7064 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -524,7 +524,7 @@ bool trans_rollback_to_savepoint(THD *thd, LEX_STRING name) else if (((thd->variables.option_bits & OPTION_KEEP_LOG) || thd->transaction.all.modified_non_trans_table) && !thd->slave_thread) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARNING_NOT_COMPLETE_ROLLBACK, ER(ER_WARNING_NOT_COMPLETE_ROLLBACK)); @@ -815,7 +815,7 @@ bool trans_xa_rollback(THD *thd) ha_commit_or_rollback_by_xid(thd->lex->xid, 0); xid_cache_delete(xs); } - DBUG_RETURN(thd->stmt_da->is_error()); + 
DBUG_RETURN(thd->get_stmt_da()->is_error()); } if (xa_state != XA_IDLE && xa_state != XA_PREPARED && xa_state != XA_ROLLBACK_ONLY) diff --git a/sql/tztime.cc b/sql/tztime.cc index f5e9182522e..272dfb6381b 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -176,7 +176,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage) uchar buf[sizeof(struct tzhead) + sizeof(my_time_t) * TZ_MAX_TIMES + TZ_MAX_TIMES + sizeof(TRAN_TYPE_INFO) * TZ_MAX_TYPES + #ifdef ABBR_ARE_USED - max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1))) + + MY_MAX(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1))) + #endif sizeof(LS_INFO) * TZ_MAX_LEAPS]; } u; @@ -405,7 +405,7 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage) Let us choose end_t as point before next time type change or leap second correction. */ - end_t= min((next_trans_idx < sp->timecnt) ? sp->ats[next_trans_idx] - 1: + end_t= MY_MIN((next_trans_idx < sp->timecnt) ? sp->ats[next_trans_idx] - 1: MY_TIME_T_MAX, (next_leap_idx < sp->leapcnt) ? sp->lsis[next_leap_idx].ls_trans - 1: MY_TIME_T_MAX); @@ -1690,7 +1690,8 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) MYSQL_OPEN_IGNORE_FLUSH | MYSQL_LOCK_IGNORE_TIMEOUT)) { sql_print_warning("Can't open and lock time zone table: %s " - "trying to live without them", thd->stmt_da->message()); + "trying to live without them", + thd->get_stmt_da()->message()); /* We will try emulate that everything is ok */ return_val= time_zone_tables_exist= 0; goto end_with_setting_default_tz; @@ -1876,7 +1877,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) uchar types[TZ_MAX_TIMES]; TRAN_TYPE_INFO ttis[TZ_MAX_TYPES]; #ifdef ABBR_ARE_USED - char chars[max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))]; + char chars[MY_MAX(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))]; #endif /* Used as a temporary tz_info until we decide that we actually want to @@ -1927,7 +1928,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) 
field->store((longlong) tzid, TRUE); DBUG_ASSERT(field->key_length() <= sizeof(keybuff)); field->get_key_image(keybuff, - min(field->key_length(), sizeof(keybuff)), + MY_MIN(field->key_length(), sizeof(keybuff)), Field::itRAW); if (table->file->ha_index_init(0, 1)) goto end; @@ -1960,7 +1961,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) field->store((longlong) tzid, TRUE); DBUG_ASSERT(field->key_length() <= sizeof(keybuff)); field->get_key_image(keybuff, - min(field->key_length(), sizeof(keybuff)), + MY_MIN(field->key_length(), sizeof(keybuff)), Field::itRAW); if (table->file->ha_index_init(0, 1)) goto end; diff --git a/sql/unireg.cc b/sql/unireg.cc index 388aa2863af..7bb943dc9b0 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -191,7 +191,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table, char warn_buff[MYSQL_ERRMSG_SIZE]; my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_TABLE_COMMENT), real_table_name, TABLE_COMMENT_MAXLEN); - push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, ER_TOO_LONG_TABLE_COMMENT, warn_buff); create_info->comment.length= tmp_len; } @@ -369,59 +369,53 @@ err: } -/* +/** Create a frm (table definition) file and the tables - SYNOPSIS - rea_create_table() - thd Thread handler - frm binary frm image of the table to create - path Name of file (including database, without .frm) - db Data base name - table_name Table name - create_info create info parameters - file Handler to use or NULL if only frm needs to be created - - RETURN - 0 ok - 1 error + @param thd Thread handler + @param frm Binary frm image of the table to create + @param path Name of file (including database, without .frm) + @param db Data base name + @param table_name Table name + @param create_info create info parameters + @param file Handler to use or NULL if only frm needs to be created + + @retval 0 ok + @retval 1 error */ int rea_create_table(THD *thd, LEX_CUSTRING 
*frm, const char *path, const char *db, const char *table_name, - HA_CREATE_INFO *create_info, handler *file) + HA_CREATE_INFO *create_info, handler *file, + bool no_ha_create_table) { DBUG_ENTER("rea_create_table"); - if (file) + // TODO don't write frm for temp tables + if (no_ha_create_table || create_info->tmp_table()) { - // TODO don't write frm for temp tables - if (create_info->tmp_table() && - writefrm(path, db, table_name, true, frm->str, frm->length)) - goto err_handler; + if (writefrm(path, db, table_name, true, frm->str, frm->length)) + goto err_frm; + } - if (thd->variables.keep_files_on_create) - create_info->options|= HA_CREATE_KEEP_FILES; + if (thd->variables.keep_files_on_create) + create_info->options|= HA_CREATE_KEEP_FILES; - if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG) || - ha_create_table(thd, path, db, table_name, create_info, frm)) - { - file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG); - goto err_handler; - } - } - else + if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG)) + goto err_part; + + if (!no_ha_create_table) { - if (writefrm(path, db, table_name, false, frm->str, frm->length)) - goto err_handler; + if (ha_create_table(thd, path, db, table_name, create_info, frm)) + goto err_part; } DBUG_RETURN(0); -err_handler: - char frm_name[FN_REFLEN]; - strxmov(frm_name, path, reg_ext, NullS); - mysql_file_delete(key_file_frm, frm_name, MYF(0)); +err_part: + file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG); +err_frm: + deletefrm(path); DBUG_RETURN(1); } /* rea_create_table */ @@ -443,15 +437,15 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, { int2store(pos, (key->flags ^ HA_NOSAME)); int2store(pos+2,key->key_length); - pos[4]= (uchar) key->key_parts; + pos[4]= (uchar) key->user_defined_key_parts; pos[5]= (uchar) key->algorithm; int2store(pos+6, key->block_size); pos+=8; - key_parts+=key->key_parts; + key_parts+=key->user_defined_key_parts; 
DBUG_PRINT("loop", ("flags: %lu key_parts: %d key_part: 0x%lx", - key->flags, key->key_parts, + key->flags, key->user_defined_key_parts, (long) key->key_part)); - for (key_part=key->key_part,key_part_end=key_part+key->key_parts ; + for (key_part=key->key_part,key_part_end=key_part+key->user_defined_key_parts ; key_part != key_part_end ; key_part++) @@ -660,7 +654,7 @@ static bool pack_header(uchar *forminfo, List<Create_field> &create_fields, DBUG_RETURN(1); } /* Hack to avoid bugs with small static rows in MySQL */ - reclength=max(file->min_record_length(table_options),reclength); + reclength=MY_MAX(file->min_record_length(table_options),reclength); if ((ulong) create_fields.elements*FCOMP+FRM_FORMINFO_SIZE+ n_length+int_length+com_length+vcol_info_length > 65535L || int_count > 255) @@ -940,7 +934,6 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options, table.s= &share; table.in_use= thd; - table.s->blob_ptr_size= portable_sizeof_char_ptr; null_count=0; if (!(table_options & HA_OPTION_PACK_RECORD)) diff --git a/sql/unireg.h b/sql/unireg.h index c867f50197d..9b40b7b0779 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -132,6 +132,10 @@ The flag means that I_S table uses optimization algorithm. */ #define OPTIMIZE_I_S_TABLE OPEN_VIEW_FULL*2 +/** + This flag is used to instruct tdc_open_view() to check metadata version. +*/ +#define CHECK_METADATA_VERSION OPEN_TRIGGER_ONLY*2 /* The flag means that we need to process trigger files only. @@ -190,7 +194,8 @@ enum extra2_frm_value_type { int rea_create_table(THD *thd, LEX_CUSTRING *frm, const char *path, const char *db, const char *table_name, - HA_CREATE_INFO *create_info, handler *file); + HA_CREATE_INFO *create_info, handler *file, + bool no_ha_create_table); LEX_CUSTRING build_frm_image(THD *thd, const char *table, HA_CREATE_INFO *create_info, List<Create_field> &create_fields, |