author    | Michael Widenius <monty@askmonty.org> | 2011-05-02 20:58:45 +0300
committer | Michael Widenius <monty@askmonty.org> | 2011-05-02 20:58:45 +0300
commit    | e415ba0fb2c57eaf370e84a3a9c8d831f820a560
tree      | f113a8024de4ee4f1bc19aae98c19a2835f5b4e7 /sql
parent    | 046418ad956c98c3788d79650fcb50479844df3b
parent    | a1f7ceb281f9d87c9baea125ebab26f99a0370f8
download  | mariadb-git-e415ba0fb2c57eaf370e84a3a9c8d831f820a560.tar.gz
Merge with MySQL 5.1.57/58
Moved some BSD string functions from Unireg
Diffstat (limited to 'sql')
43 files changed, 1300 insertions(+), 550 deletions(-)
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index 753e9d21b65..7473cf47188 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -424,7 +424,7 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table, key_copy(key_buf, event_table->record[0], key_info, key_len); if (!(ret= event_table->file->index_read_map(event_table->record[0], key_buf, (key_part_map)1, - HA_READ_PREFIX))) + HA_READ_KEY_EXACT))) { DBUG_PRINT("info",("Found rows. Let's retrieve them. ret=%d", ret)); do diff --git a/sql/field.cc b/sql/field.cc index 6116ad75b13..61566d8281c 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -5468,6 +5468,7 @@ double Field_year::val_real(void) longlong Field_year::val_int(void) { ASSERT_COLUMN_MARKED_FOR_READ; + DBUG_ASSERT(field_length == 2 || field_length == 4); int tmp= (int) ptr[0]; if (field_length != 4) tmp%=100; // Return last 2 char @@ -5480,6 +5481,7 @@ longlong Field_year::val_int(void) String *Field_year::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { + DBUG_ASSERT(field_length < 5); val_buffer->alloc(5); val_buffer->length(field_length); char *to=(char*) val_buffer->ptr(); @@ -9485,6 +9487,7 @@ void Create_field::create_length_to_internal_length(void) case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: case MYSQL_TYPE_VARCHAR: diff --git a/sql/field.h b/sql/field.h index c5ced2b4c56..4cf8eeb8104 100644 --- a/sql/field.h +++ b/sql/field.h @@ -13,6 +13,8 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ +#include "my_compare.h" /* for clr_rec_bits */ + /* Because of the function new_field() all field classes that have static variables must declare the size_of() member function. 
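The field.cc hunk above adds a guard to Field_year::val_int(): field_length must be 2 or 4, and any width other than 4 returns only the last two digits of the stored year. A minimal, self-contained sketch of that conversion follows; the function name year_byte_to_int and the parameter display_width are illustrative only, and the +1900 adjustment for the four-digit case comes from the rest of Field_year::val_int(), which the quoted hunk cuts off.

```cpp
// Sketch of the YEAR conversion logic touched by the hunk above (assumption:
// the stored byte is the offset from 1900, as in Field_year).
#include <cassert>
#include <cstdint>
#include <iostream>

long long year_byte_to_int(std::uint8_t stored, unsigned display_width)
{
  assert(display_width == 2 || display_width == 4);  // mirrors the new DBUG_ASSERT
  int tmp = static_cast<int>(stored);
  if (display_width != 4)
    tmp %= 100;        // YEAR(2): return only the last two digits
  else if (tmp)
    tmp += 1900;       // YEAR(4): zero stays zero (the "0000" value)
  return tmp;
}

int main()
{
  std::cout << year_byte_to_int(111, 4) << '\n';  // prints 2011
  std::cout << year_byte_to_int(111, 2) << '\n';  // prints 11
}
```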
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 323cf24ef04..2f7b43a8c0d 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -163,8 +163,7 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF; */ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) - :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE), - m_is_sub_partitioned(0) + :handler(hton, share) { DBUG_ENTER("ha_partition::ha_partition(table)"); init_alloc_root(&m_mem_root, 512, 512); @@ -185,16 +184,46 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share) */ ha_partition::ha_partition(handlerton *hton, partition_info *part_info) - :handler(hton, NULL), m_part_info(part_info), m_create_handler(TRUE), - m_is_sub_partitioned(m_part_info->is_sub_partitioned()) + :handler(hton, NULL) { DBUG_ENTER("ha_partition::ha_partition(part_info)"); + DBUG_ASSERT(part_info); init_alloc_root(&m_mem_root, 512, 512); init_handler_variables(); - DBUG_ASSERT(m_part_info); + m_part_info= part_info; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); DBUG_VOID_RETURN; } +/** + ha_partition constructor method used by ha_partition::clone() + + @param hton Handlerton (partition_hton) + @param share Table share object + @param part_info_arg partition_info to use + @param clone_arg ha_partition to clone + @param clme_mem_root_arg MEM_ROOT to use + + @return New partition handler +*/ + +ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share, + partition_info *part_info_arg, + ha_partition *clone_arg, + MEM_ROOT *clone_mem_root_arg) + :handler(hton, share) +{ + DBUG_ENTER("ha_partition::ha_partition(clone)"); + init_alloc_root(&m_mem_root, 512, 512); + init_handler_variables(); + m_part_info= part_info_arg; + m_create_handler= TRUE; + m_is_sub_partitioned= m_part_info->is_sub_partitioned(); + m_is_clone_of= clone_arg; + m_clone_mem_root= clone_mem_root_arg; + DBUG_VOID_RETURN; +} /* Initialize handler object @@ -247,7 +276,6 @@ void ha_partition::init_handler_variables() m_rec0= 0; m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; - is_clone= FALSE, m_part_func_monotonicity_info= NON_MONOTONIC; auto_increment_lock= FALSE; auto_increment_safe_stmt_log_lock= FALSE; @@ -255,6 +283,11 @@ void ha_partition::init_handler_variables() this allows blackhole to work properly */ m_no_locks= 0; + m_part_info= NULL; + m_create_handler= FALSE; + m_is_sub_partitioned= 0; + m_is_clone_of= NULL; + m_clone_mem_root= NULL; #ifdef DONT_HAVE_TO_BE_INITALIZED m_start_key.flag= 0; @@ -367,7 +400,8 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root) */ DBUG_RETURN(0); } - else if (get_from_handler_file(table_share->normalized_path.str, mem_root)) + else if (get_from_handler_file(table_share->normalized_path.str, + mem_root, false)) { my_message(ER_UNKNOWN_ERROR, "Failed to read from the .par file", MYF(0)); DBUG_RETURN(1); @@ -1866,7 +1900,7 @@ uint ha_partition::del_ren_cre_table(const char *from, DBUG_RETURN(TRUE); } - if (get_from_handler_file(from, ha_thd()->mem_root)) + if (get_from_handler_file(from, ha_thd()->mem_root, false)) DBUG_RETURN(TRUE); DBUG_ASSERT(m_file_buffer); DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to)); @@ -2086,18 +2120,16 @@ static uint name_add(char *dest, const char *first_name, const char *sec_name) } -/* +/** Create the special .par file - SYNOPSIS - create_handler_file() - name Full path of table name + @param name Full path of table name - RETURN VALUE - >0 Error code - 0 Success + @return Operation status + @retval FALSE 
Error code + @retval TRUE Success - DESCRIPTION + @note Method used to create handler file with names of partitions, their engine types and the number of partitions. */ @@ -2161,21 +2193,24 @@ bool ha_partition::create_handler_file(const char *name) Array of engine types n * 4 bytes where n = (m_tot_parts + 3)/4 Length of name part in bytes 4 bytes + (Names in filename format) Name part m * 4 bytes where m = ((length_name_part + 3)/4)*4 All padding bytes are zeroed */ - tot_partition_words= (tot_parts + 3) / 4; - tot_name_words= (tot_name_len + 3) / 4; + tot_partition_words= (tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + tot_name_words= (tot_name_len + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + /* 4 static words (tot words, checksum, tot partitions, name length) */ tot_len_words= 4 + tot_partition_words + tot_name_words; - tot_len_byte= 4 * tot_len_words; + tot_len_byte= PAR_WORD_SIZE * tot_len_words; file_buffer= (uchar *) my_alloca(tot_len_byte); if (!file_buffer) DBUG_RETURN(TRUE); bzero(file_buffer, tot_len_byte); - engine_array= (file_buffer + 12); - name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4)); + engine_array= (file_buffer + PAR_ENGINES_OFFSET); + name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE + + PAR_WORD_SIZE); part_it.rewind(); for (i= 0; i < no_parts; i++) { @@ -2213,13 +2248,15 @@ bool ha_partition::create_handler_file(const char *name) } chksum= 0; int4store(file_buffer, tot_len_words); - int4store(file_buffer + 8, tot_parts); - int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len); + int4store(file_buffer + PAR_NUM_PARTS_OFFSET, tot_parts); + int4store(file_buffer + PAR_ENGINES_OFFSET + + (tot_partition_words * PAR_WORD_SIZE), + tot_name_len); for (i= 0; i < tot_len_words; i++) - chksum^= uint4korr(file_buffer + 4 * i); - int4store(file_buffer + 4, chksum); + chksum^= uint4korr(file_buffer + PAR_WORD_SIZE * i); + int4store(file_buffer + PAR_CHECKSUM_OFFSET, chksum); /* - Remove .frm extension and replace with .par + Add .par extension to the file name. 
Create and write and close file to be used at open, delete_table and rename_table */ @@ -2230,6 +2267,7 @@ bool ha_partition::create_handler_file(const char *name) result= my_write(file, (uchar *) file_buffer, tot_len_byte, MYF(MY_WME | MY_NABP)) != 0; + /* Write connection information (for federatedx engine) */ part_it.rewind(); for (i= 0; i < no_parts && !result; i++) { @@ -2240,7 +2278,10 @@ bool ha_partition::create_handler_file(const char *name) if (my_write(file, buffer, 4, MYF(MY_WME | MY_NABP)) || my_write(file, (uchar *) part_elem->connect_string.str, length, MYF(MY_WME | MY_NABP))) + { result= TRUE; + break; + } } VOID(my_close(file, MYF(0))); } @@ -2250,14 +2291,9 @@ bool ha_partition::create_handler_file(const char *name) DBUG_RETURN(result); } -/* - Clear handler variables and free some memory - - SYNOPSIS - clear_handler_file() - RETURN VALUE - NONE +/** + Clear handler variables and free some memory */ void ha_partition::clear_handler_file() @@ -2270,16 +2306,15 @@ void ha_partition::clear_handler_file() m_connect_string= NULL; } -/* + +/** Create underlying handler objects - SYNOPSIS - create_handlers() - mem_root Allocate memory through this + @param mem_root Allocate memory through this - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval TRUE Error + @retval FALSE Success */ bool ha_partition::create_handlers(MEM_ROOT *mem_root) @@ -2317,6 +2352,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) DBUG_RETURN(FALSE); } + /* Create underlying handler objects from partition info @@ -2388,85 +2424,83 @@ error_end: } -/* - Get info about partition engines and their names from the .par file +/** + Read the .par file to get the partitions engines and names - SYNOPSIS - get_from_handler_file() - name Full path of table name - mem_root Allocate memory through this + @param name Name of table file (without extention) - RETURN VALUE - TRUE Error - FALSE Success + @return Operation status + @retval true Failure + @retval false Success - DESCRIPTION - Open handler file to get partition names, engine types and number of - partitions. + @note On success, m_file_buffer is allocated and must be + freed by the caller. m_name_buffer_ptr and m_tot_parts is also set. 
*/ -bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) +bool ha_partition::read_par_file(const char *name) { - char buff[FN_REFLEN], *address_tot_name_len; + char buff[FN_REFLEN], *tot_name_len_offset; File file; - char *file_buffer, *name_buffer_ptr; - handlerton **engine_array; + char *file_buffer; uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum; - DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_ENTER("ha_partition::read_par_file"); DBUG_PRINT("enter", ("table name: '%s'", name)); if (m_file_buffer) - DBUG_RETURN(FALSE); + DBUG_RETURN(false); fn_format(buff, name, "", ha_par_ext, MY_APPEND_EXT); /* Following could be done with my_stat to read in whole file */ if ((file= my_open(buff, O_RDONLY | O_SHARE, MYF(0))) < 0) - DBUG_RETURN(TRUE); - if (my_read(file, (uchar *) & buff[0], 8, MYF(MY_NABP))) + DBUG_RETURN(true); + if (my_read(file, (uchar *) & buff[0], PAR_WORD_SIZE, MYF(MY_NABP))) goto err1; len_words= uint4korr(buff); - len_bytes= 4 * len_words; + len_bytes= PAR_WORD_SIZE * len_words; + if (my_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + goto err1; if (!(file_buffer= (char*) alloc_root(&m_mem_root, len_bytes))) goto err1; - VOID(my_seek(file, 0, MY_SEEK_SET, MYF(0))); if (my_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP))) goto err2; chksum= 0; for (i= 0; i < len_words; i++) - chksum ^= uint4korr((file_buffer) + 4 * i); + chksum ^= uint4korr((file_buffer) + PAR_WORD_SIZE * i); if (chksum) goto err2; - m_tot_parts= uint4korr((file_buffer) + 8); + m_tot_parts= uint4korr((file_buffer) + PAR_NUM_PARTS_OFFSET); DBUG_PRINT("info", ("No of parts = %u", m_tot_parts)); - tot_partition_words= (m_tot_parts + 3) / 4; - engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); - for (i= 0; i < m_tot_parts; i++) - { - engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), - (enum legacy_db_type) - *(uchar *) ((file_buffer) + - 12 + i)); - if (!engine_array[i]) - goto err3; - } - address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words; - tot_name_words= (uint4korr(address_tot_name_len) + 3) / 4; + tot_partition_words= (m_tot_parts + PAR_WORD_SIZE - 1) / PAR_WORD_SIZE; + + tot_name_len_offset= file_buffer + PAR_ENGINES_OFFSET + + PAR_WORD_SIZE * tot_partition_words; + tot_name_words= (uint4korr(tot_name_len_offset) + PAR_WORD_SIZE - 1) / + PAR_WORD_SIZE; + /* + Verify the total length = tot size word, checksum word, num parts word + + engines array + name length word + name array. 
+ */ if (len_words != (tot_partition_words + tot_name_words + 4)) - goto err3; - name_buffer_ptr= file_buffer + 16 + 4 * tot_partition_words; + goto err2; + m_file_buffer= file_buffer; // Will be freed in clear_handler_file() + m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE; if (!(m_connect_string= (LEX_STRING*) alloc_root(&m_mem_root, m_tot_parts * sizeof(LEX_STRING)))) - goto err3; + goto err2; bzero(m_connect_string, m_tot_parts * sizeof(LEX_STRING)); + /* Read connection arguments (for federated X engine) */ for (i= 0; i < m_tot_parts; i++) { LEX_STRING connect_string; uchar buffer[4]; if (my_read(file, buffer, 4, MYF(MY_NABP))) + { + /* No extra options; Probably not a federatedx engine */ break; + } connect_string.length= uint4korr(buffer); connect_string.str= (char*) alloc_root(&m_mem_root, connect_string.length+1); if (my_read(file, (uchar*) connect_string.str, connect_string.length, @@ -2477,31 +2511,100 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root) } VOID(my_close(file, MYF(0))); - m_file_buffer= file_buffer; // Will be freed in clear_handler_file() - m_name_buffer_ptr= name_buffer_ptr; - + DBUG_RETURN(false); + +err2: +err1: + VOID(my_close(file, MYF(0))); + DBUG_RETURN(true); +} + + +/** + Setup m_engine_array + + @param mem_root MEM_ROOT to use for allocating new handlers + + @return Operation status + @retval false Success + @retval true Failure +*/ + +bool ha_partition::setup_engine_array(MEM_ROOT *mem_root) +{ + uint i; + uchar *buff; + handlerton **engine_array; + + DBUG_ASSERT(!m_file); + DBUG_ENTER("ha_partition::setup_engine_array"); + engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*)); + if (!engine_array) + DBUG_RETURN(true); + + buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET); + for (i= 0; i < m_tot_parts; i++) + { + engine_array[i]= ha_resolve_by_legacy_type(ha_thd(), + (enum legacy_db_type) + *(buff + i)); + if (!engine_array[i]) + goto err; + } if (!(m_engine_array= (plugin_ref*) alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref)))) - goto err3; + goto err; for (i= 0; i < m_tot_parts; i++) m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]); my_afree((gptr) engine_array); - if (!m_file && create_handlers(mem_root)) + if (create_handlers(mem_root)) { clear_handler_file(); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); } - DBUG_RETURN(FALSE); -err3: + DBUG_RETURN(false); + +err: my_afree((gptr) engine_array); -err2: -err1: - VOID(my_close(file, MYF(0))); - DBUG_RETURN(TRUE); + DBUG_RETURN(true); +} + + +/** + Get info about partition engines and their names from the .par file + + @param name Full path of table name + @param mem_root Allocate memory through this + @param is_clone If it is a clone, don't create new handlers + + @return Operation status + @retval true Error + @retval false Success + + @note Open handler file to get partition names, engine types and number of + partitions. 
+*/ + +bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone) +{ + DBUG_ENTER("ha_partition::get_from_handler_file"); + DBUG_PRINT("enter", ("table name: '%s'", name)); + + if (m_file_buffer) + DBUG_RETURN(false); + + if (read_par_file(name)) + DBUG_RETURN(true); + + if (!is_clone && setup_engine_array(mem_root)) + DBUG_RETURN(true); + + DBUG_RETURN(false); } @@ -2548,13 +2651,13 @@ void ha_data_partition_destroy(void *ha_data) int ha_partition::open(const char *name, int mode, uint test_if_locked) { - char *name_buffer_ptr= m_name_buffer_ptr; - int error; + char *name_buffer_ptr; + int error= HA_ERR_INITIALIZATION; uint alloc_len; handler **file; char name_buff[FN_REFLEN]; bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE); - ulonglong check_table_flags= 0; + ulonglong check_table_flags; DBUG_ENTER("ha_partition::open"); DBUG_ASSERT(table->s == table_share); @@ -2562,8 +2665,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) m_mode= mode; m_open_test_lock= test_if_locked; m_part_field_array= m_part_info->full_part_field_array; - if (get_from_handler_file(name, &table->mem_root)) - DBUG_RETURN(1); + if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of))) + DBUG_RETURN(error); + name_buffer_ptr= m_name_buffer_ptr; m_start_key.length= 0; m_rec0= table->record[0]; m_rec_length= table_share->reclength; @@ -2573,7 +2677,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) { if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME)))) { - DBUG_RETURN(1); + DBUG_RETURN(error); } { /* @@ -2596,50 +2700,86 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */ if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE)) - DBUG_RETURN(1); + DBUG_RETURN(error); bitmap_clear_all(&m_bulk_insert_started); /* Initialize the bitmap we use to determine what partitions are used */ - if (!is_clone) + if (!m_is_clone_of) { + DBUG_ASSERT(!m_clone_mem_root); if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE)) { bitmap_free(&m_bulk_insert_started); - DBUG_RETURN(1); + DBUG_RETURN(error); } bitmap_set_all(&(m_part_info->used_partitions)); } + if (m_is_clone_of) + { + uint i; + DBUG_ASSERT(m_clone_mem_root); + /* Allocate an array of handler pointers for the partitions handlers. */ + alloc_len= (m_tot_parts + 1) * sizeof(handler*); + if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len))) + goto err_alloc; + memset(m_file, 0, alloc_len); + /* + Populate them by cloning the original partitions. This also opens them. + Note that file->ref is allocated too. 
+ */ + file= m_is_clone_of->m_file; + for (i= 0; i < m_tot_parts; i++) + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root))) + { + error= HA_ERR_INITIALIZATION; + file= &m_file[i]; + goto err_handler; + } + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } + } + else + { + file= m_file; + do + { + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); + table->s->connect_string = m_connect_string[(uint)(file-m_file)]; + if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked))) + goto err_handler; + bzero(&table->s->connect_string, sizeof(LEX_STRING)); + m_no_locks+= (*file)->lock_count(); + name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + } while (*(++file)); + } + file= m_file; - do + ref_length= (*file)->ref_length; + check_table_flags= (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS)); + while (*(++file)) { - create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, - FALSE); - table->s->connect_string = m_connect_string[(uint)(file-m_file)]; - if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, - test_if_locked))) - goto err_handler; - bzero(&table->s->connect_string, sizeof(LEX_STRING)); - m_no_locks+= (*file)->lock_count(); - name_buffer_ptr+= strlen(name_buffer_ptr) + 1; + DBUG_ASSERT(ref_length >= (*file)->ref_length); set_if_bigger(ref_length, ((*file)->ref_length)); /* Verify that all partitions have the same set of table flags. Mask all flags that partitioning enables/disables. */ - if (!check_table_flags) - { - check_table_flags= (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS)); - } - else if (check_table_flags != (((*file)->ha_table_flags() & - ~(PARTITION_DISABLED_TABLE_FLAGS)) | - (PARTITION_ENABLED_TABLE_FLAGS))) + if (check_table_flags != (((*file)->ha_table_flags() & + ~(PARTITION_DISABLED_TABLE_FLAGS)) | + (PARTITION_ENABLED_TABLE_FLAGS))) { error= HA_ERR_INITIALIZATION; + /* set file to last handler, so all of them is closed */ + file = &m_file[m_tot_parts - 1]; goto err_handler; } - } while (*(++file)); + } key_used_on_scan= m_file[0]->key_used_on_scan; implicit_emptied= m_file[0]->implicit_emptied; /* @@ -2648,6 +2788,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) */ ref_length+= PARTITION_BYTES_IN_POS; m_ref_length= ref_length; + /* Release buffer read from .par file. It will not be reused again after being opened once. @@ -2705,25 +2846,54 @@ err_handler: DEBUG_SYNC(ha_thd(), "partition_open_error"); while (file-- != m_file) (*file)->close(); +err_alloc: bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); DBUG_RETURN(error); } -handler *ha_partition::clone(MEM_ROOT *mem_root) + +/** + Clone the open and locked partitioning handler. + + @param mem_root MEM_ROOT to use. + + @return Pointer to the successfully created clone or NULL + + @details + This function creates a new ha_partition handler as a clone/copy. The + original (this) must already be opened and locked. The clone will use + the originals m_part_info. + It also allocates memory for ref + ref_dup. + In ha_partition::open() it will clone its original handlers partitions + which will allocate then on the correct MEM_ROOT and also open them. 
+*/ + +handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, - table->s->db_type()); - ((ha_partition*)new_handler)->m_part_info= m_part_info; - ((ha_partition*)new_handler)->is_clone= TRUE; - if (new_handler && !new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, - HA_OPEN_IGNORE_IF_LOCKED)) - return new_handler; - return NULL; + ha_partition *new_handler; + + DBUG_ENTER("ha_partition::clone"); + new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info, + this, mem_root); + /* + Allocate new_handler->ref here because otherwise ha_open will allocate it + on this->table->mem_root and we will not be able to reclaim that memory + when the clone handler object is destroyed. + */ + if (new_handler && + !(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(m_ref_length)*2))) + new_handler= NULL; + + if (new_handler && + new_handler->ha_open(table, name, + table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + new_handler= NULL; + + DBUG_RETURN((handler*) new_handler); } @@ -2754,7 +2924,7 @@ int ha_partition::close(void) DBUG_ASSERT(table->s == table_share); delete_queue(&m_queue); bitmap_free(&m_bulk_insert_started); - if (!is_clone) + if (!m_is_clone_of) bitmap_free(&(m_part_info->used_partitions)); file= m_file; @@ -4376,6 +4546,7 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index, break; } } + m_last_part= part; } else { diff --git a/sql/ha_partition.h b/sql/ha_partition.h index b5463a8f48e..3ec7656e62c 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -55,6 +55,16 @@ typedef struct st_ha_data_partition HA_DUPLICATE_POS | \ HA_CAN_SQL_HANDLER | \ HA_CAN_INSERT_DELAYED) + +/* First 4 bytes in the .par file is the number of 32-bit words in the file */ +#define PAR_WORD_SIZE 4 +/* offset to the .par file checksum */ +#define PAR_CHECKSUM_OFFSET 4 +/* offset to the total number of partitions */ +#define PAR_NUM_PARTS_OFFSET 8 +/* offset to the engines array */ +#define PAR_ENGINES_OFFSET 12 + class ha_partition :public handler { private: @@ -71,7 +81,7 @@ private: /* Data for the partition handler */ int m_mode; // Open mode uint m_open_test_lock; // Open test_if_locked - char *m_file_buffer; // Buffer with names + char *m_file_buffer; // Content of the .par file char *m_name_buffer_ptr; // Pointer to first partition name MEM_ROOT m_mem_root; plugin_ref *m_engine_array; // Array of types of the handlers @@ -135,6 +145,13 @@ private: bool m_is_sub_partitioned; // Is subpartitioned bool m_ordered_scan_ongoing; + /* + If set, this object was created with ha_partition::clone and doesn't + "own" the m_part_info structure. + */ + ha_partition *m_is_clone_of; + MEM_ROOT *m_clone_mem_root; + /* We keep track if all underlying handlers are MyISAM since MyISAM has a great number of extra flags not needed by other handlers. @@ -171,11 +188,6 @@ private: PARTITION_SHARE *share; /* Shared lock info */ #endif - /* - TRUE <=> this object was created with ha_partition::clone and doesn't - "own" the m_part_info structure. - */ - bool is_clone; bool auto_increment_lock; /**< lock reading/updating auto_inc */ /** Flag to keep the auto_increment lock through out the statement. 
@@ -188,7 +200,7 @@ private: /** used for prediction of start_bulk_insert rows */ enum_monotonicity_info m_part_func_monotonicity_info; public: - handler *clone(MEM_ROOT *mem_root); + handler *clone(const char *name, MEM_ROOT *mem_root); virtual void set_part_info(partition_info *part_info) { m_part_info= part_info; @@ -207,6 +219,10 @@ public: */ ha_partition(handlerton *hton, TABLE_SHARE * table); ha_partition(handlerton *hton, partition_info * part_info); + ha_partition(handlerton *hton, TABLE_SHARE *share,
+ partition_info *part_info_arg,
+ ha_partition *clone_arg,
+ MEM_ROOT *clone_mem_root_arg); ~ha_partition(); /* A partition handler has no characteristics in itself. It only inherits @@ -277,7 +293,10 @@ private: And one method to read it in. */ bool create_handler_file(const char *name); - bool get_from_handler_file(const char *name, MEM_ROOT *mem_root); + bool setup_engine_array(MEM_ROOT *mem_root); + bool read_par_file(const char *name); + bool get_from_handler_file(const char *name, MEM_ROOT *mem_root, + bool is_clone); bool new_handlers_from_part_info(MEM_ROOT *mem_root); bool create_handlers(MEM_ROOT *mem_root); void clear_handler_file(); diff --git a/sql/handler.cc b/sql/handler.cc index 612eda84a10..eaa4c6e5036 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2035,11 +2035,10 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, /**************************************************************************** ** General handler functions ****************************************************************************/ -handler *handler::clone(MEM_ROOT *mem_root) +handler *handler::clone(const char *name, MEM_ROOT *mem_root) { - handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type()); - - if (!new_handler) + handler *new_handler= get_new_handler(table->s, mem_root, ht); + if (! new_handler) return NULL; /* @@ -2047,17 +2046,27 @@ handler *handler::clone(MEM_ROOT *mem_root) on this->table->mem_root and we will not be able to reclaim that memory when the clone handler object is destroyed. */ - if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2))) + + if (!(new_handler->ref= (uchar*) alloc_root(mem_root, + ALIGN_SIZE(ref_length)*2))) return NULL; - if (new_handler->ha_open(table, - table->s->normalized_path.str, - table->db_stat, + + /* + TODO: Implement a more efficient way to have more than one index open for + the same table instance. The ha_open call is not cachable for clone. + + This is not critical as the engines already have the table open + and should be able to use the original instance of the table. + */ + if (new_handler->ha_open(table, name, table->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) return NULL; + new_handler->cloned= 1; // Marker for debugging return new_handler; } + double handler::keyread_time(uint index, uint ranges, ha_rows rows) { /* diff --git a/sql/handler.h b/sql/handler.h index 830af71baee..e564d5562fe 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -20,7 +20,6 @@ #pragma interface /* gcc class implementation */ #endif -#include <my_handler.h> #include <ft_global.h> #include <keycache.h> @@ -1180,7 +1179,7 @@ public: DBUG_ASSERT(locked == FALSE); /* TODO: DBUG_ASSERT(inited == NONE); */ } - virtual handler *clone(MEM_ROOT *mem_root); + virtual handler *clone(const char *name, MEM_ROOT *mem_root); /** This is called after create to allow us to set up cached variables */ void init() { diff --git a/sql/hostname.cc b/sql/hostname.cc index ec090cbe02f..dfcdd3edd90 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -183,7 +184,7 @@ char * ip_to_hostname(struct in_addr *in, uint *errors) &tmp_hostent,buff,sizeof(buff),&tmp_errno))) { DBUG_PRINT("error",("gethostbyaddr_r returned %d",tmp_errno)); - return 0; + DBUG_RETURN(0); } if (!(check=my_gethostbyname_r(hp->h_name,&tmp_hostent2,buff2,sizeof(buff2), &tmp_errno))) diff --git a/sql/item.cc b/sql/item.cc index a08180561d9..fe7dcd56483 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -912,7 +912,7 @@ bool Item_string::eq(const Item *item, bool binary_cmp) const /** Get the value of the function as a MYSQL_TIME structure. - As a extra convenience the time structure is reset on error! + As a extra convenience the time structure is reset on error or NULL values! */ bool Item::get_date(MYSQL_TIME *ltime,uint fuzzydate) @@ -928,8 +928,12 @@ bool Item::get_date(MYSQL_TIME *ltime,uint fuzzydate) } else { - longlong value= val_int(); int was_cut; + longlong value= val_int(); + + if (null_value) + goto err; + if (number_to_datetime(value, ltime, fuzzydate, &was_cut) == LL(-1)) { char buff[22], *end; @@ -2755,6 +2759,16 @@ bool Item_param::set_longdata(const char *str, ulong length) (here), and first have to concatenate all pieces together, write query to the binary log and only then perform conversion. */ + if (str_value.length() + length > max_long_data_size) + { + my_message(ER_UNKNOWN_ERROR, + "Parameter of prepared statement which is set through " + "mysql_send_long_data() is longer than " + "'max_long_data_size' bytes", + MYF(0)); + DBUG_RETURN(true); + } + if (str_value.append(str, length, &my_charset_bin)) DBUG_RETURN(TRUE); state= LONG_DATA_VALUE; @@ -6107,7 +6121,7 @@ void Item_ref::print(String *str, enum_query_type query_type) { THD *thd= current_thd; append_identifier(thd, str, (*ref)->real_item()->name, - (*ref)->real_item()->name_length); + strlen((*ref)->real_item()->name)); } else (*ref)->print(str, query_type); @@ -7129,7 +7143,7 @@ String *Item_cache_int::val_str(String *str) DBUG_ASSERT(fixed == 1); if (!value_cached && !cache_value()) return NULL; - str->set(value, default_charset()); + str->set_int(value, unsigned_flag, default_charset()); return str; } diff --git a/sql/item.h b/sql/item.h index 4bad8bf0722..be2d340a4d3 100644 --- a/sql/item.h +++ b/sql/item.h @@ -514,6 +514,11 @@ public: */ Item *next; uint32 max_length; + /* + TODO: convert name and name_length fields into LEX_STRING to keep them in + sync (see bug #11829681/60295 etc). Then also remove some strlen(name) + calls. + */ uint name_length; /* Length of name */ int8 marker; uint8 decimals; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 64eb8614ee3..c6c09bf0cb5 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -912,7 +912,7 @@ int Arg_comparator::set_cmp_func(Item_result_field *owner_arg, cache_converted_constant can't be used here because it can't correctly convert a DATETIME value from string to int representation. */ - Item_cache_int *cache= new Item_cache_int(); + Item_cache_int *cache= new Item_cache_int(MYSQL_TYPE_DATETIME); /* Mark the cache as non-const to prevent re-caching. 
*/ cache->set_used_tables(1); if (!(*a)->is_datetime()) @@ -4008,13 +4008,11 @@ void Item_func_in::fix_length_and_dec() uint j=0; for (uint i=1 ; i < arg_count ; i++) { - if (!args[i]->null_value) // Skip NULL values - { - array->set(j,args[i]); - j++; - } - else - have_null= 1; + array->set(j,args[i]); + if (!args[i]->null_value) // Skip NULL values + j++; + else + have_null= 1; } if ((array->used_count= j)) array->sort(); diff --git a/sql/item_func.cc b/sql/item_func.cc index c16aa3a1823..34448f7bb9b 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -496,7 +496,10 @@ bool Item_func::is_expensive_processor(uchar *arg) my_decimal *Item_func::val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed); - int2my_decimal(E_DEC_FATAL_ERROR, val_int(), unsigned_flag, decimal_value); + longlong nr= val_int(); + if (null_value) + return 0; /* purecov: inspected */ + int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value); return decimal_value; } @@ -854,7 +857,7 @@ longlong Item_func_numhybrid::val_int() return 0; char *end= (char*) res->ptr() + res->length(); - CHARSET_INFO *cs= str_value.charset(); + CHARSET_INFO *cs= res->charset(); return (*(cs->cset->strtoll10))(cs, res->ptr(), &end, &err_not_used); } default: @@ -1817,9 +1820,10 @@ void Item_func_integer::fix_length_and_dec() void Item_func_int_val::fix_num_length_and_dec() { - max_length= args[0]->max_length - (args[0]->decimals ? - args[0]->decimals + 1 : - 0) + 2; + ulonglong tmp_max_length= (ulonglong ) args[0]->max_length - + (args[0]->decimals ? args[0]->decimals + 1 : 0) + 2; + max_length= tmp_max_length > (ulonglong) max_field_size ? + max_field_size : (uint32) tmp_max_length; uint tmp= float_length(decimals); set_if_smaller(max_length,tmp); decimals= 0; @@ -2132,10 +2136,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value) if (!(null_value= (args[0]->null_value || args[1]->null_value || my_decimal_round(E_DEC_FATAL_ERROR, value, (int) dec, truncate, decimal_value) > 1))) - { - decimal_value->frac= decimals; return decimal_value; - } return 0; } @@ -3853,6 +3854,7 @@ Item_func_set_user_var::fix_length_and_dec() maybe_null=args[0]->maybe_null; max_length=args[0]->max_length; decimals=args[0]->decimals; + unsigned_flag= args[0]->unsigned_flag; collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT); } diff --git a/sql/item_func.h b/sql/item_func.h index 98207099da1..9c7f1602ac0 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1,4 +1,5 @@ -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011 Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 1a83d21d38a..7e651a034d8 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -39,6 +39,9 @@ C_MODE_START #include "../mysys/my_static.h" // For soundex_map C_MODE_END +/** + @todo Remove this. It is not safe to use a shared String object. 
+ */ String my_empty_string("",default_charset_info); @@ -116,7 +119,6 @@ String *Item_func_md5::val_str(String *str) { DBUG_ASSERT(fixed == 1); String * sptr= args[0]->val_str(str); - str->set_charset(&my_charset_bin); if (sptr) { uchar digest[16]; @@ -129,6 +131,7 @@ String *Item_func_md5::val_str(String *str) return 0; } array_to_hex((char *) str->ptr(), (const char*) digest, 16); + str->set_charset(&my_charset_bin); str->length((uint) 32); return str; } @@ -155,7 +158,6 @@ String *Item_func_sha::val_str(String *str) { DBUG_ASSERT(fixed == 1); String * sptr= args[0]->val_str(str); - str->set_charset(&my_charset_bin); if (sptr) /* If we got value different from NULL */ { SHA1_CONTEXT context; /* Context used to generate SHA1 hash */ @@ -165,11 +167,13 @@ String *Item_func_sha::val_str(String *str) /* No need to check error as the only case would be too long message */ mysql_sha1_input(&context, (const uchar *) sptr->ptr(), sptr->length()); + /* Ensure that memory is free and we got result */ if (!( str->alloc(SHA1_HASH_SIZE*2) || (mysql_sha1_result(&context,digest)))) { array_to_hex((char *) str->ptr(), (const char*) digest, SHA1_HASH_SIZE); + str->set_charset(&my_charset_bin); str->length((uint) SHA1_HASH_SIZE*2); null_value=0; return str; @@ -461,7 +465,7 @@ String *Item_func_des_encrypt::val_str(String *str) if ((null_value= args[0]->null_value)) return 0; // ENCRYPT(NULL) == NULL if ((res_length=res->length()) == 0) - return &my_empty_string; + return make_empty_result(); if (arg_count == 1) { @@ -517,6 +521,7 @@ String *Item_func_des_encrypt::val_str(String *str) tmp_arg[res_length-1]=tail; // save extra length tmp_value.realloc(res_length+1); tmp_value.length(res_length+1); + tmp_value.set_charset(&my_charset_bin); tmp_value[0]=(char) (128 | key_number); // Real encryption bzero((char*) &ivec,sizeof(ivec)); @@ -604,6 +609,7 @@ String *Item_func_des_decrypt::val_str(String *str) if ((tail=(uint) (uchar) tmp_value[length-2]) > 8) goto wrong_key; // Wrong key tmp_value.length(length-1-tail); + tmp_value.set_charset(&my_charset_bin); return &tmp_value; error: @@ -641,7 +647,7 @@ String *Item_func_concat_ws::val_str(String *str) use_as_buff= &tmp_value; str->length(0); // QQ; Should be removed - res=str; + res=str; // If 0 arg_count // Skip until non-null argument is found. // If not, return the empty string @@ -653,7 +659,7 @@ String *Item_func_concat_ws::val_str(String *str) } if (i == arg_count) - return &my_empty_string; + return make_empty_result(); for (i++; i < arg_count ; i++) { @@ -804,7 +810,7 @@ String *Item_func_reverse::val_str(String *str) return 0; /* An empty string is a special case as the string pointer may be null */ if (!res->length()) - return &my_empty_string; + return make_empty_result(); if (tmp_value.alloced_length() < res->length() && tmp_value.realloc(res->length())) { @@ -1144,8 +1150,7 @@ String *Item_func_left::val_str(String *str) /* if "unsigned_flag" is set, we have a *huge* positive number. */ if ((length <= 0) && (!args[1]->unsigned_flag)) - return &my_empty_string; - + return make_empty_result(); if ((res->length() <= (ulonglong) length) || (res->length() <= (char_pos= res->charpos((int) length)))) return res; @@ -1188,7 +1193,7 @@ String *Item_func_right::val_str(String *str) /* if "unsigned_flag" is set, we have a *huge* positive number. 
*/ if ((length <= 0) && (!args[1]->unsigned_flag)) - return &my_empty_string; /* purecov: inspected */ + return make_empty_result(); /* purecov: inspected */ if (res->length() <= (ulonglong) length) return res; /* purecov: inspected */ @@ -1227,7 +1232,7 @@ String *Item_func_substr::val_str(String *str) /* Negative or zero length, will return empty string. */ if ((arg_count == 3) && (length <= 0) && (length == 0 || !args[2]->unsigned_flag)) - return &my_empty_string; + return make_empty_result(); /* Assumes that the maximum length of a String is < INT_MAX32. */ /* Set here so that rest of code sees out-of-bound value as such. */ @@ -1238,12 +1243,12 @@ String *Item_func_substr::val_str(String *str) /* Assumes that the maximum length of a String is < INT_MAX32. */ if ((!args[1]->unsigned_flag && (start < INT_MIN32 || start > INT_MAX32)) || (args[1]->unsigned_flag && ((ulonglong) start > INT_MAX32))) - return &my_empty_string; + return make_empty_result(); start= ((start < 0) ? res->numchars() + start : start - 1); start= res->charpos((int) start); if ((start < 0) || ((uint) start + 1 > res->length())) - return &my_empty_string; + return make_empty_result(); length= res->charpos((int) length, (uint32) start); tmp_length= res->length() - start; @@ -1306,7 +1311,7 @@ String *Item_func_substr_index::val_str(String *str) null_value=0; uint delimiter_length= delimiter->length(); if (!res->length() || !delimiter_length || !count) - return &my_empty_string; // Wrong parameters + return make_empty_result(); // Wrong parameters res->set_charset(collation.collation); @@ -1655,7 +1660,7 @@ String *Item_func_password::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; + return make_empty_result(); my_make_scrambled_password(tmp_value, res->ptr(), res->length()); str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, res->charset()); return str; @@ -1679,7 +1684,7 @@ String *Item_func_old_password::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; + return make_empty_result(); my_make_scrambled_password_323(tmp_value, res->ptr(), res->length()); str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH_323, res->charset()); return str; @@ -1707,8 +1712,7 @@ String *Item_func_encrypt::val_str(String *str) if ((null_value=args[0]->null_value)) return 0; if (res->length() == 0) - return &my_empty_string; - + return make_empty_result(); if (arg_count == 1) { // generate random salt time_t timestamp=current_thd->query_start(); @@ -1968,7 +1972,7 @@ String *Item_func_soundex::val_str(String *str) for ( ; ; ) /* Skip pre-space */ { if ((rc= cs->cset->mb_wc(cs, &wc, (uchar*) from, (uchar*) end)) <= 0) - return &my_empty_string; /* EOL or invalid byte sequence */ + return make_empty_result(); /* EOL or invalid byte sequence */ if (rc == 1 && cs->ctype) { @@ -1993,7 +1997,7 @@ String *Item_func_soundex::val_str(String *str) { /* Extra safety - should not really happen */ DBUG_ASSERT(false); - return &my_empty_string; + return make_empty_result(); } to+= rc; break; @@ -2290,7 +2294,7 @@ String *Item_func_make_set::val_str(String *str) else { if (tmp_str.copy(*res)) // Don't use 'str' - return &my_empty_string; + return make_empty_result(); result= &tmp_str; } } @@ -2300,11 +2304,11 @@ String *Item_func_make_set::val_str(String *str) { // Copy data to tmp_str if (tmp_str.alloc(result->length()+res->length()+1) || tmp_str.copy(*result)) - return &my_empty_string; + return 
make_empty_result(); result= &tmp_str; } if (tmp_str.append(STRING_WITH_LEN(","), &my_charset_bin) || tmp_str.append(*res)) - return &my_empty_string; + return make_empty_result(); } } } @@ -2443,7 +2447,7 @@ String *Item_func_repeat::val_str(String *str) null_value= 0; if (count <= 0 && (count == 0 || !args[1]->unsigned_flag)) - return &my_empty_string; + return make_empty_result(); /* Assumes that the maximum length of a String is < INT_MAX32. */ /* Bounds check on count: If this is triggered, we will error. */ @@ -2751,7 +2755,7 @@ String *Item_func_conv::val_str(String *str) ptr= longlong2str(dec, ans, to_base); if (str->copy(ans, (uint32) (ptr-ans), default_charset())) - return &my_empty_string; + return make_empty_result(); return str; } @@ -2761,22 +2765,16 @@ String *Item_func_conv_charset::val_str(String *str) DBUG_ASSERT(fixed == 1); if (use_cached_value) return null_value ? 0 : &str_value; - /* - Here we don't pass 'str' as a parameter to args[0]->val_str() - as 'str' may point to 'str_value' (e.g. see Item::save_in_field()), - which we use below to convert string. - Use argument's 'str_value' instead. - */ - String *arg= args[0]->val_str(&args[0]->str_value); + String *arg= args[0]->val_str(str); uint dummy_errors; if (!arg) { null_value=1; return 0; } - null_value= str_value.copy(arg->ptr(),arg->length(),arg->charset(), + null_value= tmp_value.copy(arg->ptr(), arg->length(), arg->charset(), conv_charset, &dummy_errors); - return null_value ? 0 : check_well_formed_result(&str_value); + return null_value ? 0 : check_well_formed_result(&tmp_value); } void Item_func_conv_charset::fix_length_and_dec() @@ -2918,7 +2916,7 @@ String *Item_func_hex::val_str(String *str) return 0; ptr= longlong2str(dec,ans,16); if (str->copy(ans,(uint32) (ptr-ans),default_charset())) - return &my_empty_string; // End of memory + return make_empty_result(); // End of memory return str; } @@ -3218,14 +3216,68 @@ String *Item_func_quote::val_str(String *str) } arg_length= arg->length(); - new_length= arg_length+2; /* for beginning and ending ' signs */ - for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++) - new_length+= get_esc_bit(escmask, (uchar) *from); + if (collation.collation->mbmaxlen == 1) + { + new_length= arg_length + 2; /* for beginning and ending ' signs */ + for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++) + new_length+= get_esc_bit(escmask, (uchar) *from); + } + else + { + new_length= (arg_length * 2) + /* For string characters */ + (2 * collation.collation->mbmaxlen); /* For quotes */ + } if (tmp_value.alloc(new_length)) goto null; + if (collation.collation->mbmaxlen > 1) + { + CHARSET_INFO *cs= collation.collation; + int mblen; + uchar *to_end; + to= (char*) tmp_value.ptr(); + to_end= (uchar*) to + new_length; + + /* Put leading quote */ + if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0) + goto null; + to+= mblen; + + for (start= (char*) arg->ptr(), end= start + arg_length; start < end; ) + { + my_wc_t wc; + bool escape; + if ((mblen= cs->cset->mb_wc(cs, &wc, (uchar*) start, (uchar*) end)) <= 0) + goto null; + start+= mblen; + switch (wc) { + case 0: escape= 1; wc= '0'; break; + case '\032': escape= 1; wc= 'Z'; break; + case '\'': escape= 1; break; + case '\\': escape= 1; break; + default: escape= 0; break; + } + if (escape) + { + if ((mblen= cs->cset->wc_mb(cs, '\\', (uchar*) to, to_end)) <= 0) + goto null; + to+= mblen; + } + if ((mblen= cs->cset->wc_mb(cs, wc, (uchar*) to, to_end)) <= 0) + goto null; + to+= mblen; + } + + 
/* Put trailing quote */ + if ((mblen= cs->cset->wc_mb(cs, '\'', (uchar *) to, to_end)) <= 0) + goto null; + to+= mblen; + new_length= to - tmp_value.ptr(); + goto ret; + } + /* We replace characters from the end to the beginning */ @@ -3257,6 +3309,8 @@ String *Item_func_quote::val_str(String *str) } } *to= '\''; + +ret: tmp_value.length(new_length); tmp_value.set_charset(collation.collation); null_value= 0; diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 0a6375e80a5..8974feb10fa 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -22,6 +22,23 @@ class Item_str_func :public Item_func { +protected: + /** + Sets the result value of the function an empty string, using the current + character set. No memory is allocated. + @retval A pointer to the str_value member. + */ + String *make_empty_result() + { + /* + Reset string length to an empty string. We don't use str_value.set() as + we don't want to free and potentially have to reallocate the buffer + for each call. + */ + str_value.length(0); + str_value.set_charset(collation.collation); + return &str_value; + } public: Item_str_func() :Item_func() { decimals=NOT_FIXED_DEC; } Item_str_func(Item *a) :Item_func(a) {decimals=NOT_FIXED_DEC; } @@ -694,15 +711,17 @@ public: String *val_str(String *); void fix_length_and_dec() { - ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + 2; - max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH); collation.set(args[0]->collation); + ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 + + 2 * collation.collation->mbmaxlen; + max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH); } }; class Item_func_conv_charset :public Item_str_func { bool use_cached_value; + String tmp_value; public: bool safe; CHARSET_INFO *conv_charset; // keep it public diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 4f4073f22af..7b2aee02374 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -607,17 +607,13 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref) switch (hybrid_type= item->result_type()) { case INT_RESULT: - max_length= 20; - break; case DECIMAL_RESULT: + case STRING_RESULT: max_length= item->max_length; break; case REAL_RESULT: max_length= float_length(decimals); break; - case STRING_RESULT: - max_length= item->max_length; - break; case ROW_RESULT: default: DBUG_ASSERT(0); diff --git a/sql/item_sum.h b/sql/item_sum.h index 78bc30295f8..587061c1ed3 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -354,6 +354,7 @@ public: forced_const= TRUE; } virtual bool const_item() const { return forced_const; } + virtual bool const_during_execution() const { return false; } virtual void print(String *str, enum_query_type query_type); void fix_num_length_and_dec(); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 9cf56148994..c17557905bd 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -294,8 +294,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, for (; ptr != end && val != val_end; ptr++) { /* Skip pre-space between each argument */ - while (val != val_end && my_isspace(cs, *val)) - val++; + if ((val+= cs->cset->scan(cs, val, val_end, MY_SEQ_SPACES)) >= val_end) + break; if (*ptr == '%' && ptr+1 != end) { @@ -649,7 +649,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'W': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday= calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ 
-658,7 +658,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, system_charset_info); break; case 'a': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),0); @@ -823,7 +823,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, MYSQL_TIME *l_time, } break; case 'w': - if (type == MYSQL_TIMESTAMP_TIME) + if (type == MYSQL_TIMESTAMP_TIME || !(l_time->month || l_time->year)) return 1; weekday=calc_weekday(calc_daynr(l_time->year,l_time->month, l_time->day),1); @@ -3300,6 +3300,7 @@ void Item_func_str_to_date::fix_length_and_dec() { maybe_null= 1; decimals=0; + cached_format_type= DATE_TIME; cached_field_type= MYSQL_TYPE_DATETIME; max_length= MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; cached_timestamp_type= MYSQL_TIMESTAMP_NONE; diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index f4299460abf..ae387363977 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -106,8 +107,11 @@ public: { DBUG_ASSERT(fixed == 1); return (double) Item_func_month::val_int(); } String *val_str(String *str) { - str->set(val_int(), &my_charset_bin); - return null_value ? 0 : str; + longlong nr= val_int(); + if (null_value) + return 0; + str->set(nr, &my_charset_bin); + return str; } const char *func_name() const { return "month"; } enum Item_result result_type () const { return INT_RESULT; } diff --git a/sql/log_event.cc b/sql/log_event.cc index 2bb95930aca..428edbce365 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -8905,7 +8905,19 @@ static bool record_compare(TABLE *table) } } - if (table->s->blob_fields + table->s->varchar_fields == 0) + /** + Compare full record only if: + - there are no blob fields (otherwise we would also need + to compare blobs contents as well); + - there are no varchar fields (otherwise we would also need + to compare varchar contents as well); + - there are no null fields, otherwise NULLed fields + contents (i.e., the don't care bytes) may show arbitrary + values, depending on how each engine handles internally. + */ + if ((table->s->blob_fields + + table->s->varchar_fields + + table->s->null_fields) == 0) { result= cmp_record(table,record[1]); goto record_compare_exit; @@ -8920,13 +8932,22 @@ static bool record_compare(TABLE *table) goto record_compare_exit; } - /* Compare updated fields */ + /* Compare fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { - if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + + /** + We only compare field contents that are not null. + NULL fields (i.e., their null bits) were compared + earlier. 
+ */ + if (!(*(ptr))->is_null()) { - result= TRUE; - goto record_compare_exit; + if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + { + result= TRUE; + goto record_compare_exit; + } } } diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index e11a86c6b4e..0a62c17e2fc 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1033,7 +1033,11 @@ void reset_mqh(LEX_USER *lu, bool get_them); bool check_mqh(THD *thd, uint check_command); void time_out_user_resource_limits(THD *thd, USER_CONN *uc); void decrease_user_connections(USER_CONN *uc); -void thd_init_client_charset(THD *thd, uint cs_number); +bool thd_init_client_charset(THD *thd, uint cs_number); +inline bool is_supported_parser_charset(CHARSET_INFO *cs) +{ + return test(cs->mbminlen == 1); +} bool setup_connection_thread_globals(THD *thd); bool login_connection(THD *thd); void end_connection(THD *thd); @@ -1982,6 +1986,7 @@ extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb; extern uint test_flags,select_errors,ha_open_options; extern uint protocol_version, mysqld_port, mysqld_extra_port, dropping_tables; extern uint delay_key_write_options; +extern ulong max_long_data_size; #endif /* MYSQL_SERVER */ #if defined MYSQL_SERVER || defined INNODB_COMPATIBILITY_HOOKS extern MYSQL_PLUGIN_IMPORT uint lower_case_table_names; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 51ce881ee78..41ca0773782 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -410,6 +410,7 @@ TYPELIB log_output_typelib= {array_elements(log_output_names)-1,"", /* the default log output is log tables */ static bool lower_case_table_names_used= 0; +static bool max_long_data_size_used= false; static bool volatile select_thread_in_use, signal_thread_in_use; static bool volatile ready_to_exit; static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0; @@ -585,6 +586,12 @@ ulong specialflag=0; ulong binlog_cache_use= 0, binlog_cache_disk_use= 0; ulong max_connections, max_connect_errors; ulong extra_max_connections; +/* + Maximum length of parameter value which can be set through + mysql_send_long_data() call. +*/ +ulong max_long_data_size; + uint max_user_connections= 0; /** Limit of the total number of prepared statements in the server. @@ -3170,6 +3177,19 @@ sizeof(load_default_groups)/sizeof(load_default_groups[0]); #endif +#ifndef EMBEDDED_LIBRARY +static +int +check_enough_stack_size() +{ + uchar stack_top; + + return check_stack_overrun(current_thd, STACK_MIN_SIZE, + &stack_top); +} +#endif + + /** Initialize one of the global date/time format variables. 
@@ -3370,12 +3390,6 @@ static int init_common_variables(const char *conf_file_name, int argc, max_system_variables.pseudo_thread_id= (ulong)~0; server_start_time= flush_status_time= my_time(0); - /* TODO: remove this when my_time_t is 64 bit compatible */ - if (server_start_time >= (time_t) MY_TIME_T_MAX) - { - sql_print_error("This MySQL server doesn't support dates later then 2038"); - return 1; - } rpl_filter= new Rpl_filter; binlog_filter= new Rpl_filter; @@ -3414,6 +3428,13 @@ static int init_common_variables(const char *conf_file_name, int argc, */ mysql_bin_log.init_pthread_objects(); + /* TODO: remove this when my_time_t is 64 bit compatible */ + if (!IS_TIME_T_VALID_FOR_TIMESTAMP(server_start_time)) + { + sql_print_error("This MySQL server doesn't support dates later then 2038"); + return 1; + } + if (gethostname(glob_hostname,sizeof(glob_hostname)) < 0) { strmake(glob_hostname, STRING_WITH_LEN("localhost")); @@ -3550,7 +3571,11 @@ static int init_common_variables(const char *conf_file_name, int argc, #endif mysys_uses_curses=0; #ifdef USE_REGEX - my_regex_init(&my_charset_latin1); +#ifndef EMBEDDED_LIBRARY + my_regex_init(&my_charset_latin1, check_enough_stack_size); +#else + my_regex_init(&my_charset_latin1, NULL); +#endif #endif /* Process a comma-separated character set list and choose @@ -5969,7 +5994,8 @@ enum options_mysqld OPT_SLOW_QUERY_LOG_FILE, OPT_IGNORE_BUILTIN_INNODB, OPT_BINLOG_DIRECT_NON_TRANS_UPDATE, - OPT_DEFAULT_CHARACTER_SET_OLD + OPT_DEFAULT_CHARACTER_SET_OLD, + OPT_MAX_LONG_DATA_SIZE }; @@ -7127,6 +7153,12 @@ thread is in the relay logs.", &global_system_variables.max_length_for_sort_data, &max_system_variables.max_length_for_sort_data, 0, GET_ULONG, REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0}, + {"max_long_data_size", OPT_MAX_LONG_DATA_SIZE, + "The maximum size of prepared statement parameter which can be provided " + "through mysql_send_long_data() API call. To be used when limit of " + "max_allowed_packet is too small", + &max_long_data_size, &max_long_data_size, 0, GET_ULONG, + REQUIRED_ARG, 1024*1024L, 1024, UINT_MAX32, MALLOC_OVERHEAD, 1, 0}, {"max_prepared_stmt_count", OPT_MAX_PREPARED_STMT_COUNT, "Maximum number of prepared statements in the server.", &max_prepared_stmt_count, &max_prepared_stmt_count, @@ -9044,6 +9076,9 @@ mysqld_get_one_option(int optid, } break; #endif /* defined(ENABLED_DEBUG_SYNC) */ + case OPT_MAX_LONG_DATA_SIZE: + max_long_data_size_used= true; + break; } return 0; } @@ -9133,6 +9168,14 @@ static int get_options(int *argc,char **argv) opt_log_slow_slave_statements) && !opt_slow_log) sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log_slow_queries is not set"); + if (global_system_variables.net_buffer_length > + global_system_variables.max_allowed_packet) + { + sql_print_warning("net_buffer_length (%lu) is set to be larger " + "than max_allowed_packet (%lu). Please rectify.", + global_system_variables.net_buffer_length, + global_system_variables.max_allowed_packet); + } #if defined(HAVE_BROKEN_REALPATH) my_use_symdir=0; @@ -9215,6 +9258,14 @@ static int get_options(int *argc,char **argv) &extra_max_connections, &extra_connection_count); #endif + + /* + If max_long_data_size is not specified explicitly use + value of max_allowed_packet. 
+ */ + if (!max_long_data_size_used) + max_long_data_size= global_system_variables.max_allowed_packet; + return 0; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index f64dad8bb8d..ffeea3dd25f 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1335,7 +1335,7 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) } thd= head->in_use; - if (!(file= head->file->clone(thd->mem_root))) + if (!(file= head->file->clone(head->s->normalized_path.str, thd->mem_root))) { /* Manually set the error flag. Note: there seems to be quite a few diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index a7aee1d7f61..d786bba8b37 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -211,6 +211,7 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) /** Substitutes constants for some COUNT(), MIN() and MAX() functions. + @param thd thread handler @param tables list of leaves of join table tree @param all_fields All fields to be returned @param conds WHERE clause @@ -228,9 +229,12 @@ static int get_index_max_value(TABLE *table, TABLE_REF *ref, uint range_fl) HA_ERR_KEY_NOT_FOUND on impossible conditions @retval HA_ERR_... if a deadlock or a lock wait timeout happens, for example + @retval + ER_... e.g. ER_SUBQUERY_NO_1_ROW */ -int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) +int opt_sum_query(THD *thd, + TABLE_LIST *tables, List<Item> &all_fields, COND *conds) { List_iterator_fast<Item> it(all_fields); int const_result= 1; @@ -241,6 +245,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) table_map where_tables= 0; Item *item; int error; + DBUG_ENTER("opt_sum_query"); if (conds) where_tables= conds->used_tables(); @@ -269,7 +274,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) WHERE t2.field IS NULL; */ if (tl->table->map & where_tables) - return 0; + DBUG_RETURN(0); } else used_tables|= tl->table->map; @@ -297,7 +302,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) { tl->table->file->print_error(error, MYF(0)); tl->table->in_use->fatal_error(); - return error; + DBUG_RETURN(error); } count*= tl->table->file->stats.records; } @@ -390,10 +395,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) if (error) { if (error == HA_ERR_KEY_NOT_FOUND || error == HA_ERR_END_OF_FILE) - return HA_ERR_KEY_NOT_FOUND; // No rows matching WHERE + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); // No rows matching WHERE /* HA_ERR_LOCK_DEADLOCK or some other error */ table->file->print_error(error, MYF(0)); - return(error); + DBUG_RETURN(error); } removed_tables|= table->map; } @@ -437,6 +442,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) const_result= 0; } } + + if (thd->is_error()) + DBUG_RETURN(thd->main_da.sql_errno()); + /* If we have a where clause, we can only ignore searching in the tables if MIN/MAX optimisation replaced all used tables @@ -446,7 +455,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) */ if (removed_tables && used_tables != removed_tables) const_result= 0; // We didn't remove all tables - return const_result; + DBUG_RETURN(const_result); } @@ -732,6 +741,12 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, if (is_null || (is_null_safe_eq && args[1]->is_null())) { + /* + If we have a non-nullable index, we cannot use it, + since set_null will be ignored, and we will compare uninitialized data. 
+ */ + if (!part->field->real_maybe_null()) + DBUG_RETURN(FALSE); part->field->set_null(); *key_ptr= (uchar) 1; } @@ -802,8 +817,9 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, @param[out] prefix_len Length of prefix for the search range @note - This function may set table->key_read to 1, which must be reset after - index is used! (This can only happen when function returns 1) + This function may set field->table->key_read to true, + which must be reset after index is used! + (This can only happen when function returns 1) @retval 0 Index can not be used to optimize MIN(field)/MAX(field) @@ -818,7 +834,9 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, uint *range_fl, uint *prefix_len) { if (!(field->flags & PART_KEY_FLAG)) - return 0; // Not key field + return FALSE; // Not key field + + DBUG_ENTER("find_key_for_maxmin"); TABLE *table= field->table; uint idx= 0; @@ -843,7 +861,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, part++, jdx++, key_part_to_use= (key_part_to_use << 1) | 1) { if (!(table->file->index_flags(idx, jdx, 0) & HA_READ_ORDER)) - return 0; + DBUG_RETURN(FALSE); /* Check whether the index component is partial */ Field *part_field= table->field[part->fieldnr-1]; @@ -892,12 +910,12 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, */ if (field->part_of_key.is_set(idx)) table->enable_keyread(); - return 1; + DBUG_RETURN(TRUE); } } } } - return 0; + DBUG_RETURN(FALSE); } diff --git a/sql/set_var.cc b/sql/set_var.cc index 125b7ddce90..cbd6122fc22 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -154,6 +154,8 @@ static bool sys_update_slow_log_path(THD *thd, set_var * var); static void sys_default_slow_log_path(THD *thd, enum_var_type type); static void fix_sys_log_slow_filter(THD *thd, enum_var_type); static uchar *get_myisam_mmap_size(THD *thd); +static int check_max_allowed_packet(THD *thd, set_var *var); +static int check_net_buffer_length(THD *thd, set_var *var); /* Variable definition list @@ -390,7 +392,8 @@ static sys_var_const sys_lower_case_table_names(&vars, (uchar*) &lower_case_table_names); static sys_var_thd_ulong_session_readonly sys_max_allowed_packet(&vars, "max_allowed_packet", - &SV::max_allowed_packet); + &SV::max_allowed_packet, + check_max_allowed_packet); static sys_var_ulonglong_ptr sys_max_binlog_cache_size(&vars, "max_binlog_cache_size", &max_binlog_cache_size); static sys_var_long_ptr sys_max_binlog_size(&vars, "max_binlog_size", @@ -424,6 +427,12 @@ static sys_var_thd_ulong sys_max_seeks_for_key(&vars, "max_seeks_for_key", &SV::max_seeks_for_key); static sys_var_thd_ulong sys_max_length_for_sort_data(&vars, "max_length_for_sort_data", &SV::max_length_for_sort_data); +static sys_var_const sys_max_long_data_size(&vars, + "max_long_data_size", + OPT_GLOBAL, SHOW_LONG, + (uchar*) + &max_long_data_size); + #ifndef TO_BE_DELETED /* Alias for max_join_size */ static sys_var_thd_ha_rows sys_sql_max_join_size(&vars, "sql_max_join_size", &SV::max_join_size, @@ -474,7 +483,8 @@ static sys_var_const sys_named_pipe(&vars, "named_pipe", /* purecov: end */ #endif static sys_var_thd_ulong_session_readonly sys_net_buffer_length(&vars, "net_buffer_length", - &SV::net_buffer_length); + &SV::net_buffer_length, + check_net_buffer_length); static sys_var_thd_ulong sys_net_read_timeout(&vars, "net_read_timeout", &SV::net_read_timeout, 0, fix_net_read_timeout); @@ -1888,7 +1898,7 @@ bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) } var->save_result.ulong_value= ((ulong) - 
find_set(enum_names, res->ptr(), + find_set(enum_names, res->c_ptr_safe(), res->length(), NULL, &error, &error_len, @@ -2299,7 +2309,7 @@ bool sys_var_character_set_client::check(THD *thd, set_var *var) if (sys_var_character_set_sv::check(thd, var)) return 1; /* Currently, UCS-2 cannot be used as a client character set */ - if (var->save_result.charset->mbminlen > 1) + if (!is_supported_parser_charset(var->save_result.charset)) { my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, var->save_result.charset->csname); @@ -2803,14 +2813,14 @@ int set_var_collation_client::update(THD *thd) bool sys_var_timestamp::check(THD *thd, set_var *var) { - time_t val; + longlong val; var->save_result.ulonglong_value= var->value->val_int(); - val= (time_t) var->save_result.ulonglong_value; - if (val < (time_t) MY_TIME_T_MIN || val > (time_t) MY_TIME_T_MAX) + val= (longlong) var->save_result.ulonglong_value; + if (val != 0 && // this is how you set the default value + (val < TIMESTAMP_MIN_VALUE || val > TIMESTAMP_MAX_VALUE)) { - my_message(ER_UNKNOWN_ERROR, - "This version of MySQL doesn't support dates later than 2038", - MYF(0)); + char buf[64]; + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "timestamp", llstr(val, buf)); return TRUE; } return FALSE; @@ -3045,7 +3055,7 @@ bool sys_var_thd_lc_time_names::check(THD *thd, set_var *var) my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, "NULL"); return 1; } - const char *locale_str= res->c_ptr(); + const char *locale_str= res->c_ptr_safe(); if (!(locale_match= my_locale_by_name(locale_str))) { my_printf_error(ER_UNKNOWN_ERROR, @@ -4388,6 +4398,36 @@ uchar *sys_var_event_scheduler::value_ptr(THD *thd, enum_var_type type, } #endif + +int +check_max_allowed_packet(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val < (longlong) global_system_variables.net_buffer_length) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "The value of 'max_allowed_packet' should be no less than " + "the value of 'net_buffer_length'"); + } + return 0; +} + + +int +check_net_buffer_length(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val > (longlong) global_system_variables.max_allowed_packet) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, + "The value of 'max_allowed_packet' should be no less than " + "the value of 'net_buffer_length'"); + } + return 0; +} + /**************************************************************************** Used templates ****************************************************************************/ diff --git a/sql/slave.cc b/sql/slave.cc index 1dbe4ee35c6..d29c67dc3bd 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (C) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -408,17 +409,6 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) int error,force_all = (thread_mask & SLAVE_FORCE_ALL); pthread_mutex_t *sql_lock = &mi->rli.run_lock, *io_lock = &mi->run_lock; - if (thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL)) - { - DBUG_PRINT("info",("Terminating IO thread")); - mi->abort_slave=1; - if ((error=terminate_slave_thread(mi->io_thd, io_lock, - &mi->stop_cond, - &mi->slave_running, - skip_lock)) && - !force_all) - DBUG_RETURN(error); - } if (thread_mask & (SLAVE_SQL|SLAVE_FORCE_ALL)) { DBUG_PRINT("info",("Terminating SQL thread")); @@ -430,6 +420,17 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock) !force_all) DBUG_RETURN(error); } + if (thread_mask & (SLAVE_IO|SLAVE_FORCE_ALL)) + { + DBUG_PRINT("info",("Terminating IO thread")); + mi->abort_slave=1; + if ((error=terminate_slave_thread(mi->io_thd, io_lock, + &mi->stop_cond, + &mi->slave_running, + skip_lock)) && + !force_all) + DBUG_RETURN(error); + } DBUG_RETURN(0); } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index dde81f1926f..27cf9ec3441 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2003 MySQL AB +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -5382,18 +5383,15 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, } -/* +/** Handle an in-memory privilege structure. - SYNOPSIS - handle_grant_struct() - struct_no The number of the structure to handle (0..3). - drop If user_from is to be dropped. - user_from The the user to be searched/dropped/renamed. - user_to The new name for the user if to be renamed, - NULL otherwise. + @param struct_no The number of the structure to handle (0..4). + @param drop If user_from is to be dropped. + @param user_from The user to be searched/dropped/renamed. + @param user_to The new name for the user if to be renamed, NULL otherwise. - DESCRIPTION + @note Scan through all elements in an in-memory grant structure and apply the requested operation. Delete from grant structure if drop is true. @@ -5403,12 +5401,12 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, 0 acl_users 1 acl_dbs 2 column_priv_hash - 3 procs_priv_hash + 3 proc_priv_hash + 4 func_priv_hash - RETURN - > 0 At least one element matched. - 0 OK, but no element matched. - -1 Wrong arguments to function + @retval > 0 At least one element matched. + @retval 0 OK, but no element matched. + @retval -1 Wrong arguments to function.
*/ static int handle_grant_struct(uint struct_no, bool drop, @@ -5422,6 +5420,7 @@ static int handle_grant_struct(uint struct_no, bool drop, ACL_USER *acl_user= NULL; ACL_DB *acl_db= NULL; GRANT_NAME *grant_name= NULL; + HASH *grant_name_hash= NULL; DBUG_ENTER("handle_grant_struct"); DBUG_PRINT("info",("scan struct: %u search: '%s'@'%s'", struct_no, user_from->user.str, user_from->host.str)); @@ -5440,10 +5439,16 @@ static int handle_grant_struct(uint struct_no, bool drop, elements= acl_dbs.elements; break; case 2: - elements= column_priv_hash.records; + grant_name_hash= &column_priv_hash; + elements= grant_name_hash->records; break; case 3: - elements= proc_priv_hash.records; + grant_name_hash= &proc_priv_hash; + elements= grant_name_hash->records; + break; + case 4: + grant_name_hash= &func_priv_hash; + elements= grant_name_hash->records; break; default: return -1; @@ -5473,16 +5478,13 @@ static int handle_grant_struct(uint struct_no, bool drop, break; case 2: - grant_name= (GRANT_NAME*) hash_element(&column_priv_hash, idx); - user= grant_name->user; - host= grant_name->host.hostname; - break; - case 3: - grant_name= (GRANT_NAME*) hash_element(&proc_priv_hash, idx); + case 4: + grant_name= (GRANT_NAME*) hash_element(grant_name_hash, idx); user= grant_name->user; host= grant_name->host.hostname; break; + default: MY_ASSERT_UNREACHABLE(); } @@ -5512,14 +5514,25 @@ static int handle_grant_struct(uint struct_no, bool drop, break; case 2: - hash_delete(&column_priv_hash, (uchar*) grant_name); - break; - case 3: - hash_delete(&proc_priv_hash, (uchar*) grant_name); + case 4: + hash_delete(grant_name_hash, (uchar*) grant_name); break; } elements--; + /* + - If we are iterating through an array then we just have moved all + elements after the current element one position closer to its head. + This means that we have to take another look at the element at + current position as it is a new element from the array's tail. + - If we are iterating through a hash the current element was replaced + with one of elements from the tail. So we also have to take a look + at the new element in current position. + Note that in our HASH implementation hash_delete() won't move any + elements with position after current one to position before the + current (i.e. from the tail to the head), so it is safe to continue + iteration without re-starting. + */ idx--; } else if ( user_to ) @@ -5537,22 +5550,41 @@ static int handle_grant_struct(uint struct_no, bool drop, case 2: case 3: - /* - Update the grant structure with the new user name and - host name - */ - grant_name->set_user_details(user_to->host.str, grant_name->db, - user_to->user.str, grant_name->tname, - TRUE); - - /* - Since username is part of the hash key, when the user name - is renamed, the hash key is changed. Update the hash to - ensure that the position matches the new hash key value - */ - hash_update(&column_priv_hash, (uchar*) grant_name, - (uchar*) grant_name->hash_key, grant_name->key_length); - break; + case 4: + { + /* + Save old hash key and its length to be able properly update + element position in hash. + */ + char *old_key= grant_name->hash_key; + size_t old_key_length= grant_name->key_length; + + /* + Update the grant structure with the new user name and host name. + */ + grant_name->set_user_details(user_to->host.str, grant_name->db, + user_to->user.str, grant_name->tname, + TRUE); + + /* + Since username is part of the hash key, when the user name + is renamed, the hash key is changed. 
Update the hash to + ensure that the position matches the new hash key value + */ + hash_update(grant_name_hash, (uchar*) grant_name, (uchar*) old_key, + old_key_length); + /* + hash_update() operation could have moved element from the tail + of the hash to the current position. So we need to take a look + at the element in current position once again. + Thanks to the fact that hash_update() for our HASH implementation + won't move any elements from the tail of the hash to the positions + before the current one (a.k.a. head) it is safe to continue + iteration without restarting. + */ + idx--; + break; + } } } else @@ -5609,8 +5641,7 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, else { /* Handle user array. */ - if ((handle_grant_struct(0, drop, user_from, user_to) && ! result) || - found) + if ((handle_grant_struct(0, drop, user_from, user_to)) || found) { result= 1; /* At least one record/element found. */ /* If search is requested, we do not need to search further. */ @@ -5638,7 +5669,7 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, } } - /* Handle procedures table. */ + /* Handle stored routines table. */ if ((found= handle_grant_table(tables, 4, drop, user_from, user_to)) < 0) { /* Handle of table failed, don't touch in-memory array. */ @@ -5655,6 +5686,15 @@ static int handle_grant_data(TABLE_LIST *tables, bool drop, if (! drop && ! user_to) goto end; } + /* Handle funcs array. */ + if (((handle_grant_struct(4, drop, user_from, user_to) && ! result) || + found) && ! result) + { + result= 1; /* At least one record/element found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; + } } /* Handle tables table. */ diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index a57bd41c7e6..218f1a6bab1 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -242,7 +242,7 @@ bool test_if_number(NUM_INFO *info, const char *str, uint str_len) if (str == end) { info->is_float = 1; // we can't use variable decimals here - return 1; + DBUG_RETURN(1); } DBUG_RETURN(0); } diff --git a/sql/sql_base.cc b/sql/sql_base.cc index e55fce90291..59bbd85682c 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -2807,10 +2807,9 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, ("Found table '%s.%s' with different refresh version", table_list->db, table_list->table_name)); - /* Ignore FLUSH, but not name locks! */ + /* Ignore FLUSH and pending name locks, but not acquired name locks! 
*/ if (flags & MYSQL_LOCK_IGNORE_FLUSH && !table->open_placeholder) { - DBUG_ASSERT(table->db_stat); /* Force close at once after usage */ thd->version= table->s->version; continue; diff --git a/sql/sql_class.h b/sql/sql_class.h index ca6a6eed626..cec97e91c84 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -2064,7 +2064,7 @@ public: /*TODO: this will be obsolete when we have support for 64 bit my_time_t */ inline bool is_valid_time() { - return (start_time < (time_t) MY_TIME_T_MAX); + return (IS_TIME_T_VALID_FOR_TIMESTAMP(start_time)); } void set_time_after_lock() { utime_after_lock= my_micro_time(); } ulonglong current_utime() { return my_micro_time(); } diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index bc833afc839..ec0f65e3c58 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -584,8 +584,23 @@ void reset_mqh(LEX_USER *lu, bool get_them= 0) } -void thd_init_client_charset(THD *thd, uint cs_number) +/** + Set thread character set variables from the given ID + + @param thd thread handle + @param cs_number character set and collation ID + + @retval 0 OK; character_set_client, collation_connection and + character_set_results are set to the new value, + or to the default global values. + + @retval 1 error, e.g. the given ID is not supported by parser. + Corresponding SQL error is sent. +*/ + +bool thd_init_client_charset(THD *thd, uint cs_number) { + CHARSET_INFO *cs; /* Use server character set and collation if - opt_character_set_client_handshake is not set @@ -594,10 +609,10 @@ void thd_init_client_charset(THD *thd, uint cs_number) - client character set doesn't exists in server */ if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !(cs= get_charset(cs_number, MYF(0))) || !my_strcasecmp(&my_charset_latin1, global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) + cs->name)) { thd->variables.character_set_client= global_system_variables.character_set_client; @@ -608,10 +623,18 @@ void thd_init_client_charset(THD *thd, uint cs_number) } else { + if (!is_supported_parser_charset(cs)) + { + /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */ + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client", + cs->csname); + return true; + } thd->variables.character_set_results= thd->variables.collation_connection= - thd->variables.character_set_client; + thd->variables.character_set_client= cs; } + return false; } @@ -784,7 +807,8 @@ static int check_connection(THD *thd) thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; thd->max_client_packet_length= uint4korr(net->read_pos+4); DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - thd_init_client_charset(thd, (uint) net->read_pos[8]); + if (thd_init_client_charset(thd, (uint) net->read_pos[8])) + return 1; thd->update_charset(); end= (char*) net->read_pos+32; } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index dd13f36a432..fabc3b79c23 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -906,7 +906,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, */ query_cache_invalidate3(thd, table_list, 1); } - if ((changed && error <= 0) || + if (error <= 0 || thd->transaction.stmt.modified_non_trans_table || was_insert_delayed) { diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 776644024ff..52c7a3fca82 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1074,9 +1074,10 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, 
CHARSET_INFO *cs, String &field_term, String &line_start, String &line_term, String &enclosed_par, int escape, bool get_it_from_net, bool is_fifo) - :file(file_par),buffer(0),escape_char(escape) + :file(file_par), buff_length(tot_length), escape_char(escape), + found_end_of_line(false), eof(false), need_end_io_cache(false), + error(false), line_cuted(false), found_null(false), read_charset(cs) { - read_charset= cs; field_term_ptr=(char*) field_term.ptr(); field_term_length= field_term.length(); line_term_ptr=(char*) line_term.ptr(); @@ -1103,12 +1104,9 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, (uchar) enclosed_par[0] : INT_MAX; field_term_char= field_term_length ? (uchar) field_term_ptr[0] : INT_MAX; line_term_char= line_term_length ? (uchar) line_term_ptr[0] : INT_MAX; - error=eof=found_end_of_line=found_null=line_cuted=0; - buff_length=tot_length; - /* Set of a stack for unget if long terminators */ - uint length=max(field_term_length,line_term_length)+1; + uint length= max(cs->mbmaxlen, max(field_term_length, line_term_length)) + 1; set_if_bigger(length,line_start.length()); stack=stack_pos=(int*) sql_alloc(sizeof(int)*length); @@ -1150,11 +1148,8 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, READ_INFO::~READ_INFO() { - if (!error) - { - if (need_end_io_cache) - ::end_io_cache(&cache); - } + if (need_end_io_cache) + ::end_io_cache(&cache); my_free(buffer, MYF(MY_ALLOW_ZERO_PTR)); } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index d5036cdf317..1544c13f666 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1165,13 +1165,22 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (ptr < packet_end) { + CHARSET_INFO *cs; if (ptr + 2 > packet_end) { my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } - cs_number= uint2korr(ptr); + if ((cs_number= uint2korr(ptr)) && + (cs= get_charset(cs_number, MYF(0))) && + !is_supported_parser_charset(cs)) + { + /* Disallow non-supported parser character sets: UCS2, UTF16, UTF32 */ + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), "character_set_client", + cs->csname); + break; + } } /* Convert database name to utf8 */ @@ -1217,7 +1226,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (cs_number) { - thd_init_client_charset(thd, cs_number); + /* + We have checked charset earlier, + so thd_init_client_charset cannot fail. + */ + if (thd_init_client_charset(thd, cs_number)) + DBUG_ASSERT(0); thd->update_charset(); } } diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 01fb0a299bf..5f3d6993ed8 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1,4 +1,5 @@ -/* Copyright 2005-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2005, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -761,6 +762,9 @@ static bool handle_list_of_fields(List_iterator<char> it, bool result; char *field_name; bool is_list_empty= TRUE; + int fields_handled = 0; + char* field_name_array[MAX_KEY]; + DBUG_ENTER("handle_list_of_fields"); while ((field_name= it++)) @@ -776,6 +780,25 @@ static bool handle_list_of_fields(List_iterator<char> it, result= TRUE; goto end; } + + /* + Check for duplicate fields in the list. + Assuming that there are not many fields in the partition key list. 
+ If there were, it would be better to replace the for-loop + with a more efficient algorithm. + */ + + field_name_array[fields_handled] = field_name; + for (int i = 0; i < fields_handled; ++i) + { + if (my_strcasecmp(system_charset_info, + field_name_array[i], field_name) == 0) + { + my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + } + fields_handled++; } if (is_list_empty) { diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index e96b46e4de5..894cf426875 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2002, 2010, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2733,6 +2734,32 @@ void mysql_sql_stmt_close(THD *thd) } } + +class Set_longdata_error_handler : public Internal_error_handler +{ +public: + Set_longdata_error_handler(Prepared_statement *statement) + : stmt(statement) + { } + +public: + bool handle_error(uint sql_errno, + const char *message, + MYSQL_ERROR::enum_warning_level level, + THD *) + { + stmt->state= Query_arena::ERROR; + stmt->last_errno= sql_errno; + strnmov(stmt->last_error, message, MYSQL_ERRMSG_SIZE); + + return TRUE; + } + +private: + Prepared_statement *stmt; +}; + + /** Handle long data in pieces from client. @@ -2789,16 +2816,19 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) param= stmt->param_array[param_number]; + Set_longdata_error_handler err_handler(stmt); + /* + Install handler that will catch any errors that can be generated + during execution of Item_param::set_longdata() and propagate + them to Statement::last_error. + */ + thd->push_internal_handler(&err_handler); #ifndef EMBEDDED_LIBRARY - if (param->set_longdata(packet, (ulong) (packet_end - packet))) + param->set_longdata(packet, (ulong) (packet_end - packet)); #else - if (param->set_longdata(thd->extra_data, thd->extra_length)) + param->set_longdata(thd->extra_data, thd->extra_length); #endif - { - stmt->state= Query_arena::ERROR; - stmt->last_errno= ER_OUTOFMEMORY; - sprintf(stmt->last_error, ER(ER_OUTOFMEMORY), 0); - } + thd->pop_internal_handler(); general_log_print(thd, thd->command, NullS); @@ -3260,6 +3290,13 @@ Prepared_statement::execute_loop(String *expanded_query, bool error; int reprepare_attempt= 0; + /* Check if we got an error when sending long data */ + if (state == Query_arena::ERROR) + { + my_message(last_errno, last_error, MYF(0)); + return TRUE; + } + if (set_parameters(expanded_query, packet, packet_end)) return TRUE; @@ -3500,12 +3537,6 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) status_var_increment(thd->status_var.com_stmt_execute); - /* Check if we got an error when sending long data */ - if (state == Query_arena::ERROR) - { - my_message(last_errno, last_error, MYF(0)); - return TRUE; - } if (flags & (uint) IS_IN_USE) { my_error(ER_PS_NO_RECURSION, MYF(0)); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 95e48c531be..418c2985f85 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1,4 +1,5 @@ -/* Copyright (C) 2000-2006 MySQL AB & Sasha +/* Copyright (C) 2000, 2011, Oracle and/or its affiliates. 
+ Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -21,6 +22,7 @@ #include "log_event.h" #include "rpl_filter.h" #include <my_dir.h> +#include "debug_sync.h" int max_binlog_dump_events = 0; // unlimited my_bool opt_sporadic_binlog_dump_fail = 0; @@ -544,8 +546,10 @@ impossible position"; while (!net->error && net->vio != 0 && !thd->killed) { + my_off_t prev_pos= pos; while (!(error = Log_event::read_log_event(&log, packet, log_lock))) { + prev_pos= my_b_tell(&log); #ifndef DBUG_OFF if (max_binlog_dump_events && !left_events--) { @@ -556,6 +560,20 @@ impossible position"; } #endif + DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid", + { + if ((*packet)[EVENT_TYPE_OFFSET+1] == XID_EVENT) + { + net_flush(net); + const char act[]= + "now " + "wait_for signal.continue"; + DBUG_ASSERT(opt_debug_sync_timeout > 0); + DBUG_ASSERT(!debug_sync_set_action(current_thd, + STRING_WITH_LEN(act))); + } + }); + if ((*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT) { binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+1] & @@ -572,6 +590,14 @@ impossible position"; goto err; } + DBUG_EXECUTE_IF("dump_thread_wait_before_send_xid", + { + if ((*packet)[EVENT_TYPE_OFFSET+1] == XID_EVENT) + { + net_flush(net); + } + }); + DBUG_PRINT("info", ("log event code %d", (*packet)[LOG_EVENT_OFFSET+1] )); if ((*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT) @@ -590,8 +616,13 @@ impossible position"; here we were reading binlog that was not closed properly (as a result of a crash ?). treat any corruption as EOF */ - if (binlog_can_be_corrupted && error != LOG_READ_MEM) + if (binlog_can_be_corrupted && + error != LOG_READ_MEM && error != LOG_READ_EOF) + { + my_b_seek(&log, prev_pos); error=LOG_READ_EOF; + } + /* TODO: now that we are logging the offset, check to make sure the recorded offset and the actual match. diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 6ce4ddcf2ea..91fc95a2909 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -289,57 +289,60 @@ bool handle_select(THD *thd, LEX *lex, select_result *result, } -/* +/** Fix fields referenced from inner selects. - SYNOPSIS - fix_inner_refs() - thd Thread handle - all_fields List of all fields used in select - select Current select - ref_pointer_array Array of references to Items used in current select - group_list GROUP BY list (is NULL by default) + @param thd Thread handle + @param all_fields List of all fields used in select + @param select Current select + @param ref_pointer_array Array of references to Items used in current select + @param group_list GROUP BY list (is NULL by default) - DESCRIPTION - The function serves 3 purposes - adds fields referenced from inner - selects to the current select list, resolves which class to use - to access referenced item (Item_ref of Item_direct_ref) and fixes - references (Item_ref objects) to these fields. + @details + The function serves 3 purposes - If a field isn't already in the select list and the ref_pointer_array + - adds fields referenced from inner query blocks to the current select list + + - Decides which class to use to reference the items (Item_ref or + Item_direct_ref) + + - fixes references (Item_ref objects) to these fields. + + If a field isn't already on the select list and the ref_pointer_array is provided then it is added to the all_fields list and the pointer to it is saved in the ref_pointer_array. 
The class to access the outer field is determined by the following rules: - 1. If the outer field isn't used under an aggregate function - then the Item_ref class should be used. - 2. If the outer field is used under an aggregate function and this - function is aggregated in the select where the outer field was - resolved or in some more inner select then the Item_direct_ref - class should be used. - It used used also if we are grouping by a subquery that refers - this outer field. + + -#. If the outer field isn't used under an aggregate function then the + Item_ref class should be used. + + -#. If the outer field is used under an aggregate function and this + function is, in turn, aggregated in the query block where the outer + field was resolved or some query nested therein, then the + Item_direct_ref class should be used. Also it should be used if we are + grouping by a subquery containing the outer field. + The resolution is done here and not at the fix_fields() stage as - it can be done only after sum functions are fixed and pulled up to - selects where they are have to be aggregated. + it can be done only after aggregate functions are fixed and pulled up to + selects where they are to be aggregated. + When the class is chosen it substitutes the original field in the Item_outer_ref object. After this we proceed with fixing references (Item_outer_ref objects) to this field from inner subqueries. - RETURN - TRUE an error occured - FALSE ok -*/ + @return Status + @retval true An error occured. + @retval false OK. + */ bool fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, Item **ref_pointer_array) { Item_outer_ref *ref; - bool res= FALSE; - bool direct_ref= FALSE; /* Mark the references from the inner_refs_list that are occurred in @@ -356,6 +359,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, while ((ref= ref_it++)) { + bool direct_ref= false; Item *item= ref->outer_ref; Item **item_ref= ref->ref; Item_ref *new_ref; @@ -414,7 +418,7 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, return TRUE; thd->used_tables|= item->used_tables(); } - return res; + return false; } /** @@ -966,7 +970,7 @@ JOIN::optimize() If all items were resolved by opt_sum_query, there is no need to open any tables. 
*/ - if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds))) + if ((res=opt_sum_query(thd, select_lex->leaf_tables, all_fields, conds))) { if (res == HA_ERR_KEY_NOT_FOUND) { @@ -1920,7 +1924,11 @@ JOIN::exec() if (!curr_join->sort_and_group && curr_join->const_tables != curr_join->tables) curr_join->join_tab[curr_join->const_tables].sorted= 0; - if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) + + Procedure *save_proc= curr_join->procedure; + tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0); + curr_join->procedure= save_proc; + if (tmp_error) { error= tmp_error; DBUG_VOID_RETURN; @@ -3305,6 +3313,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, @param field Field used in comparision @param eq_func True if we used =, <=> or IS NULL @param value Value used for comparison with field + @param num_values Number of values[] that we are comparing against @param usable_tables Tables which can be used for key optimization @param sargables IN/OUT Array of found sargable candidates @@ -3397,26 +3406,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, eq_func is NEVER true when num_values > 1 */ if (!eq_func) - { - /* - Additional optimization: if we're processing - "t.key BETWEEN c1 AND c1" then proceed as if we were processing - "t.key = c1". - TODO: This is a very limited fix. A more generic fix is possible. - There are 2 options: - A) Make equality propagation code be able to handle BETWEEN - (including cases like t1.key BETWEEN t2.key AND t3.key) - B) Make range optimizer to infer additional "t.key = c" equalities - and use them in equality propagation process (see details in - OptimizerKBAndTodo) - */ - if ((cond->functype() != Item_func::BETWEEN) || - ((Item_func_between*) cond)->negated || - !value[0]->eq(value[1], field->binary())) - return; - eq_func= TRUE; - } - + return; if (field->result_type() == STRING_RESULT) { if ((*value)->result_type() != STRING_RESULT) @@ -3631,9 +3621,65 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, case Item_func::OPTIMIZE_KEY: { Item **values; - // BETWEEN, IN, NE - if (is_local_field (cond_func->key_item()) && - !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) + /* + Build list of possible keys for 'a BETWEEN low AND high'. + It is handled similar to the equivalent condition + 'a >= low AND a <= high': + */ + if (cond_func->functype() == Item_func::BETWEEN) + { + Item_field *field_item; + bool equal_func= FALSE; + uint num_values= 2; + values= cond_func->arguments(); + + bool binary_cmp= (values[0]->real_item()->type() == Item::FIELD_ITEM) + ? ((Item_field*)values[0]->real_item())->field->binary() + : TRUE; + + /* + Additional optimization: If 'low = high': + Handle as if the condition was "t.key = low". 
+ */ + if (!((Item_func_between*)cond_func)->negated && + values[1]->eq(values[2], binary_cmp)) + { + equal_func= TRUE; + num_values= 1; + } + + /* + Append keys for 'field <cmp> value[]' if the + condition is of the form:: + '<field> BETWEEN value[1] AND value[2]' + */ + if (is_local_field(values[0])) + { + field_item= (Item_field *) (values[0]->real_item()); + add_key_equal_fields(key_fields, *and_level, cond_func, + field_item, equal_func, &values[1], + num_values, usable_tables, sargables); + } + /* + Append keys for 'value[0] <cmp> field' if the + condition is of the form: + 'value[0] BETWEEN field1 AND field2' + */ + for (uint i= 1; i <= num_values; i++) + { + if (is_local_field(values[i])) + { + field_item= (Item_field *) (values[i]->real_item()); + add_key_equal_fields(key_fields, *and_level, cond_func, + field_item, equal_func, values, + 1, usable_tables, sargables); + } + } + } // if ( ... Item_func::BETWEEN) + + // IN, NE + else if (is_local_field (cond_func->key_item()) && + !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) { values= cond_func->arguments()+1; if (cond_func->functype() == Item_func::NE_FUNC && @@ -3647,21 +3693,6 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, cond_func->argument_count()-1, usable_tables, sargables); } - if (cond_func->functype() == Item_func::BETWEEN) - { - values= cond_func->arguments(); - for (uint i= 1 ; i < cond_func->argument_count() ; i++) - { - Item_field *field_item; - if (is_local_field (cond_func->arguments()[i])) - { - field_item= (Item_field *) (cond_func->arguments()[i]->real_item()); - add_key_equal_fields(key_fields, *and_level, cond_func, - field_item, 0, values, 1, usable_tables, - sargables); - } - } - } break; } case Item_func::OPTIMIZE_OP: @@ -12660,22 +12691,21 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_ENTER("end_send"); if (!end_of_records) { - int error; if (join->having && join->having->val_int() == 0) DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having - error=0; if (join->procedure) - error=join->procedure->send_row(join->procedure_fields_list); - else if (join->do_send_rows) { - if ((error= join->result->send_data(*join->fields)) < 0) - { - /* row was not accepted. Don't count it */ - DBUG_RETURN(NESTED_LOOP_OK); - } + if (join->procedure->send_row(join->procedure_fields_list)) + DBUG_RETURN(NESTED_LOOP_ERROR); + DBUG_RETURN(NESTED_LOOP_OK); + } + if (join->do_send_rows) + { + int error; + /* result < 0 if row was not accepted and should not be counted */ + if ((error= join->result->send_data(*join->fields))) + DBUG_RETURN(error < 0 ? 
NESTED_LOOP_OK : NESTED_LOOP_ERROR); } - if (error) - DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (++join->send_records >= join->unit->select_limit_cnt && join->do_send_rows) { @@ -13175,6 +13205,42 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) return 0; // keep test } +/** + Extract a condition that can be checked after reading given table + + @param cond Condition to analyze + @param tables Tables for which "current field values" are available + @param used_table Table that we're extracting the condition for (may + also include PSEUDO_TABLE_BITS, and may be zero) + @param exclude_expensive_cond Do not push expensive conditions + + @retval <>NULL Generated condition + @retval =NULL Already checked, OR error + + @details + Extract the condition that can be checked after reading the table + specified in 'used_table', given that current-field values for tables + specified in 'tables' bitmap are available. + If 'used_table' is 0 + - extract conditions for all tables in 'tables'. + - extract conditions are unrelated to any tables + in the same query block/level(i.e. conditions + which have used_tables == 0). + + The function assumes that + - Constant parts of the condition has already been checked. + - Condition that could be checked for tables in 'tables' has already + been checked. + + The function takes into account that some parts of the condition are + guaranteed to be true by employed 'ref' access methods (the code that + does this is located at the end, search down for "EQ_FUNC"). + + @note + Make sure to keep the implementations of make_cond_for_table() and + make_cond_after_sjm() synchronized. + make_cond_for_info_schema() uses similar algorithm as well. +*/ static COND * make_cond_for_table(COND *cond, table_map tables, table_map used_table) @@ -13662,12 +13728,13 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, { int ref_key; uint ref_key_parts; - int order_direction; + int order_direction= 0; uint used_key_parts; TABLE *table=tab->table; SQL_SELECT *select=tab->select; key_map usable_keys; QUICK_SELECT_I *save_quick= 0; + int best_key= -1; DBUG_ENTER("test_if_skip_sort_order"); LINT_INIT(ref_key_parts); @@ -13771,13 +13838,15 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, new_ref_key_map.clear_all(); // Force the creation of quick select new_ref_key_map.set_bit(new_ref_key); // only for new_ref_key. + /* Reset quick; This will be restored in 'use_filesort' if needed */ + select->quick= 0; if (select->test_quick_select(tab->join->thd, new_ref_key_map, 0, (tab->join->select_options & OPTION_FOUND_ROWS) ? HA_POS_ERROR : tab->join->unit->select_limit_cnt,0) <= 0) - DBUG_RETURN(0); + goto use_filesort; } ref_key= new_ref_key; } @@ -13802,7 +13871,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, int best_key_direction; ha_rows best_records; double read_time; - int best_key= -1; bool is_best_covering= FALSE; double fanout= 1; JOIN *join= tab->join; @@ -14020,72 +14088,21 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, tab->join->tables > tab->join->const_tables + 1) && ((unsigned) best_key != table->s->primary_key || !table->file->primary_key_is_clustered())) - DBUG_RETURN(0); + goto use_filesort; if (best_key >= 0) { - bool quick_created= FALSE; if (table->quick_keys.is_set(best_key) && best_key != ref_key) { key_map map; map.clear_all(); // Force the creation of quick select map.set_bit(best_key); // only best_key. 
- quick_created= - select->test_quick_select(join->thd, map, 0, - join->select_options & OPTION_FOUND_ROWS ? - HA_POS_ERROR : - join->unit->select_limit_cnt, - 0) > 0; - } - if (!no_changes) - { - /* - If ref_key used index tree reading only ('Using index' in EXPLAIN), - and best_key doesn't, then revert the decision. - */ - if (!table->covering_keys.is_set(best_key)) - table->disable_keyread(); - if (!quick_created) - { - tab->index= best_key; - tab->read_first_record= best_key_direction > 0 ? - join_read_first:join_read_last; - tab->type=JT_NEXT; // Read with index_first(), index_next() - if (select && select->quick) - { - delete select->quick; - select->quick= 0; - } - if (table->covering_keys.is_set(best_key) && ! table->key_read) - table->enable_keyread(); - table->file->ha_index_or_rnd_end(); - if (join->select_options & SELECT_DESCRIBE) - { - tab->ref.key= -1; - tab->ref.key_parts= 0; - if (select_limit < table_records) - tab->limit= select_limit; - } - } - else if (tab->type != JT_ALL) - { - /* - We're about to use a quick access to the table. - We need to change the access method so as the quick access - method is actually used. - */ - DBUG_ASSERT(tab->select->quick); - tab->type=JT_ALL; - tab->use_quick=1; - tab->ref.key= -1; - tab->ref.key_parts=0; // Don't use ref key. - tab->read_first_record= join_init_read_record; - if (tab->is_using_loose_index_scan()) - join->tmp_table_param.precomputed_group_by= TRUE; - /* - TODO: update the number of records in join->best_positions[tablenr] - */ - } + select->quick= 0; + select->test_quick_select(join->thd, map, 0, + join->select_options & OPTION_FOUND_ROWS ? + HA_POS_ERROR : + join->unit->select_limit_cnt, + 0); } order_direction= best_key_direction; /* @@ -14098,61 +14115,155 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, saved_best_key_parts : best_key_parts; } else - DBUG_RETURN(0); + goto use_filesort; } check_reverse_order: + DBUG_ASSERT(order_direction != 0); + if (order_direction == -1) // If ORDER BY ... DESC { + int quick_type; if (select && select->quick) { /* Don't reverse the sort order, if it's already done. (In some cases test_if_order_by_key() can be called multiple times */ - if (!select->quick->reverse_sorted()) + if (select->quick->reverse_sorted()) + goto skipped_filesort; + + quick_type= select->quick->get_type(); + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) { - QUICK_SELECT_DESC *tmp; - int quick_type= select->quick->get_type(); - if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || - quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || - quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || - quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + tab->limit= 0; + goto use_filesort; // Use filesort + } + } + } + + /* + Update query plan with access pattern for doing ordered access + according to what we have decided above. + */ + if (!no_changes) // We are allowed to update QEP + { + if (best_key >= 0) + { + bool quick_created= + (select && select->quick && select->quick!=save_quick); + + /* + If ref_key used index tree reading only ('Using index' in EXPLAIN), + and best_key doesn't, then revert the decision. 
+ */ + if (!table->covering_keys.is_set(best_key)) + table->disable_keyread(); + if (!quick_created) + { + if (select) // Throw any existing quick select + select->quick= 0; // Cleanup either reset to save_quick, + // or 'delete save_quick' + tab->index= best_key; + tab->read_first_record= order_direction > 0 ? + join_read_first:join_read_last; + tab->type=JT_NEXT; // Read with index_first(), index_next() + + if (table->covering_keys.is_set(best_key) && ! table->key_read) + table->enable_keyread(); + table->file->ha_index_or_rnd_end(); + if (tab->join->select_options & SELECT_DESCRIBE) { - tab->limit= 0; - select->quick= save_quick; - DBUG_RETURN(0); // Use filesort + tab->ref.key= -1; + tab->ref.key_parts= 0; + if (select_limit < table->file->stats.records) + tab->limit= select_limit; } - + } + else if (tab->type != JT_ALL) + { + /* + We're about to use a quick access to the table. + We need to change the access method so as the quick access + method is actually used. + */ + DBUG_ASSERT(tab->select->quick); + tab->type=JT_ALL; + tab->use_quick=1; + tab->ref.key= -1; + tab->ref.key_parts=0; // Don't use ref key. + tab->read_first_record= join_init_read_record; + if (tab->is_using_loose_index_scan()) + tab->join->tmp_table_param.precomputed_group_by= TRUE; + /* + TODO: update the number of records in join->best_positions[tablenr] + */ + } + } // best_key >= 0 + + if (order_direction == -1) // If ORDER BY ... DESC + { + if (select && select->quick) + { + QUICK_SELECT_DESC *tmp; /* ORDER BY range_key DESC */ - tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), + tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), used_key_parts); - if (!tmp || tmp->error) - { - delete tmp; - select->quick= save_quick; + if (tmp && select->quick == save_quick) + save_quick= 0; // ::QUICK_SELECT_DESC consumed it + + if (!tmp || tmp->error) + { + delete tmp; tab->limit= 0; - DBUG_RETURN(0); // Reverse sort not supported - } - select->quick=tmp; + goto use_filesort; // Reverse sort failed -> filesort + } + select->quick= tmp; } - } - else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL && - tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts) - { - /* - SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC + else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL && + tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts) + { + /* + SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC - Use a traversal function that starts by reading the last row - with key part (A) and then traverse the index backwards. - */ - tab->read_first_record= join_read_last_key; - tab->read_record.read_record= join_read_prev_same; + Use a traversal function that starts by reading the last row + with key part (A) and then traverse the index backwards. + */ + tab->read_first_record= join_read_last_key; + tab->read_record.read_record= join_read_prev_same; + } } + else if (select && select->quick) + select->quick->sorted= 1; + + } // QEP has been modified + + /* + Cleanup: + We may have both a 'select->quick' and 'save_quick' (original) + at this point. Delete the one that we won't use.
+ */ + +skipped_filesort: + // Keep current (ordered) select->quick + if (select && save_quick != select->quick) + { + delete save_quick; + save_quick= NULL; + } - else if (select && select->quick) - select->quick->sorted= 1; DBUG_RETURN(1); + +use_filesort: + // Restore original save_quick + if (select && select->quick != save_quick) + { + delete select->quick; + select->quick= save_quick; + } + DBUG_RETURN(0); } diff --git a/sql/sql_select.h b/sql/sql_select.h index ea06b26a229..b821181207c 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -635,7 +635,8 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field, /* functions from opt_sum.cc */ bool simple_pred(Item_func *func_item, Item **args, bool *inv_order); -int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds); +int opt_sum_query(THD* thd, + TABLE_LIST *tables, List<Item> &all_fields, COND *conds); /* from sql_delete.cc, used by opt_range.cc */ extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index b8132d02624..09367503448 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3415,6 +3415,9 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) it.rewind(); /* To get access to new elements in basis list */ while ((db_name= it++)) { + /* db_name can be changed in make_table_list() func */ + LEX_STRING orig_db_name= *db_name; + #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!(check_access(thd,SELECT_ACL, db_name->str, &thd->col_access, 0, 1, with_i_schema) || @@ -3477,17 +3480,13 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) } int res; - LEX_STRING tmp_lex_string, orig_db_name; + LEX_STRING tmp_lex_string; /* Set the parent lex of 'sel' because it is needed by sel.init_query() which is called inside make_table_list. */ thd->no_warnings_for_error= 1; sel.parent_lex= lex; - /* db_name can be changed in make_table_list() func */ - if (!thd->make_lex_string(&orig_db_name, db_name->str, - db_name->length, FALSE)) - goto err; if (make_table_list(thd, &sel, db_name, table_name)) goto err; TABLE_LIST *show_table_list= sel.table_list.first; diff --git a/sql/sql_string.cc b/sql/sql_string.cc index bdecabd782b..65af78d1abe 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -50,11 +50,33 @@ bool String::real_alloc(uint32 arg_length) } -/* -** Check that string is big enough. Set string[alloc_length] to 0 -** (for C functions) -*/ +/** + Allocates a new buffer on the heap for this String. + + - If the String's internal buffer is privately owned and heap allocated, + one of the following is performed. + + - If the requested length is greater than what fits in the buffer, a new + buffer is allocated, data moved and the old buffer freed. + + - If the requested length is less than or equal to what fits in the buffer, a + null character is inserted at the appropriate position. + - If the String does not keep a private buffer on the heap, such a buffer + will be allocated and the string copied according to its length, as found + in String::length(). + + For C compatibility, the new string buffer is null terminated. + + @param alloc_length The requested string size in characters, excluding any + null terminator. + + @retval false Either the copy operation is complete or, if the size of the + new buffer is smaller than the currently allocated buffer (if one exists), + no allocation occurred. + + @retval true An error occurred when attempting to allocate memory.
+*/ bool String::realloc(uint32 alloc_length) { if (Alloced_length <= alloc_length) @@ -187,6 +209,17 @@ bool String::copy() return FALSE; } +/** + Copies the internal buffer from str. If this String has a private heap + allocated buffer where new data does not fit, a new buffer is allocated + before copying and the old buffer freed. Character set information is also + copied. + + @param str The string whose internal buffer is to be copied. + + @retval false Success. + @retval true Memory allocation failed. +*/ bool String::copy(const String &str) { if (alloc(str.str_length)) diff --git a/sql/sql_string.h b/sql/sql_string.h index d5d31c8db65..5b7b55575de 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -111,6 +111,9 @@ public: inline const char *ptr() const { return Ptr; } inline char *c_ptr() { + DBUG_ASSERT(!alloced || !Ptr || !Alloced_length || + (Alloced_length >= (str_length + 1))); + if (!Ptr || Ptr[str_length]) /* Should be safe */ (void) realloc(str_length); return Ptr; @@ -141,6 +144,16 @@ public: Alloced_length=0; str_charset=str.str_charset; } + + + /** + Points the internal buffer to the supplied one. The old buffer is freed. + @param str Pointer to the new buffer. + @param arg_length Length of the new buffer in characters, excluding any + null character. + @param cs Character set to use for interpreting string data. + @note The new buffer will not be null terminated. + */ inline void set(char *str,uint32 arg_length, CHARSET_INFO *cs) { free(); diff --git a/sql/table.cc b/sql/table.cc index 90d113bcbc5..feb9777c9fd 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1,4 +1,5 @@ -/* Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc. +/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. + Copyright (c) 2009-2011, Monty Program Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by |