author | tsmith@ramayana.hindu.god <> | 2007-12-04 20:58:21 -0700
---|---|---
committer | tsmith@ramayana.hindu.god <> | 2007-12-04 20:58:21 -0700
commit | 10cab933b29583df5d9d4c490c6e602d35264385 (patch) |
tree | 0314ad5cd173e9b49968cb56e0081e21eb2fdd5f /sql |
parent | 0e78354baa5ed83f115b6f96e21b31fe0b1e4cbc (diff) |
parent | 02e321d3502e8771f2092c6167de189423f4ec49 (diff) |
download | mariadb-git-10cab933b29583df5d9d4c490c6e602d35264385.tar.gz |
Merge tsmith@bk-internal.mysql.com:/home/bk/mysql-5.0
into ramayana.hindu.god:/home/tsmith/m/bk/maint/50
Diffstat (limited to 'sql')
62 files changed, 1407 insertions, 539 deletions
diff --git a/sql/field.cc b/sql/field.cc index 31d2af1bb14..0a7c6a04f39 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1356,15 +1356,25 @@ void Field_num::add_zerofill_and_unsigned(String &res) const void Field::make_field(Send_field *field) { - if (orig_table->s->table_cache_key && *(orig_table->s->table_cache_key)) + if (orig_table && orig_table->s->table_cache_key && + *(orig_table->s->table_cache_key)) { field->org_table_name= orig_table->s->table_name; field->db_name= orig_table->s->table_cache_key; } else field->org_table_name= field->db_name= ""; - field->table_name= orig_table->alias; - field->col_name= field->org_col_name= field_name; + if (orig_table) + { + field->table_name= orig_table->alias; + field->org_col_name= field_name; + } + else + { + field->table_name= ""; + field->org_col_name= ""; + } + field->col_name= field_name; field->charsetnr= charset()->number; field->length=field_length; field->type=type(); @@ -5272,7 +5282,7 @@ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) { tmp= l_time.day + l_time.month*32 + l_time.year*16*32; if (!error && (ret != MYSQL_TIMESTAMP_DATE) && - thd->count_cuted_fields != CHECK_FIELD_IGNORE) + (l_time.hour || l_time.minute || l_time.second || l_time.second_part)) error= 3; // Datetime was cut (note) } @@ -5319,10 +5329,16 @@ int Field_newdate::store(longlong nr, bool unsigned_val) else tmp= l_time.day + l_time.month*32 + l_time.year*16*32; + if (!error && l_time.time_type != MYSQL_TIMESTAMP_DATE && + (l_time.hour || l_time.minute || l_time.second || l_time.second_part)) + error= 3; + if (error) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - error == 2 ? ER_WARN_DATA_OUT_OF_RANGE : - WARN_DATA_TRUNCATED,nr,MYSQL_TIMESTAMP_DATE, 1); + set_datetime_warning(error == 3 ? MYSQL_ERROR::WARN_LEVEL_NOTE : + MYSQL_ERROR::WARN_LEVEL_WARN, + error == 2 ? 
+ ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED, + nr,MYSQL_TIMESTAMP_DATE, 1); int3store(ptr,tmp); return error; @@ -5349,6 +5365,17 @@ int Field_newdate::store_time(MYSQL_TIME *ltime, timestamp_type time_type) set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, str.ptr(), str.length(), MYSQL_TIMESTAMP_DATE, 1); } + if (!error && ltime->time_type != MYSQL_TIMESTAMP_DATE && + (ltime->hour || ltime->minute || ltime->second || ltime->second_part)) + { + char buff[MAX_DATE_STRING_REP_LENGTH]; + String str(buff, sizeof(buff), &my_charset_latin1); + make_datetime((DATE_TIME_FORMAT *) 0, ltime, &str); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, + WARN_DATA_TRUNCATED, + str.ptr(), str.length(), MYSQL_TIMESTAMP_DATE, 1); + error= 3; + } } else { @@ -7456,36 +7483,6 @@ uint Field_blob::max_packed_col_length(uint max_length) #ifdef HAVE_SPATIAL -uint Field_geom::get_key_image(char *buff, uint length, imagetype type) -{ - char *blob; - const char *dummy; - MBR mbr; - ulong blob_length= get_length(ptr); - Geometry_buffer buffer; - Geometry *gobj; - const uint image_length= SIZEOF_STORED_DOUBLE*4; - - if (blob_length < SRID_SIZE) - { - bzero(buff, image_length); - return image_length; - } - get_ptr(&blob); - gobj= Geometry::construct(&buffer, blob, blob_length); - if (!gobj || gobj->get_mbr(&mbr, &dummy)) - bzero(buff, image_length); - else - { - float8store(buff, mbr.xmin); - float8store(buff + 8, mbr.xmax); - float8store(buff + 16, mbr.ymin); - float8store(buff + 24, mbr.ymax); - } - return image_length; -} - - void Field_geom::sql_type(String &res) const { CHARSET_INFO *cs= &my_charset_latin1; diff --git a/sql/field.h b/sql/field.h index 4fcdb50f8c7..8c01931fa21 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1326,7 +1326,6 @@ public: int store(double nr); int store(longlong nr, bool unsigned_val); int store_decimal(const my_decimal *); - uint get_key_image(char *buff,uint length,imagetype type); uint size_of() const { return sizeof(*this); } int reset(void) { return !maybe_null() || Field_blob::reset(); } geometry_type get_geometry_type() { return geom_type; }; diff --git a/sql/filesort.cc b/sql/filesort.cc index db73ede99b0..08ffa2211fa 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -534,7 +534,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, file->unlock_row(); /* It does not make sense to read more keys in case of a fatal error */ if (thd->net.report_error) - DBUG_RETURN(HA_POS_ERROR); + break; } if (quick_select) { @@ -551,6 +551,9 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, file->ha_rnd_end(); } + if (thd->net.report_error) + DBUG_RETURN(HA_POS_ERROR); + DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos)); if (error != HA_ERR_END_OF_FILE) { diff --git a/sql/gstream.cc b/sql/gstream.cc index 46e12b6ef3b..0c8011549f3 100644 --- a/sql/gstream.cc +++ b/sql/gstream.cc @@ -44,7 +44,7 @@ bool Gis_read_stream::get_next_word(LEX_STRING *res) skip_space(); res->str= (char*) m_cur; /* The following will also test for \0 */ - if (!my_isvar_start(&my_charset_bin, *m_cur)) + if ((m_cur >= m_limit) || !my_isvar_start(&my_charset_bin, *m_cur)) return 1; /* diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index d8ffd6c55f8..d7f2309657b 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -2528,7 +2528,12 @@ int ha_federated::info(uint flag) status_query_string.length(0); result= mysql_store_result(mysql); - if (!result) + + /* + We're going to use fields num. 
4, 12 and 13 of the resultset, + so make sure we have these fields. + */ + if (!result || (mysql_num_fields(result) < 14)) goto error; if (!mysql_num_rows(result)) @@ -2557,9 +2562,9 @@ int ha_federated::info(uint flag) data_file_length= records * mean_rec_length; if (row[12] != NULL) - update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error); + update_time= (time_t) my_strtoll10(row[12], (char**) 0, &error); if (row[13] != NULL) - check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error); + check_time= (time_t) my_strtoll10(row[13], (char**) 0, &error); } /* diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index bf807407df1..4b96e5b5744 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -175,7 +175,7 @@ void ha_heap::update_key_stats() else { ha_rows hash_buckets= file->s->keydef[i].hash_buckets; - uint no_records= hash_buckets ? file->s->records/hash_buckets : 2; + uint no_records= hash_buckets ? (uint) (file->s->records/hash_buckets) : 2; if (no_records < 2) no_records= 2; key->rec_per_key[key->key_parts-1]= no_records; diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 86d3e930122..fa68da87661 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -522,6 +522,9 @@ convert_error_code_to_mysql( mark_transaction_to_rollback(thd, TRUE); return(HA_ERR_LOCK_TABLE_FULL); + } else if (error == DB_UNSUPPORTED) { + + return(HA_ERR_UNSUPPORTED); } else { return(-1); // Unknown error } @@ -3713,11 +3716,22 @@ convert_search_mode_to_innobase( and comparison of non-latin1 char type fields in innobase_mysql_cmp() to get PAGE_CUR_LE_OR_EXTENDS to work correctly. */ - - default: assert(0); + case HA_READ_MBR_CONTAIN: + case HA_READ_MBR_INTERSECT: + case HA_READ_MBR_WITHIN: + case HA_READ_MBR_DISJOINT: + case HA_READ_MBR_EQUAL: + my_error(ER_TABLE_CANT_HANDLE_SPKEYS, MYF(0)); + return(PAGE_CUR_UNSUPP); + /* do not use "default:" in order to produce a gcc warning: + enumeration value '...' not handled in switch + (if -Wswitch or -Wall is used) + */ } - return(0); + my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "this functionality"); + + return(PAGE_CUR_UNSUPP); } /* @@ -3855,11 +3869,18 @@ ha_innobase::index_read( last_match_mode = (uint) match_mode; - innodb_srv_conc_enter_innodb(prebuilt->trx); + if (mode != PAGE_CUR_UNSUPP) { - ret = row_search_for_mysql((byte*) buf, mode, prebuilt, match_mode, 0); + innodb_srv_conc_enter_innodb(prebuilt->trx); - innodb_srv_conc_exit_innodb(prebuilt->trx); + ret = row_search_for_mysql((byte*) buf, mode, prebuilt, + match_mode, 0); + + innodb_srv_conc_exit_innodb(prebuilt->trx); + } else { + + ret = DB_UNSUPPORTED; + } if (ret == DB_SUCCESS) { error = 0; @@ -5174,8 +5195,16 @@ ha_innobase::records_in_range( mode2 = convert_search_mode_to_innobase(max_key ? max_key->flag : HA_READ_KEY_EXACT); - n_rows = btr_estimate_n_rows_in_range(index, range_start, - mode1, range_end, mode2); + if (mode1 != PAGE_CUR_UNSUPP && mode2 != PAGE_CUR_UNSUPP) { + + n_rows = btr_estimate_n_rows_in_range(index, range_start, + mode1, range_end, + mode2); + } else { + + n_rows = 0; + } + dtuple_free_for_mysql(heap1); dtuple_free_for_mysql(heap2); @@ -5477,7 +5506,7 @@ ha_innobase::info( table->key_info[i].rec_per_key[j]= rec_per_key >= ~(ulong) 0 ? 
~(ulong) 0 : - rec_per_key; + (ulong) rec_per_key; } index = dict_table_get_next_index_noninline(index); diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 66f66dccd99..a24486385fd 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1412,7 +1412,7 @@ void ha_myisam::start_bulk_insert(ha_rows rows) DBUG_ENTER("ha_myisam::start_bulk_insert"); THD *thd= current_thd; ulong size= min(thd->variables.read_buff_size, - table->s->avg_row_length*rows); + (ulong) (table->s->avg_row_length*rows)); DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu", (ulong) rows, size)); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 05e35bd0157..d2c580a61de 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1356,6 +1356,30 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *rec DBUG_RETURN(0); } +bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno) +{ + KEY* key_info= table->key_info + keyno; + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + uint i; + DBUG_ENTER("check_index_fields_in_write_set"); + + if (m_retrieve_all_fields) + { + DBUG_RETURN(true); + } + for (i= 0; key_part != end; key_part++, i++) + { + Field* field= key_part->field; + if (field->query_id != current_thd->query_id) + { + DBUG_RETURN(false); + } + } + + DBUG_RETURN(true); +} + int ha_ndbcluster::set_index_key_from_record(NdbOperation *op, const byte *record, uint keyno) { KEY* key_info= table->key_info + keyno; @@ -1643,7 +1667,8 @@ check_null_in_record(const KEY* key_info, const byte *record) * primary key or unique index values */ -int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk) +int ha_ndbcluster::peek_indexed_rows(const byte *record, + NDB_WRITE_OP write_op) { NdbTransaction *trans= m_active_trans; NdbOperation *op; @@ -1656,7 +1681,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk) (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); first= NULL; - if (check_pk && table->s->primary_key != MAX_KEY) + if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY) { /* * Fetch any row with colliding primary key @@ -1687,9 +1712,15 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk) */ if (check_null_in_record(key_info, record)) { - DBUG_PRINT("info", ("skipping check for key with NULL")); + DBUG_PRINT("info", ("skipping check for key with NULL")); continue; } + if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i)) + { + DBUG_PRINT("info", ("skipping check for key %u not in write_set", i)); + continue; + } + NdbIndexOperation *iop; NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index; key_part= key_info->key_part; @@ -2268,7 +2299,7 @@ int ha_ndbcluster::write_row(byte *record) start_bulk_insert will set parameters to ensure that each write_row is committed individually */ - int peek_res= peek_indexed_rows(record, true); + int peek_res= peek_indexed_rows(record, NDB_INSERT); if (!peek_res) { @@ -2302,7 +2333,7 @@ int ha_ndbcluster::write_row(byte *record) auto_value, 1) == -1) { if (--retries && - ndb->getNdbError().status == NdbError::TemporaryError); + ndb->getNdbError().status == NdbError::TemporaryError) { my_sleep(retry_sleep); continue; @@ -2456,7 +2487,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE || thd->lex->sql_command == SQLCOM_UPDATE_MULTI)) { - int peek_res= peek_indexed_rows(new_data, pk_update); + NDB_WRITE_OP 
write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE; + int peek_res= peek_indexed_rows(new_data, write_op); if (!peek_res) { @@ -4862,7 +4894,7 @@ ulonglong ha_ndbcluster::get_auto_increment() auto_value, cache_size, step, start)) { if (--retries && - ndb->getNdbError().status == NdbError::TemporaryError); + ndb->getNdbError().status == NdbError::TemporaryError) { my_sleep(retry_sleep); continue; diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 81cbdcd8fea..324969ad374 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -59,6 +59,12 @@ typedef struct ndb_index_data { bool null_in_unique_index; } NDB_INDEX_DATA; +typedef enum ndb_write_op { + NDB_INSERT = 0, + NDB_UPDATE = 1, + NDB_PK_UPDATE = 2 +} NDB_WRITE_OP; + typedef struct st_ndbcluster_share { THR_LOCK lock; pthread_mutex_t mutex; @@ -251,7 +257,7 @@ private: const NdbOperation *first, const NdbOperation *last, uint errcode); - int peek_indexed_rows(const byte *record, bool check_pk); + int peek_indexed_rows(const byte *record, NDB_WRITE_OP write_op); int unique_index_read(const byte *key, uint key_len, byte *buf); int ordered_index_scan(const key_range *start_key, @@ -286,6 +292,7 @@ private: int get_ndb_blobs_value(NdbBlob *last_ndb_blob, my_ptrdiff_t ptrdiff); int set_primary_key(NdbOperation *op, const byte *key); int set_primary_key_from_record(NdbOperation *op, const byte *record); + bool check_index_fields_in_write_set(uint keyno); int set_index_key_from_record(NdbOperation *op, const byte *record, uint keyno); int set_bounds(NdbIndexScanOperation*, const key_range *keys[2], uint= 0); diff --git a/sql/ha_ndbcluster_cond.cc b/sql/ha_ndbcluster_cond.cc index ea3f8a7683a..c7b185a92f0 100644 --- a/sql/ha_ndbcluster_cond.cc +++ b/sql/ha_ndbcluster_cond.cc @@ -1338,9 +1338,23 @@ ha_ndbcluster_cond::generate_scan_filter(NdbScanOperation *op) if (m_cond_stack) { - NdbScanFilter filter(op); + NdbScanFilter filter(op, false); // don't abort on too large - DBUG_RETURN(generate_scan_filter_from_cond(filter)); + int ret=generate_scan_filter_from_cond(filter); + if (ret != 0) + { + const NdbError& err=filter.getNdbError(); + if (err.code == NdbScanFilter::FilterTooLarge) + { + // err.message has static storage + DBUG_PRINT("info", ("%s", err.message)); + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + err.code, err.message); + ret=0; + } + } + if (ret != 0) + DBUG_RETURN(ret); } else { @@ -1391,7 +1405,7 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanOperation *op, { KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; - NdbScanFilter filter(op); + NdbScanFilter filter(op, true); // abort on too large int res; DBUG_ENTER("generate_scan_filter_from_key"); diff --git a/sql/item.cc b/sql/item.cc index c451fe94b29..431d82af331 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -939,9 +939,12 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions) int res; THD *thd= field->table->in_use; enum_check_fields tmp= thd->count_cuted_fields; + ulong sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE); thd->count_cuted_fields= CHECK_FIELD_IGNORE; res= save_in_field(field, no_conversions); thd->count_cuted_fields= tmp; + thd->variables.sql_mode= sql_mode; return res; } @@ -3304,7 +3307,7 @@ static Item** find_field_in_group_list(Item *find_item, ORDER *group_list) if (cur_field->table_name && table_name) { /* If field_name is qualified by a table name. 
*/ - if (strcmp(cur_field->table_name, table_name)) + if (my_strcasecmp(table_alias_charset, cur_field->table_name, table_name)) /* Same field names, different tables. */ return NULL; @@ -4248,6 +4251,47 @@ bool Item::is_datetime() } +String *Item::check_well_formed_result(String *str, bool send_error) +{ + /* Check whether we got a well-formed string */ + CHARSET_INFO *cs= str->charset(); + int well_formed_error; + uint wlen= cs->cset->well_formed_len(cs, + str->ptr(), str->ptr() + str->length(), + str->length(), &well_formed_error); + if (wlen < str->length()) + { + THD *thd= current_thd; + char hexbuf[7]; + enum MYSQL_ERROR::enum_warning_level level; + uint diff= str->length() - wlen; + set_if_smaller(diff, 3); + octet2hex(hexbuf, str->ptr() + wlen, diff); + if (send_error) + { + my_error(ER_INVALID_CHARACTER_STRING, MYF(0), + cs->csname, hexbuf); + return 0; + } + if ((thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))) + { + level= MYSQL_ERROR::WARN_LEVEL_ERROR; + null_value= 1; + str= 0; + } + else + { + level= MYSQL_ERROR::WARN_LEVEL_WARN; + str->length(wlen); + } + push_warning_printf(thd, level, ER_INVALID_CHARACTER_STRING, + ER(ER_INVALID_CHARACTER_STRING), cs->csname, hexbuf); + } + return str; +} + + /* Create a field to hold a string value from an item @@ -4366,11 +4410,8 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table) break; // Blob handled outside of case #ifdef HAVE_SPATIAL case MYSQL_TYPE_GEOMETRY: - return new Field_geom(max_length, maybe_null, name, table, - (Field::geometry_type) - ((type() == Item::TYPE_HOLDER) ? - ((Item_type_holder *)this)->get_geometry_type() : - ((Item_geometry_func *)this)->get_geometry_type())); + return new Field_geom(max_length, maybe_null, + name, table, get_geometry_type()); #endif /* HAVE_SPATIAL */ } } @@ -4510,7 +4551,7 @@ int Item::save_in_field(Field *field, bool no_conversions) my_decimal decimal_value; my_decimal *value= val_decimal(&decimal_value); if (null_value) - return set_field_to_null(field); + return set_field_to_null_with_conversions(field, no_conversions); field->set_notnull(); error=field->store_decimal(value); } @@ -4772,6 +4813,19 @@ warn: } +void Item_hex_string::print(String *str) +{ + char *end= (char*) str_value.ptr() + str_value.length(), + *ptr= end - min(str_value.length(), sizeof(longlong)); + str->append("0x"); + for (; ptr != end ; ptr++) + { + str->append(_dig_vec_lower[((uchar) *ptr) >> 4]); + str->append(_dig_vec_lower[((uchar) *ptr) & 0x0F]); + } +} + + bool Item_hex_string::eq(const Item *arg, bool binary_cmp) const { if (arg->basic_const_item() && arg->type() == type()) @@ -6205,9 +6259,9 @@ bool field_is_equal_to_item(Field *field,Item *item) return result == field->val_real(); } -Item_cache* Item_cache::get_cache(Item_result type) +Item_cache* Item_cache::get_cache(const Item *item) { - switch (type) { + switch (item->result_type()) { case INT_RESULT: return new Item_cache_int(); case REAL_RESULT: @@ -6215,7 +6269,7 @@ Item_cache* Item_cache::get_cache(Item_result type) case DECIMAL_RESULT: return new Item_cache_decimal(); case STRING_RESULT: - return new Item_cache_str(); + return new Item_cache_str(item); case ROW_RESULT: return new Item_cache_row(); default: @@ -6393,6 +6447,14 @@ my_decimal *Item_cache_str::val_decimal(my_decimal *decimal_val) } +int Item_cache_str::save_in_field(Field *field, bool no_conversions) +{ + int res= Item_cache::save_in_field(field, no_conversions); + return (is_varbinary && field->type() == MYSQL_TYPE_STRING && + value->length() 
< field->field_length) ? 1 : res; +} + + bool Item_cache_row::allocate(uint num) { item_count= num; @@ -6411,7 +6473,7 @@ bool Item_cache_row::setup(Item * item) { Item *el= item->element_index(i); Item_cache *tmp; - if (!(tmp= values[i]= Item_cache::get_cache(el->result_type()))) + if (!(tmp= values[i]= Item_cache::get_cache(el))) return 1; tmp->setup(el); } @@ -6493,9 +6555,7 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item) prev_decimal_int_part= item->decimal_int_part(); #ifdef HAVE_SPATIAL if (item->field_type() == MYSQL_TYPE_GEOMETRY) - geometry_type= (item->type() == Item::FIELD_ITEM) ? - ((Item_field *)item)->get_geometry_type() : - (Field::geometry_type)((Item_geometry_func *)item)->get_geometry_type(); + geometry_type= item->get_geometry_type(); #endif /* HAVE_SPATIAL */ } diff --git a/sql/item.h b/sql/item.h index 3c699c0eda3..a1135c2c725 100644 --- a/sql/item.h +++ b/sql/item.h @@ -870,6 +870,9 @@ public: */ virtual bool result_as_longlong() { return FALSE; } bool is_datetime(); + virtual Field::geometry_type get_geometry_type() const + { return Field::GEOM_GEOMETRY; }; + String *check_well_formed_result(String *str, bool send_error= 0); }; @@ -1112,6 +1115,8 @@ public: Item_name_const(Item *name_arg, Item *val): value_item(val), name_item(name_arg) { + if(!value_item->basic_const_item()) + my_error(ER_WRONG_ARGUMENTS, MYF(0), "NAME_CONST"); Item::maybe_null= TRUE; } @@ -1335,7 +1340,7 @@ public: int fix_outer_field(THD *thd, Field **field, Item **reference); virtual Item *update_value_transformer(byte *select_arg); void print(String *str); - Field::geometry_type get_geometry_type() + Field::geometry_type get_geometry_type() const { DBUG_ASSERT(field_type() == MYSQL_TYPE_GEOMETRY); return field->get_geometry_type(); @@ -1579,7 +1584,7 @@ public: double val_real() { DBUG_ASSERT(fixed == 1); return ulonglong2double((ulonglong)value); } String *val_str(String*); - Item *clone_item() { return new Item_uint(name,max_length); } + Item *clone_item() { return new Item_uint(name, value, max_length); } int save_in_field(Field *field, bool no_conversions); void print(String *str); Item_num *neg (); @@ -1853,6 +1858,7 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} + void print(String *str); bool eq(const Item *item, bool binary_cmp) const; virtual Item *safe_charset_converter(CHARSET_INFO *tocs); }; @@ -2463,7 +2469,7 @@ public: }; virtual void store(Item *)= 0; enum Type type() const { return CACHE_ITEM; } - static Item_cache* get_cache(Item_result type); + static Item_cache* get_cache(const Item *item); table_map used_tables() const { return used_table_map; } virtual void keep_array() {} // to prevent drop fixed flag (no need parent cleanup call) @@ -2525,9 +2531,16 @@ class Item_cache_str: public Item_cache { char buffer[STRING_BUFFER_USUAL_SIZE]; String *value, value_buff; + bool is_varbinary; + public: - Item_cache_str(): Item_cache(), value(0) { } - + Item_cache_str(const Item *item) : + Item_cache(), value(0), + is_varbinary(item->type() == FIELD_ITEM && + ((const Item_field *) item)->field->type() == + MYSQL_TYPE_VARCHAR && + !((const Item_field *) item)->field->has_charset()) + {} void store(Item *item); double val_real(); longlong val_int(); @@ -2535,6 +2548,7 @@ public: my_decimal *val_decimal(my_decimal *); enum Item_result result_type() const { return STRING_RESULT; } CHARSET_INFO *charset() const { return value->charset(); }; + int save_in_field(Field *field, 
bool no_conversions); }; class Item_cache_row: public Item_cache @@ -2637,7 +2651,7 @@ public: Field *make_field_by_type(TABLE *table); static uint32 display_length(Item *item); static enum_field_types get_real_type(Item *); - Field::geometry_type get_geometry_type() { return geometry_type; }; + Field::geometry_type get_geometry_type() const { return geometry_type; }; }; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 0e718eb52bd..ee7a929024e 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -147,6 +147,36 @@ static int agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems) } +/** + @brief Aggregates field types from the array of items. + + @param[in] items array of items to aggregate the type from + @paran[in] nitems number of items in the array + + @details This function aggregates field types from the array of items. + Found type is supposed to be used later as the result field type + of a multi-argument function. + Aggregation itself is performed by the Field::field_type_merge() + function. + + @note The term "aggregation" is used here in the sense of inferring the + result type of a function from its argument types. + + @return aggregated field type. +*/ + +enum_field_types agg_field_type(Item **items, uint nitems) +{ + uint i; + if (!nitems || items[0]->result_type() == ROW_RESULT ) + return (enum_field_types)-1; + enum_field_types res= items[0]->field_type(); + for (i= 1 ; i < nitems ; i++) + res= Field::field_type_merge(res, items[i]->field_type()); + return res; +} + + static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname) { @@ -522,26 +552,26 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) } -/* - Convert date provided in a string to the int representation. +/** + @brief Convert date provided in a string to the int representation. - SYNOPSIS - get_date_from_str() - thd Thread handle - str a string to convert - warn_type type of the timestamp for issuing the warning - warn_name field name for issuing the warning - error_arg [out] TRUE if string isn't a DATETIME or clipping occur + @param[in] thd thread handle + @param[in] str a string to convert + @param[in] warn_type type of the timestamp for issuing the warning + @param[in] warn_name field name for issuing the warning + @param[out] error_arg could not extract a DATE or DATETIME - DESCRIPTION - Convert date provided in the string str to the int representation. - if the string contains wrong date or doesn't contain it at all - then the warning is issued and TRUE returned in the error_arg argument. - The warn_type and the warn_name arguments are used as the name and the - type of the field when issuing the warning. + @details Convert date provided in the string str to the int + representation. If the string contains wrong date or doesn't + contain it at all then a warning is issued. The warn_type and + the warn_name arguments are used as the name and the type of the + field when issuing the warning. If any input was discarded + (trailing or non-timestampy characters), was_cut will be non-zero. + was_type will return the type str_to_datetime() could correctly + extract. - RETURN - converted value. + @return + converted value. 
0 on error and on zero-dates -- check 'failure' */ static ulonglong @@ -552,26 +582,33 @@ get_date_from_str(THD *thd, String *str, timestamp_type warn_type, int error; MYSQL_TIME l_time; enum_mysql_timestamp_type ret; - *error_arg= TRUE; ret= str_to_datetime(str->ptr(), str->length(), &l_time, (TIME_FUZZY_DATE | MODE_INVALID_DATES | (thd->variables.sql_mode & (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE))), &error); - if ((ret == MYSQL_TIMESTAMP_DATETIME || ret == MYSQL_TIMESTAMP_DATE)) + + if (ret == MYSQL_TIMESTAMP_DATETIME || ret == MYSQL_TIMESTAMP_DATE) { - value= TIME_to_ulonglong_datetime(&l_time); + /* + Do not return yet, we may still want to throw a "trailing garbage" + warning. + */ *error_arg= FALSE; + value= TIME_to_ulonglong_datetime(&l_time); } - - if (error || *error_arg) + else { + *error_arg= TRUE; + error= 1; /* force warning */ + } + + if (error > 0) make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, str->ptr(), str->length(), warn_type, warn_name); - *error_arg= TRUE; - } + return value; } @@ -872,6 +909,12 @@ get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg, timestamp_type t_type= f_type == MYSQL_TYPE_DATE ? MYSQL_TIMESTAMP_DATE : MYSQL_TIMESTAMP_DATETIME; value= get_date_from_str(thd, str, t_type, warn_item->name, &error); + /* + If str did not contain a valid date according to the current + SQL_MODE, get_date_from_str() has already thrown a warning, + and we don't want to throw NULL on invalid date (see 5.2.6 + "SQL modes" in the manual), so we're done here. + */ } /* Do not cache GET_USER_VAR() function as its const_item() may return TRUE @@ -1343,7 +1386,7 @@ longlong Item_func_truth::val_int() bool Item_in_optimizer::fix_left(THD *thd, Item **ref) { if (!args[0]->fixed && args[0]->fix_fields(thd, args) || - !cache && !(cache= Item_cache::get_cache(args[0]->result_type()))) + !cache && !(cache= Item_cache::get_cache(args[0]))) return 1; cache->setup(args[0]); @@ -1595,24 +1638,27 @@ bool Item_func_opt_neg::eq(const Item *item, bool binary_cmp) const void Item_func_interval::fix_length_and_dec() { + uint rows= row->cols(); + use_decimal_comparison= (row->element_index(0)->result_type() == DECIMAL_RESULT) || (row->element_index(0)->result_type() == INT_RESULT); - if (row->cols() > 8) + if (rows > 8) { - bool consts=1; + bool not_null_consts= TRUE; - for (uint i=1 ; consts && i < row->cols() ; i++) + for (uint i= 1; not_null_consts && i < rows; i++) { - consts&= row->element_index(i)->const_item(); + Item *el= row->element_index(i); + not_null_consts&= el->const_item() & !el->is_null(); } - if (consts && + if (not_null_consts && (intervals= - (interval_range*) sql_alloc(sizeof(interval_range)*(row->cols()-1)))) + (interval_range*) sql_alloc(sizeof(interval_range) * (rows - 1)))) { if (use_decimal_comparison) { - for (uint i=1 ; i < row->cols(); i++) + for (uint i= 1; i < rows; i++) { Item *el= row->element_index(i); interval_range *range= intervals + (i-1); @@ -1637,7 +1683,7 @@ void Item_func_interval::fix_length_and_dec() } else { - for (uint i=1 ; i < row->cols(); i++) + for (uint i= 1; i < rows; i++) { intervals[i-1].dbl= row->element_index(i)->val_real(); } @@ -1728,12 +1774,22 @@ longlong Item_func_interval::val_int() ((el->result_type() == DECIMAL_RESULT) || (el->result_type() == INT_RESULT))) { - my_decimal e_dec_buf, *e_dec= row->element_index(i)->val_decimal(&e_dec_buf); + my_decimal e_dec_buf, *e_dec= el->val_decimal(&e_dec_buf); + /* Skip NULL ranges. 
*/ + if (el->null_value) + continue; if (my_decimal_cmp(e_dec, dec) > 0) - return i-1; + return i - 1; + } + else + { + double val= el->val_real(); + /* Skip NULL ranges. */ + if (el->null_value) + continue; + if (val > value) + return i - 1; } - else if (row->element_index(i)->val_real() > value) - return i-1; } return i-1; } @@ -1990,10 +2046,20 @@ Item_func_ifnull::fix_length_and_dec() agg_result_type(&hybrid_type, args, 2); maybe_null=args[1]->maybe_null; decimals= max(args[0]->decimals, args[1]->decimals); - max_length= (hybrid_type == DECIMAL_RESULT || hybrid_type == INT_RESULT) ? - (max(args[0]->max_length - args[0]->decimals, - args[1]->max_length - args[1]->decimals) + decimals) : - max(args[0]->max_length, args[1]->max_length); + unsigned_flag= args[0]->unsigned_flag && args[1]->unsigned_flag; + + if (hybrid_type == DECIMAL_RESULT || hybrid_type == INT_RESULT) + { + int len0= args[0]->max_length - args[0]->decimals + - (args[0]->unsigned_flag ? 0 : 1); + + int len1= args[1]->max_length - args[1]->decimals + - (args[1]->unsigned_flag ? 0 : 1); + + max_length= max(len0, len1) + decimals + (unsigned_flag ? 0 : 1); + } + else + max_length= max(args[0]->max_length, args[1]->max_length); switch (hybrid_type) { case STRING_RESULT: @@ -2009,9 +2075,7 @@ Item_func_ifnull::fix_length_and_dec() default: DBUG_ASSERT(0); } - cached_field_type= args[0]->field_type(); - if (cached_field_type != args[1]->field_type()) - cached_field_type= Item_func::field_type(); + cached_field_type= agg_field_type(args, 2); } @@ -2159,11 +2223,13 @@ Item_func_if::fix_length_and_dec() { cached_result_type= arg2_type; collation.set(args[2]->collation.collation); + cached_field_type= args[2]->field_type(); } else if (null2) { cached_result_type= arg1_type; collation.set(args[1]->collation.collation); + cached_field_type= args[1]->field_type(); } else { @@ -2177,6 +2243,7 @@ Item_func_if::fix_length_and_dec() { collation.set(&my_charset_bin); // Number } + cached_field_type= agg_field_type(args + 1, 2); } if ((cached_result_type == DECIMAL_RESULT ) @@ -2573,7 +2640,7 @@ void Item_func_case::fix_length_and_dec() agg_arg_charsets(collation, agg, nagg, MY_COLL_ALLOW_CONV, 1)) return; - + cached_field_type= agg_field_type(agg, nagg); /* Aggregate first expression and all THEN expression types and collations when string comparison @@ -2719,6 +2786,7 @@ my_decimal *Item_func_coalesce::decimal_op(my_decimal *decimal_value) void Item_func_coalesce::fix_length_and_dec() { + cached_field_type= agg_field_type(args, arg_count); agg_result_type(&hybrid_type, args, arg_count); switch (hybrid_type) { case STRING_RESULT: @@ -4250,6 +4318,51 @@ void Item_func_like::cleanup() #ifdef USE_REGEX bool +Item_func_regex::regcomp(bool send_error) +{ + char buff[MAX_FIELD_WIDTH]; + String tmp(buff,sizeof(buff),&my_charset_bin); + String *res= args[1]->val_str(&tmp); + int error; + + if (args[1]->null_value) + return TRUE; + + if (regex_compiled) + { + if (!stringcmp(res, &prev_regexp)) + return FALSE; + prev_regexp.copy(*res); + my_regfree(&preg); + regex_compiled= 0; + } + + if (cmp_collation.collation != regex_lib_charset) + { + /* Convert UCS2 strings to UTF8 */ + uint dummy_errors; + if (conv.copy(res->ptr(), res->length(), res->charset(), + regex_lib_charset, &dummy_errors)) + return TRUE; + res= &conv; + } + + if ((error= my_regcomp(&preg, res->c_ptr_safe(), + regex_lib_flags, regex_lib_charset))) + { + if (send_error) + { + (void) my_regerror(error, &preg, buff, sizeof(buff)); + my_error(ER_REGEXP_ERROR, MYF(0), buff); + } + return 
TRUE; + } + regex_compiled= 1; + return FALSE; +} + + +bool Item_func_regex::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); @@ -4265,34 +4378,34 @@ Item_func_regex::fix_fields(THD *thd, Item **ref) if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1)) return TRUE; + regex_lib_flags= (cmp_collation.collation->state & + (MY_CS_BINSORT | MY_CS_CSSORT)) ? + REG_EXTENDED | REG_NOSUB : + REG_EXTENDED | REG_NOSUB | REG_ICASE; + /* + If the case of UCS2 and other non-ASCII character sets, + we will convert patterns and strings to UTF8. + */ + regex_lib_charset= (cmp_collation.collation->mbminlen > 1) ? + &my_charset_utf8_general_ci : + cmp_collation.collation; + used_tables_cache=args[0]->used_tables() | args[1]->used_tables(); not_null_tables_cache= (args[0]->not_null_tables() | args[1]->not_null_tables()); const_item_cache=args[0]->const_item() && args[1]->const_item(); if (!regex_compiled && args[1]->const_item()) { - char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff),&my_charset_bin); - String *res=args[1]->val_str(&tmp); if (args[1]->null_value) { // Will always return NULL maybe_null=1; + fixed= 1; return FALSE; } - int error; - if ((error= my_regcomp(&preg,res->c_ptr(), - ((cmp_collation.collation->state & - (MY_CS_BINSORT | MY_CS_CSSORT)) ? - REG_EXTENDED | REG_NOSUB : - REG_EXTENDED | REG_NOSUB | REG_ICASE), - cmp_collation.collation))) - { - (void) my_regerror(error,&preg,buff,sizeof(buff)); - my_error(ER_REGEXP_ERROR, MYF(0), buff); + if (regcomp(TRUE)) return TRUE; - } - regex_compiled=regex_is_const=1; - maybe_null=args[0]->maybe_null; + regex_is_const= 1; + maybe_null= args[0]->maybe_null; } else maybe_null=1; @@ -4305,47 +4418,25 @@ longlong Item_func_regex::val_int() { DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; - String *res, tmp(buff,sizeof(buff),&my_charset_bin); + String tmp(buff,sizeof(buff),&my_charset_bin); + String *res= args[0]->val_str(&tmp); - res=args[0]->val_str(&tmp); - if (args[0]->null_value) - { - null_value=1; + if ((null_value= (args[0]->null_value || + (!regex_is_const && regcomp(FALSE))))) return 0; - } - if (!regex_is_const) - { - char buff2[MAX_FIELD_WIDTH]; - String *res2, tmp2(buff2,sizeof(buff2),&my_charset_bin); - res2= args[1]->val_str(&tmp2); - if (args[1]->null_value) + if (cmp_collation.collation != regex_lib_charset) + { + /* Convert UCS2 strings to UTF8 */ + uint dummy_errors; + if (conv.copy(res->ptr(), res->length(), res->charset(), + regex_lib_charset, &dummy_errors)) { - null_value=1; + null_value= 1; return 0; } - if (!regex_compiled || stringcmp(res2,&prev_regexp)) - { - prev_regexp.copy(*res2); - if (regex_compiled) - { - my_regfree(&preg); - regex_compiled=0; - } - if (my_regcomp(&preg,res2->c_ptr_safe(), - ((cmp_collation.collation->state & - (MY_CS_BINSORT | MY_CS_CSSORT)) ? - REG_EXTENDED | REG_NOSUB : - REG_EXTENDED | REG_NOSUB | REG_ICASE), - cmp_collation.collation)) - { - null_value=1; - return 0; - } - regex_compiled=1; - } + res= &conv; } - null_value=0; return my_regexec(&preg,res->c_ptr_safe(),0,(my_regmatch_t*) 0,0) ? 
0 : 1; } diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 1f4359c454b..d250e1b366a 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -640,6 +640,7 @@ public: class Item_func_coalesce :public Item_func_numhybrid { protected: + enum_field_types cached_field_type; Item_func_coalesce(Item *a, Item *b) :Item_func_numhybrid(a, b) {} public: Item_func_coalesce(List<Item> &list) :Item_func_numhybrid(list) {} @@ -652,13 +653,13 @@ public: enum Item_result result_type () const { return hybrid_type; } const char *func_name() const { return "coalesce"; } table_map not_null_tables() const { return 0; } + enum_field_types field_type() const { return cached_field_type; } }; class Item_func_ifnull :public Item_func_coalesce { protected: - enum_field_types cached_field_type; bool field_type_defined; public: Item_func_ifnull(Item *a, Item *b) :Item_func_coalesce(a,b) {} @@ -677,6 +678,7 @@ public: class Item_func_if :public Item_func { enum Item_result cached_result_type; + enum_field_types cached_field_type; public: Item_func_if(Item *a,Item *b,Item *c) :Item_func(a,b,c), cached_result_type(INT_RESULT) @@ -686,6 +688,7 @@ public: String *val_str(String *str); my_decimal *val_decimal(my_decimal *); enum Item_result result_type () const { return cached_result_type; } + enum_field_types field_type() const { return cached_field_type; } bool fix_fields(THD *, Item **); void fix_length_and_dec(); uint decimal_precision() const; @@ -722,6 +725,7 @@ class Item_func_case :public Item_func uint ncases; Item_result cmp_type; DTCollation cmp_collation; + enum_field_types cached_field_type; public: Item_func_case(List<Item> &list, Item *first_expr_arg, Item *else_expr_arg) :Item_func(), first_expr_num(-1), else_expr_num(-1), @@ -749,6 +753,7 @@ public: uint decimal_precision() const; table_map not_null_tables() const { return 0; } enum Item_result result_type () const { return cached_result_type; } + enum_field_types field_type() const { return cached_field_type; } const char *func_name() const { return "case"; } void print(String *str); Item *find_item(String *str); @@ -1315,6 +1320,10 @@ class Item_func_regex :public Item_bool_func bool regex_is_const; String prev_regexp; DTCollation cmp_collation; + CHARSET_INFO *regex_lib_charset; + int regex_lib_flags; + String conv; + bool regcomp(bool send_error); public: Item_func_regex(Item *a,Item *b) :Item_bool_func(a,b), regex_compiled(0),regex_is_const(0) {} @@ -1384,6 +1393,7 @@ public: bool subst_argument_checker(byte **arg) { return TRUE; } Item *compile(Item_analyzer analyzer, byte **arg_p, Item_transformer transformer, byte *arg_t); + enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } }; diff --git a/sql/item_func.cc b/sql/item_func.cc index e4d370bbdf2..264d9265bca 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -1380,7 +1380,11 @@ longlong Item_func_int_div::val_int() void Item_func_int_div::fix_length_and_dec() { - max_length=args[0]->max_length - args[0]->decimals; + Item_result argtype= args[0]->result_type(); + /* use precision ony for the data type it is applicable for and valid */ + max_length=args[0]->max_length - + (argtype == DECIMAL_RESULT || argtype == INT_RESULT ? 
+ args[0]->decimals : 0); maybe_null=1; unsigned_flag=args[0]->unsigned_flag | args[1]->unsigned_flag; } @@ -1999,6 +2003,7 @@ void Item_func_round::fix_length_and_dec() case DECIMAL_RESULT: { hybrid_type= DECIMAL_RESULT; + decimals_to_set= min(DECIMAL_MAX_SCALE, decimals_to_set); int decimals_delta= args[0]->decimals - decimals_to_set; int precision= args[0]->decimal_precision(); int length_increase= ((decimals_delta <= 0) || truncate) ? 0:1; @@ -2105,7 +2110,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value) longlong dec= args[1]->val_int(); if (dec > 0 || (dec < 0 && args[1]->unsigned_flag)) { - dec= min((ulonglong) dec, DECIMAL_MAX_SCALE); + dec= min((ulonglong) dec, decimals); decimals= (uint8) dec; // to get correct output } else if (dec < INT_MIN) @@ -2243,6 +2248,7 @@ void Item_func_min_max::fix_length_and_dec() else if ((cmp_type == DECIMAL_RESULT) || (cmp_type == INT_RESULT)) max_length= my_decimal_precision_to_length(max_int_part+decimals, decimals, unsigned_flag); + cached_field_type= agg_field_type(args, arg_count); } @@ -4939,13 +4945,44 @@ bool Item_func_match::fix_fields(THD *thd, Item **ref) my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH"); return TRUE; } - table=((Item_field *)item)->field->table; + /* + With prepared statements Item_func_match::fix_fields is called twice. + When it is called first time we have original item tree here and add + conversion layer for character sets that do not have ctype array a few + lines below. When it is called second time, we already have conversion + layer in item tree. + */ + table= (item->type() == Item::FIELD_ITEM) ? + ((Item_field *)item)->field->table : + ((Item_field *)((Item_func_conv *)item)->key_item())->field->table; if (!(table->file->table_flags() & HA_CAN_FULLTEXT)) { my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0)); return 1; } table->fulltext_searched=1; + /* A workaround for ucs2 character set */ + if (!args[1]->collation.collation->ctype) + { + CHARSET_INFO *compatible_cs= + get_compatible_charset_with_ctype(args[1]->collation.collation); + bool rc= 1; + if (compatible_cs) + { + Item_string *conv_item= new Item_string("", 0, compatible_cs, + DERIVATION_EXPLICIT); + item= args[0]; + args[0]= conv_item; + rc= agg_item_charsets(cmp_collation, func_name(), args, arg_count, + MY_COLL_ALLOW_SUPERSET_CONV | + MY_COLL_ALLOW_COERCIBLE_CONV | + MY_COLL_DISALLOW_NONE, 1); + args[0]= item; + } + else + my_error(ER_WRONG_ARGUMENTS, MYF(0), "MATCH"); + return rc; + } return agg_arg_collations_for_comparison(cmp_collation, args+1, arg_count-1, 0); } diff --git a/sql/item_func.h b/sql/item_func.h index 56b5e75652c..a5e162f344f 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -236,9 +236,40 @@ public: my_decimal *val_decimal(my_decimal *); String *val_str(String*str); + /** + @brief Performs the operation that this functions implements when the + result type is INT. + + @return The result of the operation. + */ virtual longlong int_op()= 0; + + /** + @brief Performs the operation that this functions implements when the + result type is REAL. + + @return The result of the operation. + */ virtual double real_op()= 0; + + /** + @brief Performs the operation that this functions implements when the + result type is DECIMAL. + + @param A pointer where the DECIMAL value will be allocated. + @return + - 0 If the result is NULL + - The same pointer it was given, with the area initialized to the + result of the operation. 
+ */ virtual my_decimal *decimal_op(my_decimal *)= 0; + + /** + @brief Performs the operation that this functions implements when the + result type is a string type. + + @return The result of the operation. + */ virtual String *str_op(String *)= 0; bool is_null() { update_null_value(); return null_value; } }; @@ -435,6 +466,7 @@ public: longlong int_op(); my_decimal *decimal_op(my_decimal *); const char *func_name() const { return "-"; } + virtual bool basic_const_item() const { return args[0]->basic_const_item(); } void fix_length_and_dec(); void fix_num_length_and_dec(); uint decimal_precision() const { return args[0]->decimal_precision(); } @@ -692,7 +724,8 @@ class Item_func_min_max :public Item_func /* An item used for issuing warnings while string to DATETIME conversion. */ Item *datetime_item; THD *thd; - +protected: + enum_field_types cached_field_type; public: Item_func_min_max(List<Item> &list,int cmp_sign_arg) :Item_func(list), cmp_type(INT_RESULT), cmp_sign(cmp_sign_arg), compare_as_dates(FALSE), @@ -705,6 +738,7 @@ public: enum Item_result result_type () const { return cmp_type; } bool result_as_longlong() { return compare_as_dates; }; uint cmp_datetimes(ulonglong *value); + enum_field_types field_type() const { return cached_field_type; } }; class Item_func_min :public Item_func_min_max @@ -747,6 +781,8 @@ public: collation= args[0]->collation; max_length= args[0]->max_length; decimals=args[0]->decimals; + /* The item could be a NULL constant. */ + null_value= args[0]->is_null(); } }; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 6c012277888..966cefea9fe 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -27,7 +27,7 @@ Field *Item_geometry_func::tmp_table_field(TABLE *t_arg) { return new Field_geom(max_length, maybe_null, name, t_arg, - (Field::geometry_type) get_geometry_type()); + get_geometry_type()); } void Item_geometry_func::fix_length_and_dec() @@ -38,10 +38,6 @@ void Item_geometry_func::fix_length_and_dec() maybe_null= 1; } -int Item_geometry_func::get_geometry_type() const -{ - return (int)Field::GEOM_GEOMETRY; -} String *Item_func_geometry_from_text::val_str(String *str) { @@ -160,9 +156,9 @@ String *Item_func_geometry_type::val_str(String *str) } -int Item_func_envelope::get_geometry_type() const +Field::geometry_type Item_func_envelope::get_geometry_type() const { - return (int) Field::GEOM_POLYGON; + return Field::GEOM_POLYGON; } @@ -190,9 +186,9 @@ String *Item_func_envelope::val_str(String *str) } -int Item_func_centroid::get_geometry_type() const +Field::geometry_type Item_func_centroid::get_geometry_type() const { - return (int) Field::GEOM_POINT; + return Field::GEOM_POINT; } @@ -330,9 +326,9 @@ err: */ -int Item_func_point::get_geometry_type() const +Field::geometry_type Item_func_point::get_geometry_type() const { - return (int) Field::GEOM_POINT; + return Field::GEOM_POINT; } diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 9c7970f9e53..e99510f762f 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -33,7 +33,6 @@ public: void fix_length_and_dec(); enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; } Field *tmp_table_field(TABLE *t_arg); - virtual int get_geometry_type() const; bool is_null() { (void) val_int(); return null_value; } }; @@ -92,7 +91,7 @@ public: Item_func_centroid(Item *a): Item_geometry_func(a) {} const char *func_name() const { return "centroid"; } String *val_str(String *); - int get_geometry_type() const; + Field::geometry_type get_geometry_type() const; }; class 
Item_func_envelope: public Item_geometry_func @@ -101,7 +100,7 @@ public: Item_func_envelope(Item *a): Item_geometry_func(a) {} const char *func_name() const { return "envelope"; } String *val_str(String *); - int get_geometry_type() const; + Field::geometry_type get_geometry_type() const; }; class Item_func_point: public Item_geometry_func @@ -111,7 +110,7 @@ public: Item_func_point(Item *a, Item *b, Item *srid): Item_geometry_func(a, b, srid) {} const char *func_name() const { return "point"; } String *val_str(String *); - int get_geometry_type() const; + Field::geometry_type get_geometry_type() const; }; class Item_func_spatial_decomp: public Item_geometry_func diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 0c11c9eece8..4e72f117869 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -38,36 +38,6 @@ C_MODE_END String my_empty_string("",default_charset_info); -String *Item_str_func::check_well_formed_result(String *str) -{ - /* Check whether we got a well-formed string */ - CHARSET_INFO *cs= str->charset(); - int well_formed_error; - uint wlen= cs->cset->well_formed_len(cs, - str->ptr(), str->ptr() + str->length(), - str->length(), &well_formed_error); - if (wlen < str->length()) - { - THD *thd= current_thd; - char hexbuf[7]; - enum MYSQL_ERROR::enum_warning_level level; - uint diff= str->length() - wlen; - set_if_smaller(diff, 3); - octet2hex(hexbuf, str->ptr() + wlen, diff); - if (thd->variables.sql_mode & - (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)) - { - level= MYSQL_ERROR::WARN_LEVEL_ERROR; - null_value= 1; - str= 0; - } - else - level= MYSQL_ERROR::WARN_LEVEL_WARN; - push_warning_printf(thd, level, ER_INVALID_CHARACTER_STRING, - ER(ER_INVALID_CHARACTER_STRING), cs->csname, hexbuf); - } - return str; -} bool Item_str_func::fix_fields(THD *thd, Item **ref) @@ -2229,11 +2199,13 @@ String *Item_func_char::val_str(String *str) { DBUG_ASSERT(fixed == 1); str->length(0); + str->set_charset(collation.collation); for (uint i=0 ; i < arg_count ; i++) { int32 num=(int32) args[i]->val_int(); if (!args[i]->null_value) { + char char_num= (char) num; if (num&0xFF000000L) { str->append((char)(num>>24)); goto b2; @@ -2243,10 +2215,9 @@ String *Item_func_char::val_str(String *str) } else if (num&0xFF00L) { b1: str->append((char)(num>>8)); } - str->append((char) num); + str->append(&char_num, 1); } } - str->set_charset(collation.collation); str->realloc(str->length()); // Add end 0 (for Purify) return check_well_formed_result(str); } diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 4ffd8125422..3648438a69b 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -35,7 +35,6 @@ public: my_decimal *val_decimal(my_decimal *); enum Item_result result_type () const { return STRING_RESULT; } void left_right_max_length(); - String *check_well_formed_result(String *str); bool fix_fields(THD *thd, Item **ref); }; @@ -535,7 +534,7 @@ public: String *val_str(String *); void fix_length_and_dec() { - max_length= arg_count * collation.collation->mbmaxlen; + max_length= arg_count * 4; } const char *func_name() const { return "char"; } }; @@ -585,7 +584,8 @@ public: void fix_length_and_dec() { collation.set(default_charset()); - max_length= 64; + max_length=64; + maybe_null= 1; } }; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 0020dd35c61..57c3b391507 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1717,7 +1717,7 @@ void subselect_engine::set_row(List<Item> &item_list, Item_cache **row) item->decimals= sel_item->decimals; 
item->unsigned_flag= sel_item->unsigned_flag; maybe_null= sel_item->maybe_null; - if (!(row[i]= Item_cache::get_cache(res_type))) + if (!(row[i]= Item_cache::get_cache(sel_item))) return; row[i]->setup(sel_item); } @@ -2178,6 +2178,7 @@ int subselect_indexsubquery_engine::exec() ((Item_in_subselect *) item)->value= 0; empty_result_set= TRUE; null_keypart= 0; + table->status= 0; if (check_null) { @@ -2190,6 +2191,16 @@ int subselect_indexsubquery_engine::exec() if (copy_ref_key()) DBUG_RETURN(1); + if (table->status) + { + /* + We know that there will be no rows even if we scan. + Can be set in copy_ref_key. + */ + ((Item_in_subselect *) item)->value= 0; + DBUG_RETURN(0); + } + if (null_keypart) DBUG_RETURN(scan_table()); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 118609671b8..51dcd3ca175 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -306,6 +306,7 @@ public: double val_real(); String *val_str(String*); my_decimal *val_decimal(my_decimal *); + void update_null_value () { (void) val_bool(); } bool val_bool(); void top_level_item() { abort_on_null=1; } inline bool is_top_level_item() { return abort_on_null; } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 48ad53fbf75..ad8ebd0791c 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -905,7 +905,9 @@ bool Item_sum_distinct::setup(THD *thd) List<create_field> field_list; create_field field_def; /* field definition */ DBUG_ENTER("Item_sum_distinct::setup"); - DBUG_ASSERT(tree == 0); + /* It's legal to call setup() more than once when in a subquery */ + if (tree) + DBUG_RETURN(FALSE); /* Virtual table and the tree are created anew on each re-execution of @@ -913,7 +915,7 @@ bool Item_sum_distinct::setup(THD *thd) mem_root. */ if (field_list.push_back(&field_def)) - return TRUE; + DBUG_RETURN(TRUE); null_value= maybe_null= 1; quick_group= 0; @@ -925,7 +927,7 @@ bool Item_sum_distinct::setup(THD *thd) args[0]->unsigned_flag); if (! (table= create_virtual_tmp_table(thd, field_list))) - return TRUE; + DBUG_RETURN(TRUE); /* XXX: check that the case of CHAR(0) works OK */ tree_key_length= table->s->reclength - table->s->null_bytes; @@ -2443,6 +2445,7 @@ bool Item_sum_count_distinct::setup(THD *thd) /* Setup can be called twice for ROLLUP items. This is a bug. Please add DBUG_ASSERT(tree == 0) here when it's fixed. 
+ It's legal to call setup() more than once when in a subquery */ if (tree || table || tmp_table_param) return FALSE; @@ -3403,7 +3406,7 @@ String* Item_func_group_concat::val_str(String* str) DBUG_ASSERT(fixed == 1); if (null_value) return 0; - if (!result.length() && tree) + if (no_appended && tree) /* Tree is used for sorting as in ORDER BY */ tree_walk(tree, (tree_walk_action)&dump_leaf_key, (void*)this, left_root_right); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 3a2df5125ba..4ddc788b182 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1603,8 +1603,7 @@ bool Item_func_now::get_date(MYSQL_TIME *res, int Item_func_now::save_in_field(Field *to, bool no_conversions) { to->set_notnull(); - to->store_time(<ime, MYSQL_TIMESTAMP_DATETIME); - return 0; + return to->store_time(<ime, MYSQL_TIMESTAMP_DATETIME); } @@ -2646,6 +2645,13 @@ bool Item_date_typecast::get_date(MYSQL_TIME *ltime, uint fuzzy_date) } +bool Item_date_typecast::get_time(MYSQL_TIME *ltime) +{ + bzero((char *)ltime, sizeof(MYSQL_TIME)); + return args[0]->null_value; +} + + String *Item_date_typecast::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -3308,7 +3314,7 @@ void Item_func_str_to_date::fix_length_and_dec() { maybe_null= 1; decimals=0; - cached_field_type= MYSQL_TYPE_STRING; + cached_field_type= MYSQL_TYPE_DATETIME; max_length= MAX_DATETIME_FULL_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; cached_timestamp_type= MYSQL_TIMESTAMP_NONE; if ((const_item= args[1]->const_item())) diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 94bee28bb6b..b647e93b700 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -779,6 +779,7 @@ public: const char *func_name() const { return "cast_as_date"; } String *val_str(String *str); bool get_date(MYSQL_TIME *ltime, uint fuzzy_date); + bool get_time(MYSQL_TIME *ltime); const char *cast_type() const { return "date"; } enum_field_types field_type() const { return MYSQL_TYPE_DATE; } Field *tmp_table_field(TABLE *t_arg) @@ -964,7 +965,10 @@ class Item_func_maketime :public Item_str_timefunc { public: Item_func_maketime(Item *a, Item *b, Item *c) - :Item_str_timefunc(a, b ,c) {} + :Item_str_timefunc(a, b, c) + { + maybe_null= TRUE; + } String *val_str(String *str); const char *func_name() const { return "maketime"; } }; @@ -1032,7 +1036,7 @@ class Item_func_str_to_date :public Item_str_func bool const_item; public: Item_func_str_to_date(Item *a, Item *b) - :Item_str_func(a, b) + :Item_str_func(a, b), const_item(false) {} String *val_str(String *str); bool get_date(MYSQL_TIME *ltime, uint fuzzy_date); diff --git a/sql/log.cc b/sql/log.cc index e9aa273676a..af03cecd462 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -448,13 +448,10 @@ const char *MYSQL_LOG::generate_name(const char *log_name, { if (!log_name || !log_name[0]) { - /* - TODO: The following should be using fn_format(); We just need to - first change fn_format() to cut the file name if it's too long. 
- */ - strmake(buff, pidfile_name,FN_REFLEN-5); - strmov(fn_ext(buff),suffix); - return (const char *)buff; + strmake(buff, pidfile_name, FN_REFLEN - strlen(suffix) - 1); + return (const char *) + fn_format(buff, buff, "", suffix, MYF(MY_REPLACE_EXT|MY_REPLACE_DIR)); + } // get rid of extension if the log is binary to avoid problems if (strip_ext) diff --git a/sql/log_event.cc b/sql/log_event.cc index 1ef765f607f..d22973d12a3 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -27,6 +27,10 @@ #define log_cs &my_charset_latin1 +#ifndef DBUG_OFF +uint debug_not_change_ts_if_art_event= 1; // bug#29309 simulation +#endif + /* pretty_print_str() */ @@ -481,6 +485,18 @@ int Log_event::exec_event(struct st_relay_log_info* rli) rli->inc_event_relay_log_pos(); else { + /* + bug#29309 simulation: resetting the flag to force + wrong behaviour of artificial event to update + rli->last_master_timestamp for only one time - + the first FLUSH LOGS in the test. + */ + DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp", + if (debug_not_change_ts_if_art_event == 1 + && is_artificial_event()) + { + debug_not_change_ts_if_art_event= 0; + }); rli->inc_group_relay_log_pos(log_pos); flush_relay_log_info(rli); /* @@ -491,7 +507,21 @@ int Log_event::exec_event(struct st_relay_log_info* rli) rare cases, only consequence is that value may take some time to display in Seconds_Behind_Master - not critical). */ - rli->last_master_timestamp= when; +#ifndef DBUG_OFF + if (!(is_artificial_event() && debug_not_change_ts_if_art_event > 0)) +#else + if (!is_artificial_event()) +#endif + rli->last_master_timestamp= when; + /* + The flag is set back to be positive so that + any further FLUSH LOGS will be handled as prescribed. + */ + DBUG_EXECUTE_IF("let_first_flush_log_change_timestamp", + if (debug_not_change_ts_if_art_event == 0) + { + debug_not_change_ts_if_art_event= 2; + }); } } DBUG_RETURN(0); @@ -1370,18 +1400,48 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, /* 2 utility functions for the next method */ -/* - Get the pointer for a string (src) that contains the length in - the first byte. Set the output string (dst) to the string value - and place the length of the string in the byte after the string. +/** + Read a string with length from memory. + + This function reads the string-with-length stored at + <code>src</code> and extract the length into <code>*len</code> and + a pointer to the start of the string into <code>*dst</code>. The + string can then be copied using <code>memcpy()</code> with the + number of bytes given in <code>*len</code>. + + @param src Pointer to variable holding a pointer to the memory to + read the string from. + @param dst Pointer to variable holding a pointer where the actual + string starts. Starting from this position, the string + can be copied using @c memcpy(). + @param len Pointer to variable where the length will be stored. + @param end One-past-the-end of the memory where the string is + stored. + + @return Zero if the entire string can be copied successfully, + @c UINT_MAX if the length could not be read from memory + (that is, if <code>*src >= end</code>), otherwise the + number of bytes that are missing to read the full + string, which happends <code>*dst + *len >= end</code>. 
*/ -static void get_str_len_and_pointer(const Log_event::Byte **src, - const char **dst, - uint *len) -{ - if ((*len= **src)) - *dst= (char *)*src + 1; // Will be copied later - (*src)+= *len + 1; +static int +get_str_len_and_pointer(const Log_event::Byte **src, + const char **dst, + uint *len, + const Log_event::Byte *end) +{ + if (*src >= end) + return -1; // Will be UINT_MAX in two-complement arithmetics + uint length= **src; + if (length > 0) + { + if (*src + length >= end) + return *src + length - end + 1; // Number of bytes missing + *dst= (char *)*src + 1; // Will be copied later + } + *len= length; + *src+= length + 1; + return 0; } static void copy_str_and_move(const char **src, @@ -1394,6 +1454,46 @@ static void copy_str_and_move(const char **src, *(*dst)++= 0; } + +#ifndef DBUG_OFF +static char const * +code_name(int code) +{ + static char buf[255]; + switch (code) { + case Q_FLAGS2_CODE: return "Q_FLAGS2_CODE"; + case Q_SQL_MODE_CODE: return "Q_SQL_MODE_CODE"; + case Q_CATALOG_CODE: return "Q_CATALOG_CODE"; + case Q_AUTO_INCREMENT: return "Q_AUTO_INCREMENT"; + case Q_CHARSET_CODE: return "Q_CHARSET_CODE"; + case Q_TIME_ZONE_CODE: return "Q_TIME_ZONE_CODE"; + case Q_CATALOG_NZ_CODE: return "Q_CATALOG_NZ_CODE"; + case Q_LC_TIME_NAMES_CODE: return "Q_LC_TIME_NAMES_CODE"; + case Q_CHARSET_DATABASE_CODE: return "Q_CHARSET_DATABASE_CODE"; + } + sprintf(buf, "CODE#%d", code); + return buf; +} +#endif + +/** + Macro to check that there is enough space to read from memory. + + @param PTR Pointer to memory + @param END End of memory + @param CNT Number of bytes that should be read. + */ +#define CHECK_SPACE(PTR,END,CNT) \ + do { \ + DBUG_PRINT("info", ("Read %s", code_name(pos[-1]))); \ + DBUG_ASSERT((PTR) + (CNT) <= (END)); \ + if ((PTR) + (CNT) > (END)) { \ + DBUG_PRINT("info", ("query= 0")); \ + query= 0; \ + DBUG_VOID_RETURN; \ + } \ + } while (0) + /* Query_log_event::Query_log_event() This is used by the SQL slave thread to prepare the event before execution. @@ -1445,6 +1545,19 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, if (tmp) { status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET); + /* + Check if status variable length is corrupt and will lead to very + wrong data. We could be even more strict and require data_len to + be even bigger, but this will suffice to catch most corruption + errors that can lead to a crash. 
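The rewritten get_str_len_and_pointer() above and the CHECK_SPACE macro that follows it apply the same defensive rule: never trust a length byte taken from an event buffer without checking it against the end of that buffer. A minimal standalone sketch of the rule, assuming nothing from the server sources (Slice and parse_lp_string are invented names and the return convention is simplified):

// Sketch only: a bounds-checked reader for a length-prefixed string,
// in the spirit of the patched get_str_len_and_pointer()/CHECK_SPACE.
#include <cstdio>

struct Slice { const unsigned char *ptr; const unsigned char *end; };

// Returns 0 on success, -1 if even the length byte is missing,
// otherwise the number of bytes missing to complete the string.
static int parse_lp_string(Slice *in, const char **str, unsigned *len)
{
  if (in->ptr >= in->end)
    return -1;                             // no room for the length byte
  unsigned length = *in->ptr;
  if (in->ptr + 1 + length > in->end)      // string would run past the buffer
    return (int)(in->ptr + 1 + length - in->end);
  *str = (const char *)(in->ptr + 1);      // points into the buffer, not copied
  *len = length;
  in->ptr += 1 + length;                   // advance past length byte + payload
  return 0;
}

int main()
{
  const unsigned char buf[] = { 3, 'a', 'b', 'c', 5, 'x' };   // 2nd entry truncated
  Slice s = { buf, buf + sizeof(buf) };
  const char *str = 0; unsigned len = 0;
  int rc = parse_lp_string(&s, &str, &len);
  printf("first : rc=%d value=%.*s\n", rc, (int)len, str);    // rc=0 value=abc
  rc = parse_lp_string(&s, &str, &len);
  printf("second: rc=%d (bytes missing)\n", rc);              // rc=4
  return 0;
}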
+ */ + if (status_vars_len > min(data_len, MAX_SIZE_LOG_EVENT_STATUS)) + { + DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0", + status_vars_len, data_len)); + query= 0; + DBUG_VOID_RETURN; + } data_len-= status_vars_len; DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u", (uint) status_vars_len)); @@ -1464,6 +1577,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, { switch (*pos++) { case Q_FLAGS2_CODE: + CHECK_SPACE(pos, end, 4); flags2_inited= 1; flags2= uint4korr(pos); DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", (ulong) flags2)); @@ -1474,6 +1588,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, #ifndef DBUG_OFF char buff[22]; #endif + CHECK_SPACE(pos, end, 8); sql_mode_inited= 1; sql_mode= (ulong) uint8korr(pos); // QQ: Fix when sql_mode is ulonglong DBUG_PRINT("info",("In Query_log_event, read sql_mode: %s", @@ -1482,15 +1597,24 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, break; } case Q_CATALOG_NZ_CODE: - get_str_len_and_pointer(&pos, &catalog, &catalog_len); + DBUG_PRINT("info", ("case Q_CATALOG_NZ_CODE; pos: 0x%lx; end: 0x%lx", + (ulong) pos, (ulong) end)); + if (get_str_len_and_pointer(&pos, &catalog, &catalog_len, end)) + { + DBUG_PRINT("info", ("query= 0")); + query= 0; + DBUG_VOID_RETURN; + } break; case Q_AUTO_INCREMENT: + CHECK_SPACE(pos, end, 4); auto_increment_increment= uint2korr(pos); auto_increment_offset= uint2korr(pos+2); pos+= 4; break; case Q_CHARSET_CODE: { + CHECK_SPACE(pos, end, 6); charset_inited= 1; memcpy(charset, pos, 6); pos+= 6; @@ -1498,20 +1622,29 @@ Query_log_event::Query_log_event(const char* buf, uint event_len, } case Q_TIME_ZONE_CODE: { - get_str_len_and_pointer(&pos, &time_zone_str, &time_zone_len); + if (get_str_len_and_pointer(&pos, &time_zone_str, &time_zone_len, end)) + { + DBUG_PRINT("info", ("Q_TIME_ZONE_CODE: query= 0")); + query= 0; + DBUG_VOID_RETURN; + } break; } case Q_CATALOG_CODE: /* for 5.0.x where 0<=x<=3 masters */ + CHECK_SPACE(pos, end, 1); if ((catalog_len= *pos)) catalog= (char*) pos+1; // Will be copied later + CHECK_SPACE(pos, end, catalog_len + 2); pos+= catalog_len+2; // leap over end 0 catalog_nz= 0; // catalog has end 0 in event break; case Q_LC_TIME_NAMES_CODE: + CHECK_SPACE(pos, end, 2); lc_time_names_number= uint2korr(pos); pos+= 2; break; case Q_CHARSET_DATABASE_CODE: + CHECK_SPACE(pos, end, 2); charset_database_number= uint2korr(pos); pos+= 2; break; @@ -2021,6 +2154,7 @@ end: */ thd->catalog= 0; thd->set_db(NULL, 0); /* will free the current database */ + DBUG_PRINT("info", ("end: query= 0")); thd->query= 0; // just to be sure thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); @@ -4964,12 +5098,13 @@ int Begin_load_query_log_event::get_create_or_append() const #ifndef MYSQL_CLIENT Execute_load_query_log_event:: Execute_load_query_log_event(THD* thd_arg, const char* query_arg, - ulong query_length_arg, uint fn_pos_start_arg, - uint fn_pos_end_arg, - enum_load_dup_handling dup_handling_arg, - bool using_trans, bool suppress_use): + ulong query_length_arg, uint fn_pos_start_arg, + uint fn_pos_end_arg, + enum_load_dup_handling dup_handling_arg, + bool using_trans, bool suppress_use, + THD::killed_state killed_err_arg): Query_log_event(thd_arg, query_arg, query_length_arg, using_trans, - suppress_use), + suppress_use, killed_err_arg), file_id(thd_arg->file_id), fn_pos_start(fn_pos_start_arg), fn_pos_end(fn_pos_end_arg), dup_handling(dup_handling_arg) { diff --git a/sql/log_event.h 
b/sql/log_event.h index 04aac5d08fc..5b065a33dd1 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1619,10 +1619,12 @@ public: #ifndef MYSQL_CLIENT Execute_load_query_log_event(THD* thd, const char* query_arg, - ulong query_length, uint fn_pos_start_arg, - uint fn_pos_end_arg, - enum_load_dup_handling dup_handling_arg, - bool using_trans, bool suppress_use); + ulong query_length, uint fn_pos_start_arg, + uint fn_pos_end_arg, + enum_load_dup_handling dup_handling_arg, + bool using_trans, bool suppress_use, + THD::killed_state + killed_err_arg= THD::KILLED_NO_VALUE); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index e50d1bd1654..47396748da6 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1224,6 +1224,7 @@ my_bool mysql_rm_tmp_tables(void); /* item_func.cc */ extern bool check_reserved_words(LEX_STRING *name); +extern enum_field_types agg_field_type(Item **items, uint nitems); /* strfunc.cc */ ulonglong find_set(TYPELIB *lib, const char *x, uint length, CHARSET_INFO *cs, @@ -1309,8 +1310,8 @@ extern bool opt_endinfo, using_udf_functions; extern my_bool locked_in_memory; extern bool opt_using_transactions, mysqld_embedded; extern bool using_update_log, opt_large_files, server_id_supplied; -extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log; -extern my_bool opt_log_queries_not_using_indexes; +extern bool opt_update_log, opt_bin_log, opt_error_log; +extern my_bool opt_log, opt_slow_log, opt_log_queries_not_using_indexes; extern bool opt_disable_networking, opt_skip_show_db; extern my_bool opt_character_set_client_handshake; extern bool volatile abort_loop, shutdown_in_progress, grant_option; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index c128aeaa47d..f8705a773c6 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -339,8 +339,8 @@ static my_bool opt_sync_bdb_logs; /* Global variables */ -bool opt_log, opt_update_log, opt_bin_log, opt_slow_log; -my_bool opt_log_queries_not_using_indexes= 0; +bool opt_update_log, opt_bin_log; +my_bool opt_log, opt_slow_log, opt_log_queries_not_using_indexes= 0; bool opt_error_log= IF_WIN(1,0); bool opt_disable_networking=0, opt_skip_show_db=0; my_bool opt_character_set_client_handshake= 1; @@ -2862,7 +2862,6 @@ static int init_common_variables(const char *conf_file_name, int argc, global_system_variables.collation_connection= default_charset_info; global_system_variables.character_set_results= default_charset_info; global_system_variables.character_set_client= default_charset_info; - global_system_variables.collation_connection= default_charset_info; if (!(character_set_filesystem= get_charset_by_csname(character_set_filesystem_name, @@ -6544,7 +6543,8 @@ static void mysql_init_variables(void) /* Things reset to zero */ opt_skip_slave_start= opt_reckless_slave = 0; mysql_home[0]= pidfile_name[0]= log_error_file[0]= 0; - opt_log= opt_update_log= opt_slow_log= 0; + opt_log= opt_slow_log= 0; + opt_update_log= 0; opt_bin_log= 0; opt_disable_networking= opt_skip_show_db=0; opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname= 0; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index c3e9b6638bd..facc8e09c27 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -2206,7 +2206,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records) if (param->table->file->primary_key_is_clustered()) { result= param->table->file->read_time(param->table->s->primary_key, - records, records); + (uint)records, 
records); } else { @@ -2414,7 +2414,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, /* Add Unique operations cost */ unique_calc_buff_size= - Unique::get_cost_calc_buff_size(non_cpk_scan_records, + Unique::get_cost_calc_buff_size((ulong)non_cpk_scan_records, param->table->file->ref_length, param->thd->variables.sortbuff_size); if (param->imerge_cost_buff_size < unique_calc_buff_size) @@ -2426,7 +2426,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, } imerge_cost += - Unique::get_use_cost(param->imerge_cost_buff, non_cpk_scan_records, + Unique::get_use_cost(param->imerge_cost_buff, (uint)non_cpk_scan_records, param->table->file->ref_length, param->thd->variables.sortbuff_size); DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)", @@ -2765,7 +2765,7 @@ ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param) info->is_covering= FALSE; info->index_scan_costs= 0.0; info->index_records= 0; - info->out_rows= param->table->file->records; + info->out_rows= (double) param->table->file->records; bitmap_clear_all(&info->covered_fields); return info; } @@ -6757,7 +6757,7 @@ int QUICK_RANGE_SELECT::reset() if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER) { mrange_bufsiz= min(multi_range_bufsiz, - (QUICK_SELECT_I::records + 1)* head->s->reclength); + ((uint)QUICK_SELECT_I::records + 1)* head->s->reclength); while (mrange_bufsiz && ! my_multi_malloc(MYF(MY_WME), @@ -8359,7 +8359,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, bool have_min, bool have_max, double *read_cost, ha_rows *records) { - uint table_records; + ha_rows table_records; uint num_groups; uint num_blocks; uint keys_per_block; @@ -8376,14 +8376,14 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, keys_per_block= (table->file->block_size / 2 / (index_info->key_length + table->file->ref_length) + 1); - num_blocks= (table_records / keys_per_block) + 1; + num_blocks= (uint)(table_records / keys_per_block) + 1; /* Compute the number of keys in a group. */ keys_per_group= index_info->rec_per_key[group_key_parts - 1]; if (keys_per_group == 0) /* If there is no statistics try to guess */ /* each group contains 10% of all records */ - keys_per_group= (table_records / 10) + 1; - num_groups= (table_records / keys_per_group) + 1; + keys_per_group= (uint)(table_records / 10) + 1; + num_groups= (uint)(table_records / keys_per_group) + 1; /* Apply the selectivity of the quick select for group prefixes. */ if (range_tree && (quick_prefix_records != HA_POS_ERROR)) @@ -8427,9 +8427,9 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, *records= num_groups; DBUG_PRINT("info", - ("table rows: %u keys/block: %u keys/group: %u result rows: %lu blocks: %u", - table_records, keys_per_block, keys_per_group, (ulong) *records, - num_blocks)); + ("table rows: %lu keys/block: %u keys/group: %u result rows: %lu blocks: %u", + (ulong)table_records, keys_per_block, keys_per_group, + (ulong) *records, num_blocks)); DBUG_VOID_RETURN; } diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index b9de54dbf5c..3fc62d05ae5 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -249,20 +249,20 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) Check if case 1 from above holds. If it does, we should read the skipped tuple. */ - if (ref.key_buff[prefix_len] == 1 && - /* + if (item_field->field->real_maybe_null() && + ref.key_buff[prefix_len] == 1 && + /* Last keypart (i.e. 
the argument to MIN) is set to NULL by find_key_for_maxmin only if all other keyparts are bound to constants in a conjunction of equalities. Hence, we can detect this by checking only if the last keypart is NULL. - */ + */ (error == HA_ERR_KEY_NOT_FOUND || key_cmp_if_same(table, ref.key_buff, ref.key, prefix_len))) { - DBUG_ASSERT(item_field->field->real_maybe_null()); error= table->file->index_read(table->record[0], ref.key_buff, - ref.key_length, + ref.key_length, HA_READ_KEY_EXACT); } } diff --git a/sql/protocol.cc b/sql/protocol.cc index ced6d78519a..2bdbe83eea1 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -824,6 +824,7 @@ bool Protocol_simple::store(const char *from, uint length, field_types[field_pos] == MYSQL_TYPE_DECIMAL || field_types[field_pos] == MYSQL_TYPE_BIT || field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL || + field_types[field_pos] == MYSQL_TYPE_NEWDATE || (field_types[field_pos] >= MYSQL_TYPE_ENUM && field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); field_pos++; diff --git a/sql/set_var.cc b/sql/set_var.cc index f4e80302ad7..1f88509d1b6 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -201,6 +201,7 @@ sys_var_key_cache_long sys_key_cache_age_threshold("key_cache_age_threshold", param_age_threshold)); sys_var_bool_ptr sys_local_infile("local_infile", &opt_local_infile); +sys_var_bool_const_ptr sys_log("log", &opt_log); sys_var_trust_routine_creators sys_trust_routine_creators("log_bin_trust_routine_creators", &trust_function_creators); @@ -213,6 +214,7 @@ sys_var_bool_ptr sys_var_thd_ulong sys_log_warnings("log_warnings", &SV::log_warnings); sys_var_thd_ulong sys_long_query_time("long_query_time", &SV::long_query_time); +sys_var_bool_const_ptr sys_log_slow("log_slow_queries", &opt_slow_log); sys_var_thd_bool sys_low_priority_updates("low_priority_updates", &SV::low_priority_updates, fix_low_priority_updates); @@ -665,9 +667,11 @@ sys_var *sys_variables[]= &sys_lc_time_names, &sys_license, &sys_local_infile, + &sys_log, &sys_log_binlog, &sys_log_off, &sys_log_queries_not_using_indexes, + &sys_log_slow, &sys_log_update, &sys_log_warnings, &sys_long_query_time, @@ -946,7 +950,7 @@ struct show_var_st init_vars[]= { #ifdef HAVE_MLOCKALL {"locked_in_memory", (char*) &locked_in_memory, SHOW_BOOL}, #endif - {"log", (char*) &opt_log, SHOW_BOOL}, + {sys_log.name, (char*) &sys_log, SHOW_SYS}, {"log_bin", (char*) &opt_bin_log, SHOW_BOOL}, {sys_trust_function_creators.name,(char*) &sys_trust_function_creators, SHOW_SYS}, {"log_error", (char*) log_error_file, SHOW_CHAR}, @@ -955,7 +959,7 @@ struct show_var_st init_vars[]= { #ifdef HAVE_REPLICATION {"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL}, #endif - {"log_slow_queries", (char*) &opt_slow_log, SHOW_BOOL}, + {sys_log_slow.name, (char*) &sys_log_slow, SHOW_SYS}, {sys_log_warnings.name, (char*) &sys_log_warnings, SHOW_SYS}, {sys_long_query_time.name, (char*) &sys_long_query_time, SHOW_SYS}, {sys_low_priority_updates.name, (char*) &sys_low_priority_updates, SHOW_SYS}, @@ -1040,6 +1044,9 @@ struct show_var_st init_vars[]= { {sys_readonly.name, (char*) &sys_readonly, SHOW_SYS}, {sys_read_rnd_buff_size.name,(char*) &sys_read_rnd_buff_size, SHOW_SYS}, #ifdef HAVE_REPLICATION + {"relay_log" , (char*) &opt_relay_logname, SHOW_CHAR_PTR}, + {"relay_log_index", (char*) &opt_relaylog_index_name, SHOW_CHAR_PTR}, + {"relay_log_info_file", (char*) &relay_log_info_file, SHOW_CHAR_PTR}, {sys_relay_log_purge.name, (char*) &sys_relay_log_purge, SHOW_SYS}, {"relay_log_space_limit", (char*) &relay_log_space_limit, 
SHOW_LONGLONG}, #endif @@ -1764,7 +1771,7 @@ bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) ¬_used)); if (error_len) { - strmake(buff, error, min(sizeof(buff), error_len)); + strmake(buff, error, min(sizeof(buff) - 1, error_len)); goto err; } } diff --git a/sql/set_var.h b/sql/set_var.h index d408384d5f0..99db38933c2 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -160,6 +160,28 @@ public: }; +class sys_var_bool_const_ptr : public sys_var +{ +public: + my_bool *value; + sys_var_bool_const_ptr(const char *name_arg, my_bool *value_arg) + :sys_var(name_arg),value(value_arg) + {} + bool check(THD *thd, set_var *var) + { + return 1; + } + bool update(THD *thd, set_var *var) + { + return 1; + } + SHOW_TYPE show_type() { return SHOW_MY_BOOL; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { return (byte*) value; } + bool check_update_type(Item_result type) { return 0; } + bool is_readonly() const { return 1; } +}; + class sys_var_str :public sys_var { public: diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index 709cd1fc0a9..9e6cf462113 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -5639,3 +5639,5 @@ ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT eng "Too high level of nesting for select" ER_NAME_BECOMES_EMPTY eng "Name '%-.64s' has become ''" +ER_AMBIGUOUS_FIELD_TERM + eng "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY" diff --git a/sql/slave.cc b/sql/slave.cc index c1b0d655bea..1509916fe91 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -3279,7 +3279,43 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) now the relay log starts with its Format_desc, has a Rotate etc). */ - DBUG_PRINT("info",("type_code=%d, server_id=%d",type_code,ev->server_id)); + DBUG_PRINT("info",("type_code: %d; server_id: %d; slave_skip_counter: %d", + type_code, ev->server_id, rli->slave_skip_counter)); + + /* + If the slave skip counter is positive, we still need to set the + OPTION_BEGIN flag correctly and not skip the log events that + start or end a transaction. If we do this, the slave will not + notice that it is inside a transaction, and happily start + executing from inside the transaction. + + Note that the code block below is strictly 5.0. + */ +#if MYSQL_VERSION_ID < 50100 + if (unlikely(rli->slave_skip_counter > 0)) + { + switch (type_code) + { + case QUERY_EVENT: + { + Query_log_event* const qev= (Query_log_event*) ev; + DBUG_PRINT("info", ("QUERY_EVENT { query: '%s', q_len: %u }", + qev->query, qev->q_len)); + if (memcmp("BEGIN", qev->query, qev->q_len+1) == 0) + thd->options|= OPTION_BEGIN; + else if (memcmp("COMMIT", qev->query, qev->q_len+1) == 0 || + memcmp("ROLLBACK", qev->query, qev->q_len+1) == 0) + thd->options&= ~OPTION_BEGIN; + } + break; + + case XID_EVENT: + DBUG_PRINT("info", ("XID_EVENT")); + thd->options&= ~OPTION_BEGIN; + break; + } + } +#endif if ((ev->server_id == (uint32) ::server_id && !replicate_same_server_id && @@ -3301,6 +3337,9 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) flush_relay_log_info(rli); } + DBUG_PRINT("info", ("thd->options: %s", + (thd->options & OPTION_BEGIN) ? 
"OPTION_BEGIN" : "")) + /* Protect against common user error of setting the counter to 1 instead of 2 while recovering from an insert which used auto_increment, @@ -3311,6 +3350,15 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) type_code == RAND_EVENT || type_code == USER_VAR_EVENT) && rli->slave_skip_counter == 1) && +#if MYSQL_VERSION_ID < 50100 + /* + Decrease the slave skip counter only if we are not inside + a transaction or the slave skip counter is more than + 1. The slave skip counter will be decreased from 1 to 0 + when reaching the final ROLLBACK, COMMIT, or XID_EVENT. + */ + (!(thd->options & OPTION_BEGIN) || rli->slave_skip_counter > 1) && +#endif /* The events from ourselves which have something to do with the relay log itself must be skipped, true, but they mustn't decrement @@ -3321,8 +3369,10 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) would not be skipped. */ !(ev->server_id == (uint32) ::server_id && - (type_code == ROTATE_EVENT || type_code == STOP_EVENT || - type_code == START_EVENT_V3 || type_code == FORMAT_DESCRIPTION_EVENT))) + (type_code == ROTATE_EVENT || + type_code == STOP_EVENT || + type_code == START_EVENT_V3 || + type_code == FORMAT_DESCRIPTION_EVENT))) --rli->slave_skip_counter; pthread_mutex_unlock(&rli->data_lock); delete ev; @@ -3530,7 +3580,7 @@ connected: on with life. */ thd->proc_info = "Registering slave on master"; - if (register_slave_on_master(mysql) || update_slave_list(mysql, mi)) + if (register_slave_on_master(mysql)) goto err; } @@ -4931,7 +4981,16 @@ Log_event* next_event(RELAY_LOG_INFO* rli) a new event and is queuing it; the false "0" will exist until SQL finishes executing the new event; it will be look abnormal only if the events have old timestamps (then you get "many", 0, "many"). - Transient phases like this can't really be fixed. + + Transient phases like this can be fixed with implemeting + Heartbeat event which provides the slave the status of the + master at time the master does not have any new update to send. + Seconds_Behind_Master would be zero only when master has no + more updates in binlog for slave. The heartbeat can be sent + in a (small) fraction of slave_net_timeout. Until it's done + rli->last_master_timestamp is temporarely (for time of + waiting for the following event) reset whenever EOF is + reached. 
*/ time_t save_timestamp= rli->last_master_timestamp; rli->last_master_timestamp= 0; diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 69dda9ec1e8..5759d619e7e 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -100,8 +100,9 @@ sp_get_item_value(THD *thd, Item *item, String *str) case REAL_RESULT: case INT_RESULT: case DECIMAL_RESULT: - return item->val_str(str); - + if (item->field_type() != MYSQL_TYPE_BIT) + return item->val_str(str); + else {/* Bit type is handled as binary string */} case STRING_RESULT: { String *result= item->val_str(str); diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc index ac7c46c9fe5..54e016f6099 100644 --- a/sql/sp_rcontext.cc +++ b/sql/sp_rcontext.cc @@ -503,14 +503,14 @@ sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars) */ Item_cache * -sp_rcontext::create_case_expr_holder(THD *thd, Item_result result_type) +sp_rcontext::create_case_expr_holder(THD *thd, const Item *item) { Item_cache *holder; Query_arena current_arena; thd->set_n_backup_active_arena(thd->spcont->callers_arena, ¤t_arena); - holder= Item_cache::get_cache(result_type); + holder= Item_cache::get_cache(item); thd->restore_active_arena(thd->spcont->callers_arena, ¤t_arena); @@ -559,7 +559,7 @@ sp_rcontext::set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr case_expr_item->result_type()) { m_case_expr_holders[case_expr_id]= - create_case_expr_holder(thd, case_expr_item->result_type()); + create_case_expr_holder(thd, case_expr_item); } m_case_expr_holders[case_expr_id]->store(case_expr_item); diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h index 0104b71a648..43102cfeeb2 100644 --- a/sql/sp_rcontext.h +++ b/sql/sp_rcontext.h @@ -261,7 +261,7 @@ private: bool init_var_table(THD *thd); bool init_var_items(); - Item_cache *create_case_expr_holder(THD *thd, Item_result result_type); + Item_cache *create_case_expr_holder(THD *thd, const Item *item); int set_variable(THD *thd, Field *field, Item **value); }; // class sp_rcontext : public Sql_alloc diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 213e460f699..134541368e9 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -311,7 +311,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) continue; } - const char *password= get_field(&mem, table->field[2]); + const char *password= get_field(thd->mem_root, table->field[2]); uint password_len= password ? strlen(password) : 0; set_user_salt(&user, password, password_len); if (user.salt_len == 0 && password_len != 0) @@ -364,7 +364,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) /* Starting from 4.0.2 we have more fields */ if (table->s->fields >= 31) { - char *ssl_type=get_field(&mem, table->field[next_field++]); + char *ssl_type=get_field(thd->mem_root, table->field[next_field++]); if (!ssl_type) user.ssl_type=SSL_TYPE_NONE; else if (!strcmp(ssl_type, "ANY")) @@ -378,11 +378,11 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) user.x509_issuer= get_field(&mem, table->field[next_field++]); user.x509_subject= get_field(&mem, table->field[next_field++]); - char *ptr = get_field(&mem, table->field[next_field++]); + char *ptr = get_field(thd->mem_root, table->field[next_field++]); user.user_resource.questions=ptr ? atoi(ptr) : 0; - ptr = get_field(&mem, table->field[next_field++]); + ptr = get_field(thd->mem_root, table->field[next_field++]); user.user_resource.updates=ptr ? atoi(ptr) : 0; - ptr = get_field(&mem, table->field[next_field++]); + ptr = get_field(thd->mem_root, table->field[next_field++]); user.user_resource.conn_per_hour= ptr ? 
atoi(ptr) : 0; if (user.user_resource.questions || user.user_resource.updates || user.user_resource.conn_per_hour) @@ -391,7 +391,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) if (table->s->fields >= 36) { /* Starting from 5.0.3 we have max_user_connections field */ - ptr= get_field(&mem, table->field[next_field++]); + ptr= get_field(thd->mem_root, table->field[next_field++]); user.user_resource.user_conn= ptr ? atoi(ptr) : 0; } else @@ -3835,50 +3835,83 @@ bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, } -bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, - const char* db_name, const char *table_name, - Field_iterator *fields) +/** + @brief check if a query can access a set of columns + + @param thd the current thread + @param want_access_arg the privileges requested + @param fields an iterator over the fields of a table reference. + @return Operation status + @retval 0 Success + @retval 1 Falure + @details This function walks over the columns of a table reference + The columns may originate from different tables, depending on the kind of + table reference, e.g. join. + For each table it will retrieve the grant information and will use it + to check the required access privileges for the fields requested from it. +*/ +bool check_grant_all_columns(THD *thd, ulong want_access_arg, + Field_iterator_table_ref *fields) { Security_context *sctx= thd->security_ctx; - GRANT_TABLE *grant_table; - GRANT_COLUMN *grant_column; + ulong want_access= want_access_arg; + const char *table_name= NULL; - want_access &= ~grant->privilege; - if (!want_access) - return 0; // Already checked - if (!grant_option) - goto err2; + if (grant_option) + { + const char* db_name; + GRANT_INFO *grant; + /* Initialized only to make gcc happy */ + GRANT_TABLE *grant_table= NULL; - rw_rdlock(&LOCK_grant); + rw_rdlock(&LOCK_grant); - /* reload table if someone has modified any grants */ + for (; !fields->end_of_fields(); fields->next()) + { + const char *field_name= fields->name(); - if (grant->version != grant_version) - { - grant->grant_table= - table_hash_search(sctx->host, sctx->ip, db_name, - sctx->priv_user, - table_name, 0); /* purecov: inspected */ - grant->version= grant_version; /* purecov: inspected */ - } - /* The following should always be true */ - if (!(grant_table= grant->grant_table)) - goto err; /* purecov: inspected */ + if (table_name != fields->table_name()) + { + table_name= fields->table_name(); + db_name= fields->db_name(); + grant= fields->grant(); + /* get a fresh one for each table */ + want_access= want_access_arg & ~grant->privilege; + if (want_access) + { + /* reload table if someone has modified any grants */ + if (grant->version != grant_version) + { + grant->grant_table= + table_hash_search(sctx->host, sctx->ip, db_name, + sctx->priv_user, + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ + } - for (; !fields->end_of_fields(); fields->next()) - { - const char *field_name= fields->name(); - grant_column= column_hash_search(grant_table, field_name, - (uint) strlen(field_name)); - if (!grant_column || (~grant_column->rights & want_access)) - goto err; - } - rw_unlock(&LOCK_grant); - return 0; + grant_table= grant->grant_table; + DBUG_ASSERT (grant_table); + } + } + + if (want_access) + { + GRANT_COLUMN *grant_column= + column_hash_search(grant_table, field_name, + (uint) strlen(field_name)); + if (!grant_column || (~grant_column->rights & want_access)) + goto err; + } + } + 
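The rewritten check_grant_all_columns() above iterates a column list that may span several tables (for example SELECT * over a join) and re-fetches the cached table grant only when the current table changes; column privileges are then checked per field. A simplified, self-contained version of that control flow (TableGrant, ColumnRef, check_all_columns and the lookup maps are invented stand-ins, not the server's ACL cache):

// Sketch: check column privileges across a multi-table field list,
// reloading per-table grant info only when the table changes.
#include <cstdio>
#include <map>
#include <set>
#include <string>
#include <vector>

struct TableGrant { unsigned table_privs; std::set<std::string> granted_columns; };
struct ColumnRef  { std::string table; std::string column; };

static const unsigned SELECT_ACL = 1;

// Returns true if some column is not covered by table- or column-level grants.
static bool check_all_columns(const std::map<std::string, TableGrant> &grants,
                              const std::vector<ColumnRef> &fields,
                              unsigned want_access_arg)
{
  const TableGrant *grant = 0;
  std::string current_table;
  unsigned want_access = want_access_arg;

  for (size_t i = 0; i < fields.size(); i++)
  {
    if (grant == 0 || fields[i].table != current_table)     // switched tables
    {
      current_table = fields[i].table;
      std::map<std::string, TableGrant>::const_iterator it =
        grants.find(current_table);
      if (it == grants.end())
        return true;                        // no grants at all for this table
      grant = &it->second;
      // Table-level privileges may already satisfy the request.
      want_access = want_access_arg & ~grant->table_privs;
    }
    if (want_access && grant->granted_columns.count(fields[i].column) == 0)
      return true;                          // this column is not granted
  }
  return false;                             // everything allowed
}

int main()
{
  std::map<std::string, TableGrant> grants;
  grants["t1"].table_privs = SELECT_ACL;    // table-wide SELECT
  grants["t2"].table_privs = 0;             // only column grants on t2
  grants["t2"].granted_columns.insert("a");

  ColumnRef q[] = { {"t1", "x"}, {"t2", "a"}, {"t2", "b"} };
  std::vector<ColumnRef> fields(q, q + 3);
  printf("denied: %s\n",
         check_all_columns(grants, fields, SELECT_ACL) ? "yes" : "no");
  return 0;
}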
rw_unlock(&LOCK_grant); + return 0; err: - rw_unlock(&LOCK_grant); -err2: + rw_unlock(&LOCK_grant); + } + else + table_name= fields->table_name(); + char command[128]; get_privilege_desc(command, sizeof(command), want_access); my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), @@ -4865,6 +4898,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, byte user_key[MAX_KEY_LENGTH]; uint key_prefix_length; DBUG_ENTER("handle_grant_table"); + THD *thd= current_thd; if (! table_no) // mysql.user table { @@ -4932,17 +4966,18 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, DBUG_PRINT("info",("scan error: %d", error)); continue; } - if (! (host= get_field(&mem, host_field))) + if (! (host= get_field(thd->mem_root, host_field))) host= ""; - if (! (user= get_field(&mem, user_field))) + if (! (user= get_field(thd->mem_root, user_field))) user= ""; #ifdef EXTRA_DEBUG DBUG_PRINT("loop",("scan fields: '%s'@'%s' '%s' '%s' '%s'", user, host, - get_field(&mem, table->field[1]) /*db*/, - get_field(&mem, table->field[3]) /*table*/, - get_field(&mem, table->field[4]) /*column*/)); + get_field(thd->mem_root, table->field[1]) /*db*/, + get_field(thd->mem_root, table->field[3]) /*table*/, + get_field(thd->mem_root, + table->field[4]) /*column*/)); #endif if (strcmp(user_str, user) || my_strcasecmp(system_charset_info, host_str, host)) diff --git a/sql/sql_acl.h b/sql/sql_acl.h index d08f2663af5..b2007ccdf47 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -205,9 +205,8 @@ bool check_grant_column (THD *thd, GRANT_INFO *grant, const char *name, uint length, Security_context *sctx); bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, const char *name, uint length); -bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, - const char* db_name, const char *table_name, - Field_iterator *fields); +bool check_grant_all_columns(THD *thd, ulong want_access, + Field_iterator_table_ref *fields); bool check_grant_routine(THD *thd, ulong want_access, TABLE_LIST *procs, bool is_proc, bool no_error); bool check_grant_db(THD *thd,const char *db); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 905190cb9cd..fd921be1ecf 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -3968,7 +3968,9 @@ find_field_in_tables(THD *thd, Item_ident *item, { Field *cur_field= find_field_in_table_ref(thd, cur_table, name, length, item->name, db, table_name, ref, - check_privileges, + (thd->lex->sql_command == + SQLCOM_SHOW_FIELDS) + ? 
false : check_privileges, allow_rowid, &(item->cached_field_index), register_tree_change, @@ -4162,7 +4164,8 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, if (item_field->field_name && item_field->table_name && !my_strcasecmp(system_charset_info, item_field->field_name, field_name) && - !strcmp(item_field->table_name, table_name) && + !my_strcasecmp(table_alias_charset, item_field->table_name, + table_name) && (!db_name || (item_field->db_name && !strcmp(item_field->db_name, db_name)))) { @@ -5424,10 +5427,7 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, !any_privileges) { field_iterator.set(tables); - if (check_grant_all_columns(thd, SELECT_ACL, field_iterator.grant(), - field_iterator.db_name(), - field_iterator.table_name(), - &field_iterator)) + if (check_grant_all_columns(thd, SELECT_ACL, &field_iterator)) DBUG_RETURN(TRUE); } #endif diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 9a29350880e..8c868971911 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1032,12 +1032,14 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) Query_cache_block_table *block_table, *block_table_end; ulong tot_length; Query_cache_query_flags flags; +#ifndef __WIN__ const uint spin_treshold= 50000; const double lock_time_treshold= 0.1; /* Time in seconds */ uint spin_count= 0; int lock_status= 0; ulong new_time= 0; ulong stop_time= 0; +#endif DBUG_ENTER("Query_cache::send_result_to_client"); @@ -1085,6 +1087,9 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) } } +#ifdef __WIN__ + STRUCT_LOCK(&structure_guard_mutex); +#else stop_time= my_clock()+(ulong)lock_time_treshold*CLOCKS_PER_SEC; while ((lock_status= pthread_mutex_trylock(&structure_guard_mutex)) == EBUSY && spin_count < spin_treshold @@ -1107,6 +1112,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) thd->lex->safe_to_cache_query= FALSE; goto err; } +#endif if (query_cache_size == 0 || flush_in_progress) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 4a98a044e25..ef199b6f883 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1194,6 +1194,7 @@ int select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u) { bool blob_flag=0; + bool string_results= FALSE, non_string_results= FALSE; unit= u; if ((uint) strlen(exchange->file_name) + NAME_LEN >= FN_REFLEN) strmake(path,exchange->file_name,FN_REFLEN-1); @@ -1211,13 +1212,18 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u) blob_flag=1; break; } + if (item->result_type() == STRING_RESULT) + string_results= TRUE; + else + non_string_results= TRUE; } } field_term_length=exchange->field_term->length(); + field_term_char= field_term_length ? (*exchange->field_term)[0] : INT_MAX; if (!exchange->line_term->length()) exchange->line_term=exchange->field_term; // Use this if it exists field_sep_char= (exchange->enclosed->length() ? (*exchange->enclosed)[0] : - field_term_length ? (*exchange->field_term)[0] : INT_MAX); + field_term_char); escape_char= (exchange->escaped->length() ? 
(*exchange->escaped)[0] : -1); is_ambiguous_field_sep= test(strchr(ESCAPE_CHARS, field_sep_char)); is_unsafe_field_sep= test(strchr(NUMERIC_CHARS, field_sep_char)); @@ -1229,12 +1235,25 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u) exchange->opt_enclosed=1; // A little quicker loop fixed_row_size= (!field_term_length && !exchange->enclosed->length() && !blob_flag); + if ((is_ambiguous_field_sep && exchange->enclosed->is_empty() && + (string_results || is_unsafe_field_sep)) || + (exchange->opt_enclosed && non_string_results && + field_term_length && strchr(NUMERIC_CHARS, field_term_char))) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_AMBIGUOUS_FIELD_TERM, ER(ER_AMBIGUOUS_FIELD_TERM)); + is_ambiguous_field_term= TRUE; + } + else + is_ambiguous_field_term= FALSE; + return 0; } #define NEED_ESCAPING(x) ((int) (uchar) (x) == escape_char || \ - (int) (uchar) (x) == field_sep_char || \ + (enclosed ? (int) (uchar) (x) == field_sep_char \ + : (int) (uchar) (x) == field_term_char) || \ (int) (uchar) (x) == line_sep_char || \ !(x)) @@ -1263,8 +1282,10 @@ bool select_export::send_data(List<Item> &items) while ((item=li++)) { Item_result result_type=item->result_type(); + bool enclosed = (exchange->enclosed->length() && + (!exchange->opt_enclosed || result_type == STRING_RESULT)); res=item->str_result(&tmp); - if (res && (!exchange->opt_enclosed || result_type == STRING_RESULT)) + if (res && enclosed) { if (my_b_write(&cache,(byte*) exchange->enclosed->ptr(), exchange->enclosed->length())) @@ -1355,11 +1376,16 @@ bool select_export::send_data(List<Item> &items) DBUG_ASSERT before the loop makes that sure. */ - if (NEED_ESCAPING(*pos) || - (check_second_byte && - my_mbcharlen(character_set_client, (uchar) *pos) == 2 && - pos + 1 < end && - NEED_ESCAPING(pos[1]))) + if ((NEED_ESCAPING(*pos) || + (check_second_byte && + my_mbcharlen(character_set_client, (uchar) *pos) == 2 && + pos + 1 < end && + NEED_ESCAPING(pos[1]))) && + /* + Don't escape field_term_char by doubling - doubling is only + valid for ENCLOSED BY characters: + */ + (enclosed || !is_ambiguous_field_term || *pos != field_term_char)) { char tmp_buff[2]; tmp_buff[0]= ((int) *pos == field_sep_char && @@ -1398,7 +1424,7 @@ bool select_export::send_data(List<Item> &items) goto err; } } - if (res && (!exchange->opt_enclosed || result_type == STRING_RESULT)) + if (res && enclosed) { if (my_b_write(&cache, (byte*) exchange->enclosed->ptr(), exchange->enclosed->length())) @@ -1527,7 +1553,7 @@ bool select_max_min_finder_subselect::send_data(List<Item> &items) { if (!cache) { - cache= Item_cache::get_cache(val_item->result_type()); + cache= Item_cache::get_cache(val_item); switch (val_item->result_type()) { case REAL_RESULT: diff --git a/sql/sql_class.h b/sql/sql_class.h index 4fac86dc405..9f294d09d5a 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -159,7 +159,13 @@ typedef struct st_log_info my_off_t pos; bool fatal; // if the purge happens to give us a negative offset pthread_mutex_t lock; - st_log_info():fatal(0) { pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST);} + st_log_info() + : index_file_offset(0), index_file_start_offset(0), + pos(0), fatal(0) + { + log_file_name[0] = '\0'; + pthread_mutex_init(&lock, MY_MUTEX_INIT_FAST); + } ~st_log_info() { pthread_mutex_destroy(&lock);} } LOG_INFO; @@ -1368,9 +1374,20 @@ public: ulonglong limit_found_rows; ulonglong options; /* Bitmap of states */ - longlong row_count_func; /* For the ROW_COUNT() function */ - ha_rows cuted_fields, - sent_row_count, examined_row_count; 
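The select_export changes above come down to one decision per byte: escape it if it is the escape character, the line terminator or NUL, and otherwise escape the enclosure character when the value is enclosed or the first FIELDS TERMINATED BY character when it is not. A compact model of that predicate (needs_escaping and ExportFormat are illustrations only; the real NEED_ESCAPING macro additionally handles multi-byte characters and the is_ambiguous_field_term doubling rule):

// Sketch: which bytes must be escaped when writing one field of
// SELECT ... INTO OUTFILE, depending on whether the field is enclosed.
#include <cstdio>

struct ExportFormat {
  int escape_char;      // e.g. '\\'
  int enclosed_char;    // first char of ENCLOSED BY, or -1 if none
  int field_term_char;  // first char of FIELDS TERMINATED BY
  int line_term_char;   // first char of LINES TERMINATED BY
};

static bool needs_escaping(const ExportFormat &f, bool enclosed, unsigned char c)
{
  if (c == '\0' || (int)c == f.escape_char || (int)c == f.line_term_char)
    return true;
  // When the value is enclosed, only the enclosure char is ambiguous;
  // otherwise the field terminator itself must be protected.
  return enclosed ? (int)c == f.enclosed_char
                  : (int)c == f.field_term_char;
}

int main()
{
  ExportFormat f = { '\\', '"', ',', '\n' };
  printf("',' enclosed:   %d\n", needs_escaping(f, true,  ','));  // 0
  printf("',' unenclosed: %d\n", needs_escaping(f, false, ','));  // 1
  printf("'\"' enclosed:   %d\n", needs_escaping(f, true,  '"')); // 1
  return 0;
}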
+ longlong row_count_func; /* For the ROW_COUNT() function */ + ha_rows cuted_fields; + + /* + number of rows we actually sent to the client, including "synthetic" + rows in ROLLUP etc. + */ + ha_rows sent_row_count; + + /* + number of rows we read, sent or not, including in create_sort_index() + */ + ha_rows examined_row_count; + /* The set of those tables whose fields are referenced in all subqueries of the query. @@ -1403,7 +1420,11 @@ public: /* Statement id is thread-wide. This counter is used to generate ids */ ulong statement_id_counter; ulong rand_saved_seed1, rand_saved_seed2; - ulong row_count; // Row counter, mainly for errors and warnings + /* + Row counter, mainly for errors and warnings. Not increased in + create_sort_index(); may differ from examined_row_count. + */ + ulong row_count; long dbug_thread_id; pthread_t real_id; uint tmp_table, global_read_lock; @@ -1992,6 +2013,7 @@ public: class select_export :public select_to_file { uint field_term_length; int field_sep_char,escape_char,line_sep_char; + int field_term_char; // first char of FIELDS TERMINATED BY or MAX_INT /* The is_ambiguous_field_sep field is true if a value of the field_sep_char field is one of the 'n', 't', 'r' etc characters @@ -1999,6 +2021,12 @@ class select_export :public select_to_file { */ bool is_ambiguous_field_sep; /* + The is_ambiguous_field_term is true if field_sep_char contains the first + char of the FIELDS TERMINATED BY (ENCLOSED BY is empty), and items can + contain this character. + */ + bool is_ambiguous_field_term; + /* The is_unsafe_field_sep field is true if a value of the field_sep_char field is one of the '0'..'9', '+', '-', '.' and 'e' characters (see the NUMERIC_CHARS constant value). @@ -2029,7 +2057,7 @@ class select_insert :public select_result_interceptor { ulonglong last_insert_id; COPY_INFO info; bool insert_into_view; - + bool is_bulk_insert_mode; select_insert(TABLE_LIST *table_list_par, TABLE *table_par, List<Item> *fields_par, List<Item> *update_fields, List<Item> *update_values, @@ -2350,6 +2378,11 @@ class multi_delete :public select_result_interceptor /* True if at least one table we delete from is not transactional */ bool normal_tables; bool delete_while_scanning; + /* + error handling (rollback and binlogging) can happen in send_eof() + so that afterward send_error() needs to find out that. + */ + bool error_handled; public: multi_delete(TABLE_LIST *dt, uint num_of_tables); @@ -2385,6 +2418,11 @@ class multi_update :public select_result_interceptor /* True if the update operation has made a change in a transactional table */ bool transactional_tables; bool ignore; + /* + error handling (rollback and binlogging) can happen in send_eof() + so that afterward send_error() needs to find out that. 
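The error_handled member documented above exists because multi-table DELETE/UPDATE may roll back and binlog inside send_eof(); a send_error() call that arrives afterwards must then skip that cleanup. A minimal shape of the handshake (Result, send_eof and send_error below are generic stand-ins, not the server classes):

// Sketch: let send_eof() mark errors it has already handled so that a
// later send_error() does not roll back or binlog a second time.
#include <cstdio>

struct Result {
  bool error_handled;
  Result() : error_handled(false) {}

  bool send_eof()
  {
    bool local_error = true;           // pretend the final write failed
    if (local_error)
    {
      printf("send_eof: rollback + binlog once\n");
      error_handled = true;            // remember the cleanup is done
    }
    return local_error;
  }

  void send_error(const char *msg)
  {
    printf("send_error: %s\n", msg);
    if (error_handled)
      return;                          // cleanup already done in send_eof()
    printf("send_error: rollback + binlog\n");
  }
};

int main()
{
  Result r;
  if (r.send_eof())
    r.send_error("Execution of the query failed");
  return 0;
}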
+ */ + bool error_handled; public: multi_update(TABLE_LIST *ut, TABLE_LIST *leaves_list, diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 56edfa6c5b2..a28a39a769d 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -38,6 +38,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ha_rows deleted; uint usable_index= MAX_KEY; SELECT_LEX *select_lex= &thd->lex->select_lex; + THD::killed_state killed_status= THD::NOT_KILLED; DBUG_ENTER("mysql_delete"); if (open_and_lock_tables(thd, table_list)) @@ -280,8 +281,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, else table->file->unlock_row(); // Row failed selection, release lock on it } - if (thd->killed && !error) - error= 1; // Aborted + killed_status= thd->killed; + error= (killed_status == THD::NOT_KILLED)? error : 1; thd->proc_info="end"; end_read_record(&info); free_io_cache(table); // Will not do any harm @@ -319,14 +320,14 @@ cleanup: thd->transaction.stmt.modified_non_trans_table= TRUE; /* See similar binlogging code in sql_update.cc, for comments */ - if ((error < 0) || (deleted && !transactional_table)) + if ((error < 0) || thd->transaction.stmt.modified_non_trans_table) { if (mysql_bin_log.is_open()) { if (error < 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - transactional_table, FALSE); + transactional_table, FALSE, killed_status); if (mysql_bin_log.write(&qinfo) && transactional_table) error=1; } @@ -504,7 +505,7 @@ bool mysql_multi_delete_prepare(THD *thd) multi_delete::multi_delete(TABLE_LIST *dt, uint num_of_tables_arg) : delete_tables(dt), deleted(0), found(0), num_of_tables(num_of_tables_arg), error(0), - do_delete(0), transactional_tables(0), normal_tables(0) + do_delete(0), transactional_tables(0), normal_tables(0), error_handled(0) { tempfiles= (Unique **) sql_calloc(sizeof(Unique *) * num_of_tables); } @@ -685,12 +686,14 @@ void multi_delete::send_error(uint errcode,const char *err) /* First send error what ever it is ... */ my_message(errcode, err, MYF(0)); - /* If nothing deleted return */ - if (!deleted) + /* the error was handled or nothing deleted and no side effects return */ + if (error_handled || + !thd->transaction.stmt.modified_non_trans_table && !deleted) DBUG_VOID_RETURN; /* Something already deleted so we have to invalidate cache */ - query_cache_invalidate3(thd, delete_tables, 1); + if (deleted) + query_cache_invalidate3(thd, delete_tables, 1); /* If rows from the first table only has been deleted and it is @@ -710,12 +713,30 @@ void multi_delete::send_error(uint errcode,const char *err) */ error= 1; send_eof(); + DBUG_ASSERT(error_handled); + DBUG_VOID_RETURN; } - DBUG_ASSERT(!normal_tables || !deleted || thd->transaction.stmt.modified_non_trans_table); + + if (thd->transaction.stmt.modified_non_trans_table) + { + /* + there is only side effects; to binlog with the error + */ + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, + transactional_tables, FALSE); + mysql_bin_log.write(&qinfo); + } + thd->transaction.all.modified_non_trans_table= true; + } + DBUG_ASSERT(!normal_tables || !deleted || + thd->transaction.stmt.modified_non_trans_table); DBUG_VOID_RETURN; } + /* Do delete from other tables. 
Returns values: @@ -798,6 +819,7 @@ int multi_delete::do_deletes() bool multi_delete::send_eof() { + THD::killed_state killed_status= THD::NOT_KILLED; thd->proc_info="deleting from reference tables"; /* Does deletes for the last n - 1 tables, returns 0 if ok */ @@ -805,7 +827,7 @@ bool multi_delete::send_eof() /* compute a total error to know if something failed */ local_error= local_error || error; - + killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed; /* reset used flags */ thd->proc_info="end"; @@ -817,21 +839,24 @@ bool multi_delete::send_eof() { query_cache_invalidate3(thd, delete_tables, 1); } - if ((local_error == 0) || (deleted && normal_tables)) + DBUG_ASSERT(!normal_tables || !deleted || + thd->transaction.stmt.modified_non_trans_table); + if ((local_error == 0) || thd->transaction.stmt.modified_non_trans_table) { if (mysql_bin_log.is_open()) { if (local_error == 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - transactional_tables, FALSE); + transactional_tables, FALSE, killed_status); if (mysql_bin_log.write(&qinfo) && !normal_tables) local_error=1; // Log write failed: roll back the SQL statement } if (thd->transaction.stmt.modified_non_trans_table) thd->transaction.all.modified_non_trans_table= TRUE; } - DBUG_ASSERT(!normal_tables || !deleted || thd->transaction.stmt.modified_non_trans_table); + if (local_error != 0) + error_handled= TRUE; // to force early leave from ::send_error() /* Commit or rollback the current SQL statement */ if (transactional_tables) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index f07af393070..65d8bb23706 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -189,11 +189,9 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list, #ifndef NO_EMBEDDED_ACCESS_CHECKS if (grant_option) { - Field_iterator_table field_it; - field_it.set_table(table); - if (check_grant_all_columns(thd, INSERT_ACL, &table->grant, - table->s->db, table->s->table_name, - &field_it)) + Field_iterator_table_ref field_it; + field_it.set(table_list); + if (check_grant_all_columns(thd, INSERT_ACL, &field_it)) return -1; } #endif @@ -866,8 +864,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, transactional_table= table->file->has_transactions(); - if ((changed= (info.copied || info.deleted || info.updated)) || - was_insert_delayed) + if ((changed= (info.copied || info.deleted || info.updated))) { /* Invalidate the table in the query cache if something changed. @@ -876,46 +873,47 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, */ if (changed) query_cache_invalidate3(thd, table_list, 1); - if (error <= 0 || !transactional_table) + } + if (changed && error <= 0 || thd->transaction.stmt.modified_non_trans_table + || was_insert_delayed) + { + if (mysql_bin_log.is_open()) { - if (mysql_bin_log.is_open()) + if (error <= 0) { - if (error <= 0) - { - /* - [Guilhem wrote] Temporary errors may have filled - thd->net.last_error/errno. For example if there has - been a disk full error when writing the row, and it was - MyISAM, then thd->net.last_error/errno will be set to - "disk full"... and the my_pwrite() will wait until free - space appears, and so when it finishes then the - write_row() was entirely successful - */ - /* todo: consider removing */ - thd->clear_error(); - } - /* bug#22725: - - A query which per-row-loop can not be interrupted with - KILLED, like INSERT, and that does not invoke stored - routines can be binlogged with neglecting the KILLED error. 
- - If there was no error (error == zero) until after the end of - inserting loop the KILLED flag that appeared later can be - disregarded since previously possible invocation of stored - routines did not result in any error due to the KILLED. In - such case the flag is ignored for constructing binlog event. + /* + [Guilhem wrote] Temporary errors may have filled + thd->net.last_error/errno. For example if there has + been a disk full error when writing the row, and it was + MyISAM, then thd->net.last_error/errno will be set to + "disk full"... and the my_pwrite() will wait until free + space appears, and so when it finishes then the + write_row() was entirely successful */ - Query_log_event qinfo(thd, thd->query, thd->query_length, - transactional_table, FALSE, - (error>0) ? thd->killed : THD::NOT_KILLED); - DBUG_ASSERT(thd->killed != THD::KILL_BAD_DATA || error > 0); - if (mysql_bin_log.write(&qinfo) && transactional_table) - error=1; + /* todo: consider removing */ + thd->clear_error(); } - if (thd->transaction.stmt.modified_non_trans_table) - thd->transaction.all.modified_non_trans_table= TRUE; + /* bug#22725: + + A query which per-row-loop can not be interrupted with + KILLED, like INSERT, and that does not invoke stored + routines can be binlogged with neglecting the KILLED error. + + If there was no error (error == zero) until after the end of + inserting loop the KILLED flag that appeared later can be + disregarded since previously possible invocation of stored + routines did not result in any error due to the KILLED. In + such case the flag is ignored for constructing binlog event. + */ + Query_log_event qinfo(thd, thd->query, thd->query_length, + transactional_table, FALSE, + (error>0) ? thd->killed : THD::NOT_KILLED); + DBUG_ASSERT(thd->killed != THD::KILL_BAD_DATA || error > 0); + if (mysql_bin_log.write(&qinfo) && transactional_table) + error=1; } + if (thd->transaction.stmt.modified_non_trans_table) + thd->transaction.all.modified_non_trans_table= TRUE; } DBUG_ASSERT(transactional_table || !changed || thd->transaction.stmt.modified_non_trans_table); @@ -2647,7 +2645,8 @@ select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par, bool ignore_check_option_errors) :table_list(table_list_par), table(table_par), fields(fields_par), last_insert_id(0), - insert_into_view(table_list_par && table_list_par->view != 0) + insert_into_view(table_list_par && table_list_par->view != 0), + is_bulk_insert_mode(FALSE) { bzero((char*) &info,sizeof(info)); info.handle_duplicates= duplic; @@ -2832,8 +2831,11 @@ int select_insert::prepare2(void) { DBUG_ENTER("select_insert::prepare2"); if (thd->lex->current_select->options & OPTION_BUFFER_RESULT && - !thd->prelocked_mode) + !thd->prelocked_mode && !is_bulk_insert_mode) + { table->file->start_bulk_insert((ha_rows) 0); + is_bulk_insert_mode= TRUE; + } DBUG_RETURN(0); } @@ -2936,9 +2938,11 @@ bool select_insert::send_eof() { int error, error2; bool changed, transactional_table= table->file->has_transactions(); + THD::killed_state killed_status= thd->killed; DBUG_ENTER("select_insert::send_eof"); error= (!thd->prelocked_mode) ? 
table->file->end_bulk_insert():0; + is_bulk_insert_mode= FALSE; table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); @@ -2964,7 +2968,7 @@ bool select_insert::send_eof() if (!error) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - transactional_table, FALSE); + transactional_table, FALSE, killed_status); mysql_bin_log.write(&qinfo); } if ((error2=ha_autocommit_or_rollback(thd,error)) && ! error) @@ -3001,6 +3005,7 @@ void select_insert::abort() */ DBUG_VOID_RETURN; } + changed= (info.copied || info.deleted || info.updated); transactional_table= table->file->has_transactions(); if (!thd->prelocked_mode) table->file->end_bulk_insert(); @@ -3010,8 +3015,7 @@ void select_insert::abort() error while inserting into a MyISAM table) we must write to the binlog (and the error code will make the slave stop). */ - if ((changed= info.copied || info.deleted || info.updated) && - !transactional_table) + if (thd->transaction.stmt.modified_non_trans_table) { if (last_insert_id) thd->insert_id(last_insert_id); // For binary log @@ -3129,7 +3133,10 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, create_field *cr_field; Field *field, *def_field; if (item->type() == Item::FUNC_ITEM) - field= item->tmp_table_field(&tmp_table); + if (item->result_type() != STRING_RESULT) + field= item->tmp_table_field(&tmp_table); + else + field= item->tmp_table_field_from_field_type(&tmp_table); else field= create_tmp_field(thd, &tmp_table, item, item->type(), (Item ***) 0, &tmp_field, &def_field, 0, 0, 0, 0, @@ -3271,7 +3278,10 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) if (info.handle_duplicates == DUP_UPDATE) table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE); if (!thd->prelocked_mode) + { table->file->start_bulk_insert((ha_rows) 0); + is_bulk_insert_mode= TRUE; + } thd->abort_on_warning= (!info.ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 55cbbf1c540..d687ceff393 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -85,7 +85,8 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, #ifndef EMBEDDED_LIBRARY static bool write_execute_load_query_log_event(THD *thd, bool duplicates, bool ignore, - bool transactional_table); + bool transactional_table, + THD::killed_state killed_status); #endif /* EMBEDDED_LIBRARY */ /* @@ -135,6 +136,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, char *tdb= thd->db ? thd->db : db; // Result is never null ulong skip_lines= ex->skip_lines; bool transactional_table; + THD::killed_state killed_status= THD::NOT_KILLED; DBUG_ENTER("mysql_load"); #ifdef EMBEDDED_LIBRARY @@ -404,7 +406,16 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, free_blobs(table); /* if pack_blob was used */ table->copy_blobs=0; thd->count_cuted_fields= CHECK_FIELD_IGNORE; - + /* + simulated killing in the middle of per-row loop + must be effective for binlogging + */ + DBUG_EXECUTE_IF("simulate_kill_bug27571", + { + error=1; + thd->killed= THD::KILL_QUERY; + };); + killed_status= (error == 0)? THD::NOT_KILLED : thd->killed; /* We must invalidate the table in query cache before binlog writing and ha_autocommit_... 
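Several hunks in this patch (mysql_delete, multi_delete, select_insert and mysql_load here) follow the same recipe: snapshot thd->killed right after the per-row loop, fold it into the error flag, and pass the snapshot to the log-event constructor so the slave sees the same outcome. A stripped-down model of the recipe (Session, run_statement and log_query are invented for the sketch):

// Sketch: snapshot the KILL flag once the row loop ends and hand it,
// together with the statement, to a (stand-in) binlog writer.
#include <cstdio>

enum KilledState { NOT_KILLED, KILL_QUERY };

struct Session { KilledState killed; };

static void log_query(const char *query, bool had_error, KilledState killed)
{
  // A killed, partially executed statement is still logged, but with the
  // error/kill information attached so a slave can reproduce the failure.
  printf("binlog: %s (error=%d, killed=%d)\n", query, had_error, killed);
}

static bool run_statement(Session *thd, const char *query, int rows)
{
  bool error = false;
  for (int r = 0; r < rows && !error; r++)
  {
    if (r == 1)
      thd->killed = KILL_QUERY;        // simulate KILL arriving mid-loop
    if (thd->killed != NOT_KILLED)
      error = true;                    // abort the per-row loop
  }
  // Snapshot *now*: later cleanup code may reset thd->killed.
  KilledState killed_status = (error && thd->killed != NOT_KILLED)
                              ? thd->killed : NOT_KILLED;
  log_query(query, error, killed_status);
  return error;
}

int main()
{
  Session thd = { NOT_KILLED };
  run_statement(&thd, "DELETE FROM t1", 10);
  return 0;
}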
@@ -444,9 +455,10 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, /* If the file was not empty, wrote_create_file is true */ if (lf_info.wrote_create_file) { - if ((info.copied || info.deleted) && !transactional_table) + if (thd->transaction.stmt.modified_non_trans_table) write_execute_load_query_log_event(thd, handle_duplicates, - ignore, transactional_table); + ignore, transactional_table, + killed_status); else { Delete_file_log_event d(thd, db, transactional_table); @@ -477,7 +489,8 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, read_info.end_io_cache(); if (lf_info.wrote_create_file) write_execute_load_query_log_event(thd, handle_duplicates, - ignore, transactional_table); + ignore, transactional_table, + killed_status); } #endif /*!EMBEDDED_LIBRARY*/ if (transactional_table) @@ -504,7 +517,8 @@ err: /* Not a very useful function; just to avoid duplication of code */ static bool write_execute_load_query_log_event(THD *thd, bool duplicates, bool ignore, - bool transactional_table) + bool transactional_table, + THD::killed_state killed_err_arg) { Execute_load_query_log_event e(thd, thd->query, thd->query_length, @@ -512,7 +526,7 @@ static bool write_execute_load_query_log_event(THD *thd, (char*)thd->lex->fname_end - (char*)thd->query, (duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE : (ignore ? LOAD_DUP_IGNORE : LOAD_DUP_ERROR), - transactional_table, FALSE); + transactional_table, FALSE, killed_err_arg); return mysql_bin_log.write(&e); } diff --git a/sql/sql_map.cc b/sql/sql_map.cc index 03dc091b9b7..282716c6151 100644 --- a/sql/sql_map.cc +++ b/sql/sql_map.cc @@ -41,7 +41,7 @@ mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_lengt struct stat stat_buf; if (!fstat(file,&stat_buf)) { - if (!(map=(byte*) my_mmap(0,(size=(ulong) stat_buf.st_size),PROT_READ, + if (!(map=(byte*) my_mmap(0,(size_t)(size=(ulong) stat_buf.st_size),PROT_READ, MAP_SHARED | MAP_NORESERVE,file, 0L))) { @@ -52,7 +52,7 @@ mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_lengt if (map && memcmp(map,magic,magic_length)) { my_error(ER_WRONG_MAGIC, MYF(0), name); - VOID(my_munmap(map,size)); + VOID(my_munmap(map,(size_t)size)); map=0; } if (!map) @@ -70,7 +70,7 @@ mapped_files::~mapped_files() #ifdef HAVE_MMAP if (file >= 0) { - VOID(my_munmap(map,size)); + VOID(my_munmap(map,(size_t)size)); VOID(my_close(file,MYF(0))); file= -1; map=0; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index a111208cbf9..16a64c48cdd 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2853,7 +2853,16 @@ mysql_execute_command(THD *thd) if (check_global_access(thd, SUPER_ACL | REPL_CLIENT_ACL)) goto error; pthread_mutex_lock(&LOCK_active_mi); - res = show_master_info(thd,active_mi); + if (active_mi != NULL) + { + res = show_master_info(thd, active_mi); + } + else + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0, + "the master info structure does not exist"); + send_ok(thd); + } pthread_mutex_unlock(&LOCK_active_mi); break; } @@ -3701,6 +3710,13 @@ end_with_restore_list: SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK | OPTION_SETUP_TABLES_DONE, del_result, unit, select_lex); + res|= thd->net.report_error; + if (unlikely(res)) + { + /* If we had a another error reported earlier then this will be ignored */ + del_result->send_error(ER_UNKNOWN_ERROR, "Execution of the query failed"); + del_result->abort(); + } delete del_result; } else @@ -6430,7 +6446,12 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ST_SCHEMA_TABLE *schema_table= 
find_schema_table(thd, ptr->table_name); if (!schema_table || (schema_table->hidden && - lex->orig_sql_command == SQLCOM_END)) // not a 'show' command + (lex->orig_sql_command == SQLCOM_END || // not a 'show' command + /* + this check is used for show columns|keys from I_S hidden table + */ + lex->orig_sql_command == SQLCOM_SHOW_FIELDS || + lex->orig_sql_command == SQLCOM_SHOW_KEYS))) { my_error(ER_UNKNOWN_TABLE, MYF(0), ptr->table_name, INFORMATION_SCHEMA_NAME.str); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 9cc0ba7c29a..903d254db8f 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -364,7 +364,6 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, name=0; // Find first log linfo.index_file_offset = 0; - thd->current_linfo = &linfo; if (mysql_bin_log.find_log_pos(&linfo, name, 1)) { @@ -373,6 +372,10 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, goto err; } + pthread_mutex_lock(&LOCK_thread_count); + thd->current_linfo = &linfo; + pthread_mutex_unlock(&LOCK_thread_count); + if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0) { my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; @@ -1338,7 +1341,6 @@ bool mysql_show_binlog_events(THD* thd) name=0; // Find first log linfo.index_file_offset = 0; - thd->current_linfo = &linfo; if (mysql_bin_log.find_log_pos(&linfo, name, 1)) { @@ -1346,6 +1348,10 @@ bool mysql_show_binlog_events(THD* thd) goto err; } + pthread_mutex_lock(&LOCK_thread_count); + thd->current_linfo = &linfo; + pthread_mutex_unlock(&LOCK_thread_count); + if ((file=open_binlog(&log, linfo.log_file_name, &errmsg)) < 0) goto err; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index a17d00dcf6f..2ca4b73a115 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -565,11 +565,12 @@ JOIN::prepare(Item ***rref_pointer_array, /* - Check if one one uses a not constant column with group functions - and no GROUP BY. + Check if there are references to un-aggregated columns when computing + aggregate functions with implicit grouping (there is no GROUP BY). TODO: Add check of calculation of GROUP functions and fields: SELECT COUNT(*)+table.col1 from table1; */ + if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) { if (!group_list) { @@ -583,6 +584,13 @@ JOIN::prepare(Item ***rref_pointer_array, else if (!(flag & 2) && !item->const_during_execution()) flag|=2; } + if (having) + { + if (having->with_sum_func) + flag |= 1; + else if (!having->const_during_execution()) + flag |= 2; + } if (flag == 3) { my_message(ER_MIX_OF_GROUP_FUNC_AND_FIELDS, @@ -1057,10 +1065,19 @@ JOIN::optimize() We have found that grouping can be removed since groups correspond to only one row anyway, but we still have to guarantee correct result order. The line below effectively rewrites the query from GROUP BY - <fields> to ORDER BY <fields>. One exception is if skip_sort_order is - set (see above), then we can simply skip GROUP BY. + <fields> to ORDER BY <fields>. There are two exceptions: + - if skip_sort_order is set (see above), then we can simply skip + GROUP BY; + - we can only rewrite ORDER BY if the ORDER BY fields are 'compatible' + with the GROUP BY ones, i.e. either one is a prefix of another. + We only check if the ORDER BY is a prefix of GROUP BY. In this case + test_if_subpart() copies the ASC/DESC attributes from the original + ORDER BY fields. + If GROUP BY is a prefix of ORDER BY, then it is safe to leave + 'order' as is. */ - order= skip_sort_order ? 0 : group_list; + if (!order || test_if_subpart(group_list, order)) + order= skip_sort_order ? 
0 : group_list; group_list= 0; group= 0; } @@ -5927,7 +5944,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) /* Fix for EXPLAIN */ if (sel->quick) - join->best_positions[i].records_read= sel->quick->records; + join->best_positions[i].records_read= (double)sel->quick->records; } else { @@ -6083,10 +6100,9 @@ make_join_readinfo(JOIN *join, ulonglong options) ordered. If there is a temp table the ordering is done as a last operation and doesn't prevent join cache usage. */ - if (!ordered_set && !join->need_tmp && - ((table == join->sort_by_table && - (!join->order || join->skip_sort_order)) || - (join->sort_by_table == (TABLE *) 1 && i != join->const_tables))) + if (!ordered_set && !join->need_tmp && + (table == join->sort_by_table || + (join->sort_by_table == (TABLE *) 1 && i != join->const_tables))) ordered_set= 1; switch (tab->type) { @@ -10385,6 +10401,15 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) error= (*end_select)(join,join_tab,0); if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT) error= (*end_select)(join,join_tab,1); + + /* + If we don't go through evaluate_join_record(), do the counting + here. join->send_records is increased on success in end_send(), + so we don't touch it here. + */ + join->examined_rows++; + join->thd->row_count++; + DBUG_ASSERT(join->examined_rows <= 1); } else if (join->send_row_on_empty_set()) { @@ -14395,6 +14420,9 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, item_field= (Item*) new Item_field(field); if (!item_field) DBUG_RETURN(TRUE); // Fatal error + + if (item->real_item()->type() != Item::FIELD_ITEM) + field->orig_table= 0; item_field->name= item->name; if (item->type() == Item::REF_ITEM) { @@ -15534,6 +15562,55 @@ static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables) } +/** + @brief Print an index hint for a table + + @details Prints out the USE|FORCE|IGNORE index hints for a table. + + @param thd the current thread + @param[out] str appends the index hint here + @param hint what the hint is (as string : "USE INDEX"| + "FORCE INDEX"|"IGNORE INDEX") + @param hint_length the length of the string in 'hint' + @param indexes a list of index names for the hint +*/ + +void +TABLE_LIST::print_index_hint(THD *thd, String *str, + const char *hint, uint32 hint_length, + List<String> indexes) +{ + List_iterator_fast<String> li(indexes); + String *idx; + bool first= 1; + size_t find_length= strlen(primary_key_name); + + str->append (' '); + str->append (hint, hint_length); + str->append (STRING_WITH_LEN(" (")); + while ((idx = li++)) + { + if (first) + first= 0; + else + str->append(','); + /* + It's safe to use ptr() here because we compare the length first + and we rely that my_strcasecmp will not access more than length() + chars from the string. See test_if_string_in_list() for similar + implementation. 
+ */ + if (find_length == idx->length() && + !my_strcasecmp (system_charset_info, primary_key_name, + idx->ptr())) + str->append(primary_key_name); + else + append_identifier (thd, str, idx->ptr(), idx->length()); + } + str->append(')'); +} + + /* Print table as it should be in join list @@ -15598,9 +15675,33 @@ void TABLE_LIST::print(THD *thd, String *str) } if (my_strcasecmp(table_alias_charset, cmp_name, alias)) { + char t_alias_buff[MAX_ALIAS_NAME]; + const char *t_alias= alias; + str->append(' '); - append_identifier(thd, str, alias, strlen(alias)); + if (lower_case_table_names== 1) + { + if (alias && alias[0]) + { + strmov(t_alias_buff, alias); + my_casedn_str(files_charset_info, t_alias_buff); + t_alias= t_alias_buff; + } + } + + append_identifier(thd, str, t_alias, strlen(t_alias)); } + + if (use_index) + { + if (force_index) + print_index_hint(thd, str, STRING_WITH_LEN("FORCE INDEX"), *use_index); + else + print_index_hint(thd, str, STRING_WITH_LEN("USE INDEX"), *use_index); + } + if (ignore_index) + print_index_hint (thd, str, STRING_WITH_LEN("IGNORE INDEX"), *ignore_index); + } } diff --git a/sql/sql_select.h b/sql/sql_select.h index d84fbcb8c2d..4fc32e7fdb3 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -521,9 +521,13 @@ public: store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length) :null_key(0), null_ptr(null), err(0) { - if (field_arg->type() == FIELD_TYPE_BLOB) + if (field_arg->type() == FIELD_TYPE_BLOB + || field_arg->type() == FIELD_TYPE_GEOMETRY) { - /* Key segments are always packed with a 2 byte length prefix */ + /* + Key segments are always packed with a 2 byte length prefix. + See mi_rkey for details. + */ to_field=new Field_varstring(ptr, length, 2, (uchar*) null, 1, Field::NONE, field_arg->field_name, field_arg->table, field_arg->charset()); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 9d6f66aff53..2a87d603cf9 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -961,8 +961,8 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, sql_field->length= dup_field->char_length; sql_field->pack_length= dup_field->pack_length; sql_field->key_length= dup_field->key_length; - sql_field->create_length_to_internal_length(); sql_field->decimals= dup_field->decimals; + sql_field->create_length_to_internal_length(); sql_field->unireg_check= dup_field->unireg_check; /* We're making one field from two, the result field will have @@ -1293,7 +1293,7 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type == Field::GEOM_POINT) - column->length= 21; + column->length= 25; if (!column->length) { my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name); diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index ce6cd43eace..34ca18d5c39 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -410,7 +410,12 @@ int mysql_create_function(THD *thd,udf_func *udf) if (!initialized) { - my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + if (opt_noacl) + my_error(ER_CANT_INITIALIZE_UDF, MYF(0), + udf->name.str, + "UDFs are unavailable with the --skip-grant-tables option"); + else + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } @@ -514,7 +519,10 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) DBUG_ENTER("mysql_drop_function"); if (!initialized) { - my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + if (opt_noacl) + my_error(ER_FUNCTION_NOT_DEFINED, MYF(0), udf_name->str); + else + 
my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } rw_wrlock(&THR_LOCK_udf); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index c78e246f518..84349a40977 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -134,6 +134,7 @@ int mysql_update(THD *thd, SELECT_LEX *select_lex= &thd->lex->select_lex; bool need_reopen; List<Item> all_fields; + THD::killed_state killed_status= THD::NOT_KILLED; DBUG_ENTER("mysql_update"); LINT_INIT(timestamp_query_id); @@ -362,7 +363,7 @@ int mysql_update(THD *thd, init_read_record_idx(&info, thd, table, 1, used_index); thd->proc_info="Searching rows for update"; - uint tmp_limit= limit; + ha_rows tmp_limit= limit; while (!(error=info.read_record(&info)) && !thd->killed) { @@ -519,43 +520,26 @@ int mysql_update(THD *thd, table->file->unlock_row(); thd->row_count++; } + /* + Caching the killed status to pass as the arg to query event constuctor; + The cached value can not change whereas the killed status can + (externally) since this point and change of the latter won't affect + binlogging. + It's assumed that if an error was set in combination with an effective + killed status then the error is due to killing. + */ + killed_status= thd->killed; // get the status of the volatile + // simulated killing after the loop must be ineffective for binlogging + DBUG_EXECUTE_IF("simulate_kill_bug27571", + { + thd->killed= THD::KILL_QUERY; + };); + error= (killed_status == THD::NOT_KILLED)? error : 1; + if (!transactional_table && updated > 0) thd->transaction.stmt.modified_non_trans_table= TRUE; - - /* - todo bug#27571: to avoid asynchronization of `error' and - `error_code' of binlog event constructor - - The concept, which is a bit different for insert(!), is to - replace `error' assignment with the following lines - - killed_status= thd->killed; // get the status of the volatile - - Notice: thd->killed is type of "state" whereas the lhs has - "status" the suffix which translates according to WordNet: a state - at a particular time - at the time of the end of per-row loop in - our case. Binlogging ops are conducted with the status. - - error= (killed_status == THD::NOT_KILLED)? error : 1; - - which applies to most mysql_$query functions. - Event's constructor will accept `killed_status' as an argument: - - Query_log_event qinfo(..., killed_status); - - thd->killed might be changed after killed_status had got cached and this - won't affect binlogging event but other effects remain. - - Open issue: In a case the error happened not because of KILLED - - and then KILLED was caught later still within the loop - we shall - do something to avoid binlogging of incorrect ER_SERVER_SHUTDOWN - error_code. - */ - - if (thd->killed && !error) - error= 1; // Aborted end_read_record(&info); free_io_cache(table); // If ORDER BY delete select; @@ -580,14 +564,14 @@ int mysql_update(THD *thd, Sometimes we want to binlog even if we updated no rows, in case user used it to be sure master and slave are in same state. 
*/ - if ((error < 0) || (updated && !transactional_table)) + if ((error < 0) || thd->transaction.stmt.modified_non_trans_table) { if (mysql_bin_log.is_open()) { if (error < 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - transactional_table, FALSE); + transactional_table, FALSE, killed_status); if (mysql_bin_log.write(&qinfo) && transactional_table) error=1; // Rollback update } @@ -994,8 +978,8 @@ multi_update::multi_update(TABLE_LIST *table_list, :all_tables(table_list), leaves(leaves_list), update_tables(0), tmp_tables(0), updated(0), found(0), fields(field_list), values(value_list), table_count(0), copy_field(0), - handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(0), - transactional_tables(1), ignore(ignore_arg) + handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1), + transactional_tables(1), ignore(ignore_arg), error_handled(0) {} @@ -1202,7 +1186,6 @@ multi_update::initialize_tables(JOIN *join) if ((thd->options & OPTION_SAFE_UPDATES) && error_if_full_join(join)) DBUG_RETURN(1); main_table=join->join_tab->table; - trans_safe= transactional_tables= main_table->file->has_transactions(); table_to_update= 0; /* Any update has at least one pair (field, value) */ @@ -1484,12 +1467,14 @@ void multi_update::send_error(uint errcode,const char *err) /* First send error what ever it is ... */ my_error(errcode, MYF(0), err); - /* If nothing updated return */ - if (updated == 0) /* the counter might be reset in send_eof */ - return; /* and then the query has been binlogged */ + /* the error was handled or nothing deleted and no side effects return */ + if (error_handled || + !thd->transaction.stmt.modified_non_trans_table && !updated) + return; /* Something already updated so we have to invalidate cache */ - query_cache_invalidate3(thd, update_tables, 1); + if (updated) + query_cache_invalidate3(thd, update_tables, 1); /* If all tables that has been updated are trans safe then just do rollback. If not attempt to do remaining updates. @@ -1521,12 +1506,16 @@ void multi_update::send_error(uint errcode,const char *err) */ if (mysql_bin_log.is_open()) { + /* + THD::killed status might not have been set ON at time of an error + got caught and if happens later the killed error is written + into repl event. + */ Query_log_event qinfo(thd, thd->query, thd->query_length, transactional_tables, FALSE); mysql_bin_log.write(&qinfo); } - if (!trans_safe) - thd->transaction.all.modified_non_trans_table= TRUE; + thd->transaction.all.modified_non_trans_table= TRUE; } DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table); @@ -1709,10 +1698,19 @@ err2: bool multi_update::send_eof() { char buff[STRING_BUFFER_USUAL_SIZE]; + THD::killed_state killed_status= THD::NOT_KILLED; thd->proc_info="updating reference tables"; - /* Does updates for the last n - 1 tables, returns 0 if ok */ + /* + Does updates for the last n - 1 tables, returns 0 if ok; + error takes into account killed status gained in do_updates() + */ int local_error = (table_count) ? do_updates(0) : 0; + /* + if local_error is not set ON until after do_updates() then + later carried out killing should not affect binlogging. + */ + killed_status= (local_error == 0)? 
THD::NOT_KILLED : thd->killed; thd->proc_info= "end"; /* We must invalidate the query cache before binlog writing and @@ -1739,16 +1737,16 @@ bool multi_update::send_eof() { if (local_error == 0) thd->clear_error(); - else - updated= 0; /* if there's an error binlog it here not in ::send_error */ Query_log_event qinfo(thd, thd->query, thd->query_length, - transactional_tables, FALSE); + transactional_tables, FALSE, killed_status); if (mysql_bin_log.write(&qinfo) && trans_safe) local_error= 1; // Rollback update } if (thd->transaction.stmt.modified_non_trans_table) thd->transaction.all.modified_non_trans_table= TRUE; } + if (local_error != 0) + error_handled= TRUE; // to force early leave from ::send_error() if (transactional_tables) { diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 35a97411511..297edd0d90d 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -223,9 +223,6 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, { LEX *lex= thd->lex; bool link_to_local; -#ifndef NO_EMBEDDED_ACCESS_CHECKS - bool definer_check_is_needed= mode != VIEW_ALTER || lex->definer; -#endif /* first table in list is target VIEW name => cut off it */ TABLE_LIST *view= lex->unlink_first_table(&link_to_local); TABLE_LIST *tables= lex->query_tables; @@ -280,7 +277,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, - same as current user - current user has SUPER_ACL */ - if (definer_check_is_needed && + if (lex->definer && (strcmp(lex->definer->user.str, thd->security_ctx->priv_user) != 0 || my_strcasecmp(system_charset_info, lex->definer->host.str, @@ -925,6 +922,15 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, DBUG_RETURN(0); } + if (table->use_index || table->ignore_index) + { + my_error(ER_WRONG_USAGE, MYF(0), + table->ignore_index ? "IGNORE INDEX" : + (table->force_index ? "FORCE INDEX" : "USE INDEX"), + "VIEW"); + DBUG_RETURN(TRUE); + } + /* check loop via view definition */ for (TABLE_LIST *precedent= table->referencing_view; precedent; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 3fd8d339f31..92b8b96378f 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -3094,7 +3094,7 @@ type: spatial_type: GEOMETRY_SYM { $$= Field::GEOM_GEOMETRY; } | GEOMETRYCOLLECTION { $$= Field::GEOM_GEOMETRYCOLLECTION; } - | POINT_SYM { Lex->length= (char*)"21"; + | POINT_SYM { Lex->length= (char*)"25"; $$= Field::GEOM_POINT; } | MULTIPOINT { $$= Field::GEOM_MULTIPOINT; } @@ -3921,7 +3921,7 @@ analyze: lex->no_write_to_binlog= $2; lex->check_opt.init(); } - table_list opt_mi_check_type + table_list {} ; @@ -3966,7 +3966,7 @@ optimize: lex->no_write_to_binlog= $2; lex->check_opt.init(); } - table_list opt_mi_check_type + table_list {} ; @@ -4305,6 +4305,12 @@ select_item: MYSQL_YYABORT; if ($4.str) { + if (Lex->sql_command == SQLCOM_CREATE_VIEW && + check_column_name($4.str)) + { + my_error(ER_WRONG_COLUMN_NAME, MYF(0), $4.str); + MYSQL_YYABORT; + } $2->is_autogenerated_name= FALSE; $2->set_name($4.str, $4.length, system_charset_info); } @@ -7639,11 +7645,15 @@ literal: String *str= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) : (String*) 0; - $$= new Item_string(str ? str->ptr() : "", + $$= new Item_string(NULL, /* name will be set in select_item */ + str ? str->ptr() : "", str ? 
str->length() : 0, Lex->underscore_charset); - if ($$) - ((Item_string *) $$)->set_repertoire_from_value(); + if (!$$ || !$$->check_well_formed_result(&$$->str_value, TRUE)) + { + MYSQL_YYABORT; + } + ((Item_string *) $$)->set_repertoire_from_value(); } | UNDERSCORE_CHARSET BIN_NUM { @@ -7655,9 +7665,14 @@ literal: String *str= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) : (String*) 0; - $$= new Item_string(str ? str->ptr() : "", - str ? str->length() : 0, - Lex->charset); + $$= new Item_string(NULL, /* name will be set in select_item */ + str ? str->ptr() : "", + str ? str->length() : 0, + Lex->underscore_charset); + if (!$$ || !$$->check_well_formed_result(&$$->str_value, TRUE)) + { + MYSQL_YYABORT; + } } | DATE_SYM text_literal { $$ = $2; } | TIME_SYM text_literal { $$ = $2; } diff --git a/sql/table.cc b/sql/table.cc index a393f1a676b..7fe9aa774f3 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -732,9 +732,11 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, keyinfo->key_length+= HA_KEY_NULL_LENGTH; } if (field->type() == FIELD_TYPE_BLOB || - field->real_type() == MYSQL_TYPE_VARCHAR) + field->real_type() == MYSQL_TYPE_VARCHAR || + field->type() == FIELD_TYPE_GEOMETRY) { - if (field->type() == FIELD_TYPE_BLOB) + if (field->type() == FIELD_TYPE_BLOB || + field->type() == FIELD_TYPE_GEOMETRY) key_part->key_part_flag|= HA_BLOB_PART; else key_part->key_part_flag|= HA_VAR_LENGTH_PART; diff --git a/sql/table.h b/sql/table.h index f411ce489c4..cff4be630e4 100644 --- a/sql/table.h +++ b/sql/table.h @@ -773,6 +773,8 @@ struct TABLE_LIST private: bool prep_check_option(THD *thd, uint8 check_opt_type); bool prep_where(THD *thd, Item **conds, bool no_where_clause); + void print_index_hint(THD *thd, String *str, const char *hint, + uint32 hint_length, List<String>); /* Cleanup for re-execution in a prepared statement or a stored procedure. |