Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_heap.cc        62
-rw-r--r--  sql/ha_heap.h          4
-rw-r--r--  sql/ha_ndbcluster.cc  45
-rw-r--r--  sql/ha_ndbcluster.h    3
-rw-r--r--  sql/item_func.h        5
-rw-r--r--  sql/log_event.cc      12
-rw-r--r--  sql/sql_base.cc        2
-rw-r--r--  sql/sql_class.cc       3
-rw-r--r--  sql/sql_db.cc         13
-rw-r--r--  sql/sql_load.cc        7
-rw-r--r--  sql/sql_parse.cc      15
-rw-r--r--  sql/sql_select.cc     14
-rw-r--r--  sql/sql_select.h      11
-rw-r--r--  sql/sql_update.cc     13
14 files changed, 158 insertions, 51 deletions
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index f8c2e6cc338..ab0ab5d8b64 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -68,7 +68,7 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
ha_heap::info(), which is always called before key statistics are
used.
*/
- key_stats_ok= FALSE;
+ key_stat_version= file->s->key_stat_version-1;
}
return (file ? 0 : 1);
}
@@ -114,14 +114,21 @@ void ha_heap::update_key_stats()
continue;
if (key->algorithm != HA_KEY_ALG_BTREE)
{
- ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
- key->rec_per_key[key->key_parts-1]=
- hash_buckets ? file->s->records/hash_buckets : 0;
+ if (key->flags & HA_NOSAME)
+ key->rec_per_key[key->key_parts-1]= 1;
+ else
+ {
+ ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
+ uint no_records= hash_buckets ? file->s->records/hash_buckets : 2;
+ if (no_records < 2)
+ no_records= 2;
+ key->rec_per_key[key->key_parts-1]= no_records;
+ }
}
}
records_changed= 0;
/* At the end of update_key_stats() we can proudly claim they are OK. */
- key_stats_ok= TRUE;
+ key_stat_version= file->s->key_stat_version;
}
int ha_heap::write_row(byte * buf)
@@ -135,7 +142,13 @@ int ha_heap::write_row(byte * buf)
res= heap_write(file,buf);
if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
file->s->records)
- key_stats_ok= FALSE;
+ {
+ /*
+ We can perform this safely since only one writer at a time is
+ allowed on the table.
+ */
+ file->s->key_stat_version++;
+ }
return res;
}
@@ -148,7 +161,13 @@ int ha_heap::update_row(const byte * old_data, byte * new_data)
res= heap_update(file,old_data,new_data);
if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
file->s->records)
- key_stats_ok= FALSE;
+ {
+ /*
+ We can perform this safely since only one writer at a time is
+ allowed on the table.
+ */
+ file->s->key_stat_version++;
+ }
return res;
}
@@ -159,7 +178,13 @@ int ha_heap::delete_row(const byte * buf)
res= heap_delete(file,buf);
if (!res && table->tmp_table == NO_TMP_TABLE &&
++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
- key_stats_ok= FALSE;
+ {
+ /*
+ We can perform this safely since only one writer at a time is
+ allowed on the table.
+ */
+ file->s->key_stat_version++;
+ }
return res;
}
@@ -277,7 +302,7 @@ void ha_heap::info(uint flag)
have to update the key statistics. Hoping that a table lock is now
in place.
*/
- if (! key_stats_ok)
+ if (key_stat_version != file->s->key_stat_version)
update_key_stats();
}
@@ -290,7 +315,13 @@ int ha_heap::delete_all_rows()
{
heap_clear(file);
if (table->tmp_table == NO_TMP_TABLE)
- key_stats_ok= FALSE;
+ {
+ /*
+ We can perform this safely since only one writer at a time is
+ allowed on the table.
+ */
+ file->s->key_stat_version++;
+ }
return 0;
}
@@ -451,9 +482,14 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
return HA_POS_ERROR; // Can only use exact keys
else
{
- /* Assert that info() did run. We need current statistics here. */
- DBUG_ASSERT(key_stats_ok);
- return key->rec_per_key[key->key_parts-1];
+ if (records <= 1)
+ return records;
+ else
+ {
+ /* Assert that info() did run. We need current statistics here. */
+ DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
+ return key->rec_per_key[key->key_parts-1];
+ }
}
}
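
For context: the ha_heap.cc hunks above replace the per-handler boolean key_stats_ok with a version counter shared through file->s, so a single writer can invalidate the cached key statistics of every open handler at once. A minimal stand-alone sketch of that invalidation pattern, with simplified stand-in types for the HEAP structures:

#include <cstdio>

struct Share { unsigned key_stat_version; };   // plays the role of file->s

struct Handler {
  Share   *s;
  unsigned key_stat_version;                   // local copy, one per open handler

  // Writer side: bumping the shared counter marks the statistics of all
  // handlers stale; safe under the engine's one-writer-at-a-time rule.
  void stats_changed() { s->key_stat_version++; }

  // Reader side (as in ha_heap::info): recompute only when the local
  // copy lags behind the shared counter.
  void info() {
    if (key_stat_version != s->key_stat_version) {
      /* ... recompute rec_per_key here ... */
      key_stat_version = s->key_stat_version;  // statistics are current again
    }
  }
};

int main() {
  Share share = {1};
  Handler a = {&share, 0}, b = {&share, 0};
  a.info();            // stale (0 != 1): recomputes
  b.stats_changed();   // a write through another handler bumps the counter
  a.info();            // stale again (1 != 2): recomputes
  std::printf("shared version: %u\n", share.key_stat_version);
  return 0;
}

Note how open() initializing the local copy to file->s->key_stat_version-1 (first hunk) guarantees that the very first info() call sees stale statistics and recomputes them.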
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index cbe2474492d..0a087fde1b0 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -29,10 +29,10 @@ class ha_heap: public handler
key_map btree_keys;
/* number of records changed since last statistics update */
uint records_changed;
- bool key_stats_ok;
+ uint key_stat_version;
public:
ha_heap(TABLE *table): handler(table), file(0), records_changed(0),
- key_stats_ok(0) {}
+ key_stat_version(0) {}
~ha_heap() {}
const char *table_type() const { return "HEAP"; }
const char *index_type(uint inx)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 594551b918a..876d5d2f8fd 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -45,8 +45,6 @@ static const int max_transactions= 256;
static const char *ha_ndb_ext=".ndb";
-#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
-
#define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0
#define NDB_AUTO_INCREMENT_RETRIES 10
@@ -747,7 +745,7 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
}
// Used for hidden key only
- m_value[fieldnr].rec= ndb_op->getValue(fieldnr, NULL);
+ m_value[fieldnr].rec= ndb_op->getValue(fieldnr, m_ref);
DBUG_RETURN(m_value[fieldnr].rec == NULL);
}
@@ -2098,13 +2096,10 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
DBUG_PRINT("info", ("Using hidden key"));
// Require that the PK for this record has previously been
- // read into m_value
- uint no_fields= table->fields;
- NdbRecAttr* rec= m_value[no_fields].rec;
- DBUG_ASSERT(rec);
- DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH);
+ // read into m_ref
+ DBUG_DUMP("key", m_ref, NDB_HIDDEN_PRIMARY_KEY_LENGTH);
- if (set_hidden_key(op, no_fields, rec->aRef()))
+ if (set_hidden_key(op, table->fields, m_ref))
ERR_RETURN(op->getNdbError());
}
else
@@ -2181,11 +2176,8 @@ int ha_ndbcluster::delete_row(const byte *record)
{
// This table has no primary key, use "hidden" primary key
DBUG_PRINT("info", ("Using hidden key"));
- uint no_fields= table->fields;
- NdbRecAttr* rec= m_value[no_fields].rec;
- DBUG_ASSERT(rec != NULL);
- if (set_hidden_key(op, no_fields, rec->aRef()))
+ if (set_hidden_key(op, table->fields, m_ref))
ERR_RETURN(op->getNdbError());
}
else
@@ -2792,7 +2784,7 @@ void ha_ndbcluster::position(const byte *record)
hidden_col->getAutoIncrement() &&
rec != NULL &&
ref_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH);
- memcpy(ref, (const void*)rec->aRef(), ref_length);
+ memcpy(ref, m_ref, ref_length);
}
DBUG_DUMP("ref", (char*)ref, ref_length);
@@ -3046,9 +3038,26 @@ int ha_ndbcluster::end_bulk_insert()
"rows_inserted:%d, bulk_insert_rows: %d",
(int) m_rows_inserted, (int) m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
- if (execute_no_commit(this,trans) != 0) {
- no_uncommitted_rows_execute_failure();
- my_errno= error= ndb_err(trans);
+ if (m_transaction_on)
+ {
+ if (execute_no_commit(this, trans) != 0)
+ {
+ no_uncommitted_rows_execute_failure();
+ my_errno= error= ndb_err(trans);
+ }
+ }
+ else
+ {
+ if (execute_commit(this, trans) != 0)
+ {
+ no_uncommitted_rows_execute_failure();
+ my_errno= error= ndb_err(trans);
+ }
+ else
+ {
+ int res= trans->restart();
+ DBUG_ASSERT(res == 0);
+ }
}
}
@@ -4867,7 +4876,7 @@ bool ha_ndbcluster::low_byte_first() const
}
bool ha_ndbcluster::has_transactions()
{
- return m_transaction_on;
+ return TRUE;
}
const char* ha_ndbcluster::index_type(uint key_number)
{
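
A stand-alone sketch of the end_bulk_insert() control flow introduced above, using stub types rather than the real NDB API: with transactions on, the batch is executed without committing; with transactions off, each batch is committed immediately and the transaction object is restarted for the next batch.

#include <cassert>

struct NdbConnection { int restart() { return 0; } };        // stub handle

static int execute_no_commit(NdbConnection *) { return 0; }  // stubbed results
static int execute_commit(NdbConnection *)    { return 0; }

static int flush_bulk_insert(bool transaction_on, NdbConnection *trans) {
  if (transaction_on)
    return execute_no_commit(trans);  // commit is deferred to the real commit
  if (execute_commit(trans) != 0)     // autocommit mode: commit this batch now
    return -1;
  int res = trans->restart();         // reuse the handle for the next batch
  assert(res == 0);
  return res;
}

int main() {
  NdbConnection trans;
  return flush_bulk_insert(false, &trans);
}

This also fits the has_transactions() change at the end of the file: the handler now always reports transaction support, which is consistent with it committing batches itself when m_transaction_on is off.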
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index 90d5d59cabe..83d9d87777a 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -27,6 +27,8 @@
#include <ndbapi_limits.h>
+#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
+
class Ndb; // Forward declaration
class NdbOperation; // Forward declaration
class NdbConnection; // Forward declaration
@@ -226,6 +228,7 @@ class ha_ndbcluster: public handler
// NdbRecAttr has no reference to blob
typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue;
NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE];
+ byte m_ref[NDB_HIDDEN_PRIMARY_KEY_LENGTH];
bool m_use_write;
bool m_ignore_dup_key;
bool m_primary_key_update;
diff --git a/sql/item_func.h b/sql/item_func.h
index 2c4976d1152..51f9d3fb36f 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -1056,16 +1056,13 @@ public:
if (!master && ft_handler)
{
ft_handler->please->close_search(ft_handler);
- ft_handler=0;
- if (join_key)
- table->file->ft_handler=0;
- table->fulltext_searched=0;
}
if (concat)
{
delete concat;
concat= 0;
}
+ ft_handler= 0;
DBUG_VOID_RETURN;
}
enum Functype functype() const { return FT_FUNC; }
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 3f545df5776..19c32b2d28e 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -949,6 +949,7 @@ Query_log_event::Query_log_event(const char* buf, int event_len,
void Query_log_event::print(FILE* file, bool short_form, char* last_db)
{
char buff[40],*end; // Enough for SET TIMESTAMP
+ const uint set_len= sizeof("SET ONE_SHOT CHARACTER_SET_CLIENT=") - 1;
if (!short_form)
{
print_header(file);
@@ -978,6 +979,17 @@ void Query_log_event::print(FILE* file, bool short_form, char* last_db)
my_fwrite(file, (byte*) buff, (uint) (end-buff),MYF(MY_NABP | MY_WME));
if (flags & LOG_EVENT_THREAD_SPECIFIC_F)
fprintf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id);
+ /* Emit a character-set command for the mysql client */
+ if (!strncmp(query, "SET ONE_SHOT CHARACTER_SET_CLIENT=", set_len))
+ {
+ char * endptr;
+ int cs_number= strtoul(query + set_len, &endptr, 10);
+ DBUG_ASSERT(*endptr == ',');
+ CHARSET_INFO *cs_info= get_charset(cs_number, MYF(MY_WME));
+ if (cs_info) {
+ fprintf(file, "/*!\\C %s */;\n", cs_info->csname);
+ }
+ }
my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME));
fprintf(file, ";\n");
}
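
A self-contained illustration of the parse the new hunk performs on a replicated statement (the sample query below is hypothetical; get_charset() and the "/*!\C csname */" output belong to the server):

#include <cstdio>
#include <cstdlib>
#include <cstring>

int main() {
  const char *query =
    "SET ONE_SHOT CHARACTER_SET_CLIENT=8,COLLATION_CONNECTION=8";
  const size_t set_len = sizeof("SET ONE_SHOT CHARACTER_SET_CLIENT=") - 1;

  if (!strncmp(query, "SET ONE_SHOT CHARACTER_SET_CLIENT=", set_len)) {
    char *endptr;
    unsigned long cs_number = strtoul(query + set_len, &endptr, 10);
    if (*endptr == ',')  // the event writer always puts ',' after the number
      printf("client character set number: %lu\n", cs_number);
  }
  return 0;
}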
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index ea013bb4e1e..223e8482f49 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -988,6 +988,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name,
table->status=STATUS_NO_RECORD;
table->keys_in_use_for_query= table->keys_in_use;
table->used_keys= table->keys_for_keyread;
+ table->file->ft_handler=0;
+ table->fulltext_searched=0;
if (table->timestamp_field)
table->timestamp_field_type= table->timestamp_field->get_auto_set_type();
DBUG_ASSERT(table->key_read == 0);
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ef938a13489..6e24e951aa4 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1681,7 +1681,10 @@ bool select_dumpvar::send_eof()
void TMP_TABLE_PARAM::init()
{
+ DBUG_ENTER("TMP_TABLE_PARAM::init");
+ DBUG_PRINT("enter", ("this: 0x%lx", (ulong)this));
field_count= sum_func_count= func_count= hidden_field_count= 0;
group_parts= group_length= group_null_parts= 0;
quick_group= 1;
+ DBUG_VOID_RETURN;
}
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 7c834c91183..035a0b22a6b 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -388,6 +388,12 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create)
silent Used by replication when internally creating a database.
In this case the entry should not be logged.
+ SIDE-EFFECTS
+ 1. Report back to client that command succeeded (send_ok)
+ 2. Report errors to client
+ 3. Log event to binary log
+ (The 'silent' flag turns off 1 and 3.)
+
RETURN VALUES
0 ok
-1 Error
@@ -421,16 +427,17 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,
if (my_stat(path,&stat_info,MYF(0)))
{
- if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS))
+ if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS))
{
my_error(ER_DB_CREATE_EXISTS,MYF(0),db);
error= -1;
goto exit;
}
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_DB_CREATE_EXISTS, ER(ER_DB_CREATE_EXISTS), db);
+ ER_DB_CREATE_EXISTS, ER(ER_DB_CREATE_EXISTS), db);
+ if (!silent)
+ send_ok(thd);
error= 0;
- send_ok(thd);
goto exit;
}
else
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index aa4ea3e6c8c..4e6c458cc43 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -285,8 +285,11 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
else
error=read_sep_field(thd,info,table,fields,read_info,*enclosed,
skip_lines);
- if (table->file->end_bulk_insert())
- error=1; /* purecov: inspected */
+ if (table->file->end_bulk_insert() && !error)
+ {
+ table->file->print_error(my_errno, MYF(0));
+ error= 1;
+ }
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->next_number_field=0;
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 90de630da60..1daa0a5ffec 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2840,21 +2840,26 @@ unsent_create_error:
if ((res= multi_update_precheck(thd, tables)))
break;
- if ((res= mysql_multi_update_lock(thd, tables, &select_lex->item_list,
- select_lex)))
- break;
-
+ res= mysql_multi_update_lock(thd, tables, &select_lex->item_list,
+ select_lex);
#ifdef HAVE_REPLICATION
/* Check slave filtering rules */
if (thd->slave_thread)
if (all_tables_not_ok(thd,tables))
{
+ if (res!= 0)
+ {
+ res= 0; /* ignore the previous failure */
+ thd->clear_error(); /* the filter rules take precedence */
+ }
/* we warn the slave SQL thread */
my_error(ER_SLAVE_IGNORED_TABLE, MYF(0));
break;
}
#endif /* HAVE_REPLICATION */
-
+ if (res)
+ break;
+
res= mysql_multi_update(thd,tables,
&select_lex->item_list,
&lex->value_list,
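
A compressed model of the precedence rule established above, with the THD state reduced to booleans: a lock failure is remembered, but on a slave a filtered-out table outranks it, so the earlier error is cleared before the ignored-table warning is raised.

#include <cstdio>

// Returns 0 when the statement is skipped or may proceed, non-zero when
// the lock failure must be reported.
static int multi_update_flow(bool lock_failed, bool slave_thread,
                             bool tables_filtered) {
  int res = lock_failed ? 1 : 0;
  if (slave_thread && tables_filtered) {
    res = 0;        // the filter verdict outranks the earlier failure
    /* thd->clear_error(); my_error(ER_SLAVE_IGNORED_TABLE, ...); */
    return res;     // warn the slave SQL thread and stop here
  }
  if (res)
    return res;     // only now give up on the lock failure
  /* ... proceed to mysql_multi_update() ... */
  return 0;
}

int main() {
  std::printf("%d\n", multi_update_flow(true, true, true));    // 0: filtered
  std::printf("%d\n", multi_update_flow(true, false, false));  // 1: lock error
  return 0;
}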
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 1b09913dcc8..ec62e80ba13 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -4107,6 +4107,20 @@ JOIN::join_free(bool full)
problems in free_elements() as some of the elements are then deleted.
*/
tmp_table_param.copy_funcs.empty();
+ /*
+ If this JOIN is not tmp_join and both share the same
+ tmp_table_param.copy_field array, clear the pointer in tmp_join as
+ well, because the array will be freed in tmp_table_param.cleanup().
+ */
+ if (tmp_join &&
+ tmp_join != this &&
+ tmp_join->tmp_table_param.copy_field ==
+ tmp_table_param.copy_field)
+ {
+ tmp_join->tmp_table_param.copy_field=
+ tmp_join->tmp_table_param.save_copy_field= 0;
+ }
tmp_table_param.cleanup();
}
DBUG_VOID_RETURN;
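
A minimal stand-alone model of the aliasing bug the hunk above guards against, with a plain int array standing in for the Copy_field objects: when two JOINs share one copy_field array, whichever cleans up first must detach the other, or the second cleanup would touch freed memory.

struct TmpTableParam {
  int *copy_field      = nullptr;
  int *save_copy_field = nullptr;
  void cleanup() {
    delete[] copy_field;                // frees the array if still owned
    copy_field = save_copy_field = nullptr;
  }
};

struct Join {
  TmpTableParam tmp_table_param;
  Join         *tmp_join = nullptr;

  void join_free() {
    // Same guard as in the hunk: if tmp_join aliases our copy_field,
    // detach it before cleanup() frees the array.
    if (tmp_join && tmp_join != this &&
        tmp_join->tmp_table_param.copy_field == tmp_table_param.copy_field)
      tmp_join->tmp_table_param.copy_field =
        tmp_join->tmp_table_param.save_copy_field = nullptr;
    tmp_table_param.cleanup();
  }
};

int main() {
  Join a, b;
  a.tmp_join = &b;
  a.tmp_table_param.copy_field = b.tmp_table_param.copy_field = new int[4];
  a.join_free();                // frees the shared array exactly once
  b.tmp_table_param.cleanup();  // now a harmless no-op, not a double free
  return 0;
}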
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 636ee967645..94cc8839466 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -227,7 +227,14 @@ class JOIN :public Sql_alloc
{
init(thd_arg, fields_arg, select_options_arg, result_arg);
}
-
+
+ JOIN(JOIN &join)
+ :fields_list(join.fields_list)
+ {
+ init(join.thd, join.fields_list, join.select_options,
+ join.result);
+ }
+
void init(THD *thd_arg, List<Item> &fields_arg, ulong select_options_arg,
select_result *result_arg)
{
@@ -272,7 +279,7 @@ class JOIN :public Sql_alloc
fields_list= fields_arg;
bzero((char*) &keyuse,sizeof(keyuse));
- tmp_table_param.copy_field=0;
+ tmp_table_param.init();
tmp_table_param.end_write_records= HA_POS_ERROR;
rollup.state= ROLLUP::STATE_NONE;
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 24c7a278e79..48a8cf93917 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -212,7 +212,6 @@ int mysql_update(THD *thd,
SORT_FIELD *sortorder;
ha_rows examined_rows;
- used_index= MAX_KEY; // For call to init_read_record()
table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
MYF(MY_FAE | MY_ZEROFILL));
if (!(sortorder=make_unireg_sortorder(order, &length)) ||
@@ -244,7 +243,17 @@ int mysql_update(THD *thd,
DISK_BUFFER_SIZE, MYF(MY_WME)))
goto err;
- if (used_index == MAX_KEY)
+ /*
+ When we get here, we have one of the following options:
+ A. used_index == MAX_KEY
+ This means we should use a full table scan, started with an
+ init_read_record() call.
+ B. used_index != MAX_KEY
+ B.1 A quick select is used; start the scan with init_read_record().
+ B.2 No quick select is used; this is a full index scan (with LIMIT),
+ which must be started with init_read_record_idx().
+ */
+ if (used_index == MAX_KEY || (select && select->quick))
init_read_record(&info,thd,table,select,0,1);
else
init_read_record_idx(&info, thd, table, 1, used_index);
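
The comment above reduces to a single predicate; a minimal sketch, with MAX_KEY and the quick-select pointer simplified to parameters:

#include <cstdio>

enum { MAX_KEY = 64 };  // stand-in for the server's MAX_KEY sentinel

// true  -> start with init_read_record()      (cases A and B.1)
// false -> start with init_read_record_idx()  (case B.2, full index scan)
static bool use_record_scan(unsigned used_index, bool have_quick_select) {
  return used_index == MAX_KEY || have_quick_select;
}

int main() {
  std::printf("%d\n", use_record_scan(MAX_KEY, false));  // 1: full table scan
  std::printf("%d\n", use_record_scan(3, true));         // 1: quick select
  std::printf("%d\n", use_record_scan(3, false));        // 0: full index scan
  return 0;
}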