author      monty@mashka.mysql.fi <>    2002-09-18 21:04:49 +0300
committer   monty@mashka.mysql.fi <>    2002-09-18 21:04:49 +0300
commit      f638ee6df91a32ed767fa67cf643dfa07f9a82d4 (patch)
tree        83f34b98c4e43e4f23f9b941d93676b65ef1b657 /sql/ha_myisam.cc
parent      dfd0f82b9368cd4d023ebc1f6eab6e88622b060c (diff)
download    mariadb-git-f638ee6df91a32ed767fa67cf643dfa07f9a82d4.tar.gz
Added code to flush a bulk_insert index.
This fixes a bug when doing multi-row inserts on a table with an auto_increment key that is not in the first key segment.
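The mechanism behind the bug, as far as this patch shows it: during a multi-row INSERT the handler buffers key entries in the bulk-insert cache instead of writing them to the index right away, so get_auto_increment() can read a stale maximum from the index and hand out a value that collides with rows still sitting in the cache. The toy C++ model below only illustrates that flush-before-read ordering; ToyIndex, bulk_write() and next_auto_increment() are invented names for this sketch, not server code.

// Hypothetical, simplified model of the ordering the patch enforces.
#include <algorithm>
#include <cstdio>
#include <vector>

struct ToyIndex {
  std::vector<long> keys;            // keys already written to the index
  std::vector<long> bulk_buffer;     // keys cached during a bulk insert

  void bulk_write(long key) { bulk_buffer.push_back(key); }

  // Counterpart of flushing the bulk-insert cache: cached keys become
  // visible to index reads.
  void flush_bulk_buffer() {
    keys.insert(keys.end(), bulk_buffer.begin(), bulk_buffer.end());
    bulk_buffer.clear();
  }

  long max_key() const {
    return keys.empty() ? 0 : *std::max_element(keys.begin(), keys.end());
  }
};

// Next auto_increment value, modelling the patched behaviour:
// flush first, so buffered rows are visible to the max-key lookup.
long next_auto_increment(ToyIndex &idx, bool bulk_insert_active) {
  if (bulk_insert_active)
    idx.flush_bulk_buffer();
  return idx.max_key() + 1;
}

int main() {
  ToyIndex idx;
  idx.bulk_write(1);
  idx.bulk_write(2);                 // multi-row insert, keys still buffered
  // Without the flush this would return 1 again and collide with buffered rows.
  std::printf("next value: %ld\n", next_auto_increment(idx, true));
  return 0;
}

The real patch achieves the same effect by issuing HA_EXTRA_BULK_INSERT_FLUSH for table->next_number_index before the maximum key is read, as the last hunk below shows.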
Diffstat (limited to 'sql/ha_myisam.cc')
-rw-r--r--   sql/ha_myisam.cc   20
1 file changed, 18 insertions, 2 deletions
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index bae455cbb3c..cc1e4c3f45c 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -657,7 +657,15 @@ int ha_myisam::repair(THD *thd, MI_CHECK &param, bool optimize)
}
-/* Deactive all not unique index that can be recreated fast */
+/*
+  Deactivate all non-unique indexes that can be recreated fast
+
+  SYNOPSIS
+    deactivate_non_unique_index()
+      rows            Rows to be inserted
+                      0 if we don't know
+                      HA_POS_ERROR if we want to disable all keys
+*/
void ha_myisam::deactivate_non_unique_index(ha_rows rows)
{
@@ -670,9 +678,12 @@ void ha_myisam::deactivate_non_unique_index(ha_rows rows)
        mi_extra(file, HA_EXTRA_NO_KEYS, 0);
      else
      {
-        mi_disable_non_unique_index(file,rows);
+        /* Only disable old index if the table was empty */
+        if (file->state->records == 0)
+          mi_disable_non_unique_index(file,rows);
        ha_myisam::extra_opt(HA_EXTRA_BULK_INSERT_BEGIN,
                             current_thd->variables.bulk_insert_buff_size);
+        table->bulk_insert= 1;
      }
    }
    enable_activate_all_index=1;
@@ -690,6 +701,7 @@ bool ha_myisam::activate_all_index(THD *thd)
  DBUG_ENTER("activate_all_index");
  mi_extra(file, HA_EXTRA_BULK_INSERT_END, 0);
+  table->bulk_insert= 0;
  if (enable_activate_all_index &&
      share->state.key_map != set_bits(ulonglong, share->base.keys))
  {
@@ -1194,6 +1206,10 @@ longlong ha_myisam::get_auto_increment()
    return auto_increment_value;
  }
+  if (table->bulk_insert)
+    mi_extra(file, HA_EXTRA_BULK_INSERT_FLUSH,
+             (void*) &table->next_number_index);
+
  longlong nr;
  int error;
  byte key[MI_MAX_KEY_LENGTH];
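Read together, the hunks amount to a small flag protocol: deactivate_non_unique_index() raises table->bulk_insert when the bulk-insert cache is started, get_auto_increment() flushes the cache for the auto_increment index while the flag is up, and activate_all_index() clears it when the cache is ended. The sketch below restates that protocol outside the handler class as a plain illustration; ToyHandler and its methods are invented for this sketch, and the real code goes through mi_extra() with the HA_EXTRA_BULK_INSERT_* flags shown above.

// Hypothetical walk-through of the order of operations the patch relies on.
// None of these names exist in the server; each comment points at the real call.
#include <cstdio>

struct ToyHandler {
  bool bulk_insert = false;            // stands in for the new table->bulk_insert flag
  long max_key = 41;                   // pretend maximum currently stored in the index

  void begin_bulk_insert() {           // deactivate_non_unique_index()
    std::puts("HA_EXTRA_BULK_INSERT_BEGIN: start caching keys");
    bulk_insert = true;
  }
  long next_auto_increment() {         // get_auto_increment()
    if (bulk_insert)                   // check added by the patch
      std::puts("HA_EXTRA_BULK_INSERT_FLUSH: flush the auto_increment index cache");
    return ++max_key;                  // the flushed index yields an up-to-date maximum
  }
  void end_bulk_insert() {             // activate_all_index()
    std::puts("HA_EXTRA_BULK_INSERT_END: write out remaining cached keys");
    bulk_insert = false;
  }
};

int main() {
  // Rough shape of a multi-row INSERT against a table whose auto_increment
  // key is not in the first key segment.
  ToyHandler h;
  h.begin_bulk_insert();
  std::printf("row 1 gets auto_increment %ld\n", h.next_auto_increment());
  std::printf("row 2 gets auto_increment %ld\n", h.next_auto_increment());
  h.end_bulk_insert();
  return 0;
}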