summaryrefslogtreecommitdiff
path: root/sql/handler.cc
diff options
context:
space:
mode:
Diffstat (limited to 'sql/handler.cc')
-rw-r--r--sql/handler.cc4110
1 files changed, 3031 insertions, 1079 deletions
diff --git a/sql/handler.cc b/sql/handler.cc
index 0de772e366b..ebe5ea5d4fa 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -13,154 +13,56 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+/** @file handler.cc
-/* Handler-calling-functions */
+ @brief
+ Handler-calling-functions
+*/
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
#endif
#include "mysql_priv.h"
-#include "ha_heap.h"
-#include "ha_myisam.h"
-#include "ha_myisammrg.h"
-
-
-/*
- We have dummy hanldertons in case the handler has not been compiled
- in. This will be removed in 5.1.
-*/
-#ifdef HAVE_BERKELEY_DB
-#include "ha_berkeley.h"
-extern handlerton berkeley_hton;
-#else
-handlerton berkeley_hton = { "BerkeleyDB", SHOW_OPTION_NO,
- "Supports transactions and page-level locking", DB_TYPE_BERKELEY_DB, NULL,
- 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_BLACKHOLE_DB
-#include "ha_blackhole.h"
-extern handlerton blackhole_hton;
-#else
-handlerton blackhole_hton = { "BLACKHOLE", SHOW_OPTION_NO,
- "/dev/null storage engine (anything you write to it disappears)",
- DB_TYPE_BLACKHOLE_DB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_EXAMPLE_DB
-#include "examples/ha_example.h"
-extern handlerton example_hton;
-#else
-handlerton example_hton = { "EXAMPLE", SHOW_OPTION_NO,
- "Example storage engine",
- DB_TYPE_EXAMPLE_DB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#if defined(HAVE_ARCHIVE_DB)
-#include "ha_archive.h"
-extern handlerton archive_hton;
-#else
-handlerton archive_hton = { "ARCHIVE", SHOW_OPTION_NO,
- "Archive storage engine", DB_TYPE_ARCHIVE_DB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_CSV_DB
-#include "examples/ha_tina.h"
-extern handlerton tina_hton;
-#else
-handlerton tina_hton = { "CSV", SHOW_OPTION_NO, "CSV storage engine",
- DB_TYPE_CSV_DB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_INNOBASE_DB
-#include "ha_innodb.h"
-extern handlerton innobase_hton;
-#else
-handlerton innobase_hton = { "InnoDB", SHOW_OPTION_NO,
- "Supports transactions, row-level locking, and foreign keys",
- DB_TYPE_INNODB, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
-#include "ha_ndbcluster.h"
-extern handlerton ndbcluster_hton;
-#else
-handlerton ndbcluster_hton = { "ndbcluster", SHOW_OPTION_NO,
- "Clustered, fault-tolerant, memory-based tables",
- DB_TYPE_NDBCLUSTER, NULL, 0, 0, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
-#ifdef HAVE_FEDERATED_DB
-#include "ha_federated.h"
-extern handlerton federated_hton;
-#else
-handlerton federated_hton = { "FEDERATED", SHOW_OPTION_NO,
- "Federated MySQL storage engine", DB_TYPE_FEDERATED_DB, NULL, 0, 0, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- HTON_NO_FLAGS };
-#endif
+#include "rpl_filter.h"
#include <myisampack.h>
#include <errno.h>
-extern handlerton myisam_hton;
-extern handlerton myisammrg_hton;
-extern handlerton heap_hton;
-extern handlerton binlog_hton;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+#include "ha_partition.h"
+#endif
/*
- Obsolete
+ While we have legacy_db_type, we have this array to
+ check for dups and to find handlerton from legacy_db_type.
+ Remove when legacy_db_type is finally gone
*/
-handlerton isam_hton = { "ISAM", SHOW_OPTION_NO, "Obsolete storage engine",
- DB_TYPE_ISAM, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, HTON_NO_FLAGS };
+st_plugin_int *hton2plugin[MAX_HA];
+
+static handlerton *installed_htons[128];
+
+#define BITMAP_STACKBUF_SIZE (128/8)
+
+KEY_CREATE_INFO default_key_create_info= { HA_KEY_ALG_UNDEF, 0, {NullS,0} };
/* number of entries in handlertons[] */
-ulong total_ha;
+ulong total_ha= 0;
/* number of storage engines (from handlertons[]) that support 2pc */
-ulong total_ha_2pc;
+ulong total_ha_2pc= 0;
/* size of savepoint storage area (see ha_init) */
-ulong savepoint_alloc_size;
+ulong savepoint_alloc_size= 0;
-/*
- This array is used for processing compiled in engines.
-*/
-handlerton *sys_table_types[]=
-{
- &myisam_hton,
- &heap_hton,
- &innobase_hton,
- &berkeley_hton,
- &blackhole_hton,
- &example_hton,
- &archive_hton,
- &tina_hton,
- &ndbcluster_hton,
- &federated_hton,
- &myisammrg_hton,
- &binlog_hton,
- &isam_hton,
- NULL
-};
-
-struct show_table_alias_st sys_table_aliases[]=
+static const LEX_STRING sys_table_aliases[]=
{
- {"INNOBASE", "InnoDB"},
- {"NDB", "NDBCLUSTER"},
- {"BDB", "BERKELEYDB"},
- {"HEAP", "MEMORY"},
- {"MERGE", "MRG_MYISAM"},
- {NullS, NullS}
+ { C_STRING_WITH_LEN("INNOBASE") }, { C_STRING_WITH_LEN("INNODB") },
+ { C_STRING_WITH_LEN("NDB") }, { C_STRING_WITH_LEN("NDBCLUSTER") },
+ { C_STRING_WITH_LEN("HEAP") }, { C_STRING_WITH_LEN("MEMORY") },
+ { C_STRING_WITH_LEN("MERGE") }, { C_STRING_WITH_LEN("MRG_MYISAM") },
+ {NullS, 0}
};
const char *ha_row_type[] = {
- "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "?","?","?"
+ "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "PAGE", "?","?","?"
};
const char *tx_isolation_names[] =
@@ -172,208 +74,222 @@ TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"",
static TYPELIB known_extensions= {0,"known_exts", NULL, NULL};
uint known_extensions_id= 0;
-enum db_type ha_resolve_by_name(const char *name, uint namelen)
+
+
+static plugin_ref ha_default_plugin(THD *thd)
{
- THD *thd= current_thd;
- show_table_alias_st *table_alias;
- handlerton **types;
+ if (thd->variables.table_plugin)
+ return thd->variables.table_plugin;
+ return my_plugin_lock(thd, &global_system_variables.table_plugin);
+}
+
+
+/** @brief
+ Return the default storage engine handlerton for thread
+
+ SYNOPSIS
+ ha_default_handlerton(thd)
+ thd current thread
- if (thd && !my_strnncoll(&my_charset_latin1,
- (const uchar *)name, namelen,
- (const uchar *)"DEFAULT", 7))
- return (enum db_type) thd->variables.table_type;
+ RETURN
+ pointer to handlerton
+*/
+handlerton *ha_default_handlerton(THD *thd)
+{
+ plugin_ref plugin= ha_default_plugin(thd);
+ DBUG_ASSERT(plugin);
+ handlerton *hton= plugin_data(plugin, handlerton*);
+ DBUG_ASSERT(hton);
+ return hton;
+}
-retest:
- for (types= sys_table_types; *types; types++)
+
+/** @brief
+ Return the storage engine handlerton for the supplied name
+
+ SYNOPSIS
+ ha_resolve_by_name(thd, name)
+ thd current thread
+ name name of storage engine
+
+ RETURN
+ pointer to storage engine plugin handle
+*/
+plugin_ref ha_resolve_by_name(THD *thd, const LEX_STRING *name)
+{
+ const LEX_STRING *table_alias;
+ plugin_ref plugin;
+
+redo:
+ /* my_strnncoll is a macro and gcc doesn't do early expansion of macro */
+ if (thd && !my_charset_latin1.coll->strnncoll(&my_charset_latin1,
+ (const uchar *)name->str, name->length,
+ (const uchar *)STRING_WITH_LEN("DEFAULT"), 0))
+ return ha_default_plugin(thd);
+
+ if ((plugin= my_plugin_lock_by_name(thd, name, MYSQL_STORAGE_ENGINE_PLUGIN)))
{
- if (!my_strnncoll(&my_charset_latin1,
- (const uchar *)name, namelen,
- (const uchar *)(*types)->name, strlen((*types)->name)))
- return (enum db_type) (*types)->db_type;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (!(hton->flags & HTON_NOT_USER_SELECTABLE))
+ return plugin;
+
+ /*
+ unlocking plugin immediately after locking is relatively low cost.
+ */
+ plugin_unlock(thd, plugin);
}
/*
We check for the historical aliases.
*/
- for (table_alias= sys_table_aliases; table_alias->type; table_alias++)
+ for (table_alias= sys_table_aliases; table_alias->str; table_alias+= 2)
{
if (!my_strnncoll(&my_charset_latin1,
- (const uchar *)name, namelen,
- (const uchar *)table_alias->alias,
- strlen(table_alias->alias)))
+ (const uchar *)name->str, name->length,
+ (const uchar *)table_alias->str, table_alias->length))
{
- name= table_alias->type;
- namelen= strlen(name);
- goto retest;
+ name= table_alias + 1;
+ goto redo;
}
}
- return DB_TYPE_UNKNOWN;
+ return NULL;
}
-const char *ha_get_storage_engine(enum db_type db_type)
+plugin_ref ha_lock_engine(THD *thd, handlerton *hton)
{
- handlerton **types;
- for (types= sys_table_types; *types; types++)
+ if (hton)
{
- if (db_type == (*types)->db_type)
- return (*types)->name;
+ st_plugin_int **plugin= hton2plugin + hton->slot;
+
+#ifdef DBUG_OFF
+ return my_plugin_lock(thd, plugin);
+#else
+ return my_plugin_lock(thd, &plugin);
+#endif
}
- return "*NONE*";
+ return NULL;
}
-bool ha_check_storage_engine_flag(enum db_type db_type, uint32 flag)
+#ifdef NOT_USED
+static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root)
{
- handlerton **types;
- for (types= sys_table_types; *types; types++)
- {
- if (db_type == (*types)->db_type)
- return test((*types)->flags & flag);
- }
- return FALSE; // No matching engine
+ handlerton *hton= ha_default_handlerton(current_thd);
+ return (hton && hton->create) ? hton->create(hton, table, mem_root) : NULL;
}
+#endif
-my_bool ha_storage_engine_is_enabled(enum db_type database_type)
+handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
{
- handlerton **types;
- for (types= sys_table_types; *types; types++)
- {
- if ((database_type == (*types)->db_type) &&
- ((*types)->state == SHOW_OPTION_YES))
- return TRUE;
+ plugin_ref plugin;
+ switch (db_type) {
+ case DB_TYPE_DEFAULT:
+ return ha_default_handlerton(thd);
+ default:
+ if (db_type > DB_TYPE_UNKNOWN && db_type < DB_TYPE_DEFAULT &&
+ (plugin= ha_lock_engine(thd, installed_htons[db_type])))
+ return plugin_data(plugin, handlerton*);
+ /* fall through */
+ case DB_TYPE_UNKNOWN:
+ return NULL;
}
- return FALSE;
}
-/* Use other database handler if databasehandler is not compiled in */
-
-enum db_type ha_checktype(THD *thd, enum db_type database_type,
+/**
+ Use other database handler if databasehandler is not compiled in.
+*/
+handlerton *ha_checktype(THD *thd, enum legacy_db_type database_type,
bool no_substitute, bool report_error)
{
- if (ha_storage_engine_is_enabled(database_type))
- return database_type;
+ handlerton *hton= ha_resolve_by_legacy_type(thd, database_type);
+ if (ha_storage_engine_is_enabled(hton))
+ return hton;
if (no_substitute)
{
if (report_error)
{
- const char *engine_name= ha_get_storage_engine(database_type);
+ const char *engine_name= ha_resolve_storage_engine_name(hton);
my_error(ER_FEATURE_DISABLED,MYF(0),engine_name,engine_name);
}
- return DB_TYPE_UNKNOWN;
+ return NULL;
}
switch (database_type) {
#ifndef NO_HASH
case DB_TYPE_HASH:
- return (database_type);
+ return ha_resolve_by_legacy_type(thd, DB_TYPE_HASH);
#endif
case DB_TYPE_MRG_ISAM:
- return (DB_TYPE_MRG_MYISAM);
+ return ha_resolve_by_legacy_type(thd, DB_TYPE_MRG_MYISAM);
default:
break;
}
- return ((enum db_type) thd->variables.table_type != DB_TYPE_UNKNOWN ?
- (enum db_type) thd->variables.table_type :
- ((enum db_type) global_system_variables.table_type !=
- DB_TYPE_UNKNOWN ?
- (enum db_type) global_system_variables.table_type : DB_TYPE_MYISAM)
- );
+ return ha_default_handlerton(thd);
} /* ha_checktype */
-handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type)
+handler *get_new_handler(TABLE_SHARE *share, MEM_ROOT *alloc,
+ handlerton *db_type)
{
- switch (db_type) {
-#ifndef NO_HASH
- case DB_TYPE_HASH:
- return new (alloc) ha_hash(table);
-#endif
- case DB_TYPE_MRG_MYISAM:
- case DB_TYPE_MRG_ISAM:
- if (have_merge_db == SHOW_OPTION_YES)
- return new (alloc) ha_myisammrg(table);
- return NULL;
-#ifdef HAVE_BERKELEY_DB
- case DB_TYPE_BERKELEY_DB:
- if (have_berkeley_db == SHOW_OPTION_YES)
- return new (alloc) ha_berkeley(table);
- return NULL;
-#endif
-#ifdef HAVE_INNOBASE_DB
- case DB_TYPE_INNODB:
- if (have_innodb == SHOW_OPTION_YES)
- return new (alloc) ha_innobase(table);
- return NULL;
-#endif
-#ifdef HAVE_EXAMPLE_DB
- case DB_TYPE_EXAMPLE_DB:
- if (have_example_db == SHOW_OPTION_YES)
- return new (alloc) ha_example(table);
- return NULL;
-#endif
-#if defined(HAVE_ARCHIVE_DB)
- case DB_TYPE_ARCHIVE_DB:
- if (have_archive_db == SHOW_OPTION_YES)
- return new (alloc) ha_archive(table);
- return NULL;
-#endif
-#ifdef HAVE_BLACKHOLE_DB
- case DB_TYPE_BLACKHOLE_DB:
- if (have_blackhole_db == SHOW_OPTION_YES)
- return new (alloc) ha_blackhole(table);
- return NULL;
-#endif
-#ifdef HAVE_FEDERATED_DB
- case DB_TYPE_FEDERATED_DB:
- if (have_federated_db == SHOW_OPTION_YES)
- return new (alloc) ha_federated(table);
- return NULL;
-#endif
-#ifdef HAVE_CSV_DB
- case DB_TYPE_CSV_DB:
- if (have_csv_db == SHOW_OPTION_YES)
- return new (alloc) ha_tina(table);
- return NULL;
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
- case DB_TYPE_NDBCLUSTER:
- if (have_ndbcluster == SHOW_OPTION_YES)
- return new (alloc) ha_ndbcluster(table);
- return NULL;
-#endif
- case DB_TYPE_HEAP:
- return new (alloc) ha_heap(table);
- default: // should never happen
+ handler *file;
+ DBUG_ENTER("get_new_handler");
+ DBUG_PRINT("enter", ("alloc: 0x%lx", (long) alloc));
+
+ if (db_type && db_type->state == SHOW_OPTION_YES && db_type->create)
{
- enum db_type def=(enum db_type) current_thd->variables.table_type;
- /* Try first with 'default table type' */
- if (db_type != def)
- return get_new_handler(table, alloc, def);
+ if ((file= db_type->create(db_type, share, alloc)))
+ file->init();
+ DBUG_RETURN(file);
}
- /* Fall back to MyISAM */
- case DB_TYPE_MYISAM:
- return new (alloc) ha_myisam(table);
+ /*
+ Try the default table type
+ Here the call to current_thd() is ok as we call this function a lot of
+ times but we enter this branch very seldom.
+ */
+ DBUG_RETURN(get_new_handler(share, alloc, ha_default_handlerton(current_thd)));
+}
+
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+handler *get_ha_partition(partition_info *part_info)
+{
+ ha_partition *partition;
+ DBUG_ENTER("get_ha_partition");
+ if ((partition= new ha_partition(partition_hton, part_info)))
+ {
+ if (partition->initialise_partition(current_thd->mem_root))
+ {
+ delete partition;
+ partition= 0;
+ }
+ else
+ partition->init();
}
+ else
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), sizeof(ha_partition));
+ }
+ DBUG_RETURN(((handler*) partition));
}
+#endif
-/*
- Register handler error messages for use with my_error().
- SYNOPSIS
- ha_init_errors()
+/**
+ Register handler error messages for use with my_error().
- RETURN
+ @retval
0 OK
- != 0 Error
+ @retval
+ !=0 Error
*/
-static int ha_init_errors(void)
+int ha_init_errors(void)
{
#define SETMSG(nr, msg) errmsgs[(nr) - HA_ERR_FIRST]= (msg)
const char **errmsgs;
@@ -421,6 +337,7 @@ static int ha_init_errors(void)
SETMSG(HA_ERR_TABLE_EXIST, ER(ER_TABLE_EXISTS_ERROR));
SETMSG(HA_ERR_NO_CONNECTION, "Could not connect to storage engine");
SETMSG(HA_ERR_TABLE_DEF_CHANGED, ER(ER_TABLE_DEF_CHANGED));
+ SETMSG(HA_ERR_FOREIGN_DUPLICATE_KEY, "FK constraint would lead to duplicate key");
SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER(ER_TABLE_NEEDS_UPGRADE));
SETMSG(HA_ERR_TABLE_READONLY, ER(ER_OPEN_AS_READONLY));
SETMSG(HA_ERR_AUTOINC_READ_FAILED, ER(ER_AUTOINC_READ_FAILED));
@@ -431,17 +348,14 @@ static int ha_init_errors(void)
}
-/*
+/**
Unregister handler error messages.
- SYNOPSIS
- ha_finish_errors()
-
- RETURN
+ @retval
0 OK
- != 0 Error
+ @retval
+ !=0 Error
*/
-
static int ha_finish_errors(void)
{
const char **errmsgs;
@@ -449,40 +363,144 @@ static int ha_finish_errors(void)
/* Allocate a pointer array for the error message strings. */
if (! (errmsgs= my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST)))
return 1;
- my_free((gptr) errmsgs, MYF(0));
+ my_free((uchar*) errmsgs, MYF(0));
return 0;
}
-static inline void ha_was_inited_ok(handlerton **ht)
+int ha_finalize_handlerton(st_plugin_int *plugin)
{
- uint tmp= (*ht)->savepoint_offset;
- (*ht)->savepoint_offset= savepoint_alloc_size;
- savepoint_alloc_size+= tmp;
- (*ht)->slot= total_ha++;
- if ((*ht)->prepare)
- total_ha_2pc++;
+ handlerton *hton= (handlerton *)plugin->data;
+ DBUG_ENTER("ha_finalize_handlerton");
+
+ switch (hton->state)
+ {
+ case SHOW_OPTION_NO:
+ case SHOW_OPTION_DISABLED:
+ break;
+ case SHOW_OPTION_YES:
+ if (installed_htons[hton->db_type] == hton)
+ installed_htons[hton->db_type]= NULL;
+ break;
+ };
+
+ if (hton->panic)
+ hton->panic(hton, HA_PANIC_CLOSE);
+
+ if (plugin->plugin->deinit)
+ {
+ /*
+ Today we have no defined/special behavior for uninstalling
+ engine plugins.
+ */
+ DBUG_PRINT("info", ("Deinitializing plugin: '%s'", plugin->name.str));
+ if (plugin->plugin->deinit(NULL))
+ {
+ DBUG_PRINT("warning", ("Plugin '%s' deinit function returned error.",
+ plugin->name.str));
+ }
+ }
+
+ my_free((uchar*)hton, MYF(0));
+
+ DBUG_RETURN(0);
}
-int ha_init()
-{
- int error= 0;
- handlerton **types;
- total_ha= savepoint_alloc_size= 0;
- if (ha_init_errors())
- return 1;
+int ha_initialize_handlerton(st_plugin_int *plugin)
+{
+ handlerton *hton;
+ DBUG_ENTER("ha_initialize_handlerton");
+ DBUG_PRINT("plugin", ("initialize plugin: '%s'", plugin->name.str));
+
+ hton= (handlerton *)my_malloc(sizeof(handlerton),
+ MYF(MY_WME | MY_ZEROFILL));
+ /* Historical Requirement */
+ plugin->data= hton; // shortcut for the future
+ if (plugin->plugin->init)
+ {
+ if (plugin->plugin->init(hton))
+ {
+ sql_print_error("Plugin '%s' init function returned error.",
+ plugin->name.str);
+ goto err;
+ }
+ }
/*
- We now initialize everything here.
+ the switch below and hton->state should be removed when
+ command-line options for plugins will be implemented
*/
- for (types= sys_table_types; *types; types++)
- {
- if (!(*types)->init || !(*types)->init())
- ha_was_inited_ok(types);
- else
- (*types)->state= SHOW_OPTION_DISABLED;
+ switch (hton->state) {
+ case SHOW_OPTION_NO:
+ break;
+ case SHOW_OPTION_YES:
+ {
+ uint tmp;
+ /* now check the db_type for conflict */
+ if (hton->db_type <= DB_TYPE_UNKNOWN ||
+ hton->db_type >= DB_TYPE_DEFAULT ||
+ installed_htons[hton->db_type])
+ {
+ int idx= (int) DB_TYPE_FIRST_DYNAMIC;
+
+ while (idx < (int) DB_TYPE_DEFAULT && installed_htons[idx])
+ idx++;
+
+ if (idx == (int) DB_TYPE_DEFAULT)
+ {
+ sql_print_warning("Too many storage engines!");
+ DBUG_RETURN(1);
+ }
+ if (hton->db_type != DB_TYPE_UNKNOWN)
+ sql_print_warning("Storage engine '%s' has conflicting typecode. "
+ "Assigning value %d.", plugin->plugin->name, idx);
+ hton->db_type= (enum legacy_db_type) idx;
+ }
+ installed_htons[hton->db_type]= hton;
+ tmp= hton->savepoint_offset;
+ hton->savepoint_offset= savepoint_alloc_size;
+ savepoint_alloc_size+= tmp;
+ hton->slot= total_ha++;
+ hton2plugin[hton->slot]=plugin;
+ if (hton->prepare)
+ total_ha_2pc++;
+ break;
+ }
+ /* fall through */
+ default:
+ hton->state= SHOW_OPTION_DISABLED;
+ break;
}
+
+ /*
+ This is entirely for legacy. We will create a new "disk based" hton and a
+ "memory" hton which will be configurable longterm. We should be able to
+ remove partition and myisammrg.
+ */
+ switch (hton->db_type) {
+ case DB_TYPE_HEAP:
+ heap_hton= hton;
+ break;
+ case DB_TYPE_MYISAM:
+ myisam_hton= hton;
+ break;
+ case DB_TYPE_PARTITION_DB:
+ partition_hton= hton;
+ break;
+ default:
+ break;
+ };
+
+ DBUG_RETURN(0);
+err:
+ DBUG_RETURN(1);
+}
+
+int ha_init()
+{
+ int error= 0;
+ DBUG_ENTER("ha_init");
DBUG_ASSERT(total_ha < MAX_HA);
/*
@@ -492,93 +510,368 @@ int ha_init()
*/
opt_using_transactions= total_ha>(ulong)opt_bin_log;
savepoint_alloc_size+= sizeof(SAVEPOINT);
- return error;
+ DBUG_RETURN(error);
}
- /* close, flush or restart databases */
- /* Ignore this for other databases than ours */
-
-int ha_panic(enum ha_panic_function flag)
+int ha_end()
{
- int error=0;
-#ifndef NO_HASH
- error|=h_panic(flag); /* fix hash */
-#endif
-#ifdef HAVE_ISAM
- error|=mrg_panic(flag);
- error|=nisam_panic(flag);
-#endif
- error|=heap_panic(flag);
- error|=mi_panic(flag);
- error|=myrg_panic(flag);
-#ifdef HAVE_BERKELEY_DB
- if (have_berkeley_db == SHOW_OPTION_YES)
- error|=berkeley_end();
-#endif
-#ifdef HAVE_BLACKHOLE_DB
- if (have_blackhole_db == SHOW_OPTION_YES)
- error|= blackhole_db_end();
-#endif
-#ifdef HAVE_INNOBASE_DB
- if (have_innodb == SHOW_OPTION_YES)
- error|=innobase_end();
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- error|=ndbcluster_end();
-#endif
-#ifdef HAVE_FEDERATED_DB
- if (have_federated_db == SHOW_OPTION_YES)
- error|= federated_db_end();
-#endif
-#if defined(HAVE_ARCHIVE_DB)
- if (have_archive_db == SHOW_OPTION_YES)
- error|= archive_db_end();
-#endif
-#ifdef HAVE_CSV_DB
- if (have_csv_db == SHOW_OPTION_YES)
- error|= tina_end();
-#endif
+ int error= 0;
+ DBUG_ENTER("ha_end");
+
+
+ /*
+ This should be eventually based on the graceful shutdown flag.
+ So if the flag is equal to HA_PANIC_CLOSE, then deallocate
+ the errors.
+ */
if (ha_finish_errors())
error= 1;
- return error;
-} /* ha_panic */
+
+ DBUG_RETURN(error);
+}
+
+static my_bool dropdb_handlerton(THD *unused1, plugin_ref plugin,
+ void *path)
+{
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->drop_database)
+ hton->drop_database(hton, (char *)path);
+ return FALSE;
+}
+
void ha_drop_database(char* path)
{
-#ifdef HAVE_INNOBASE_DB
- if (have_innodb == SHOW_OPTION_YES)
- innobase_drop_database(path);
-#endif
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- ndbcluster_drop_database(path);
-#endif
+ plugin_foreach(NULL, dropdb_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, path);
}
-/* don't bother to rollback here, it's done already */
+
+static my_bool closecon_handlerton(THD *thd, plugin_ref plugin,
+ void *unused)
+{
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ /*
+ there's no need to rollback here as all transactions must
+ be rolled back already
+ */
+ if (hton->state == SHOW_OPTION_YES && hton->close_connection &&
+ thd_get_ha_data(thd, hton))
+ hton->close_connection(hton, thd);
+ return FALSE;
+}
+
+
+/**
+ @note
+ don't bother to rollback here, it's done already
+*/
void ha_close_connection(THD* thd)
{
- handlerton **types;
- for (types= sys_table_types; *types; types++)
- if (thd->ha_data[(*types)->slot])
- (*types)->close_connection(thd);
+ plugin_foreach(thd, closecon_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, 0);
}
/* ========================================================================
======================= TRANSACTIONS ===================================*/
-/*
- Register a storage engine for a transaction
+/**
+ Transaction handling in the server
+ ==================================
+
+ In each client connection, MySQL maintains two transactional
+ states:
+ - a statement transaction,
+ - a standard, also called normal transaction.
+
+ Historical note
+ ---------------
+ "Statement transaction" is a non-standard term that comes
+ from the times when MySQL supported BerkeleyDB storage engine.
+
+ First of all, it should be said that BerkeleyDB auto-commit
+ mode auto-commits operations that are atomic to the storage
+ engine itself, such as a write of a record, and are too
+ high-granular to be atomic from the application perspective
+ (MySQL). One SQL statement could involve many BerkeleyDB
+ auto-committed operations and thus BerkeleyDB auto-commit was of
+ little use to MySQL.
+
+ Secondly, instead of SQL standard savepoints, BerkeleyDB
+ provided the concept of "nested transactions". In a nutshell,
+ transactions could be arbitrarily nested, but when the parent
+ transaction was committed or aborted, all its child (nested)
+ transactions were committed or aborted as well.
+ Commit of a nested transaction, in turn, made its changes
+ visible, but not durable: it destroyed the nested transaction,
+ all its changes would become available to the parent and
+ currently active nested transactions of this parent.
+
+ So the mechanism of nested transactions was employed to
+ provide "all or nothing" guarantee of SQL statements
+ required by the standard.
+ A nested transaction would be created at start of each SQL
+ statement, and destroyed (committed or aborted) at statement
+ end. Such nested transaction was internally referred to as
+ a "statement transaction" and gave birth to the term.
+
+ <Historical note ends>
+
+ Since then a statement transaction is started for each statement
+ that accesses transactional tables or uses the binary log. If
+ the statement succeeds, the statement transaction is committed.
+ If the statement fails, the transaction is rolled back. Commits
+ of statement transactions are not durable -- each such
+ transaction is nested in the normal transaction, and if the
+ normal transaction is rolled back, the effects of all enclosed
+ statement transactions are undone as well. Technically,
+ a statement transaction can be viewed as a savepoint which is
+ maintained automatically in order to make effects of one
+ statement atomic.
+
+ The normal transaction is started by the user and is ended
+ usually upon a user request as well. The normal transaction
+ encloses transactions of all statements issued between
+ its beginning and its end.
+ In autocommit mode, the normal transaction is equivalent
+ to the statement transaction.
+
+ Since MySQL supports PSEA (pluggable storage engine
+ architecture), more than one transactional engine can be
+ active at a time. Hence transactions, from the server
+ point of view, are always distributed. In particular,
+ transactional state is maintained independently for each
+ engine. In order to commit a transaction the two phase
+ commit protocol is employed.
+
+ Not all statements are executed in context of a transaction.
+ Administrative and status information statements do not modify
+ engine data, and thus do not start a statement transaction and
+ also have no effect on the normal transaction. Examples of such
+ statements are SHOW STATUS and RESET SLAVE.
+
+ Similarly DDL statements are not transactional,
+ and therefore a transaction is [almost] never started for a DDL
+ statement. The difference between a DDL statement and a purely
+ administrative statement though is that a DDL statement always
+ commits the current transaction before proceeding, if there is
+ any.
+
+ At last, SQL statements that work with non-transactional
+ engines also have no effect on the transaction state of the
+ connection. Even though they are written to the binary log,
+ and the binary log is, overall, transactional, the writes
+ are done in "write-through" mode, directly to the binlog
+ file, followed by an OS cache sync, in other words,
+ bypassing the binlog undo log (translog).
+ They do not commit the current normal transaction.
+ A failure of a statement that uses non-transactional tables
+ would cause a rollback of the statement transaction, but
+ in case no non-transactional tables are used,
+ no statement transaction is started.
+
+ Data layout
+ -----------
+
+ The server stores its transaction-related data in
+ thd->transaction. This structure has two members of type
+ THD_TRANS. These members correspond to the statement and
+ normal transactions respectively:
+
+ - thd->transaction.stmt contains a list of engines
+ that are participating in the given statement
+ - thd->transaction.all contains a list of engines that
+ have participated in any of the statement transactions started
+ within the context of the normal transaction.
+ Each element of the list contains a pointer to the storage
+ engine, engine-specific transactional data, and engine-specific
+ transaction flags.
+
+ In autocommit mode thd->transaction.all is empty.
+ Instead, data of thd->transaction.stmt is
+ used to commit/rollback the normal transaction.
+
+ The list of registered engines has a few important properties:
+ - no engine is registered in the list twice
+ - engines are present in the list a reverse temporal order --
+ new participants are always added to the beginning of the list.
+
+ Transaction life cycle
+ ----------------------
+
+ When a new connection is established, thd->transaction
+ members are initialized to an empty state.
+ If a statement uses any tables, all affected engines
+ are registered in the statement engine list. In
+ non-autocommit mode, the same engines are registered in
+ the normal transaction list.
+ At the end of the statement, the server issues a commit
+ or a roll back for all engines in the statement list.
+ At this point transaction flags of an engine, if any, are
+ propagated from the statement list to the list of the normal
+ transaction.
+ When commit/rollback is finished, the statement list is
+ cleared. It will be filled in again by the next statement,
+ and emptied again at the next statement's end.
+
+ The normal transaction is committed in a similar way
+ (by going over all engines in thd->transaction.all list)
+ but at different times:
+ - upon COMMIT SQL statement is issued by the user
+ - implicitly, by the server, at the beginning of a DDL statement
+ or SET AUTOCOMMIT={0|1} statement.
+
+ The normal transaction can be rolled back as well:
+ - if the user has requested so, by issuing ROLLBACK SQL
+ statement
+ - if one of the storage engines requested a rollback
+ by setting thd->transaction_rollback_request. This may
+ happen in case, e.g., when the transaction in the engine was
+ chosen a victim of the internal deadlock resolution algorithm
+ and rolled back internally. When such a situation happens, there
+ is little the server can do and the only option is to rollback
+ transactions in all other participating engines. In this case
+ the rollback is accompanied by an error sent to the user.
+
+ As follows from the use cases above, the normal transaction
+ is never committed when there is an outstanding statement
+ transaction. In most cases there is no conflict, since
+ commits of the normal transaction are issued by a stand-alone
+ administrative or DDL statement, thus no outstanding statement
+ transaction of the previous statement exists. Besides,
+ all statements that manipulate with the normal transaction
+ are prohibited in stored functions and triggers, therefore
+ no conflicting situation can occur in a sub-statement either.
+ The remaining rare cases when the server explicitly has
+ to commit the statement transaction prior to committing the normal
+ one cover error-handling scenarios (see for example
+ SQLCOM_LOCK_TABLES).
+
+ When committing a statement or a normal transaction, the server
+ either uses the two-phase commit protocol, or issues a commit
+ in each engine independently. The two-phase commit protocol
+ is used only if:
+ - all participating engines support two-phase commit (provide
+ handlerton::prepare PSEA API call) and
+ - transactions in at least two engines modify data (i.e. are
+ not read-only).
+
+ Note that the two phase commit is used for
+ statement transactions, even though they are not durable anyway.
+ This is done to ensure logical consistency of data in a multiple-
+ engine transaction.
+ For example, imagine that some day MySQL supports unique
+ constraint checks deferred till the end of statement. In such
+ case a commit in one of the engines may yield ER_DUP_KEY,
+ and MySQL should be able to gracefully abort statement
+ transactions of other participants.
+
+ After the normal transaction has been committed,
+ thd->transaction.all list is cleared.
+
+ When a connection is closed, the current normal transaction, if
+ any, is rolled back.
+
+ Roles and responsibilities
+ --------------------------
+
+ The server has no way to know that an engine participates in
+ the statement and a transaction has been started
+ in it unless the engine says so. Thus, in order to be
+ a part of a transaction, the engine must "register" itself.
+ This is done by invoking trans_register_ha() server call.
+ Normally the engine registers itself whenever handler::external_lock()
+ is called. trans_register_ha() can be invoked many times: if
+ an engine is already registered, the call does nothing.
+ In case autocommit is not set, the engine must register itself
+ twice -- both in the statement list and in the normal transaction
+ list.
+ In which list to register is a parameter of trans_register_ha().
+
+ Note, that although the registration interface in itself is
+ fairly clear, the current usage practice often leads to undesired
+ effects. E.g. since a call to trans_register_ha() in most engines
+ is embedded into implementation of handler::external_lock(), some
+ DDL statements start a transaction (at least from the server
+ point of view) even though they are not expected to. E.g.
+ CREATE TABLE does not start a transaction, since
+ handler::external_lock() is never called during CREATE TABLE. But
+ CREATE TABLE ... SELECT does, since handler::external_lock() is
+ called for the table that is being selected from. This has no
+ practical effects currently, but must be kept in mind
+ nevertheless.
+
+ Once an engine is registered, the server will do the rest
+ of the work.
+
+ During statement execution, whenever any of data-modifying
+ PSEA API methods is used, e.g. handler::write_row() or
+ handler::update_row(), the read-write flag is raised in the
+ statement transaction for the involved engine.
+ Currently all PSEA calls are "traced", and the data cannot be
+ changed in a way other than issuing a PSEA call. Important:
+ unless this invariant is preserved the server will not know that
+ a transaction in a given engine is read-write and will not
+ involve the two-phase commit protocol!
+
+ At the end of a statement, the server call
+ ha_autocommit_or_rollback() is invoked. This call in turn
+ invokes handlerton::prepare() for every involved engine.
+ Prepare is followed by a call to handlerton::commit_one_phase().
+ If a one-phase commit will suffice, handlerton::prepare() is not
+ invoked and the server only calls handlerton::commit_one_phase().
+ At statement commit, the statement-related read-write engine
+ flag is propagated to the corresponding flag in the normal
+ transaction. When the commit is complete, the list of registered
+ engines is cleared.
+
+ Rollback is handled in a similar fashion.
+
+ Additional notes on DDL and the normal transaction.
+ ---------------------------------------------------
+
+ DDLs and operations with non-transactional engines
+ do not "register" in thd->transaction lists, and thus do not
+ modify the transaction state. Besides, each DDL in
+ MySQL is prefixed with an implicit normal transaction commit
+ (a call to end_active_trans()), and thus leaves nothing
+ to modify.
+ However, as it has been pointed out with CREATE TABLE .. SELECT,
+ some DDL statements can start a *new* transaction.
+
+ Behaviour of the server in this case is currently badly
+ defined.
+ DDL statements use a form of "semantic" logging
+ to maintain atomicity: if CREATE TABLE .. SELECT failed,
+ the newly created table is deleted.
+ In addition, some DDL statements issue interim transaction
+ commits: e.g. ALTER TABLE issues a commit after data is copied
+ from the original table to the internal temporary table. Other
+ statements, e.g. CREATE TABLE ... SELECT, do not always commit
+ after themselves.
+ And finally there is a group of DDL statements such as
+ RENAME/DROP TABLE that doesn't start a new transaction
+ and doesn't commit.
+
+ This diversity makes it hard to say what will happen if
+ by chance a stored function is invoked during a DDL --
+ whether any modifications it makes will be committed or not
+ is not clear. Fortunately, the SQL grammar of only a few DDLs allows
+ invocation of a stored function.
+
+ A consistent behaviour is perhaps to always commit the normal
+ transaction after all DDLs, just like the statement transaction
+ is always committed at the end of all statements.
+*/
- DESCRIPTION
- Every storage engine MUST call this function when it starts
- a transaction or a statement (that is it must be called both for the
- "beginning of transaction" and "beginning of statement").
- Only storage engines registered for the transaction/statement
- will know when to commit/rollback it.
+/**
+ Register a storage engine for a transaction.
- NOTE
+ Every storage engine MUST call this function when it starts
+ a transaction or a statement (that is it must be called both for the
+ "beginning of transaction" and "beginning of statement").
+ Only storage engines registered for the transaction/statement
+ will know when to commit/rollback it.
+
+ @note
trans_register_ha is idempotent - storage engine may register many
times per transaction.
@@ -586,7 +879,7 @@ void ha_close_connection(THD* thd)
void trans_register_ha(THD *thd, bool all, handlerton *ht_arg)
{
THD_TRANS *trans;
- handlerton **ht;
+ Ha_trx_info *ha_info;
DBUG_ENTER("trans_register_ha");
DBUG_PRINT("enter",("%s", all ? "all" : "stmt"));
@@ -598,39 +891,42 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht_arg)
else
trans= &thd->transaction.stmt;
- for (ht=trans->ht; *ht; ht++)
- if (*ht == ht_arg)
- DBUG_VOID_RETURN; /* already registered, return */
+ ha_info= thd->ha_data[ht_arg->slot].ha_info + static_cast<unsigned>(all);
+
+ if (ha_info->is_started())
+ DBUG_VOID_RETURN; /* already registered, return */
+
+ ha_info->register_ha(trans, ht_arg);
- trans->ht[trans->nht++]=ht_arg;
- DBUG_ASSERT(*ht == ht_arg);
trans->no_2pc|=(ht_arg->prepare==0);
if (thd->transaction.xid_state.xid.is_null())
thd->transaction.xid_state.xid.set(thd->query_id);
DBUG_VOID_RETURN;
}
-/*
- RETURN
- 0 - ok
- 1 - error, transaction was rolled back
+/**
+ @retval
+ 0 ok
+ @retval
+ 1 error, transaction was rolled back
*/
int ha_prepare(THD *thd)
{
int error=0, all=1;
THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
- handlerton **ht=trans->ht;
+ Ha_trx_info *ha_info= trans->ha_list;
DBUG_ENTER("ha_prepare");
#ifdef USING_TRANSACTIONS
- if (trans->nht)
+ if (ha_info)
{
- for (; *ht; ht++)
+ for (; ha_info; ha_info= ha_info->next())
{
int err;
- statistic_increment(thd->status_var.ha_prepare_count,&LOCK_status);
- if ((*ht)->prepare)
+ handlerton *ht= ha_info->ht();
+ status_var_increment(thd->status_var.ha_prepare_count);
+ if (ht->prepare)
{
- if ((err= (*(*ht)->prepare)(thd, all)))
+ if ((err= ht->prepare(ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
ha_rollback_trans(thd, all);
@@ -641,7 +937,8 @@ int ha_prepare(THD *thd)
else
{
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), (*ht)->name);
+ ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
+ ha_resolve_storage_engine_name(ht));
}
}
}
@@ -649,21 +946,103 @@ int ha_prepare(THD *thd)
DBUG_RETURN(error);
}
-/*
- RETURN
- 0 - ok
- 1 - transaction was rolled back
- 2 - error during commit, data may be inconsistent
+/**
+ Check if we can skip the two-phase commit.
+
+ A helper function to evaluate if two-phase commit is mandatory.
+ As a side effect, propagates the read-only/read-write flags
+ of the statement transaction to its enclosing normal transaction.
+
+ If we have at least two engines with read-write changes we must
+ run a two-phase commit. Otherwise we can run several independent
+ commits as the only transactional engine has read-write changes
+ and others are read-only.
+
+ @retval 0 All engines are read-only.
+ @retval 1 We have the only engine with read-write changes.
+ @retval >1 More than one engine have read-write changes.
+ Note: return value might NOT be the exact number of
+ engines with read-write changes.
+*/
+
+static
+uint
+ha_check_and_coalesce_trx_read_only(THD *thd, Ha_trx_info *ha_list,
+ bool all)
+{
+ /* The number of storage engines that have actual changes. */
+ unsigned rw_ha_count= 0;
+ Ha_trx_info *ha_info;
+
+ for (ha_info= ha_list; ha_info; ha_info= ha_info->next())
+ {
+ if (ha_info->is_trx_read_write())
+ ++rw_ha_count;
+
+ if (! all)
+ {
+ Ha_trx_info *ha_info_all= &thd->ha_data[ha_info->ht()->slot].ha_info[1];
+ DBUG_ASSERT(ha_info != ha_info_all);
+ /*
+ Merge read-only/read-write information about statement
+ transaction to its enclosing normal transaction. Do this
+ only if in a real transaction -- that is, if we know
+ that ha_info_all is registered in thd->transaction.all.
+ Since otherwise we only clutter the normal transaction flags.
+ */
+ if (ha_info_all->is_started()) /* FALSE if autocommit. */
+ ha_info_all->coalesce_trx_with(ha_info);
+ }
+ else if (rw_ha_count > 1)
+ {
+ /*
+ It is a normal transaction, so we don't need to merge read/write
+ information up, and the need for two-phase commit has been
+ already established. Break the loop prematurely.
+ */
+ break;
+ }
+ }
+ return rw_ha_count;
+}
+
+
+/**
+ @retval
+ 0 ok
+ @retval
+ 1 transaction was rolled back
+ @retval
+ 2 error during commit, data may be inconsistent
+
+ @todo
+ Since we don't support nested statement transactions in 5.0,
+ we can't commit or rollback stmt transactions while we are inside
+ stored functions or triggers. So we simply do nothing now.
+ TODO: This should be fixed in later ( >= 5.1) releases.
*/
int ha_commit_trans(THD *thd, bool all)
{
int error= 0, cookie= 0;
+ /*
+ 'all' means that this is either an explicit commit issued by
+ user, or an implicit commit issued by a DDL.
+ */
THD_TRANS *trans= all ? &thd->transaction.all : &thd->transaction.stmt;
- bool is_real_trans= all || thd->transaction.all.nht == 0;
- handlerton **ht= trans->ht;
+ bool is_real_trans= all || thd->transaction.all.ha_list == 0;
+ Ha_trx_info *ha_info= trans->ha_list;
my_xid xid= thd->transaction.xid_state.xid.get_my_xid();
DBUG_ENTER("ha_commit_trans");
+ /*
+ We must not commit the normal transaction if a statement
+ transaction is pending. Otherwise statement transaction
+ flags will not get propagated to its normal transaction's
+ counterpart.
+ */
+ DBUG_ASSERT(thd->transaction.stmt.ha_list == NULL ||
+ trans == &thd->transaction.stmt);
+
if (thd->in_sub_stmt)
{
/*
@@ -685,30 +1064,62 @@ int ha_commit_trans(THD *thd, bool all)
DBUG_RETURN(2);
}
#ifdef USING_TRANSACTIONS
- if (trans->nht)
+ if (ha_info)
{
- if (is_real_trans && wait_if_global_read_lock(thd, 0, 0))
- {
- ha_rollback_trans(thd, all);
- DBUG_RETURN(1);
- }
+ uint rw_ha_count;
+ bool rw_trans;
+
DBUG_EXECUTE_IF("crash_commit_before", abort(););
/* Close all cursors that can not survive COMMIT */
if (is_real_trans) /* not a statement commit */
thd->stmt_map.close_transient_cursors();
- if (!trans->no_2pc && trans->nht > 1)
+ rw_ha_count= ha_check_and_coalesce_trx_read_only(thd, ha_info, all);
+ /* rw_trans is TRUE when we in a transaction changing data */
+ rw_trans= is_real_trans && (rw_ha_count > 0);
+
+ if (rw_trans &&
+ wait_if_global_read_lock(thd, 0, 0))
{
- for (; *ht && !error; ht++)
+ ha_rollback_trans(thd, all);
+ DBUG_RETURN(1);
+ }
+
+ if (rw_trans &&
+ opt_readonly &&
+ !(thd->security_ctx->master_access & SUPER_ACL) &&
+ !thd->slave_thread)
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only");
+ ha_rollback_trans(thd, all);
+ error= 1;
+ goto end;
+ }
+
+ if (!trans->no_2pc && (rw_ha_count > 1))
+ {
+ for (; ha_info && !error; ha_info= ha_info->next())
{
int err;
- if ((err= (*(*ht)->prepare)(thd, all)))
+ handlerton *ht= ha_info->ht();
+ /*
+ Do not call two-phase commit if this particular
+ transaction is read-only. This allows for simpler
+ implementation in engines that are always read-only.
+ */
+ if (! ha_info->is_trx_read_write())
+ continue;
+ /*
+ Sic: we know that prepare() is not NULL since otherwise
+ trans->no_2pc would have been set.
+ */
+ if ((err= ht->prepare(ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
error= 1;
}
- statistic_increment(thd->status_var.ha_prepare_count,&LOCK_status);
+ status_var_increment(thd->status_var.ha_prepare_count);
}
DBUG_EXECUTE_IF("crash_commit_after_prepare", abort(););
if (error || (is_real_trans && xid &&
@@ -726,39 +1137,41 @@ int ha_commit_trans(THD *thd, bool all)
tc_log->unlog(cookie, xid);
DBUG_EXECUTE_IF("crash_commit_after", abort(););
end:
- if (is_real_trans)
+ if (rw_trans)
start_waiting_global_read_lock(thd);
}
#endif /* USING_TRANSACTIONS */
DBUG_RETURN(error);
}
-/*
- NOTE - this function does not care about global read lock.
- A caller should.
+/**
+ @note
+ This function does not care about global read lock. A caller should.
*/
int ha_commit_one_phase(THD *thd, bool all)
{
int error=0;
THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
- bool is_real_trans=all || thd->transaction.all.nht == 0;
- handlerton **ht=trans->ht;
+ bool is_real_trans=all || thd->transaction.all.ha_list == 0;
+ Ha_trx_info *ha_info= trans->ha_list, *ha_info_next;
DBUG_ENTER("ha_commit_one_phase");
#ifdef USING_TRANSACTIONS
- if (trans->nht)
+ if (ha_info)
{
- for (ht=trans->ht; *ht; ht++)
+ for (; ha_info; ha_info= ha_info_next)
{
int err;
- if ((err= (*(*ht)->commit)(thd, all)))
+ handlerton *ht= ha_info->ht();
+ if ((err= ht->commit(ht, thd, all)))
{
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
error=1;
}
- statistic_increment(thd->status_var.ha_commit_count,&LOCK_status);
- *ht= 0;
+ status_var_increment(thd->status_var.ha_commit_count);
+ ha_info_next= ha_info->next();
+ ha_info->reset(); /* keep it conveniently zero-filled */
}
- trans->nht=0;
+ trans->ha_list= 0;
trans->no_2pc=0;
if (is_real_trans)
thd->transaction.xid_state.xid.null();
@@ -781,8 +1194,17 @@ int ha_rollback_trans(THD *thd, bool all)
{
int error=0;
THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
- bool is_real_trans=all || thd->transaction.all.nht == 0;
+ Ha_trx_info *ha_info= trans->ha_list, *ha_info_next;
+ bool is_real_trans=all || thd->transaction.all.ha_list == 0;
DBUG_ENTER("ha_rollback_trans");
+
+ /*
+ We must not rollback the normal transaction if a statement
+ transaction is pending.
+ */
+ DBUG_ASSERT(thd->transaction.stmt.ha_list == NULL ||
+ trans == &thd->transaction.stmt);
+
if (thd->in_sub_stmt)
{
/*
@@ -797,24 +1219,26 @@ int ha_rollback_trans(THD *thd, bool all)
DBUG_RETURN(1);
}
#ifdef USING_TRANSACTIONS
- if (trans->nht)
+ if (ha_info)
{
/* Close all cursors that can not survive ROLLBACK */
if (is_real_trans) /* not a statement commit */
thd->stmt_map.close_transient_cursors();
- for (handlerton **ht=trans->ht; *ht; ht++)
+ for (; ha_info; ha_info= ha_info_next)
{
int err;
- if ((err= (*(*ht)->rollback)(thd, all)))
+ handlerton *ht= ha_info->ht();
+ if ((err= ht->rollback(ht, thd, all)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
}
- statistic_increment(thd->status_var.ha_rollback_count,&LOCK_status);
- *ht= 0;
+ status_var_increment(thd->status_var.ha_rollback_count);
+ ha_info_next= ha_info->next();
+ ha_info->reset(); /* keep it conveniently zero-filled */
}
- trans->nht=0;
+ trans->ha_list= 0;
trans->no_2pc=0;
if (is_real_trans)
thd->transaction.xid_state.xid.null();
@@ -838,37 +1262,41 @@ int ha_rollback_trans(THD *thd, bool all)
message in the error log, so we don't send it.
*/
if (is_real_trans && thd->transaction.all.modified_non_trans_table &&
- !thd->slave_thread)
+ !thd->slave_thread && thd->killed != THD::KILL_CONNECTION)
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
ER(ER_WARNING_NOT_COMPLETE_ROLLBACK));
DBUG_RETURN(error);
}
-/*
- This is used to commit or rollback a single statement depending on the value
- of error. Note that if the autocommit is on, then the following call inside
- InnoDB will commit or rollback the whole transaction (= the statement). The
- autocommit mechanism built into InnoDB is based on counting locks, but if
- the user has used LOCK TABLES then that mechanism does not know to do the
- commit.
-*/
+/**
+ This is used to commit or rollback a single statement depending on
+ the value of error.
+ @note
+ Note that if the autocommit is on, then the following call inside
+ InnoDB will commit or rollback the whole transaction (= the statement). The
+ autocommit mechanism built into InnoDB is based on counting locks, but if
+ the user has used LOCK TABLES then that mechanism does not know to do the
+ commit.
+*/
int ha_autocommit_or_rollback(THD *thd, int error)
{
DBUG_ENTER("ha_autocommit_or_rollback");
#ifdef USING_TRANSACTIONS
- if (thd->transaction.stmt.nht)
+ if (thd->transaction.stmt.ha_list)
{
if (!error)
{
- if (ha_commit_stmt(thd))
+ if (ha_commit_trans(thd, 0))
error=1;
}
- else if (thd->transaction_rollback_request && !thd->in_sub_stmt)
- (void) ha_rollback(thd);
- else
- (void) ha_rollback_stmt(thd);
+ else
+ {
+ (void) ha_rollback_trans(thd, 0);
+ if (thd->transaction_rollback_request && !thd->in_sub_stmt)
+ (void) ha_rollback(thd);
+ }
thd->variables.tx_isolation=thd->session_tx_isolation;
}
@@ -877,26 +1305,54 @@ int ha_autocommit_or_rollback(THD *thd, int error)
}
-int ha_commit_or_rollback_by_xid(XID *xid, bool commit)
+struct xahton_st {
+ XID *xid;
+ int result;
+};
+
+static my_bool xacommit_handlerton(THD *unused1, plugin_ref plugin,
+ void *arg)
{
- handlerton **types;
- int res= 1;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->recover)
+ {
+ hton->commit_by_xid(hton, ((struct xahton_st *)arg)->xid);
+ ((struct xahton_st *)arg)->result= 0;
+ }
+ return FALSE;
+}
- for (types= sys_table_types; *types; types++)
+static my_bool xarollback_handlerton(THD *unused1, plugin_ref plugin,
+ void *arg)
+{
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->recover)
{
- if ((*types)->state == SHOW_OPTION_YES && (*types)->recover)
- {
- if ((*(commit ? (*types)->commit_by_xid :
- (*types)->rollback_by_xid))(xid))
- res= 0;
- }
+ hton->rollback_by_xid(hton, ((struct xahton_st *)arg)->xid);
+ ((struct xahton_st *)arg)->result= 0;
}
- return res;
+ return FALSE;
+}
+
+
+int ha_commit_or_rollback_by_xid(XID *xid, bool commit)
+{
+ struct xahton_st xaop;
+ xaop.xid= xid;
+ xaop.result= 1;
+
+ plugin_foreach(NULL, commit ? xacommit_handlerton : xarollback_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &xaop);
+
+ return xaop.result;
}
#ifndef DBUG_OFF
-/* this does not need to be multi-byte safe or anything */
+/**
+ @note
+ This does not need to be multi-byte safe or anything
+*/
static char* xid_to_str(char *buf, XID *xid)
{
int i;
@@ -948,118 +1404,138 @@ static char* xid_to_str(char *buf, XID *xid)
}
#endif
-/*
- recover() step of xa
-
- NOTE
- there are three modes of operation:
-
- - automatic recover after a crash
- in this case commit_list != 0, tc_heuristic_recover==0
- all xids from commit_list are committed, others are rolled back
-
- - manual (heuristic) recover
- in this case commit_list==0, tc_heuristic_recover != 0
- DBA has explicitly specified that all prepared transactions should
- be committed (or rolled back).
-
- - no recovery (MySQL did not detect a crash)
- in this case commit_list==0, tc_heuristic_recover == 0
- there should be no prepared transactions in this case.
+/**
+ recover() step of xa.
+
+ @note
+ there are three modes of operation:
+ - automatic recover after a crash
+ in this case commit_list != 0, tc_heuristic_recover==0
+ all xids from commit_list are committed, others are rolled back
+ - manual (heuristic) recover
+ in this case commit_list==0, tc_heuristic_recover != 0
+ DBA has explicitly specified that all prepared transactions should
+ be committed (or rolled back).
+ - no recovery (MySQL did not detect a crash)
+ in this case commit_list==0, tc_heuristic_recover == 0
+ there should be no prepared transactions in this case.
*/
-int ha_recover(HASH *commit_list)
+struct xarecover_st
{
- int len, got, found_foreign_xids=0, found_my_xids=0;
- handlerton **types;
- XID *list=0;
- bool dry_run=(commit_list==0 && tc_heuristic_recover==0);
- DBUG_ENTER("ha_recover");
-
- /* commit_list and tc_heuristic_recover cannot be set both */
- DBUG_ASSERT(commit_list==0 || tc_heuristic_recover==0);
- /* if either is set, total_ha_2pc must be set too */
- DBUG_ASSERT(dry_run || total_ha_2pc>(ulong)opt_bin_log);
-
- if (total_ha_2pc <= (ulong)opt_bin_log)
- DBUG_RETURN(0);
-
- if (commit_list)
- sql_print_information("Starting crash recovery...");
-
-#ifndef WILL_BE_DELETED_LATER
- /*
- for now, only InnoDB supports 2pc. It means we can always safely
- rollback all pending transactions, without risking inconsistent data
- */
- DBUG_ASSERT(total_ha_2pc == (ulong) opt_bin_log+1); // only InnoDB and binlog
- tc_heuristic_recover= TC_HEURISTIC_RECOVER_ROLLBACK; // forcing ROLLBACK
- dry_run=FALSE;
-#endif
+ int len, found_foreign_xids, found_my_xids;
+ XID *list;
+ HASH *commit_list;
+ bool dry_run;
+};
- for (len= MAX_XID_LIST_SIZE ; list==0 && len > MIN_XID_LIST_SIZE; len/=2)
- {
- list=(XID *)my_malloc(len*sizeof(XID), MYF(0));
- }
- if (!list)
- {
- sql_print_error(ER(ER_OUTOFMEMORY), len*sizeof(XID));
- DBUG_RETURN(1);
- }
+static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin,
+ void *arg)
+{
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ struct xarecover_st *info= (struct xarecover_st *) arg;
+ int got;
- for (types= sys_table_types; *types; types++)
+ if (hton->state == SHOW_OPTION_YES && hton->recover)
{
- if ((*types)->state != SHOW_OPTION_YES || !(*types)->recover)
- continue;
- while ((got=(*(*types)->recover)(list, len)) > 0 )
+ while ((got= hton->recover(hton, info->list, info->len)) > 0 )
{
sql_print_information("Found %d prepared transaction(s) in %s",
- got, (*types)->name);
+ got, ha_resolve_storage_engine_name(hton));
for (int i=0; i < got; i ++)
{
- my_xid x=list[i].get_my_xid();
+ my_xid x=info->list[i].get_my_xid();
if (!x) // not "mine" - that is generated by external TM
{
#ifndef DBUG_OFF
char buf[XIDDATASIZE*4+6]; // see xid_to_str
- sql_print_information("ignore xid %s", xid_to_str(buf, list+i));
+ sql_print_information("ignore xid %s", xid_to_str(buf, info->list+i));
#endif
- xid_cache_insert(list+i, XA_PREPARED);
- found_foreign_xids++;
+ xid_cache_insert(info->list+i, XA_PREPARED);
+ info->found_foreign_xids++;
continue;
}
- if (dry_run)
+ if (info->dry_run)
{
- found_my_xids++;
+ info->found_my_xids++;
continue;
}
// recovery mode
- if (commit_list ?
- hash_search(commit_list, (byte *)&x, sizeof(x)) != 0 :
+ if (info->commit_list ?
+ hash_search(info->commit_list, (uchar *)&x, sizeof(x)) != 0 :
tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT)
{
#ifndef DBUG_OFF
char buf[XIDDATASIZE*4+6]; // see xid_to_str
- sql_print_information("commit xid %s", xid_to_str(buf, list+i));
+ sql_print_information("commit xid %s", xid_to_str(buf, info->list+i));
#endif
- (*(*types)->commit_by_xid)(list+i);
+ hton->commit_by_xid(hton, info->list+i);
}
else
{
#ifndef DBUG_OFF
char buf[XIDDATASIZE*4+6]; // see xid_to_str
- sql_print_information("rollback xid %s", xid_to_str(buf, list+i));
+ sql_print_information("rollback xid %s",
+ xid_to_str(buf, info->list+i));
#endif
- (*(*types)->rollback_by_xid)(list+i);
+ hton->rollback_by_xid(hton, info->list+i);
}
}
- if (got < len)
+ if (got < info->len)
break;
}
}
- my_free((gptr)list, MYF(0));
- if (found_foreign_xids)
- sql_print_warning("Found %d prepared XA transactions", found_foreign_xids);
- if (dry_run && found_my_xids)
+ return FALSE;
+}
+
+int ha_recover(HASH *commit_list)
+{
+ struct xarecover_st info;
+ DBUG_ENTER("ha_recover");
+ info.found_foreign_xids= info.found_my_xids= 0;
+ info.commit_list= commit_list;
+ info.dry_run= (info.commit_list==0 && tc_heuristic_recover==0);
+ info.list= NULL;
+
+ /* commit_list and tc_heuristic_recover cannot be set both */
+ DBUG_ASSERT(info.commit_list==0 || tc_heuristic_recover==0);
+ /* if either is set, total_ha_2pc must be set too */
+ DBUG_ASSERT(info.dry_run || total_ha_2pc>(ulong)opt_bin_log);
+
+ if (total_ha_2pc <= (ulong)opt_bin_log)
+ DBUG_RETURN(0);
+
+ if (info.commit_list)
+ sql_print_information("Starting crash recovery...");
+
+#ifndef WILL_BE_DELETED_LATER
+ /*
+ for now, only InnoDB supports 2pc. It means we can always safely
+ rollback all pending transactions, without risking inconsistent data
+ */
+ DBUG_ASSERT(total_ha_2pc == (ulong) opt_bin_log+1); // only InnoDB and binlog
+ tc_heuristic_recover= TC_HEURISTIC_RECOVER_ROLLBACK; // forcing ROLLBACK
+ info.dry_run=FALSE;
+#endif
+
+ for (info.len= MAX_XID_LIST_SIZE ;
+ info.list==0 && info.len > MIN_XID_LIST_SIZE; info.len/=2)
+ {
+ info.list=(XID *)my_malloc(info.len*sizeof(XID), MYF(0));
+ }
+ if (!info.list)
+ {
+ sql_print_error(ER(ER_OUTOFMEMORY), info.len*sizeof(XID));
+ DBUG_RETURN(1);
+ }
+
+ plugin_foreach(NULL, xarecover_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &info);
+
+ my_free((uchar*)info.list, MYF(0));
+ if (info.found_foreign_xids)
+ sql_print_warning("Found %d prepared XA transactions",
+ info.found_foreign_xids);
+ if (info.dry_run && info.found_my_xids)
{
sql_print_error("Found %d prepared transactions! It means that mysqld was "
"not shut down properly last time and critical recovery "
@@ -1067,18 +1543,18 @@ int ha_recover(HASH *commit_list)
"after a crash. You have to start mysqld with "
"--tc-heuristic-recover switch to commit or rollback "
"pending transactions.",
- found_my_xids, opt_tc_log_file);
+ info.found_my_xids, opt_tc_log_file);
DBUG_RETURN(1);
}
- if (commit_list)
+ if (info.commit_list)
sql_print_information("Crash recovery finished.");
DBUG_RETURN(0);
}
-/*
- return the list of XID's to a client, the same way SHOW commands do
+/**
+ return the list of XID's to a client, the same way SHOW commands do.
- NOTE
+ @note
I didn't find in XA specs that an RM cannot return the same XID twice,
so mysql_xa_recover does not filter XID's to ensure uniqueness.
It can be easily fixed later, if necessary.
@@ -1120,11 +1596,12 @@ bool mysql_xa_recover(THD *thd)
}
pthread_mutex_unlock(&LOCK_xid_cache);
- send_eof(thd);
+ my_eof(thd);
DBUG_RETURN(0);
}
-/*
+/**
+ @details
This function should be called when MySQL sends rows of a SELECT result set
or the EOF mark to the client. It releases a possible adaptive hash index
S-latch held by thd in InnoDB and also releases a possible InnoDB query
@@ -1136,32 +1613,28 @@ bool mysql_xa_recover(THD *thd)
performs another SQL query. In MySQL-4.1 this is even more important because
there a connection can have several SELECT queries open at the same time.
- arguments:
- thd: the thread handle of the current connection
- return value: always 0
-*/
+ @param thd the thread handle of the current connection
-int ha_release_temporary_latches(THD *thd)
+ @return
+ always 0
+*/
+static my_bool release_temporary_latches(THD *thd, plugin_ref plugin,
+ void *unused)
{
-#ifdef HAVE_INNOBASE_DB
- if (opt_innodb)
- innobase_release_temporary_latches(thd);
-#endif
- return 0;
-}
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->release_temporary_latches)
+ hton->release_temporary_latches(hton, thd);
+
+ return FALSE;
+}
-/*
- Export statistics for different engines. Currently we use it only for
- InnoDB.
-*/
-int ha_update_statistics()
+int ha_release_temporary_latches(THD *thd)
{
-#ifdef HAVE_INNOBASE_DB
- if (opt_innodb)
- innodb_export_status();
-#endif
+ plugin_foreach(thd, release_temporary_latches, MYSQL_STORAGE_ENGINE_PLUGIN,
+ NULL);
+
return 0;
}
@@ -1170,77 +1643,89 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv)
int error=0;
THD_TRANS *trans= (thd->in_sub_stmt ? &thd->transaction.stmt :
&thd->transaction.all);
- handlerton **ht=trans->ht, **end_ht;
+ Ha_trx_info *ha_info, *ha_info_next;
+
DBUG_ENTER("ha_rollback_to_savepoint");
- trans->nht=sv->nht;
trans->no_2pc=0;
- end_ht=ht+sv->nht;
/*
rolling back to savepoint in all storage engines that were part of the
transaction when the savepoint was set
*/
- for (; ht < end_ht; ht++)
+ for (ha_info= sv->ha_list; ha_info; ha_info= ha_info->next())
{
int err;
- DBUG_ASSERT((*ht)->savepoint_set != 0);
- if ((err= (*(*ht)->savepoint_rollback)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ handlerton *ht= ha_info->ht();
+ DBUG_ASSERT(ht);
+ DBUG_ASSERT(ht->savepoint_set != 0);
+ if ((err= ht->savepoint_rollback(ht, thd,
+ (uchar *)(sv+1)+ht->savepoint_offset)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
}
- statistic_increment(thd->status_var.ha_savepoint_rollback_count,&LOCK_status);
- trans->no_2pc|=(*ht)->prepare == 0;
+ status_var_increment(thd->status_var.ha_savepoint_rollback_count);
+ trans->no_2pc|= ht->prepare == 0;
}
/*
rolling back the transaction in all storage engines that were not part of
the transaction when the savepoint was set
*/
- for (; *ht ; ht++)
+ for (ha_info= trans->ha_list; ha_info != sv->ha_list;
+ ha_info= ha_info_next)
{
int err;
- if ((err= (*(*ht)->rollback)(thd, !thd->in_sub_stmt)))
+ handlerton *ht= ha_info->ht();
+ if ((err= ht->rollback(ht, thd, !thd->in_sub_stmt)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
}
- statistic_increment(thd->status_var.ha_rollback_count,&LOCK_status);
- *ht=0; // keep it conveniently zero-filled
+ status_var_increment(thd->status_var.ha_rollback_count);
+ ha_info_next= ha_info->next();
+ ha_info->reset(); /* keep it conveniently zero-filled */
}
+ trans->ha_list= sv->ha_list;
DBUG_RETURN(error);
}
-/*
- note, that according to the sql standard (ISO/IEC 9075-2:2003)
+/**
+ @note
+ according to the sql standard (ISO/IEC 9075-2:2003)
section "4.33.4 SQL-statements and transaction states",
SAVEPOINT is *not* transaction-initiating SQL-statement
*/
-
int ha_savepoint(THD *thd, SAVEPOINT *sv)
{
int error=0;
THD_TRANS *trans= (thd->in_sub_stmt ? &thd->transaction.stmt :
&thd->transaction.all);
- handlerton **ht=trans->ht;
+ Ha_trx_info *ha_info= trans->ha_list;
DBUG_ENTER("ha_savepoint");
#ifdef USING_TRANSACTIONS
- for (; *ht; ht++)
+ for (; ha_info; ha_info= ha_info->next())
{
int err;
- if (! (*ht)->savepoint_set)
+ handlerton *ht= ha_info->ht();
+ DBUG_ASSERT(ht);
+ if (! ht->savepoint_set)
{
my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "SAVEPOINT");
error=1;
break;
}
- if ((err= (*(*ht)->savepoint_set)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ if ((err= ht->savepoint_set(ht, thd, (uchar *)(sv+1)+ht->savepoint_offset)))
{ // cannot happen
my_error(ER_GET_ERRNO, MYF(0), err);
error=1;
}
- statistic_increment(thd->status_var.ha_savepoint_count,&LOCK_status);
+ status_var_increment(thd->status_var.ha_savepoint_count);
}
- sv->nht=trans->nht;
+ /*
+ Remember the list of registered storage engines. All new
+ engines are prepended to the beginning of the list.
+ */
+ sv->ha_list= trans->ha_list;
#endif /* USING_TRANSACTIONS */
DBUG_RETURN(error);
}
@@ -1248,18 +1733,19 @@ int ha_savepoint(THD *thd, SAVEPOINT *sv)
int ha_release_savepoint(THD *thd, SAVEPOINT *sv)
{
int error=0;
- THD_TRANS *trans= (thd->in_sub_stmt ? &thd->transaction.stmt :
- &thd->transaction.all);
- handlerton **ht=trans->ht, **end_ht;
+ Ha_trx_info *ha_info= sv->ha_list;
DBUG_ENTER("ha_release_savepoint");
- end_ht=ht+sv->nht;
- for (; ht < end_ht; ht++)
+ for (; ha_info; ha_info= ha_info->next())
{
int err;
- if (!(*ht)->savepoint_release)
+ handlerton *ht= ha_info->ht();
+ /* Savepoint life time is enclosed into transaction life time. */
+ DBUG_ASSERT(ht);
+ if (!ht->savepoint_release)
continue;
- if ((err= (*(*ht)->savepoint_release)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset)))
+ if ((err= ht->savepoint_release(ht, thd,
+ (uchar *)(sv+1) + ht->savepoint_offset)))
{ // cannot happen
my_error(ER_GET_ERRNO, MYF(0), err);
error=1;
@@ -1269,47 +1755,120 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv)
}
+static my_bool snapshot_handlerton(THD *thd, plugin_ref plugin,
+ void *arg)
+{
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES &&
+ hton->start_consistent_snapshot)
+ {
+ hton->start_consistent_snapshot(hton, thd);
+ *((bool *)arg)= false;
+ }
+ return FALSE;
+}
+
int ha_start_consistent_snapshot(THD *thd)
{
-#ifdef HAVE_INNOBASE_DB
- if ((have_innodb == SHOW_OPTION_YES) &&
- !innobase_start_trx_and_assign_read_view(thd))
- return 0;
-#endif
+ bool warn= true;
+
+ plugin_foreach(thd, snapshot_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, &warn);
+
/*
Same idea as when one wants to CREATE TABLE in one engine which does not
exist:
*/
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
- "This MySQL server does not support any "
- "consistent-read capable storage engine");
+ if (warn)
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ "This MySQL server does not support any "
+ "consistent-read capable storage engine");
return 0;
}
-bool ha_flush_logs()
+static my_bool flush_handlerton(THD *thd, plugin_ref plugin,
+ void *arg)
{
- bool result=0;
-#ifdef HAVE_BERKELEY_DB
- if ((have_berkeley_db == SHOW_OPTION_YES) &&
- berkeley_flush_logs())
- result=1;
-#endif
-#ifdef HAVE_INNOBASE_DB
- if ((have_innodb == SHOW_OPTION_YES) &&
- innobase_flush_logs())
- result=1;
-#endif
- return result;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->flush_logs &&
+ hton->flush_logs(hton))
+ return TRUE;
+ return FALSE;
}
-/*
+
+bool ha_flush_logs(handlerton *db_type)
+{
+ if (db_type == NULL)
+ {
+ if (plugin_foreach(NULL, flush_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, 0))
+ return TRUE;
+ }
+ else
+ {
+ if (db_type->state != SHOW_OPTION_YES ||
+ (db_type->flush_logs && db_type->flush_logs(db_type)))
+ return TRUE;
+ }
+ return FALSE;
+}
+
+static const char *check_lowercase_names(handler *file, const char *path,
+ char *tmp_path)
+{
+ if (lower_case_table_names != 2 || (file->ha_table_flags() & HA_FILE_BASED))
+ return path;
+
+ /* Ensure that table handler get path in lower case */
+ if (tmp_path != path)
+ strmov(tmp_path, path);
+
+ /*
+ we only should turn into lowercase database/table part
+ so start the process after homedirectory
+ */
+ my_casedn_str(files_charset_info, tmp_path + mysql_data_home_len);
+ return tmp_path;
+}
+
+
+/**
+ An interceptor to hijack the text of the error message without
+ setting an error in the thread. We need the text to present it
+ in the form of a warning to the user.
+*/
+
+struct Ha_delete_table_error_handler: public Internal_error_handler
+{
+public:
+ virtual bool handle_error(uint sql_errno,
+ const char *message,
+ MYSQL_ERROR::enum_warning_level level,
+ THD *thd);
+ char buff[MYSQL_ERRMSG_SIZE];
+};
+
+
+bool
+Ha_delete_table_error_handler::
+handle_error(uint sql_errno,
+ const char *message,
+ MYSQL_ERROR::enum_warning_level level,
+ THD *thd)
+{
+ /* Grab the error message */
+ strmake(buff, message, sizeof(buff)-1);
+ return TRUE;
+}
+
+
+/** @brief
This should return ENOENT if the file doesn't exists.
The .frm file will be deleted only if we return 0 or ENOENT
*/
-
-int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
- const char *alias, bool generate_warning)
+int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
+ const char *db, const char *alias, bool generate_warning)
{
handler *file;
char tmp_path[FN_REFLEN];
@@ -1323,53 +1882,43 @@ int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
dummy_table.s= &dummy_share;
/* DB_TYPE_UNKNOWN is used in ALTER TABLE when renaming only .frm files */
- if (table_type == DB_TYPE_UNKNOWN ||
- ! (file=get_new_handler(&dummy_table, thd->mem_root, table_type)))
+ if (table_type == NULL ||
+ ! (file=get_new_handler((TABLE_SHARE*)0, thd->mem_root, table_type)))
DBUG_RETURN(ENOENT);
- if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED))
- {
- /* Ensure that table handler get path in lower case */
- strmov(tmp_path, path);
- my_casedn_str(files_charset_info, tmp_path);
- path= tmp_path;
- }
- if ((error= file->delete_table(path)) && generate_warning)
+ path= check_lowercase_names(file, path, tmp_path);
+ if ((error= file->ha_delete_table(path)) && generate_warning)
{
/*
Because file->print_error() use my_error() to generate the error message
- we must store the error state in thd, reset it and restore it to
- be able to get hold of the error message.
- (We should in the future either rewrite handler::print_error() or make
- a nice method of this.
+ we use an internal error handler to intercept it and store the text
+ in a temporary buffer. Later the message will be presented to user
+ as a warning.
*/
- bool query_error= thd->query_error;
- sp_rcontext *spcont= thd->spcont;
- SELECT_LEX *current_select= thd->lex->current_select;
- char buff[sizeof(thd->net.last_error)];
- char new_error[sizeof(thd->net.last_error)];
- int last_errno= thd->net.last_errno;
-
- strmake(buff, thd->net.last_error, sizeof(buff)-1);
- thd->query_error= 0;
- thd->spcont= NULL;
- thd->lex->current_select= 0;
- thd->net.last_error[0]= 0;
+ Ha_delete_table_error_handler ha_delete_table_error_handler;
/* Fill up strucutures that print_error may need */
- dummy_table.s->path= path;
+ dummy_share.path.str= (char*) path;
+ dummy_share.path.length= strlen(path);
+ dummy_share.db.str= (char*) db;
+ dummy_share.db.length= strlen(db);
+ dummy_share.table_name.str= (char*) alias;
+ dummy_share.table_name.length= strlen(alias);
dummy_table.alias= alias;
+ file->change_table_ptr(&dummy_table, &dummy_share);
+
+ thd->push_internal_handler(&ha_delete_table_error_handler);
file->print_error(error, 0);
- strmake(new_error, thd->net.last_error, sizeof(buff)-1);
- /* restore thd */
- thd->query_error= query_error;
- thd->spcont= spcont;
- thd->lex->current_select= current_select;
- thd->net.last_errno= last_errno;
- strmake(thd->net.last_error, buff, sizeof(buff)-1);
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, new_error);
+ thd->pop_internal_handler();
+
+ /*
+ XXX: should we convert *all* errors to warnings here?
+ What if the error is fatal?
+ */
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error,
+ ha_delete_table_error_handler.buff);
}
delete file;
DBUG_RETURN(error);
@@ -1380,31 +1929,61 @@ int ha_delete_table(THD *thd, enum db_type table_type, const char *path,
****************************************************************************/
handler *handler::clone(MEM_ROOT *mem_root)
{
- handler *new_handler= get_new_handler(table, mem_root, table->s->db_type);
+ handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
/*
Allocate handler->ref here because otherwise ha_open will allocate it
on this->table->mem_root and we will not be able to reclaim that memory
when the clone handler object is destroyed.
*/
- if (!(new_handler->ref= (byte*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2)))
+ if (!(new_handler->ref= (uchar*) alloc_root(mem_root, ALIGN_SIZE(ref_length)*2)))
return NULL;
- if (new_handler && !new_handler->ha_open(table->s->path, table->db_stat,
+ if (new_handler && !new_handler->ha_open(table,
+ table->s->normalized_path.str,
+ table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
return new_handler;
return NULL;
}
- /* Open database-handler. Try O_RDONLY if can't open as O_RDWR */
- /* Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set */
-int handler::ha_open(const char *name, int mode, int test_if_locked)
+void handler::ha_statistic_increment(ulong SSV::*offset) const
+{
+ status_var_increment(table->in_use->status_var.*offset);
+}
+
+void **handler::ha_data(THD *thd) const
+{
+ return thd_ha_data(thd, ht);
+}
+
+THD *handler::ha_thd(void) const
+{
+ DBUG_ASSERT(!table || !table->in_use || table->in_use == current_thd);
+ return (table && table->in_use) ? table->in_use : current_thd;
+}
+
+
+/** @brief
+ Open database-handler.
+
+ IMPLEMENTATION
+ Try O_RDONLY if cannot open as O_RDWR
+ Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set
+*/
+int handler::ha_open(TABLE *table_arg, const char *name, int mode,
+ int test_if_locked)
{
int error;
DBUG_ENTER("handler::ha_open");
- DBUG_PRINT("enter",("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
- name, table->s->db_type, table->db_stat, mode,
- test_if_locked));
+ DBUG_PRINT("enter",
+ ("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
+ name, ht->db_type, table_arg->db_stat, mode,
+ test_if_locked));
+
+ table= table_arg;
+ DBUG_ASSERT(table->s == table_share);
+ DBUG_ASSERT(alloc_root_inited(&table->mem_root));
if ((error=open(name,mode,test_if_locked)))
{
@@ -1417,7 +1996,7 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
}
if (error)
{
- my_errno=error; /* Safeguard */
+ my_errno= error; /* Safeguard */
DBUG_PRINT("error",("error: %d errno: %d",error,errno));
}
else
@@ -1426,39 +2005,40 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
table->db_stat|=HA_READ_ONLY;
(void) extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
- DBUG_ASSERT(alloc_root_inited(&table->mem_root));
/* ref is already allocated for us if we're called from handler::clone() */
- if (!ref && !(ref= (byte*) alloc_root(&table->mem_root,
+ if (!ref && !(ref= (uchar*) alloc_root(&table->mem_root,
ALIGN_SIZE(ref_length)*2)))
{
close();
error=HA_ERR_OUT_OF_MEM;
}
else
- dupp_ref=ref+ALIGN_SIZE(ref_length);
+ dup_ref=ref+ALIGN_SIZE(ref_length);
+ cached_table_flags= table_flags();
}
DBUG_RETURN(error);
}
-/*
- Read first row (only) from a table
- This is never called for InnoDB or BDB tables, as these table types
- has the HA_NOT_EXACT_COUNT set.
-*/
-int handler::read_first_row(byte * buf, uint primary_key)
+/**
+ Read first row (only) from a table.
+
+ This is never called for InnoDB tables, as these table types
+ has the HA_STATS_RECORDS_IS_EXACT set.
+*/
+int handler::read_first_row(uchar * buf, uint primary_key)
{
register int error;
DBUG_ENTER("handler::read_first_row");
- statistic_increment(current_thd->status_var.ha_read_first_count,&LOCK_status);
+ ha_statistic_increment(&SSV::ha_read_first_count);
/*
If there is very few deleted rows in the table, find the first row by
scanning the table.
TODO remove the test for HA_READ_ORDER
*/
- if (deleted < 10 || primary_key >= MAX_KEY ||
+ if (stats.deleted < 10 || primary_key >= MAX_KEY ||
!(index_flags(primary_key, 0, 0) & HA_READ_ORDER))
{
(void) ha_rnd_init(1);
@@ -1468,25 +2048,29 @@ int handler::read_first_row(byte * buf, uint primary_key)
else
{
/* Find the first row through the primary key */
- (void) ha_index_init(primary_key);
+ (void) ha_index_init(primary_key, 0);
error=index_first(buf);
(void) ha_index_end();
}
DBUG_RETURN(error);
}
-/*
- Generate the next auto-increment number based on increment and offset
+/**
+ Generate the next auto-increment number based on increment and offset.
+ computes the lowest number
+ - strictly greater than "nr"
+ - of the form: auto_increment_offset + N * auto_increment_increment
In most cases increment= offset= 1, in which case we get:
- 1,2,3,4,5,...
- If increment=10 and offset=5 and previous number is 1, we get:
- 1,5,15,25,35,...
+ @verbatim 1,2,3,4,5,... @endverbatim
+ If increment=10 and offset=5 and previous number is 1, we get:
+ @verbatim 1,5,15,25,35,... @endverbatim
*/
-
inline ulonglong
-next_insert_id(ulonglong nr,struct system_variables *variables)
+compute_next_insert_id(ulonglong nr,struct system_variables *variables)
{
+ if (variables->auto_increment_increment == 1)
+ return (nr+1); // optimization of the formula below
nr= (((nr+ variables->auto_increment_increment -
variables->auto_increment_offset)) /
(ulonglong) variables->auto_increment_increment);
@@ -1502,20 +2086,12 @@ void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
explicitely-specified value larger than this, we need to increase
THD::next_insert_id to be greater than the explicit value.
*/
- THD *thd= table->in_use;
- if (thd->clear_next_insert_id && (nr >= thd->next_insert_id))
- {
- if (thd->variables.auto_increment_increment != 1)
- nr= next_insert_id(nr, &thd->variables);
- else
- nr++;
- thd->next_insert_id= nr;
- DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
- }
+ if ((next_insert_id > 0) && (nr >= next_insert_id))
+ set_next_insert_id(compute_next_insert_id(nr, &table->in_use->variables));
}
-/*
+/** @brief
Computes the largest number X:
- smaller than or equal to "nr"
- of the form: auto_increment_offset + N * auto_increment_increment
@@ -1530,7 +2106,6 @@ void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
RETURN
The number X if it exists, "nr" otherwise.
*/
-
inline ulonglong
prev_insert_id(ulonglong nr, struct system_variables *variables)
{
@@ -1555,23 +2130,10 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
}
-/*
- Update the auto_increment field if necessary
-
- SYNOPSIS
- update_auto_increment()
-
- RETURN
- 0 ok
- HA_ERR_AUTOINC_READ_FAILED
- get_auto_increment() was called and returned ~(ulonglong) 0
- HA_ERR_AUTOINC_ERANGE
- storing value in field caused strict mode failure.
+/**
+ Update the auto_increment field if necessary.
-
- IMPLEMENTATION
-
- Updates columns with type NEXT_NUMBER if:
+ Updates columns with type NEXT_NUMBER if:
- If column value is set to NULL (in which case
auto_increment_field_not_null is 0)
@@ -1579,24 +2141,31 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
set. In the future we will only set NEXT_NUMBER fields if one sets them
to NULL (or they are not included in the insert list).
+ In those cases, we check if the currently reserved interval still has
+ values we have not used. If yes, we pick the smallest one and use it.
+ Otherwise:
- There are two different cases when the above is true:
-
- - thd->next_insert_id == 0 (This is the normal case)
- In this case we set the set the column for the first row to the value
- next_insert_id(get_auto_increment(column))) which is normally
- max-used-column-value +1.
+ - If a list of intervals has been provided to the statement via SET
+ INSERT_ID or via an Intvar_log_event (in a replication slave), we pick the
+ first unused interval from this list, consider it as reserved.
- We call get_auto_increment() only for the first row in a multi-row
- statement. For the following rows we generate new numbers based on the
- last used number.
+ - Otherwise we set the column for the first row to the value
+ next_insert_id(get_auto_increment(column))) which is usually
+ max-used-column-value+1.
+ We call get_auto_increment() for the first row in a multi-row
+ statement. get_auto_increment() will tell us the interval of values it
+ reserved for us.
- - thd->next_insert_id != 0. This happens when we have read a statement
- from the binary log or when one has used SET LAST_INSERT_ID=#.
+ - In both cases, for the following rows we use those reserved values without
+ calling the handler again (we just progress in the interval, computing
+ each new value from the previous one). Until we have exhausted them, then
+ we either take the next provided interval or call get_auto_increment()
+ again to reserve a new interval.
- In this case we will set the column to the value of next_insert_id.
- The next row will be given the id
- next_insert_id(next_insert_id)
+ - In both cases, the reserved intervals are remembered in
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog if statement-based
+ binlogging; the last reserved interval is remembered in
+ auto_inc_interval_for_cur_row.
The idea is that generated auto_increment values are predictable and
independent of the column values in the table. This is needed to be
@@ -1607,133 +2176,273 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
inserts a column with a higher value than the last used one, we will
start counting from the inserted value.
- thd->next_insert_id is cleared after it's been used for a statement.
+ This function's "outputs" are: the table's auto_increment field is filled
+ with a value, thd->next_insert_id is filled with the value to use for the
+ next row, if a value was autogenerated for the current row it is stored in
+ thd->insert_id_for_cur_row, if get_auto_increment() was called
+ thd->auto_inc_interval_for_cur_row is modified, if that interval is not
+ present in thd->auto_inc_intervals_in_cur_stmt_for_binlog it is added to
+ this list.
+
+ @todo
+ Replace all references to "next number" or NEXT_NUMBER to
+ "auto_increment", everywhere (see below: there is
+ table->auto_increment_field_not_null, and there also exists
+ table->next_number_field, it's not consistent).
+
+ @retval
+ 0 ok
+ @retval
+ HA_ERR_AUTOINC_READ_FAILED get_auto_increment() was called and
+ returned ~(ulonglong) 0
+ @retval
+ HA_ERR_AUTOINC_ERANGE storing value in field caused strict mode
+ failure.
*/
+#define AUTO_INC_DEFAULT_NB_ROWS 1 // Some prefer 1024 here
+#define AUTO_INC_DEFAULT_NB_MAX_BITS 16
+#define AUTO_INC_DEFAULT_NB_MAX ((1 << AUTO_INC_DEFAULT_NB_MAX_BITS) - 1)
+
int handler::update_auto_increment()
{
- ulonglong nr;
+ ulonglong nr, nb_reserved_values;
+ bool append= FALSE;
THD *thd= table->in_use;
struct system_variables *variables= &thd->variables;
- bool external_auto_increment=
- table->file->table_flags() & HA_EXTERNAL_AUTO_INCREMENT;
DBUG_ENTER("handler::update_auto_increment");
/*
- We must save the previous value to be able to restore it if the
- row was not inserted
+ next_insert_id is a "cursor" into the reserved interval, it may go greater
+ than the interval, but not smaller.
*/
- thd->prev_insert_id= thd->next_insert_id;
+ DBUG_ASSERT(next_insert_id >= auto_inc_interval_for_cur_row.minimum());
if ((nr= table->next_number_field->val_int()) != 0 ||
table->auto_increment_field_not_null &&
thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO)
{
- /* Mark that we didn't generate a new value **/
- auto_increment_column_changed=0;
+ /*
+ Update next_insert_id if we had already generated a value in this
+ statement (case of INSERT VALUES(null),(3763),(null):
+ the last NULL needs to insert 3764, not the value of the first NULL plus
+ 1).
+ */
adjust_next_insert_id_after_explicit_value(nr);
+ insert_id_for_cur_row= 0; // didn't generate anything
DBUG_RETURN(0);
}
- if (external_auto_increment || !(nr= thd->next_insert_id))
- {
- if ((nr= get_auto_increment()) == ~(ulonglong) 0)
- DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED); // Mark failure
- if (!external_auto_increment && variables->auto_increment_increment != 1)
- nr= next_insert_id(nr-1, variables);
- /*
- Update next row based on the found value. This way we don't have to
- call the handler for every generated auto-increment value on a
- multi-row statement
- */
- thd->next_insert_id= nr;
+ if ((nr= next_insert_id) >= auto_inc_interval_for_cur_row.maximum())
+ {
+ /* next_insert_id is beyond what is reserved, so we reserve more. */
+ const Discrete_interval *forced=
+ thd->auto_inc_intervals_forced.get_next();
+ if (forced != NULL)
+ {
+ nr= forced->minimum();
+ nb_reserved_values= forced->values();
+ }
+ else
+ {
+ /*
+ handler::estimation_rows_to_insert was set by
+ handler::ha_start_bulk_insert(); if 0 it means "unknown".
+ */
+ uint nb_already_reserved_intervals=
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements();
+ ulonglong nb_desired_values;
+ /*
+ If an estimation was given to the engine:
+ - use it.
+ - if we already reserved numbers, it means the estimation was
+ not accurate, then we'll reserve 2*AUTO_INC_DEFAULT_NB_ROWS the 2nd
+ time, twice that the 3rd time etc.
+ If no estimation was given, use those increasing defaults from the
+ start, starting from AUTO_INC_DEFAULT_NB_ROWS.
+ Don't go beyond a max to not reserve "way too much" (because
+ reservation means potentially losing unused values).
+ */
+ if (nb_already_reserved_intervals == 0 &&
+ (estimation_rows_to_insert > 0))
+ nb_desired_values= estimation_rows_to_insert;
+ else /* go with the increasing defaults */
+ {
+ /* avoid overflow in formula, with this if() */
+ if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
+ {
+ nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
+ (1 << nb_already_reserved_intervals);
+ set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
+ }
+ else
+ nb_desired_values= AUTO_INC_DEFAULT_NB_MAX;
+ }
+ /* This call ignores all its parameters but nr, currently */
+ get_auto_increment(variables->auto_increment_offset,
+ variables->auto_increment_increment,
+ nb_desired_values, &nr,
+ &nb_reserved_values);
+ if (nr == ~(ulonglong) 0)
+ DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED); // Mark failure
+
+ /*
+ That rounding below should not be needed when all engines actually
+ respect offset and increment in get_auto_increment(). But they don't
+ so we still do it. Wonder if for the not-first-in-index we should do
+ it. Hope that this rounding didn't push us out of the interval; even
+ if it did we cannot do anything about it (calling the engine again
+ will not help as we inserted no row).
+ */
+ nr= compute_next_insert_id(nr-1, variables);
+ }
+
+ if (table->s->next_number_keypart == 0)
+ {
+ /* We must defer the appending until "nr" has been possibly truncated */
+ append= TRUE;
+ }
+ else
+ {
+ /*
+ For such auto_increment there is no notion of interval, just a
+ singleton. The interval is not even stored in
+ thd->auto_inc_interval_for_cur_row, so we are sure to call the engine
+ for next row.
+ */
+ DBUG_PRINT("info",("auto_increment: special not-first-in-index"));
+ }
}
DBUG_PRINT("info",("auto_increment: %lu", (ulong) nr));
- /* Mark that we should clear next_insert_id before next stmt */
- thd->clear_next_insert_id= 1;
-
- if (likely(!table->next_number_field->store((longlong) nr, TRUE)))
- thd->insert_id((ulonglong) nr);
- else
- if (thd->killed != THD::KILL_BAD_DATA) /* did we fail strict mode? */
+ if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
{
/*
- overflow of the field; we'll use the max value, however we try to
- decrease it to honour auto_increment_* variables:
+ first test if the query was aborted due to strict mode constraints
+ */
+ if (thd->killed == THD::KILL_BAD_DATA)
+ DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
+
+ /*
+ field refused this value (overflow) and truncated it, use the result of
+ the truncation (which is going to be inserted); however we try to
+ decrease it to honour auto_increment_* variables.
+ That will shift the left bound of the reserved interval, we don't
+ bother shifting the right bound (anyway any other value from this
+ interval will cause a duplicate key).
*/
nr= prev_insert_id(table->next_number_field->val_int(), variables);
- thd->insert_id(nr);
if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
- thd->insert_id(nr= table->next_number_field->val_int());
+ nr= table->next_number_field->val_int();
+ }
+ if (append)
+ {
+ auto_inc_interval_for_cur_row.replace(nr, nb_reserved_values,
+ variables->auto_increment_increment);
+ /* Row-based replication does not need to store intervals in binlog */
+ if (!thd->current_stmt_binlog_row_based)
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.append(auto_inc_interval_for_cur_row.minimum(),
+ auto_inc_interval_for_cur_row.values(),
+ variables->auto_increment_increment);
}
- else
- DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
/*
- We can't set next_insert_id if the auto-increment key is not the
- first key part, as there is no guarantee that the first parts will be in
- sequence
+ Record this autogenerated value. If the caller then
+ succeeds to insert this value, it will call
+ record_first_successful_insert_id_in_cur_stmt()
+ which will set first_successful_insert_id_in_cur_stmt if it's not
+ already set.
*/
- if (!table->s->next_number_key_offset)
- {
- /*
- Set next insert id to point to next auto-increment value to be able to
- handle multi-row statements
- This works even if auto_increment_increment > 1
- */
- thd->next_insert_id= next_insert_id(nr, variables);
- }
- else
- thd->next_insert_id= 0;
+ insert_id_for_cur_row= nr;
+ /*
+ Set next insert id to point to next auto-increment value to be able to
+ handle multi-row statements.
+ */
+ set_next_insert_id(compute_next_insert_id(nr, variables));
- /* Mark that we generated a new value */
- auto_increment_column_changed=1;
DBUG_RETURN(0);
}
-/*
- restore_auto_increment
- In case of error on write, we restore the last used next_insert_id value
- because the previous value was not used.
-*/
+/** @brief
+ MySQL signal that it changed the column bitmap
+
+ USAGE
+ This is for handlers that needs to setup their own column bitmaps.
+ Normally the handler should set up their own column bitmaps in
+ index_init() or rnd_init() and in any column_bitmaps_signal() call after
+ this.
-void handler::restore_auto_increment()
+ The handler is allowd to do changes to the bitmap after a index_init or
+ rnd_init() call is made as after this, MySQL will not use the bitmap
+ for any program logic checking.
+*/
+void handler::column_bitmaps_signal()
{
- THD *thd= table->in_use;
- if (thd->next_insert_id)
- {
- thd->next_insert_id= thd->prev_insert_id;
- if (thd->next_insert_id == 0)
- {
- /* we didn't generate a value, engine will be called again */
- thd->clear_next_insert_id= 0;
- }
- }
+ DBUG_ENTER("column_bitmaps_signal");
+ DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", (long) table->read_set,
+ (long) table->write_set));
+ DBUG_VOID_RETURN;
}
-ulonglong handler::get_auto_increment()
+/** @brief
+ Reserves an interval of auto_increment values from the handler.
+
+ SYNOPSIS
+ get_auto_increment()
+ offset
+ increment
+ nb_desired_values how many values we want
+ first_value (OUT) the first value reserved by the handler
+ nb_reserved_values (OUT) how many values the handler reserved
+
+ offset and increment means that we want values to be of the form
+ offset + N * increment, where N>=0 is integer.
+ If the function sets *first_value to ~(ulonglong)0 it means an error.
+ If the function sets *nb_reserved_values to ULONGLONG_MAX it means it has
+ reserved to "positive infinite".
+*/
+void handler::get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values)
{
ulonglong nr;
int error;
(void) extra(HA_EXTRA_KEYREAD);
- index_init(table->s->next_number_index);
- if (!table->s->next_number_key_offset)
+ table->mark_columns_used_by_index_no_reset(table->s->next_number_index,
+ table->read_set);
+ column_bitmaps_signal();
+ index_init(table->s->next_number_index, 1);
+ if (table->s->next_number_keypart == 0)
{ // Autoincrement at key-start
error=index_last(table->record[1]);
+ /*
+ MySQL implicitely assumes such method does locking (as MySQL decides to
+ use nr+increment without checking again with the handler, in
+ handler::update_auto_increment()), so reserves to infinite.
+ */
+ *nb_reserved_values= ULONGLONG_MAX;
}
else
{
- byte key[MAX_KEY_LENGTH];
+ uchar key[MAX_KEY_LENGTH];
key_copy(key, table->record[0],
table->key_info + table->s->next_number_index,
table->s->next_number_key_offset);
- error= index_read(table->record[1], key, table->s->next_number_key_offset,
- HA_READ_PREFIX_LAST);
+ error= index_read_map(table->record[1], key,
+ make_prev_keypart_map(table->s->next_number_keypart),
+ HA_READ_PREFIX_LAST);
+ /*
+ MySQL needs to call us for next row: assume we are inserting ("a",null)
+ here, we return 3, and next this statement will want to insert
+ ("b",null): there is no reason why ("b",3+1) would be the good row to
+ insert: maybe it already exists, maybe 3+1 is too large...
+ */
+ *nb_reserved_values= 1;
}
if (error)
@@ -1743,20 +2452,64 @@ ulonglong handler::get_auto_increment()
val_int_offset(table->s->rec_buff_length)+1);
index_end();
(void) extra(HA_EXTRA_NO_KEYREAD);
- return nr;
+ *first_value= nr;
}
-/*
- Print error that we got from handler function
+void handler::ha_release_auto_increment()
+{
+ release_auto_increment();
+ insert_id_for_cur_row= 0;
+ auto_inc_interval_for_cur_row.replace(0, 0, 0);
+ if (next_insert_id > 0)
+ {
+ next_insert_id= 0;
+ /*
+ this statement used forced auto_increment values if there were some,
+ wipe them away for other statements.
+ */
+ table->in_use->auto_inc_intervals_forced.empty();
+ }
+}
- NOTE
- In case of delete table it's only safe to use the following parts of
- the 'table' structure:
- table->s->path
- table->alias
-*/
+void handler::print_keydup_error(uint key_nr, const char *msg)
+{
+ /* Write the duplicated key in the error message */
+ char key[MAX_KEY_LENGTH];
+ String str(key,sizeof(key),system_charset_info);
+
+ if (key_nr == MAX_KEY)
+ {
+ /* Key is unknown */
+ str.copy("", 0, system_charset_info);
+ my_printf_error(ER_DUP_ENTRY, msg, MYF(0), str.c_ptr(), "*UNKNOWN*");
+ }
+ else
+ {
+ /* Table is opened and defined at this point */
+ key_unpack(&str,table,(uint) key_nr);
+ uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(msg);
+ if (str.length() >= max_length)
+ {
+ str.length(max_length-4);
+ str.append(STRING_WITH_LEN("..."));
+ }
+ my_printf_error(ER_DUP_ENTRY, msg,
+ MYF(0), str.c_ptr(), table->key_info[key_nr].name);
+ }
+}
+
+
+/**
+ Print error that we got from handler function.
+
+ @note
+ In case of delete table it's only safe to use the following parts of
+ the 'table' structure:
+ - table->s->path
+ - table->alias
+*/
void handler::print_error(int error, myf errflag)
{
DBUG_ENTER("handler::print_error");
@@ -1786,30 +2539,35 @@ void handler::print_error(int error, myf errflag)
uint key_nr=get_dup_key(error);
if ((int) key_nr >= 0)
{
- /* Write the dupplicated key in the error message */
+ print_keydup_error(key_nr, ER(ER_DUP_ENTRY_WITH_KEY_NAME));
+ DBUG_VOID_RETURN;
+ }
+ textno=ER_DUP_KEY;
+ break;
+ }
+ case HA_ERR_FOREIGN_DUPLICATE_KEY:
+ {
+ uint key_nr= get_dup_key(error);
+ if ((int) key_nr >= 0)
+ {
+ uint max_length;
+ /* Write the key in the error message */
char key[MAX_KEY_LENGTH];
String str(key,sizeof(key),system_charset_info);
-
- if (key_nr == MAX_KEY)
- {
- /* Key is unknown */
- str.copy("", 0, system_charset_info);
- key_nr= (uint) -1;
- }
- else
+ /* Table is opened and defined at this point */
+ key_unpack(&str,table,(uint) key_nr);
+ max_length= (MYSQL_ERRMSG_SIZE-
+ (uint) strlen(ER(ER_FOREIGN_DUPLICATE_KEY)));
+ if (str.length() >= max_length)
{
- key_unpack(&str,table,(uint) key_nr);
- uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(ER(ER_DUP_ENTRY));
- if (str.length() >= max_length)
- {
- str.length(max_length-4);
- str.append(STRING_WITH_LEN("..."));
- }
+ str.length(max_length-4);
+ str.append(STRING_WITH_LEN("..."));
}
- my_error(ER_DUP_ENTRY, MYF(0), str.c_ptr(), key_nr+1);
+ my_error(ER_FOREIGN_DUPLICATE_KEY, MYF(0), table_share->table_name.str,
+ str.c_ptr(), key_nr+1);
DBUG_VOID_RETURN;
}
- textno=ER_DUP_KEY;
+ textno= ER_DUP_KEY;
break;
}
case HA_ERR_NULL_IN_SPATIAL:
@@ -1885,19 +2643,20 @@ void handler::print_error(int error, myf errflag)
textno=ER_TABLE_DEF_CHANGED;
break;
case HA_ERR_NO_SUCH_TABLE:
- {
- /*
- We have to use path to find database name instead of using
- table->table_cache_key because if the table didn't exist, then
- table_cache_key was not set up
- */
- char *db;
- char buff[FN_REFLEN];
- uint length= dirname_part(buff,table->s->path);
- buff[length-1]=0;
- db=buff+dirname_length(buff);
- my_error(ER_NO_SUCH_TABLE, MYF(0), db, table->alias);
+ my_error(ER_NO_SUCH_TABLE, MYF(0), table_share->db.str,
+ table_share->table_name.str);
+ DBUG_VOID_RETURN;
+ case HA_ERR_RBR_LOGGING_FAILED:
+ textno= ER_BINLOG_ROW_LOGGING_FAILED;
break;
+ case HA_ERR_DROP_INDEX_FK:
+ {
+ const char *ptr= "???";
+ uint key_nr= get_dup_key(error);
+ if ((int) key_nr >= 0)
+ ptr= table->key_info[key_nr].name;
+ my_error(ER_DROP_INDEX_FK, MYF(0), ptr);
+ DBUG_VOID_RETURN;
}
case HA_ERR_TABLE_NEEDS_UPGRADE:
textno=ER_TABLE_NEEDS_UPGRADE;
@@ -1931,21 +2690,20 @@ void handler::print_error(int error, myf errflag)
DBUG_VOID_RETURN;
}
}
- my_error(textno, errflag, table->alias, error);
+ my_error(textno, errflag, table_share->table_name.str, error);
DBUG_VOID_RETURN;
}
-/*
- Return an error message specific to this handler
-
- SYNOPSIS
- error error code previously returned by handler
- buf Pointer to String where to add error message
+/**
+ Return an error message specific to this handler.
- Returns true if this is a temporary error
- */
+ @param error error code previously returned by handler
+ @param buf pointer to String where to add error message
+ @return
+ Returns true if this is a temporary error
+*/
bool handler::get_error_message(int error, String* buf)
{
return FALSE;
@@ -1971,7 +2729,7 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
if (!keypart->fieldnr)
continue;
Field *field= table->field[keypart->fieldnr-1];
- if (field->type() == FIELD_TYPE_BLOB)
+ if (field->type() == MYSQL_TYPE_BLOB)
{
if (check_opt->sql_flags & TT_FOR_UPGRADE)
check_opt->flags= T_MEDIUM;
@@ -1995,7 +2753,7 @@ int handler::check_old_types()
/* check for bad DECIMAL field */
for (field= table->field; (*field); field++)
{
- if ((*field)->type() == FIELD_TYPE_NEWDECIMAL)
+ if ((*field)->type() == MYSQL_TYPE_NEWDECIMAL)
{
return HA_ADMIN_NEEDS_ALTER;
}
@@ -2009,7 +2767,7 @@ int handler::check_old_types()
}
-static bool update_frm_version(TABLE *table, bool needs_lock)
+static bool update_frm_version(TABLE *table)
{
char path[FN_REFLEN];
File file;
@@ -2025,72 +2783,65 @@ static bool update_frm_version(TABLE *table, bool needs_lock)
if (table->s->mysql_version == MYSQL_VERSION_ID)
DBUG_RETURN(0);
- strxnmov(path, sizeof(path)-1, mysql_data_home, "/", table->s->db, "/",
- table->s->table_name, reg_ext, NullS);
- if (!unpack_filename(path, path))
- DBUG_RETURN(1);
-
- if (needs_lock)
- pthread_mutex_lock(&LOCK_open);
+ strxmov(path, table->s->normalized_path.str, reg_ext, NullS);
if ((file= my_open(path, O_RDWR|O_BINARY, MYF(MY_WME))) >= 0)
{
uchar version[4];
- char *key= table->s->table_cache_key;
- uint key_length= table->s->key_length;
+ char *key= table->s->table_cache_key.str;
+ uint key_length= table->s->table_cache_key.length;
TABLE *entry;
HASH_SEARCH_STATE state;
int4store(version, MYSQL_VERSION_ID);
- if ((result= my_pwrite(file,(byte*) version,4,51L,MYF_RW)))
+ if ((result= my_pwrite(file,(uchar*) version,4,51L,MYF_RW)))
goto err;
- for (entry=(TABLE*) hash_first(&open_cache,(byte*) key,key_length, &state);
+ for (entry=(TABLE*) hash_first(&open_cache,(uchar*) key,key_length, &state);
entry;
- entry= (TABLE*) hash_next(&open_cache,(byte*) key,key_length, &state))
+ entry= (TABLE*) hash_next(&open_cache,(uchar*) key,key_length, &state))
entry->s->mysql_version= MYSQL_VERSION_ID;
}
err:
if (file >= 0)
VOID(my_close(file,MYF(MY_WME)));
- if (needs_lock)
- pthread_mutex_unlock(&LOCK_open);
DBUG_RETURN(result);
}
-/* Return key if error because of duplicated keys */
-
+/**
+ @return
+ key if error because of duplicated keys
+*/
uint handler::get_dup_key(int error)
{
DBUG_ENTER("handler::get_dup_key");
table->file->errkey = (uint) -1;
- if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOUND_DUPP_UNIQUE ||
- error == HA_ERR_NULL_IN_SPATIAL)
+ if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
+ error == HA_ERR_FOUND_DUPP_UNIQUE || error == HA_ERR_NULL_IN_SPATIAL ||
+ error == HA_ERR_DROP_INDEX_FK)
info(HA_STATUS_ERRKEY | HA_STATUS_NO_LOCK);
DBUG_RETURN(table->file->errkey);
}
-/*
- Delete all files with extension from bas_ext()
+/**
+ Delete all files with extension from bas_ext().
- SYNOPSIS
- delete_table()
- name Base name of table
+ @param name Base name of table
- NOTES
+ @note
We assume that the handler may return more extensions than
was actually used for the file.
- RETURN
+ @retval
0 If we successfully deleted at least one file from base_ext and
- didn't get any other errors than ENOENT
- # Error
+ didn't get any other errors than ENOENT
+ @retval
+ !0 Error
*/
-
int handler::delete_table(const char *name)
{
int error= 0;
@@ -2099,7 +2850,7 @@ int handler::delete_table(const char *name)
for (const char **ext=bas_ext(); *ext ; ext++)
{
- fn_format(buff, name, "", *ext, 2 | 4);
+ fn_format(buff, name, "", *ext, MY_UNPACK_FILENAME|MY_APPEND_EXT);
if (my_delete_with_symlink(buff, MYF(0)))
{
if ((error= my_errno) != ENOENT)
@@ -2129,23 +2880,28 @@ int handler::rename_table(const char * from, const char * to)
}
-/*
- Performs checks upon the table.
+void handler::drop_table(const char *name)
+{
+ close();
+ delete_table(name);
+}
- SYNOPSIS
- check()
- thd thread doing CHECK TABLE operation
- check_opt options from the parser
- NOTES
+/**
+ Performs checks upon the table.
- RETURN
- HA_ADMIN_OK Successful upgrade
- HA_ADMIN_NEEDS_UPGRADE Table has structures requiring upgrade
- HA_ADMIN_NEEDS_ALTER Table has structures requiring ALTER TABLE
- HA_ADMIN_NOT_IMPLEMENTED
-*/
+ @param thd thread doing CHECK TABLE operation
+ @param check_opt options from the parser
+ @retval
+ HA_ADMIN_OK Successful upgrade
+ @retval
+ HA_ADMIN_NEEDS_UPGRADE Table has structures requiring upgrade
+ @retval
+ HA_ADMIN_NEEDS_ALTER Table has structures requiring ALTER TABLE
+ @retval
+ HA_ADMIN_NOT_IMPLEMENTED
+*/
int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt)
{
int error;
@@ -2166,20 +2922,429 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt)
}
if ((error= check(thd, check_opt)))
return error;
- return update_frm_version(table, 0);
+ return update_frm_version(table);
}
+/**
+ A helper function to mark a transaction read-write,
+ if it is started.
+*/
+
+inline
+void
+handler::mark_trx_read_write()
+{
+ Ha_trx_info *ha_info= &ha_thd()->ha_data[ht->slot].ha_info[0];
+ /*
+ When a storage engine method is called, the transaction must
+ have been started, unless it's a DDL call, for which the
+ storage engine starts the transaction internally, and commits
+ it internally, without registering in the ha_list.
+ Unfortunately here we can't know know for sure if the engine
+ has registered the transaction or not, so we must check.
+ */
+ if (ha_info->is_started())
+ {
+ DBUG_ASSERT(has_transactions());
+ /*
+ table_share can be NULL in ha_delete_table(). See implementation
+ of standalone function ha_delete_table() in sql_base.cc.
+ */
+ if (table_share == NULL || table_share->tmp_table == NO_TMP_TABLE)
+ ha_info->set_trx_read_write();
+ }
+}
+
+
+/**
+ Repair table: public interface.
+
+ @sa handler::repair()
+*/
int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt)
{
int result;
+
+ mark_trx_read_write();
+
if ((result= repair(thd, check_opt)))
return result;
- return update_frm_version(table, 0);
+ return update_frm_version(table);
}
-/*
+/**
+ Bulk update row: public interface.
+
+ @sa handler::bulk_update_row()
+*/
+
+int
+handler::ha_bulk_update_row(const uchar *old_data, uchar *new_data,
+ uint *dup_key_found)
+{
+ mark_trx_read_write();
+
+ return bulk_update_row(old_data, new_data, dup_key_found);
+}
+
+
+/**
+ Delete all rows: public interface.
+
+ @sa handler::delete_all_rows()
+*/
+
+int
+handler::ha_delete_all_rows()
+{
+ mark_trx_read_write();
+
+ return delete_all_rows();
+}
+
+
+/**
+ Reset auto increment: public interface.
+
+ @sa handler::reset_auto_increment()
+*/
+
+int
+handler::ha_reset_auto_increment(ulonglong value)
+{
+ mark_trx_read_write();
+
+ return reset_auto_increment(value);
+}
+
+
+/**
+ Backup table: public interface.
+
+ @sa handler::backup()
+*/
+
+int
+handler::ha_backup(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ mark_trx_read_write();
+
+ return backup(thd, check_opt);
+}
+
+
+/**
+ Restore table: public interface.
+
+ @sa handler::restore()
+*/
+
+int
+handler::ha_restore(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ mark_trx_read_write();
+
+ return restore(thd, check_opt);
+}
+
+
+/**
+ Optimize table: public interface.
+
+ @sa handler::optimize()
+*/
+
+int
+handler::ha_optimize(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ mark_trx_read_write();
+
+ return optimize(thd, check_opt);
+}
+
+
+/**
+ Analyze table: public interface.
+
+ @sa handler::analyze()
+*/
+
+int
+handler::ha_analyze(THD* thd, HA_CHECK_OPT* check_opt)
+{
+ mark_trx_read_write();
+
+ return analyze(thd, check_opt);
+}
+
+
+/**
+ Check and repair table: public interface.
+
+ @sa handler::check_and_repair()
+*/
+
+bool
+handler::ha_check_and_repair(THD *thd)
+{
+ mark_trx_read_write();
+
+ return check_and_repair(thd);
+}
+
+
+/**
+ Disable indexes: public interface.
+
+ @sa handler::disable_indexes()
+*/
+
+int
+handler::ha_disable_indexes(uint mode)
+{
+ mark_trx_read_write();
+
+ return disable_indexes(mode);
+}
+
+
+/**
+ Enable indexes: public interface.
+
+ @sa handler::enable_indexes()
+*/
+
+int
+handler::ha_enable_indexes(uint mode)
+{
+ mark_trx_read_write();
+
+ return enable_indexes(mode);
+}
+
+
+/**
+ Discard or import tablespace: public interface.
+
+ @sa handler::discard_or_import_tablespace()
+*/
+
+int
+handler::ha_discard_or_import_tablespace(my_bool discard)
+{
+ mark_trx_read_write();
+
+ return discard_or_import_tablespace(discard);
+}
+
+
+/**
+ Prepare for alter: public interface.
+
+ Called to prepare an *online* ALTER.
+
+ @sa handler::prepare_for_alter()
+*/
+
+void
+handler::ha_prepare_for_alter()
+{
+ mark_trx_read_write();
+
+ prepare_for_alter();
+}
+
+
+/**
+ Rename table: public interface.
+
+ @sa handler::rename_table()
+*/
+
+int
+handler::ha_rename_table(const char *from, const char *to)
+{
+ mark_trx_read_write();
+
+ return rename_table(from, to);
+}
+
+
+/**
+ Delete table: public interface.
+
+ @sa handler::delete_table()
+*/
+
+int
+handler::ha_delete_table(const char *name)
+{
+ mark_trx_read_write();
+
+ return delete_table(name);
+}
+
+
+/**
+ Drop table in the engine: public interface.
+
+ @sa handler::drop_table()
+*/
+
+void
+handler::ha_drop_table(const char *name)
+{
+ mark_trx_read_write();
+
+ return drop_table(name);
+}
+
+
+/**
+ Create a table in the engine: public interface.
+
+ @sa handler::create()
+*/
+
+int
+handler::ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info)
+{
+ mark_trx_read_write();
+
+ return create(name, form, info);
+}
+
+
+/**
+ Create handler files for CREATE TABLE: public interface.
+
+ @sa handler::create_handler_files()
+*/
+
+int
+handler::ha_create_handler_files(const char *name, const char *old_name,
+ int action_flag, HA_CREATE_INFO *info)
+{
+ mark_trx_read_write();
+
+ return create_handler_files(name, old_name, action_flag, info);
+}
+
+
+/**
+ Change partitions: public interface.
+
+ @sa handler::change_partitions()
+*/
+
+int
+handler::ha_change_partitions(HA_CREATE_INFO *create_info,
+ const char *path,
+ ulonglong *copied,
+ ulonglong *deleted,
+ const uchar *pack_frm_data,
+ size_t pack_frm_len)
+{
+ mark_trx_read_write();
+
+ return change_partitions(create_info, path, copied, deleted,
+ pack_frm_data, pack_frm_len);
+}
+
+
+/**
+ Drop partitions: public interface.
+
+ @sa handler::drop_partitions()
+*/
+
+int
+handler::ha_drop_partitions(const char *path)
+{
+ mark_trx_read_write();
+
+ return drop_partitions(path);
+}
+
+
+/**
+ Rename partitions: public interface.
+
+ @sa handler::rename_partitions()
+*/
+
+int
+handler::ha_rename_partitions(const char *path)
+{
+ mark_trx_read_write();
+
+ return rename_partitions(path);
+}
+
+
+/**
+ Optimize partitions: public interface.
+
+ @sa handler::optimize_partitions()
+*/
+
+int
+handler::ha_optimize_partitions(THD *thd)
+{
+ mark_trx_read_write();
+
+ return optimize_partitions(thd);
+}
+
+
+/**
+ Analyze partitions: public interface.
+
+ @sa handler::analyze_partitions()
+*/
+
+int
+handler::ha_analyze_partitions(THD *thd)
+{
+ mark_trx_read_write();
+
+ return analyze_partitions(thd);
+}
+
+
+/**
+ Check partitions: public interface.
+
+ @sa handler::check_partitions()
+*/
+
+int
+handler::ha_check_partitions(THD *thd)
+{
+ mark_trx_read_write();
+
+ return check_partitions(thd);
+}
+
+
+/**
+ Repair partitions: public interface.
+
+ @sa handler::repair_partitions()
+*/
+
+int
+handler::ha_repair_partitions(THD *thd)
+{
+ mark_trx_read_write();
+
+ return repair_partitions(thd);
+}
+
+
+/**
Tell the storage engine that it is allowed to "disable transaction" in the
handler. It is a hint that ACID is not required - it is used in NDB for
ALTER TABLE, for example, when data are copied to temporary table.
@@ -2187,14 +3352,13 @@ int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt)
starts to commit every now and then automatically.
This hint can be safely ignored.
*/
-
int ha_enable_transaction(THD *thd, bool on)
{
int error=0;
-
DBUG_ENTER("ha_enable_transaction");
- thd->transaction.on= on;
- if (on)
+ DBUG_PRINT("enter", ("on: %d", (int) on));
+
+ if ((thd->transaction.on= on))
{
/*
Now all storage engines should have transaction handling enabled.
@@ -2202,24 +3366,86 @@ int ha_enable_transaction(THD *thd, bool on)
is an optimization hint that storage engine is free to ignore.
So, let's commit an open transaction (if any) now.
*/
- if (!(error= ha_commit_stmt(thd)))
+ if (!(error= ha_commit_trans(thd, 0)))
error= end_trans(thd, COMMIT);
}
DBUG_RETURN(error);
}
-int handler::index_next_same(byte *buf, const byte *key, uint keylen)
+int handler::index_next_same(uchar *buf, const uchar *key, uint keylen)
{
int error;
+ DBUG_ENTER("index_next_same");
if (!(error=index_next(buf)))
{
+ my_ptrdiff_t ptrdiff= buf - table->record[0];
+ uchar *save_record_0;
+ KEY *key_info;
+ KEY_PART_INFO *key_part;
+ KEY_PART_INFO *key_part_end;
+ LINT_INIT(save_record_0);
+ LINT_INIT(key_info);
+ LINT_INIT(key_part);
+ LINT_INIT(key_part_end);
+
+ /*
+ key_cmp_if_same() compares table->record[0] against 'key'.
+ In parts it uses table->record[0] directly, in parts it uses
+ field objects with their local pointers into table->record[0].
+ If 'buf' is distinct from table->record[0], we need to move
+ all record references. This is table->record[0] itself and
+ the field pointers of the fields used in this key.
+ */
+ if (ptrdiff)
+ {
+ save_record_0= table->record[0];
+ table->record[0]= buf;
+ key_info= table->key_info + active_index;
+ key_part= key_info->key_part;
+ key_part_end= key_part + key_info->key_parts;
+ for (; key_part < key_part_end; key_part++)
+ {
+ DBUG_ASSERT(key_part->field);
+ key_part->field->move_field_offset(ptrdiff);
+ }
+ }
+
if (key_cmp_if_same(table, key, active_index, keylen))
{
table->status=STATUS_NOT_FOUND;
error=HA_ERR_END_OF_FILE;
}
+
+ /* Move back if necessary. */
+ if (ptrdiff)
+ {
+ table->record[0]= save_record_0;
+ for (key_part= key_info->key_part; key_part < key_part_end; key_part++)
+ key_part->field->move_field_offset(-ptrdiff);
+ }
}
- return error;
+ DBUG_RETURN(error);
+}
+
+
+void handler::get_dynamic_partition_info(PARTITION_INFO *stat_info,
+ uint part_id)
+{
+ info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
+ HA_STATUS_NO_LOCK);
+ stat_info->records= stats.records;
+ stat_info->mean_rec_length= stats.mean_rec_length;
+ stat_info->data_file_length= stats.data_file_length;
+ stat_info->max_data_file_length= stats.max_data_file_length;
+ stat_info->index_file_length= stats.index_file_length;
+ stat_info->delete_length= stats.delete_length;
+ stat_info->create_time= stats.create_time;
+ stat_info->update_time= stats.update_time;
+ stat_info->check_time= stats.check_time;
+ stat_info->check_sum= 0;
+ if (table_flags() & (ulong) HA_HAS_CHECKSUM)
+ stat_info->check_sum= checksum();
+ return;
}
@@ -2227,66 +3453,75 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
** Some general functions that isn't in the handler class
****************************************************************************/
-/*
- Initiates table-file and calls apropriate database-creator
- Returns 1 if something got wrong
-*/
+/**
+ Initiates table-file and calls appropriate database-creator.
-int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
+ @retval
+ 0 ok
+ @retval
+ 1 error
+*/
+int ha_create_table(THD *thd, const char *path,
+ const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info,
bool update_create_info)
{
- int error;
+ int error= 1;
TABLE table;
char name_buff[FN_REFLEN];
+ const char *name;
+ TABLE_SHARE share;
DBUG_ENTER("ha_create_table");
+
+ init_tmp_table_share(thd, &share, db, 0, table_name, path);
+ if (open_table_def(thd, &share, 0) ||
+ open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table,
+ TRUE))
+ goto err;
- if (openfrm(current_thd, name,"",0,(uint) READ_ALL, 0, &table))
- DBUG_RETURN(1);
if (update_create_info)
- {
update_create_info_from_table(create_info, &table);
- }
- if (lower_case_table_names == 2 &&
- !(table.file->table_flags() & HA_FILE_BASED))
- {
- /* Ensure that handler gets name in lower case */
- strmov(name_buff, name);
- my_casedn_str(files_charset_info, name_buff);
- name= name_buff;
- }
- error=table.file->create(name,&table,create_info);
- VOID(closefrm(&table));
+ name= check_lowercase_names(table.file, share.path.str, name_buff);
+
+ error= table.file->ha_create(name, &table, create_info);
+ VOID(closefrm(&table, 0));
if (error)
- my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name,error);
+ {
+ strxmov(name_buff, db, ".", table_name, NullS);
+ my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name_buff, error);
+ }
+err:
+ free_table_share(&share);
DBUG_RETURN(error != 0);
}
-/*
- Try to discover table from engine and
- if found, write the frm file to disk.
+/**
+ Try to discover table from engine.
- RETURN VALUES:
- -1 : Table did not exists
- 0 : Table created ok
- > 0 : Error, table existed but could not be created
+ @note
+ If found, write the frm file to disk.
+ @retval
+ -1 Table did not exists
+ @retval
+ 0 Table created ok
+ @retval
+ > 0 Error, table existed but could not be created
*/
-
-int ha_create_table_from_engine(THD* thd,
- const char *db,
- const char *name)
+int ha_create_table_from_engine(THD* thd, const char *db, const char *name)
{
int error;
- const void *frmblob;
- uint frmlen;
+ uchar *frmblob;
+ size_t frmlen;
char path[FN_REFLEN];
HA_CREATE_INFO create_info;
TABLE table;
+ TABLE_SHARE share;
DBUG_ENTER("ha_create_table_from_engine");
DBUG_PRINT("enter", ("name '%s'.'%s'", db, name));
- bzero((char*) &create_info,sizeof(create_info));
+ bzero((uchar*) &create_info,sizeof(create_info));
if ((error= ha_discover(thd, db, name, &frmblob, &frmlen)))
{
/* Table could not be discovered and thus not created */
@@ -2298,27 +3533,30 @@ int ha_create_table_from_engine(THD* thd,
frmblob and frmlen are set, write the frm to disk
*/
- (void)strxnmov(path,FN_REFLEN,mysql_data_home,"/",db,"/",name,NullS);
+ build_table_filename(path, FN_REFLEN-1, db, name, "", 0);
// Save the frm file
error= writefrm(path, frmblob, frmlen);
- my_free((char*) frmblob, MYF(0));
+ my_free(frmblob, MYF(0));
if (error)
DBUG_RETURN(2);
- if (openfrm(thd, path,"",0,(uint) READ_ALL, 0, &table))
+ init_tmp_table_share(thd, &share, db, 0, name, path);
+ if (open_table_def(thd, &share, 0))
+ {
+ DBUG_RETURN(3);
+ }
+ if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table, FALSE))
+ {
+ free_table_share(&share);
DBUG_RETURN(3);
+ }
update_create_info_from_table(&create_info, &table);
create_info.table_options|= HA_OPTION_CREATE_FROM_ENGINE;
- if (lower_case_table_names == 2 &&
- !(table.file->table_flags() & HA_FILE_BASED))
- {
- /* Ensure that handler gets name in lower case */
- my_casedn_str(files_charset_info, path);
- }
- error=table.file->create(path,&table,&create_info);
- VOID(closefrm(&table));
+ check_lowercase_names(table.file, path, path);
+ error=table.file->ha_create(path, &table, &create_info);
+ VOID(closefrm(&table, 1));
DBUG_RETURN(error != 0);
}
@@ -2340,9 +3578,9 @@ void st_ha_check_opt::init()
call to ha_init_key_cache() (probably out of memory)
*****************************************************************************/
-/* Init a key cache if it has not been initied before */
-
-
+/**
+ Init a key cache if it has not been initied before.
+*/
int ha_init_key_cache(const char *name, KEY_CACHE *key_cache)
{
DBUG_ENTER("ha_init_key_cache");
@@ -2364,8 +3602,9 @@ int ha_init_key_cache(const char *name, KEY_CACHE *key_cache)
}
-/* Resize key cache */
-
+/**
+ Resize key cache.
+*/
int ha_resize_key_cache(KEY_CACHE *key_cache)
{
DBUG_ENTER("ha_resize_key_cache");
@@ -2386,8 +3625,9 @@ int ha_resize_key_cache(KEY_CACHE *key_cache)
}
-/* Change parameters for key cache (like size) */
-
+/**
+ Change parameters for key cache (like size)
+*/
int ha_change_key_cache_param(KEY_CACHE *key_cache)
{
if (key_cache->key_cache_inited)
@@ -2401,16 +3641,18 @@ int ha_change_key_cache_param(KEY_CACHE *key_cache)
return 0;
}
-/* Free memory allocated by a key cache */
-
+/**
+ Free memory allocated by a key cache.
+*/
int ha_end_key_cache(KEY_CACHE *key_cache)
{
end_key_cache(key_cache, 1); // Can never fail
return 0;
}
-/* Move all tables from one key cache to another one */
-
+/**
+ Move all tables from one key cache to another one.
+*/
int ha_change_key_cache(KEY_CACHE *old_key_cache,
KEY_CACHE *new_key_cache)
{
@@ -2419,103 +3661,311 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
}
-/*
- Try to discover one table from handler(s)
+/**
+ Try to discover one table from handler(s).
- RETURN
- -1 : Table did not exists
- 0 : OK. In this case *frmblob and *frmlen are set
- >0 : error. frmblob and frmlen may not be set
+ @retval
+ -1 Table did not exists
+ @retval
+ 0 OK. In this case *frmblob and *frmlen are set
+ @retval
+ >0 error. frmblob and frmlen may not be set
*/
+struct st_discover_args
+{
+ const char *db;
+ const char *name;
+ uchar **frmblob;
+ size_t *frmlen;
+};
+
+static my_bool discover_handlerton(THD *thd, plugin_ref plugin,
+ void *arg)
+{
+ st_discover_args *vargs= (st_discover_args *)arg;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->discover &&
+ (!(hton->discover(hton, thd, vargs->db, vargs->name,
+ vargs->frmblob,
+ vargs->frmlen))))
+ return TRUE;
+
+ return FALSE;
+}
int ha_discover(THD *thd, const char *db, const char *name,
- const void **frmblob, uint *frmlen)
+ uchar **frmblob, size_t *frmlen)
{
int error= -1; // Table does not exist in any handler
DBUG_ENTER("ha_discover");
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
+ st_discover_args args= {db, name, frmblob, frmlen};
+
if (is_prefix(name,tmp_file_prefix)) /* skip temporary tables */
DBUG_RETURN(error);
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- error= ndbcluster_discover(thd, db, name, frmblob, frmlen);
-#endif
+
+ if (plugin_foreach(thd, discover_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &args))
+ error= 0;
+
if (!error)
- statistic_increment(thd->status_var.ha_discover_count,&LOCK_status);
+ status_var_increment(thd->status_var.ha_discover_count);
DBUG_RETURN(error);
}
-/*
+/**
Call this function in order to give the handler the possiblity
to ask engine if there are any new tables that should be written to disk
or any dropped tables that need to be removed from disk
*/
+struct st_find_files_args
+{
+ const char *db;
+ const char *path;
+ const char *wild;
+ bool dir;
+ List<LEX_STRING> *files;
+};
+
+static my_bool find_files_handlerton(THD *thd, plugin_ref plugin,
+ void *arg)
+{
+ st_find_files_args *vargs= (st_find_files_args *)arg;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+
+
+ if (hton->state == SHOW_OPTION_YES && hton->find_files)
+ if (hton->find_files(hton, thd, vargs->db, vargs->path, vargs->wild,
+ vargs->dir, vargs->files))
+ return TRUE;
+
+ return FALSE;
+}
int
ha_find_files(THD *thd,const char *db,const char *path,
- const char *wild, bool dir, List<char> *files)
+ const char *wild, bool dir, List<LEX_STRING> *files)
{
int error= 0;
DBUG_ENTER("ha_find_files");
- DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d",
- db, path, wild, dir));
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- error= ndbcluster_find_files(thd, db, path, wild, dir, files);
-#endif
+ DBUG_PRINT("enter", ("db: '%s' path: '%s' wild: '%s' dir: %d",
+ db, path, wild ? wild : "NULL", dir));
+ st_find_files_args args= {db, path, wild, dir, files};
+
+ plugin_foreach(thd, find_files_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &args);
+ /* The return value is not currently used */
DBUG_RETURN(error);
}
-
-/*
- Ask handler if the table exists in engine
-
- RETURN
+/**
+ Ask handler if the table exists in engine.
+ @retval
HA_ERR_NO_SUCH_TABLE Table does not exist
+ @retval
HA_ERR_TABLE_EXIST Table exists
- # Error code
+ @retval
+ \# Error code
+*/
+struct st_table_exists_in_engine_args
+{
+ const char *db;
+ const char *name;
+ int err;
+};
+
+static my_bool table_exists_in_engine_handlerton(THD *thd, plugin_ref plugin,
+ void *arg)
+{
+ st_table_exists_in_engine_args *vargs= (st_table_exists_in_engine_args *)arg;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+
+ int err= HA_ERR_NO_SUCH_TABLE;
+
+ if (hton->state == SHOW_OPTION_YES && hton->table_exists_in_engine)
+ err = hton->table_exists_in_engine(hton, thd, vargs->db, vargs->name);
+
+ vargs->err = err;
+ if (vargs->err == HA_ERR_TABLE_EXIST)
+ return TRUE;
+
+ return FALSE;
+}
- */
int ha_table_exists_in_engine(THD* thd, const char* db, const char* name)
{
- int error= HA_ERR_NO_SUCH_TABLE;
DBUG_ENTER("ha_table_exists_in_engine");
DBUG_PRINT("enter", ("db: %s, name: %s", db, name));
-#ifdef HAVE_NDBCLUSTER_DB
- if (have_ndbcluster == SHOW_OPTION_YES)
- error= ndbcluster_table_exists_in_engine(thd, db, name);
-#endif
- DBUG_PRINT("exit", ("error: %d", error));
- DBUG_RETURN(error);
+ st_table_exists_in_engine_args args= {db, name, HA_ERR_NO_SUCH_TABLE};
+ plugin_foreach(thd, table_exists_in_engine_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &args);
+ DBUG_PRINT("exit", ("error: %d", args.err));
+ DBUG_RETURN(args.err);
}
-
+#ifdef HAVE_NDB_BINLOG
/*
+ TODO: change this into a dynamic struct
+ List<handlerton> does not work as
+ 1. binlog_end is called when MEM_ROOT is gone
+ 2. cannot work with thd MEM_ROOT as memory should be freed
+*/
+#define MAX_HTON_LIST_ST 63
+struct hton_list_st
+{
+ handlerton *hton[MAX_HTON_LIST_ST];
+ uint sz;
+};
+
+struct binlog_func_st
+{
+ enum_binlog_func fn;
+ void *arg;
+};
+
+/** @brief
+ Listing handlertons first to avoid recursive calls and deadlock
+*/
+static my_bool binlog_func_list(THD *thd, plugin_ref plugin, void *arg)
+{
+ hton_list_st *hton_list= (hton_list_st *)arg;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->binlog_func)
+ {
+ uint sz= hton_list->sz;
+ if (sz == MAX_HTON_LIST_ST-1)
+ {
+ /* list full */
+ return FALSE;
+ }
+ hton_list->hton[sz]= hton;
+ hton_list->sz= sz+1;
+ }
+ return FALSE;
+}
+
+static my_bool binlog_func_foreach(THD *thd, binlog_func_st *bfn)
+{
+ hton_list_st hton_list;
+ uint i, sz;
+
+ hton_list.sz= 0;
+ plugin_foreach(thd, binlog_func_list,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &hton_list);
+
+ for (i= 0, sz= hton_list.sz; i < sz ; i++)
+ hton_list.hton[i]->binlog_func(hton_list.hton[i], thd, bfn->fn, bfn->arg);
+ return FALSE;
+}
+
+int ha_reset_logs(THD *thd)
+{
+ binlog_func_st bfn= {BFN_RESET_LOGS, 0};
+ binlog_func_foreach(thd, &bfn);
+ return 0;
+}
+
+void ha_reset_slave(THD* thd)
+{
+ binlog_func_st bfn= {BFN_RESET_SLAVE, 0};
+ binlog_func_foreach(thd, &bfn);
+}
+
+void ha_binlog_wait(THD* thd)
+{
+ binlog_func_st bfn= {BFN_BINLOG_WAIT, 0};
+ binlog_func_foreach(thd, &bfn);
+}
+
+int ha_binlog_end(THD* thd)
+{
+ binlog_func_st bfn= {BFN_BINLOG_END, 0};
+ binlog_func_foreach(thd, &bfn);
+ return 0;
+}
+
+int ha_binlog_index_purge_file(THD *thd, const char *file)
+{
+ binlog_func_st bfn= {BFN_BINLOG_PURGE_FILE, (void *)file};
+ binlog_func_foreach(thd, &bfn);
+ return 0;
+}
+
+struct binlog_log_query_st
+{
+ enum_binlog_command binlog_command;
+ const char *query;
+ uint query_length;
+ const char *db;
+ const char *table_name;
+};
+
+static my_bool binlog_log_query_handlerton2(THD *thd,
+ handlerton *hton,
+ void *args)
+{
+ struct binlog_log_query_st *b= (struct binlog_log_query_st*)args;
+ if (hton->state == SHOW_OPTION_YES && hton->binlog_log_query)
+ hton->binlog_log_query(hton, thd,
+ b->binlog_command,
+ b->query,
+ b->query_length,
+ b->db,
+ b->table_name);
+ return FALSE;
+}
+
+static my_bool binlog_log_query_handlerton(THD *thd,
+ plugin_ref plugin,
+ void *args)
+{
+ return binlog_log_query_handlerton2(thd, plugin_data(plugin, handlerton *), args);
+}
+
+void ha_binlog_log_query(THD *thd, handlerton *hton,
+ enum_binlog_command binlog_command,
+ const char *query, uint query_length,
+ const char *db, const char *table_name)
+{
+ struct binlog_log_query_st b;
+ b.binlog_command= binlog_command;
+ b.query= query;
+ b.query_length= query_length;
+ b.db= db;
+ b.table_name= table_name;
+ if (hton == 0)
+ plugin_foreach(thd, binlog_log_query_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &b);
+ else
+ binlog_log_query_handlerton2(thd, hton, &b);
+}
+#endif
+
+/**
Read the first row of a multi-range set.
- SYNOPSIS
- read_multi_range_first()
- found_range_p Returns a pointer to the element in 'ranges' that
- corresponds to the returned row.
- ranges An array of KEY_MULTI_RANGE range descriptions.
- range_count Number of ranges in 'ranges'.
- sorted If result should be sorted per key.
- buffer A HANDLER_BUFFER for internal handler usage.
-
- NOTES
- Record is read into table->record[0].
- *found_range_p returns a valid value only if read_multi_range_first()
+ @param found_range_p Returns a pointer to the element in 'ranges' that
+ corresponds to the returned row.
+ @param ranges An array of KEY_MULTI_RANGE range descriptions.
+ @param range_count Number of ranges in 'ranges'.
+ @param sorted If result should be sorted per key.
+ @param buffer A HANDLER_BUFFER for internal handler usage.
+
+ @note
+ - Record is read into table->record[0].
+ - *found_range_p returns a valid value only if read_multi_range_first()
returns 0.
- Sorting is done within each range. If you want an overall sort, enter
+ - Sorting is done within each range. If you want an overall sort, enter
'ranges' with sorted ranges.
- RETURN
+ @retval
0 OK, found a row
+ @retval
HA_ERR_END_OF_FILE No rows in range
- # Error code
+ @retval
+ \# Error code
*/
-
int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
KEY_MULTI_RANGE *ranges, uint range_count,
bool sorted, HANDLER_BUFFER *buffer)
@@ -2525,13 +3975,16 @@ int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
multi_range_sorted= sorted;
multi_range_buffer= buffer;
+ table->mark_columns_used_by_index_no_reset(active_index, table->read_set);
+ table->column_bitmaps_set(table->read_set, table->write_set);
+
for (multi_range_curr= ranges, multi_range_end= ranges + range_count;
multi_range_curr < multi_range_end;
multi_range_curr++)
{
- result= read_range_first(multi_range_curr->start_key.length ?
+ result= read_range_first(multi_range_curr->start_key.keypart_map ?
&multi_range_curr->start_key : 0,
- multi_range_curr->end_key.length ?
+ multi_range_curr->end_key.keypart_map ?
&multi_range_curr->end_key : 0,
test(multi_range_curr->range_flag & EQ_RANGE),
multi_range_sorted);
@@ -2545,25 +3998,24 @@ int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
}
-/*
+/**
Read the next row of a multi-range set.
- SYNOPSIS
- read_multi_range_next()
- found_range_p Returns a pointer to the element in 'ranges' that
- corresponds to the returned row.
+ @param found_range_p Returns a pointer to the element in 'ranges' that
+ corresponds to the returned row.
- NOTES
- Record is read into table->record[0].
- *found_range_p returns a valid value only if read_multi_range_next()
+ @note
+ - Record is read into table->record[0].
+ - *found_range_p returns a valid value only if read_multi_range_next()
returns 0.
- RETURN
+ @retval
0 OK, found a row
+ @retval
HA_ERR_END_OF_FILE No (more) rows in range
- # Error code
+ @retval
+ \# Error code
*/
-
int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p)
{
int result;
@@ -2585,6 +4037,8 @@ int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p)
}
else
{
+ if (was_semi_consistent_read())
+ goto scan_it_again;
/*
We need to set this for the last range only, but checking this
condition is more expensive than just setting the result code.
@@ -2592,14 +4046,14 @@ int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p)
result= HA_ERR_END_OF_FILE;
}
+ multi_range_curr++;
+scan_it_again:
/* Try the next range(s) until one matches a record. */
- for (multi_range_curr++;
- multi_range_curr < multi_range_end;
- multi_range_curr++)
+ for (; multi_range_curr < multi_range_end; multi_range_curr++)
{
- result= read_range_first(multi_range_curr->start_key.length ?
+ result= read_range_first(multi_range_curr->start_key.keypart_map ?
&multi_range_curr->start_key : 0,
- multi_range_curr->end_key.length ?
+ multi_range_curr->end_key.keypart_map ?
&multi_range_curr->end_key : 0,
test(multi_range_curr->range_flag & EQ_RANGE),
multi_range_sorted);
@@ -2616,27 +4070,25 @@ int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p)
}
-/*
+/**
Read first row between two ranges.
- Store ranges for future calls to read_range_next
+ Store ranges for future calls to read_range_next.
- SYNOPSIS
- read_range_first()
- start_key Start key. Is 0 if no min range
- end_key End key. Is 0 if no max range
- eq_range_arg Set to 1 if start_key == end_key and the range endpoints
- will not change during query execution.
- sorted Set to 1 if result should be sorted per key
-
- NOTES
+ @param start_key Start key. Is 0 if no min range
+ @param end_key End key. Is 0 if no max range
+ @param eq_range_arg Set to 1 if start_key == end_key
+ @param sorted Set to 1 if result should be sorted per key
+
+ @note
Record is read into table->record[0]
- RETURN
+ @retval
0 Found row
+ @retval
HA_ERR_END_OF_FILE No rows in range
- # Error code
+ @retval
+ \# Error code
*/
-
int handler::read_range_first(const key_range *start_key,
const key_range *end_key,
bool eq_range_arg, bool sorted)
@@ -2658,12 +4110,12 @@ int handler::read_range_first(const key_range *start_key,
if (!start_key) // Read first record
result= index_first(table->record[0]);
else
- result= index_read(table->record[0],
- start_key->key,
- start_key->length,
- start_key->flag);
+ result= index_read_map(table->record[0],
+ start_key->key,
+ start_key->keypart_map,
+ start_key->flag);
if (result)
- DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND)
+ DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND)
? HA_ERR_END_OF_FILE
: result);
@@ -2671,21 +4123,19 @@ int handler::read_range_first(const key_range *start_key,
}
-/*
+/**
Read next row between two ranges.
- SYNOPSIS
- read_range_next()
-
- NOTES
+ @note
Record is read into table->record[0]
- RETURN
+ @retval
0 Found row
+ @retval
HA_ERR_END_OF_FILE No rows in range
- # Error code
+ @retval
+ \# Error code
*/
-
int handler::read_range_next()
{
int result;
@@ -2705,24 +4155,21 @@ int handler::read_range_next()
}
-/*
- Compare if found key (in row) is over max-value
+/**
+ Compare if found key (in row) is over max-value.
- SYNOPSIS
- compare_key
- range range to compare to row. May be 0 for no range
+ @param range range to compare to row. May be 0 for no range
- NOTES
- See key.cc::key_cmp() for details
+ @seealso
+ key.cc::key_cmp()
- RETURN
+ @return
The return value is SIGN(key_in_row - range_key):
- 0 Key is equal to range or 'range' == 0 (no range)
- -1 Key is less than range
- 1 Key is larger than range
+ - 0 : Key is equal to range or 'range' == 0 (no range)
+ - -1 : Key is less than range
+ - 1 : Key is larger than range
*/
-
int handler::compare_key(key_range *range)
{
int cmp;
@@ -2734,67 +4181,75 @@ int handler::compare_key(key_range *range)
return cmp;
}
-int handler::index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
+
+int handler::index_read_idx_map(uchar * buf, uint index, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag)
{
- int error= ha_index_init(index);
- if (!error)
- error= index_read(buf, key, key_len, find_flag);
+ int error, error1;
+ error= index_init(index, 0);
if (!error)
- error= ha_index_end();
- return error;
+ {
+ error= index_read_map(buf, key, keypart_map, find_flag);
+ error1= index_end();
+ }
+ return error ? error : error1;
}
-/*
+/**
Returns a list of all known extensions.
- SYNOPSIS
- ha_known_exts()
-
- NOTES
No mutexes, worst case race is a minor surplus memory allocation
We have to recreate the extension map if mysqld is restarted (for example
within libmysqld)
- RETURN VALUE
+ @retval
pointer pointer to TYPELIB structure
*/
/**
  plugin_foreach() callback: collect the file extensions used by one
  storage engine into the List<char> passed through 'arg'.

  A temporary handler instance is created only so that bas_ext() can be
  called on it; extensions already present in the list are not added twice.

  @param unused  THD argument required by the callback signature (unused)
  @param plugin  storage engine plugin being visited
  @param arg     List<char>* accumulating the distinct extension strings

  @return FALSE always, so that plugin_foreach() keeps iterating
*/
static my_bool exts_handlerton(THD *unused, plugin_ref plugin,
                               void *arg)
{
  List<char> *found_exts= (List<char> *) arg;
  handlerton *hton= plugin_data(plugin, handlerton *);
  handler *file;
  /* Skip engines that are disabled or cannot create a handler instance. */
  if (hton->state == SHOW_OPTION_YES && hton->create &&
      (file= hton->create(hton, (TABLE_SHARE*) 0, current_thd->mem_root)))
  {
    List_iterator_fast<char> it(*found_exts);
    const char **ext, *old_ext;

    for (ext= file->bas_ext(); *ext; ext++)
    {
      /* Linear duplicate scan; extension lists are tiny. */
      while ((old_ext= it++))
      {
        if (!strcmp(old_ext, *ext))
          break;
      }
      /* old_ext == NULL means the scan found no duplicate. */
      if (!old_ext)
        found_exts->push_back((char *) *ext);

      it.rewind();
    }
    delete file;
  }
  return FALSE;
}
TYPELIB *ha_known_exts(void)
{
- MEM_ROOT *mem_root= current_thd->mem_root;
if (!known_extensions.type_names || mysys_usage_id != known_extensions_id)
{
- handlerton **types;
List<char> found_exts;
- List_iterator_fast<char> it(found_exts);
const char **ext, *old_ext;
known_extensions_id= mysys_usage_id;
- found_exts.push_back((char*) triggers_file_ext);
- found_exts.push_back((char*) trigname_file_ext);
- for (types= sys_table_types; *types; types++)
- {
- if ((*types)->state == SHOW_OPTION_YES)
- {
- handler *file= get_new_handler(0, mem_root,
- (enum db_type) (*types)->db_type);
- for (ext= file->bas_ext(); *ext; ext++)
- {
- while ((old_ext= it++))
- {
- if (!strcmp(old_ext, *ext))
- break;
- }
- if (!old_ext)
- found_exts.push_back((char *) *ext);
-
- it.rewind();
- }
- delete file;
- }
- }
+ found_exts.push_back((char*) TRG_EXT);
+ found_exts.push_back((char*) TRN_EXT);
+
+ plugin_foreach(NULL, exts_handlerton,
+ MYSQL_STORAGE_ENGINE_PLUGIN, &found_exts);
+
ext= (const char **) my_once_alloc(sizeof(char *)*
(found_exts.elements+1),
MYF(MY_WME | MY_FAE));
@@ -2803,9 +4258,506 @@ TYPELIB *ha_known_exts(void)
known_extensions.count= found_exts.elements;
known_extensions.type_names= ext;
+ List_iterator_fast<char> it(found_exts);
while ((old_ext= it++))
*ext++= old_ext;
*ext= 0;
}
return &known_extensions;
}
+
+
+static bool stat_print(THD *thd, const char *type, uint type_len,
+ const char *file, uint file_len,
+ const char *status, uint status_len)
+{
+ Protocol *protocol= thd->protocol;
+ protocol->prepare_for_resend();
+ protocol->store(type, type_len, system_charset_info);
+ protocol->store(file, file_len, system_charset_info);
+ protocol->store(status, status_len, system_charset_info);
+ if (protocol->write())
+ return TRUE;
+ return FALSE;
+}
+
+
+static my_bool showstat_handlerton(THD *thd, plugin_ref plugin,
+ void *arg)
+{
+ enum ha_stat_type stat= *(enum ha_stat_type *) arg;
+ handlerton *hton= plugin_data(plugin, handlerton *);
+ if (hton->state == SHOW_OPTION_YES && hton->show_status &&
+ hton->show_status(hton, thd, stat_print, stat))
+ return TRUE;
+ return FALSE;
+}
+
/**
  Send engine status output as a three-column result set
  (Type, Name, Status) to the client.

  @param thd      current thread
  @param db_type  engine to report on, or NULL for all engines
  @param stat     which kind of status is requested

  @retval FALSE  result set (and EOF) sent successfully
  @retval TRUE   failure while sending the field list or the rows
*/
bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat)
{
  List<Item> field_list;
  Protocol *protocol= thd->protocol;
  bool result;

  field_list.push_back(new Item_empty_string("Type",10));
  field_list.push_back(new Item_empty_string("Name",FN_REFLEN));
  field_list.push_back(new Item_empty_string("Status",10));

  if (protocol->send_fields(&field_list,
                            Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
    return TRUE;

  if (db_type == NULL)
  {
    /* No specific engine requested: visit every storage engine plugin. */
    result= plugin_foreach(thd, showstat_handlerton,
                           MYSQL_STORAGE_ENGINE_PLUGIN, &stat);
  }
  else
  {
    if (db_type->state != SHOW_OPTION_YES)
    {
      /* A disabled engine is reported as a single DISABLED row. */
      const LEX_STRING *name=&hton2plugin[db_type->slot]->name;
      result= stat_print(thd, name->str, name->length,
                         "", 0, "DISABLED", 8) ? 1 : 0;
    }
    else
      result= db_type->show_status &&
              db_type->show_status(db_type, thd, stat_print, stat) ? 1 : 0;
  }

  /* The EOF packet is only sent when all rows went out successfully. */
  if (!result)
    my_eof(thd);
  return result;
}
+
/*
  Check if the conditions for row-based binlogging are met for this table.

  A row in the given table should be replicated if all of the following hold:
  - row-based replication is enabled in the current thread
  - the thread has the binary log enabled (OPTION_BIN_LOG)
  - the binary log is open
  - the table is not a temporary table
  - the database the table resides in is binlogged (binlog_*_db rules)
*/

static bool check_table_binlog_row_based(THD *thd, TABLE *table)
{
  /*
    The tmp-table/db-filter part of the decision is per-share and is
    cached on first use; -1 means "not yet computed".
    NOTE(review): the cache is written without visible locking here;
    presumably concurrent writers would all compute the same value —
    confirm against TABLE_SHARE locking rules.
  */
  if (table->s->cached_row_logging_check == -1)
  {
    int const check(table->s->tmp_table == NO_TMP_TABLE &&
                    binlog_filter->db_ok(table->s->db.str));
    table->s->cached_row_logging_check= check;
  }

  DBUG_ASSERT(table->s->cached_row_logging_check == 0 ||
              table->s->cached_row_logging_check == 1);

  /* Combine the cached per-table part with the per-thread/per-log part. */
  return (thd->current_stmt_binlog_row_based &&
          table->s->cached_row_logging_check &&
          (thd->options & OPTION_BIN_LOG) &&
          mysql_bin_log.is_open());
}
+
+
/**
  Write table maps for all (manually or automatically) locked tables
  to the binary log.

  Generates and writes table map events for every write-locked table of
  the thread 'thd', whether locked manually (THD::locked_tables),
  automatically (THD::lock) or via the extra lock (THD::extra_lock).
  Nothing is done if table maps were already written for this statement.

  @param thd  Pointer to THD structure

  @retval 0  All OK
  @retval 1  Failed to write all table maps

  @sa THD::lock, THD::locked_tables
*/

static int write_locked_table_maps(THD *thd)
{
  DBUG_ENTER("write_locked_table_maps");
  DBUG_PRINT("enter", ("thd: 0x%lx thd->lock: 0x%lx thd->locked_tables: 0x%lx "
                       "thd->extra_lock: 0x%lx",
                       (long) thd, (long) thd->lock,
                       (long) thd->locked_tables, (long) thd->extra_lock));

  /* Only write maps for the first row handled in this statement. */
  if (thd->get_binlog_table_maps() == 0)
  {
    /* All three possible lock sets of the thread are scanned. */
    MYSQL_LOCK *locks[3];
    locks[0]= thd->extra_lock;
    locks[1]= thd->lock;
    locks[2]= thd->locked_tables;
    for (uint i= 0 ; i < sizeof(locks)/sizeof(*locks) ; ++i )
    {
      MYSQL_LOCK const *const lock= locks[i];
      if (lock == NULL)
        continue;

      TABLE **const end_ptr= lock->table + lock->table_count;
      for (TABLE **table_ptr= lock->table ;
           table_ptr != end_ptr ;
           ++table_ptr)
      {
        TABLE *const table= *table_ptr;
        DBUG_PRINT("info", ("Checking table %s", table->s->table_name.str));
        /* Maps are only written for write-locked, row-logged tables. */
        if (table->current_lock == F_WRLCK &&
            check_table_binlog_row_based(thd, table))
        {
          int const has_trans= table->file->has_transactions();
          int const error= thd->binlog_write_table_map(table, has_trans);
          /*
            If an error occurs, it is the responsibility of the caller to
            roll back the transaction.
          */
          if (unlikely(error))
            DBUG_RETURN(1);
        }
      }
    }
  }
  DBUG_RETURN(0);
}
+
+
/* Signature shared by the Write/Update/Delete row-logging functions. */
typedef bool Log_func(THD*, TABLE*, bool, MY_BITMAP*,
                      uint, const uchar*, const uchar*);

/**
  Log one row change to the binary log, if row-based logging applies to
  the table (see check_table_binlog_row_based()).

  @param table          table the row belongs to
  @param before_record  old row image, or 0 for an insert
  @param after_record   new row image, or 0 for a delete
  @param log_func       row-logging function of the matching Rows_log_event

  @retval 0                          logged, or logging not applicable
  @retval HA_ERR_RBR_LOGGING_FAILED  bitmap setup, table-map write, or the
                                     logging function itself failed
*/
static int binlog_log_row(TABLE* table,
                          const uchar *before_record,
                          const uchar *after_record,
                          Log_func *log_func)
{
  if (table->no_replicate)
    return 0;
  bool error= 0;
  THD *const thd= table->in_use;

  if (check_table_binlog_row_based(thd, table))
  {
    MY_BITMAP cols;
    /* Potential buffer on the stack for the bitmap */
    uint32 bitbuf[BITMAP_STACKBUF_SIZE/sizeof(uint32)];
    uint n_fields= table->s->fields;
    /* Use the stack buffer when all column bits fit into it. */
    my_bool use_bitbuf= n_fields <= sizeof(bitbuf)*8;

    /*
      If there are no table maps written to the binary log, this is
      the first row handled in this statement. In that case, we need
      to write table maps for all locked tables to the binary log.
    */
    if (likely(!(error= bitmap_init(&cols,
                                    use_bitbuf ? bitbuf : NULL,
                                    (n_fields + 7) & ~7UL,
                                    FALSE))))
    {
      /* All columns are logged. */
      bitmap_set_all(&cols);
      if (likely(!(error= write_locked_table_maps(thd))))
        error= (*log_func)(thd, table, table->file->has_transactions(),
                           &cols, table->s->fields,
                           before_record, after_record);

      /* bitmap_init() only allocated when the stack buffer was too small. */
      if (!use_bitbuf)
        bitmap_free(&cols);
    }
  }
  return error ? HA_ERR_RBR_LOGGING_FAILED : 0;
}
+
+int handler::ha_external_lock(THD *thd, int lock_type)
+{
+ DBUG_ENTER("handler::ha_external_lock");
+ /*
+ Whether this is lock or unlock, this should be true, and is to verify that
+ if get_auto_increment() was called (thus may have reserved intervals or
+ taken a table lock), ha_release_auto_increment() was too.
+ */
+ DBUG_ASSERT(next_insert_id == 0);
+
+ /*
+ We cache the table flags if the locking succeeded. Otherwise, we
+ keep them as they were when they were fetched in ha_open().
+ */
+ int error= external_lock(thd, lock_type);
+ if (error == 0)
+ cached_table_flags= table_flags();
+ DBUG_RETURN(error);
+}
+
+
/**
  Check handler usage and reset the state of the file to after 'open'.

  Asserts that column bitmaps are back to their defaults, key_read is off
  and any index/rnd scan was properly ended, then frees the filesort I/O
  cache and delegates to the engine's reset().

  @return result of reset(): 0 on success, else an error code
*/
int handler::ha_reset()
{
  DBUG_ENTER("ha_reset");
  /* Check that we have called all proper deallocation functions */
  DBUG_ASSERT((uchar*) table->def_read_set.bitmap +
              table->s->column_bitmap_size ==
              (uchar*) table->def_write_set.bitmap);
  DBUG_ASSERT(bitmap_is_set_all(&table->s->all_set));
  DBUG_ASSERT(table->key_read == 0);
  /* ensure that ha_index_end / ha_rnd_end has been called */
  DBUG_ASSERT(inited == NONE);
  /* Free cache used by filesort */
  free_io_cache(table);
  /* reset the bitmaps to point to defaults */
  table->default_column_bitmaps();
  DBUG_RETURN(reset());
}
+
+
+int handler::ha_write_row(uchar *buf)
+{
+ int error;
+ Log_func *log_func= Write_rows_log_event::binlog_row_logging_function;
+ DBUG_ENTER("handler::ha_write_row");
+
+ mark_trx_read_write();
+
+ if (unlikely(error= write_row(buf)))
+ DBUG_RETURN(error);
+ if (unlikely(error= binlog_log_row(table, 0, buf, log_func)))
+ DBUG_RETURN(error); /* purecov: inspected */
+ DBUG_RETURN(0);
+}
+
+
+int handler::ha_update_row(const uchar *old_data, uchar *new_data)
+{
+ int error;
+ Log_func *log_func= Update_rows_log_event::binlog_row_logging_function;
+
+ /*
+ Some storage engines require that the new record is in record[0]
+ (and the old record is in record[1]).
+ */
+ DBUG_ASSERT(new_data == table->record[0]);
+
+ mark_trx_read_write();
+
+ if (unlikely(error= update_row(old_data, new_data)))
+ return error;
+ if (unlikely(error= binlog_log_row(table, old_data, new_data, log_func)))
+ return error;
+ return 0;
+}
+
+int handler::ha_delete_row(const uchar *buf)
+{
+ int error;
+ Log_func *log_func= Delete_rows_log_event::binlog_row_logging_function;
+
+ mark_trx_read_write();
+
+ if (unlikely(error= delete_row(buf)))
+ return error;
+ if (unlikely(error= binlog_log_row(table, buf, 0, log_func)))
+ return error;
+ return 0;
+}
+
+
+
/**
  Fall back to identifying rows by their full contents.

  Called on update/delete when table_flags() includes
  HA_PRIMARY_KEY_REQUIRED_FOR_DELETE but the table has no primary key:
  all columns are then marked for read/write so the row can be located
  by value.
*/
void handler::use_hidden_primary_key()
{
  /* fallback to use all columns in the table to identify row */
  table->use_all_columns();
}
+
+
/**
  Dummy "log file no longer needed" notification for handlers that do not
  care about such information; only emits a debug trace line.

  @param log_file  name of the log file (used only for tracing)

  @note The first parameter is an unnamed handlerton taken by value —
        presumably present only to match the expected callback signature;
        TODO confirm against the handlerton declaration.
*/
void signal_log_not_needed(struct handlerton, char *log_file)
{
  DBUG_ENTER("signal_log_not_needed");
  DBUG_PRINT("enter", ("logfile '%s'", log_file));
  DBUG_VOID_RETURN;
}
+
+
+#ifdef TRANS_LOG_MGM_EXAMPLE_CODE
+/*
+ Example of transaction log management functions based on assumption that logs
+ placed into a directory
+*/
+#include <my_dir.h>
+#include <my_sys.h>
/**
  Example: walk the handler's transaction-log iterator and delete every
  log file the engine reports as free.

  @param hton  storage engine whose logs are to be cleaned up

  @retval 0  all free logs deleted
  @retval 1  iterator not supported, iterator creation failed, or a
             delete failed
*/
int example_of_iterator_using_for_logs_cleanup(handlerton *hton)
{
  void *buffer;   /* NOTE(review): unused local — candidate for removal */
  int res= 1;
  struct handler_iterator iterator;
  struct handler_log_file_data data;

  if (!hton->create_iterator)
    return 1; /* iterator creator is not supported */

  if ((*hton->create_iterator)(hton, HA_TRANSACTLOG_ITERATOR, &iterator) !=
      HA_ITERATOR_OK)
  {
    /* error during creation of log iterator or iterator is not supported */
    return 1;
  }
  while((*iterator.next)(&iterator, (void*)&data) == 0)
  {
    printf("%s\n", data.filename.str);
    /* Stop at the first failed delete; the iterator is still destroyed. */
    if (data.status == HA_LOG_STATUS_FREE &&
        my_delete(data.filename.str, MYF(MY_WME)))
      goto err;
  }
  res= 0;
err:
  (*iterator.destroy)(&iterator);
  return res;
}
+
+
/*
  Here we should get info from the handler about where it saves its logs,
  but as this is just an example, we use a constant.
  IMHO FN_ROOTDIR ("/") is safe enough for an example, because nobody has
  rights on it except root, and it consists of directories only, at least
  on *nix (sorry, can't find a windows-safe solution here, but it is only
  an example).
*/
+#define fl_dir FN_ROOTDIR
+
+
+/** @brief
+ Dummy function to return log status should be replaced by function which
+ really detect the log status and check that the file is a log of this
+ handler.
+*/
+enum log_status fl_get_log_status(char *log)
+{
+ MY_STAT stat_buff;
+ if (my_stat(log, &stat_buff, MYF(0)))
+ return HA_LOG_STATUS_INUSE;
+ return HA_LOG_STATUS_NOSUCHLOG;
+}
+
+
/* Iterator state shared by the fl_log_iterator_* example functions. */
struct fl_buff
{
  LEX_STRING *names;          /* file name of each discovered log */
  enum log_status *statuses;  /* status matching each name */
  uint32 entries;             /* number of valid entries in the arrays */
  uint32 current;             /* next entry to be returned by next() */
};
+
+
+int fl_log_iterator_next(struct handler_iterator *iterator,
+ void *iterator_object)
+{
+ struct fl_buff *buff= (struct fl_buff *)iterator->buffer;
+ struct handler_log_file_data *data=
+ (struct handler_log_file_data *) iterator_object;
+ if (buff->current >= buff->entries)
+ return 1;
+ data->filename= buff->names[buff->current];
+ data->status= buff->statuses[buff->current];
+ buff->current++;
+ return 0;
+}
+
+
/* Release the buffer allocated by fl_log_iterator_buffer_init(). */
void fl_log_iterator_destroy(struct handler_iterator *iterator)
{
  my_free((uchar*)iterator->buffer, MYF(MY_ALLOW_ZERO_PTR));
}
+
+
+/** @brief
+ returns buffer, to be assigned in handler_iterator struct
+*/
+enum handler_create_iterator_result
+fl_log_iterator_buffer_init(struct handler_iterator *iterator)
+{
+ MY_DIR *dirp;
+ struct fl_buff *buff;
+ char *name_ptr;
+ uchar *ptr;
+ FILEINFO *file;
+ uint32 i;
+
+ /* to be able to make my_free without crash in case of error */
+ iterator->buffer= 0;
+
+ if (!(dirp = my_dir(fl_dir, MYF(0))))
+ {
+ return HA_ITERATOR_ERROR;
+ }
+ if ((ptr= (uchar*)my_malloc(ALIGN_SIZE(sizeof(fl_buff)) +
+ ((ALIGN_SIZE(sizeof(LEX_STRING)) +
+ sizeof(enum log_status) +
+ + FN_REFLEN) *
+ (uint) dirp->number_off_files),
+ MYF(0))) == 0)
+ {
+ return HA_ITERATOR_ERROR;
+ }
+ buff= (struct fl_buff *)ptr;
+ buff->entries= buff->current= 0;
+ ptr= ptr + (ALIGN_SIZE(sizeof(fl_buff)));
+ buff->names= (LEX_STRING*) (ptr);
+ ptr= ptr + ((ALIGN_SIZE(sizeof(LEX_STRING)) *
+ (uint) dirp->number_off_files));
+ buff->statuses= (enum log_status *)(ptr);
+ name_ptr= (char *)(ptr + (sizeof(enum log_status) *
+ (uint) dirp->number_off_files));
+ for (i=0 ; i < (uint) dirp->number_off_files ; i++)
+ {
+ enum log_status st;
+ file= dirp->dir_entry + i;
+ if ((file->name[0] == '.' &&
+ ((file->name[1] == '.' && file->name[2] == '\0') ||
+ file->name[1] == '\0')))
+ continue;
+ if ((st= fl_get_log_status(file->name)) == HA_LOG_STATUS_NOSUCHLOG)
+ continue;
+ name_ptr= strxnmov(buff->names[buff->entries].str= name_ptr,
+ FN_REFLEN, fl_dir, file->name, NullS);
+ buff->names[buff->entries].length= (name_ptr -
+ buff->names[buff->entries].str) - 1;
+ buff->statuses[buff->entries]= st;
+ buff->entries++;
+ }
+
+ iterator->buffer= buff;
+ iterator->next= &fl_log_iterator_next;
+ iterator->destroy= &fl_log_iterator_destroy;
+ return HA_ITERATOR_OK;
+}
+
+
+/* An example of a iterator creator */
+enum handler_create_iterator_result
+fl_create_iterator(enum handler_iterator_type type,
+ struct handler_iterator *iterator)
+{
+ switch(type) {
+ case HA_TRANSACTLOG_ITERATOR:
+ return fl_log_iterator_buffer_init(iterator);
+ default:
+ return HA_ITERATOR_UNSUPPORTED;
+ }
+}
+#endif /*TRANS_LOG_MGM_EXAMPLE_CODE*/