author     unknown <brian@brian-akers-computer.local>  2004-05-22 12:18:06 -0700
committer  unknown <brian@brian-akers-computer.local>  2004-05-22 12:18:06 -0700
commit     64285a73ffb962ac347b6179d6fbdd87d4b8de83 (patch)
tree       65a44af30884ad23f595a8714864eaed832b15c0 /sql/handler.cc
parent     f1b106a753d941c4ae46aa180d2cd1ea4142cea0 (diff)
download   mariadb-git-64285a73ffb962ac347b6179d6fbdd87d4b8de83.tar.gz

Merge

configure.in:     SCCS merged
sql/Makefile.am:  SCCS merged
sql/handler.cc:   SCCS merged
sql/mysql_priv.h: SCCS merged
Diffstat (limited to 'sql/handler.cc')
-rw-r--r--  sql/handler.cc  161
1 files changed, 112 insertions, 49 deletions
diff --git a/sql/handler.cc b/sql/handler.cc
index d0dad5dcb9e..c10a61d9bfe 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -43,6 +43,9 @@
#else
#define innobase_query_caching_of_table_permitted(X,Y,Z) 1
#endif
+#ifdef HAVE_NDBCLUSTER_DB
+#include "ha_ndbcluster.h"
+#endif
#include <myisampack.h>
#include <errno.h>
@@ -54,7 +57,7 @@ ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count,
ha_read_key_count, ha_read_next_count, ha_read_prev_count,
ha_read_first_count, ha_read_last_count,
ha_commit_count, ha_rollback_count,
- ha_read_rnd_count, ha_read_rnd_next_count;
+ ha_read_rnd_count, ha_read_rnd_next_count, ha_discover_count;
static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES;
@@ -82,6 +85,10 @@ struct show_table_type_st sys_table_types[]=
"Supports transactions and page-level locking", DB_TYPE_BERKELEY_DB},
{"BERKELEYDB",&have_berkeley_db,
"Alias for BDB", DB_TYPE_BERKELEY_DB},
+ {"NDBCLUSTER", &have_ndbcluster,
+ "Clustered, fault tolerant memory based tables", DB_TYPE_NDBCLUSTER},
+ {"NDB", &have_ndbcluster,
+ "Alias for NDBCLUSTER", DB_TYPE_NDBCLUSTER},
{"EXAMPLE",&have_example_db,
"Example storage engine", DB_TYPE_EXAMPLE_DB},
{"ARCHIVE",&have_archive_db,
@@ -101,15 +108,16 @@ TYPELIB tx_isolation_typelib= {array_elements(tx_isolation_names)-1,"",
enum db_type ha_resolve_by_name(const char *name, uint namelen)
{
- if (!my_strcasecmp(&my_charset_latin1, name, "DEFAULT")) {
- return(enum db_type) current_thd->variables.table_type;
+ THD *thd=current_thd;
+ if (thd && !my_strcasecmp(&my_charset_latin1, name, "DEFAULT")) {
+ return (enum db_type) thd->variables.table_type;
}
show_table_type_st *types;
for (types= sys_table_types; types->type; types++)
{
if (!my_strcasecmp(&my_charset_latin1, name, types->type))
- return(enum db_type)types->db_type;
+ return (enum db_type) types->db_type;
}
return DB_TYPE_UNKNOWN;
}
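
Illustration (not part of the commit): with the two new sys_table_types entries above, engine-name resolution accepts either spelling, case-insensitively, and still falls back to DB_TYPE_UNKNOWN for unlisted names. A minimal sketch with made-up inputs:

  /* Sketch only; the lengths match the literal strings passed. */
  enum db_type a= ha_resolve_by_name("NDBCLUSTER", 10);   /* DB_TYPE_NDBCLUSTER */
  enum db_type b= ha_resolve_by_name("ndb", 3);           /* alias, same result */
  enum db_type c= ha_resolve_by_name("NOSUCHENGINE", 12); /* DB_TYPE_UNKNOWN */
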
@@ -190,6 +198,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
case DB_TYPE_ARCHIVE_DB:
return new ha_archive(table);
#endif
+#ifdef HAVE_NDBCLUSTER_DB
+ case DB_TYPE_NDBCLUSTER:
+ return new ha_ndbcluster(table);
+#endif
case DB_TYPE_HEAP:
return new ha_heap(table);
default: // should never happen
@@ -234,6 +246,18 @@ int ha_init()
opt_using_transactions=1;
}
#endif
+#ifdef HAVE_NDBCLUSTER_DB
+ if (have_ndbcluster == SHOW_OPTION_YES)
+ {
+ if (ndbcluster_init())
+ {
+ have_ndbcluster= SHOW_OPTION_DISABLED;
+ error= 1;
+ }
+ else
+ opt_using_transactions=1;
+ }
+#endif
return error;
}
@@ -261,6 +285,10 @@ int ha_panic(enum ha_panic_function flag)
if (have_innodb == SHOW_OPTION_YES)
error|=innobase_end();
#endif
+#ifdef HAVE_NDBCLUSTER_DB
+ if (have_ndbcluster == SHOW_OPTION_YES)
+ error|=ndbcluster_end();
+#endif
return error;
} /* ha_panic */
@@ -270,6 +298,10 @@ void ha_drop_database(char* path)
if (have_innodb == SHOW_OPTION_YES)
innobase_drop_database(path);
#endif
+#ifdef HAVE_NDBCLUSTER_DB
+ if (have_ndbcluster == SHOW_OPTION_YES)
+ ndbcluster_drop_database(path);
+#endif
}
void ha_close_connection(THD* thd)
@@ -278,6 +310,10 @@ void ha_close_connection(THD* thd)
if (have_innodb == SHOW_OPTION_YES)
innobase_close_connection(thd);
#endif
+#ifdef HAVE_NDBCLUSTER_DB
+ if (have_ndbcluster == SHOW_OPTION_YES)
+ ndbcluster_close_connection(thd);
+#endif
}
/*
@@ -437,6 +473,19 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans)
WRITE_CACHE, (my_off_t) 0, 0, 1);
thd->transaction.trans_log.end_of_file= max_binlog_cache_size;
}
+#ifdef HAVE_NDBCLUSTER_DB
+ if (trans->ndb_tid)
+ {
+ if ((error=ndbcluster_commit(thd,trans->ndb_tid)))
+ {
+ my_error(ER_ERROR_DURING_COMMIT, MYF(0), error);
+ error=1;
+ }
+ if (trans == &thd->transaction.all)
+ operation_done= transaction_commited= 1;
+ trans->ndb_tid=0;
+ }
+#endif
#ifdef HAVE_BERKELEY_DB
if (trans->bdb_tid)
{
@@ -490,6 +539,18 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans)
if (opt_using_transactions)
{
bool operation_done=0;
+#ifdef HAVE_NDBCLUSTER_DB
+ if (trans->ndb_tid)
+ {
+ if ((error=ndbcluster_rollback(thd, trans->ndb_tid)))
+ {
+ my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error);
+ error=1;
+ }
+ trans->ndb_tid = 0;
+ operation_done=1;
+ }
+#endif
#ifdef HAVE_BERKELEY_DB
if (trans->bdb_tid)
{
@@ -1133,7 +1194,7 @@ int handler::index_next_same(byte *buf, const byte *key, uint keylen)
int error;
if (!(error=index_next(buf)))
{
- if (key_cmp(table, key, active_index, keylen))
+ if (key_cmp_if_same(table, key, active_index, keylen))
{
table->status=STATUS_NOT_FOUND;
error=HA_ERR_END_OF_FILE;
@@ -1169,8 +1230,10 @@ bool handler::caching_allowed(THD* thd, char* table_key,
** Some general functions that isn't in the handler class
****************************************************************************/
- /* Initiates table-file and calls apropriate database-creator */
- /* Returns 1 if something got wrong */
+/*
+ Initiates table-file and calls apropriate database-creator
+ Returns 1 if something got wrong
+*/
int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
bool update_create_info)
@@ -1186,7 +1249,7 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info,
{
update_create_info_from_table(create_info, &table);
if (table.file->table_flags() & HA_DROP_BEFORE_CREATE)
- table.file->delete_table(name); // Needed for BDB tables
+ table.file->delete_table(name);
}
if (lower_case_table_names == 2 &&
!(table.file->table_flags() & HA_FILE_BASED))
@@ -1308,6 +1371,26 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
/*
+ Try to discover one table from handler(s)
+*/
+
+int ha_discover(const char* dbname, const char* name,
+ const void** frmblob, uint* frmlen)
+{
+ int error= 1; // Table does not exist in any handler
+ DBUG_ENTER("ha_discover");
+ DBUG_PRINT("enter", ("db: %s, name: %s", dbname, name));
+#ifdef HAVE_NDBCLUSTER_DB
+ if (have_ndbcluster == SHOW_OPTION_YES)
+ error= ndbcluster_discover(dbname, name, frmblob, frmlen);
+#endif
+ if (!error)
+ statistic_increment(ha_discover_count,&LOCK_status);
+ DBUG_RETURN(error);
+}
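
For orientation only (the caller below is hypothetical, not taken from this patch): ha_discover() asks the compiled-in handlers whether they can supply a definition for a table the server has no local copy of; frmblob and frmlen are pure output parameters, and a non-zero return means no handler recognized the table.

  /* Hypothetical caller sketch; ownership/cleanup of the blob is an assumption. */
  const void *frmblob;
  uint frmlen;
  if (!ha_discover("mydb", "mytable", &frmblob, &frmlen))
  {
    /* frmblob/frmlen now describe the table definition supplied by a handler
       (only NDB Cluster implements discovery in this patch). */
  }
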
+
+
+/*
Read first row between two ranges.
Store ranges for future calls to read_range_next
@@ -1315,6 +1398,7 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
read_range_first()
start_key Start key. Is 0 if no min range
end_key End key. Is 0 if no max range
+ eq_range_arg Set to 1 if start_key == end_key
sorted Set to 1 if result should be sorted per key
NOTES
@@ -1328,11 +1412,12 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
int handler::read_range_first(const key_range *start_key,
const key_range *end_key,
- bool sorted)
+ bool eq_range_arg, bool sorted)
{
int result;
DBUG_ENTER("handler::read_range_first");
+ eq_range= eq_range_arg;
end_range= 0;
if (end_key)
{
@@ -1343,7 +1428,6 @@ int handler::read_range_first(const key_range *start_key,
}
range_key_part= table->key_info[active_index].key_part;
-
if (!start_key) // Read first record
result= index_first(table->record[0]);
else
@@ -1365,7 +1449,6 @@ int handler::read_range_first(const key_range *start_key,
SYNOPSIS
read_range_next()
- eq_range Set to 1 if start_key == end_key
NOTES
Record is read into table->record[0]
@@ -1376,17 +1459,19 @@ int handler::read_range_first(const key_range *start_key,
# Error code
*/
-int handler::read_range_next(bool eq_range)
+int handler::read_range_next()
{
int result;
DBUG_ENTER("handler::read_range_next");
if (eq_range)
- result= index_next_same(table->record[0],
- end_range->key,
- end_range->length);
- else
- result= index_next(table->record[0]);
+ {
+ /* We trust that index_next_same always gives a row in range */
+ DBUG_RETURN(index_next_same(table->record[0],
+ end_range->key,
+ end_range->length));
+ }
+ result= index_next(table->record[0]);
if (result)
DBUG_RETURN(result);
DBUG_RETURN(compare_key(end_range) <= 0 ? 0 : HA_ERR_END_OF_FILE);
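
The two hunks above change the range-scan calling convention: eq_range is now given once to read_range_first() and remembered by the handler, so read_range_next() takes no argument. A hedged caller sketch; h, start, end, eq_range and sorted are assumed to be set up elsewhere:

  /* Sketch only, not lifted from the server sources. */
  int err= h->read_range_first(&start, &end, eq_range, sorted);
  while (!err)
  {
    /* current row is in table->record[0] */
    err= h->read_range_next();          /* old form: read_range_next(eq_range) */
  }
  /* err == HA_ERR_END_OF_FILE means the range was simply exhausted */
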
@@ -1394,16 +1479,18 @@ int handler::read_range_next(bool eq_range)
/*
- Compare if found key is over max-value
+ Compare if found key (in row) is over max-value
SYNOPSIS
compare_key
- range key to compare to row
+ range range to compare to row. May be 0 for no range
NOTES
- For this to work, the row must be stored in table->record[0]
+ See key.cc::key_cmp() for details
RETURN
+ The return value is SIGN(key_in_row - range_key):
+
0 Key is equal to range or 'range' == 0 (no range)
-1 Key is less than range
1 Key is larger than range
@@ -1411,35 +1498,11 @@ int handler::read_range_next(bool eq_range)
int handler::compare_key(key_range *range)
{
- KEY_PART_INFO *key_part= range_key_part;
- uint store_length;
-
+ int cmp;
if (!range)
return 0; // No max range
-
- for (const char *key=range->key, *end=key+range->length;
- key < end;
- key+= store_length, key_part++)
- {
- int cmp;
- store_length= key_part->store_length;
- if (key_part->null_bit)
- {
- if (*key)
- {
- if (!key_part->field->is_null())
- return 1;
- continue;
- }
- else if (key_part->field->is_null())
- return 0;
- key++; // Skip null byte
- store_length--;
- }
- if ((cmp=key_part->field->key_cmp((byte*) key, key_part->length)) < 0)
- return -1;
- if (cmp > 0)
- return 1;
- }
- return key_compare_result_on_equal;
+ cmp= key_cmp(range_key_part, range->key, range->length);
+ if (!cmp)
+ cmp= key_compare_result_on_equal;
+ return cmp;
}
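
To make the rewritten RETURN contract concrete (a hedged reading, names as in the hunk above): key_cmp() yields SIGN(key_in_row - range_key), and key_compare_result_on_equal only applies on a tie, which is what lets read_range_next() treat "compare_key(end_range) <= 0" as "row still inside the range".

  /*
    Illustration only: single-column integer key, end_range encoding the value 10.
      row key  7  -> compare_key() == -1  (below the range end, keep scanning)
      row key 10  -> key_cmp() ties; result is key_compare_result_on_equal
      row key 12  -> compare_key() ==  1  (past the end -> HA_ERR_END_OF_FILE)
  */
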