author     unknown <monty@mysql.com>  2004-10-29 19:26:52 +0300
committer  unknown <monty@mysql.com>  2004-10-29 19:26:52 +0300
commit     f095274fe8c3d3394d6c0ce0a68f4bea04311999 (patch)
tree       23bcc9a71fe7237887a111b158e30f5a6bb665d3 /sql/ha_berkeley.cc
parent     f41bba8c6156a7adf4c67dfa75e16112767a5d3c (diff)
parent     5be6c328f5a9f78f37176bbbd88a538fa3b65fe9 (diff)
download   mariadb-git-f095274fe8c3d3394d6c0ce0a68f4bea04311999.tar.gz
merge with 4.1
BitKeeper/etc/ignore: auto-union
BitKeeper/etc/logging_ok: auto-union
BitKeeper/triggers/post-commit: Auto merged
Docs/Support/texi2html: Auto merged
Makefile.am: Auto merged
client/Makefile.am: Auto merged
client/mysql.cc: Auto merged
client/mysqldump.c: Auto merged
include/my_base.h: Auto merged
include/my_global.h: Auto merged
include/my_pthread.h: Auto merged
include/my_sys.h: Auto merged
include/my_time.h: Auto merged
include/mysql.h: Auto merged
include/mysql_com.h: Auto merged
innobase/buf/buf0buf.c: Auto merged
innobase/include/row0mysql.h: Auto merged
innobase/row/row0sel.c: Auto merged
libmysql/libmysql.c: Auto merged
libmysqld/examples/Makefile.am: Auto merged
myisam/mi_check.c: Auto merged
mysql-test/include/ps_modify.inc: Auto merged
mysql-test/install_test_db.sh: Auto merged
mysql-test/r/alter_table.result: Auto merged
mysql-test/r/auto_increment.result: Auto merged
mysql-test/r/bdb.result: Auto merged
mysql-test/r/ctype_latin1_de.result: Auto merged
mysql-test/r/ctype_recoding.result: Auto merged
mysql-test/r/fulltext.result: Auto merged
mysql-test/r/func_gconcat.result: Auto merged
mysql-test/r/func_group.result: Auto merged
mysql-test/r/func_if.result: Auto merged
mysql-test/t/derived.test: Auto merged
mysql-test/t/insert.test: merge with 4.1; fixed the test case to not use 'if exists' when it shouldn't
mysql-test/t/range.test: merge with 4.1; added a missing drop table
sql/ha_ndbcluster.cc: merge with 4.1; simple optimization: use max() instead of ?:
sql/item_func.cc: merge with 4.1 (added back old variable names for easier merges)
sql/opt_range.cc: merge with 4.1; removed the unused 'parent_alloc' argument from QUICK_RANGE_SELECT; added an assert against using QUICK_GROUP_MIN_MAX_SELECT with parent_alloc, as its init() function can't handle that; changed get_quick_select_for_ref() back to using its own alloc root because the function may be called several times for one query
sql/sql_handler.cc: merge with 4.1; renamed the variable 'err' to 'error' because the same function had a label named 'err' (see the sketch after this list)
sql/sql_update.cc: use the multi-update code from 5.0 instead of 4.1; we will shortly fix the 5.0 locking code to be faster than 4.1's
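On the sql/sql_handler.cc point above: the rename is about readability, not behaviour. A hypothetical sketch of the pattern being avoided; the helper names here are invented, not copied from the file.

#include <stdio.h>

static bool something_failed() { return true; }   /* stand-in for the real checks */

/*
  Before the rename: a local variable and a goto label are both called 'err'.
  C++ keeps labels and variables in separate namespaces, so this compiles,
  but each mention of 'err' makes the reader stop and decide which is meant.
*/
static int handler_read_sketch()
{
  int err= 0;
  if (something_failed())
  {
    err= 1;
    goto err;                       /* the label, not the variable */
  }
  return 0;
err:
  printf("read failed\n");
  return err;                       /* renaming the variable to 'error' removes the ambiguity */
}

int main() { return handler_read_sketch(); }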
Diffstat (limited to 'sql/ha_berkeley.cc')
-rw-r--r--  sql/ha_berkeley.cc  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index bbe73ee8674..a5d0023b875 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -25,7 +25,7 @@
We will need an updated Berkeley DB version for this.
- Killing threads that has got a 'deadlock'
- SHOW TABLE STATUS should give more information about the table.
- - Get a more accurate count of the number of rows (estimate_number_of_rows()).
+ - Get a more accurate count of the number of rows (estimate_rows_upper_bound()).
We could store the found number of rows when the table is scanned and
then increment the counter for each attempted write.
- We will need to extend the manager thread to makes checkpoints at
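The TODO item above describes a simple bookkeeping scheme: remember the exact count after a full scan, then bump it on every attempted write. A minimal sketch of that idea, assuming a shared per-table counter like the share->rows member used later in this file; the struct and helper names below are invented, and the real handler would guard the counter with the share's mutex, which is omitted here for brevity.

/* Hypothetical per-table state, standing in for BDB_SHARE. */
struct bdb_share_sketch
{
  unsigned long long rows;
};

/* After a full table scan the exact count is known, so overwrite the estimate. */
static void note_scan_result(bdb_share_sketch *share, unsigned long long found_rows)
{
  share->rows= found_rows;
}

/*
  Every attempted write bumps the counter. Failed writes over-count, which is
  acceptable because the number is only used as an upper bound.
*/
static void note_write_attempt(bdb_share_sketch *share)
{
  share->rows++;
}

/*
  What estimate_rows_upper_bound() then reports: the tracked count plus a
  safety margin (HA_BERKELEY_EXTRA_ROWS in this file).
*/
static unsigned long long rows_upper_bound(const bdb_share_sketch *share,
                                           unsigned long long extra)
{
  return share->rows + extra;
}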
@@ -63,7 +63,7 @@
#define HA_BERKELEY_ROWS_IN_TABLE 10000 /* to get optimization right */
#define HA_BERKELEY_RANGE_COUNT 100
#define HA_BERKELEY_MAX_ROWS 10000000 /* Max rows in table */
-/* extra rows for estimate_number_of_rows() */
+/* extra rows for estimate_rows_upper_bound() */
#define HA_BERKELEY_EXTRA_ROWS 100
/* Bits for share->status */
@@ -90,7 +90,7 @@ const char *berkeley_lock_names[] =
u_int32_t berkeley_lock_types[]=
{ DB_LOCK_DEFAULT, DB_LOCK_OLDEST, DB_LOCK_RANDOM };
TYPELIB berkeley_lock_typelib= {array_elements(berkeley_lock_names)-1,"",
- berkeley_lock_names};
+ berkeley_lock_names, NULL};
static void berkeley_print_error(const char *db_errpfx, char *buffer);
static byte* bdb_get_key(BDB_SHARE *share,uint *length,
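The extra NULL in the TYPELIB initializer lines up with the struct gaining a fourth member in the 4.1 tree. Roughly, TYPELIB has the shape sketched below (see include/typelib.h; treat the exact field names as approximate), and the lock-name values shown are placeholders rather than the file's real array contents.

/* Approximate shape of TYPELIB; the point is the fourth member, type_lengths. */
typedef struct st_typelib_sketch
{
  unsigned int count;           /* number of entries in type_names */
  const char *name;             /* name of the typelib itself */
  const char **type_names;      /* the string values */
  unsigned int *type_lengths;   /* optional per-entry lengths (may be NULL) */
} TYPELIB_SKETCH;

static const char *lock_names_sketch[]= { "DEFAULT", "OLDEST", "RANDOM", 0 };

/*
  Brace-initializing all four members, as the patched line now does. Leaving
  the fourth out would zero-initialize it anyway, but being explicit documents
  the intent and silences missing-initializer warnings in pedantic builds.
*/
static TYPELIB_SKETCH lock_typelib_sketch=
{ 3, "", lock_names_sketch, 0 };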
@@ -856,8 +856,8 @@ int ha_berkeley::write_row(byte * record)
DBUG_ENTER("write_row");
statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
- if (table->timestamp_default_now)
- update_timestamp(record+table->timestamp_default_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
+ table->timestamp_field->set_time();
if (table->next_number_field && record == table->record[0])
update_auto_increment();
if ((error=pack_row(&row, record,1)))
@@ -1104,8 +1104,8 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row)
LINT_INIT(error);
statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status);
- if (table->timestamp_on_update_now)
- update_timestamp(new_row+table->timestamp_on_update_now-1);
+ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
+ table->timestamp_field->set_time();
if (hidden_primary_key)
{
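Both timestamp hunks above replace offset arithmetic on the record buffer (update_timestamp(record + offset - 1)) with the newer interface, where the TABLE carries a timestamp_field_type bitmap and the timestamp Field object stamps itself via set_time(). A sketch of the pattern the two hunks now follow, using stand-in types; the real flags are TIMESTAMP_AUTO_SET_ON_INSERT and TIMESTAMP_AUTO_SET_ON_UPDATE from the server headers.

/* Stand-ins for the server types, invented for this illustration. */
enum timestamp_auto_set_sketch
{
  TS_AUTO_SET_ON_INSERT= 1,
  TS_AUTO_SET_ON_UPDATE= 2
};

struct field_timestamp_sketch
{
  void set_time() { /* write "now" into the column's spot in the record buffer */ }
};

struct table_sketch
{
  unsigned int timestamp_field_type;      /* bitmap of the flags above */
  field_timestamp_sketch *timestamp_field;
};

/* write_row() path: only stamp the column if the table asked for it on INSERT. */
static void on_insert(table_sketch *table)
{
  if (table->timestamp_field_type & TS_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();
}

/* update_row() path: same shape, keyed on the ON_UPDATE flag instead. */
static void on_update(table_sketch *table)
{
  if (table->timestamp_field_type & TS_AUTO_SET_ON_UPDATE)
    table->timestamp_field->set_time();
}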
@@ -2566,7 +2566,7 @@ end:
Used when sorting to allocate buffers and by the optimizer.
*/
-ha_rows ha_berkeley::estimate_number_of_rows()
+ha_rows ha_berkeley::estimate_rows_upper_bound()
{
return share->rows + HA_BERKELEY_EXTRA_ROWS;
}
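As the comment above the function notes, callers use this figure to size buffers for sorting, which is why it pads share->rows with HA_BERKELEY_EXTRA_ROWS and why over-counting is harmless: an over-estimate wastes a little memory, while an under-estimate would force the buffer to grow mid-sort. A hypothetical caller-side sketch, not the server's actual filesort code; the record handle and helper are invented.

#include <cstddef>
#include <vector>

/* Invented record handle, used only for this illustration. */
struct sort_ref_sketch { unsigned char ref[8]; };

/*
  Size the sort buffer from the handler's upper bound once, up front. Because
  the estimate is an upper bound, the buffer never has to grow while rows are
  being collected.
*/
static std::vector<sort_ref_sketch> make_sort_buffer(unsigned long long rows_upper_bound)
{
  std::vector<sort_ref_sketch> buffer;
  buffer.reserve(static_cast<std::size_t>(rows_upper_bound));
  return buffer;
}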