Diffstat (limited to 'storage/archive/ha_archive.cc')
 storage/archive/ha_archive.cc | 74 ++++++++++++++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 44 insertions(+), 30 deletions(-)
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index bb638e1c17b..6853e879f55 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -81,6 +81,7 @@
TODO:
Allow users to set compression level.
+ Allow adjustable block size.
Implement versioning, should be easy.
Allow for errors, find a way to mark bad rows.
Add optional feature so that rows can be flushed at interval (which will cause less
@@ -210,7 +211,8 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)
buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
/* The size of the offset value we will use for position() */
- ref_length = sizeof(my_off_t);
+ ref_length= sizeof(my_off_t);
+ archive_reader_open= FALSE;
}
int archive_discover(handlerton *hton, THD* thd, const char *db,
@@ -434,6 +436,29 @@ int ha_archive::init_archive_writer()
}
+int ha_archive::init_archive_reader()
+{
+ DBUG_ENTER("ha_archive::init_archive_reader");
+ /*
+ It is expensive to open and close the data files, and since you can't have
+ a gzip file that can be both read and written, we keep a writer open
+ that is shared among all open tables.
+ */
+ if (!archive_reader_open)
+ {
+ if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
+ {
+ DBUG_PRINT("ha_archive", ("Could not open archive read file"));
+ share->crashed= TRUE;
+ DBUG_RETURN(1);
+ }
+ archive_reader_open= TRUE;
+ }
+
+ DBUG_RETURN(0);
+}
+
+
/*
We just implement one additional file extension.
*/
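
The init_archive_reader() added above follows a lazy-open pattern: the azio
read stream is opened on first use, and the new archive_reader_open flag keeps
later calls from reopening it; close() (further down) correspondingly calls
azclose() only when that flag is set. A minimal sketch of the idiom, using
hypothetical names rather than the handler's actual members:

    /* Sketch only: lazy, idempotent open of an azio read stream. */
    struct lazy_reader
    {
      bool open_flag;            /* TRUE once the stream has been opened */
      azio_stream stream;
    };

    static int lazy_reader_open(lazy_reader *reader, const char *path)
    {
      if (!reader->open_flag)
      {
        if (!(azopen(&reader->stream, path, O_RDONLY|O_BINARY)))
          return 1;              /* caller marks the share as crashed */
        reader->open_flag= TRUE;
      }
      return 0;                  /* already open: nothing to do */
    }
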
@@ -477,7 +502,6 @@ int ha_archive::open(const char *name, int mode, uint open_options)
DBUG_ASSERT(share);
-
record_buffer= create_record_buffer(table->s->reclength +
ARCHIVE_ROW_HEADER_SIZE);
@@ -489,14 +513,6 @@ int ha_archive::open(const char *name, int mode, uint open_options)
thr_lock_data_init(&share->lock, &lock, NULL);
- DBUG_PRINT("ha_archive", ("archive data_file_name %s", share->data_file_name));
- if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
- {
- if (errno == EROFS || errno == EACCES)
- DBUG_RETURN(my_errno= errno);
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- }
-
DBUG_PRINT("ha_archive", ("archive table was crashed %s",
rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
@@ -533,8 +549,11 @@ int ha_archive::close(void)
destroy_record_buffer(record_buffer);
/* First close stream */
- if (azclose(&archive))
- rc= 1;
+ if (archive_reader_open)
+ {
+ if (azclose(&archive))
+ rc= 1;
+ }
/* then also close share */
rc|= free_share();
@@ -904,7 +923,7 @@ int ha_archive::index_read(byte *buf, const byte *key,
int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
uint key_len, enum ha_rkey_function find_flag)
{
- int rc= 0;
+ int rc;
bool found= 0;
KEY *mkey= &table->s->key_info[index];
current_k_offset= mkey->key_part->offset;
@@ -914,22 +933,10 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
DBUG_ENTER("ha_archive::index_read_idx");
- /*
- All of the buffer must be written out or we won't see all of the
- data
- */
- pthread_mutex_lock(&share->mutex);
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- pthread_mutex_unlock(&share->mutex);
+ rc= rnd_init(TRUE);
- /*
- Set the position of the local read thread to the beginning postion.
- */
- if (read_data_header(&archive))
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
+ if (rc)
goto error;
- }
while (!(get_row(&archive, buf)))
{
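
ARCHIVE has no real index structure, so index_read_idx() is implemented as a
full scan that compares each row's key column. The change above delegates the
setup work (flushing the shared writer and rewinding the read stream) to
rnd_init(TRUE) instead of duplicating it inline. The resulting control flow,
roughly (a sketch; the real comparison also honors the column's charset):

    rc= rnd_init(TRUE);                 /* flush writer, open reader, rewind */
    if (rc)
      goto error;
    while (!(get_row(&archive, buf)))   /* linear scan over all rows */
    {
      if (!memcmp(current_key, buf + current_k_offset, current_key_len))
      {
        found= 1;                       /* first matching row wins */
        break;
      }
    }
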
@@ -979,10 +986,11 @@ int ha_archive::rnd_init(bool scan)
if (share->crashed)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ init_archive_reader();
+
/* We rewind the file so that we can read from the beginning if scan */
if (scan)
{
- scan_rows= share->rows_recorded;
DBUG_PRINT("info", ("archive will retrieve %llu rows",
(unsigned long long) scan_rows));
stats.records= 0;
@@ -991,17 +999,18 @@ int ha_archive::rnd_init(bool scan)
If dirty, we lock, and then reset/flush the data.
I found that just calling azflush() doesn't always work.
*/
+ pthread_mutex_lock(&share->mutex);
+ scan_rows= share->rows_recorded;
if (share->dirty == TRUE)
{
- pthread_mutex_lock(&share->mutex);
if (share->dirty == TRUE)
{
DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
azflush(&(share->archive_write), Z_SYNC_FLUSH);
share->dirty= FALSE;
}
- pthread_mutex_unlock(&share->mutex);
}
+ pthread_mutex_unlock(&share->mutex);
if (read_data_header(&archive))
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
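
The net effect of this hunk is that the dirty test, the flush of the shared
write stream, and the scan_rows snapshot all now happen with share->mutex held,
replacing the earlier unlocked double-checked test of share->dirty. (The inner
if (share->dirty == TRUE) that the diff leaves behind is now redundant, since
the outer check already runs under the mutex.) Reduced to a sketch, the locked
section becomes:

    pthread_mutex_lock(&share->mutex);
    scan_rows= share->rows_recorded;    /* snapshot row count under the lock */
    if (share->dirty == TRUE)
    {
      azflush(&(share->archive_write), Z_SYNC_FLUSH);  /* push buffered rows */
      share->dirty= FALSE;
    }
    pthread_mutex_unlock(&share->mutex);
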
@@ -1283,6 +1292,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
azio_stream writer;
char writer_filename[FN_REFLEN];
+ init_archive_reader();
+
// now we close both our writer and our reader for the rename
if (share->archive_write_open)
{
@@ -1475,6 +1486,7 @@ int ha_archive::info(uint flag)
if (flag & HA_STATUS_AUTO)
{
+ init_archive_reader();
azflush(&archive, Z_SYNC_FLUSH);
stats.auto_increment_value= archive.auto_increment;
}
@@ -1557,6 +1569,8 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
Now we will rewind the archive file so that we are positioned at the
start of the file.
*/
+ init_archive_reader();
+
if (!rc)
read_data_header(&archive);