author | unknown <brian@zim.(none)> | 2006-11-15 17:55:17 -0800
---|---|---
committer | unknown <brian@zim.(none)> | 2006-11-15 17:55:17 -0800
commit | 97ec4aa6878f80e6dd54a55179c5e2b3947731dd (patch) |
tree | 6049c0acdf785252a7a48ec9ae8630214c4bd13c /storage/archive |
parent | bfefdaf0f3072de3df46678fc79c6d93f1e0866d (diff) |
download | mariadb-git-97ec4aa6878f80e6dd54a55179c5e2b3947731dd.tar.gz |
This finishes the bzero()-on-varchar patch I wrote a few months ago. It also completes the data_length() method I added to Field to determine the length of the data stored for a particular data type. I have many uses for this :) (A standalone sketch of the idea follows the per-file notes below.)
mysql-test/r/archive.result:
Added test case results.
mysql-test/t/archive.test:
Added a varchar-specific test case.
sql/field.cc:
Just corrected the interface.
sql/field.h:
Completed data_length() call.
storage/archive/ha_archive.cc:
Corrected warnings and finished the bzero() patch.
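For readers outside the MySQL tree, here is a minimal standalone sketch of the technique: a VARCHAR occupies a fixed-width slot in the row buffer, so the bytes past the value's actual length are stale garbage, and zeroing that tail makes equal values byte-identical, which compresses far better once ARCHIVE hands the row to azwrite(). The struct and helper below (VarcharSlot, zero_unused_tail) are illustrative stand-ins of my own, not the Field API.

```cpp
// Illustrative sketch only: VarcharSlot and zero_unused_tail() are made-up
// stand-ins for the Field API, showing the idea behind the bzero() patch.
#include <cstdint>
#include <cstring>

struct VarcharSlot {
  uint32_t pack_length;   // full slot width: length prefix + max data bytes
  uint32_t length_bytes;  // 1 when the max length fits in one byte, else 2
  uint32_t data_length;   // bytes used by the value currently stored
};

// Zero the unused tail of a VARCHAR slot, as ha_archive::write_row() now
// does with bzero() before the row is compressed.
static void zero_unused_tail(unsigned char *slot, const VarcharSlot &f) {
  uint32_t used = f.length_bytes + f.data_length;
  if (used < f.pack_length)
    std::memset(slot + used, 0, f.pack_length - used);
}

int main() {
  unsigned char slot[11];                // VARCHAR(10): 1 length byte + 10 data bytes
  std::memset(slot, 0xAA, sizeof(slot)); // simulate stale buffer contents
  slot[0] = 3;                           // the current value is 3 bytes long
  std::memcpy(slot + 1, "abc", 3);
  zero_unused_tail(slot, {11, 1, 3});    // bytes 4..10 become 0x00
  return 0;
}
```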
Diffstat (limited to 'storage/archive')
-rw-r--r-- | storage/archive/ha_archive.cc | 64
1 file changed, 40 insertions, 24 deletions
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index b2b07981bdb..81cb9182e3f 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -329,10 +329,14 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
   DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
   DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu",
+                                            (long long unsigned)*rows));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu",
+                                            (long long unsigned) check_point));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu",
+                                            (long long unsigned)*auto_increment));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu",
+                                            (long long unsigned)*forced_flushes));
   DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path));
   DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
@@ -385,12 +389,14 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows,
                                              (uint)ARCHIVE_CHECK_HEADER));
   DBUG_PRINT("ha_archive::write_meta_file", ("Version %d",
                                              (uint)ARCHIVE_VERSION));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu",
+                                             (unsigned long long)rows));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu",
+                                             (unsigned long long)check_point));
   DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
-                                             auto_increment));
+                                             (unsigned long long)auto_increment));
   DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
-                                             forced_flushes));
+                                             (unsigned long long)forced_flushes));
   DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s", real_path));
   DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
@@ -774,7 +780,8 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer)
   written= azwrite(writer, buf, table->s->reclength);
   DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d",
-                                            written, table->s->reclength));
+                                            (uint32)written,
+                                            (uint32)table->s->reclength));
 
   if (!delayed_insert || !bulk_insert)
     share->dirty= TRUE;
@@ -913,18 +920,25 @@ int ha_archive::write_row(byte *buf)
   */
   for (Field **field=table->field ; *field ; field++)
   {
-    DBUG_PRINT("archive",("Pack is %d\n", (*field)->pack_length()));
-    DBUG_PRINT("archive",("MyPack is %d\n", (*field)->data_length((char*) buf + (*field)->offset())));
-    if ((*field)->real_type() == MYSQL_TYPE_VARCHAR)
-    {
-      uint actual_length= (*field)->data_length((char*) buf + (*field)->offset());
-      uint offset= (*field)->offset() + actual_length +
-                   (actual_length > 255 ? 2 : 1);
-      DBUG_PRINT("archive",("Offset is %d -> %d\n", actual_length, offset));
-      /*
-      if ((*field)->pack_length() + (*field)->offset() != offset)
-        bzero(buf + offset, (size_t)((*field)->pack_length() + (actual_length > 255 ? 2 : 1) - (*field)->data_length));
+    /*
+      Pack length will report 256 when you have 255 bytes
+      of data plus the single byte for length.
+
+      Probably could have added a method to say the number
+      of bytes taken up by field for the length data.
     */
+    uint32 actual_length= (*field)->data_length() +
+                          ((*field)->pack_length() > 256 ? 2 : 1);
+
+    if ((*field)->real_type() == MYSQL_TYPE_VARCHAR)
+    {
+      char *ptr= (*field)->ptr + actual_length;
+
+      DBUG_ASSERT(actual_length <= (*field)->pack_length());
+
+      uint32 to_free= (*field)->pack_length() - actual_length;
+      if (to_free > 0)
+        bzero(ptr, to_free);
     }
   }
@@ -1054,7 +1068,8 @@ int ha_archive::rnd_init(bool scan)
   if (scan)
   {
     scan_rows= share->rows_recorded;
-    DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows));
+    DBUG_PRINT("info", ("archive will retrieve %llu rows",
+                        (unsigned long long)scan_rows));
     stats.records= 0;
 
     /*
@@ -1096,8 +1111,8 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
   DBUG_ENTER("ha_archive::get_row");
 
   read= azread(file_to_read, buf, table->s->reclength);
-  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read,
-                                     table->s->reclength));
+  DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", read,
+                                     (unsigned long)table->s->reclength));
 
   if (read == Z_STREAM_ERROR)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -1314,7 +1329,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
       share->rows_recorded++;
     }
   }
-  DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded));
+  DBUG_PRINT("info", ("recovered %llu archive rows",
+                      (unsigned long long)share->rows_recorded));
 
   my_free((char*)buf, MYF(0));
 
   if (rc && rc != HA_ERR_END_OF_FILE)
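One subtlety in the write_row() hunk, flagged by the comment in the diff: pack_length() includes the length prefix itself, so a VARCHAR(255) reports 256 (1 prefix byte + 255 data bytes) yet still needs only a one-byte prefix. That is why the code tests pack_length() > 256 rather than > 255. A hypothetical helper, named length_prefix_bytes() here for illustration only, makes the arithmetic explicit:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helper (not in MySQL) spelling out the diff's ternary:
// derive the number of length-prefix bytes from the full slot width.
static uint32_t length_prefix_bytes(uint32_t pack_length) {
  return pack_length > 256 ? 2 : 1;
}

int main() {
  assert(length_prefix_bytes(1 + 255) == 1); // VARCHAR(255): one-byte prefix
  assert(length_prefix_bytes(2 + 256) == 2); // VARCHAR(256): two-byte prefix
  return 0;
}
```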
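The remaining hunks all fix one recurring warning: %llu expects an argument whose type is exactly unsigned long long, while ha_rows and ulonglong may be typedef'd to a different 64-bit type depending on the platform, and passing the wrong type through a varargs call like printf is undefined behavior. Casting at the call site, as the diff does for each DBUG_PRINT, is the portable fix. A standalone illustration, with a plain unsigned long standing in for ha_rows:

```cpp
#include <cstdio>

int main() {
  unsigned long rows = 42;  // stand-in for a ha_rows value on an LP64 platform
  // std::printf("%llu\n", rows);                  // -Wformat: unsigned long given to %llu
  std::printf("%llu\n", (unsigned long long)rows); // cast matches the format string
  return 0;
}
```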