author    | unknown <gkodinov/kgeorge@macbook.gmz> | 2008-03-29 09:52:16 +0200
committer | unknown <gkodinov/kgeorge@macbook.gmz> | 2008-03-29 09:52:16 +0200
commit    | 18a01ce9246616e2a3f131d69c132109335c9359 (patch)
tree      | 2375972d03b6138e81e39befae9c2ddb6ffabd86 /sql
parent    | 396b24a57c519bb85e74c01f427a7d02b4122035 (diff)
download  | mariadb-git-18a01ce9246616e2a3f131d69c132109335c9359.tar.gz
fixed warnings and compile errors from the fix for bug 26243
Diffstat (limited to 'sql')
-rw-r--r--  sql/ha_federated.cc       |  2
-rw-r--r--  sql/ha_innodb.cc          |  2
-rw-r--r--  sql/ha_ndbcluster.cc      | 34
-rw-r--r--  sql/ha_ndbcluster_cond.cc |  2

4 files changed, 19 insertions, 21 deletions
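The recurring change in the diff below is an explicit (uchar*) cast on the buffer passed to DBUG_DUMP; presumably the dbug update pulled in by the fix for bug 26243 changed the dump prototype to take unsigned char pointers, so the old char*/byte* arguments started producing pointer-conversion warnings. A minimal sketch of the pattern, with a hypothetical dump_key() helper (not from the patch) and the usual MySQL-internal headers assumed:

#include <my_global.h>  /* assumed: provides the uchar/uint typedefs */
#include <my_dbug.h>    /* DBUG_ENTER / DBUG_DUMP / DBUG_VOID_RETURN macros */

/* Hypothetical helper, not part of the patch: dumps a key buffer in debug
   builds. The (uchar*) cast mirrors the change applied throughout the diff;
   without it the newer dbug prototype would presumably warn about converting
   a char* (or const char*) to unsigned char*. */
static void dump_key(const char *key, uint key_len)
{
  DBUG_ENTER("dump_key");
  DBUG_DUMP("key", (uchar*) key, key_len);
  DBUG_VOID_RETURN;
}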
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index 7a7d65a3d34..2ccfeba74cb 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -1095,7 +1095,7 @@ bool ha_federated::create_where_from_key(String *to,
     uint store_length= key_part->store_length;
     uint part_length= min(store_length, length);
     needs_quotes= 1;
-    DBUG_DUMP("key, start of loop", ptr, length);
+    DBUG_DUMP("key, start of loop", (uchar *)ptr, length);
 
     if (key_part->null_bit)
     {
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index a2de8ea1d0b..1be6137460b 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -4250,7 +4250,7 @@ ha_innobase::rnd_pos(
 	int	error;
 	uint	keynr = active_index;
 	DBUG_ENTER("rnd_pos");
-	DBUG_DUMP("key", pos, ref_length);
+	DBUG_DUMP("key", (uchar *)pos, ref_length);
 
 	statistic_increment(current_thd->status_var.ha_read_rnd_count,
 			    &LOCK_status);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index e3ab2b67e26..0a75b328ca0 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -677,7 +677,7 @@ int ha_ndbcluster::set_ndb_key(NdbOperation *ndb_op, Field *field,
   DBUG_PRINT("enter", ("%d: %s, ndb_type: %u, len=%d",
                        fieldnr, field->field_name, field->type(),
                        pack_len));
-  DBUG_DUMP("key", (char*)field_ptr, pack_len);
+  DBUG_DUMP("key", (uchar*)field_ptr, pack_len);
 
   DBUG_ASSERT(ndb_supported_type(field->type()));
   DBUG_ASSERT(! (field->flags & BLOB_FLAG));
@@ -699,7 +699,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
   DBUG_PRINT("enter", ("%d: %s, type: %u, len=%d, is_null=%s",
                        fieldnr, field->field_name, field->type(),
                        pack_len, field->is_null()?"Y":"N"));
-  DBUG_DUMP("value", (char*) field_ptr, pack_len);
+  DBUG_DUMP("value", (uchar*) field_ptr, pack_len);
 
   DBUG_ASSERT(ndb_supported_type(field->type()));
   {
@@ -737,7 +737,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
         // Set value to NULL
         DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL, pack_len) != 0));
       DBUG_PRINT("info", ("bit field"));
-      DBUG_DUMP("value", (char*)&bits, pack_len);
+      DBUG_DUMP("value", (uchar*)&bits, pack_len);
 #ifdef WORDS_BIGENDIAN
       /* store lsw first */
       bits = ((bits >> 32) & 0x00000000FFFFFFFFLL)
@@ -768,7 +768,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
       DBUG_PRINT("value", ("set blob ptr: %p len: %u",
                            blob_ptr, blob_len));
-      DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
+      DBUG_DUMP("value", (uchar*)blob_ptr, min(blob_len, 26));
 
       if (set_blob_value)
         *set_blob_value= TRUE;
@@ -1007,8 +1007,8 @@ int ha_ndbcluster::get_metadata(const char *path)
                  ("metadata, pack_length: %d getFrmLength: %d memcmp: %d",
                   pack_length, tab->getFrmLength(),
                   memcmp(pack_data, tab->getFrmData(), pack_length)));
-      DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
-      DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
+      DBUG_DUMP("pack_data", (uchar*)pack_data, pack_length);
+      DBUG_DUMP("frm", (uchar*)tab->getFrmData(), tab->getFrmLength());
       error= 3;
       invalidating_ndb_table= FALSE;
     }
@@ -1502,7 +1502,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
   int res;
   DBUG_ENTER("pk_read");
   DBUG_PRINT("enter", ("key_len: %u", key_len));
-  DBUG_DUMP("key", (char*)key, key_len);
+  DBUG_DUMP("key", (uchar*)key, key_len);
 
   NdbOperation::LockMode lm=
     (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@@ -1514,7 +1514,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
   {
     // This table has no primary key, use "hidden" primary key
     DBUG_PRINT("info", ("Using hidden key"));
-    DBUG_DUMP("key", (char*)key, 8);
+    DBUG_DUMP("key", (uchar*)key, 8);
 
     if (set_hidden_key(op, no_fields, key))
       ERR_RETURN(trans->getNdbError());
@@ -1797,7 +1797,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
   NdbIndexOperation *op;
   DBUG_ENTER("ha_ndbcluster::unique_index_read");
   DBUG_PRINT("enter", ("key_len: %u, index: %u", key_len, active_index));
-  DBUG_DUMP("key", (char*)key, key_len);
+  DBUG_DUMP("key", (uchar*)key, key_len);
 
   NdbOperation::LockMode lm=
     (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type);
@@ -2126,7 +2126,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
       {
        DBUG_PRINT("info", ("key %d:%d offset=%d length=%d last=%d bound=%d",
                            j, i, tot_len, part_len, p.part_last, p.bound_type));
-        DBUG_DUMP("info", (const char*)p.part_ptr, part_store_len);
+        DBUG_DUMP("info", (const uchar*)p.part_ptr, part_store_len);
 
         // Set bound if not cancelled via type -1
         if (p.bound_type != -1)
@@ -2644,7 +2644,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
     // Require that the PK for this record has previously been
     // read into m_ref
-    DBUG_DUMP("key", m_ref, NDB_HIDDEN_PRIMARY_KEY_LENGTH);
+    DBUG_DUMP("key", (uchar *)m_ref, NDB_HIDDEN_PRIMARY_KEY_LENGTH);
 
     if (set_hidden_key(op, table->s->fields, m_ref))
       ERR_RETURN(op->getNdbError());
@@ -2864,8 +2864,6 @@ void ha_ndbcluster::print_results()
   DBUG_ENTER("print_results");
 
 #ifndef DBUG_OFF
-  if (!_db_on_)
-    DBUG_VOID_RETURN;
 
   char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH];
   String type(buf_type, sizeof(buf_type), &my_charset_bin);
@@ -3341,7 +3339,7 @@ void ha_ndbcluster::position(const byte *record)
     memcpy(ref, m_ref, ref_length);
   }
 
-  DBUG_DUMP("ref", (char*)ref, ref_length);
+  DBUG_DUMP("ref", (uchar*)ref, ref_length);
   DBUG_VOID_RETURN;
 }
@@ -6132,14 +6130,14 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
 
   if (!is_autocommit)
   {
-    DBUG_PRINT("exit", ("Can't register table during transaction"))
+    DBUG_PRINT("exit", ("Can't register table during transaction"));
     DBUG_RETURN(FALSE);
   }
 
   if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
   {
     *engine_data= 0;
-    DBUG_PRINT("exit", ("Error, could not get commitcount"))
+    DBUG_PRINT("exit", ("Error, could not get commitcount"));
     DBUG_RETURN(FALSE);
   }
   *engine_data= commit_count;
@@ -6263,7 +6261,7 @@ static int packfrm(const void *data, uint len,
   }
   DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", org_len, comp_len));
-  DBUG_DUMP("compressed", (char*)data, org_len);
+  DBUG_DUMP("compressed", (uchar*)data, org_len);
 
   error= 2;
   blob_len= sizeof(frm_blob_struct::frm_blob_header)+org_len;
@@ -6307,7 +6305,7 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len,
   DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu",
                      ver,complen,orglen));
-  DBUG_DUMP("blob->data", (char*) blob->data, complen);
+  DBUG_DUMP("blob->data", (uchar*) blob->data, complen);
 
   if (ver != 1)
   {
diff --git a/sql/ha_ndbcluster_cond.cc b/sql/ha_ndbcluster_cond.cc
index f5b41959b40..37e710acff4 100644
--- a/sql/ha_ndbcluster_cond.cc
+++ b/sql/ha_ndbcluster_cond.cc
@@ -1419,7 +1419,7 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanOperation *op,
     uint32 pack_len= field->pack_length();
     const byte* ptr= key;
     DBUG_PRINT("info", ("Filtering value for %s", field->field_name));
-    DBUG_DUMP("key", (char*)ptr, pack_len);
+    DBUG_DUMP("key", (uchar*)ptr, pack_len);
 
     if (key_part->null_bit)
     {
      DBUG_PRINT("info", ("Generating ISNULL filter"));
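The two DBUG_PRINT hunks in ha_ndbcluster.cc add nothing but trailing semicolons, and the print_results() hunk drops the _db_on_ guard that the updated dbug library apparently no longer provides. A minimal sketch of why the semicolons became compile errors, assuming the newer DBUG_PRINT expands to a do { ... } while (0) statement; the can_register() function below is illustrative only, not code from the patch:

#include <my_global.h>  /* assumed: my_bool, TRUE/FALSE */
#include <my_dbug.h>

/* Illustrative only: if DBUG_PRINT expands to do { ... } while (0), using it
   as a statement requires its own ';'. The older expansion was presumably a
   bare block, which is why the missing semicolons compiled cleanly until the
   dbug update that this commit cleans up after. */
static my_bool can_register(my_bool is_autocommit)
{
  DBUG_ENTER("can_register");
  if (!is_autocommit)
  {
    DBUG_PRINT("exit", ("Can't register table during transaction"));
    DBUG_RETURN(FALSE);
  }
  DBUG_RETURN(TRUE);
}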