Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r--  sql/ha_ndbcluster.cc  108
1 file changed, 54 insertions, 54 deletions
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 7700aebe367..0703e18b5f7 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -413,7 +413,8 @@ Thd_ndb::get_open_table(THD *thd, const void *key)
thd_ndb_share->stat.no_uncommitted_rows_count= 0;
thd_ndb_share->stat.records= ~(ha_rows)0;
}
- DBUG_PRINT("exit", ("thd_ndb_share: 0x%x key: 0x%x", thd_ndb_share, key));
+ DBUG_PRINT("exit", ("thd_ndb_share: 0x%lx key: 0x%lx",
+ (long) thd_ndb_share, (long) key));
DBUG_RETURN(thd_ndb_share);
}
@@ -761,8 +762,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
blob_ptr= (char*)"";
}
- DBUG_PRINT("value", ("set blob ptr=%p len=%u",
- blob_ptr, blob_len));
+ DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u",
+ (long) blob_ptr, blob_len));
DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
if (set_blob_value)
@@ -847,8 +848,8 @@ int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
uint32 len= 0xffffffff; // Max uint32
if (ndb_blob->readData(buf, len) != 0)
ERR_RETURN(ndb_blob->getNdbError());
- DBUG_PRINT("info", ("[%u] offset=%u buf=%p len=%u [ptrdiff=%d]",
- i, offset, buf, len, (int)ptrdiff));
+ DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]",
+ i, offset, (long) buf, len, (int)ptrdiff));
DBUG_ASSERT(len == len64);
// Ugly hack assumes only ptr needs to be changed
field_blob->ptr+= ptrdiff;
@@ -1171,8 +1172,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
index= dict->getIndexGlobal(index_name, *m_table);
if (!index)
ERR_RETURN(dict->getNdbError());
- DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
- index,
+ DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
+ (long) index,
index->getObjectId(),
index->getObjectVersion() & 0xFFFFFF,
index->getObjectVersion() >> 24,
@@ -1215,8 +1216,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
index= dict->getIndexGlobal(unique_index_name, *m_table);
if (!index)
ERR_RETURN(dict->getNdbError());
- DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
- index,
+ DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
+ (long) index,
index->getObjectId(),
index->getObjectVersion() & 0xFFFFFF,
index->getObjectVersion() >> 24,
@@ -2305,7 +2306,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
// Set bound if not done with this key
if (p.key != NULL)
{
- DBUG_PRINT("info", ("key %d:%d offset=%d length=%d last=%d bound=%d",
+ DBUG_PRINT("info", ("key %d:%d offset: %d length: %d last: %d bound: %d",
j, i, tot_len, part_len, p.part_last, p.bound_type));
DBUG_DUMP("info", (const char*)p.part_ptr, part_store_len);
@@ -2462,7 +2463,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
part_spec.start_part= 0;
part_spec.end_part= m_part_info->get_tot_partitions() - 1;
prune_partition_set(table, &part_spec);
- DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@@ -2658,7 +2659,7 @@ int ha_ndbcluster::write_row(byte *record)
{
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
- "rows_inserted:%d, bulk_insert_rows: %d",
+ "rows_inserted: %d bulk_insert_rows: %d",
(int)m_rows_inserted, (int)m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
@@ -3108,7 +3109,8 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
char* ptr;
field_blob->get_ptr(&ptr, row_offset);
uint32 len= field_blob->get_length(row_offset);
- DBUG_PRINT("info",("[%u] SET ptr=%p len=%u", col_no, ptr, len));
+ DBUG_PRINT("info",("[%u] SET ptr: 0x%lx len: %u",
+ col_no, (long) ptr, len));
#endif
}
}
@@ -3350,7 +3352,7 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
if (m_use_partition_function)
{
get_partition_set(table, buf, active_index, start_key, &part_spec);
- DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@@ -3876,7 +3878,7 @@ int ha_ndbcluster::end_bulk_insert()
NdbTransaction *trans= m_active_trans;
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
- "rows_inserted:%d, bulk_insert_rows: %d",
+ "rows_inserted: %d bulk_insert_rows: %d",
(int) m_rows_inserted, (int) m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
if (m_transaction_on)
@@ -5101,13 +5103,12 @@ void ha_ndbcluster::prepare_for_alter()
int ha_ndbcluster::add_index(TABLE *table_arg,
KEY *key_info, uint num_of_keys)
{
- DBUG_ENTER("ha_ndbcluster::add_index");
- DBUG_PRINT("info", ("ha_ndbcluster::add_index to table %s",
- table_arg->s->table_name));
int error= 0;
uint idx;
-
+ DBUG_ENTER("ha_ndbcluster::add_index");
+ DBUG_PRINT("enter", ("table %s", table_arg->s->table_name.str));
DBUG_ASSERT(m_share->state == NSS_ALTERED);
+
for (idx= 0; idx < num_of_keys; idx++)
{
KEY *key= key_info + idx;
@@ -6662,7 +6663,7 @@ static int ndbcluster_end(handlerton *hton, ha_panic_function type)
void ha_ndbcluster::print_error(int error, myf errflag)
{
DBUG_ENTER("ha_ndbcluster::print_error");
- DBUG_PRINT("enter", ("error = %d", error));
+ DBUG_PRINT("enter", ("error: %d", error));
if (error == HA_ERR_NO_PARTITION_FOUND)
m_part_info->print_no_partition_found(table);
@@ -7168,16 +7169,16 @@ static void dbug_print_open_tables()
for (uint i= 0; i < ndbcluster_open_tables.records; i++)
{
NDB_SHARE *share= (NDB_SHARE*) hash_element(&ndbcluster_open_tables, i);
- DBUG_PRINT("share",
- ("[%d] 0x%lx key: %s key_length: %d",
- i, share, share->key, share->key_length));
- DBUG_PRINT("share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("loop",
+ ("[%d] 0x%lx key: %s key_length: %d",
+ i, (long) share, share->key, share->key_length));
+ DBUG_PRINT("loop",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
- share->use_count, share->commit_count));
+ share->use_count, (ulong) share->commit_count));
#ifdef HAVE_NDB_BINLOG
if (share->table)
- DBUG_PRINT("share",
+ DBUG_PRINT("loop",
("table->s->db.table_name: %s.%s",
share->table->s->db.str, share->table->s->table_name.str));
#endif
@@ -7330,13 +7331,13 @@ static int rename_share(NDB_SHARE *share, const char *new_key)
share->table_name= share->db + strlen(share->db) + 1;
ha_ndbcluster::set_tabname(new_key, share->table_name);
- DBUG_PRINT("rename_share",
- ("0x%lx key: %s key_length: %d",
- share, share->key, share->key_length));
- DBUG_PRINT("rename_share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("info",
+ ("share: 0x%lx key: %s key_length: %d",
+ (long) share, share->key, share->key_length));
+ DBUG_PRINT("info",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
- share->use_count, share->commit_count));
+ share->use_count, (ulong) share->commit_count));
if (share->table)
{
DBUG_PRINT("rename_share",
@@ -7371,13 +7372,13 @@ NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share)
dbug_print_open_tables();
- DBUG_PRINT("get_share",
- ("0x%lx key: %s key_length: %d",
- share, share->key, share->key_length));
- DBUG_PRINT("get_share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("info",
+ ("share: 0x%lx key: %s key_length: %d",
+ (long) share, share->key, share->key_length));
+ DBUG_PRINT("info",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
- share->use_count, share->commit_count));
+ share->use_count, (ulong) share->commit_count));
pthread_mutex_unlock(&ndbcluster_mutex);
return share;
}
@@ -7485,13 +7486,12 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
void ndbcluster_real_free_share(NDB_SHARE **share)
{
DBUG_ENTER("ndbcluster_real_free_share");
- DBUG_PRINT("real_free_share",
- ("0x%lx key: %s key_length: %d",
- (*share), (*share)->key, (*share)->key_length));
- DBUG_PRINT("real_free_share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("enter",
+ ("share: 0x%lx key: %s key_length: %d "
+ "db.tablename: %s.%s use_count: %d commit_count: %lu",
+ (long) (*share), (*share)->key, (*share)->key_length,
(*share)->db, (*share)->table_name,
- (*share)->use_count, (*share)->commit_count));
+ (*share)->use_count, (ulong) (*share)->commit_count));
hash_delete(&ndbcluster_open_tables, (byte*) *share);
thr_lock_delete(&(*share)->lock);
@@ -7539,13 +7539,13 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
else
{
dbug_print_open_tables();
- DBUG_PRINT("free_share",
- ("0x%lx key: %s key_length: %d",
- *share, (*share)->key, (*share)->key_length));
- DBUG_PRINT("free_share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("info",
+ ("share: 0x%lx key: %s key_length: %d",
+ (long) *share, (*share)->key, (*share)->key_length));
+ DBUG_PRINT("info",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
(*share)->db, (*share)->table_name,
- (*share)->use_count, (*share)->commit_count));
+ (*share)->use_count, (ulong) (*share)->commit_count));
}
if (!have_lock)
pthread_mutex_unlock(&ndbcluster_mutex);
@@ -7815,7 +7815,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
get_partition_set(table, curr, active_index,
&multi_range_curr->start_key,
&part_spec);
- DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@@ -8347,8 +8347,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat) == 0)
{
char buff[22], buff2[22];
- DBUG_PRINT("ndb_util_thread",
- ("Table: %s, commit_count: %llu, rows: %llu",
+ DBUG_PRINT("info",
+ ("Table: %s commit_count: %s rows: %s",
share->key,
llstr(stat.commit_count, buff),
llstr(stat.row_count, buff2)));
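
The recurring change in these hunks is format-string hygiene for DBUG_PRINT: pointer values previously passed to %x or %p conversions are cast to (long) and printed as 0x%lx, and 64-bit counters such as commit_count are cast to ulong for %lu, so the arguments match the conversion specifiers on both 32- and 64-bit builds. Below is a minimal standalone sketch of the same idea, using plain printf and a hypothetical stand-in struct so it compiles without the dbug library; it is an illustration of the pattern, not code from the patch.

```cpp
#include <cstdio>

// Hypothetical stand-in for NDB_SHARE; only the fields printed below.
struct share_stub {
  const char *key;
  unsigned long long commit_count;   // 64-bit counter
};

int main() {
  share_stub share = { "test/t1", 42ULL };

  // Broken on LP64 platforms: "%x" expects unsigned int, so passing a
  // pointer truncates it to 32 bits (and is undefined behaviour).
  //   printf("share: 0x%x key: %s\n", &share, share.key);

  // Pattern used in the patch: widen the arguments explicitly so they
  // match the conversion specifiers. The patch casts pointers to (long);
  // (unsigned long) is used here only to keep -Wformat quiet -- the
  // width is the same on the platforms this code targets.
  printf("share: 0x%lx key: %s commit_count: %lu\n",
         (unsigned long) (void *) &share, share.key,
         (unsigned long) share.commit_count);
  return 0;
}
```

DBUG_PRINT itself takes a keyword plus a parenthesised (format, args...) group, e.g. DBUG_PRINT("info", ("share: 0x%lx", (long) share)), which is why every hunk above keeps the double parentheses around the format string and its arguments.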