summaryrefslogtreecommitdiff
path: root/storage/spider/ha_spider.cc
diff options
context:
space:
mode:
authorJacob Mathew <jacob.mathew@mariadb.com>2018-07-24 15:57:13 -0700
committerJacob Mathew <jacob.mathew@mariadb.com>2018-07-24 15:57:13 -0700
commitd6594847cff55bb6d7d094a0311f1fe3d5be789e (patch)
treec95ecb3a3c01fe152d683e71ecab3ca083e1d6db /storage/spider/ha_spider.cc
parenta78d1aaaa349ebbe3400e48f63903b349050b316 (diff)
downloadmariadb-git-d6594847cff55bb6d7d094a0311f1fe3d5be789e.tar.gz
MDEV-16246: insert timestamp into spider table from mysqldump gets wrong time zone.bb-10.4-MDEV-16246
The problem occurred because the Spider node was incorrectly handling timestamp values sent to and received from the data nodes. The problem has been corrected as follows: - Added logic to set and maintain the UTC time zone on the data nodes. To prevent timestamp ambiguity, it is necessary for the data nodes to use a time zone such as UTC which does not have daylight savings time. - Removed the spider_sync_time_zone configuration variable, which did not solve the problem and which interfered with the solution. - Added logic to convert to the UTC time zone all timestamp values sent to and received from the data nodes. This is done for both unique and non-unique timestamp columns. It is done for WHERE clauses, applying to SELECT, UPDATE and DELETE statements, and for UPDATE columns. - Disabled Spider's use of direct update when any of the columns to update is a timestamp column. This is necessary to prevent false duplicate key value errors. - Added a new test spider.timestamp to thoroughly test Spider's handling of timestamp values. Author: Jacob Mathew. Reviewer: Kentoku Shiba. Merged: Commit 97cc9d3 on branch bb-10.3-MDEV-16246
Diffstat (limited to 'storage/spider/ha_spider.cc')
-rw-r--r--storage/spider/ha_spider.cc94
1 files changed, 68 insertions, 26 deletions
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index a2543065891..e6377d566d9 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -10064,13 +10064,11 @@ int ha_spider::update_row(
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS_WITH_HS
-int ha_spider::direct_update_rows_init(
- uint mode,
- KEY_MULTI_RANGE *ranges,
- uint range_count,
- bool sorted,
- const uchar *new_data
-) {
+int ha_spider::direct_update_rows_init(List<Item> *update_fields, uint mode,
+ KEY_MULTI_RANGE *ranges,
+ uint range_count, bool sorted,
+ const uchar *new_data)
+{
#if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET)
int error_num;
#endif
@@ -10098,7 +10096,7 @@ int ha_spider::direct_update_rows_init(
DBUG_RETURN(pre_direct_init_result);
}
DBUG_RETURN(bulk_access_link_exec_tgt->spider->direct_update_rows_init(
- mode, ranges, range_count, sorted, new_data));
+ update_fields, mode, ranges, range_count, sorted, new_data));
}
#endif
direct_update_init(
@@ -10202,14 +10200,46 @@ int ha_spider::direct_update_rows_init(
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
#else
-int ha_spider::direct_update_rows_init()
+/**
+ Perform initialization for a direct update request.
+
+  @param update_fields Pointer to the list of fields to update.
+
+ @return >0 Error.
+ 0 Success.
+*/
+
+int ha_spider::direct_update_rows_init(List<Item> *update_fields)
{
st_select_lex *select_lex;
longlong select_limit;
longlong offset_limit;
+ List_iterator<Item> it(*update_fields);
+ Item *item;
+ Field *field;
THD *thd = trx->thd;
DBUG_ENTER("ha_spider::direct_update_rows_init");
DBUG_PRINT("info",("spider this=%p", this));
+
+ while ((item = it++))
+ {
+ if (item->type() == Item::FIELD_ITEM)
+ {
+ field = ((Item_field *)item)->field;
+
+ if (field->type() == FIELD_TYPE_TIMESTAMP &&
+ field->flags & UNIQUE_KEY_FLAG)
+ {
+ /*
+ Spider cannot perform direct update on unique timestamp fields.
+ To avoid false duplicate key errors, the table needs to be
+ updated one row at a time.
+ */
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ }
+ }
+ }
+
#ifdef HA_CAN_BULK_ACCESS
if (
bulk_access_executing &&
@@ -10227,7 +10257,8 @@ int ha_spider::direct_update_rows_init()
pre_direct_init_result));
DBUG_RETURN(pre_direct_init_result);
}
- DBUG_RETURN(bulk_access_link_exec_tgt->spider->direct_update_rows_init());
+ DBUG_RETURN(bulk_access_link_exec_tgt->spider->
+                direct_update_rows_init(update_fields));
}
#endif
direct_update_init(
@@ -10298,31 +10329,41 @@ int ha_spider::direct_update_rows_init()
#ifdef HA_CAN_BULK_ACCESS
#ifdef HANDLER_HAS_DIRECT_UPDATE_ROWS_WITH_HS
-int ha_spider::pre_direct_update_rows_init(
- uint mode,
- KEY_MULTI_RANGE *ranges,
- uint range_count,
- bool sorted,
- const uchar *new_data
-) {
+int ha_spider::pre_direct_update_rows_init(List<Item> *update_fields,
+ uint mode,
+ KEY_MULTI_RANGE *ranges,
+ uint range_count, bool sorted,
+ const uchar *new_data)
+{
int error_num;
DBUG_ENTER("ha_spider::pre_direct_update_rows_init");
DBUG_PRINT("info",("spider this=%p", this));
if (bulk_access_started)
{
error_num = bulk_access_link_current->spider->
- pre_direct_update_rows_init(
- mode, ranges, range_count, sorted, new_data);
+ pre_direct_update_rows_init(update_fields, mode, ranges, range_count,
+ sorted, new_data);
bulk_access_link_current->spider->bulk_access_pre_called = TRUE;
bulk_access_link_current->called = TRUE;
DBUG_RETURN(error_num);
}
- pre_direct_init_result = direct_update_rows_init(
- mode, ranges, range_count, sorted, new_data);
+ pre_direct_init_result = direct_update_rows_init(update_fields, mode,
+ ranges, range_count,
+ sorted, new_data);
DBUG_RETURN(pre_direct_init_result);
}
#else
-int ha_spider::pre_direct_update_rows_init()
+/**
+  Do initialization for performing a parallel direct update
+  request.
+
+  @param update_fields Pointer to the list of fields to update.
+
+  @return >0 Error.
+          0 Success.
+*/
+
+int ha_spider::pre_direct_update_rows_init(List<Item> *update_fields)
{
int error_num;
DBUG_ENTER("ha_spider::pre_direct_update_rows_init");
@@ -10330,12 +10371,12 @@ int ha_spider::pre_direct_update_rows_init()
if (bulk_access_started)
{
error_num = bulk_access_link_current->spider->
- pre_direct_update_rows_init();
+ pre_direct_update_rows_init(update_fields);
bulk_access_link_current->spider->bulk_access_pre_called = TRUE;
bulk_access_link_current->called = TRUE;
DBUG_RETURN(error_num);
}
- pre_direct_init_result = direct_update_rows_init();
+ pre_direct_init_result = direct_update_rows_init(update_fields);
DBUG_RETURN(pre_direct_init_result);
}
#endif
@@ -15733,8 +15774,9 @@ int ha_spider::print_item_type(
dbton_hdl = dbton_handler[dbton_id];
if (
dbton_hdl->first_link_idx >= 0 &&
- (error_num = spider_db_print_item_type(item, this, str,
- alias, alias_length, dbton_id, FALSE, NULL))
+ (error_num = spider_db_print_item_type(item, NULL, this, str,
+ alias, alias_length, dbton_id,
+ FALSE, NULL))
) {
DBUG_RETURN(error_num);
}