-rw-r--r--  client/mysqlbinlog.cc | 333
-rw-r--r--  mysql-test/main/func_str.result | 6
-rw-r--r--  mysql-test/main/mysqlbinlog.result | 35
-rw-r--r--  mysql-test/main/mysqlbinlog.test | 6
-rw-r--r--  mysql-test/std_data/bug47142_master-bin.000001 | bin 386 -> 0 bytes
-rw-r--r--  mysql-test/std_data/master-bin.000001 | bin 98 -> 0 bytes
-rw-r--r--  mysql-test/std_data/trunc_binlog.000001 | bin 174 -> 0 bytes
-rw-r--r--  mysql-test/suite/binlog/r/binlog_base64_flag.result | 33
-rw-r--r--  mysql-test/suite/binlog/r/binlog_old_versions.result | 70
-rw-r--r--  mysql-test/suite/binlog/std_data/binlog_old_version_4_1.000001 | bin 149436 -> 0 bytes
-rw-r--r--  mysql-test/suite/binlog/std_data/bug32407.001 | bin 368 -> 0 bytes
-rw-r--r--  mysql-test/suite/binlog/std_data/ver_5_1-telco.001 | bin 150385 -> 0 bytes
-rw-r--r--  mysql-test/suite/binlog/std_data/ver_5_1_17.001 | bin 150385 -> 0 bytes
-rw-r--r--  mysql-test/suite/binlog/std_data/ver_5_1_23.001 | bin 150402 -> 0 bytes
-rw-r--r--  mysql-test/suite/binlog/std_data/ver_trunk_row_v2.001 | bin 151722 -> 0 bytes
-rw-r--r--  mysql-test/suite/binlog/t/binlog_base64_flag.test | 28
-rw-r--r--  mysql-test/suite/binlog/t/binlog_old_versions.test | 153
-rw-r--r--  mysql-test/suite/rpl/r/rpl_cross_version.result | 22
-rw-r--r--  mysql-test/suite/rpl/t/rpl_cross_version-master.opt | 1
-rw-r--r--  mysql-test/suite/rpl/t/rpl_cross_version.test | 48
-rw-r--r--  sql/CMakeLists.txt | 1
-rw-r--r--  sql/log_event.cc | 409
-rw-r--r--  sql/log_event.h | 623
-rw-r--r--  sql/log_event_client.cc | 202
-rw-r--r--  sql/log_event_old.cc | 2749
-rw-r--r--  sql/log_event_old.h | 569
-rw-r--r--  sql/log_event_server.cc | 1086
-rw-r--r--  sql/rpl_record_old.cc | 199
-rw-r--r--  sql/rpl_record_old.h | 35
-rw-r--r--  sql/rpl_rli.cc | 17
-rw-r--r--  sql/slave.cc | 401
-rw-r--r--  sql/sql_class.h | 1
-rw-r--r--  sql/sql_load.cc | 82
-rw-r--r--  sql/sql_repl.cc | 6
-rw-r--r--  sql/sql_string.cc | 3
-rw-r--r--  sql/wsrep_mysqld.cc | 2
36 files changed, 277 insertions(+), 6843 deletions(-)
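
Orientation note, not part of the patch: after this commit only version-4 binary logs remain readable; support for the 3.23/4.x formats (Load, Create_file, Exec_load, Start_v3, pre-GA row events) is dropped along with the old-version test binlogs. A minimal standalone sketch of how a reader can tell the two cases apart — the 4-byte magic, the 19-byte common header size, and the post-header offsets are assumptions taken from the v4 layout (LOG_EVENT_HEADER_LEN, ST_BINLOG_VER_OFFSET, ST_SERVER_VER_OFFSET referenced later in this diff):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

static unsigned uint2le(const unsigned char *p) { return p[0] | (p[1] << 8); }

int main(int argc, char **argv)
{
  if (argc < 2)
    return 1;
  FILE *f= fopen(argv[1], "rb");
  if (!f)
    return 1;
  unsigned char magic[4], header[19], post[52];
  // 4-byte magic, then the first event: a 19-byte common header followed by
  // the Format_description post-header (2-byte version, 50-byte server string).
  if (fread(magic, 1, sizeof magic, f) != sizeof magic ||
      memcmp(magic, "\xfe" "bin", 4) != 0 ||
      fread(header, 1, sizeof header, f) != sizeof header ||
      fread(post, 1, sizeof post, f) != sizeof post)
  {
    fclose(f);
    puts("not a readable binlog");
    return 1;
  }
  unsigned version= uint2le(post);              // ST_BINLOG_VER_OFFSET
  printf("binlog_version=%u server=%.50s\n", version, (const char *)(post + 2));
  puts(version >= 4 ? "v4 format: still supported"
                    : "pre-5.0 format: no longer readable after this change");
  fclose(f);
  return 0;
}
```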
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 1dc96958ba5..bbcc1179bae 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -81,7 +81,7 @@ DYNAMIC_ARRAY events_in_stmt; // Storing the events that in one statement
String stop_event_string; // Storing the STOP_EVENT output string
extern "C" {
-char server_version[SERVER_VERSION_LENGTH];
+char server_version[SERVER_VERSION_LENGTH]="5.0.0";
}
static char *server_id_str;
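
mysqlbinlog compiles log_event.cc directly (see the #include list at the end of this file's diff), and the artificial Format_description_log_event(4) it now always builds copies the global server_version string; giving the extern a "5.0.0" default keeps that copy meaningful when no server connection supplies a real version. A trivial hedged sketch of the idea — the 60-byte size is an assumption standing in for SERVER_VERSION_LENGTH:

```cpp
// Hedged sketch only: a fixed-size, statically initialized version buffer,
// as the patch now gives mysqlbinlog's extern server_version.
#include <cstdio>
#include <cstring>

static const int kServerVersionLength= 60;   // assumed; real code uses SERVER_VERSION_LENGTH
char server_version[kServerVersionLength]= "5.0.0";

int main()
{
  // The artificial Format_description event copies this string verbatim.
  char copy[kServerVersionLength];
  memcpy(copy, server_version, sizeof copy);
  puts(copy);
  return 0;
}
```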
@@ -277,16 +277,10 @@ class Load_log_processor
When we see first event corresponding to some LOAD DATA statement in
binlog, we create temporary file to store data to be loaded.
We add name of this file to file_names array using its file_id as index.
- If we have Create_file event (i.e. we have binary log in pre-5.0.3
- format) we also store save event object to be able which is needed to
- emit LOAD DATA statement when we will meet Exec_load_data event.
- If we have Begin_load_query event we simply store 0 in
- File_name_record::event field.
*/
struct File_name_record
{
char *fname;
- Create_file_log_event *event;
};
/*
@todo Should be a map (e.g., a hash map), not an array. With the
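
With the Create_file event pointer gone, the per-file_id record only has to track the temporary file name, and the @todo above already suggests keying it by file_id in a map. A hedged sketch of what that bookkeeping amounts to (class and method names are illustrative, not code from the patch):

```cpp
// Hedged illustration: one temporary file name per LOAD DATA file_id,
// with grab_fname() handing ownership back exactly once.
#include <cstdint>
#include <string>
#include <unordered_map>

class LoadFileRegistry                       // hypothetical name
{
  std::unordered_map<uint32_t, std::string> file_names;
public:
  void register_file(uint32_t file_id, std::string fname)
  { file_names[file_id]= std::move(fname); }

  // Mirrors grab_fname() semantics: return the name and forget it.
  std::string grab_fname(uint32_t file_id)
  {
    auto it= file_names.find(file_id);
    if (it == file_names.end())
      return {};
    std::string res= std::move(it->second);
    file_names.erase(it);
    return res;
  }
};
```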
@@ -356,7 +350,6 @@ public:
if (ptr->fname)
{
my_free(ptr->fname);
- delete ptr->event;
bzero((char *)ptr, sizeof(File_name_record));
}
}
@@ -365,34 +358,6 @@ public:
}
/**
- Obtain Create_file event for LOAD DATA statement by its file_id
- and remove it from this Load_log_processor's list of events.
-
- Checks whether we have already seen a Create_file_log_event with
- the given file_id. If yes, returns a pointer to the event and
- removes the event from array describing active temporary files.
- From this moment, the caller is responsible for freeing the memory
- occupied by the event.
-
- @param[in] file_id File id identifying LOAD DATA statement.
-
- @return Pointer to Create_file_log_event, or NULL if we have not
- seen any Create_file_log_event with this file_id.
- */
- Create_file_log_event *grab_event(uint file_id)
- {
- File_name_record *ptr;
- Create_file_log_event *res;
-
- if (file_id >= file_names.elements)
- return 0;
- ptr= dynamic_element(&file_names, file_id, File_name_record*);
- if ((res= ptr->event))
- bzero((char *)ptr, sizeof(File_name_record));
- return res;
- }
-
- /**
Obtain file name of temporary file for LOAD DATA statement by its
file_id and remove it from this Load_log_processor's list of events.
@@ -415,125 +380,19 @@ public:
if (file_id >= file_names.elements)
return 0;
ptr= dynamic_element(&file_names, file_id, File_name_record*);
- if (!ptr->event)
- {
- res= ptr->fname;
- bzero((char *)ptr, sizeof(File_name_record));
- }
+ res= ptr->fname;
+ bzero((char *)ptr, sizeof(File_name_record));
return res;
}
- Exit_status process(Create_file_log_event *ce);
- Exit_status process(Begin_load_query_log_event *ce);
+ Exit_status process(Begin_load_query_log_event *blqe);
Exit_status process(Append_block_log_event *ae);
- File prepare_new_file_for_old_format(Load_log_event *le, char *filename);
- Exit_status load_old_format_file(NET* net, const char *server_fname,
- uint server_fname_len, File file);
Exit_status process_first_event(const char *bname, size_t blen,
const uchar *block,
- size_t block_len, uint file_id,
- Create_file_log_event *ce);
+ size_t block_len, uint file_id);
};
/**
- Creates and opens a new temporary file in the directory specified by previous call to init_by_dir_name() or init_by_cur_dir().
-
- @param[in] le The basename of the created file will start with the
- basename of the file pointed to by this Load_log_event.
-
- @param[out] filename Buffer to save the filename in.
-
- @return File handle >= 0 on success, -1 on error.
-*/
-File Load_log_processor::prepare_new_file_for_old_format(Load_log_event *le,
- char *filename)
-{
- size_t len;
- char *tail;
- File file;
-
- fn_format(filename, le->fname, target_dir_name, "", MY_REPLACE_DIR);
- len= strlen(filename);
- tail= filename + len;
-
- if ((file= create_unique_file(filename,tail)) < 0)
- {
- error("Could not construct local filename %s.",filename);
- return -1;
- }
-
- le->set_fname_outside_temp_buf(filename,len+strlen(tail));
-
- return file;
-}
-
-
-/**
- Reads a file from a server and saves it locally.
-
- @param[in,out] net The server to read from.
-
- @param[in] server_fname The name of the file that the server should
- read.
-
- @param[in] server_fname_len The length of server_fname.
-
- @param[in,out] file The file to write to.
-
- @retval ERROR_STOP An error occurred - the program should terminate.
- @retval OK_CONTINUE No error, the program should continue.
-*/
-Exit_status Load_log_processor::load_old_format_file(NET* net,
- const char*server_fname,
- uint server_fname_len,
- File file)
-{
- uchar buf[FN_REFLEN+1];
- buf[0] = 0;
- memcpy(buf + 1, server_fname, server_fname_len + 1);
- if (my_net_write(net, buf, server_fname_len +2) || net_flush(net))
- {
- error("Failed requesting the remote dump of %s.", server_fname);
- return ERROR_STOP;
- }
-
- for (;;)
- {
- ulong packet_len = my_net_read(net);
- if (packet_len == 0)
- {
- if (my_net_write(net, (uchar*) "", 0) || net_flush(net))
- {
- error("Failed sending the ack packet.");
- return ERROR_STOP;
- }
- /*
- we just need to send something, as the server will read but
- not examine the packet - this is because mysql_load() sends
- an OK when it is done
- */
- break;
- }
- else if (packet_len == packet_error)
- {
- error("Failed reading a packet during the dump of %s.", server_fname);
- return ERROR_STOP;
- }
-
- if (packet_len > UINT_MAX)
- {
- error("Illegal length of packet read from net.");
- return ERROR_STOP;
- }
- if (my_write(file, net->read_pos, (uint) packet_len, MYF(MY_WME|MY_NABP)))
- return ERROR_STOP;
- }
-
- return OK_CONTINUE;
-}
-
-
-/**
Process the first event in the sequence of events representing a
LOAD DATA statement.
@@ -556,8 +415,7 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
size_t blen,
const uchar *block,
size_t block_len,
- uint file_id,
- Create_file_log_event *ce)
+ uint file_id)
{
size_t full_len= target_dir_name_len + blen + 9 + 9 + 1;
Exit_status retval= OK_CONTINUE;
@@ -569,7 +427,6 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
if (!(fname= (char*) my_malloc(PSI_NOT_INSTRUMENTED, full_len,MYF(MY_WME))))
{
error("Out of memory.");
- delete ce;
DBUG_RETURN(ERROR_STOP);
}
@@ -584,12 +441,10 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
error("Could not construct local filename %s%s.",
target_dir_name,bname);
my_free(fname);
- delete ce;
DBUG_RETURN(ERROR_STOP);
}
rec.fname= fname;
- rec.event= ce;
/*
fname is freed in process_event()
@@ -600,13 +455,9 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
{
error("Out of memory.");
my_free(fname);
- delete ce;
DBUG_RETURN(ERROR_STOP);
}
- if (ce)
- ce->set_fname_outside_temp_buf(fname, strlen(fname));
-
if (my_write(file, (uchar*)block, block_len, MYF(MY_WME|MY_NABP)))
{
error("Failed writing to file.");
@@ -622,31 +473,11 @@ Exit_status Load_log_processor::process_first_event(const char *bname,
/**
- Process the given Create_file_log_event.
-
- @see Load_log_processor::process_first_event(const char*,uint,const char*,uint,uint,Create_file_log_event*)
-
- @param ce Create_file_log_event to process.
-
- @retval ERROR_STOP An error occurred - the program should terminate.
- @retval OK_CONTINUE No error, the program should continue.
-*/
-Exit_status Load_log_processor::process(Create_file_log_event *ce)
-{
- const char *bname= ce->fname + dirname_length(ce->fname);
- size_t blen= ce->fname_len - (bname-ce->fname);
-
- return process_first_event(bname, blen, ce->block, ce->block_len,
- ce->file_id, ce);
-}
-
-
-/**
Process the given Begin_load_query_log_event.
@see Load_log_processor::process_first_event(const char*,uint,const char*,uint,uint,Create_file_log_event*)
- @param ce Begin_load_query_log_event to process.
+ @param blqe Begin_load_query_log_event to process.
@retval ERROR_STOP An error occurred - the program should terminate.
@retval OK_CONTINUE No error, the program should continue.
@@ -654,7 +485,7 @@ Exit_status Load_log_processor::process(Create_file_log_event *ce)
Exit_status Load_log_processor::process(Begin_load_query_log_event *blqe)
{
return process_first_event("SQL_LOAD_MB", 11, blqe->block, blqe->block_len,
- blqe->file_id, 0);
+ blqe->file_id);
}
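
Every first LOAD DATA event now funnels through process_first_event() with the fixed "SQL_LOAD_MB" prefix; the replace_regex in mysql-test/main/mysqlbinlog.test (SQL_LOAD_MB-[0-9a-f]+-[0-9a-f]+, later in this diff) shows the resulting temporary names carry two hex fields. A rough, hedged approximation of that naming — the exact format lives in mysqlbinlog.cc, and the two numbers here merely stand in for the statement's file_id and a uniquifier:

```cpp
// Hedged approximation of the SQL_LOAD_MB-<hex>-<hex> temporary file names.
#include <cstdio>
#include <string>

static std::string tmp_load_name(const std::string &dir, unsigned file_id,
                                 unsigned seq)
{
  char suffix[32];
  snprintf(suffix, sizeof suffix, "SQL_LOAD_MB-%x-%x", file_id, seq);
  return dir + suffix;
}

int main()
{
  printf("%s\n", tmp_load_name("/tmp/", 0x2a, 1).c_str());  // /tmp/SQL_LOAD_MB-2a-1
  return 0;
}
```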
@@ -1245,41 +1076,6 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev,
break;
}
- case CREATE_FILE_EVENT:
- {
- Create_file_log_event* ce= (Create_file_log_event*)ev;
- /*
- We test if this event has to be ignored. If yes, we don't save
- this event; this will have the good side-effect of ignoring all
- related Append_block and Exec_load.
- Note that Load event from 3.23 is not tested.
- */
- if (shall_skip_database(ce->db))
- goto end; // Next event
- /*
- We print the event, but with a leading '#': this is just to inform
- the user of the original command; the command we want to execute
- will be a derivation of this original command (we will change the
- filename and use LOCAL), prepared in the 'case EXEC_LOAD_EVENT'
- below.
- */
- print_skip_replication_statement(print_event_info, ev);
- if (ce->print(result_file, print_event_info, TRUE))
- goto err;
- // If this binlog is not 3.23 ; why this test??
- if (glob_description_event->binlog_version >= 3)
- {
- /*
- transfer the responsibility for destroying the event to
- load_processor
- */
- ev= NULL;
- if ((retval= load_processor.process(ce)) != OK_CONTINUE)
- goto end;
- }
- break;
- }
-
case APPEND_BLOCK_EVENT:
/*
Append_block_log_events can safely print themselves even if
@@ -1293,36 +1089,6 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev,
goto end;
break;
- case EXEC_LOAD_EVENT:
- {
- if (ev->print(result_file, print_event_info))
- goto err;
- Execute_load_log_event *exv= (Execute_load_log_event*)ev;
- Create_file_log_event *ce= load_processor.grab_event(exv->file_id);
- /*
- if ce is 0, it probably means that we have not seen the Create_file
- event (a bad binlog, or most probably --start-position is after the
- Create_file event). Print a warning comment.
- */
- if (ce)
- {
- bool error;
- /*
- We must not convert earlier, since the file is used by
- my_open() in Load_log_processor::append().
- */
- convert_path_to_forward_slashes((char*) ce->fname);
- error= ce->print(result_file, print_event_info, TRUE);
- my_free((void*)ce->fname);
- delete ce;
- if (error)
- goto err;
- }
- else
- warning("Ignoring Execute_load_log_event as there is no "
- "Create_file event for file_id: %u", exv->file_id);
- break;
- }
case FORMAT_DESCRIPTION_EVENT:
delete glob_description_event;
glob_description_event= (Format_description_log_event*) ev;
@@ -1579,23 +1345,14 @@ Exit_status process_event(PRINT_EVENT_INFO *print_event_info, Log_event *ev,
destroy_evt= FALSE;
break;
}
- case PRE_GA_WRITE_ROWS_EVENT:
- case PRE_GA_DELETE_ROWS_EVENT:
- case PRE_GA_UPDATE_ROWS_EVENT:
- {
- Old_rows_log_event *e= (Old_rows_log_event*) ev;
- bool is_stmt_end= e->get_flags(Rows_log_event::STMT_END_F);
- if (print_row_event(print_event_info, ev, e->get_table_id(),
- e->get_flags(Old_rows_log_event::STMT_END_F)))
- goto err;
- DBUG_PRINT("info", ("is_stmt_end: %d", (int) is_stmt_end));
- if (!is_stmt_end && opt_flashback)
- destroy_evt= FALSE;
- break;
- }
case START_ENCRYPTION_EVENT:
glob_description_event->start_decryption((Start_encryption_log_event*)ev);
/* fall through */
+ case PRE_GA_WRITE_ROWS_EVENT:
+ case PRE_GA_DELETE_ROWS_EVENT:
+ case PRE_GA_UPDATE_ROWS_EVENT:
+ case CREATE_FILE_EVENT:
+ case EXEC_LOAD_EVENT:
default:
print_skip_replication_statement(print_event_info, ev);
if (ev->print(result_file, print_event_info))
@@ -2809,22 +2566,10 @@ static Exit_status check_master_version()
glob_description_event= NULL;
switch (version) {
- case 3:
- glob_description_event= new Format_description_log_event(1);
- break;
- case 4:
- glob_description_event= new Format_description_log_event(3);
- break;
case 5:
case 10:
case 11:
- /*
- The server is soon going to send us its Format_description log
- event, unless it is a 5.0 server with 3.23 or 4.0 binlogs.
- So we first assume that this is 4.0 (which is enough to read the
- Format_desc event if one comes).
- */
- glob_description_event= new Format_description_log_event(3);
+ glob_description_event= new Format_description_log_event(4);
break;
default:
error("Could not find server version: "
@@ -2883,8 +2628,6 @@ static Exit_status handle_event_text_mode(PRINT_EVENT_INFO *print_event_info,
}
Log_event_type type= ev->get_type_code();
- if (glob_description_event->binlog_version >= 3 ||
- (type != LOAD_EVENT && type != CREATE_FILE_EVENT))
{
/*
If this is a Rotate event, maybe it's the end of the requested binlog;
@@ -2943,31 +2686,6 @@ static Exit_status handle_event_text_mode(PRINT_EVENT_INFO *print_event_info,
if (retval != OK_CONTINUE)
DBUG_RETURN(retval);
}
- else
- {
- Load_log_event *le= (Load_log_event*)ev;
- const char *old_fname= le->fname;
- uint old_len= le->fname_len;
- File file;
- Exit_status retval;
- char fname[FN_REFLEN+1];
-
- if ((file= load_processor.prepare_new_file_for_old_format(le,fname)) < 0)
- {
- DBUG_RETURN(ERROR_STOP);
- }
-
- retval= process_event(print_event_info, ev, old_off, logname);
- if (retval != OK_CONTINUE)
- {
- my_close(file,MYF(MY_WME));
- DBUG_RETURN(retval);
- }
- retval= load_processor.load_old_format_file(net,old_fname,old_len,file);
- my_close(file,MYF(MY_WME));
- if (retval != OK_CONTINUE)
- DBUG_RETURN(retval);
- }
DBUG_RETURN(OK_CONTINUE);
}
@@ -3240,7 +2958,7 @@ static Exit_status check_header(IO_CACHE* file,
MY_STAT my_file_stat;
delete glob_description_event;
- if (!(glob_description_event= new Format_description_log_event(3)))
+ if (!(glob_description_event= new Format_description_log_event(4)))
{
error("Failed creating Format_description_log_event; out of memory?");
return ERROR_STOP;
@@ -3312,25 +3030,7 @@ static Exit_status check_header(IO_CACHE* file,
{
DBUG_PRINT("info",("buf[EVENT_TYPE_OFFSET=%d]=%d",
EVENT_TYPE_OFFSET, buf[EVENT_TYPE_OFFSET]));
- /* always test for a Start_v3, even if no --start-position */
- if (buf[EVENT_TYPE_OFFSET] == START_EVENT_V3)
- {
- /* This is 3.23 or 4.x */
- if (uint4korr(buf + EVENT_LEN_OFFSET) <
- (LOG_EVENT_MINIMAL_HEADER_LEN + START_V3_HEADER_LEN))
- {
- /* This is 3.23 (format 1) */
- delete glob_description_event;
- if (!(glob_description_event= new Format_description_log_event(1)))
- {
- error("Failed creating Format_description_log_event; "
- "out of memory?");
- return ERROR_STOP;
- }
- }
- break;
- }
- else if (tmp_pos >= start_position)
+ if (tmp_pos >= start_position)
break;
else if (buf[EVENT_TYPE_OFFSET] == FORMAT_DESCRIPTION_EVENT)
{
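
With the Start_v3 probe gone, check_header() simply walks fixed-size v4 event headers until it reaches --start-position, adopting any Format_description event it meets on the way. A standalone hedged sketch of that walk; the offsets (event type at byte 4, event length at byte 9 of the 19-byte header) are assumptions matching EVENT_TYPE_OFFSET and EVENT_LEN_OFFSET:

```cpp
#include <cstdint>
#include <cstdio>

static uint32_t uint4le(const unsigned char *p)
{ return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t) p[3] << 24); }

int main(int argc, char **argv)
{
  if (argc < 2)
    return 1;
  FILE *f= fopen(argv[1], "rb");
  if (!f)
    return 1;
  unsigned char magic[4], buf[19];
  if (fread(magic, 1, sizeof magic, f) != sizeof magic)
    return 1;
  long pos= 4;                               // first event starts after the magic
  while (fread(buf, 1, sizeof buf, f) == sizeof buf)
  {
    unsigned type= buf[4];                   // EVENT_TYPE_OFFSET
    uint32_t len= uint4le(buf + 9);          // EVENT_LEN_OFFSET (whole event)
    printf("pos=%ld type=%u len=%u\n", pos, type, len);
    if (len < sizeof buf)                    // corrupt header, stop scanning
      break;
    fseek(f, (long) len - (long) sizeof buf, SEEK_CUR);
    pos+= len;
  }
  fclose(f);
  return 0;
}
```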
@@ -3803,7 +3503,6 @@ struct encryption_service_st encryption_handler=
#include "password.c"
#include "log_event.cc"
#include "log_event_client.cc"
-#include "log_event_old.cc"
#include "rpl_utility.cc"
#include "sql_string.cc"
#include "sql_list.cc"
diff --git a/mysql-test/main/func_str.result b/mysql-test/main/func_str.result
index b912cfa7cd3..a477c3c2ec4 100644
--- a/mysql-test/main/func_str.result
+++ b/mysql-test/main/func_str.result
@@ -971,17 +971,17 @@ explain extended select length('\n\t\r\b\0\_\%\\');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select octet_length('\n \r\0008\0\\_\\%\\') AS `length('\n\t\r\b\0\_\%\\')`
+Note 1003 select octet_length('\n\t\r\b\0\\_\\%\\') AS `length('\n\t\r\b\0\_\%\\')`
explain extended select bit_length('\n\t\r\b\0\_\%\\');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select bit_length('\n \r\0008\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`
+Note 1003 select bit_length('\n\t\r\b\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`
explain extended select bit_length('\n\t\r\b\0\_\%\\');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
Warnings:
-Note 1003 select bit_length('\n \r\0008\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`
+Note 1003 select bit_length('\n\t\r\b\0\\_\\%\\') AS `bit_length('\n\t\r\b\0\_\%\\')`
explain extended select concat('monty',' was here ','again');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
diff --git a/mysql-test/main/mysqlbinlog.result b/mysql-test/main/mysqlbinlog.result
index 1d1f0a12c14..f940a271260 100644
--- a/mysql-test/main/mysqlbinlog.result
+++ b/mysql-test/main/mysqlbinlog.result
@@ -407,16 +407,26 @@ ROLLBACK /* added by mysqlbinlog */;
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
DELIMITER /*!*/;
ROLLBACK/*!*/;
-SET TIMESTAMP=1108844556/*!*/;
+use `test`/*!*/;
+SET TIMESTAMP=1140641973/*!*/;
SET @@session.pseudo_thread_id=999999999/*!*/;
+SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1, @@session.system_versioning_insert_history=0/*!*/;
+SET @@session.sql_mode=#/*!*/;
SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
+/*!\C latin1 *//*!*/;
+SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=#/*!*/;
SET @@session.lc_time_names=0/*!*/;
SET @@session.collation_database=DEFAULT/*!*/;
-BEGIN
+CREATE TABLE t1(c INT)
/*!*/;
-use `test`/*!*/;
-SET TIMESTAMP=1108844555/*!*/;
-insert t1 values (1)
+SET TIMESTAMP=1140641985/*!*/;
+CREATE TABLE t2(s CHAR(200))
+/*!*/;
+SET TIMESTAMP=1140642018/*!*/;
+CREATE TRIGGER trg1 AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES(CURRENT_USER())
+/*!*/;
+SET TIMESTAMP=1140642025/*!*/;
+INSERT INTO t1 VALUES(1)
/*!*/;
DELIMITER ;
# End of log file
@@ -427,16 +437,21 @@ ROLLBACK /* added by mysqlbinlog */;
/*!40019 SET @@session.max_delayed_threads=0*/;
/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
DELIMITER /*!*/;
-SET TIMESTAMP=1108844556/*!*/;
+ROLLBACK/*!*/;
+use `test`/*!*/;
+SET TIMESTAMP=1140642018/*!*/;
SET @@session.pseudo_thread_id=999999999/*!*/;
+SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1, @@session.system_versioning_insert_history=0/*!*/;
+SET @@session.sql_mode=#/*!*/;
SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
+/*!\C latin1 *//*!*/;
+SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=#/*!*/;
SET @@session.lc_time_names=0/*!*/;
SET @@session.collation_database=DEFAULT/*!*/;
-BEGIN
+CREATE TRIGGER trg1 AFTER INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES(CURRENT_USER())
/*!*/;
-use `test`/*!*/;
-SET TIMESTAMP=1108844555/*!*/;
-insert t1 values (1)
+SET TIMESTAMP=1140642025/*!*/;
+INSERT INTO t1 VALUES(1)
/*!*/;
DELIMITER ;
# End of log file
diff --git a/mysql-test/main/mysqlbinlog.test b/mysql-test/main/mysqlbinlog.test
index 4ef47978602..430cdb708cd 100644
--- a/mysql-test/main/mysqlbinlog.test
+++ b/mysql-test/main/mysqlbinlog.test
@@ -123,13 +123,13 @@ select "--- reading stdin --" as "";
--enable_query_log
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
--replace_regex /SQL_LOAD_MB-[0-9a-f]+-[0-9a-f]+/SQL_LOAD_MB-#-#/ /@@session.sql_mode=\d+/@@session.sql_mode=#/ /collation_server=\d+/collation_server=#/
---exec $MYSQL_BINLOG --short-form - < $MYSQL_TEST_DIR/std_data/trunc_binlog.000001
+--exec $MYSQL_BINLOG --short-form - < $MYSQL_TEST_DIR/std_data/bug16266.000001
--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR
--replace_regex /SQL_LOAD_MB-[0-9a-f]+-[0-9a-f]+/SQL_LOAD_MB-#-#/ /@@session.sql_mode=\d+/@@session.sql_mode=#/ /collation_server=\d+/collation_server=#/
# position is constant to correspond to an event in pre-recorded binlog
---let $binlog_start_pos=79
---exec $MYSQL_BINLOG --short-form --start-position=$binlog_start_pos - < $MYSQL_TEST_DIR/std_data/trunc_binlog.000001
+--let $binlog_start_pos=274
+--exec $MYSQL_BINLOG --short-form --start-position=$binlog_start_pos - < $MYSQL_TEST_DIR/std_data/bug16266.000001
drop table t1,t2;
diff --git a/mysql-test/std_data/bug47142_master-bin.000001 b/mysql-test/std_data/bug47142_master-bin.000001
deleted file mode 100644
index d1a089a784a..00000000000
--- a/mysql-test/std_data/bug47142_master-bin.000001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/std_data/master-bin.000001 b/mysql-test/std_data/master-bin.000001
deleted file mode 100644
index 2ec2397acdd..00000000000
--- a/mysql-test/std_data/master-bin.000001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/std_data/trunc_binlog.000001 b/mysql-test/std_data/trunc_binlog.000001
deleted file mode 100644
index 3da2490eab2..00000000000
--- a/mysql-test/std_data/trunc_binlog.000001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/r/binlog_base64_flag.result b/mysql-test/suite/binlog/r/binlog_base64_flag.result
index e325feb508b..4b75b712aee 100644
--- a/mysql-test/suite/binlog/r/binlog_base64_flag.result
+++ b/mysql-test/suite/binlog/r/binlog_base64_flag.result
@@ -1,7 +1,7 @@
call mtr.add_suppression("BINLOG_BASE64_EVENT: According to the master's version");
call mtr.add_suppression("BINLOG_BASE64_EVENT: Column 1 of table 'test.char128_utf8' cannot be converted");
-DROP TABLE IF EXISTS t1;
-==== Test BUG#32407 ====
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES (1), (1);
select * from t1;
a
1
@@ -49,35 +49,6 @@ a
SELECT @binlog_fragment_0, @binlog_fragment_1 as 'NULL','NULL';
@binlog_fragment_0 NULL NULL
NULL NULL NULL
-==== Test --base64-output=never on a binlog with row events ====
-/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=1*/;
-/*!40019 SET @@session.max_delayed_threads=0*/;
-/*!50003 SET @OLD_COMPLETION_TYPE=@@COMPLETION_TYPE,COMPLETION_TYPE=0*/;
-DELIMITER /*!*/;
-<#>
-ROLLBACK/*!*/;
-<#>
-use `test`/*!*/;
-SET TIMESTAMP=1196959712/*!*/;
-<#>SET @@session.foreign_key_checks=1, @@session.sql_auto_is_null=1, @@session.unique_checks=1, @@session.autocommit=1, @@session.check_constraint_checks=1, @@session.system_versioning_insert_history=0/*!*/;
-SET @@session.sql_mode=0/*!*/;
-SET @@session.auto_increment_increment=1, @@session.auto_increment_offset=1/*!*/;
-/*!\C latin1 *//*!*/;
-SET @@session.character_set_client=8,@@session.collation_connection=8,@@session.collation_server=8/*!*/;
-SET @@session.lc_time_names=0/*!*/;
-SET @@session.collation_database=DEFAULT/*!*/;
-create table t1 (a int) engine= myisam
-/*!*/;
-<#>
-<#>
-<#>
-<#>
-<#>
-DELIMITER ;
-# End of log file
-ROLLBACK /* added by mysqlbinlog */;
-/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
-/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;
==== Test non-matching FD event and Row event ====
BINLOG '
4CdYRw8BAAAAYgAAAGYAAAAAAAQANS4xLjE1LW5kYi02LjEuMjQtZGVidWctbG9nAAAAAAAAAAAA
diff --git a/mysql-test/suite/binlog/r/binlog_old_versions.result b/mysql-test/suite/binlog/r/binlog_old_versions.result
deleted file mode 100644
index 30b64535eb4..00000000000
--- a/mysql-test/suite/binlog/r/binlog_old_versions.result
+++ /dev/null
@@ -1,70 +0,0 @@
-==== Read binlog with v2 row events ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-1 one
-3 last stm in trx: next event should be xid
-4 four
-62046 random
-SELECT * FROM t2 ORDER BY a;
-a b
-3 first stm in trx
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t2, t3;
-==== Read modern binlog (version 5.1.23) ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-1 one
-3 last stm in trx: next event should be xid
-4 four
-674568 random
-SELECT * FROM t2 ORDER BY a;
-a b
-3 first stm in trx
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t2, t3;
-==== Read binlog from version 5.1.17 ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-1 one
-3 last stm in trx: next event should be xid
-4 four
-764247 random
-SELECT * FROM t2 ORDER BY a;
-a b
-3 first stm in trx
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t2, t3;
-==== Read binlog from version 4.1 ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-4 four
-190243 random
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t3;
-==== Read binlog from telco tree (mysql-5.1-telco-6.1) ====
-SELECT * FROM t1 ORDER BY a;
-a b
-0 last_insert_id
-1 one
-3 last stm in trx: next event should be xid
-4 four
-703356 random
-SELECT * FROM t2 ORDER BY a;
-a b
-3 first stm in trx
-SELECT COUNT(*) FROM t3;
-COUNT(*)
-17920
-DROP TABLE t1, t2, t3;
diff --git a/mysql-test/suite/binlog/std_data/binlog_old_version_4_1.000001 b/mysql-test/suite/binlog/std_data/binlog_old_version_4_1.000001
deleted file mode 100644
index 66db9668d46..00000000000
--- a/mysql-test/suite/binlog/std_data/binlog_old_version_4_1.000001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/bug32407.001 b/mysql-test/suite/binlog/std_data/bug32407.001
deleted file mode 100644
index c73243707ef..00000000000
--- a/mysql-test/suite/binlog/std_data/bug32407.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/ver_5_1-telco.001 b/mysql-test/suite/binlog/std_data/ver_5_1-telco.001
deleted file mode 100644
index 76856cb04a2..00000000000
--- a/mysql-test/suite/binlog/std_data/ver_5_1-telco.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/ver_5_1_17.001 b/mysql-test/suite/binlog/std_data/ver_5_1_17.001
deleted file mode 100644
index 9b6e200e492..00000000000
--- a/mysql-test/suite/binlog/std_data/ver_5_1_17.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/ver_5_1_23.001 b/mysql-test/suite/binlog/std_data/ver_5_1_23.001
deleted file mode 100644
index 0e9a9d1470a..00000000000
--- a/mysql-test/suite/binlog/std_data/ver_5_1_23.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/std_data/ver_trunk_row_v2.001 b/mysql-test/suite/binlog/std_data/ver_trunk_row_v2.001
deleted file mode 100644
index 28360beca68..00000000000
--- a/mysql-test/suite/binlog/std_data/ver_trunk_row_v2.001
+++ /dev/null
Binary files differ
diff --git a/mysql-test/suite/binlog/t/binlog_base64_flag.test b/mysql-test/suite/binlog/t/binlog_base64_flag.test
index 5311da54f5f..6935f69ba36 100644
--- a/mysql-test/suite/binlog/t/binlog_base64_flag.test
+++ b/mysql-test/suite/binlog/t/binlog_base64_flag.test
@@ -2,9 +2,6 @@
# work as expected, and that BINLOG statements with row events fail if
# they are not preceded by BINLOG statements with Format description
# events.
-#
-# See also BUG#32407.
-
# BINLOG statement does not work in embedded mode.
source include/not_embedded.inc;
@@ -12,23 +9,10 @@ source include/not_embedded.inc;
call mtr.add_suppression("BINLOG_BASE64_EVENT: According to the master's version");
call mtr.add_suppression("BINLOG_BASE64_EVENT: Column 1 of table 'test.char128_utf8' cannot be converted");
-disable_warnings;
-DROP TABLE IF EXISTS t1;
-enable_warnings;
-# Test to show BUG#32407. This reads a binlog created with the
-# mysql-5.1-telco-6.1 tree, specifically at the tag
-# mysql-5.1.15-ndb-6.1.23, and applies it to the database. The test
-# should fail before BUG#32407 was fixed and succeed afterwards.
---echo ==== Test BUG#32407 ====
-
-# The binlog contains row events equivalent to:
-# CREATE TABLE t1 (a int) engine = myisam
-# INSERT INTO t1 VALUES (1), (1)
-exec $MYSQL_BINLOG suite/binlog/std_data/bug32407.001 | $MYSQL;
-# The above line should succeed and t1 should contain two ones
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES (1), (1);
select * from t1;
-
# Test that a BINLOG statement encoding a row event fails unless a
# Format_description_event as been supplied with an earlier BINLOG
# statement.
@@ -92,14 +76,6 @@ select * from t1;
# show "one-shot" feature of binlog_fragment variables
SELECT @binlog_fragment_0, @binlog_fragment_1 as 'NULL','NULL';
-# New mysqlbinlog supports --base64-output=never
---echo ==== Test --base64-output=never on a binlog with row events ====
-
-# mysqlbinlog should fail
---replace_regex /#[0-9][0-9][0-9][0-9][0-9][0-9] \N*/<#>/ /SET \@\@session.pseudo_thread_id.*/<#>/
-exec $MYSQL_BINLOG --base64-output=never --print-row-count=0 --print-row-event-positions=0 suite/binlog/std_data/bug32407.001;
-
-
# Test that the following fails cleanly: "First, read a
# Format_description event which has N event types. Then, read an
# event of type M>N"
diff --git a/mysql-test/suite/binlog/t/binlog_old_versions.test b/mysql-test/suite/binlog/t/binlog_old_versions.test
deleted file mode 100644
index 130101541e3..00000000000
--- a/mysql-test/suite/binlog/t/binlog_old_versions.test
+++ /dev/null
@@ -1,153 +0,0 @@
-# Test that old binlog formats can be read.
-
-# Some previous versions of MySQL use their own binlog format,
-# especially in row-based replication. This test uses saved binlogs
-# from those old versions to test that we can replicate from old
-# versions to the present version.
-
-# Replicating from old versions to new versions is necessary in an
-# online upgrade scenario, where the .
-
-# The previous versions we currently test are:
-# - version 5.1.17 and earlier trees
-# - mysql-5.1-wl2325-xxx trees (AKA alcatel trees)
-# - mysql-5.1-telco-6.1 trees
-# For completeness, we also test mysql-5.1-new_rpl, which is supposed
-# to be the "correct" version.
-
-# All binlogs were generated with the same commands (listed at the end
-# of this test for reference). The binlogs contain the following
-# events: Table_map, Write_rows, Update_rows, Delete_rows Query, Xid,
-# User_var, Int_var, Rand, Begin_load, Append_file, Execute_load.
-
-# Related bugs: BUG#27779, BUG#31581, BUG#31582, BUG#31583, BUG#32407
-
-source include/not_embedded.inc;
-
---echo ==== Read binlog with v2 row events ====
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_trunk_row_v2.001 | $MYSQL --local-infile=1
-# Show result.
-SELECT * FROM t1 ORDER BY a;
-SELECT * FROM t2 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t2, t3;
-
-
---echo ==== Read modern binlog (version 5.1.23) ====
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_5_1_23.001 | $MYSQL --local-infile=1
-# Show result.
-SELECT * FROM t1 ORDER BY a;
-SELECT * FROM t2 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t2, t3;
-
-
---echo ==== Read binlog from version 5.1.17 ====
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_5_1_17.001 | $MYSQL --local-infile=1
-# Show result.
-SELECT * FROM t1 ORDER BY a;
-SELECT * FROM t2 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t2, t3;
-
-
---echo ==== Read binlog from version 4.1 ====
-
-# In this version, neither row-based binlogging nor Xid events
-# existed, so the binlog was generated without the "row-based tests"
-# part and the "get xid event" part, and it does not create table t2.
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/binlog_old_version_4_1.000001 | $MYSQL --local-infile=1
-# Show result.
-SELECT * FROM t1 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t3;
-
-
---echo ==== Read binlog from telco tree (mysql-5.1-telco-6.1) ====
-
-# Read binlog.
---exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_5_1-telco.001 | $MYSQL --local-infile=1
-# Show resulting tablea.
-SELECT * FROM t1 ORDER BY a;
-SELECT * FROM t2 ORDER BY a;
-SELECT COUNT(*) FROM t3;
-# Reset.
-DROP TABLE t1, t2, t3;
-
-
-#### The following commands were used to generate the binlogs ####
-#
-#source include/master-slave.inc;
-#
-## ==== initialize ====
-#USE test;
-#CREATE TABLE t1 (a int, b char(50)) ENGINE = MyISAM;
-#CREATE TABLE t2 (a int, b char(50)) ENGINE = InnoDB;
-#CREATE TABLE t3 (a char(20));
-#
-#
-## ==== row based tests ====
-#SET BINLOG_FORMAT='row';
-#
-## ---- get write, update, and delete rows events ----
-#INSERT INTO t1 VALUES (0, 'one'), (1, 'two');
-#UPDATE t1 SET a=a+1;
-#DELETE FROM t1 WHERE a=2;
-#
-#
-## ==== statement based tests ====
-#SET BINLOG_FORMAT = 'statement';
-#
-## ---- get xid events ----
-#BEGIN;
-#INSERT INTO t2 VALUES (3, 'first stm in trx');
-#INSERT INTO t1 VALUES (3, 'last stm in trx: next event should be xid');
-#COMMIT;
-#
-## ---- get user var events ----
-#SET @x = 4;
-#INSERT INTO t1 VALUES (@x, 'four');
-#
-## ---- get rand event ----
-#INSERT INTO t1 VALUES (RAND() * 1000000, 'random');
-#
-## ---- get intvar event ----
-#INSERT INTO t1 VALUES (LAST_INSERT_ID(), 'last_insert_id');
-#
-## ---- get begin, append and execute load events ----
-## double the file until we have more than 2^17 bytes, so that the
-## event has to be split and we can use Append_file_log_event.
-#
-#SET SQL_LOG_BIN=0;
-#CREATE TABLE temp (a char(20));
-#LOAD DATA INFILE '../std_data_ln/words.dat' INTO TABLE temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#INSERT INTO temp SELECT * FROM temp;
-#SELECT a FROM temp INTO OUTFILE 'big_file.dat';
-#DROP TABLE temp;
-#SET SQL_LOG_BIN=1;
-#
-#LOAD DATA INFILE 'big_file.dat' INTO TABLE t3;
-#
-#SELECT * FROM t1 ORDER BY a;
-#SELECT * FROM t2 ORDER BY a;
-#SELECT COUNT(*) FROM t3;
-#--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_cross_version.result b/mysql-test/suite/rpl/r/rpl_cross_version.result
deleted file mode 100644
index 1b67542c106..00000000000
--- a/mysql-test/suite/rpl/r/rpl_cross_version.result
+++ /dev/null
@@ -1,22 +0,0 @@
-include/master-slave.inc
-[connection master]
-==== Initialize ====
-connection slave;
-include/stop_slave.inc
-RESET SLAVE;
-include/setup_fake_relay_log.inc
-Setting up fake replication from MYSQL_TEST_DIR/suite/binlog/std_data/binlog_old_version_4_1.000001
-==== Test ====
-start slave sql_thread;
-include/wait_for_slave_param.inc [Exec_Master_Log_Pos]
-==== a prove that the fake has been processed successfully ====
-SELECT COUNT(*) - 17920 as zero FROM t3;
-zero
-0
-==== Clean up ====
-include/stop_slave_sql.inc
-include/cleanup_fake_relay_log.inc
-Warnings:
-Note 4190 RESET SLAVE is implicitly changing the value of 'Using_Gtid' from 'No' to 'Slave_Pos'
-drop table t1, t3;
-include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_cross_version-master.opt b/mysql-test/suite/rpl/t/rpl_cross_version-master.opt
deleted file mode 100644
index 815a8f81d32..00000000000
--- a/mysql-test/suite/rpl/t/rpl_cross_version-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---replicate-same-server-id --relay-log=slave-relay-bin
diff --git a/mysql-test/suite/rpl/t/rpl_cross_version.test b/mysql-test/suite/rpl/t/rpl_cross_version.test
deleted file mode 100644
index 94c9f0432ce..00000000000
--- a/mysql-test/suite/rpl/t/rpl_cross_version.test
+++ /dev/null
@@ -1,48 +0,0 @@
-# ==== Purpose ====
-#
-# Verify cross-version replication from an old master to the up-to-date slave
-#
-# ==== Implementation ====
-#
-# Feed to the slave server a binlog recorded on an old version master
-# via setting up slave-to-slave replication. The latter is done by means of
-# the opt file and include/setup_fake_relay_log.inc.
-# The master's binlog is treated as a relay log that the SQL thread executes.
-#
-
---source include/master-slave.inc
-
-#
-# Bug#31240 load data infile replication between (4.0 or 4.1) and 5.1 fails
-#
-
---echo ==== Initialize ====
---connection slave
-
---disable_query_log
-# The binlog contains the function RAND which is unsafe.
-CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---enable_query_log
-
---source include/stop_slave.inc
-RESET SLAVE;
-
-# the relay log contains create t1, t3 tables and load data infile
---let $fake_relay_log = $MYSQL_TEST_DIR/suite/binlog/std_data/binlog_old_version_4_1.000001
---source include/setup_fake_relay_log.inc
-
---echo ==== Test ====
-start slave sql_thread;
---let $slave_param = Exec_Master_Log_Pos
-# end_log_pos of the last event of the relay log
---let $slave_param_value = 149436
---source include/wait_for_slave_param.inc
---echo ==== a prove that the fake has been processed successfully ====
-SELECT COUNT(*) - 17920 as zero FROM t3;
-
---echo ==== Clean up ====
---source include/stop_slave_sql.inc
---source include/cleanup_fake_relay_log.inc
-drop table t1, t3;
---let $rpl_only_running_threads= 1
---source include/rpl_end.inc
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index 9b3beae637d..688c29cbe90 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -106,7 +106,6 @@ SET (SQL_SOURCE
key.cc log.cc lock.cc
log_event.cc log_event_server.cc
rpl_record.cc rpl_reporting.cc
- log_event_old.cc rpl_record_old.cc
mf_iocache.cc my_decimal.cc
mysqld.cc net_serv.cc keycaches.cc
../sql-common/client_plugin.c
diff --git a/sql/log_event.cc b/sql/log_event.cc
index b7a23d0d17f..788b2509964 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -728,40 +728,7 @@ Log_event::Log_event(const uchar *buf,
when_sec_part= ~0UL;
server_id= uint4korr(buf + SERVER_ID_OFFSET);
data_written= uint4korr(buf + EVENT_LEN_OFFSET);
- if (description_event->binlog_version==1)
- {
- log_pos= 0;
- flags= 0;
- return;
- }
- /* 4.0 or newer */
log_pos= uint4korr(buf + LOG_POS_OFFSET);
- /*
- If the log is 4.0 (so here it can only be a 4.0 relay log read by
- the SQL thread or a 4.0 master binlog read by the I/O thread),
- log_pos is the beginning of the event: we transform it into the end
- of the event, which is more useful.
- But how do you know that the log is 4.0: you know it if
- description_event is version 3 *and* you are not reading a
- Format_desc (remember that mysqlbinlog starts by assuming that 5.0
- logs are in 4.0 format, until it finds a Format_desc).
- */
- if (description_event->binlog_version==3 &&
- (uchar)buf[EVENT_TYPE_OFFSET]<FORMAT_DESCRIPTION_EVENT && log_pos)
- {
- /*
- If log_pos=0, don't change it. log_pos==0 is a marker to mean
- "don't change rli->group_master_log_pos" (see
- inc_group_relay_log_pos()). As it is unreal log_pos, adding the
- event len's is nonsense. For example, a fake Rotate event should
- not have its log_pos (which is 0) changed or it will modify
- Exec_master_log_pos in SHOW SLAVE STATUS, displaying a nonsense
- value of (a non-zero offset which does not exist in the master's
- binlog, so which will cause problems if the user uses this value
- in CHANGE MASTER).
- */
- log_pos+= data_written; /* purecov: inspected */
- }
DBUG_PRINT("info", ("log_pos: %llu", log_pos));
flags= uint2korr(buf + FLAGS_OFFSET);
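
With only v4 headers possible, the constructor reads log_pos and flags unconditionally at their fixed offsets; the old 4.0 fixup that turned a start-of-event log_pos into an end-of-event one is gone. A hedged, self-contained sketch of the fixed common header this now assumes — field order and offsets mirror the SERVER_ID/EVENT_LEN/LOG_POS/FLAGS offsets used above, and the struct is an illustration, not the server type:

```cpp
#include <cstdint>
#include <cstdio>

struct EventHeader                    // illustration only
{
  uint32_t when, server_id, data_written, log_pos;
  uint16_t flags;
  uint8_t  type;
};

static uint32_t uint4le(const unsigned char *p)
{ return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t) p[3] << 24); }

static EventHeader parse_header(const unsigned char *buf)
{
  EventHeader h;
  h.when= uint4le(buf + 0);                            // timestamp
  h.type= buf[4];                                      // EVENT_TYPE_OFFSET
  h.server_id= uint4le(buf + 5);                       // SERVER_ID_OFFSET
  h.data_written= uint4le(buf + 9);                    // EVENT_LEN_OFFSET
  h.log_pos= uint4le(buf + 13);                        // LOG_POS_OFFSET (end of event)
  h.flags= (uint16_t) (buf[17] | (buf[18] << 8));      // FLAGS_OFFSET
  return h;
}

int main()
{
  // A made-up 19-byte header: timestamp 0, type 15, server_id 1,
  // event_len 0x77, log_pos 0x7b, flags 0.
  const unsigned char buf[19]=
  { 0,0,0,0, 15, 1,0,0,0, 0x77,0,0,0, 0x7b,0,0,0, 0,0 };
  EventHeader h= parse_header(buf);
  printf("type=%u len=%u log_pos=%u\n",
         (unsigned) h.type, (unsigned) h.data_written, (unsigned) h.log_pos);
  return 0;
}
```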
@@ -966,7 +933,7 @@ err:
if (force_opt)
DBUG_RETURN(new Unknown_log_event());
#endif
- if (event.length() >= OLD_HEADER_LEN)
+ if (event.length() >= LOG_EVENT_MINIMAL_HEADER_LEN)
sql_print_error("Error in Log_event::read_log_event(): '%s',"
" data_len: %lu, event_type: %u", error,
(ulong) uint4korr(&event[EVENT_LEN_OFFSET]),
@@ -1128,12 +1095,6 @@ Log_event* Log_event::read_log_event(const uchar *buf, uint event_len,
ev= new Query_compressed_log_event(buf, event_len, fdle,
QUERY_COMPRESSED_EVENT);
break;
- case LOAD_EVENT:
- ev= new Load_log_event(buf, event_len, fdle);
- break;
- case NEW_LOAD_EVENT:
- ev= new Load_log_event(buf, event_len, fdle);
- break;
case ROTATE_EVENT:
ev= new Rotate_log_event(buf, event_len, fdle);
break;
@@ -1146,21 +1107,12 @@ Log_event* Log_event::read_log_event(const uchar *buf, uint event_len,
case GTID_LIST_EVENT:
ev= new Gtid_list_log_event(buf, event_len, fdle);
break;
- case CREATE_FILE_EVENT:
- ev= new Create_file_log_event(buf, event_len, fdle);
- break;
case APPEND_BLOCK_EVENT:
ev= new Append_block_log_event(buf, event_len, fdle);
break;
case DELETE_FILE_EVENT:
ev= new Delete_file_log_event(buf, event_len, fdle);
break;
- case EXEC_LOAD_EVENT:
- ev= new Execute_load_log_event(buf, event_len, fdle);
- break;
- case START_EVENT_V3: /* this is sent only by MySQL <=4.x */
- ev= new Start_log_event_v3(buf, event_len, fdle);
- break;
case STOP_EVENT:
ev= new Stop_log_event(buf, fdle);
break;
@@ -1183,15 +1135,6 @@ Log_event* Log_event::read_log_event(const uchar *buf, uint event_len,
ev= new Format_description_log_event(buf, event_len, fdle);
break;
#if defined(HAVE_REPLICATION)
- case PRE_GA_WRITE_ROWS_EVENT:
- ev= new Write_rows_log_event_old(buf, event_len, fdle);
- break;
- case PRE_GA_UPDATE_ROWS_EVENT:
- ev= new Update_rows_log_event_old(buf, event_len, fdle);
- break;
- case PRE_GA_DELETE_ROWS_EVENT:
- ev= new Delete_rows_log_event_old(buf, event_len, fdle);
- break;
case WRITE_ROWS_EVENT_V1:
case WRITE_ROWS_EVENT:
ev= new Write_rows_log_event(buf, event_len, fdle);
@@ -1247,6 +1190,14 @@ Log_event* Log_event::read_log_event(const uchar *buf, uint event_len,
case START_ENCRYPTION_EVENT:
ev= new Start_encryption_log_event(buf, event_len, fdle);
break;
+ case PRE_GA_WRITE_ROWS_EVENT:
+ case PRE_GA_UPDATE_ROWS_EVENT:
+ case PRE_GA_DELETE_ROWS_EVENT:
+ case START_EVENT_V3: /* this is sent only by MySQL <=4.x */
+ case CREATE_FILE_EVENT:
+ case EXEC_LOAD_EVENT:
+ case LOAD_EVENT:
+ case NEW_LOAD_EVENT:
default:
DBUG_PRINT("error",("Unknown event code: %d",
(uchar) buf[EVENT_TYPE_OFFSET]));
@@ -1427,11 +1378,10 @@ Query_log_event::Query_log_event(const uchar *buf, uint event_len,
flags2_inited(0), sql_mode_inited(0), charset_inited(0), flags2(0),
auto_increment_increment(1), auto_increment_offset(1),
time_zone_len(0), lc_time_names_number(0), charset_database_number(0),
- table_map_for_update(0), xid(0), master_data_written(0), gtid_flags_extra(0),
+ table_map_for_update(0), xid(0), gtid_flags_extra(0),
sa_seq_no(0)
{
ulong data_len;
- uint32 tmp;
uint8 common_header_len, post_header_len;
Log_event::Byte *start;
const Log_event::Byte *end;
@@ -1460,45 +1410,23 @@ Query_log_event::Query_log_event(const uchar *buf, uint event_len,
db_len = (uchar)buf[Q_DB_LEN_OFFSET]; // TODO: add a check of all *_len vars
error_code = uint2korr(buf + Q_ERR_CODE_OFFSET);
+ status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET);
/*
- 5.0 format starts here.
- Depending on the format, we may or not have affected/warnings etc
- The remnent post-header to be parsed has length:
+ Check if status variable length is corrupt and will lead to very
+ wrong data. We could be even more strict and require data_len to
+ be even bigger, but this will suffice to catch most corruption
+ errors that can lead to a crash.
*/
- tmp= post_header_len - QUERY_HEADER_MINIMAL_LEN;
- if (tmp)
- {
- status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET);
- /*
- Check if status variable length is corrupt and will lead to very
- wrong data. We could be even more strict and require data_len to
- be even bigger, but this will suffice to catch most corruption
- errors that can lead to a crash.
- */
- if (status_vars_len > MY_MIN(data_len, MAX_SIZE_LOG_EVENT_STATUS))
- {
- DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0",
- status_vars_len, data_len));
- query= 0;
- DBUG_VOID_RETURN;
- }
- data_len-= status_vars_len;
- DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u",
- (uint) status_vars_len));
- tmp-= 2;
- }
- else
+ if (status_vars_len > MY_MIN(data_len, MAX_SIZE_LOG_EVENT_STATUS))
{
- /*
- server version < 5.0 / binlog_version < 4 master's event is
- relay-logged with storing the original size of the event in
- Q_MASTER_DATA_WRITTEN_CODE status variable.
- The size is to be restored at reading Q_MASTER_DATA_WRITTEN_CODE-marked
- event from the relay log.
- */
- DBUG_ASSERT(description_event->binlog_version < 4);
- master_data_written= (uint32)data_written;
+ DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0",
+ status_vars_len, data_len));
+ query= 0;
+ DBUG_VOID_RETURN;
}
+ data_len-= status_vars_len;
+ DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u",
+ (uint) status_vars_len));
/*
We have parsed everything we know in the post header for QUERY_EVENT,
the rest of post header is either comes from older version MySQL or
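
Since a v4 post-header always carries status_vars_len, the corruption guard now runs unconditionally instead of only when the post-header is longer than QUERY_HEADER_MINIMAL_LEN. A hedged sketch of the guard itself; the cap is passed in here because the sketch does not assume the numeric value of MAX_SIZE_LOG_EVENT_STATUS:

```cpp
// Hedged sketch: a claimed status-vars length larger than the remaining
// event data (or the server-defined cap) marks the event as unusable
// instead of letting the parser run past the buffer.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static bool status_vars_len_ok(uint32_t status_vars_len, uint32_t data_len,
                               uint32_t max_status_size /* MAX_SIZE_LOG_EVENT_STATUS */)
{
  return status_vars_len <= std::min(data_len, max_status_size);
}

int main()
{
  // Example only: a 64 KiB claimed status block inside a 200-byte event fails.
  printf("%d\n", status_vars_len_ok(65535, 200, 4096));  // 0
  printf("%d\n", status_vars_len_ok(30, 200, 4096));     // 1
  return 0;
}
```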
@@ -1585,9 +1513,9 @@ Query_log_event::Query_log_event(const uchar *buf, uint event_len,
table_map_for_update= uint8korr(pos);
pos+= 8;
break;
- case Q_MASTER_DATA_WRITTEN_CODE:
+ case Q_MASTER_DATA_WRITTEN_CODE: // impossible
CHECK_SPACE(pos, end, 4);
- data_written= master_data_written= uint4korr(pos);
+ data_written= uint4korr(pos);
pos+= 4;
break;
case Q_INVOKER:
@@ -1991,32 +1919,6 @@ Query_log_event::begin_event(String *packet, ulong ev_offset,
}
-/**************************************************************************
- Start_log_event_v3 methods
-**************************************************************************/
-
-
-Start_log_event_v3::Start_log_event_v3(const uchar *buf, uint event_len,
- const Format_description_log_event
- *description_event)
- :Log_event(buf, description_event), binlog_version(BINLOG_VERSION)
-{
- if (event_len < LOG_EVENT_MINIMAL_HEADER_LEN + ST_COMMON_HEADER_LEN_OFFSET)
- {
- server_version[0]= 0;
- return;
- }
- buf+= LOG_EVENT_MINIMAL_HEADER_LEN;
- binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET);
- memcpy(server_version, buf+ST_SERVER_VER_OFFSET,
- ST_SERVER_VER_LEN);
- // prevent overrun if log is corrupted on disk
- server_version[ST_SERVER_VER_LEN-1]= 0;
- created= uint4korr(buf+ST_CREATED_OFFSET);
- dont_set_created= 1;
-}
-
-
/***************************************************************************
Format_description_log_event methods
****************************************************************************/
@@ -2040,10 +1942,10 @@ Start_log_event_v3::Start_log_event_v3(const uchar *buf, uint event_len,
Format_description_log_event::
Format_description_log_event(uint8 binlog_ver, const char* server_ver)
- :Start_log_event_v3(), event_type_permutation(0)
+ :Log_event(), created(0), binlog_version(binlog_ver),
+ dont_set_created(0), event_type_permutation(0)
{
- binlog_version= binlog_ver;
- switch (binlog_ver) {
+ switch (binlog_version) {
case 4: /* MySQL 5.0 */
memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
DBUG_EXECUTE_IF("pretend_version_50034_in_binlog",
@@ -2161,44 +2063,6 @@ Format_description_log_event(uint8 binlog_ver, const char* server_ver)
case 1: /* 3.23 */
case 3: /* 4.0.x x>=2 */
- /*
- We build an artificial (i.e. not sent by the master) event, which
- describes what those old master versions send.
- */
- if (binlog_ver==1)
- strmov(server_version, server_ver ? server_ver : "3.23");
- else
- strmov(server_version, server_ver ? server_ver : "4.0");
- common_header_len= binlog_ver==1 ? OLD_HEADER_LEN :
- LOG_EVENT_MINIMAL_HEADER_LEN;
- /*
- The first new event in binlog version 4 is Format_desc. So any event type
- after that does not exist in older versions. We use the events known by
- version 3, even if version 1 had only a subset of them (this is not a
- problem: it uses a few bytes for nothing but unifies code; it does not
- make the slave detect less corruptions).
- */
- number_of_event_types= FORMAT_DESCRIPTION_EVENT - 1;
- post_header_len=(uint8*) my_malloc(PSI_INSTRUMENT_ME,
- number_of_event_types*sizeof(uint8), MYF(0));
- if (post_header_len)
- {
- post_header_len[START_EVENT_V3-1]= START_V3_HEADER_LEN;
- post_header_len[QUERY_EVENT-1]= QUERY_HEADER_MINIMAL_LEN;
- post_header_len[STOP_EVENT-1]= 0;
- post_header_len[ROTATE_EVENT-1]= (binlog_ver==1) ? 0 : ROTATE_HEADER_LEN;
- post_header_len[INTVAR_EVENT-1]= 0;
- post_header_len[LOAD_EVENT-1]= LOAD_HEADER_LEN;
- post_header_len[SLAVE_EVENT-1]= 0;
- post_header_len[CREATE_FILE_EVENT-1]= CREATE_FILE_HEADER_LEN;
- post_header_len[APPEND_BLOCK_EVENT-1]= APPEND_BLOCK_HEADER_LEN;
- post_header_len[EXEC_LOAD_EVENT-1]= EXEC_LOAD_HEADER_LEN;
- post_header_len[DELETE_FILE_EVENT-1]= DELETE_FILE_HEADER_LEN;
- post_header_len[NEW_LOAD_EVENT-1]= post_header_len[LOAD_EVENT-1];
- post_header_len[RAND_EVENT-1]= 0;
- post_header_len[USER_VAR_EVENT-1]= 0;
- }
- break;
default: /* Includes binlog version 2 i.e. 4.0.x x<=1 */
post_header_len= 0; /* will make is_valid() fail */
break;
@@ -2232,14 +2096,26 @@ Format_description_log_event::
Format_description_log_event(const uchar *buf, uint event_len,
const Format_description_log_event*
description_event)
- :Start_log_event_v3(buf, event_len, description_event),
+ :Log_event(buf, description_event), binlog_version(BINLOG_VERSION),
common_header_len(0), post_header_len(NULL), event_type_permutation(0)
{
DBUG_ENTER("Format_description_log_event::Format_description_log_event(char*,...)");
- if (!Start_log_event_v3::is_valid())
- DBUG_VOID_RETURN; /* sanity check */
+ if (event_len < LOG_EVENT_MINIMAL_HEADER_LEN + ST_COMMON_HEADER_LEN_OFFSET)
+ {
+ server_version[0]= 0;
+ DBUG_VOID_RETURN;
+ }
buf+= LOG_EVENT_MINIMAL_HEADER_LEN;
- if ((common_header_len=buf[ST_COMMON_HEADER_LEN_OFFSET]) < OLD_HEADER_LEN)
+ binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET);
+ memcpy(server_version, buf+ST_SERVER_VER_OFFSET, ST_SERVER_VER_LEN);
+ // prevent overrun if log is corrupted on disk
+ server_version[ST_SERVER_VER_LEN-1]= 0;
+ created= uint4korr(buf+ST_CREATED_OFFSET);
+ dont_set_created= 1;
+
+ if (server_version[0] == 0)
+ DBUG_VOID_RETURN; /* sanity check */
+ if ((common_header_len=buf[ST_COMMON_HEADER_LEN_OFFSET]) < LOG_EVENT_MINIMAL_HEADER_LEN)
DBUG_VOID_RETURN; /* sanity check */
number_of_event_types=
event_len - (LOG_EVENT_MINIMAL_HEADER_LEN + ST_COMMON_HEADER_LEN_OFFSET + 1);
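
The rewritten constructor folds the old Start_log_event_v3 parsing into Format_description_log_event itself. A self-contained hedged sketch of the v4 layout it reads — the numeric offsets are assumptions mirroring ST_BINLOG_VER_OFFSET, ST_SERVER_VER_OFFSET/ST_SERVER_VER_LEN, ST_COMMON_HEADER_LEN_OFFSET and the number_of_event_types arithmetic visible in this hunk; struct and function names are made up for the sketch:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

static const size_t kCommonHeaderLen= 19;   // LOG_EVENT_MINIMAL_HEADER_LEN (assumed)
static const size_t kBinlogVerOff= 0;       // ST_BINLOG_VER_OFFSET
static const size_t kServerVerOff= 2;       // ST_SERVER_VER_OFFSET
static const size_t kServerVerLen= 50;      // ST_SERVER_VER_LEN
static const size_t kCommonHdrLenOff= 56;   // ST_COMMON_HEADER_LEN_OFFSET

struct FormatDescription                    // illustration only
{
  uint16_t binlog_version;
  char server_version[kServerVerLen + 1];
  uint8_t common_header_len;
  std::vector<uint8_t> post_header_len;     // one length per known event type
};

static bool parse_fde(const unsigned char *buf, size_t event_len,
                      FormatDescription *out)
{
  if (event_len < kCommonHeaderLen + kCommonHdrLenOff + 1)
    return false;
  const unsigned char *p= buf + kCommonHeaderLen;
  out->binlog_version= (uint16_t) (p[kBinlogVerOff] | (p[kBinlogVerOff + 1] << 8));
  std::memcpy(out->server_version, p + kServerVerOff, kServerVerLen);
  out->server_version[kServerVerLen]= 0;    // guard against on-disk corruption
  out->common_header_len= p[kCommonHdrLenOff];
  size_t number_of_event_types=
    event_len - (kCommonHeaderLen + kCommonHdrLenOff + 1);
  out->post_header_len.assign(p + kCommonHdrLenOff + 1,
                              p + kCommonHdrLenOff + 1 + number_of_event_types);
  return out->binlog_version >= 4 && out->common_header_len >= kCommonHeaderLen;
}

int main()
{
  // Build a minimal fake FDE: 19-byte header + 57-byte post-header + 4
  // post-header-length entries, zeroed except the fields we check.
  std::vector<unsigned char> buf(kCommonHeaderLen + kCommonHdrLenOff + 1 + 4, 0);
  unsigned char *p= buf.data() + kCommonHeaderLen;
  p[kBinlogVerOff]= 4;                                   // binlog version 4
  std::memcpy(p + kServerVerOff, "10.0.0-sketch", 14);
  p[kCommonHdrLenOff]= (unsigned char) kCommonHeaderLen;
  FormatDescription fde;
  return parse_fde(buf.data(), buf.size(), &fde) ? 0 : 1;
}
```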
@@ -2427,120 +2303,6 @@ Start_encryption_log_event(const uchar *buf, uint event_len,
}
-/**************************************************************************
- Load_log_event methods
- General note about Load_log_event: the binlogging of LOAD DATA INFILE is
- going to be changed in 5.0 (or maybe in 5.1; not decided yet).
- However, the 5.0 slave could still have to read such events (from a 4.x
- master), convert them (which just means maybe expand the header, when 5.0
- servers have a UID in events) (remember that whatever is after the header
- will be like in 4.x, as this event's format is not modified in 5.0 as we
- will use new types of events to log the new LOAD DATA INFILE features).
- To be able to read/convert, we just need to not assume that the common
- header is of length LOG_EVENT_HEADER_LEN (we must use the description
- event).
- Note that I (Guilhem) manually tested replication of a big LOAD DATA INFILE
- between 3.23 and 5.0, and between 4.0 and 5.0, and it works fine (and the
- positions displayed in SHOW SLAVE STATUS then are fine too).
-**************************************************************************/
-
-
-/**
- @note
- The caller must do buf[event_len]= 0 before he starts using the
- constructed event.
-*/
-
-Load_log_event::Load_log_event(const uchar *buf, uint event_len,
- const Format_description_log_event
- *description_event)
- :Log_event(buf, description_event), num_fields(0), fields(0),
- field_lens(0),field_block_len(0),
- table_name(0), db(0), fname(0), local_fname(FALSE),
- /*
- Load_log_event which comes from the binary log does not contain
- information about the type of insert which was used on the master.
- Assume that it was an ordinary, non-concurrent LOAD DATA.
- */
- is_concurrent(FALSE)
-{
- DBUG_ENTER("Load_log_event");
- /*
- I (Guilhem) manually tested replication of LOAD DATA INFILE for 3.23->5.0,
- 4.0->5.0 and 5.0->5.0 and it works.
- */
- if (event_len)
- copy_log_event(buf, event_len,
- (((uchar)buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ?
- LOAD_HEADER_LEN +
- description_event->common_header_len :
- LOAD_HEADER_LEN + LOG_EVENT_HEADER_LEN),
- description_event);
- /* otherwise it's a derived class, will call copy_log_event() itself */
- DBUG_VOID_RETURN;
-}
-
-
-/*
- Load_log_event::copy_log_event()
-*/
-
-int Load_log_event::copy_log_event(const uchar *buf, ulong event_len,
- int body_offset,
- const Format_description_log_event
- *description_event)
-{
- DBUG_ENTER("Load_log_event::copy_log_event");
- uint data_len;
- if ((int) event_len <= body_offset)
- DBUG_RETURN(1);
- const uchar *buf_end= buf + event_len;
- /* this is the beginning of the post-header */
- const uchar *data_head= buf + description_event->common_header_len;
- thread_id= slave_proxy_id= uint4korr(data_head + L_THREAD_ID_OFFSET);
- exec_time= uint4korr(data_head + L_EXEC_TIME_OFFSET);
- skip_lines= uint4korr(data_head + L_SKIP_LINES_OFFSET);
- table_name_len= (uint)data_head[L_TBL_LEN_OFFSET];
- db_len= (uint)data_head[L_DB_LEN_OFFSET];
- num_fields= uint4korr(data_head + L_NUM_FIELDS_OFFSET);
-
- /*
- Sql_ex.init() on success returns the pointer to the first byte after
- the sql_ex structure, which is the start of field lengths array.
- */
- if (!(field_lens= (uchar*) sql_ex.init(buf + body_offset, buf_end,
- buf[EVENT_TYPE_OFFSET] != LOAD_EVENT)))
- DBUG_RETURN(1);
-
- data_len= event_len - body_offset;
- if (num_fields > data_len) // simple sanity check against corruption
- DBUG_RETURN(1);
- for (uint i= 0; i < num_fields; i++)
- field_block_len+= (uint)field_lens[i] + 1;
-
- fields= (char*) field_lens + num_fields;
- table_name= fields + field_block_len;
- if (strlen(table_name) > NAME_LEN)
- goto err;
-
- db= table_name + table_name_len + 1;
- DBUG_EXECUTE_IF("simulate_invalid_address", db_len= data_len;);
- fname= db + db_len + 1;
- if ((db_len > data_len) || (fname > (char*) buf_end))
- goto err;
- fname_len= (uint) strlen(fname);
- if ((fname_len > data_len) || (fname + fname_len > (char*) buf_end))
- goto err;
- // null termination is accomplished by the caller doing buf[event_len]=0
-
- DBUG_RETURN(0);
-
-err:
- // Invalid event.
- table_name= 0;
- DBUG_RETURN(1);
-}
-
/**************************************************************************
Rotate_log_event methods
@@ -3021,68 +2783,6 @@ err:
/**************************************************************************
- Create_file_log_event methods
-**************************************************************************/
-
-/*
- Create_file_log_event ctor
-*/
-
-Create_file_log_event::
-Create_file_log_event(const uchar *buf, uint len,
- const Format_description_log_event* description_event)
- :Load_log_event(buf,0,description_event),fake_base(0),block(0),
- inited_from_old(0)
-{
- DBUG_ENTER("Create_file_log_event::Create_file_log_event(char*,...)");
- uint block_offset;
- uint header_len= description_event->common_header_len;
- uint8 load_header_len= description_event->post_header_len[LOAD_EVENT-1];
- uint8 create_file_header_len= description_event->post_header_len[CREATE_FILE_EVENT-1];
- if (!(event_buf= (uchar*) my_memdup(PSI_INSTRUMENT_ME, buf, len,
- MYF(MY_WME))) ||
- copy_log_event(event_buf,len,
- (((uchar)buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ?
- load_header_len + header_len :
- (fake_base ? (header_len+load_header_len) :
- (header_len+load_header_len) +
- create_file_header_len)),
- description_event))
- DBUG_VOID_RETURN;
- if (description_event->binlog_version!=1)
- {
- file_id= uint4korr(buf +
- header_len +
- load_header_len + CF_FILE_ID_OFFSET);
- /*
- Note that it's ok to use get_data_size() below, because it is computed
- with values we have already read from this event (because we called
- copy_log_event()); we are not using slave's format info to decode
- master's format, we are really using master's format info.
- Anyway, both formats should be identical (except the common_header_len)
- as these Load events are not changed between 4.0 and 5.0 (as logging of
- LOAD DATA INFILE does not use Load_log_event in 5.0).
-
- The + 1 is for \0 terminating fname
- */
- block_offset= (description_event->common_header_len +
- Load_log_event::get_data_size() +
- create_file_header_len + 1);
- if (len < block_offset)
- DBUG_VOID_RETURN;
- block= const_cast<uchar*>(buf) + block_offset;
- block_len= len - block_offset;
- }
- else
- {
- sql_ex.force_new_format();
- inited_from_old= 1;
- }
- DBUG_VOID_RETURN;
-}
-
-
-/**************************************************************************
Append_block_log_event methods
**************************************************************************/
@@ -3131,27 +2831,6 @@ Delete_file_log_event(const uchar *buf, uint len,
/**************************************************************************
- Execute_load_log_event methods
-**************************************************************************/
-
-/*
- Execute_load_log_event ctor
-*/
-
-Execute_load_log_event::
-Execute_load_log_event(const uchar *buf, uint len,
- const Format_description_log_event* description_event)
- :Log_event(buf, description_event), file_id(0)
-{
- uint8 common_header_len= description_event->common_header_len;
- uint8 exec_load_header_len= description_event->post_header_len[EXEC_LOAD_EVENT-1];
- if (len < (uint)(common_header_len+exec_load_header_len))
- return;
- file_id= uint4korr(buf + common_header_len + EL_FILE_ID_OFFSET);
-}
-
-
-/**************************************************************************
Begin_load_query_log_event methods
**************************************************************************/
diff --git a/sql/log_event.h b/sql/log_event.h
index 4976fcaaa17..40fdd65c171 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -169,21 +169,17 @@ class String;
See the #defines below for the format specifics.
The events which really update data are Query_log_event,
- Execute_load_query_log_event and old Load_log_event and
- Execute_load_log_event events (Execute_load_query is used together with
- Begin_load_query and Append_block events to replicate LOAD DATA INFILE.
- Create_file/Append_block/Execute_load (which includes Load_log_event)
- were used to replicate LOAD DATA before the 5.0.3).
+  Execute_load_query_log_event and Execute_load_log_event events
+  (Execute_load_query is used together with Begin_load_query and Append_block
+  events to replicate LOAD DATA INFILE).
****************************************************************************/
#define LOG_EVENT_HEADER_LEN 19 /* the fixed header length */
-#define OLD_HEADER_LEN 13 /* the fixed header length in 3.23 */
/*
- Fixed header length, where 4.x and 5.0 agree. That is, 5.0 may have a longer
- header (it will for sure when we have the unique event's ID), but at least
- the first 19 bytes are the same in 4.x and 5.0. So when we have the unique
- event's ID, LOG_EVENT_HEADER_LEN will be something like 26, but
+  Fixed header length. That is, a future version may have a longer
+  header, but at least the first 19 bytes will stay the same. So
+  LOG_EVENT_HEADER_LEN may then grow to something like 26, while
LOG_EVENT_MINIMAL_HEADER_LEN will remain 19.
*/
#define LOG_EVENT_MINIMAL_HEADER_LEN 19
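Editor's note: the 19-byte minimal header referred to above is, in the conventional v4 binlog layout, timestamp (4), type code (1), server id (4), total event length (4), next-event position (4) and flags (2). A tiny illustrative sketch of that accounting follows; the struct is documentation only (its in-memory sizeof may differ because of padding) and is not the server's parser.

#include <cstdint>
#include <cstdio>

// Illustrative v4 common-header fields; 4+1+4+4+4+2 = 19 bytes on the wire.
struct Common_header_sketch
{
  uint32_t timestamp;   // seconds since epoch
  uint8_t  type_code;   // Log_event_type
  uint32_t server_id;   // originating server
  uint32_t event_len;   // total size, header included
  uint32_t next_pos;    // offset of the next event in the binlog
  uint16_t flags;
};

int main()
{
  printf("minimal header bytes on the wire: %d\n", 4 + 1 + 4 + 4 + 4 + 2); // 19
  return 0;
}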
@@ -604,11 +600,6 @@ enum Log_event_type
APPEND_BLOCK_EVENT= 9,
EXEC_LOAD_EVENT= 10,
DELETE_FILE_EVENT= 11,
- /*
- NEW_LOAD_EVENT is like LOAD_EVENT except that it has a longer
- sql_ex, allowing multibyte TERMINATED BY etc; both types share the
- same class (Load_log_event)
- */
NEW_LOAD_EVENT= 12,
RAND_EVENT= 13,
USER_VAR_EVENT= 14,
@@ -2098,10 +2089,9 @@ public:
uint16 error_code;
my_thread_id thread_id;
/*
- For events created by Query_log_event::do_apply_event (and
- Load_log_event::do_apply_event()) we need the *original* thread
- id, to be able to log the event with the original (=master's)
- thread id (fix for BUG#1686).
+ For events created by Query_log_event::do_apply_event we need the
+ *original* thread id, to be able to log the event with the original
+ (=master's) thread id (fix for BUG#1686).
*/
ulong slave_proxy_id;
@@ -2125,12 +2115,6 @@ public:
'sql_mode', 'affected' etc. Sometimes 'value' must be a short string, so
its first byte is its length. For now the order of status vars is:
flags2 - sql_mode - catalog - autoinc - charset
- We should add the same thing to Load_log_event, but in fact
- LOAD DATA INFILE is going to be logged with a new type of event (logging of
- the plain text query), so Load_log_event would be frozen, so no need. The
- new way of logging LOAD DATA INFILE would use a derived class of
- Query_log_event, so automatically benefit from the work already done for
- status variables in Query_log_event.
*/
uint16 status_vars_len;
@@ -2163,16 +2147,6 @@ public:
/* Xid for the event, if such exists */
ulonglong xid;
/*
- Holds the original length of a Query_log_event that comes from a
- master of version < 5.0 (i.e., binlog_version < 4). When the IO
- thread writes the relay log, it augments the Query_log_event with a
- Q_MASTER_DATA_WRITTEN_CODE status_var that holds the original event
- length. This field is initialized to non-zero in the SQL thread when
- it reads this augmented event. SQL thread does not write
- Q_MASTER_DATA_WRITTEN_CODE to the slave's server binlog.
- */
- uint32 master_data_written;
- /*
A copy of Gtid event's extra flags that is relevant for two-phase
logged ALTER.
*/
@@ -2331,413 +2305,6 @@ struct sql_ex_info
};
/**
- @class Load_log_event
-
- This log event corresponds to a "LOAD DATA INFILE" SQL query on the
- following form:
-
- @verbatim
- (1) USE db;
- (2) LOAD DATA [CONCURRENT] [LOCAL] INFILE 'file_name'
- (3) [REPLACE | IGNORE]
- (4) INTO TABLE 'table_name'
- (5) [FIELDS
- (6) [TERMINATED BY 'field_term']
- (7) [[OPTIONALLY] ENCLOSED BY 'enclosed']
- (8) [ESCAPED BY 'escaped']
- (9) ]
- (10) [LINES
- (11) [TERMINATED BY 'line_term']
- (12) [LINES STARTING BY 'line_start']
- (13) ]
- (14) [IGNORE skip_lines LINES]
- (15) (field_1, field_2, ..., field_n)@endverbatim
-
- @section Load_log_event_binary_format Binary Format
-
- The Post-Header consists of the following six components.
-
- <table>
- <caption>Post-Header for Load_log_event</caption>
-
- <tr>
- <th>Name</th>
- <th>Format</th>
- <th>Description</th>
- </tr>
-
- <tr>
- <td>slave_proxy_id</td>
- <td>4 byte unsigned integer</td>
- <td>An integer identifying the client thread that issued the
- query. The id is unique per server. (Note, however, that two
- threads on different servers may have the same slave_proxy_id.)
- This is used when a client thread creates a temporary table local
- to the client. The slave_proxy_id is used to distinguish
- temporary tables that belong to different clients.
- </td>
- </tr>
-
- <tr>
- <td>exec_time</td>
- <td>4 byte unsigned integer</td>
- <td>The time from when the query started to when it was logged in
- the binlog, in seconds.</td>
- </tr>
-
- <tr>
- <td>skip_lines</td>
- <td>4 byte unsigned integer</td>
- <td>The number on line (14) above, if present, or 0 if line (14)
- is left out.
- </td>
- </tr>
-
- <tr>
- <td>table_name_len</td>
- <td>1 byte unsigned integer</td>
- <td>The length of 'table_name' on line (4) above.</td>
- </tr>
-
- <tr>
- <td>db_len</td>
- <td>1 byte unsigned integer</td>
- <td>The length of 'db' on line (1) above.</td>
- </tr>
-
- <tr>
- <td>num_fields</td>
- <td>4 byte unsigned integer</td>
- <td>The number n of fields on line (15) above.</td>
- </tr>
- </table>
-
- The Body contains the following components.
-
- <table>
- <caption>Body of Load_log_event</caption>
-
- <tr>
- <th>Name</th>
- <th>Format</th>
- <th>Description</th>
- </tr>
-
- <tr>
- <td>sql_ex</td>
- <td>variable length</td>
-
- <td>Describes the part of the query on lines (3) and
- (5)&ndash;(13) above. More precisely, it stores the five strings
- (on lines) field_term (6), enclosed (7), escaped (8), line_term
- (11), and line_start (12); as well as a bitfield indicating the
- presence of the keywords REPLACE (3), IGNORE (3), and OPTIONALLY
- (7).
-
- The data is stored in one of two formats, called "old" and "new".
- The type field of Common-Header determines which of these two
- formats is used: type LOAD_EVENT means that the old format is
- used, and type NEW_LOAD_EVENT means that the new format is used.
- When MySQL writes a Load_log_event, it uses the new format if at
- least one of the five strings is two or more bytes long.
- Otherwise (i.e., if all strings are 0 or 1 bytes long), the old
- format is used.
-
- The new and old format differ in the way the five strings are
- stored.
-
- <ul>
- <li> In the new format, the strings are stored in the order
- field_term, enclosed, escaped, line_term, line_start. Each string
- consists of a length (1 byte), followed by a sequence of
- characters (0-255 bytes). Finally, a boolean combination of the
- following flags is stored in 1 byte: REPLACE_FLAG==0x4,
- IGNORE_FLAG==0x8, and OPT_ENCLOSED_FLAG==0x2. If a flag is set,
- it indicates the presence of the corresponding keyword in the SQL
- query.
-
- <li> In the old format, we know that each string has length 0 or
- 1. Therefore, only the first byte of each string is stored. The
- order of the strings is the same as in the new format. These five
- bytes are followed by the same 1 byte bitfield as in the new
- format. Finally, a 1 byte bitfield called empty_flags is stored.
- The low 5 bits of empty_flags indicate which of the five strings
- have length 0. For each of the following flags that is set, the
- corresponding string has length 0; for the flags that are not set,
- the string has length 1: FIELD_TERM_EMPTY==0x1,
- ENCLOSED_EMPTY==0x2, LINE_TERM_EMPTY==0x4, LINE_START_EMPTY==0x8,
- ESCAPED_EMPTY==0x10.
- </ul>
-
- Thus, the size of the new format is 6 bytes + the sum of the sizes
- of the five strings. The size of the old format is always 7
- bytes.
- </td>
- </tr>
-
- <tr>
- <td>field_lens</td>
- <td>num_fields 1 byte unsigned integers</td>
- <td>An array of num_fields integers representing the length of
- each field in the query. (num_fields is from the Post-Header).
- </td>
- </tr>
-
- <tr>
- <td>fields</td>
- <td>num_fields null-terminated strings</td>
- <td>An array of num_fields null-terminated strings, each
- representing a field in the query. (The trailing zero is
- redundant, since the length are stored in the num_fields array.)
- The total length of all strings equals to the sum of all
- field_lens, plus num_fields bytes for all the trailing zeros.
- </td>
- </tr>
-
- <tr>
- <td>table_name</td>
- <td>null-terminated string of length table_len+1 bytes</td>
- <td>The 'table_name' from the query, as a null-terminated string.
- (The trailing zero is actually redundant since the table_len is
- known from Post-Header.)
- </td>
- </tr>
-
- <tr>
- <td>db</td>
- <td>null-terminated string of length db_len+1 bytes</td>
- <td>The 'db' from the query, as a null-terminated string.
- (The trailing zero is actually redundant since the db_len is known
- from Post-Header.)
- </td>
- </tr>
-
- <tr>
- <td>file_name</td>
- <td>variable length string without trailing zero, extending to the
- end of the event (determined by the length field of the
- Common-Header)
- </td>
- <td>The 'file_name' from the query.
- </td>
- </tr>
-
- </table>
-
- @subsection Load_log_event_notes_on_previous_versions Notes on Previous Versions
-
- This event type is understood by current versions, but only
- generated by MySQL 3.23 and earlier.
-*/
-class Load_log_event: public Log_event
-{
-private:
-protected:
- int copy_log_event(const uchar *buf, ulong event_len,
- int body_offset,
- const Format_description_log_event* description_event);
-
-public:
- bool print_query(THD *thd, bool need_db, const char *cs, String *buf,
- my_off_t *fn_start, my_off_t *fn_end,
- const char *qualify_db);
- my_thread_id thread_id;
- ulong slave_proxy_id;
- uint32 table_name_len;
- /*
- No need to have a catalog, as these events can only come from 4.x.
- TODO: this may become false if Dmitri pushes his new LOAD DATA INFILE in
- 5.0 only (not in 4.x).
- */
- uint32 db_len;
- uint32 fname_len;
- uint32 num_fields;
- const char* fields;
- const uchar* field_lens;
- uint32 field_block_len;
-
- const char* table_name;
- const char* db;
- const char* fname;
- uint32 skip_lines;
- sql_ex_info sql_ex;
- bool local_fname;
- /**
- Indicates that this event corresponds to LOAD DATA CONCURRENT,
-
- @note Since Load_log_event event coming from the binary log
- lacks information whether LOAD DATA on master was concurrent
- or not, this flag is only set to TRUE for an auxiliary
- Load_log_event object which is used in mysql_load() to
- re-construct LOAD DATA statement from function parameters,
- for logging.
- */
- bool is_concurrent;
-
- /* fname doesn't point to memory inside Log_event::temp_buf */
- void set_fname_outside_temp_buf(const char *afname, size_t alen)
- {
- fname= afname;
- fname_len= (uint)alen;
- local_fname= TRUE;
- }
- /* fname doesn't point to memory inside Log_event::temp_buf */
- int check_fname_outside_temp_buf()
- {
- return local_fname;
- }
-
-#ifdef MYSQL_SERVER
- String field_lens_buf;
- String fields_buf;
-
- Load_log_event(THD* thd, const sql_exchange* ex, const char* db_arg,
- const char* table_name_arg,
- List<Item>& fields_arg,
- bool is_concurrent_arg,
- enum enum_duplicates handle_dup, bool ignore,
- bool using_trans);
- void set_fields(const char* db, List<Item> &fields_arg,
- Name_resolution_context *context);
- const char* get_db() { return db; }
-#ifdef HAVE_REPLICATION
- void pack_info(Protocol* protocol);
-#endif /* HAVE_REPLICATION */
-#else
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool commented);
-#endif
-
- /*
- Note that for all the events related to LOAD DATA (Load_log_event,
- Create_file/Append/Exec/Delete, we pass description_event; however as
- logging of LOAD DATA is going to be changed in 4.1 or 5.0, this is only used
- for the common_header_len (post_header_len will not be changed).
- */
- Load_log_event(const uchar *buf, uint event_len,
- const Format_description_log_event* description_event);
- ~Load_log_event()
- {}
- Log_event_type get_type_code()
- {
- return sql_ex.new_format() ? NEW_LOAD_EVENT: LOAD_EVENT;
- }
-#ifdef MYSQL_SERVER
- bool write_data_header();
- bool write_data_body();
-#endif
- bool is_valid() const { return table_name != 0; }
- int get_data_size()
- {
- return (table_name_len + db_len + 2 + fname_len
- + LOAD_HEADER_LEN
- + sql_ex.data_size() + field_block_len + num_fields);
- }
-
-public: /* !!! Public in this patch to allow old usage */
-#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi)
- {
- return do_apply_event(thd->slave_net,rgi,0);
- }
-
- int do_apply_event(NET *net, rpl_group_info *rgi,
- bool use_rli_only_for_errors);
-#endif
-};
-
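Editor's note: the deleted format description above distinguishes an "old" and a "new" sql_ex layout; the new one stores five length-prefixed strings (field_term, enclosed, escaped, line_term, line_start) followed by one flags byte, with REPLACE_FLAG==0x4, IGNORE_FLAG==0x8 and OPT_ENCLOSED_FLAG==0x2. Below is a minimal sketch of decoding that new layout under those assumptions; decode_new_sql_ex() is hypothetical and is not the server's sql_ex_info::init().

#include <cstdio>
#include <string>
#include <vector>

// Decode the "new" sql_ex layout: five strings, each stored as
// <1-byte length><bytes>, followed by one flags byte.
static bool decode_new_sql_ex(const unsigned char *p, const unsigned char *end,
                              std::vector<std::string> *strings, unsigned *flags)
{
  for (int i= 0; i < 5; i++)
  {
    if (p >= end) return false;
    unsigned len= *p++;
    if (p + len > end) return false;
    strings->emplace_back(reinterpret_cast<const char*>(p), len);
    p+= len;
  }
  if (p >= end) return false;
  *flags= *p;
  return true;
}

int main()
{
  // field_term=",", enclosed="", escaped="\\", line_term="\n", line_start="",
  // flags= OPT_ENCLOSED_FLAG (0x2).
  const unsigned char buf[]= {1,',', 0, 1,'\\', 1,'\n', 0, 0x2};
  std::vector<std::string> s;
  unsigned flags= 0;
  if (decode_new_sql_ex(buf, buf + sizeof(buf), &s, &flags))
    printf("field_term='%s' line_term_len=%zu flags=0x%x\n",
           s[0].c_str(), s[3].size(), flags);
  return 0;
}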
-/**
- @class Start_log_event_v3
-
- Start_log_event_v3 is the Start_log_event of binlog format 3 (MySQL 3.23 and
- 4.x).
-
- Format_description_log_event derives from Start_log_event_v3; it is
- the Start_log_event of binlog format 4 (MySQL 5.0), that is, the
- event that describes the other events' Common-Header/Post-Header
- lengths. This event is sent by MySQL 5.0 whenever it starts sending
- a new binlog if the requested position is >4 (otherwise if ==4 the
- event will be sent naturally).
-
- @section Start_log_event_v3_binary_format Binary Format
-*/
-class Start_log_event_v3: public Log_event
-{
-public:
- /*
- If this event is at the start of the first binary log since server
- startup 'created' should be the timestamp when the event (and the
- binary log) was created. In the other case (i.e. this event is at
- the start of a binary log created by FLUSH LOGS or automatic
- rotation), 'created' should be 0. This "trick" is used by MySQL
- >=4.0.14 slaves to know whether they must drop stale temporary
- tables and whether they should abort unfinished transaction.
-
- Note that when 'created'!=0, it is always equal to the event's
- timestamp; indeed Start_log_event is written only in log.cc where
- the first constructor below is called, in which 'created' is set
- to 'when'. So in fact 'created' is a useless variable. When it is
- 0 we can read the actual value from timestamp ('when') and when it
- is non-zero we can read the same value from timestamp
- ('when'). Conclusion:
- - we use timestamp to print when the binlog was created.
- - we use 'created' only to know if this is a first binlog or not.
- In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in
- 3.23.57 does not print 'created the_date' if created was zero. This is now
- fixed.
- */
- time_t created;
- uint16 binlog_version;
- char server_version[ST_SERVER_VER_LEN];
- /*
- We set this to 1 if we don't want to have the created time in the log,
- which is the case when we rollover to a new log.
- */
- bool dont_set_created;
-
-#ifdef MYSQL_SERVER
- Start_log_event_v3();
-#ifdef HAVE_REPLICATION
- void pack_info(Protocol* protocol);
-#endif /* HAVE_REPLICATION */
-#else
- Start_log_event_v3() {}
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
-#endif
-
- Start_log_event_v3(const uchar *buf, uint event_len,
- const Format_description_log_event* description_event);
- ~Start_log_event_v3() {}
- Log_event_type get_type_code() { return START_EVENT_V3;}
- my_off_t get_header_len(my_off_t l __attribute__((unused)))
- { return LOG_EVENT_MINIMAL_HEADER_LEN; }
-#ifdef MYSQL_SERVER
- bool write();
-#endif
- bool is_valid() const { return server_version[0] != 0; }
- int get_data_size()
- {
- return START_V3_HEADER_LEN; //no variable-sized part
- }
-
-protected:
-#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
- virtual enum_skip_reason do_shall_skip(rpl_group_info*)
- {
- /*
- Events from ourself should be skipped, but they should not
- decrease the slave skip counter.
- */
- if (this->server_id == global_system_variables.server_id)
- return Log_event::EVENT_SKIP_IGNORE;
- else
- return Log_event::EVENT_SKIP_NOT;
- }
-#endif
-};
-
-/**
@class Start_encryption_log_event
Start_encryption_log_event marks the beginning of encrypted data (all events
@@ -2847,10 +2414,41 @@ public:
@section Format_description_log_event_binary_format Binary Format
*/
-class Format_description_log_event: public Start_log_event_v3
+class Format_description_log_event: public Log_event
{
public:
/*
+ If this event is at the start of the first binary log since server
+ startup 'created' should be the timestamp when the event (and the
+ binary log) was created. In the other case (i.e. this event is at
+ the start of a binary log created by FLUSH LOGS or automatic
+ rotation), 'created' should be 0. This "trick" is used by MySQL
+ >=4.0.14 slaves to know whether they must drop stale temporary
+  tables and whether they should abort unfinished transactions.
+
+ Note that when 'created'!=0, it is always equal to the event's
+ timestamp; indeed Start_log_event is written only in log.cc where
+ the first constructor below is called, in which 'created' is set
+  to 'when'. So 'created' carries no extra information: whether it is
+  0 or not, the creation time can always be read from the timestamp
+  ('when'). Conclusion:
+ - we use timestamp to print when the binlog was created.
+ - we use 'created' only to know if this is a first binlog or not.
+ In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in
+ 3.23.57 does not print 'created the_date' if created was zero. This is now
+ fixed.
+ */
+ time_t created;
+ uint16 binlog_version;
+ char server_version[ST_SERVER_VER_LEN];
+ /*
+ We set this to 1 if we don't want to have the created time in the log,
+    which is the case when we roll over to a new log.
+ */
+ bool dont_set_created;
+
+ /*
The size of the fixed header which _all_ events have
(for binlogs written by this version, this is equal to
LOG_EVENT_HEADER_LEN), except FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT
@@ -2858,8 +2456,8 @@ public:
*/
uint8 common_header_len;
uint8 number_of_event_types;
- /*
- The list of post-headers' lengths followed
+ /*
+ The list of post-headers' lengths followed
by the checksum alg description byte
*/
uint8 *post_header_len;
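Editor's note: the post_header_len array mentioned in the comment above is indexed by event type code minus one, which is the description_event->post_header_len[SOME_EVENT - 1] pattern visible throughout this diff (e.g. in the deleted Create_file constructor). A tiny illustrative lookup follows; the sizes in the table are made up, since the real values are read from the Format_description event itself.

#include <cstdint>
#include <cstdio>

// Made-up per-event post-header sizes, for illustration only.
static const uint8_t post_header_len_sketch[]= {56, 13, 0, 8, 18};

// The array is indexed by event type code minus one.
static uint8_t post_header_len_for(uint8_t type_code)
{
  return post_header_len_sketch[type_code - 1];
}

int main()
{
  printf("post-header length for type code 2: %u\n",
         (unsigned) post_header_len_for(2));
  return 0;
}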
@@ -2888,14 +2486,18 @@ public:
my_free(post_header_len);
}
Log_event_type get_type_code() { return FORMAT_DESCRIPTION_EVENT;}
+ my_off_t get_header_len(my_off_t) { return LOG_EVENT_MINIMAL_HEADER_LEN; }
#ifdef MYSQL_SERVER
bool write();
+#ifdef HAVE_REPLICATION
+ void pack_info(Protocol* protocol);
+#endif /* HAVE_REPLICATION */
+#else
+ bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
#endif
bool header_is_valid() const
{
- return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN :
- LOG_EVENT_MINIMAL_HEADER_LEN)) &&
- (post_header_len != NULL));
+ return common_header_len >= LOG_EVENT_MINIMAL_HEADER_LEN && post_header_len;
}
bool is_valid() const
@@ -3861,82 +3463,6 @@ public:
};
-/* the classes below are for the new LOAD DATA INFILE logging */
-
-/**
- @class Create_file_log_event
-
- @section Create_file_log_event_binary_format Binary Format
-*/
-
-class Create_file_log_event: public Load_log_event
-{
-protected:
- /*
- Pretend we are Load event, so we can write out just
- our Load part - used on the slave when writing event out to
- SQL_LOAD-*.info file
- */
- bool fake_base;
-public:
- uchar *block;
- const uchar *event_buf;
- uint block_len;
- uint file_id;
- bool inited_from_old;
-
-#ifdef MYSQL_SERVER
- Create_file_log_event(THD* thd, sql_exchange* ex, const char* db_arg,
- const char* table_name_arg,
- List<Item>& fields_arg,
- bool is_concurrent_arg,
- enum enum_duplicates handle_dup, bool ignore,
- uchar* block_arg, uint block_len_arg,
- bool using_trans);
-#ifdef HAVE_REPLICATION
- void pack_info(Protocol* protocol);
-#endif /* HAVE_REPLICATION */
-#else
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info,
- bool enable_local);
-#endif
-
- Create_file_log_event(const uchar *buf, uint event_len,
- const Format_description_log_event* description_event);
- ~Create_file_log_event()
- {
- my_free((void*) event_buf);
- }
-
- Log_event_type get_type_code()
- {
- return fake_base ? Load_log_event::get_type_code() : CREATE_FILE_EVENT;
- }
- int get_data_size()
- {
- return (fake_base ? Load_log_event::get_data_size() :
- Load_log_event::get_data_size() +
- 4 + 1 + block_len);
- }
- bool is_valid() const { return inited_from_old || block != 0; }
-#ifdef MYSQL_SERVER
- bool write_data_header();
- bool write_data_body();
- /*
- Cut out Create_file extensions and
- write it as Load event - used on the slave
- */
- bool write_base();
-#endif
-
-private:
-#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
-#endif
-};
-
-
/**
@class Append_block_log_event
@@ -3956,9 +3482,7 @@ public:
used by Append_block_log_event::write()), so it can't be read in
the Append_block_log_event(const uchar *buf, int event_len)
constructor. In other words, 'db' is used only for filtering by
- binlog-*-db rules. Create_file_log_event is different: it's 'db'
- (which is inherited from Load_log_event) is written to the binlog
- and can be re-read.
+ binlog-*-db rules.
*/
const char* db;
@@ -4034,46 +3558,6 @@ private:
/**
- @class Execute_load_log_event
-
- @section Delete_file_log_event_binary_format Binary Format
-*/
-
-class Execute_load_log_event: public Log_event
-{
-public:
- uint file_id;
- const char* db; /* see comment in Append_block_log_event */
-
-#ifdef MYSQL_SERVER
- Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans);
-#ifdef HAVE_REPLICATION
- void pack_info(Protocol* protocol);
-#endif /* HAVE_REPLICATION */
-#else
- bool print(FILE* file, PRINT_EVENT_INFO* print_event_info);
-#endif
-
- Execute_load_log_event(const uchar *buf, uint event_len,
- const Format_description_log_event
- *description_event);
- ~Execute_load_log_event() {}
- Log_event_type get_type_code() { return EXEC_LOAD_EVENT;}
- int get_data_size() { return EXEC_LOAD_HEADER_LEN ;}
- bool is_valid() const { return file_id != 0; }
-#ifdef MYSQL_SERVER
- bool write();
- const char* get_db() { return db; }
-#endif
-
-private:
-#if defined(MYSQL_SERVER) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
-#endif
-};
-
-
-/**
@class Begin_load_query_log_event
Event for the first block of file to be loaded, its only difference from
@@ -5358,8 +4842,6 @@ private:
*/
virtual int do_exec_row(rpl_group_info *rli) = 0;
#endif /* defined(MYSQL_SERVER) && defined(HAVE_REPLICATION) */
-
- friend class Old_rows_log_event;
};
/**
@@ -5608,9 +5090,6 @@ private:
#endif
};
-
-#include "log_event_old.h"
-
/**
@class Incident_log_event
diff --git a/sql/log_event_client.cc b/sql/log_event_client.cc
index 15d3ae8921b..acdbcadda16 100644
--- a/sql/log_event_client.cc
+++ b/sql/log_event_client.cc
@@ -2104,9 +2104,9 @@ err:
}
-bool Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
+bool Format_description_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
- DBUG_ENTER("Start_log_event_v3::print");
+ DBUG_ENTER("Format_description_log_event::print");
Write_on_release_cache cache(&print_event_info->head_cache, file,
Write_on_release_cache::FLUSH_F);
@@ -2188,122 +2188,6 @@ bool Start_encryption_log_event::print(FILE* file,
}
-bool Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
-{
- return print(file, print_event_info, 0);
-}
-
-
-bool Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info,
- bool commented)
-{
- Write_on_release_cache cache(&print_event_info->head_cache, file_arg);
- bool different_db= 1;
- DBUG_ENTER("Load_log_event::print");
-
- if (!print_event_info->short_form)
- {
- if (print_header(&cache, print_event_info, FALSE) ||
- my_b_printf(&cache, "\tQuery\tthread_id=%ld\texec_time=%ld\n",
- thread_id, exec_time))
- goto err;
- }
-
- if (db)
- {
- /*
- If the database is different from the one of the previous statement, we
- need to print the "use" command, and we update the last_db.
- But if commented, the "use" is going to be commented so we should not
- update the last_db.
- */
- if ((different_db= memcmp(print_event_info->db, db, db_len + 1)) &&
- !commented)
- memcpy(print_event_info->db, db, db_len + 1);
- }
-
- if (db && db[0] && different_db)
- if (my_b_printf(&cache, "%suse %`s%s\n",
- commented ? "# " : "",
- db, print_event_info->delimiter))
- goto err;
-
- if (flags & LOG_EVENT_THREAD_SPECIFIC_F)
- if (my_b_printf(&cache,"%sSET @@session.pseudo_thread_id=%lu%s\n",
- commented ? "# " : "", (ulong)thread_id,
- print_event_info->delimiter))
- goto err;
- if (my_b_printf(&cache, "%sLOAD DATA ",
- commented ? "# " : ""))
- goto err;
- if (check_fname_outside_temp_buf())
- if (my_b_write_string(&cache, "LOCAL "))
- goto err;
- if (my_b_printf(&cache, "INFILE '%-*s' ", fname_len, fname))
- goto err;
-
- if (sql_ex.opt_flags & REPLACE_FLAG)
- {
- if (my_b_write_string(&cache, "REPLACE "))
- goto err;
- }
- else if (sql_ex.opt_flags & IGNORE_FLAG)
- if (my_b_write_string(&cache, "IGNORE "))
- goto err;
-
- if (my_b_printf(&cache, "INTO TABLE `%s`", table_name) ||
- my_b_write_string(&cache, " FIELDS TERMINATED BY ") ||
- pretty_print_str(&cache, sql_ex.field_term, sql_ex.field_term_len))
- goto err;
-
- if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG)
- if (my_b_write_string(&cache, " OPTIONALLY "))
- goto err;
- if (my_b_write_string(&cache, " ENCLOSED BY ") ||
- pretty_print_str(&cache, sql_ex.enclosed, sql_ex.enclosed_len) ||
- my_b_write_string(&cache, " ESCAPED BY ") ||
- pretty_print_str(&cache, sql_ex.escaped, sql_ex.escaped_len) ||
- my_b_write_string(&cache, " LINES TERMINATED BY ") ||
- pretty_print_str(&cache, sql_ex.line_term, sql_ex.line_term_len))
- goto err;
-
- if (sql_ex.line_start)
- {
- if (my_b_write_string(&cache," STARTING BY ") ||
- pretty_print_str(&cache, sql_ex.line_start, sql_ex.line_start_len))
- goto err;
- }
- if ((long) skip_lines > 0)
- if (my_b_printf(&cache, " IGNORE %ld LINES", (long) skip_lines))
- goto err;
-
- if (num_fields)
- {
- uint i;
- const char* field = fields;
- if (my_b_write_string(&cache, " ("))
- goto err;
- for (i = 0; i < num_fields; i++)
- {
- if (i)
- if (my_b_write_byte(&cache, ','))
- goto err;
- if (my_b_printf(&cache, "%`s", field))
- goto err;
- field += field_lens[i] + 1;
- }
- if (my_b_write_byte(&cache, ')'))
- goto err;
- }
-
- if (my_b_printf(&cache, "%s\n", print_event_info->delimiter))
- goto err;
- DBUG_RETURN(cache.flush_data());
-err:
- DBUG_RETURN(1);
-}
-
-
bool Rotate_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
{
if (print_event_info->short_form)
@@ -2626,61 +2510,6 @@ bool Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
#endif
-bool Create_file_log_event::print(FILE* file,
- PRINT_EVENT_INFO* print_event_info,
- bool enable_local)
-{
- if (print_event_info->short_form)
- {
- if (enable_local && check_fname_outside_temp_buf())
- return Load_log_event::print(file, print_event_info);
- return 0;
- }
-
- Write_on_release_cache cache(&print_event_info->head_cache, file);
-
- if (enable_local)
- {
- if (Load_log_event::print(file, print_event_info,
- !check_fname_outside_temp_buf()))
- goto err;
-
- /**
- reduce the size of io cache so that the write function is called
- for every call to my_b_printf().
- */
- DBUG_EXECUTE_IF ("simulate_create_event_write_error",
- {(&cache)->write_pos= (&cache)->write_end;
- DBUG_SET("+d,simulate_file_write_error");});
- /*
- That one is for "file_id: etc" below: in mysqlbinlog we want the #, in
- SHOW BINLOG EVENTS we don't.
- */
- if (my_b_write_byte(&cache, '#'))
- goto err;
- }
-
- if (my_b_printf(&cache, " file_id: %d block_len: %d\n", file_id, block_len))
- goto err;
-
- return cache.flush_data();
-err:
- return 1;
-
-}
-
-
-bool Create_file_log_event::print(FILE* file,
- PRINT_EVENT_INFO* print_event_info)
-{
- return print(file, print_event_info, 0);
-}
-
-
-/*
- Append_block_log_event::print()
-*/
-
bool Append_block_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
@@ -2700,10 +2529,6 @@ err:
}
-/*
- Delete_file_log_event::print()
-*/
-
bool Delete_file_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
{
@@ -2719,25 +2544,6 @@ bool Delete_file_log_event::print(FILE* file,
return cache.flush_data();
}
-/*
- Execute_load_log_event::print()
-*/
-
-bool Execute_load_log_event::print(FILE* file,
- PRINT_EVENT_INFO* print_event_info)
-{
- if (print_event_info->short_form)
- return 0;
-
- Write_on_release_cache cache(&print_event_info->head_cache, file);
-
- if (print_header(&cache, print_event_info, FALSE) ||
- my_b_printf(&cache, "\n#Exec_load: file_id=%d\n",
- file_id))
- return 1;
-
- return cache.flush_data();
-}
bool Execute_load_query_log_event::print(FILE* file,
PRINT_EVENT_INFO* print_event_info)
@@ -2995,10 +2801,6 @@ err:
where fragments are represented by a pair of indexed user
"one shot" variables.
- @note
- If any changes made don't forget to duplicate them to
- Old_rows_log_event as long as it's supported.
-
@param file pointer to IO_CACHE
@param print_event_info pointer to print_event_info specializing
what out of and how to print the event
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
deleted file mode 100644
index 1990103598e..00000000000
--- a/sql/log_event_old.cc
+++ /dev/null
@@ -1,2749 +0,0 @@
-/* Copyright (c) 2007, 2019, Oracle and/or its affiliates.
- Copyright (c) 2009, 2019, MariaDB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
-
-#include "mariadb.h"
-#include "sql_priv.h"
-#ifndef MYSQL_CLIENT
-#include "unireg.h"
-#endif
-#include "log_event.h"
-#ifndef MYSQL_CLIENT
-#include "sql_cache.h" // QUERY_CACHE_FLAGS_SIZE
-#include "sql_base.h" // close_tables_for_reopen
-#include "key.h" // key_copy
-#include "lock.h" // mysql_unlock_tables
-#include "rpl_rli.h"
-#include "rpl_utility.h"
-#endif
-#include "log_event_old.h"
-#include "rpl_record_old.h"
-#include "transaction.h"
-
-PSI_memory_key key_memory_log_event_old;
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
-// Old implementation of do_apply_event()
-int
-Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, rpl_group_info *rgi)
-{
- DBUG_ENTER("Old_rows_log_event::do_apply_event(st_relay_log_info*)");
- int error= 0;
- THD *ev_thd= ev->thd;
- uchar const *row_start= ev->m_rows_buf;
- const Relay_log_info *rli= rgi->rli;
-
- /*
- If m_table_id == ~0UL, then we have a dummy event that does not
- contain any data. In that case, we just remove all tables in the
- tables_to_lock list, close the thread tables, and return with
- success.
- */
- if (ev->m_table_id == ~0UL)
- {
- /*
- This one is supposed to be set: just an extra check so that
- nothing strange has happened.
- */
- DBUG_ASSERT(ev->get_flags(Old_rows_log_event::STMT_END_F));
-
- rgi->slave_close_thread_tables(ev_thd);
- ev_thd->clear_error();
- DBUG_RETURN(0);
- }
-
- /*
- 'ev_thd' has been set by exec_relay_log_event(), just before calling
- do_apply_event(). We still check here to prevent future coding
- errors.
- */
- DBUG_ASSERT(rgi->thd == ev_thd);
-
- /*
- If there is no locks taken, this is the first binrow event seen
- after the table map events. We should then lock all the tables
- used in the transaction and proceed with execution of the actual
- event.
- */
- if (!ev_thd->lock)
- {
- /*
- Lock_tables() reads the contents of ev_thd->lex, so they must be
- initialized.
-
- We also call the THD::reset_for_next_command(), since this
- is the logical start of the next "statement". Note that this
- call might reset the value of current_stmt_binlog_format, so
- we need to do any changes to that value after this function.
- */
- delete_explain_query(thd->lex);
- lex_start(ev_thd);
- ev_thd->reset_for_next_command();
-
- /*
- This is a row injection, so we flag the "statement" as
- such. Note that this code is called both when the slave does row
- injections and when the BINLOG statement is used to do row
- injections.
- */
- ev_thd->lex->set_stmt_row_injection();
-
- if (unlikely(open_and_lock_tables(ev_thd, rgi->tables_to_lock, FALSE, 0)))
- {
- if (ev_thd->is_error())
- {
- /*
- Error reporting borrowed from Query_log_event with many excessive
- simplifications.
- We should not honour --slave-skip-errors at this point as we are
- having severe errors which should not be skipped.
- */
- rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
- "Error '%s' on opening tables",
- ev_thd->get_stmt_da()->message());
- ev_thd->is_slave_error= 1;
- }
- DBUG_RETURN(1);
- }
-
- /*
- When the open and locking succeeded, we check all tables to
- ensure that they still have the correct type.
- */
-
- {
- TABLE_LIST *table_list_ptr= rgi->tables_to_lock;
- for (uint i=0 ; table_list_ptr&& (i< rgi->tables_to_lock_count);
- table_list_ptr= table_list_ptr->next_global, i++)
- {
- /*
- Please see comment in log_event.cc-Rows_log_event::do_apply_event()
- function for the explanation of the below if condition
- */
- if (table_list_ptr->parent_l)
- continue;
- /*
- We can use a down cast here since we know that every table added
- to the tables_to_lock is a RPL_TABLE_LIST(or child table which is
- skipped above).
- */
- RPL_TABLE_LIST *ptr=static_cast<RPL_TABLE_LIST*>(table_list_ptr);
- DBUG_ASSERT(ptr->m_tabledef_valid);
- TABLE *conv_table;
- if (!ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table))
- {
- ev_thd->is_slave_error= 1;
- rgi->slave_close_thread_tables(ev_thd);
- DBUG_RETURN(Old_rows_log_event::ERR_BAD_TABLE_DEF);
- }
- DBUG_PRINT("debug", ("Table: %s.%s is compatible with master"
- " - conv_table: %p",
- ptr->table->s->db.str,
- ptr->table->s->table_name.str, conv_table));
- ptr->m_conv_table= conv_table;
- }
- }
-
- /*
- ... and then we add all the tables to the table map and remove
- them from tables to lock.
-
- We also invalidate the query cache for all the tables, since
- they will now be changed.
-
- TODO [/Matz]: Maybe the query cache should not be invalidated
- here? It might be that a table is not changed, even though it
- was locked for the statement. We do know that each
- Old_rows_log_event contain at least one row, so after processing one
- Old_rows_log_event, we can invalidate the query cache for the
- associated table.
- */
- TABLE_LIST *ptr= rgi->tables_to_lock;
- for (uint i=0; ptr && (i < rgi->tables_to_lock_count); ptr= ptr->next_global, i++)
- {
- /*
- Please see comment in log_event.cc-Rows_log_event::do_apply_event()
- function for the explanation of the below if condition
- */
- if (ptr->parent_l)
- continue;
- rgi->m_table_map.set_table(ptr->table_id, ptr->table);
- }
-#ifdef HAVE_QUERY_CACHE
- query_cache.invalidate_locked_for_write(thd, rgi->tables_to_lock);
-#endif
- }
-
- TABLE* table= rgi->m_table_map.get_table(ev->m_table_id);
-
- if (table)
- {
- /*
- table == NULL means that this table should not be replicated
- (this was set up by Table_map_log_event::do_apply_event()
- which tested replicate-* rules).
- */
-
- /*
- It's not needed to set_time() but
- 1) it continues the property that "Time" in SHOW PROCESSLIST shows how
- much slave is behind
- 2) it will be needed when we allow replication from a table with no
- TIMESTAMP column to a table with one.
- So we call set_time(), like in SBR. Presently it changes nothing.
- */
- ev_thd->set_time(ev->when, ev->when_sec_part);
- /*
- There are a few flags that are replicated with each row event.
- Make sure to set/clear them before executing the main body of
- the event.
- */
- if (ev->get_flags(Old_rows_log_event::NO_FOREIGN_KEY_CHECKS_F))
- ev_thd->variables.option_bits|= OPTION_NO_FOREIGN_KEY_CHECKS;
- else
- ev_thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
-
- if (ev->get_flags(Old_rows_log_event::RELAXED_UNIQUE_CHECKS_F))
- ev_thd->variables.option_bits|= OPTION_RELAXED_UNIQUE_CHECKS;
- else
- ev_thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
- /* A small test to verify that objects have consistent types */
- DBUG_ASSERT(sizeof(ev_thd->variables.option_bits) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
-
- table->rpl_write_set= table->write_set;
-
- error= do_before_row_operations(table);
- while (error == 0 && row_start < ev->m_rows_end)
- {
- uchar const *row_end= NULL;
- if (unlikely((error= do_prepare_row(ev_thd, rgi, table, row_start,
- &row_end))))
- break; // We should perform the after-row operation even in
- // the case of error
-
- DBUG_ASSERT(row_end != NULL); // cannot happen
- DBUG_ASSERT(row_end <= ev->m_rows_end);
-
- /* in_use can have been set to NULL in close_tables_for_reopen */
- THD* old_thd= table->in_use;
- if (!table->in_use)
- table->in_use= ev_thd;
- error= do_exec_row(table);
- table->in_use = old_thd;
- switch (error)
- {
- /* Some recoverable errors */
- case HA_ERR_RECORD_CHANGED:
- case HA_ERR_KEY_NOT_FOUND: /* Idempotency support: OK if
- tuple does not exist */
- error= 0;
- case 0:
- break;
-
- default:
- rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
- "Error in %s event: row application failed. %s",
- ev->get_type_str(),
- ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : "");
- thd->is_slave_error= 1;
- break;
- }
-
- row_start= row_end;
- }
- DBUG_EXECUTE_IF("stop_slave_middle_group",
- const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
- error= do_after_row_operations(table, error);
- }
-
- if (unlikely(error))
- { /* error has occurred during the transaction */
- rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(), NULL,
- "Error in %s event: error during transaction execution "
- "on table %s.%s. %s",
- ev->get_type_str(), table->s->db.str,
- table->s->table_name.str,
- ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : "");
-
- /*
- If one day we honour --skip-slave-errors in row-based replication, and
- the error should be skipped, then we would clear mappings, rollback,
- close tables, but the slave SQL thread would not stop and then may
- assume the mapping is still available, the tables are still open...
- So then we should clear mappings/rollback/close here only if this is a
- STMT_END_F.
- For now we code, knowing that error is not skippable and so slave SQL
- thread is certainly going to stop.
- rollback at the caller along with sbr.
- */
- ev_thd->reset_current_stmt_binlog_format_row();
- rgi->cleanup_context(ev_thd, error);
- ev_thd->is_slave_error= 1;
- DBUG_RETURN(error);
- }
-
- DBUG_RETURN(0);
-}
-#endif
-
-
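Editor's note: the deleted Old_rows_log_event::do_apply_event() above locks all mapped tables on the first row event of a group, then loops over the packed rows, preparing and executing one row at a time and downgrading "key not found" / "record changed" to success for idempotency. A compressed sketch of that loop shape follows; prepare_row(), exec_row() and the error codes are placeholders, not the handler API.

#include <cstdio>

// Placeholder error codes mirroring the recoverable cases in the deleted switch.
enum { OK= 0, ERR_KEY_NOT_FOUND= 120, ERR_RECORD_CHANGED= 123 };

// Hypothetical per-row callbacks standing in for do_prepare_row()/do_exec_row().
static int prepare_row(const unsigned char **row) { ++*row; return OK; }
static int exec_row(const unsigned char *row) { return *row ? OK : ERR_KEY_NOT_FOUND; }

// Apply rows until the buffer is exhausted; "key not found" and
// "record changed" are tolerated, anything else stops the loop.
static int apply_rows(const unsigned char *row, const unsigned char *end)
{
  int error= 0;
  while (error == 0 && row < end)
  {
    const unsigned char *next= row;
    if ((error= prepare_row(&next)))
      break;
    error= exec_row(row);
    if (error == ERR_KEY_NOT_FOUND || error == ERR_RECORD_CHANGED)
      error= 0;                      // idempotency: treated as success
    row= next;
  }
  return error;
}

int main()
{
  const unsigned char rows[]= {1, 0, 1};   // the middle row hits the tolerated error
  printf("apply_rows -> %d\n", apply_rows(rows, rows + sizeof(rows)));
  return 0;
}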
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
-/*
- Check if there are more UNIQUE keys after the given key.
-*/
-static int
-last_uniq_key(TABLE *table, uint keyno)
-{
- while (++keyno < table->s->keys)
- if (table->key_info[keyno].flags & HA_NOSAME)
- return 0;
- return 1;
-}
-
-
-/*
- Compares table->record[0] and table->record[1]
-
- Returns TRUE if different.
-*/
-static bool record_compare(TABLE *table)
-{
- bool result= FALSE;
- if (table->s->blob_fields + table->s->varchar_fields == 0)
- {
- result= cmp_record(table,record[1]);
- goto record_compare_exit;
- }
-
- /* Compare null bits */
- if (memcmp(table->null_flags,
- table->null_flags+table->s->rec_buff_length,
- table->s->null_bytes))
- {
- result= TRUE; // Diff in NULL value
- goto record_compare_exit;
- }
-
- /* Compare updated fields */
- for (Field **ptr=table->field ; *ptr ; ptr++)
- {
- if ((*ptr)->cmp_binary_offset(table->s->rec_buff_length))
- {
- result= TRUE;
- goto record_compare_exit;
- }
- }
-
-record_compare_exit:
- return result;
-}
-
-
-/*
- Copy "extra" columns from record[1] to record[0].
-
- Copy the extra fields that are not present on the master but are
- present on the slave from record[1] to record[0]. This is used
- after fetching a record that are to be updated, either inside
- replace_record() or as part of executing an update_row().
- */
-static int
-copy_extra_record_fields(TABLE *table,
- size_t master_reclength,
- my_ptrdiff_t master_fields)
-{
- DBUG_ENTER("copy_extra_record_fields(table, master_reclen, master_fields)");
- DBUG_PRINT("info", ("Copying to %p "
- "from field %lu at offset %lu "
- "to field %d at offset %lu",
- table->record[0],
- (ulong) master_fields, (ulong) master_reclength,
- table->s->fields, table->s->reclength));
- /*
- Copying the extra fields of the slave that does not exist on
- master into record[0] (which are basically the default values).
- */
-
- if (table->s->fields < (uint) master_fields)
- DBUG_RETURN(0);
-
- DBUG_ASSERT(master_reclength <= table->s->reclength);
- if (master_reclength < table->s->reclength)
- memcpy(table->record[0] + master_reclength,
- table->record[1] + master_reclength,
- table->s->reclength - master_reclength);
-
- /*
- Bit columns are special. We iterate over all the remaining
- columns and copy the "extra" bits to the new record. This is
- not a very good solution: it should be refactored on
- opportunity.
-
- REFACTORING SUGGESTION (Matz). Introduce a member function
- similar to move_field_offset() called copy_field_offset() to
- copy field values and implement it for all Field subclasses. Use
- this function to copy data from the found record to the record
- that are going to be inserted.
-
- The copy_field_offset() function need to be a virtual function,
- which in this case will prevent copying an entire range of
- fields efficiently.
- */
- {
- Field **field_ptr= table->field + master_fields;
- for ( ; *field_ptr ; ++field_ptr)
- {
- /*
- Set the null bit according to the values in record[1]
- */
- if ((*field_ptr)->maybe_null() &&
- (*field_ptr)->is_null_in_record(reinterpret_cast<uchar*>(table->record[1])))
- (*field_ptr)->set_null();
- else
- (*field_ptr)->set_notnull();
-
- /*
- Do the extra work for special columns.
- */
- switch ((*field_ptr)->real_type())
- {
- default:
- /* Nothing to do */
- break;
-
- case MYSQL_TYPE_BIT:
- Field_bit *f= static_cast<Field_bit*>(*field_ptr);
- if (f->bit_len > 0)
- {
- my_ptrdiff_t const offset= table->record[1] - table->record[0];
- uchar const bits=
- get_rec_bits(f->bit_ptr + offset, f->bit_ofs, f->bit_len);
- set_rec_bits(bits, f->bit_ptr, f->bit_ofs, f->bit_len);
- }
- break;
- }
- }
- }
- DBUG_RETURN(0); // All OK
-}
-
-
-/*
- Replace the provided record in the database.
-
- SYNOPSIS
- replace_record()
- thd Thread context for writing the record.
- table Table to which record should be written.
- master_reclength
- Offset to first column that is not present on the master,
- alternatively the length of the record on the master
- side.
-
- RETURN VALUE
- Error code on failure, 0 on success.
-
- DESCRIPTION
- Similar to how it is done in mysql_insert(), we first try to do
- a ha_write_row() and of that fails due to duplicated keys (or
- indices), we do an ha_update_row() or a ha_delete_row() instead.
- */
-static int
-replace_record(THD *thd, TABLE *table,
- ulong const master_reclength,
- uint const master_fields)
-{
- DBUG_ENTER("replace_record");
- DBUG_ASSERT(table != NULL && thd != NULL);
-
- int error;
- int keynum;
- auto_afree_ptr<char> key(NULL);
-
-#ifndef DBUG_OFF
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
- DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set);
- DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set);
-#endif
-
- while (unlikely(error= table->file->ha_write_row(table->record[0])))
- {
- if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT)
- {
- table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */
- DBUG_RETURN(error);
- }
- if (unlikely((keynum= table->file->get_dup_key(error)) < 0))
- {
- table->file->print_error(error, MYF(0));
- /*
- We failed to retrieve the duplicate key
- - either because the error was not "duplicate key" error
- - or because the information which key is not available
- */
- DBUG_RETURN(error);
- }
-
- /*
- We need to retrieve the old row into record[1] to be able to
- either update or delete the offending record. We either:
-
- - use rnd_pos() with a row-id (available as dupp_row) to the
- offending row, if that is possible (MyISAM and Blackhole), or else
-
- - use index_read_idx() with the key that is duplicated, to
- retrieve the offending row.
- */
- if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
- {
- error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
- if (unlikely(error))
- {
- DBUG_PRINT("info",("rnd_pos() returns error %d",error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- }
- else
- {
- if (unlikely(table->file->extra(HA_EXTRA_FLUSH_CACHE)))
- {
- DBUG_RETURN(my_errno);
- }
-
- if (key.get() == NULL)
- {
- key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
- if (unlikely(key.get() == NULL))
- DBUG_RETURN(ENOMEM);
- }
-
- key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
- 0);
- error= table->file->ha_index_read_idx_map(table->record[1], keynum,
- (const uchar*)key.get(),
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT);
- if (unlikely(error))
- {
- DBUG_PRINT("info", ("index_read_idx() returns error %d", error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- }
-
- /*
- Now, table->record[1] should contain the offending row. That
- will enable us to update it or, alternatively, delete it (so
- that we can insert the new row afterwards).
-
- First we copy the columns into table->record[0] that are not
- present on the master from table->record[1], if there are any.
- */
- copy_extra_record_fields(table, master_reclength, master_fields);
-
- /*
- REPLACE is defined as either INSERT or DELETE + INSERT. If
- possible, we can replace it with an UPDATE, but that will not
- work on InnoDB if FOREIGN KEY checks are necessary.
-
- I (Matz) am not sure of the reason for the last_uniq_key()
- check as, but I'm guessing that it's something along the
- following lines.
-
- Suppose that we got the duplicate key to be a key that is not
- the last unique key for the table and we perform an update:
- then there might be another key for which the unique check will
- fail, so we're better off just deleting the row and inserting
- the correct row.
- */
- if (last_uniq_key(table, keynum) &&
- !table->file->referenced_by_foreign_key())
- {
- error=table->file->ha_update_row(table->record[1],
- table->record[0]);
- if (unlikely(error) && error != HA_ERR_RECORD_IS_THE_SAME)
- table->file->print_error(error, MYF(0));
- else
- error= 0;
- DBUG_RETURN(error);
- }
- else
- {
- if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
- {
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- /* Will retry ha_write_row() with the offending row removed. */
- }
- }
-
- DBUG_RETURN(error);
-}
-
-
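Editor's note: the deleted replace_record() above implements REPLACE as "try the insert; on a duplicate key, fetch the offending row, then either update it in place (when the duplicate is on the last unique key and no foreign keys reference the table) or delete it and retry the insert". A schematic version of that retry loop follows, with placeholder calls rather than the handler API; the names and the outcome of can_update_in_place() are assumptions for the sketch.

#include <cstdio>

enum { OK= 0, ERR_DUP_KEY= 121 };

// Placeholder storage calls; in the server these are ha_write_row(),
// ha_update_row(), ha_delete_row() plus the duplicate-key lookups.
static int try_insert(int attempt)  { return attempt == 0 ? ERR_DUP_KEY : OK; }
static bool can_update_in_place()   { return false; }  // e.g. FK references exist
static int update_in_place()        { return OK; }
static int delete_conflicting_row() { return OK; }

// REPLACE semantics: keep retrying the insert, resolving one duplicate per pass.
static int replace_row()
{
  int attempt= 0, error;
  while ((error= try_insert(attempt++)) != OK)
  {
    if (error != ERR_DUP_KEY)
      return error;                 // not a duplicate: give up
    if (can_update_in_place())
      return update_in_place();     // the conflicting row becomes the new row
    if ((error= delete_conflicting_row()) != OK)
      return error;                 // otherwise delete it and retry the insert
  }
  return OK;
}

int main()
{
  printf("replace_row -> %d\n", replace_row());
  return 0;
}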
-/**
- Find the row given by 'key', if the table has keys, or else use a table scan
- to find (and fetch) the row.
-
- If the engine allows random access of the records, a combination of
- position() and rnd_pos() will be used.
-
- @param table Pointer to table to search
- @param key Pointer to key to use for search, if table has key
-
- @pre <code>table->record[0]</code> shall contain the row to locate
- and <code>key</code> shall contain a key to use for searching, if
- the engine has a key.
-
- @post If the return value is zero, <code>table->record[1]</code>
- will contain the fetched row and the internal "cursor" will refer to
- the row. If the return value is non-zero,
- <code>table->record[1]</code> is undefined. In either case,
- <code>table->record[0]</code> is undefined.
-
- @return Zero if the row was successfully fetched into
- <code>table->record[1]</code>, error code otherwise.
- */
-
-static int find_and_fetch_row(TABLE *table, uchar *key)
-{
- DBUG_ENTER("find_and_fetch_row(TABLE *table, uchar *key, uchar *record)");
- DBUG_PRINT("enter", ("table: %p, key: %p record: %p",
- table, key, table->record[1]));
-
- DBUG_ASSERT(table->in_use != NULL);
-
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
-
- if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
- table->s->primary_key < MAX_KEY)
- {
- /*
- Use a more efficient method to fetch the record given by
- table->record[0] if the engine allows it. We first compute a
- row reference using the position() member function (it will be
- stored in table->file->ref) and the use rnd_pos() to position
- the "cursor" (i.e., record[0] in this case) at the correct row.
-
- TODO: Add a check that the correct record has been fetched by
- comparing with the original record. Take into account that the
- record on the master and slave can be of different
- length. Something along these lines should work:
-
- ADD>>> store_record(table,record[1]);
- int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
- ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
- table->s->reclength) == 0);
-
- */
- table->file->position(table->record[0]);
- int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
- /*
- rnd_pos() returns the record in table->record[0], so we have to
- move it to table->record[1].
- */
- memcpy(table->record[1], table->record[0], table->s->reclength);
- DBUG_RETURN(error);
- }
-
- /* We need to retrieve all fields */
- /* TODO: Move this out from this function to main loop */
- table->use_all_columns();
-
- if (table->s->keys > 0)
- {
- int error;
- /* We have a key: search the table using the index */
- if (!table->file->inited &&
- unlikely(error= table->file->ha_index_init(0, FALSE)))
- {
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
-
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_DUMP("table->record[0]", table->record[0], table->s->reclength);
- DBUG_DUMP("table->record[1]", table->record[1], table->s->reclength);
-#endif
-
- /*
- We need to set the null bytes to ensure that the filler bit are
- all set when returning. There are storage engines that just set
- the necessary bits on the bytes and don't set the filler bits
- correctly.
- */
- my_ptrdiff_t const pos=
- table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
- table->record[1][pos]= 0xFF;
- if (unlikely((error= table->file->ha_index_read_map(table->record[1], key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))))
- {
- table->file->print_error(error, MYF(0));
- table->file->ha_index_end();
- DBUG_RETURN(error);
- }
-
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_DUMP("table->record[0]", table->record[0], table->s->reclength);
- DBUG_DUMP("table->record[1]", table->record[1], table->s->reclength);
-#endif
- /*
- Below is a minor "optimization". If the key (i.e., key number
- 0) has the HA_NOSAME flag set, we know that we have found the
- correct record (since there can be no duplicates); otherwise, we
- have to compare the record with the one found to see if it is
- the correct one.
-
- CAVEAT! This behaviour is essential for the replication of,
- e.g., the mysql.proc table since the correct record *shall* be
- found using the primary key *only*. There shall be no
- comparison of non-PK columns to decide if the correct record is
- found. I can see no scenario where it would be incorrect to
- chose the row to change only using a PK or an UNNI.
- */
- if (table->key_info->flags & HA_NOSAME)
- {
- table->file->ha_index_end();
- DBUG_RETURN(0);
- }
-
- while (record_compare(table))
- {
- int error;
-
- while ((error= table->file->ha_index_next(table->record[1])))
- {
- table->file->print_error(error, MYF(0));
- table->file->ha_index_end();
- DBUG_RETURN(error);
- }
- }
-
- /*
- Have to restart the scan to be able to fetch the next row.
- */
- table->file->ha_index_end();
- }
- else
- {
- int restart_count= 0; // Number of times scanning has restarted from top
- int error;
-
- /* We don't have a key: search the table using rnd_next() */
- if (unlikely((error= table->file->ha_rnd_init_with_error(1))))
- return error;
-
- /* Continue until we find the right record or have made a full loop */
- do
- {
- error= table->file->ha_rnd_next(table->record[1]);
-
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
- DBUG_DUMP("record[1]", table->record[1], table->s->reclength);
-
- switch (error) {
- case 0:
- break;
-
- case HA_ERR_END_OF_FILE:
- if (++restart_count < 2)
- {
- int error2;
- if (unlikely((error2= table->file->ha_rnd_init_with_error(1))))
- DBUG_RETURN(error2);
- }
- break;
-
- default:
- table->file->print_error(error, MYF(0));
- DBUG_PRINT("info", ("Record not found"));
- (void) table->file->ha_rnd_end();
- DBUG_RETURN(error);
- }
- }
- while (restart_count < 2 && record_compare(table));
-
- /*
- Have to restart the scan to be able to fetch the next row.
- */
- DBUG_PRINT("info", ("Record %sfound", restart_count == 2 ? "not " : ""));
- table->file->ha_rnd_end();
-
- DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0);
- DBUG_RETURN(error);
- }
-
- DBUG_RETURN(0);
-}
-
-
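Editor's note: the deleted find_and_fetch_row() above picks one of three strategies: position()+rnd_pos() when the engine exposes stable row positions and the table has a primary key, an index lookup (trusting a unique key without comparing the full row), or a full scan that restarts at most once from the top. A condensed sketch of that decision follows; TableCaps and its fields are placeholder predicates, not the handler flags.

#include <cstdio>

enum class Lookup { ByPosition, ByIndex, FullScan };

// Placeholder capabilities; in the server these come from
// handler::ha_table_flags() and the table's key metadata.
struct TableCaps { bool stable_positions; bool has_primary_key; unsigned keys; };

// Mirror of the three-way choice in the deleted function.
static Lookup choose_lookup(const TableCaps &t)
{
  if (t.stable_positions && t.has_primary_key)
    return Lookup::ByPosition;   // position() + rnd_pos()
  if (t.keys > 0)
    return Lookup::ByIndex;      // index read; a unique key skips row comparison
  return Lookup::FullScan;       // rnd_next(), restarting once at end of file
}

int main()
{
  TableCaps keyless{false, false, 0};
  printf("strategy=%d\n", (int) choose_lookup(keyless));  // 2 = FullScan
  return 0;
}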
-/**********************************************************
- Row handling primitives for Write_rows_log_event_old
- **********************************************************/
-
-int Write_rows_log_event_old::do_before_row_operations(TABLE *table)
-{
- int error= 0;
-
- /*
- We are using REPLACE semantics and not INSERT IGNORE semantics
- when writing rows, that is: new rows replace old rows. We need to
- inform the storage engine that it should use this behaviour.
- */
-
- /* Tell the storage engine that we are using REPLACE semantics. */
- thd->lex->duplicates= DUP_REPLACE;
-
- thd->lex->sql_command= SQLCOM_REPLACE;
- /*
- Do not raise the error flag in case of hitting a unique attribute
- */
- table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
- table->file->ha_start_bulk_insert(0);
- return error;
-}
-
-
-int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
-{
- int local_error= 0;
- table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
- table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
- /*
- Resetting the extra with
- table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY);
- fires bug#27077.
- TODO: explain or fix.
- */
- if (unlikely((local_error= table->file->ha_end_bulk_insert())))
- {
- table->file->print_error(local_error, MYF(0));
- }
- return error? error : local_error;
-}
-
-
-int
-Write_rows_log_event_old::do_prepare_row(THD *thd_arg,
- rpl_group_info *rgi,
- TABLE *table,
- uchar const *row_start,
- uchar const **row_end)
-{
- DBUG_ASSERT(table != NULL);
- DBUG_ASSERT(row_start && row_end);
-
- int error;
- error= unpack_row_old(rgi,
- table, m_width, table->record[0],
- row_start, m_rows_end,
- &m_cols, row_end, &m_master_reclength,
- table->write_set, PRE_GA_WRITE_ROWS_EVENT);
- bitmap_copy(table->read_set, table->write_set);
- return error;
-}
-
-
-int Write_rows_log_event_old::do_exec_row(TABLE *table)
-{
- DBUG_ASSERT(table != NULL);
- int error= replace_record(thd, table, m_master_reclength, m_width);
- return error;
-}
-
-
-/**********************************************************
- Row handling primitives for Delete_rows_log_event_old
- **********************************************************/
-
-int Delete_rows_log_event_old::do_before_row_operations(TABLE *table)
-{
- DBUG_ASSERT(m_memory == NULL);
-
- if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
- table->s->primary_key < MAX_KEY)
- {
- /*
- We don't need to allocate any memory for m_after_image and
- m_key since they are not used.
- */
- return 0;
- }
-
- int error= 0;
-
- if (table->s->keys > 0)
- {
- m_memory= (uchar*) my_multi_malloc(key_memory_log_event_old, MYF(MY_WME),
- &m_after_image,
- (uint) table->s->reclength,
- &m_key,
- (uint) table->key_info->key_length,
- NullS);
- }
- else
- {
- m_after_image= (uchar*) my_malloc(key_memory_log_event_old, table->s->reclength, MYF(MY_WME));
- m_memory= (uchar*)m_after_image;
- m_key= NULL;
- }
- if (!m_memory)
- return HA_ERR_OUT_OF_MEM;
-
- return error;
-}
-
-
-int Delete_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
-{
- /* error= ToDo: find out what this should really be; this triggers close_scan in ndb, returning error? */
- table->file->ha_index_or_rnd_end();
- my_free(m_memory); // Free for multi_malloc
- m_memory= NULL;
- m_after_image= NULL;
- m_key= NULL;
-
- return error;
-}
-
-
-int
-Delete_rows_log_event_old::do_prepare_row(THD *thd_arg,
- rpl_group_info *rgi,
- TABLE *table,
- uchar const *row_start,
- uchar const **row_end)
-{
- int error;
- DBUG_ASSERT(row_start && row_end);
- /*
- This assertion actually checks that there are at least as many
- columns on the slave as on the master.
- */
- DBUG_ASSERT(table->s->fields >= m_width);
-
- error= unpack_row_old(rgi,
- table, m_width, table->record[0],
- row_start, m_rows_end,
- &m_cols, row_end, &m_master_reclength,
- table->read_set, PRE_GA_DELETE_ROWS_EVENT);
- /*
- If we will access rows using the random access method, m_key will
- be set to NULL, so we do not need to make a key copy in that case.
- */
- if (m_key)
- {
- KEY *const key_info= table->key_info;
-
- key_copy(m_key, table->record[0], key_info, 0);
- }
-
- return error;
-}
-
-
-int Delete_rows_log_event_old::do_exec_row(TABLE *table)
-{
- int error;
- DBUG_ASSERT(table != NULL);
-
- if (likely(!(error= ::find_and_fetch_row(table, m_key))))
- {
- /*
- Now we should have the right row to delete. We are using
- record[0] since it is guaranteed to point to a record with the
- correct value.
- */
- error= table->file->ha_delete_row(table->record[0]);
- }
- return error;
-}
-
-
-/**********************************************************
- Row handling primitives for Update_rows_log_event_old
- **********************************************************/
-
-int Update_rows_log_event_old::do_before_row_operations(TABLE *table)
-{
- DBUG_ASSERT(m_memory == NULL);
-
- int error= 0;
-
- if (table->s->keys > 0)
- {
- m_memory= (uchar*) my_multi_malloc(key_memory_log_event_old, MYF(MY_WME),
- &m_after_image,
- (uint) table->s->reclength,
- &m_key,
- (uint) table->key_info->key_length,
- NullS);
- }
- else
- {
- m_after_image= (uchar*) my_malloc(key_memory_log_event_old, table->s->reclength, MYF(MY_WME));
- m_memory= m_after_image;
- m_key= NULL;
- }
- if (!m_memory)
- return HA_ERR_OUT_OF_MEM;
-
- return error;
-}
-
-
-int Update_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
-{
- /* error= ToDo: find out what this should really be; this triggers close_scan in ndb, returning error? */
- table->file->ha_index_or_rnd_end();
- my_free(m_memory);
- m_memory= NULL;
- m_after_image= NULL;
- m_key= NULL;
-
- return error;
-}
-
-
-int Update_rows_log_event_old::do_prepare_row(THD *thd_arg,
- rpl_group_info *rgi,
- TABLE *table,
- uchar const *row_start,
- uchar const **row_end)
-{
- int error;
- DBUG_ASSERT(row_start && row_end);
- /*
- This assertion actually checks that there are at least as many
- columns on the slave as on the master.
- */
- DBUG_ASSERT(table->s->fields >= m_width);
-
- /* record[0] is the before image for the update */
- error= unpack_row_old(rgi,
- table, m_width, table->record[0],
- row_start, m_rows_end,
- &m_cols, row_end, &m_master_reclength,
- table->read_set, PRE_GA_UPDATE_ROWS_EVENT);
- row_start = *row_end;
- /* m_after_image is the after image for the update */
- error= unpack_row_old(rgi,
- table, m_width, m_after_image,
- row_start, m_rows_end,
- &m_cols, row_end, &m_master_reclength,
- table->write_set, PRE_GA_UPDATE_ROWS_EVENT);
-
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
- DBUG_DUMP("m_after_image", m_after_image, table->s->reclength);
-
- /*
- If we will access rows using the random access method, m_key will
- be set to NULL, so we do not need to make a key copy in that case.
- */
- if (m_key)
- {
- KEY *const key_info= table->key_info;
-
- key_copy(m_key, table->record[0], key_info, 0);
- }
-
- return error;
-}
-
-
-int Update_rows_log_event_old::do_exec_row(TABLE *table)
-{
- DBUG_ASSERT(table != NULL);
-
- int error= ::find_and_fetch_row(table, m_key);
- if (unlikely(error))
- return error;
-
- /*
- We have to ensure that the new record (i.e., the after image) is
- in record[0] and the old record (i.e., the before image) is in
- record[1]. This is because some storage engines require it (for
- example, the partition engine).
-
- Since find_and_fetch_row() puts the fetched record (i.e., the old
- record) in record[1], we can keep it there. We put the new record
- (i.e., the after image) into record[0], and copy the fields that
- are on the slave (i.e., in record[1]) into record[0], effectively
- overwriting the default values that were put there by the
- unpack_row() function.
- */
- memcpy(table->record[0], m_after_image, table->s->reclength);
- copy_extra_record_fields(table, m_master_reclength, m_width);
-
- /*
- Now we have the right row to update. The old row (the one we're
- looking for) is in record[1] and the new row is in record[0].
- We also have copied the original values already in the slave's
- database into the after image delivered from the master.
- */
- error= table->file->ha_update_row(table->record[1], table->record[0]);
- if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
- error= 0;
-
- return error;
-}
-
-#endif
-
-
-/**************************************************************************
- Rows_log_event member functions
-**************************************************************************/
-
-#ifndef MYSQL_CLIENT
-Old_rows_log_event::Old_rows_log_event(THD *thd_arg, TABLE *tbl_arg, ulong tid,
- MY_BITMAP const *cols,
- bool is_transactional)
- : Log_event(thd_arg, 0, is_transactional),
- m_row_count(0),
- m_table(tbl_arg),
- m_table_id(tid),
- m_width(tbl_arg ? tbl_arg->s->fields : 1),
- m_rows_buf(0), m_rows_cur(0), m_rows_end(0), m_flags(0)
-#ifdef HAVE_REPLICATION
- , m_curr_row(NULL), m_curr_row_end(NULL), m_key(NULL)
-#endif
-{
-
- // This constructor should not be reached.
- assert(0);
-
- /*
- We allow a special form of dummy event when the table and cols
- are null and the table id is ~0UL. This is a temporary
- solution, to be able to terminate a started statement in the
- binary log: the extraneous events will be removed in the future.
- */
- DBUG_ASSERT((tbl_arg && tbl_arg->s && tid != ~0UL) ||
- (!tbl_arg && !cols && tid == ~0UL));
-
- if (thd_arg->variables.option_bits & OPTION_NO_FOREIGN_KEY_CHECKS)
- set_flags(NO_FOREIGN_KEY_CHECKS_F);
- if (thd_arg->variables.option_bits & OPTION_RELAXED_UNIQUE_CHECKS)
- set_flags(RELAXED_UNIQUE_CHECKS_F);
- /* if my_bitmap_init fails, caught in is_valid() */
- if (likely(!my_bitmap_init(&m_cols,
- m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
- m_width)))
- {
- /* Cols can be zero if this is a dummy binrows event */
- if (likely(cols != NULL))
- {
- memcpy(m_cols.bitmap, cols->bitmap, no_bytes_in_map(cols));
- create_last_word_mask(&m_cols);
- }
- }
- else
- {
- // Needed because my_bitmap_init() does not set it to null on failure
- m_cols.bitmap= 0;
- }
-}
-#endif
-
-
-Old_rows_log_event::Old_rows_log_event(const uchar *buf, uint event_len,
- Log_event_type event_type,
- const Format_description_log_event
- *description_event)
- : Log_event(buf, description_event),
- m_row_count(0),
-#ifndef MYSQL_CLIENT
- m_table(NULL),
-#endif
- m_table_id(0), m_rows_buf(0), m_rows_cur(0), m_rows_end(0)
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- , m_curr_row(NULL), m_curr_row_end(NULL), m_key(NULL)
-#endif
-{
- DBUG_ENTER("Old_rows_log_event::Old_Rows_log_event(const char*,...)");
- uint8 const common_header_len= description_event->common_header_len;
- uint8 const post_header_len= description_event->post_header_len[event_type-1];
-
- DBUG_PRINT("enter",("event_len: %u common_header_len: %d "
- "post_header_len: %d",
- event_len, common_header_len,
- post_header_len));
-
- const uchar *post_start= buf + common_header_len;
- DBUG_DUMP("post_header", post_start, post_header_len);
- post_start+= RW_MAPID_OFFSET;
- if (post_header_len == 6)
- {
- /* Master is of an intermediate source tree before 5.1.4. Id is 4 bytes */
- m_table_id= uint4korr(post_start);
- post_start+= 4;
- }
- else
- {
- m_table_id= (ulong) uint6korr(post_start);
- post_start+= RW_FLAGS_OFFSET;
- }
-
- m_flags= uint2korr(post_start);
-
- uchar const *const var_start=
- (const uchar *)buf + common_header_len + post_header_len;
- uchar const *const ptr_width= var_start;
- uchar *ptr_after_width= (uchar*) ptr_width;
- DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
- m_width = net_field_length(&ptr_after_width);
- DBUG_PRINT("debug", ("m_width=%lu", m_width));
- /* Avoid reading out of buffer */
- if (ptr_after_width + m_width > (uchar *)buf + event_len)
- {
- m_cols.bitmap= NULL;
- DBUG_VOID_RETURN;
- }
-
- /* if my_bitmap_init fails, caught in is_valid() */
- if (likely(!my_bitmap_init(&m_cols,
- m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
- m_width)))
- {
- DBUG_PRINT("debug", ("Reading from %p", ptr_after_width));
- memcpy(m_cols.bitmap, ptr_after_width, (m_width + 7) / 8);
- create_last_word_mask(&m_cols);
- ptr_after_width+= (m_width + 7) / 8;
- DBUG_DUMP("m_cols", (uchar*) m_cols.bitmap, no_bytes_in_map(&m_cols));
- }
- else
- {
- // Needed because my_bitmap_init() does not set it to null on failure
- m_cols.bitmap= NULL;
- DBUG_VOID_RETURN;
- }
-
- const uchar* const ptr_rows_data= (const uchar*) ptr_after_width;
- size_t const data_size= event_len - (ptr_rows_data - (const uchar *) buf);
- DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %zu",
- m_table_id, m_flags, m_width, data_size));
- DBUG_DUMP("rows_data", (uchar*) ptr_rows_data, data_size);
-
- m_rows_buf= (uchar*) my_malloc(key_memory_log_event_old, data_size, MYF(MY_WME));
- if (likely((bool)m_rows_buf))
- {
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- m_curr_row= m_rows_buf;
-#endif
- m_rows_end= m_rows_buf + data_size;
- m_rows_cur= m_rows_end;
- memcpy(m_rows_buf, ptr_rows_data, data_size);
- }
- else
- m_cols.bitmap= 0; // to not free it
-
- DBUG_VOID_RETURN;
-}
-
-
-Old_rows_log_event::~Old_rows_log_event()
-{
- if (m_cols.bitmap == m_bitbuf) // no my_malloc happened
- m_cols.bitmap= 0; // so no my_free in my_bitmap_free
- my_bitmap_free(&m_cols); // To pair with my_bitmap_init().
- my_free(m_rows_buf);
-}
-
-
-int Old_rows_log_event::get_data_size()
-{
- uchar buf[MAX_INT_WIDTH];
- uchar *end= net_store_length(buf, (m_width + 7) / 8);
-
- DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
- return (int)(6 + no_bytes_in_map(&m_cols) + (end - buf) +
- m_rows_cur - m_rows_buf););
- int data_size= ROWS_HEADER_LEN;
- data_size+= no_bytes_in_map(&m_cols);
- data_size+= (uint) (end - buf);
-
- data_size+= (uint) (m_rows_cur - m_rows_buf);
- return data_size;
-}
-
-
-#ifndef MYSQL_CLIENT
-int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length)
-{
- /*
- When the table has a primary key, we would probably want, by default, to
- log only the primary key value instead of the entire "before image". This
- would save binlog space. TODO
- */
- DBUG_ENTER("Old_rows_log_event::do_add_row_data");
- DBUG_PRINT("enter", ("row_data: %p length: %zu",row_data,
- length));
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_DUMP("row_data", row_data, MY_MIN(length, 32));
-#endif
-
- DBUG_ASSERT(m_rows_buf <= m_rows_cur);
- DBUG_ASSERT(!m_rows_buf || (m_rows_end && m_rows_buf < m_rows_end));
- DBUG_ASSERT(m_rows_cur <= m_rows_end);
-
- /* The cast will always work since m_rows_cur <= m_rows_end */
- if (static_cast<size_t>(m_rows_end - m_rows_cur) <= length)
- {
- size_t const block_size= 1024;
- my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf;
- my_ptrdiff_t const new_alloc=
- block_size * ((cur_size + length + block_size - 1) / block_size);
-
- uchar* const new_buf= (uchar*)my_realloc(key_memory_log_event_old, (uchar*)m_rows_buf, (uint) new_alloc,
- MYF(MY_ALLOW_ZERO_PTR|MY_WME));
- if (unlikely(!new_buf))
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
-
- /* If the memory moved, we need to move the pointers */
- if (new_buf != m_rows_buf)
- {
- m_rows_buf= new_buf;
- m_rows_cur= m_rows_buf + cur_size;
- }
-
- /*
- The end pointer should always be changed to point to the end of
- the allocated memory.
- */
- m_rows_end= m_rows_buf + new_alloc;
- }
-
- DBUG_ASSERT(m_rows_cur + length <= m_rows_end);
- memcpy(m_rows_cur, row_data, length);
- m_rows_cur+= length;
- m_row_count++;
- DBUG_RETURN(0);
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-int Old_rows_log_event::do_apply_event(rpl_group_info *rgi)
-{
- DBUG_ENTER("Old_rows_log_event::do_apply_event(Relay_log_info*)");
- int error= 0;
- Relay_log_info const *rli= rgi->rli;
-
- /*
- If m_table_id == ~0UL, then we have a dummy event that does not
- contain any data. In that case, we just remove all tables in the
- tables_to_lock list, close the thread tables, and return with
- success.
- */
- if (m_table_id == ~0UL)
- {
- /*
- This one is supposed to be set: just an extra check so that
- nothing strange has happened.
- */
- DBUG_ASSERT(get_flags(STMT_END_F));
-
- rgi->slave_close_thread_tables(thd);
- thd->clear_error();
- DBUG_RETURN(0);
- }
-
- /*
- 'thd' has been set by exec_relay_log_event(), just before calling
- do_apply_event(). We still check here to prevent future coding
- errors.
- */
- DBUG_ASSERT(rgi->thd == thd);
-
- /*
- If no locks have been taken, this is the first binrow event seen
- after the table map events. We should then lock all the tables
- used in the transaction and proceed with execution of the actual
- event.
- */
- if (!thd->lock)
- {
- /*
- lock_tables() reads the contents of thd->lex, so they must be
- initialized. Contrary to
- Table_map_log_event::do_apply_event(), we don't call
- mysql_init_query() as that may reset the binlog format.
- */
- lex_start(thd);
-
- if (unlikely((error= lock_tables(thd, rgi->tables_to_lock,
- rgi->tables_to_lock_count, 0))))
- {
- if (thd->is_slave_error || thd->is_fatal_error)
- {
- /*
- Error reporting borrowed from Query_log_event with many excessive
- simplifications (we don't honour --slave-skip-errors)
- */
- uint actual_error= thd->net.last_errno;
- rli->report(ERROR_LEVEL, actual_error, NULL,
- "Error '%s' in %s event: when locking tables",
- (actual_error ? thd->net.last_error :
- "unexpected success or fatal error"),
- get_type_str());
- thd->is_fatal_error= 1;
- }
- else
- {
- rli->report(ERROR_LEVEL, error, NULL,
- "Error in %s event: when locking tables",
- get_type_str());
- }
- rgi->slave_close_thread_tables(thd);
- DBUG_RETURN(error);
- }
-
- /*
- When the open and locking succeeded, we check all tables to
- ensure that they still have the correct type.
- */
-
- {
- TABLE_LIST *table_list_ptr= rgi->tables_to_lock;
- for (uint i=0; table_list_ptr&& (i< rgi->tables_to_lock_count);
- table_list_ptr= static_cast<RPL_TABLE_LIST*>(table_list_ptr->next_global), i++)
- {
- /*
- Please see the comment in Rows_log_event::do_apply_event() in log_event.cc
- for an explanation of the if condition below.
- */
- if (table_list_ptr->parent_l)
- continue;
- /*
- We can use a down cast here since we know that every table added
- to the tables_to_lock is a RPL_TABLE_LIST (or child table which is
- skipped above).
- */
- RPL_TABLE_LIST *ptr=static_cast<RPL_TABLE_LIST*>(table_list_ptr);
- TABLE *conv_table;
- if (ptr->m_tabledef.compatible_with(thd, rgi, ptr->table, &conv_table))
- {
- thd->is_slave_error= 1;
- rgi->slave_close_thread_tables(thd);
- DBUG_RETURN(ERR_BAD_TABLE_DEF);
- }
- ptr->m_conv_table= conv_table;
- }
- }
-
- /*
- ... and then we add all the tables to the table map but keep
- them in the tables to lock list.
-
- We also invalidate the query cache for all the tables, since
- they will now be changed.
-
- TODO [/Matz]: Maybe the query cache should not be invalidated
- here? It might be that a table is not changed, even though it
- was locked for the statement. We do know that each
- Old_rows_log_event contains at least one row, so after processing one
- Old_rows_log_event, we can invalidate the query cache for the
- associated table.
- */
- for (TABLE_LIST *ptr= rgi->tables_to_lock ; ptr ; ptr= ptr->next_global)
- {
- rgi->m_table_map.set_table(ptr->table_id, ptr->table);
- }
-#ifdef HAVE_QUERY_CACHE
- query_cache.invalidate_locked_for_write(thd, rgi->tables_to_lock);
-#endif
- }
-
- TABLE*
- table=
- m_table= rgi->m_table_map.get_table(m_table_id);
-
- if (table)
- {
- /*
- table == NULL means that this table should not be replicated
- (this was set up by Table_map_log_event::do_apply_event()
- which tested replicate-* rules).
- */
-
- /*
- It's not strictly needed to call set_time(), but
- 1) it continues the property that "Time" in SHOW PROCESSLIST shows how
- much the slave is behind
- 2) it will be needed when we allow replication from a table with no
- TIMESTAMP column to a table with one.
- So we call set_time(), like in SBR. Presently it changes nothing.
- */
- thd->set_time(when, when_sec_part);
- /*
- There are a few flags that are replicated with each row event.
- Make sure to set/clear them before executing the main body of
- the event.
- */
- if (get_flags(NO_FOREIGN_KEY_CHECKS_F))
- thd->variables.option_bits|= OPTION_NO_FOREIGN_KEY_CHECKS;
- else
- thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
-
- if (get_flags(RELAXED_UNIQUE_CHECKS_F))
- thd->variables.option_bits|= OPTION_RELAXED_UNIQUE_CHECKS;
- else
- thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
- /* A small test to verify that objects have consistent types */
- DBUG_ASSERT(sizeof(thd->variables.option_bits) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
-
- if ( m_width == table->s->fields && bitmap_is_set_all(&m_cols))
- set_flags(COMPLETE_ROWS_F);
-
- /*
- Set tables write and read sets.
-
- Read_set contains all slave columns (in case we are going to fetch
- a complete record from the slave).
-
- Write_set equals the m_cols bitmap sent from the master, but it can be
- longer if the slave has extra columns.
- */
-
- DBUG_PRINT_BITSET("debug", "Setting table's write_set from: %s", &m_cols);
-
- bitmap_set_all(table->read_set);
- bitmap_set_all(table->write_set);
- if (!get_flags(COMPLETE_ROWS_F))
- bitmap_intersect(table->write_set,&m_cols);
- table->rpl_write_set= table->write_set;
-
- // Do event specific preparations
-
- error= do_before_row_operations(rli);
-
- // row processing loop
-
- while (error == 0 && m_curr_row < m_rows_end)
- {
- /* in_use can have been set to NULL in close_tables_for_reopen */
- THD* old_thd= table->in_use;
- if (!table->in_use)
- table->in_use= thd;
-
- error= do_exec_row(rgi);
-
- DBUG_PRINT("info", ("error: %d", error));
- DBUG_ASSERT(error != HA_ERR_RECORD_DELETED);
-
- table->in_use = old_thd;
- switch (error)
- {
- case 0:
- break;
-
- /* Some recoverable errors */
- case HA_ERR_RECORD_CHANGED:
- case HA_ERR_KEY_NOT_FOUND: /* Idempotency support: OK if
- tuple does not exist */
- error= 0;
- break;
-
- default:
- rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
- "Error in %s event: row application failed. %s",
- get_type_str(), thd->net.last_error);
- thd->is_slave_error= 1;
- break;
- }
-
- /*
- If m_curr_row_end was not set during event execution (e.g., because
- of errors) we can't proceed to the next row. If the error is transient
- (i.e., error==0 at this point) we must call unpack_current_row() to set
- m_curr_row_end.
- */
-
- DBUG_PRINT("info", ("error: %d", error));
- DBUG_PRINT("info", ("curr_row: %p; curr_row_end:%p; rows_end: %p",
- m_curr_row, m_curr_row_end, m_rows_end));
-
- if (!m_curr_row_end && likely(!error))
- unpack_current_row(rgi);
-
- // at this moment m_curr_row_end should be set
- DBUG_ASSERT(error || m_curr_row_end != NULL);
- DBUG_ASSERT(error || m_curr_row < m_curr_row_end);
- DBUG_ASSERT(error || m_curr_row_end <= m_rows_end);
-
- m_curr_row= m_curr_row_end;
-
- } // row processing loop
-
- DBUG_EXECUTE_IF("stop_slave_middle_group",
- const_cast<Relay_log_info*>(rli)->abort_slave= 1;);
- error= do_after_row_operations(rli, error);
- } // if (table)
-
- if (unlikely(error))
- { /* error has occurred during the transaction */
- rli->report(ERROR_LEVEL, thd->net.last_errno, NULL,
- "Error in %s event: error during transaction execution "
- "on table %s.%s. %s",
- get_type_str(), table->s->db.str,
- table->s->table_name.str,
- thd->net.last_error);
-
- /*
- If one day we honour --skip-slave-errors in row-based replication, and
- the error should be skipped, then we would clear mappings, rollback,
- close tables, but the slave SQL thread would not stop and then may
- assume the mapping is still available, the tables are still open...
- So then we should clear mappings/rollback/close here only if this is a
- STMT_END_F.
- For now we proceed, knowing that the error is not skippable and so the
- slave SQL thread is certainly going to stop.
- Rollback is done at the caller, along with SBR.
- */
- thd->reset_current_stmt_binlog_format_row();
- rgi->cleanup_context(thd, error);
- thd->is_slave_error= 1;
- DBUG_RETURN(error);
- }
-
- /*
- This code would ideally be placed in do_update_pos(), but since we
- have no access to the table there, we set last_event_start_time
- here instead.
- */
- if (table && (table->s->primary_key == MAX_KEY) &&
- !use_trans_cache() && get_flags(STMT_END_F) == RLE_NO_FLAGS)
- {
- /*
- ------------ Temporary fix until WL#2975 is implemented ---------
-
- This event is not the last one (no STMT_END_F). If we stop now
- (in case of terminate_slave_thread()), how will we restart? We
- have to restart from Table_map_log_event, but as this table is
- not transactional, the rows already inserted will still be
- present, and idempotency is not guaranteed (no PK) so we risk
- that repeating leads to a double insert. So we desperately try to
- continue, hoping we'll eventually leave this buggy situation (by
- executing the final Old_rows_log_event). If we are in a hopeless
- wait (we reached the end of the last relay log and nothing gets appended
- there), we time out after one minute and notify the DBA about the
- problem. When WL#2975 is implemented, just remove the member
- Relay_log_info::last_event_start_time and all its occurrences.
- */
- rgi->last_event_start_time= my_time(0);
- }
-
- if (get_flags(STMT_END_F))
- {
- /*
- This is the end of a statement or transaction, so close (and
- unlock) the tables we opened when processing the
- Table_map_log_event starting the statement.
-
- OBSERVER. This will clear *all* mappings, not only those that
- are open for the table. There is no good handle for on-close
- actions for tables.
-
- NOTE. Even if we have no table ('table' == 0) we still need to be
- here, so that we increase the group relay log position. If we didn't, we
- could have a group relay log position which lags behind "forever"
- (assume the last master's transaction is ignored by the slave because of
- replicate-ignore rules).
- */
- int binlog_error= thd->binlog_flush_pending_rows_event(TRUE);
-
- /*
- If this event is not in a transaction, the call below will, if some
- transactional storage engines are involved, commit the statement into
- them and flush the pending event to the binlog.
- If this event is in a transaction, the call will do nothing, but a
- Xid_log_event will come next which will, if some transactional engines
- are involved, commit the transaction and flush the pending event to the
- binlog.
- If there was a deadlock, the transaction should have been rolled back
- already, so there should be no need to roll back the transaction.
- */
- DBUG_ASSERT(! thd->transaction_rollback_request);
- if (unlikely((error= (binlog_error ?
- trans_rollback_stmt(thd) :
- trans_commit_stmt(thd)))))
- rli->report(ERROR_LEVEL, error, NULL,
- "Error in %s event: commit of row events failed, "
- "table `%s`.`%s`",
- get_type_str(), m_table->s->db.str,
- m_table->s->table_name.str);
- error|= binlog_error;
-
- /*
- Now what if this is not a transactional engine? We still need to
- flush the pending event to the binlog; we did it with
- thd->binlog_flush_pending_rows_event(). Note that we imitate
- what is done for real queries: a call to
- ha_autocommit_or_rollback() (sometimes only if it involves a
- transactional engine), and a call to be sure to have the pending
- event flushed.
- */
-
- thd->reset_current_stmt_binlog_format_row();
- rgi->cleanup_context(thd, 0);
- }
-
- DBUG_RETURN(error);
-}
-
-
-Log_event::enum_skip_reason
-Old_rows_log_event::do_shall_skip(rpl_group_info *rgi)
-{
- /*
- If the slave skip counter is 1 and this event does not end a
- statement, then we should not start executing on the next event.
- Otherwise, we defer the decision to the normal skipping logic.
- */
- if (rgi->rli->slave_skip_counter == 1 && !get_flags(STMT_END_F))
- return Log_event::EVENT_SKIP_IGNORE;
- else
- return Log_event::do_shall_skip(rgi);
-}
-
-int
-Old_rows_log_event::do_update_pos(rpl_group_info *rgi)
-{
- Relay_log_info *rli= rgi->rli;
- int error= 0;
- DBUG_ENTER("Old_rows_log_event::do_update_pos");
-
- DBUG_PRINT("info", ("flags: %s",
- get_flags(STMT_END_F) ? "STMT_END_F " : ""));
-
- if (get_flags(STMT_END_F))
- {
- /*
- Indicate that a statement is finished.
- Step the group log position if we are not in a transaction,
- otherwise increase the event log position.
- */
- error= rli->stmt_done(log_pos, thd, rgi);
- /*
- Clear any errors in thd->net.last_err*. It is not known if this is
- needed or not. It is believed that any errors that may exist in
- thd->net.last_err* are allowed. Examples of errors are "key not
- found", which is produced in the test case rpl_row_conflicts.test
- */
- thd->clear_error();
- }
- else
- {
- rgi->inc_event_relay_log_pos();
- }
-
- DBUG_RETURN(error);
-}
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
-
-#ifndef MYSQL_CLIENT
-bool Old_rows_log_event::write_data_header()
-{
- uchar buf[ROWS_HEADER_LEN]; // No need to init the buffer
-
- // This method should not be reached.
- assert(0);
-
- DBUG_ASSERT(m_table_id != ~0UL);
- DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master",
- {
- int4store(buf + 0, m_table_id);
- int2store(buf + 4, m_flags);
- return write_data(buf, 6);
- });
- int6store(buf + RW_MAPID_OFFSET, (ulonglong)m_table_id);
- int2store(buf + RW_FLAGS_OFFSET, m_flags);
- return write_data(buf, ROWS_HEADER_LEN);
-}
-
-
-bool Old_rows_log_event::write_data_body()
-{
- /*
- Note that this should be the number of *bits*, not the number of
- bytes.
- */
- uchar sbuf[MAX_INT_WIDTH];
- my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf;
-
- // This method should not be reached.
- assert(0);
-
- bool res= false;
- uchar *const sbuf_end= net_store_length(sbuf, (size_t) m_width);
- DBUG_ASSERT(static_cast<size_t>(sbuf_end - sbuf) <= sizeof(sbuf));
-
- DBUG_DUMP("m_width", sbuf, (size_t) (sbuf_end - sbuf));
- res= res || write_data(sbuf, (size_t) (sbuf_end - sbuf));
-
- DBUG_DUMP("m_cols", (uchar*) m_cols.bitmap, no_bytes_in_map(&m_cols));
- res= res || write_data((uchar*)m_cols.bitmap, no_bytes_in_map(&m_cols));
- DBUG_DUMP("rows", m_rows_buf, data_size);
- res= res || write_data(m_rows_buf, (size_t) data_size);
-
- return res;
-
-}
-#endif
-
-
-#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
-void Old_rows_log_event::pack_info(Protocol *protocol)
-{
- char buf[256];
- char const *const flagstr=
- get_flags(STMT_END_F) ? " flags: STMT_END_F" : "";
- size_t bytes= my_snprintf(buf, sizeof(buf),
- "table_id: %lu%s", m_table_id, flagstr);
- protocol->store(buf, bytes, &my_charset_bin);
-}
-#endif
-
-
-#ifdef MYSQL_CLIENT
-/* Method duplicates Rows_log_event's one */
-bool Old_rows_log_event::print_helper(FILE *file,
- PRINT_EVENT_INFO *print_event_info,
- char const *const name)
-{
- IO_CACHE *const head= &print_event_info->head_cache;
- IO_CACHE *const body= &print_event_info->body_cache;
- IO_CACHE *const tail= &print_event_info->tail_cache;
- bool do_print_encoded=
- print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS &&
- print_event_info->base64_output_mode != BASE64_OUTPUT_NEVER &&
- !print_event_info->short_form;
-
- if (!print_event_info->short_form)
- {
- if (print_header(head, print_event_info, !do_print_encoded) ||
- my_b_printf(head, "\t%s: table id %lu%s\n",
- name, m_table_id,
- do_print_encoded ? " flags: STMT_END_F" : "") ||
- print_base64(body, print_event_info, do_print_encoded))
- goto err;
- }
-
- if (get_flags(STMT_END_F))
- {
- if (copy_event_cache_to_file_and_reinit(head, file) ||
- copy_cache_to_file_wrapped(body, file, do_print_encoded,
- print_event_info->delimiter,
- print_event_info->verbose) ||
- copy_event_cache_to_file_and_reinit(tail, file))
- goto err;
- }
- return 0;
-err:
- return 1;
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-/**
- Write the current row into event's table.
-
- The row is located in the row buffer, pointed to by the @c m_curr_row member.
- The number of columns in the row is stored in the @c m_width member (it can be
- different from the number of columns in the table to which we insert).
- Bitmap @c m_cols indicates which columns are present in the row. It is assumed
- that the event's table is already open and pointed to by @c m_table.
-
- If the same record already exists in the table, it is either overwritten or
- an error is reported, depending on the value of the @c overwrite flag
- (error reporting not yet implemented). Note that the matching record can be
- different from the row we insert if we use primary keys to identify records in
- the table.
-
- The row to be inserted can contain values only for selected columns. The
- missing columns are filled with default values using the @c prepare_record()
- function. If a matching record is found in the table and @c overwrite is
- true, the missing columns are taken from it.
-
- @param rgi Relay group info (needed for row unpacking).
- @param overwrite
- Shall we overwrite if the row already exists or signal
- error (currently ignored).
-
- @returns Error code on failure, 0 on success.
-
- This method, if successful, sets @c m_curr_row_end pointer to point at the
- next row in the rows buffer. This is done when unpacking the row to be
- inserted.
-
- @note If a matching record is found, it is either updated using
- @c ha_update_row() or first deleted and then new record written.
-*/
-
-int
-Old_rows_log_event::write_row(rpl_group_info *rgi, const bool overwrite)
-{
- DBUG_ENTER("write_row");
- DBUG_ASSERT(m_table != NULL && thd != NULL);
-
- TABLE *table= m_table; // pointer to event's table
- int error;
- int keynum;
- auto_afree_ptr<char> key(NULL);
-
- /* fill table->record[0] with default values */
-
- if (unlikely((error=
- prepare_record(table, m_width,
- TRUE /* check if columns have def. values */))))
- DBUG_RETURN(error);
-
- /* unpack row into table->record[0] */
- if ((error= unpack_current_row(rgi)))
- DBUG_RETURN(error);
-
-#ifndef DBUG_OFF
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
- DBUG_PRINT_BITSET("debug", "write_set = %s", table->write_set);
- DBUG_PRINT_BITSET("debug", "read_set = %s", table->read_set);
-#endif
-
- /*
- Try to write record. If a corresponding record already exists in the table,
- we try to change it using ha_update_row() if possible. Otherwise we delete
- it and repeat the whole process again.
-
- TODO: Add safety measures against infinite looping.
- */
-
- while (unlikely(error= table->file->ha_write_row(table->record[0])))
- {
- if (error == HA_ERR_LOCK_DEADLOCK || error == HA_ERR_LOCK_WAIT_TIMEOUT)
- {
- table->file->print_error(error, MYF(0)); /* to check at exec_relay_log_event */
- DBUG_RETURN(error);
- }
- if (unlikely((keynum= table->file->get_dup_key(error)) < 0))
- {
- DBUG_PRINT("info",("Can't locate duplicate key (get_dup_key returns %d)",keynum));
- table->file->print_error(error, MYF(0));
- /*
- We failed to retrieve the duplicate key
- - either because the error was not a "duplicate key" error
- - or because the information about which key it was is not available
- */
- DBUG_RETURN(error);
- }
-
- /*
- We need to retrieve the old row into record[1] to be able to
- either update or delete the offending record. We either:
-
- - use rnd_pos() with a row-id (available as dup_ref) to locate the
- offending row, if that is possible (MyISAM and Blackhole), or else
-
- - use index_read_idx() with the key that is duplicated, to
- retrieve the offending row.
- */
- if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
- {
- DBUG_PRINT("info",("Locating offending record using rnd_pos()"));
- error= table->file->ha_rnd_pos(table->record[1], table->file->dup_ref);
- if (unlikely(error))
- {
- DBUG_PRINT("info",("rnd_pos() returns error %d",error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- }
- else
- {
- DBUG_PRINT("info",("Locating offending record using index_read_idx()"));
-
- if (table->file->extra(HA_EXTRA_FLUSH_CACHE))
- {
- DBUG_PRINT("info",("Error when setting HA_EXTRA_FLUSH_CACHE"));
- DBUG_RETURN(my_errno);
- }
-
- if (key.get() == NULL)
- {
- key.assign(static_cast<char*>(my_alloca(table->s->max_unique_length)));
- if (unlikely(key.get() == NULL))
- {
- DBUG_PRINT("info",("Can't allocate key buffer"));
- DBUG_RETURN(ENOMEM);
- }
- }
-
- key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
- 0);
- error= table->file->ha_index_read_idx_map(table->record[1], keynum,
- (const uchar*)key.get(),
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT);
- if (unlikely(error))
- {
- DBUG_PRINT("info",("index_read_idx() returns error %d", error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- }
-
- /*
- Now, record[1] should contain the offending row. That
- will enable us to update it or, alternatively, delete it (so
- that we can insert the new row afterwards).
- */
-
- /*
- If the row is incomplete we will use the record found to fill in
- the missing columns.
- */
- if (!get_flags(COMPLETE_ROWS_F))
- {
- restore_record(table,record[1]);
- error= unpack_current_row(rgi);
- }
-
-#ifndef DBUG_OFF
- DBUG_PRINT("debug",("preparing for update: before and after image"));
- DBUG_DUMP("record[1] (before)", table->record[1], table->s->reclength);
- DBUG_DUMP("record[0] (after)", table->record[0], table->s->reclength);
-#endif
-
- /*
- REPLACE is defined as either INSERT or DELETE + INSERT. If
- possible, we can replace it with an UPDATE, but that will not
- work on InnoDB if FOREIGN KEY checks are necessary.
-
- I (Matz) am not sure of the reason for the last_uniq_key()
- check, but I'm guessing that it's something along the
- following lines.
-
- Suppose that the duplicate key turns out to be a key that is not
- the last unique key for the table and we perform an update:
- then there might be another key for which the unique check will
- fail, so we're better off just deleting the row and inserting
- the correct row.
- */
- if (last_uniq_key(table, keynum) &&
- !table->file->referenced_by_foreign_key())
- {
- DBUG_PRINT("info",("Updating row using ha_update_row()"));
- error=table->file->ha_update_row(table->record[1],
- table->record[0]);
- switch (error) {
-
- case HA_ERR_RECORD_IS_THE_SAME:
- DBUG_PRINT("info",("ignoring HA_ERR_RECORD_IS_THE_SAME error from"
- " ha_update_row()"));
- error= 0;
-
- case 0:
- break;
-
- default:
- DBUG_PRINT("info",("ha_update_row() returns error %d",error));
- table->file->print_error(error, MYF(0));
- }
-
- DBUG_RETURN(error);
- }
- else
- {
- DBUG_PRINT("info",("Deleting offending row and trying to write new one again"));
- if (unlikely((error= table->file->ha_delete_row(table->record[1]))))
- {
- DBUG_PRINT("info",("ha_delete_row() returns error %d",error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
- /* Will retry ha_write_row() with the offending row removed. */
- }
- }
-
- DBUG_RETURN(error);
-}
-
-
-/**
- Locate the current row in event's table.
-
- The current row is pointed to by @c m_curr_row. Member @c m_width tells how many
- columns there are in the row (this can be different from the number of columns
- in the table). It is assumed that the event's table is already open and pointed
- to by @c m_table.
-
- If a corresponding record is found in the table it is stored in
- @c m_table->record[0]. Note that when the record is located based on a primary
- key, it is possible that the record found differs from the row being located.
-
- If no key is specified or the table does not have keys, a table scan is used to
- find the row. In that case the row should be complete and contain values for
- all columns. However, it can still be shorter than the table, i.e. the table
- can contain extra columns not present in the row. It is also possible that
- the table has fewer columns than the row being located.
-
- @returns Error code on failure, 0 on success.
-
- @post In case of success @c m_table->record[0] contains the record found.
- Also, the internal "cursor" of the table is positioned at the record found.
-
- @note If the engine allows random access of the records, a combination of
- @c position() and @c rnd_pos() will be used.
-
- Note that one MUST call ha_index_or_rnd_end() after this function if
- it returns 0 as we must leave the row position in the handler intact
- for any following update/delete command.
-*/
-
-int Old_rows_log_event::find_row(rpl_group_info *rgi)
-{
- DBUG_ENTER("find_row");
-
- DBUG_ASSERT(m_table && m_table->in_use != NULL);
-
- TABLE *table= m_table;
- int error;
-
- /* unpack row - missing fields get default values */
-
- // TODO: shall we check and report errors here?
- prepare_record(table, m_width, FALSE /* don't check errors */);
- error= unpack_current_row(rgi);
-
-#ifndef DBUG_OFF
- DBUG_PRINT("info",("looking for the following record"));
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
-#endif
-
- if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
- table->s->primary_key < MAX_KEY)
- {
- /*
- Use a more efficient method to fetch the record given by
- table->record[0] if the engine allows it. We first compute a
- row reference using the position() member function (it will be
- stored in table->file->ref) and the use rnd_pos() to position
- the "cursor" (i.e., record[0] in this case) at the correct row.
-
- TODO: Add a check that the correct record has been fetched by
- comparing with the original record. Take into account that the
- record on the master and slave can be of different
- length. Something along these lines should work:
-
- ADD>>> store_record(table,record[1]);
- int error= table->file->ha_rnd_pos(table->record[0], table->file->ref);
- ADD>>> DBUG_ASSERT(memcmp(table->record[1], table->record[0],
- table->s->reclength) == 0);
-
- */
- DBUG_PRINT("info",("locating record using primary key (position)"));
- int error= table->file->ha_rnd_pos_by_record(table->record[0]);
- if (unlikely(error))
- {
- DBUG_PRINT("info",("rnd_pos returns error %d",error));
- table->file->print_error(error, MYF(0));
- }
- DBUG_RETURN(error);
- }
-
- // We can't use position() - try other methods.
-
- /*
- We need to retrieve all fields.
- TODO: Move this out of this function to the main loop.
- */
- table->use_all_columns();
-
- /*
- Save a copy of the record in table->record[1]. It might be needed
- later if a linear search is used to find an exact match.
- */
- store_record(table,record[1]);
-
- if (table->s->keys > 0)
- {
- DBUG_PRINT("info",("locating record using primary key (index_read)"));
-
- /* We have a key: search the table using the index */
- if (!table->file->inited &&
- unlikely(error= table->file->ha_index_init(0, FALSE)))
- {
- DBUG_PRINT("info",("ha_index_init returns error %d",error));
- table->file->print_error(error, MYF(0));
- DBUG_RETURN(error);
- }
-
- /* Fill key data for the row */
-
- DBUG_ASSERT(m_key);
- key_copy(m_key, table->record[0], table->key_info, 0);
-
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_DUMP("key data", m_key, table->key_info->key_length);
-#endif
-
- /*
- We need to set the null bytes to ensure that the filler bits are
- all set when returning. There are storage engines that just set
- the necessary bits on the bytes and don't set the filler bits
- correctly.
- */
- my_ptrdiff_t const pos=
- table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
- table->record[0][pos]= 0xFF;
-
- if (unlikely((error= table->file->ha_index_read_map(table->record[0],
- m_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))))
- {
- DBUG_PRINT("info",("no record matching the key found in the table"));
- table->file->print_error(error, MYF(0));
- table->file->ha_index_end();
- DBUG_RETURN(error);
- }
-
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
-#ifndef HAVE_valgrind
- DBUG_PRINT("info",("found first matching record"));
- DBUG_DUMP("record[0]", table->record[0], table->s->reclength);
-#endif
- /*
- Below is a minor "optimization". If the key (i.e., key number
- 0) has the HA_NOSAME flag set, we know that we have found the
- correct record (since there can be no duplicates); otherwise, we
- have to compare the record with the one found to see if it is
- the correct one.
-
- CAVEAT! This behaviour is essential for the replication of,
- e.g., the mysql.proc table since the correct record *shall* be
- found using the primary key *only*. There shall be no
- comparison of non-PK columns to decide if the correct record is
- found. I can see no scenario where it would be incorrect to
- choose the row to change using only a PK or an UNNI.
- */
- if (table->key_info->flags & HA_NOSAME)
- {
- /* The unique key has no nullable part */
- if (!(table->key_info->flags & (HA_NULL_PART_KEY)))
- {
- DBUG_RETURN(0);
- }
- else
- {
- KEY *keyinfo= table->key_info;
- /*
- The unique key has a nullable part. We need to check whether any
- field in the BI image that is part of the unique key is null.
- */
- bool null_found= FALSE;
- for (uint i=0; i < keyinfo->user_defined_key_parts && !null_found; i++)
- {
- uint fieldnr= keyinfo->key_part[i].fieldnr - 1;
- Field **f= table->field+fieldnr;
- null_found= (*f)->is_null();
- }
-
- if (!null_found)
- {
- DBUG_RETURN(0);
- }
-
- /* else fall through to index scan */
- }
- }
-
- /*
- In case the key is not unique, we still have to iterate over the records found
- and find the one which is identical to the row given. A copy of the
- record we are looking for is stored in record[1].
- */
- DBUG_PRINT("info",("non-unique index, scanning it to find matching record"));
-
- while (record_compare(table))
- {
- while (unlikely(error= table->file->ha_index_next(table->record[0])))
- {
- DBUG_PRINT("info",("no record matching the given row found"));
- table->file->print_error(error, MYF(0));
- (void) table->file->ha_index_end();
- DBUG_RETURN(error);
- }
- }
- }
- else
- {
- DBUG_PRINT("info",("locating record using table scan (rnd_next)"));
-
- int restart_count= 0; // Number of times scanning has restarted from top
-
- /* We don't have a key: search the table using rnd_next() */
- if (unlikely((error= table->file->ha_rnd_init_with_error(1))))
- {
- DBUG_PRINT("info",("error initializing table scan"
- " (ha_rnd_init returns %d)",error));
- DBUG_RETURN(error);
- }
-
- /* Continue until we find the right record or have made a full loop */
- do
- {
- restart_rnd_next:
- error= table->file->ha_rnd_next(table->record[0]);
-
- switch (error) {
-
- case 0:
- break;
-
- case HA_ERR_END_OF_FILE:
- if (++restart_count < 2)
- {
- int error2;
- table->file->ha_rnd_end();
- if (unlikely((error2= table->file->ha_rnd_init_with_error(1))))
- DBUG_RETURN(error2);
- goto restart_rnd_next;
- }
- break;
-
- default:
- DBUG_PRINT("info", ("Failed to get next record"
- " (rnd_next returns %d)",error));
- table->file->print_error(error, MYF(0));
- table->file->ha_rnd_end();
- DBUG_RETURN(error);
- }
- }
- while (restart_count < 2 && record_compare(table));
-
- /*
- Note: the record_compare above will take into account all record fields,
- which might be incorrect in case a partial row was given in the event.
- */
-
- /*
- Have to restart the scan to be able to fetch the next row.
- */
- if (restart_count == 2)
- DBUG_PRINT("info", ("Record not found"));
- else
- DBUG_DUMP("record found", table->record[0], table->s->reclength);
- if (error)
- table->file->ha_rnd_end();
-
- DBUG_ASSERT(error == HA_ERR_END_OF_FILE || error == 0);
- DBUG_RETURN(error);
- }
-
- DBUG_RETURN(0);
-}
-
-#endif
-
-
-/**************************************************************************
- Write_rows_log_event member functions
-**************************************************************************/
-
-/*
- Constructor used to build an event for writing to the binary log.
- */
-#if !defined(MYSQL_CLIENT)
-Write_rows_log_event_old::Write_rows_log_event_old(THD *thd_arg,
- TABLE *tbl_arg,
- ulong tid_arg,
- MY_BITMAP const *cols,
- bool is_transactional)
- : Old_rows_log_event(thd_arg, tbl_arg, tid_arg, cols, is_transactional)
-{
-
- // This constructor should not be reached.
- assert(0);
-
-}
-#endif
-
-
-/*
- Constructor used by slave to read the event from the binary log.
- */
-#ifdef HAVE_REPLICATION
-Write_rows_log_event_old::Write_rows_log_event_old(const uchar *buf,
- uint event_len,
- const Format_description_log_event
- *description_event)
-: Old_rows_log_event(buf, event_len, PRE_GA_WRITE_ROWS_EVENT,
- description_event)
-{
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-int
-Write_rows_log_event_old::do_before_row_operations(const Slave_reporting_capability *const)
-{
- int error= 0;
-
- /*
- We are using REPLACE semantics and not INSERT IGNORE semantics
- when writing rows, that is: new rows replace old rows. We need to
- inform the storage engine that it should use this behaviour.
- */
-
- /* Tell the storage engine that we are using REPLACE semantics. */
- thd->lex->duplicates= DUP_REPLACE;
-
- thd->lex->sql_command= SQLCOM_REPLACE;
- /*
- Do not raise the error flag in case of hitting a unique attribute
- */
- m_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- m_table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- m_table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
- m_table->file->ha_start_bulk_insert(0);
- return error;
-}
-
-
-int
-Write_rows_log_event_old::do_after_row_operations(const Slave_reporting_capability *const,
- int error)
-{
- int local_error= 0;
- m_table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
- m_table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
- /*
- Resetting the extra with
- table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY);
- fires bug#27077.
- TODO: explain or fix.
- */
- if (unlikely((local_error= m_table->file->ha_end_bulk_insert())))
- {
- m_table->file->print_error(local_error, MYF(0));
- }
- return error? error : local_error;
-}
-
-
-int
-Write_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
-{
- DBUG_ASSERT(m_table != NULL);
- int error= write_row(rgi, TRUE /* overwrite */);
-
- if (unlikely(error) && !thd->net.last_errno)
- thd->net.last_errno= error;
-
- return error;
-}
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
-
-#ifdef MYSQL_CLIENT
-bool Write_rows_log_event_old::print(FILE *file,
- PRINT_EVENT_INFO* print_event_info)
-{
- return Old_rows_log_event::print_helper(file, print_event_info,
- "Write_rows_old");
-}
-#endif
-
-
-/**************************************************************************
- Delete_rows_log_event member functions
-**************************************************************************/
-
-/*
- Constructor used to build an event for writing to the binary log.
- */
-
-#ifndef MYSQL_CLIENT
-Delete_rows_log_event_old::Delete_rows_log_event_old(THD *thd_arg,
- TABLE *tbl_arg,
- ulong tid,
- MY_BITMAP const *cols,
- bool is_transactional)
- : Old_rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional),
- m_after_image(NULL), m_memory(NULL)
-{
-
- // This constructor should not be reached.
- assert(0);
-
-}
-#endif /* #if !defined(MYSQL_CLIENT) */
-
-
-/*
- Constructor used by slave to read the event from the binary log.
- */
-#ifdef HAVE_REPLICATION
-Delete_rows_log_event_old::
-Delete_rows_log_event_old(const uchar *buf,
- uint event_len,
- const Format_description_log_event
- *description_event)
- :Old_rows_log_event(buf, event_len, PRE_GA_DELETE_ROWS_EVENT,
- description_event),
- m_after_image(NULL), m_memory(NULL)
-{
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
-int Delete_rows_log_event_old::
-do_before_row_operations(const Slave_reporting_capability *const)
-{
- if ((m_table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION) &&
- m_table->s->primary_key < MAX_KEY)
- {
- /*
- We don't need to allocate any memory for m_key since it is not used.
- */
- return 0;
- }
-
- if (m_table->s->keys > 0)
- {
- // Allocate buffer for key searches
- m_key= (uchar*)my_malloc(key_memory_log_event_old, m_table->key_info->key_length, MYF(MY_WME));
- if (!m_key)
- return HA_ERR_OUT_OF_MEM;
- }
- return 0;
-}
-
-
-int
-Delete_rows_log_event_old::do_after_row_operations(const Slave_reporting_capability *const,
- int error)
-{
- /* error= ToDo: find out what this should really be; this triggers close_scan in ndb, returning error? */
- m_table->file->ha_index_or_rnd_end();
- my_free(m_key);
- m_key= NULL;
-
- return error;
-}
-
-
-int Delete_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
-{
- int error;
- DBUG_ASSERT(m_table != NULL);
-
- if (likely(!(error= find_row(rgi))) )
- {
- /*
- Delete the record found, located in record[0]
- */
- error= m_table->file->ha_delete_row(m_table->record[0]);
- m_table->file->ha_index_or_rnd_end();
- }
- return error;
-}
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
-
-#ifdef MYSQL_CLIENT
-bool Delete_rows_log_event_old::print(FILE *file,
- PRINT_EVENT_INFO* print_event_info)
-{
- return Old_rows_log_event::print_helper(file, print_event_info,
- "Delete_rows_old");
-}
-#endif
-
-
-/**************************************************************************
- Update_rows_log_event member functions
-**************************************************************************/
-
-/*
- Constructor used to build an event for writing to the binary log.
- */
-#if !defined(MYSQL_CLIENT)
-Update_rows_log_event_old::Update_rows_log_event_old(THD *thd_arg,
- TABLE *tbl_arg,
- ulong tid,
- MY_BITMAP const *cols,
- bool is_transactional)
- : Old_rows_log_event(thd_arg, tbl_arg, tid, cols, is_transactional),
- m_after_image(NULL), m_memory(NULL)
-{
-
- // This constructor should not be reached.
- assert(0);
-}
-#endif /* !defined(MYSQL_CLIENT) */
-
-
-/*
- Constructor used by slave to read the event from the binary log.
- */
-#ifdef HAVE_REPLICATION
-Update_rows_log_event_old::Update_rows_log_event_old(const uchar *buf,
- uint event_len,
- const
- Format_description_log_event
- *description_event)
- : Old_rows_log_event(buf, event_len, PRE_GA_UPDATE_ROWS_EVENT,
- description_event),
- m_after_image(NULL), m_memory(NULL)
-{
-}
-#endif
-
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
-int
-Update_rows_log_event_old::
-do_before_row_operations(const Slave_reporting_capability *const)
-{
- if (m_table->s->keys > 0)
- {
- // Allocate buffer for key searches
- m_key= (uchar*)my_malloc(key_memory_log_event_old,
- m_table->key_info->key_length, MYF(MY_WME));
- if (!m_key)
- return HA_ERR_OUT_OF_MEM;
- }
-
- return 0;
-}
-
-
-int
-Update_rows_log_event_old::
-do_after_row_operations(const Slave_reporting_capability *const, int error)
-{
- /* error= ToDo: find out what this should really be; this triggers close_scan in ndb, returning error? */
- m_table->file->ha_index_or_rnd_end();
- my_free(m_key); // Free the key buffer allocated with my_malloc
- m_key= NULL;
-
- return error;
-}
-
-
-int
-Update_rows_log_event_old::do_exec_row(rpl_group_info *rgi)
-{
- DBUG_ASSERT(m_table != NULL);
-
- int error= find_row(rgi);
- if (unlikely(error))
- {
- /*
- We need to read the second image in the event of an error, to be
- able to skip to the next pair of updates.
- */
- m_curr_row= m_curr_row_end;
- unpack_current_row(rgi);
- return error;
- }
-
- /*
- This is the situation after locating BI:
-
- ===|=== before image ====|=== after image ===|===
- ^ ^
- m_curr_row m_curr_row_end
-
- The BI (before image) found in the table is stored in record[0]. We copy
- it to record[1] and unpack the AI (after image) into record[0].
- */
-
- store_record(m_table,record[1]);
-
- m_curr_row= m_curr_row_end;
- error= unpack_current_row(rgi); // this also updates m_curr_row_end
-
- /*
- Now we have the right row to update. The old row (the one we're
- looking for) is in record[1] and the new row is in record[0].
- */
-#ifndef HAVE_valgrind
- /*
- Don't print debug messages when running valgrind since they can
- trigger false warnings.
- */
- DBUG_PRINT("info",("Updating row in table"));
- DBUG_DUMP("old record", m_table->record[1], m_table->s->reclength);
- DBUG_DUMP("new values", m_table->record[0], m_table->s->reclength);
-#endif
-
- error= m_table->file->ha_update_row(m_table->record[1], m_table->record[0]);
- m_table->file->ha_index_or_rnd_end();
-
- if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
- error= 0;
-
- return error;
-}
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
-
-#ifdef MYSQL_CLIENT
-bool Update_rows_log_event_old::print(FILE *file,
- PRINT_EVENT_INFO* print_event_info)
-{
- return Old_rows_log_event::print_helper(file, print_event_info,
- "Update_rows_old");
-}
-#endif
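
The REPLACE-style conflict handling performed by the removed Old_rows_log_event::write_row() (try the insert; on a duplicate key, either update the conflicting row in place or delete it and retry) can be sketched with a minimal, self-contained C++ model. The Row and Table types below are hypothetical stand-ins for illustration only, not the storage-engine handler API used in the deleted code.

    // Hypothetical in-memory stand-ins for the handler API (illustration only).
    #include <map>
    #include <string>
    #include <cstdio>

    struct Row { int pk; std::string payload; };

    struct Table {
      std::map<int, Row> rows;          // a single unique "index" on pk
      bool can_update_in_place= true;   // stands in for last_uniq_key() &&
                                        // !referenced_by_foreign_key()
      int write_row(const Row &r)       // 0 = ok, 1 = duplicate key
      { return rows.emplace(r.pk, r).second ? 0 : 1; }
    };

    static int replace_row(Table &t, const Row &r)
    {
      int error;
      while ((error= t.write_row(r)) != 0) // duplicate key detected
      {
        if (t.can_update_in_place)
        {
          t.rows[r.pk]= r;               // analogous to ha_update_row()
          return 0;
        }
        t.rows.erase(r.pk);              // analogous to ha_delete_row(),
      }                                  // then retry the insert
      return error;
    }

    int main()
    {
      Table t;
      replace_row(t, {1, "first"});
      replace_row(t, {1, "second"});     // conflicts, resolved via in-place update
      std::printf("%s\n", t.rows[1].payload.c_str()); // prints "second"
      return 0;
    }
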
diff --git a/sql/log_event_old.h b/sql/log_event_old.h
deleted file mode 100644
index e5aaacec209..00000000000
--- a/sql/log_event_old.h
+++ /dev/null
@@ -1,569 +0,0 @@
-/* Copyright (c) 2007, 2013, Oracle and/or its affiliates.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
-
-#ifndef LOG_EVENT_OLD_H
-#define LOG_EVENT_OLD_H
-
-/*
- Need to include this file at the proper position of log_event.h
- */
-
-
-/**
- @file
-
- @brief This file contains classes handling old formats of row-based
- binlog events.
-*/
-/*
- Around 2007-10-31, I made these classes completely separated from
- the new classes (before, there was a complex class hierarchy
- involving multiple inheritance; see BUG#31581), by simply copying
- and pasting the entire contents of Rows_log_event into
- Old_rows_log_event and the entire contents of
- {Write|Update|Delete}_rows_log_event into
- {Write|Update|Delete}_rows_log_event_old. For clarity, I will keep
- the comments marking which code was cut-and-pasted for some time.
- With the classes collapsed into one, there is probably some
- redundancy (maybe some methods can be simplified and/or removed),
- but we keep them this way for now. /Sven
-*/
-
-/* These classes are based on the v1 rows event header length (ROWS_HEADER_LEN_V1) */
-#undef ROWS_HEADER_LEN
-#define ROWS_HEADER_LEN ROWS_HEADER_LEN_V1
-
-/**
- @class Old_rows_log_event
-
- Base class for the three types of row-based events
-  {Write|Update|Delete}_rows_log_event_old, with event type codes
- PRE_GA_{WRITE|UPDATE|DELETE}_ROWS_EVENT. These events are never
- created any more, except when reading a relay log created by an old
- server.
-*/
-class Old_rows_log_event : public Log_event
-{
- /********** BEGIN CUT & PASTE FROM Rows_log_event **********/
-public:
- /**
- Enumeration of the errors that can be returned.
- */
- enum enum_error
- {
- ERR_OPEN_FAILURE = -1, /**< Failure to open table */
- ERR_OK = 0, /**< No error */
- ERR_TABLE_LIMIT_EXCEEDED = 1, /**< No more room for tables */
- ERR_OUT_OF_MEM = 2, /**< Out of memory */
- ERR_BAD_TABLE_DEF = 3, /**< Table definition does not match */
-    ERR_RBR_TO_SBR = 4    /**< daisy-chaining RBR to SBR not allowed */
- };
-
- /*
- These definitions allow you to combine the flags into an
- appropriate flag set using the normal bitwise operators. The
- implicit conversion from an enum-constant to an integer is
- accepted by the compiler, which is then used to set the real set
- of flags.
- */
- enum enum_flag
- {
- /* Last event of a statement */
- STMT_END_F = (1U << 0),
-
- /* Value of the OPTION_NO_FOREIGN_KEY_CHECKS flag in thd->options */
- NO_FOREIGN_KEY_CHECKS_F = (1U << 1),
-
- /* Value of the OPTION_RELAXED_UNIQUE_CHECKS flag in thd->options */
- RELAXED_UNIQUE_CHECKS_F = (1U << 2),
-
- /**
-      Indicates that rows in this event are complete, that is, they contain
-      values for all columns of the table.
- */
- COMPLETE_ROWS_F = (1U << 3)
- };
-
- typedef uint16 flag_set;
-
- /* Special constants representing sets of flags */
- enum
- {
- RLE_NO_FLAGS = 0U
- };
-
- virtual ~Old_rows_log_event();
-
- void set_flags(flag_set flags_arg) { m_flags |= flags_arg; }
- void clear_flags(flag_set flags_arg) { m_flags &= ~flags_arg; }
- flag_set get_flags(flag_set flags_arg) const { return m_flags & flags_arg; }
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual void pack_info(Protocol *protocol);
-#endif
-
-#ifdef MYSQL_CLIENT
-  /* not for direct call; each derived class has its own ::print() */
- virtual bool print(FILE *file, PRINT_EVENT_INFO *print_event_info)= 0;
-#endif
-
-#ifndef MYSQL_CLIENT
- int add_row_data(uchar *data, size_t length)
- {
- return do_add_row_data(data,length);
- }
-#endif
-
- /* Member functions to implement superclass interface */
- virtual int get_data_size();
-
- MY_BITMAP const *get_cols() const { return &m_cols; }
- size_t get_width() const { return m_width; }
- ulong get_table_id() const { return m_table_id; }
-
-#ifndef MYSQL_CLIENT
- virtual bool write_data_header();
- virtual bool write_data_body();
- virtual const char *get_db() { return m_table->s->db.str; }
-#endif
- /*
- Check that malloc() succeeded in allocating memory for the rows
- buffer and the COLS vector. Checking that an Update_rows_log_event_old
- is valid is done in the Update_rows_log_event_old::is_valid()
- function.
- */
- virtual bool is_valid() const
- {
- return m_rows_buf && m_cols.bitmap;
- }
- bool is_part_of_group() { return 1; }
-
- uint m_row_count; /* The number of rows added to the event */
-
-protected:
- /*
- The constructors are protected since you're supposed to inherit
- this class, not create instances of this class.
- */
-#ifndef MYSQL_CLIENT
- Old_rows_log_event(THD*, TABLE*, ulong table_id,
- MY_BITMAP const *cols, bool is_transactional);
-#endif
- Old_rows_log_event(const uchar *row_data, uint event_len,
- Log_event_type event_type,
- const Format_description_log_event *description_event);
-
-#ifdef MYSQL_CLIENT
- bool print_helper(FILE *, PRINT_EVENT_INFO *, char const *const name);
-#endif
-
-#ifndef MYSQL_CLIENT
- virtual int do_add_row_data(uchar *data, size_t length);
-#endif
-
-#ifndef MYSQL_CLIENT
- TABLE *m_table; /* The table the rows belong to */
-#endif
- ulong m_table_id; /* Table ID */
- MY_BITMAP m_cols; /* Bitmap denoting columns available */
- ulong m_width; /* The width of the columns bitmap */
-
- ulong m_master_reclength; /* Length of record on master side */
-
- /* Bit buffers in the same memory as the class */
- uint32 m_bitbuf[128/(sizeof(uint32)*8)];
- uint32 m_bitbuf_ai[128/(sizeof(uint32)*8)];
-
- uchar *m_rows_buf; /* The rows in packed format */
- uchar *m_rows_cur; /* One-after the end of the data */
- uchar *m_rows_end; /* One-after the end of the allocated space */
-
- flag_set m_flags; /* Flags for row-level events */
-
- /* helper functions */
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- const uchar *m_curr_row; /* Start of the row being processed */
- const uchar *m_curr_row_end; /* One-after the end of the current row */
- uchar *m_key; /* Buffer to keep key value during searches */
-
- int find_row(rpl_group_info *);
- int write_row(rpl_group_info *, const bool);
-
- // Unpack the current row into m_table->record[0]
- int unpack_current_row(rpl_group_info *rgi)
- {
- DBUG_ASSERT(m_table);
- ASSERT_OR_RETURN_ERROR(m_curr_row < m_rows_end, HA_ERR_CORRUPT_EVENT);
- return ::unpack_row(rgi, m_table, m_width, m_curr_row, &m_cols,
- &m_curr_row_end, &m_master_reclength, m_rows_end);
- }
-#endif
-
-private:
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual int do_apply_event(rpl_group_info *rgi);
- virtual int do_update_pos(rpl_group_info *rgi);
- virtual enum_skip_reason do_shall_skip(rpl_group_info *rgi);
-
- /*
- Primitive to prepare for a sequence of row executions.
-
- DESCRIPTION
-
- Before doing a sequence of do_prepare_row() and do_exec_row()
- calls, this member function should be called to prepare for the
- entire sequence. Typically, this member function will allocate
- space for any buffers that are needed for the two member
- functions mentioned above.
-
- RETURN VALUE
-
- The member function will return 0 if all went OK, or a non-zero
- error code otherwise.
- */
- virtual
- int do_before_row_operations(const Slave_reporting_capability *const log) = 0;
-
- /*
- Primitive to clean up after a sequence of row executions.
-
- DESCRIPTION
-
- After doing a sequence of do_prepare_row() and do_exec_row(),
- this member function should be called to clean up and release
- any allocated buffers.
-
- The error argument, if non-zero, indicates an error which happened during
-    row processing before this function was called. In this case, even if the
-    function is successful, it should return the error code given in the argument.
- */
- virtual
- int do_after_row_operations(const Slave_reporting_capability *const log,
- int error) = 0;
-
- /*
- Primitive to do the actual execution necessary for a row.
-
- DESCRIPTION
- The member function will do the actual execution needed to handle a row.
- The row is located at m_curr_row. When the function returns,
- m_curr_row_end should point at the next row (one byte after the end
- of the current row).
-
- RETURN VALUE
- 0 if execution succeeded, 1 if execution failed.
-
- */
- virtual int do_exec_row(rpl_group_info *rgi) = 0;
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-
- /********** END OF CUT & PASTE FROM Rows_log_event **********/
- protected:
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-
- int do_apply_event(Old_rows_log_event*, rpl_group_info *rgi);
-
- /*
- Primitive to prepare for a sequence of row executions.
-
- DESCRIPTION
-
- Before doing a sequence of do_prepare_row() and do_exec_row()
- calls, this member function should be called to prepare for the
- entire sequence. Typically, this member function will allocate
- space for any buffers that are needed for the two member
- functions mentioned above.
-
- RETURN VALUE
-
- The member function will return 0 if all went OK, or a non-zero
- error code otherwise.
- */
- virtual int do_before_row_operations(TABLE *table) = 0;
-
- /*
- Primitive to clean up after a sequence of row executions.
-
- DESCRIPTION
-
- After doing a sequence of do_prepare_row() and do_exec_row(),
- this member function should be called to clean up and release
- any allocated buffers.
- */
- virtual int do_after_row_operations(TABLE *table, int error) = 0;
-
- /*
- Primitive to prepare for handling one row in a row-level event.
-
- DESCRIPTION
-
- The member function prepares for execution of operations needed for one
-    row in a row-level event by reading data from the buffer containing
- the row. No specific interpretation of the data is normally done here,
- since SQL thread specific data is not available: that data is made
- available for the do_exec function.
-
-    *row_end is set to point to the start of the next row, or NULL if the
-    preparation failed. Currently, preparation cannot fail, but don't rely
-    on this behavior.
-
- RETURN VALUE
- Error code, if something went wrong, 0 otherwise.
- */
- virtual int do_prepare_row(THD*, rpl_group_info*, TABLE*,
- uchar const *row_start,
- uchar const **row_end) = 0;
-
- /*
- Primitive to do the actual execution necessary for a row.
-
- DESCRIPTION
- The member function will do the actual execution needed to handle a row.
-
- RETURN VALUE
- 0 if execution succeeded, 1 if execution failed.
-
- */
- virtual int do_exec_row(TABLE *table) = 0;
-
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-};
-
-
-/**
- @class Write_rows_log_event_old
-
- Old class for binlog events that write new rows to a table (event
- type code PRE_GA_WRITE_ROWS_EVENT). Such events are never produced
- by this version of the server, but they may be read from a relay log
- created by an old server. New servers create events of class
- Write_rows_log_event (event type code WRITE_ROWS_EVENT) instead.
-*/
-class Write_rows_log_event_old : public Old_rows_log_event
-{
- /********** BEGIN CUT & PASTE FROM Write_rows_log_event **********/
-public:
-#if !defined(MYSQL_CLIENT)
- Write_rows_log_event_old(THD*, TABLE*, ulong table_id,
- MY_BITMAP const *cols, bool is_transactional);
-#endif
-#ifdef HAVE_REPLICATION
- Write_rows_log_event_old(const uchar *buf, uint event_len,
- const Format_description_log_event *description_event);
-#endif
-#if !defined(MYSQL_CLIENT)
- static bool binlog_row_logging_function(THD *thd, TABLE *table,
- bool is_transactional,
- const uchar *before_record
- __attribute__((unused)),
- const uchar *after_record)
- {
- return thd->binlog_write_row(table, is_transactional, after_record);
- }
-#endif
-
-private:
-#ifdef MYSQL_CLIENT
- bool print(FILE *file, PRINT_EVENT_INFO *print_event_info);
-#endif
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual int do_before_row_operations(const Slave_reporting_capability *const);
- virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
- virtual int do_exec_row(rpl_group_info *);
-#endif
- /********** END OF CUT & PASTE FROM Write_rows_log_event **********/
-
-public:
- enum
- {
- /* Support interface to THD::binlog_prepare_pending_rows_event */
- TYPE_CODE = PRE_GA_WRITE_ROWS_EVENT
- };
-
-private:
- virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- // use old definition of do_apply_event()
- virtual int do_apply_event(rpl_group_info *rgi)
- { return Old_rows_log_event::do_apply_event(this, rgi); }
-
- // primitives for old version of do_apply_event()
- virtual int do_before_row_operations(TABLE *table);
- virtual int do_after_row_operations(TABLE *table, int error);
- virtual int do_prepare_row(THD*, rpl_group_info*, TABLE*,
- uchar const *row_start, uchar const **row_end);
- virtual int do_exec_row(TABLE *table);
-
-#endif
-};
-
-
-/**
- @class Update_rows_log_event_old
-
-  Old class for binlog events that modify existing rows in a table
- (event type code PRE_GA_UPDATE_ROWS_EVENT). Such events are never
- produced by this version of the server, but they may be read from a
- relay log created by an old server. New servers create events of
- class Update_rows_log_event (event type code UPDATE_ROWS_EVENT)
- instead.
-*/
-class Update_rows_log_event_old : public Old_rows_log_event
-{
- /********** BEGIN CUT & PASTE FROM Update_rows_log_event **********/
-public:
-#ifndef MYSQL_CLIENT
- Update_rows_log_event_old(THD*, TABLE*, ulong table_id,
- MY_BITMAP const *cols,
- bool is_transactional);
-#endif
-
-#ifdef HAVE_REPLICATION
- Update_rows_log_event_old(const uchar *buf, uint event_len,
- const Format_description_log_event *description_event);
-#endif
-
-#if !defined(MYSQL_CLIENT)
- static bool binlog_row_logging_function(THD *thd, TABLE *table,
- bool is_transactional,
- MY_BITMAP *cols,
- uint fields,
- const uchar *before_record,
- const uchar *after_record)
- {
- return thd->binlog_update_row(table, is_transactional,
- before_record, after_record);
- }
-#endif
-
-protected:
-#ifdef MYSQL_CLIENT
- bool print(FILE *file, PRINT_EVENT_INFO *print_event_info);
-#endif
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual int do_before_row_operations(const Slave_reporting_capability *const);
- virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
- virtual int do_exec_row(rpl_group_info *);
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
- /********** END OF CUT & PASTE FROM Update_rows_log_event **********/
-
- uchar *m_after_image, *m_memory;
-
-public:
- enum
- {
- /* Support interface to THD::binlog_prepare_pending_rows_event */
- TYPE_CODE = PRE_GA_UPDATE_ROWS_EVENT
- };
-
-private:
- virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- // use old definition of do_apply_event()
- virtual int do_apply_event(rpl_group_info *rgi)
- { return Old_rows_log_event::do_apply_event(this, rgi); }
-
- // primitives for old version of do_apply_event()
- virtual int do_before_row_operations(TABLE *table);
- virtual int do_after_row_operations(TABLE *table, int error);
- virtual int do_prepare_row(THD*, rpl_group_info*, TABLE*,
- uchar const *row_start, uchar const **row_end);
- virtual int do_exec_row(TABLE *table);
-#endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
-};
-
-
-/**
- @class Delete_rows_log_event_old
-
- Old class for binlog events that delete existing rows from a table
- (event type code PRE_GA_DELETE_ROWS_EVENT). Such events are never
- produced by this version of the server, but they may be read from a
- relay log created by an old server. New servers create events of
- class Delete_rows_log_event (event type code DELETE_ROWS_EVENT)
- instead.
-*/
-class Delete_rows_log_event_old : public Old_rows_log_event
-{
-  /********** BEGIN CUT & PASTE FROM Delete_rows_log_event **********/
-public:
-#ifndef MYSQL_CLIENT
- Delete_rows_log_event_old(THD*, TABLE*, ulong,
- MY_BITMAP const *cols, bool is_transactional);
-#endif
-#ifdef HAVE_REPLICATION
- Delete_rows_log_event_old(const uchar *buf, uint event_len,
- const Format_description_log_event *description_event);
-#endif
-#if !defined(MYSQL_CLIENT)
- static bool binlog_row_logging_function(THD *thd, TABLE *table,
- bool is_transactional,
- MY_BITMAP *cols,
- uint fields,
- const uchar *before_record,
- const uchar *after_record
- __attribute__((unused)))
- {
- return thd->binlog_delete_row(table, is_transactional, before_record);
- }
-#endif
-
-protected:
-#ifdef MYSQL_CLIENT
- bool print(FILE *file, PRINT_EVENT_INFO *print_event_info);
-#endif
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- virtual int do_before_row_operations(const Slave_reporting_capability *const);
- virtual int do_after_row_operations(const Slave_reporting_capability *const,int);
- virtual int do_exec_row(rpl_group_info *);
-#endif
- /********** END CUT & PASTE FROM Delete_rows_log_event **********/
-
- uchar *m_after_image, *m_memory;
-
-public:
- enum
- {
- /* Support interface to THD::binlog_prepare_pending_rows_event */
- TYPE_CODE = PRE_GA_DELETE_ROWS_EVENT
- };
-
-private:
- virtual Log_event_type get_type_code() { return (Log_event_type)TYPE_CODE; }
-
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
- // use old definition of do_apply_event()
- virtual int do_apply_event(rpl_group_info *rgi)
- { return Old_rows_log_event::do_apply_event(this, rgi); }
-
- // primitives for old version of do_apply_event()
- virtual int do_before_row_operations(TABLE *table);
- virtual int do_after_row_operations(TABLE *table, int error);
- virtual int do_prepare_row(THD*, rpl_group_info*, TABLE*,
- uchar const *row_start, uchar const **row_end);
- virtual int do_exec_row(TABLE *table);
-#endif
-};
-
-
-#endif
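
The flag handling declared in the deleted header above (enum_flag plus the set_flags()/clear_flags()/get_flags() accessors) is a plain bit-mask idiom. A small self-contained sketch of how such a flag set is combined and tested; the Toy* names are invented for illustration and only STMT_END_F/NO_FOREIGN_KEY_CHECKS_F mirror the constants above:

typedef unsigned short flag_set;            // matches the uint16 typedef above
enum enum_flag { STMT_END_F= 1U << 0, NO_FOREIGN_KEY_CHECKS_F= 1U << 1 };

struct ToyRowsEvent
{
  flag_set m_flags= 0;
  void set_flags(flag_set f)           { m_flags|= f; }
  void clear_flags(flag_set f)         { m_flags&= ~f; }
  flag_set get_flags(flag_set f) const { return m_flags & f; }
};

// Usage: mark the last event of a statement, then test for it later.
//   ToyRowsEvent ev;
//   ev.set_flags(STMT_END_F | NO_FOREIGN_KEY_CHECKS_F);
//   if (ev.get_flags(STMT_END_F)) { /* flush the pending rows event */ }
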
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index 535a2cf93de..3910d910da1 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -354,37 +354,6 @@ inline bool unexpected_error_code(int unexpected_error)
}
}
-/*
- pretty_print_str()
-*/
-
-static void
-pretty_print_str(String *packet, const char *str, int len)
-{
- const char *end= str + len;
- packet->append(STRING_WITH_LEN("'"));
- while (str < end)
- {
- char c;
- switch ((c=*str++)) {
- case '\n': packet->append(STRING_WITH_LEN("\\n")); break;
- case '\r': packet->append(STRING_WITH_LEN("\\r")); break;
- case '\\': packet->append(STRING_WITH_LEN("\\\\")); break;
- case '\b': packet->append(STRING_WITH_LEN("\\b")); break;
- case '\t': packet->append(STRING_WITH_LEN("\\t")); break;
- case '\'': packet->append(STRING_WITH_LEN("\\'")); break;
- case 0 : packet->append(STRING_WITH_LEN("\\0")); break;
- default:
- packet->append(&c, 1);
- break;
- }
- }
- packet->append(STRING_WITH_LEN("'"));
-}
-#endif /* HAVE_REPLICATION */
-
-
-#if defined(HAVE_REPLICATION)
/**
Create a prefix for the temporary files that is to be used for
@@ -607,29 +576,17 @@ int Log_event::do_update_pos(rpl_group_info *rgi)
Relay_log_info *rli= rgi->rli;
DBUG_ENTER("Log_event::do_update_pos");
+ DBUG_ASSERT(rli);
DBUG_ASSERT(!rli->belongs_to_client());
+
/*
- rli is null when (as far as I (Guilhem) know) the caller is
- Load_log_event::do_apply_event *and* that one is called from
- Execute_load_log_event::do_apply_event. In this case, we don't
- do anything here ; Execute_load_log_event::do_apply_event will
- call Log_event::do_apply_event again later with the proper rli.
- Strictly speaking, if we were sure that rli is null only in the
- case discussed above, 'if (rli)' is useless here. But as we are
- not 100% sure, keep it for now.
-
- Matz: I don't think we will need this check with this refactoring.
+ In parallel execution, delay position update for the events that are
+ not part of event groups (format description, rotate, and such) until
+ the actual event execution reaches that point.
*/
- if (rli)
- {
- /*
- In parallel execution, delay position update for the events that are
- not part of event groups (format description, rotate, and such) until
- the actual event execution reaches that point.
- */
- if (!rgi->is_parallel_exec || is_group_event(get_type_code()))
- rli->stmt_done(log_pos, thd, rgi);
- }
+ if (!rgi->is_parallel_exec || is_group_event(get_type_code()))
+ rli->stmt_done(log_pos, thd, rgi);
+
DBUG_RETURN(0); // Cannot fail currently
}
@@ -1227,18 +1184,6 @@ bool Query_log_event::write()
int8store(start, table_map_for_update);
start+= 8;
}
- if (master_data_written != 0)
- {
- /*
- Q_MASTER_DATA_WRITTEN_CODE only exists in relay logs where the master
- has binlog_version<4 and the slave has binlog_version=4. See comment
- for master_data_written in log_event.h for details.
- */
- *start++= Q_MASTER_DATA_WRITTEN_CODE;
- int4store(start, master_data_written);
- start+= 4;
- }
-
if (thd && thd->need_binlog_invoker())
{
LEX_CSTRING user;
@@ -1434,7 +1379,6 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg,
lc_time_names_number(thd_arg->variables.lc_time_names->number),
charset_database_number(0),
table_map_for_update((ulonglong)thd_arg->table_map_for_update),
- master_data_written(0),
gtid_flags_extra(thd_arg->get_binlog_flags_for_alter()),
sa_seq_no(0)
{
@@ -2275,7 +2219,7 @@ compare_errors:
expected_error,
actual_error ? thd->get_stmt_da()->message() : "no error",
actual_error,
- print_slave_db_safe(db), query_arg);
+ safe_str(db), query_arg);
thd->is_slave_error= 1;
}
/*
@@ -2465,23 +2409,11 @@ Query_log_event::peek_is_commit_rollback(const uchar *event_start,
!memcmp(event_start + (event_len-9), "\0ROLLBACK", 9);
}
-#endif
-
-
-/**************************************************************************
- Start_log_event_v3 methods
-**************************************************************************/
-
-Start_log_event_v3::Start_log_event_v3()
- :Log_event(), created(0), binlog_version(BINLOG_VERSION),
- dont_set_created(0)
-{
- memcpy(server_version, ::server_version, ST_SERVER_VER_LEN);
-}
-
+/***************************************************************************
+ Format_description_log_event methods
+****************************************************************************/
-#if defined(HAVE_REPLICATION)
-void Start_log_event_v3::pack_info(Protocol *protocol)
+void Format_description_log_event::pack_info(Protocol *protocol)
{
char buf[12 + ST_SERVER_VER_LEN + 14 + 22], *pos;
pos= strmov(buf, "Server ver: ");
@@ -2490,115 +2422,14 @@ void Start_log_event_v3::pack_info(Protocol *protocol)
pos= int10_to_str(binlog_version, pos, 10);
protocol->store(buf, (uint) (pos-buf), &my_charset_bin);
}
-#endif
-
-
-bool Start_log_event_v3::write()
-{
- char buff[START_V3_HEADER_LEN];
- int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version);
- memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN);
- if (!dont_set_created)
- created= get_time(); // this sets when and when_sec_part as a side effect
- int4store(buff + ST_CREATED_OFFSET,created);
- return write_header(sizeof(buff)) ||
- write_data(buff, sizeof(buff)) ||
- write_footer();
-}
-
-
-#if defined(HAVE_REPLICATION)
-
-/**
-  Start_log_event_v3::do_apply_event()
-  The master started.
-
- IMPLEMENTATION
- - To handle the case where the master died without having time to write
- DROP TEMPORARY TABLE, DO RELEASE_LOCK (prepared statements' deletion is
- TODO), we clean up all temporary tables that we got, if we are sure we
- can (see below).
-
- @todo
- - Remove all active user locks.
- Guilhem 2003-06: this is true but not urgent: the worst it can cause is
- the use of a bit of memory for a user lock which will not be used
- anymore. If the user lock is later used, the old one will be released. In
- other words, no deadlock problem.
-*/
-
-int Start_log_event_v3::do_apply_event(rpl_group_info *rgi)
-{
- DBUG_ENTER("Start_log_event_v3::do_apply_event");
- int error= 0;
- Relay_log_info *rli= rgi->rli;
-
- switch (binlog_version)
- {
- case 3:
- case 4:
- /*
- This can either be 4.x (then a Start_log_event_v3 is only at master
- startup so we are sure the master has restarted and cleared his temp
- tables; the event always has 'created'>0) or 5.0 (then we have to test
- 'created').
- */
- if (created)
- {
- rli->close_temporary_tables();
-
- /*
- The following is only false if we get here with a BINLOG statement
- */
- if (rli->mi)
- cleanup_load_tmpdir(&rli->mi->cmp_connection_name);
- }
- break;
-
- /*
- Now the older formats; in that case load_tmpdir is cleaned up by the I/O
- thread.
- */
- case 1:
- if (strncmp(rli->relay_log.description_event_for_exec->server_version,
- "3.23.57",7) >= 0 && created)
- {
- /*
- Can distinguish, based on the value of 'created': this event was
- generated at master startup.
- */
- rli->close_temporary_tables();
- }
- /*
- Otherwise, can't distinguish a Start_log_event generated at
- master startup and one generated by master FLUSH LOGS, so cannot
- be sure temp tables have to be dropped. So do nothing.
- */
- break;
- default:
- /*
- This case is not expected. It can be either an event corruption or an
- unsupported binary log version.
- */
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
- ER_THD(thd, ER_SLAVE_FATAL_ERROR),
- "Binlog version not supported");
- DBUG_RETURN(1);
- }
- DBUG_RETURN(error);
-}
#endif /* defined(HAVE_REPLICATION) */
-/***************************************************************************
- Format_description_log_event methods
-****************************************************************************/
-
bool Format_description_log_event::write()
{
bool ret;
bool no_checksum;
/*
- We don't call Start_log_event_v3::write() because this would make 2
+    We don't split out a separate header write here because this would make 2
my_b_safe_write().
*/
uchar buff[START_V3_HEADER_LEN+1];
@@ -2720,9 +2551,8 @@ int Format_description_log_event::do_apply_event(rpl_group_info *rgi)
}
/*
- If this event comes from ourselves, there is no cleaning task to
- perform, we don't call Start_log_event_v3::do_apply_event()
- (this was just to update the log's description event).
+    If this event comes from ourselves, there is no cleanup to perform:
+    the event was written only to update the log's description event.
*/
if (server_id != (uint32) global_system_variables.server_id)
{
@@ -2735,7 +2565,24 @@ int Format_description_log_event::do_apply_event(rpl_group_info *rgi)
0, then 96, then jump to first really asked event (which is
>96). So this is ok.
*/
- ret= Start_log_event_v3::do_apply_event(rgi);
+ switch (binlog_version)
+ {
+ case 4:
+ if (created)
+ {
+ rli->close_temporary_tables();
+
+ /* The following is only false if we get here with a BINLOG statement */
+ if (rli->mi)
+ cleanup_load_tmpdir(&rli->mi->cmp_connection_name);
+ }
+ break;
+ default:
+ rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR,
+ ER_THD(thd, ER_SLAVE_FATAL_ERROR),
+ "Binlog version not supported");
+ ret= 1;
+ }
}
if (!ret)
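
With Start_log_event_v3 gone, the replacement switch above accepts only binlog_version 4 and reports ER_SLAVE_FATAL_ERROR otherwise. As a rough standalone illustration of that check, assuming the usual format-description layout also visible in the deleted write() code (a 2-byte little-endian binlog version at ST_BINLOG_VER_OFFSET, i.e. the start of the event body):

// Sketch only: read the binlog version from the start of a
// Format_description event body and reject pre-5.0 formats.
static bool fde_version_supported(const unsigned char *fde_body)
{
  unsigned version= fde_body[0] | (unsigned(fde_body[1]) << 8);
  return version == 4;     // v1/v3 relay logs are no longer applied
}
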
@@ -2804,566 +2651,6 @@ int Start_encryption_log_event::do_update_pos(rpl_group_info *rgi)
/**************************************************************************
- Load_log_event methods
-**************************************************************************/
-
-#if defined(HAVE_REPLICATION)
-bool Load_log_event::print_query(THD *thd, bool need_db, const char *cs,
- String *buf, my_off_t *fn_start,
- my_off_t *fn_end, const char *qualify_db)
-{
- if (need_db && db && db_len)
- {
- buf->append(STRING_WITH_LEN("use "));
- append_identifier(thd, buf, db, db_len);
- buf->append(STRING_WITH_LEN("; "));
- }
-
- buf->append(STRING_WITH_LEN("LOAD DATA "));
-
- if (is_concurrent)
- buf->append(STRING_WITH_LEN("CONCURRENT "));
-
- if (fn_start)
- *fn_start= buf->length();
-
- if (check_fname_outside_temp_buf())
- buf->append(STRING_WITH_LEN("LOCAL "));
- buf->append(STRING_WITH_LEN("INFILE '"));
- buf->append_for_single_quote(fname, fname_len);
- buf->append(STRING_WITH_LEN("' "));
-
- if (sql_ex.opt_flags & REPLACE_FLAG)
- buf->append(STRING_WITH_LEN("REPLACE "));
- else if (sql_ex.opt_flags & IGNORE_FLAG)
- buf->append(STRING_WITH_LEN("IGNORE "));
-
- buf->append(STRING_WITH_LEN("INTO"));
-
- if (fn_end)
- *fn_end= buf->length();
-
- buf->append(STRING_WITH_LEN(" TABLE "));
- if (qualify_db)
- {
- append_identifier(thd, buf, qualify_db, strlen(qualify_db));
- buf->append(STRING_WITH_LEN("."));
- }
- append_identifier(thd, buf, table_name, table_name_len);
-
- if (cs != NULL)
- {
- buf->append(STRING_WITH_LEN(" CHARACTER SET "));
- buf->append(cs, strlen(cs));
- }
-
- /* We have to create all optional fields as the default is not empty */
- buf->append(STRING_WITH_LEN(" FIELDS TERMINATED BY "));
- pretty_print_str(buf, sql_ex.field_term, sql_ex.field_term_len);
- if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG)
- buf->append(STRING_WITH_LEN(" OPTIONALLY "));
- buf->append(STRING_WITH_LEN(" ENCLOSED BY "));
- pretty_print_str(buf, sql_ex.enclosed, sql_ex.enclosed_len);
-
- buf->append(STRING_WITH_LEN(" ESCAPED BY "));
- pretty_print_str(buf, sql_ex.escaped, sql_ex.escaped_len);
-
- buf->append(STRING_WITH_LEN(" LINES TERMINATED BY "));
- pretty_print_str(buf, sql_ex.line_term, sql_ex.line_term_len);
- if (sql_ex.line_start_len)
- {
- buf->append(STRING_WITH_LEN(" STARTING BY "));
- pretty_print_str(buf, sql_ex.line_start, sql_ex.line_start_len);
- }
-
- if ((long) skip_lines > 0)
- {
- buf->append(STRING_WITH_LEN(" IGNORE "));
- buf->append_ulonglong(skip_lines);
- buf->append(STRING_WITH_LEN(" LINES "));
- }
-
- if (num_fields)
- {
- uint i;
- const char *field= fields;
- buf->append(STRING_WITH_LEN(" ("));
- for (i = 0; i < num_fields; i++)
- {
- if (i)
- {
- /*
-          Yes, the space and comma are reversed here. But this is mostly dead
- code, at most used when reading really old binlogs from old servers,
- so better just leave it as is...
- */
- buf->append(STRING_WITH_LEN(" ,"));
- }
- append_identifier(thd, buf, field, field_lens[i]);
- field+= field_lens[i] + 1;
- }
- buf->append(STRING_WITH_LEN(")"));
- }
- return 0;
-}
-
-
-void Load_log_event::pack_info(Protocol *protocol)
-{
- char query_buffer[1024];
- String query_str(query_buffer, sizeof(query_buffer), system_charset_info);
-
- query_str.length(0);
- print_query(protocol->thd, TRUE, NULL, &query_str, 0, 0, NULL);
- protocol->store(query_str.ptr(), query_str.length(), &my_charset_bin);
-}
-#endif /* defined(HAVE_REPLICATION) */
-
-
-bool Load_log_event::write_data_header()
-{
- char buf[LOAD_HEADER_LEN];
- int4store(buf + L_THREAD_ID_OFFSET, slave_proxy_id);
- int4store(buf + L_EXEC_TIME_OFFSET, exec_time);
- int4store(buf + L_SKIP_LINES_OFFSET, skip_lines);
- buf[L_TBL_LEN_OFFSET] = (char)table_name_len;
- buf[L_DB_LEN_OFFSET] = (char)db_len;
- int4store(buf + L_NUM_FIELDS_OFFSET, num_fields);
- return write_data(buf, LOAD_HEADER_LEN) != 0;
-}
-
-
-bool Load_log_event::write_data_body()
-{
- if (sql_ex.write_data(writer))
- return 1;
- if (num_fields && fields && field_lens)
- {
- if (write_data(field_lens, num_fields) ||
- write_data(fields, field_block_len))
- return 1;
- }
- return (write_data(table_name, table_name_len + 1) ||
- write_data(db, db_len + 1) ||
- write_data(fname, fname_len));
-}
-
-
-Load_log_event::Load_log_event(THD *thd_arg, const sql_exchange *ex,
- const char *db_arg, const char *table_name_arg,
- List<Item> &fields_arg,
- bool is_concurrent_arg,
- enum enum_duplicates handle_dup,
- bool ignore, bool using_trans)
- :Log_event(thd_arg,
- (thd_arg->used & THD::THREAD_SPECIFIC_USED)
- ? LOG_EVENT_THREAD_SPECIFIC_F : 0,
- using_trans),
- thread_id(thd_arg->thread_id),
- slave_proxy_id((ulong)thd_arg->variables.pseudo_thread_id),
- num_fields(0),fields(0),
- field_lens(0),field_block_len(0),
- table_name(table_name_arg ? table_name_arg : ""),
- db(db_arg), fname(ex->file_name), local_fname(FALSE),
- is_concurrent(is_concurrent_arg)
-{
- time_t end_time;
- time(&end_time);
- exec_time = (ulong) (end_time - thd_arg->start_time);
- /* db can never be a zero pointer in 4.0 */
- db_len = (uint32) strlen(db);
- table_name_len = (uint32) strlen(table_name);
- fname_len = (fname) ? (uint) strlen(fname) : 0;
- sql_ex.field_term = ex->field_term->ptr();
- sql_ex.field_term_len = (uint8) ex->field_term->length();
- sql_ex.enclosed = ex->enclosed->ptr();
- sql_ex.enclosed_len = (uint8) ex->enclosed->length();
- sql_ex.line_term = ex->line_term->ptr();
- sql_ex.line_term_len = (uint8) ex->line_term->length();
- sql_ex.line_start = ex->line_start->ptr();
- sql_ex.line_start_len = (uint8) ex->line_start->length();
- sql_ex.escaped = ex->escaped->ptr();
- sql_ex.escaped_len = (uint8) ex->escaped->length();
- sql_ex.opt_flags = 0;
- sql_ex.cached_new_format = -1;
-
- if (ex->dumpfile)
- sql_ex.opt_flags|= DUMPFILE_FLAG;
- if (ex->opt_enclosed)
- sql_ex.opt_flags|= OPT_ENCLOSED_FLAG;
-
- sql_ex.empty_flags= 0;
-
- switch (handle_dup) {
- case DUP_REPLACE:
- sql_ex.opt_flags|= REPLACE_FLAG;
- break;
- case DUP_UPDATE: // Impossible here
- case DUP_ERROR:
- break;
- }
- if (ignore)
- sql_ex.opt_flags|= IGNORE_FLAG;
-
- if (!ex->field_term->length())
- sql_ex.empty_flags |= FIELD_TERM_EMPTY;
- if (!ex->enclosed->length())
- sql_ex.empty_flags |= ENCLOSED_EMPTY;
- if (!ex->line_term->length())
- sql_ex.empty_flags |= LINE_TERM_EMPTY;
- if (!ex->line_start->length())
- sql_ex.empty_flags |= LINE_START_EMPTY;
- if (!ex->escaped->length())
- sql_ex.empty_flags |= ESCAPED_EMPTY;
-
- skip_lines = ex->skip_lines;
-
- List_iterator<Item> li(fields_arg);
- field_lens_buf.length(0);
- fields_buf.length(0);
- Item* item;
- while ((item = li++))
- {
- num_fields++;
- uchar len= (uchar) item->name.length;
- field_block_len += len + 1;
- fields_buf.append(item->name.str, len + 1);
- field_lens_buf.append((char*)&len, 1);
- }
-
- field_lens = (const uchar*)field_lens_buf.ptr();
- fields = fields_buf.ptr();
-}
-
-
-/**
- Load_log_event::set_fields()
-
- @note
- This function can not use the member variable
- for the database, since LOAD DATA INFILE on the slave
- can be for a different database than the current one.
- This is the reason for the affected_db argument to this method.
-*/
-
-void Load_log_event::set_fields(const char* affected_db,
- List<Item> &field_list,
- Name_resolution_context *context)
-{
- uint i;
- const char* field = fields;
- for (i= 0; i < num_fields; i++)
- {
- LEX_CSTRING field_name= {field, field_lens[i] };
- field_list.push_back(new (thd->mem_root)
- Item_field(thd, context,
- Lex_cstring_strlen(affected_db),
- Lex_cstring_strlen(table_name),
- field_name),
- thd->mem_root);
- field+= field_lens[i] + 1;
- }
-}
-
-
-#if defined(HAVE_REPLICATION)
-/**
- Does the data loading job when executing a LOAD DATA on the slave.
-
- @param net
- @param rli
- @param use_rli_only_for_errors If set to 1, rli is provided to
- Load_log_event::exec_event only for this
- function to have RPL_LOG_NAME and
- rli->last_slave_error, both being used by
- error reports. rli's position advancing
- is skipped (done by the caller which is
- Execute_load_log_event::exec_event).
- If set to 0, rli is provided for full use,
- i.e. for error reports and position
- advancing.
-
- @todo
- fix this; this can be done by testing rules in
- Create_file_log_event::exec_event() and then discarding Append_block and
-  Create_file_log_event::exec_event() and then discarding Append_block
-  et al.
- this is a bug - this needs to be moved to the I/O thread
-
- @retval
- 0 Success
- @retval
- 1 Failure
-*/
-
-int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi,
- bool use_rli_only_for_errors)
-{
- Relay_log_info const *rli= rgi->rli;
- Rpl_filter *rpl_filter= rli->mi->rpl_filter;
- DBUG_ENTER("Load_log_event::do_apply_event");
-
- DBUG_ASSERT(thd->query() == 0);
- set_thd_db(thd, rpl_filter, db, db_len);
- thd->clear_error(1);
-
- /* see Query_log_event::do_apply_event() and BUG#13360 */
- DBUG_ASSERT(!rgi->m_table_map.count());
- /*
- Usually lex_start() is called by mysql_parse(), but we need it here
- as the present method does not call mysql_parse().
- */
- lex_start(thd);
- thd->lex->local_file= local_fname;
- thd->reset_for_next_command(0); // Errors are cleared above
-
- /*
- We test replicate_*_db rules. Note that we have already prepared
- the file to load, even if we are going to ignore and delete it
- now. So it is possible that we did a lot of disk writes for
- nothing. In other words, a big LOAD DATA INFILE on the master will
- still consume a lot of space on the slave (space in the relay log
- + space of temp files: twice the space of the file to load...)
- even if it will finally be ignored. TODO: fix this; this can be
- done by testing rules in Create_file_log_event::do_apply_event()
-    and then discarding Append_block et al. Another way is to do the
- filtering in the I/O thread (more efficient: no disk writes at
- all).
-
-
- Note: We do not need to execute reset_one_shot_variables() if this
- db_ok() test fails.
- Reason: The db stored in binlog events is the same for SET and for
- its companion query. If the SET is ignored because of
- db_ok(), the companion query will also be ignored, and if
- the companion query is ignored in the db_ok() test of
-            ::do_apply_event(), then the companion SET was also ignored, so
- we don't need to reset_one_shot_variables().
- */
- if (rpl_filter->db_ok(thd->db.str))
- {
- thd->set_time(when, when_sec_part);
- thd->set_query_id(next_query_id());
- thd->get_stmt_da()->opt_clear_warning_info(thd->query_id);
-
- TABLE_LIST tables;
- LEX_CSTRING db_name= { thd->strmake(thd->db.str, thd->db.length), thd->db.length };
- if (lower_case_table_names)
- my_casedn_str(system_charset_info, (char *)table_name);
- LEX_CSTRING tbl_name= { table_name, strlen(table_name) };
- tables.init_one_table(&db_name, &tbl_name, 0, TL_WRITE);
- tables.updating= 1;
-
- // the table will be opened in mysql_load
- if (rpl_filter->is_on() && !rpl_filter->tables_ok(thd->db.str, &tables))
- {
- // TODO: this is a bug - this needs to be moved to the I/O thread
- if (net)
- skip_load_data_infile(net);
- }
- else
- {
- enum enum_duplicates handle_dup;
- bool ignore= 0;
- char query_buffer[1024];
- String query_str(query_buffer, sizeof(query_buffer), system_charset_info);
- char *load_data_query;
-
- query_str.length(0);
- /*
- Forge LOAD DATA INFILE query which will be used in SHOW PROCESS LIST
- and written to slave's binlog if binlogging is on.
- */
- print_query(thd, FALSE, NULL, &query_str, NULL, NULL, NULL);
- if (!(load_data_query= (char *)thd->strmake(query_str.ptr(),
- query_str.length())))
- {
- /*
- This will set thd->fatal_error in case of OOM. So we surely will notice
- that something is wrong.
- */
- goto error;
- }
-
- thd->set_query(load_data_query, (uint) (query_str.length()));
-
- if (sql_ex.opt_flags & REPLACE_FLAG)
- handle_dup= DUP_REPLACE;
- else if (sql_ex.opt_flags & IGNORE_FLAG)
- {
- ignore= 1;
- handle_dup= DUP_ERROR;
- }
- else
- {
- /*
- When replication is running fine, if it was DUP_ERROR on the
- master then we could choose IGNORE here, because if DUP_ERROR
-            succeeded on master, and data is identical on the master and slave,
- then there should be no uniqueness errors on slave, so IGNORE is
- the same as DUP_ERROR. But in the unlikely case of uniqueness errors
- (because the data on the master and slave happen to be different
-            (user error or bug)), we want LOAD DATA to print an error message on
- the slave to discover the problem.
-
- If reading from net (a 3.23 master), mysql_load() will change this
- to IGNORE.
- */
- handle_dup= DUP_ERROR;
- }
- /*
- We need to set thd->lex->sql_command and thd->lex->duplicates
- since InnoDB tests these variables to decide if this is a LOAD
- DATA ... REPLACE INTO ... statement even though mysql_parse()
- is not called. This is not needed in 5.0 since there the LOAD
- DATA ... statement is replicated using mysql_parse(), which
- sets the thd->lex fields correctly.
- */
- thd->lex->sql_command= SQLCOM_LOAD;
- thd->lex->duplicates= handle_dup;
-
- sql_exchange ex((char*)fname, sql_ex.opt_flags & DUMPFILE_FLAG);
- String field_term(sql_ex.field_term,sql_ex.field_term_len,log_cs);
- String enclosed(sql_ex.enclosed,sql_ex.enclosed_len,log_cs);
- String line_term(sql_ex.line_term,sql_ex.line_term_len,log_cs);
- String line_start(sql_ex.line_start,sql_ex.line_start_len,log_cs);
- String escaped(sql_ex.escaped,sql_ex.escaped_len, log_cs);
- ex.field_term= &field_term;
- ex.enclosed= &enclosed;
- ex.line_term= &line_term;
- ex.line_start= &line_start;
- ex.escaped= &escaped;
-
- ex.opt_enclosed = (sql_ex.opt_flags & OPT_ENCLOSED_FLAG);
- if (sql_ex.empty_flags & FIELD_TERM_EMPTY)
- ex.field_term->length(0);
-
- ex.skip_lines = skip_lines;
- List<Item> field_list;
- thd->lex->first_select_lex()->context.resolve_in_table_list_only(&tables);
- set_fields(tables.db.str,
- field_list, &thd->lex->first_select_lex()->context);
- thd->variables.pseudo_thread_id= thread_id;
- if (net)
- {
- // mysql_load will use thd->net to read the file
- thd->net.vio = net->vio;
- // Make sure the client does not get confused about the packet sequence
- thd->net.pkt_nr = net->pkt_nr;
- }
- /*
- It is safe to use tmp_list twice because we are not going to
- update it inside mysql_load().
- */
- List<Item> tmp_list;
- if (thd->open_temporary_tables(&tables) ||
- mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list,
- handle_dup, ignore, net != 0))
- thd->is_slave_error= 1;
- if (thd->cuted_fields)
- {
- /* log_pos is the position of the LOAD event in the master log */
- sql_print_warning("Slave: load data infile on table '%s' at "
- "log position %llu in log '%s' produced %ld "
- "warning(s). Default database: '%s'",
- (char*) table_name, log_pos, RPL_LOG_NAME,
- (ulong) thd->cuted_fields,
- thd->get_db());
- }
- if (net)
- net->pkt_nr= thd->net.pkt_nr;
- }
- }
- else
- {
- /*
- We will just ask the master to send us /dev/null if we do not
- want to load the data.
- TODO: this a bug - needs to be done in I/O thread
- */
- if (net)
- skip_load_data_infile(net);
- }
-
-error:
- thd->net.vio = 0;
- const char *remember_db= thd->get_db();
- thd->catalog= 0;
- thd->set_db(&null_clex_str); /* will free the current database */
- thd->reset_query();
- thd->get_stmt_da()->set_overwrite_status(true);
- thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
- thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_GTID_BEGIN);
- thd->get_stmt_da()->set_overwrite_status(false);
- close_thread_tables(thd);
- /*
- - If transaction rollback was requested due to deadlock
- perform it and release metadata locks.
- - If inside a multi-statement transaction,
- defer the release of metadata locks until the current
- transaction is either committed or rolled back. This prevents
- other statements from modifying the table for the entire
- duration of this transaction. This provides commit ordering
- and guarantees serializability across multiple transactions.
- - If in autocommit mode, or outside a transactional context,
- automatically release metadata locks of the current statement.
- */
- if (thd->transaction_rollback_request)
- {
- trans_rollback_implicit(thd);
- thd->release_transactional_locks();
- }
- else if (! thd->in_multi_stmt_transaction_mode())
- thd->release_transactional_locks();
- else
- thd->mdl_context.release_statement_locks();
-
- DBUG_EXECUTE_IF("LOAD_DATA_INFILE_has_fatal_error",
- thd->is_slave_error= 0; thd->is_fatal_error= 1;);
-
- if (unlikely(thd->is_slave_error))
- {
- /* this err/sql_errno code is copy-paste from net_send_error() */
- const char *err;
- int sql_errno;
- if (thd->is_error())
- {
- err= thd->get_stmt_da()->message();
- sql_errno= thd->get_stmt_da()->sql_errno();
- }
- else
- {
- sql_errno=ER_UNKNOWN_ERROR;
- err= ER_THD(thd, sql_errno);
- }
- rli->report(ERROR_LEVEL, sql_errno, rgi->gtid_info(), "\
-Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'",
- err, (char*)table_name, remember_db);
- free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
- DBUG_RETURN(1);
- }
- free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
-
- if (unlikely(thd->is_fatal_error))
- {
- char buf[256];
- my_snprintf(buf, sizeof(buf),
- "Running LOAD DATA INFILE on table '%-.64s'."
- " Default database: '%-.64s'",
- (char*)table_name,
- remember_db);
-
- rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, rgi->gtid_info(),
- ER_THD(thd, ER_SLAVE_FATAL_ERROR), buf);
- DBUG_RETURN(1);
- }
-
- DBUG_RETURN( use_rli_only_for_errors ? 0 : Log_event::do_apply_event(rgi) );
-}
-#endif
-
-
-/**************************************************************************
Rotate_log_event methods
**************************************************************************/
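
For reference, the removed Load_log_event::print_query() above reassembled a LOAD DATA statement from the event fields (this is what was shown in SHOW PROCESSLIST and written to the slave's binlog). The result looked roughly like the literal below; the database, table, column names and file path are invented here, and the exact clauses depend on the sql_ex flags:

// Illustrative only -- all identifiers and the path are made up.
const char *example_forged_load_stmt=
  "LOAD DATA LOCAL INFILE '/tmp/SQL_LOAD.data' IGNORE INTO TABLE `t1` "
  "FIELDS TERMINATED BY '\\t' ENCLOSED BY '' ESCAPED BY '\\\\' "
  "LINES TERMINATED BY '\\n' (`c1` ,`c2`)";
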
@@ -4858,10 +4145,9 @@ User_var_log_event::do_shall_skip(rpl_group_info *rgi)
written all DROP TEMPORARY TABLE (prepared statements' deletion is
TODO only when we binlog prep stmts). We used to clean up
slave_load_tmpdir, but this is useless as it has been cleared at the
- end of LOAD DATA INFILE. So we have nothing to do here. The place
- were we must do this cleaning is in
- Start_log_event_v3::do_apply_event(), not here. Because if we come
- here, the master was sane.
+  end of LOAD DATA INFILE. So we have nothing to do here. The place where we
+ must do this cleaning is in Format_description_log_event::do_apply_event(),
+ not here. Because if we come here, the master was sane.
This must only be called from the Slave SQL thread, since it calls
Relay_log_info::flush().
@@ -4895,178 +4181,6 @@ int Stop_log_event::do_update_pos(rpl_group_info *rgi)
/**************************************************************************
- Create_file_log_event methods
-**************************************************************************/
-
-Create_file_log_event::
-Create_file_log_event(THD* thd_arg, sql_exchange* ex,
- const char* db_arg, const char* table_name_arg,
- List<Item>& fields_arg,
- bool is_concurrent_arg,
- enum enum_duplicates handle_dup,
- bool ignore,
- uchar* block_arg, uint block_len_arg, bool using_trans)
- :Load_log_event(thd_arg, ex, db_arg, table_name_arg, fields_arg,
- is_concurrent_arg,
- handle_dup, ignore, using_trans),
- fake_base(0), block(block_arg), event_buf(0), block_len(block_len_arg),
- file_id(thd_arg->file_id = mysql_bin_log.next_file_id())
-{
- DBUG_ENTER("Create_file_log_event");
- sql_ex.force_new_format();
- DBUG_VOID_RETURN;
-}
-
-
-/*
- Create_file_log_event::write_data_body()
-*/
-
-bool Create_file_log_event::write_data_body()
-{
- bool res;
- if ((res= Load_log_event::write_data_body()) || fake_base)
- return res;
- return write_data("", 1) ||
- write_data(block, block_len);
-}
-
-
-/*
- Create_file_log_event::write_data_header()
-*/
-
-bool Create_file_log_event::write_data_header()
-{
- bool res;
- uchar buf[CREATE_FILE_HEADER_LEN];
- if ((res= Load_log_event::write_data_header()) || fake_base)
- return res;
- int4store(buf + CF_FILE_ID_OFFSET, file_id);
- return write_data(buf, CREATE_FILE_HEADER_LEN) != 0;
-}
-
-
-/*
- Create_file_log_event::write_base()
-*/
-
-bool Create_file_log_event::write_base()
-{
- bool res;
- fake_base= 1; // pretend we are Load event
- res= write();
- fake_base= 0;
- return res;
-}
-
-
-#if defined(HAVE_REPLICATION)
-void Create_file_log_event::pack_info(Protocol *protocol)
-{
- char buf[SAFE_NAME_LEN*2 + 30 + 21*2], *pos;
- pos= strmov(buf, "db=");
- memcpy(pos, db, db_len);
- pos= strmov(pos + db_len, ";table=");
- memcpy(pos, table_name, table_name_len);
- pos= strmov(pos + table_name_len, ";file_id=");
- pos= int10_to_str((long) file_id, pos, 10);
- pos= strmov(pos, ";block_len=");
- pos= int10_to_str((long) block_len, pos, 10);
- protocol->store(buf, (uint) (pos-buf), &my_charset_bin);
-}
-#endif /* defined(HAVE_REPLICATION) */
-
-
-/**
- Create_file_log_event::do_apply_event()
-  Applies a Create_file event read from the relay log on the slave,
-  creating the temporary files later used to replay the LOAD DATA.
-
- @retval
- 0 Success
- @retval
- 1 Failure
-*/
-
-#if defined(HAVE_REPLICATION)
-int Create_file_log_event::do_apply_event(rpl_group_info *rgi)
-{
- char fname_buf[FN_REFLEN];
- char *ext;
- int fd = -1;
- IO_CACHE file;
- Log_event_writer lew(&file, 0);
- int error = 1;
- Relay_log_info const *rli= rgi->rli;
-
- THD_STAGE_INFO(thd, stage_making_temp_file_create_before_load_data);
- bzero((char*)&file, sizeof(file));
- ext= slave_load_file_stem(fname_buf, file_id, server_id, ".info",
- &rli->mi->connection_name);
- /* old copy may exist already */
- mysql_file_delete(key_file_log_event_info, fname_buf, MYF(0));
- if ((fd= mysql_file_create(key_file_log_event_info,
- fname_buf, CREATE_MODE,
- O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
- MYF(MY_WME))) < 0 ||
- init_io_cache(&file, fd, IO_SIZE, WRITE_CACHE, (my_off_t)0, 0,
- MYF(MY_WME|MY_NABP)))
- {
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Create_file event: could not open file '%s'",
- fname_buf);
- goto err;
- }
-
- // a trick to avoid allocating another buffer
- fname= fname_buf;
- fname_len= (uint) (strmov(ext, ".data") - fname);
- writer= &lew;
- if (write_base())
- {
- strmov(ext, ".info"); // to have it right in the error message
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Create_file event: could not write to file '%s'",
- fname_buf);
- goto err;
- }
- end_io_cache(&file);
- mysql_file_close(fd, MYF(0));
-
- // fname_buf now already has .data, not .info, because we did our trick
- /* old copy may exist already */
- mysql_file_delete(key_file_log_event_data, fname_buf, MYF(0));
- if ((fd= mysql_file_create(key_file_log_event_data,
- fname_buf, CREATE_MODE,
- O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW,
- MYF(MY_WME))) < 0)
- {
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Create_file event: could not open file '%s'",
- fname_buf);
- goto err;
- }
- if (mysql_file_write(fd, (uchar*) block, block_len, MYF(MY_WME+MY_NABP)))
- {
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Create_file event: write to '%s' failed",
- fname_buf);
- goto err;
- }
- error=0; // Everything is ok
-
-err:
- if (unlikely(error))
- end_io_cache(&file);
- if (likely(fd >= 0))
- mysql_file_close(fd, MYF(0));
- return error != 0;
-}
-#endif /* defined(HAVE_REPLICATION) */
-
-
-/**************************************************************************
Append_block_log_event methods
**************************************************************************/
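
The deleted Create_file_log_event::do_apply_event() above, together with the Append_block and Execute_load handlers around it, managed a pair of temporary files per file_id: an ".info" file holding the serialized LOAD DATA header and a ".data" file holding the file contents. A toy sketch of that two-file scheme (plain stdio and an invented stem; the real code builds names with slave_load_file_stem() and uses the mysql_file_* wrappers):

#include <cstddef>
#include <cstdio>
#include <string>

// Toy version: write the header into "<stem>.info" and the first data
// block into "<stem>.data", as the removed Create_file handler did.
static bool toy_create_load_files(const std::string &stem,
                                  const char *header, std::size_t header_len,
                                  const char *block, std::size_t block_len)
{
  FILE *info= std::fopen((stem + ".info").c_str(), "wb");
  FILE *data= std::fopen((stem + ".data").c_str(), "wb");
  bool ok= info && data &&
           std::fwrite(header, 1, header_len, info) == header_len &&
           std::fwrite(block, 1, block_len, data) == block_len;
  if (info) std::fclose(info);
  if (data) std::fclose(data);
  return ok;
}
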
@@ -5228,130 +4342,6 @@ int Delete_file_log_event::do_apply_event(rpl_group_info *rgi)
/**************************************************************************
- Execute_load_log_event methods
-**************************************************************************/
-
-Execute_load_log_event::Execute_load_log_event(THD *thd_arg,
- const char* db_arg,
- bool using_trans)
- :Log_event(thd_arg, 0, using_trans), file_id(thd_arg->file_id), db(db_arg)
-{
-}
-
-
-bool Execute_load_log_event::write()
-{
- uchar buf[EXEC_LOAD_HEADER_LEN];
- int4store(buf + EL_FILE_ID_OFFSET, file_id);
- return write_header(sizeof(buf)) ||
- write_data(buf, sizeof(buf)) ||
- write_footer();
-}
-
-
-#if defined(HAVE_REPLICATION)
-void Execute_load_log_event::pack_info(Protocol *protocol)
-{
- char buf[64];
- uint length;
- length= (uint) sprintf(buf, ";file_id=%u", (uint) file_id);
- protocol->store(buf, (int32) length, &my_charset_bin);
-}
-
-
-/*
- Execute_load_log_event::do_apply_event()
-*/
-
-int Execute_load_log_event::do_apply_event(rpl_group_info *rgi)
-{
- char fname[FN_REFLEN+10];
- char *ext;
- int fd;
- int error= 1;
- IO_CACHE file;
- Load_log_event *lev= 0;
- Relay_log_info const *rli= rgi->rli;
-
- ext= slave_load_file_stem(fname, file_id, server_id, ".info",
- &rli->mi->cmp_connection_name);
- if ((fd= mysql_file_open(key_file_log_event_info,
- fname, O_RDONLY | O_BINARY | O_NOFOLLOW,
- MYF(MY_WME))) < 0 ||
- init_io_cache(&file, fd, IO_SIZE, READ_CACHE, (my_off_t)0, 0,
- MYF(MY_WME|MY_NABP)))
- {
- rli->report(ERROR_LEVEL, my_errno, rgi->gtid_info(),
- "Error in Exec_load event: could not open file '%s'",
- fname);
- goto err;
- }
- if (!(lev= (Load_log_event*)
- Log_event::read_log_event(&file,
- rli->relay_log.description_event_for_exec,
- opt_slave_sql_verify_checksum)) ||
- lev->get_type_code() != NEW_LOAD_EVENT)
- {
- rli->report(ERROR_LEVEL, 0, rgi->gtid_info(), "Error in Exec_load event: "
- "file '%s' appears corrupted", fname);
- goto err;
- }
- lev->thd = thd;
- /*
- lev->do_apply_event should use rli only for errors i.e. should
- not advance rli's position.
-
- lev->do_apply_event is the place where the table is loaded (it
- calls mysql_load()).
- */
-
- if (lev->do_apply_event(0,rgi,1))
- {
- /*
- We want to indicate the name of the file that could not be loaded
- (SQL_LOADxxx).
- But as we are here we are sure the error is in rli->last_slave_error and
- rli->last_slave_errno (example of error: duplicate entry for key), so we
- don't want to overwrite it with the filename.
-      What we want instead is to add the filename to the current error message.
- */
- char *tmp= my_strdup(PSI_INSTRUMENT_ME, rli->last_error().message, MYF(MY_WME));
- if (tmp)
- {
- rli->report(ERROR_LEVEL, rli->last_error().number, rgi->gtid_info(),
- "%s. Failed executing load from '%s'", tmp, fname);
- my_free(tmp);
- }
- goto err;
- }
- /*
- We have an open file descriptor to the .info file; we need to close it
- or Windows will refuse to delete the file in mysql_file_delete().
- */
- if (fd >= 0)
- {
- mysql_file_close(fd, MYF(0));
- end_io_cache(&file);
- fd= -1;
- }
- mysql_file_delete(key_file_log_event_info, fname, MYF(MY_WME));
- memcpy(ext, ".data", 6);
- mysql_file_delete(key_file_log_event_data, fname, MYF(MY_WME));
- error = 0;
-
-err:
- delete lev;
- if (fd >= 0)
- {
- mysql_file_close(fd, MYF(0));
- end_io_cache(&file);
- }
- return error;
-}
-
-#endif /* defined(HAVE_REPLICATION) */
-
-/**************************************************************************
Begin_load_query_log_event methods
**************************************************************************/
@@ -6386,7 +5376,7 @@ bool Rows_log_event::write_data_header()
});
int6store(buf + RW_MAPID_OFFSET, m_table_id);
int2store(buf + RW_FLAGS_OFFSET, m_flags);
- return write_data(buf, ROWS_HEADER_LEN);
+ return write_data(buf, ROWS_HEADER_LEN_V1);
}
bool Rows_log_event::write_data_body()
diff --git a/sql/rpl_record_old.cc b/sql/rpl_record_old.cc
deleted file mode 100644
index 496e781d2eb..00000000000
--- a/sql/rpl_record_old.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-/* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
-
-#include "mariadb.h"
-#include "sql_priv.h"
-#include "rpl_rli.h"
-#include "rpl_record_old.h"
-#include "log_event.h" // Log_event_type
-
-size_t
-pack_row_old(TABLE *table, MY_BITMAP const* cols,
- uchar *row_data, const uchar *record)
-{
- Field **p_field= table->field, *field;
- int n_null_bytes= table->s->null_bytes;
- uchar *ptr;
- uint i;
- my_ptrdiff_t const rec_offset= record - table->record[0];
- my_ptrdiff_t const def_offset= table->s->default_values - table->record[0];
- memcpy(row_data, record, n_null_bytes);
- ptr= row_data+n_null_bytes;
-
- for (i= 0 ; (field= *p_field) ; i++, p_field++)
- {
- if (bitmap_is_set(cols,i))
- {
- my_ptrdiff_t const offset=
- field->is_null(rec_offset) ? def_offset : rec_offset;
- field->move_field_offset(offset);
- ptr= field->pack(ptr, field->ptr);
- field->move_field_offset(-offset);
- }
- }
- return (static_cast<size_t>(ptr - row_data));
-}
-
-
-/*
- Unpack a row into a record.
-
- SYNOPSIS
- unpack_row()
- rli Relay log info
- table Table to unpack into
- colcnt Number of columns to read from record
- record Record where the data should be unpacked
- row Packed row data
- cols Pointer to columns data to fill in
- row_end Pointer to variable that will hold the value of the
- one-after-end position for the row
- master_reclength
- Pointer to variable that will be set to the length of the
- record on the master side
- rw_set Pointer to bitmap that holds either the read_set or the
- write_set of the table
-
- DESCRIPTION
-
-    The row is assumed to consist only of the fields whose bit is set in
-    the 'cols' bitmap; the other parts of the record are left alone.
-
- At most 'colcnt' columns are read: if the table is larger than
- that, the remaining fields are not filled in.
-
- RETURN VALUE
-
- Error code, or zero if no error. The following error codes can
- be returned:
-
- ER_NO_DEFAULT_FOR_FIELD
- Returned if one of the fields existing on the slave but not on
- the master does not have a default value (and isn't nullable)
- ER_SLAVE_CORRUPT_EVENT
- Wrong data for field found.
- */
-#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-int
-unpack_row_old(rpl_group_info *rgi,
- TABLE *table, uint const colcnt, uchar *record,
- uchar const *row, const uchar *row_buffer_end,
- MY_BITMAP const *cols,
- uchar const **row_end, ulong *master_reclength,
- MY_BITMAP* const rw_set, Log_event_type const event_type)
-{
- DBUG_ASSERT(record && row);
- my_ptrdiff_t const offset= record - (uchar*) table->record[0];
- size_t master_null_bytes= table->s->null_bytes;
-
- if (colcnt != table->s->fields)
- {
- Field **fptr= &table->field[colcnt-1];
- do
- master_null_bytes= (*fptr)->last_null_byte();
- while (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF &&
- fptr-- > table->field);
-
- /*
- If master_null_bytes is LAST_NULL_BYTE_UNDEF (0) at this time,
- there were no nullable fields nor BIT fields at all in the
- columns that are common to the master and the slave. In that
- case, there is only one null byte holding the X bit.
-
- OBSERVE! There might still be nullable columns following the
- common columns, so table->s->null_bytes might be greater than 1.
- */
- if (master_null_bytes == Field::LAST_NULL_BYTE_UNDEF)
- master_null_bytes= 1;
- }
-
- DBUG_ASSERT(master_null_bytes <= table->s->null_bytes);
- memcpy(record, row, master_null_bytes); // [1]
- int error= 0;
-
- bitmap_set_all(rw_set);
-
- Field **const begin_ptr = table->field;
- Field **field_ptr;
- uchar const *ptr= row + master_null_bytes;
- Field **const end_ptr= begin_ptr + colcnt;
- for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr)
- {
- Field *const f= *field_ptr;
-
- if (bitmap_is_set(cols, (uint)(field_ptr - begin_ptr)))
- {
- f->move_field_offset(offset);
- ptr= f->unpack(f->ptr, ptr, row_buffer_end, 0);
- f->move_field_offset(-offset);
- if (!ptr)
- {
- rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT, NULL,
- "Could not read field `%s` of table `%s`.`%s`",
- f->field_name.str, table->s->db.str,
- table->s->table_name.str);
- return(ER_SLAVE_CORRUPT_EVENT);
- }
- }
- else
- bitmap_clear_bit(rw_set, (uint)(field_ptr - begin_ptr));
- }
-
- *row_end = ptr;
- if (master_reclength)
- {
- if (*field_ptr)
- *master_reclength = (ulong)((*field_ptr)->ptr - table->record[0]);
- else
- *master_reclength = table->s->reclength;
- }
-
- /*
- Set properties for remaining columns, if there are any. We let the
- corresponding bit in the write_set be set, to write the value if
- it was not there already. We iterate over all remaining columns,
- even if there were an error, to get as many error messages as
- possible. We are still able to return a pointer to the next row,
- so redo that.
-
- This generation of error messages is only relevant when inserting
- new rows.
- */
- for ( ; *field_ptr ; ++field_ptr)
- {
- uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG;
-
- DBUG_PRINT("debug", ("flags = 0x%x, mask = 0x%x, flags & mask = 0x%x",
- (*field_ptr)->flags, mask,
- (*field_ptr)->flags & mask));
-
- if (event_type == WRITE_ROWS_EVENT &&
- ((*field_ptr)->flags & mask) == mask)
- {
- rgi->rli->report(ERROR_LEVEL, ER_NO_DEFAULT_FOR_FIELD, NULL,
- "Field `%s` of table `%s`.`%s` "
- "has no default value and cannot be NULL",
- (*field_ptr)->field_name.str, table->s->db.str,
- table->s->table_name.str);
- error = ER_NO_DEFAULT_FOR_FIELD;
- }
- else
- (*field_ptr)->set_default();
- }
-
- return error;
-}
-#endif
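The deleted pack_row_old()/unpack_row_old() pair handled the old row image: the table's null-byte block copied verbatim, followed by the packed value of every column whose bit is set in the column bitmap, with at most colcnt columns read back on the slave. A toy, self-contained sketch of that layout (fixed-width uint32 columns stand in for Field::pack(), which the real code delegated to):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Toy column: a fixed-width value plus a flag mimicking bitmap_is_set(cols, i).
struct ToyColumn { uint32_t value; bool in_row_image; };

// Mirrors the shape of the deleted pack_row_old(): null bytes first,
// then each selected column appended in order. Returns the packed length.
static std::size_t toy_pack_row_old(const unsigned char *null_bytes,
                                    std::size_t n_null_bytes,
                                    const std::vector<ToyColumn> &cols,
                                    unsigned char *row_data)
{
  std::memcpy(row_data, null_bytes, n_null_bytes);
  unsigned char *ptr = row_data + n_null_bytes;
  for (const ToyColumn &c : cols)
  {
    if (!c.in_row_image)
      continue;                       // column not selected by the bitmap
    std::memcpy(ptr, &c.value, sizeof c.value);
    ptr += sizeof c.value;
  }
  return static_cast<std::size_t>(ptr - row_data);
}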
diff --git a/sql/rpl_record_old.h b/sql/rpl_record_old.h
deleted file mode 100644
index 0b2dd432138..00000000000
--- a/sql/rpl_record_old.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2007, 2010, Oracle and/or its affiliates.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
-
-#ifndef RPL_RECORD_OLD_H
-#define RPL_RECORD_OLD_H
-
-#include "log_event.h" /* Log_event_type */
-
-#ifndef MYSQL_CLIENT
-size_t pack_row_old(TABLE *table, MY_BITMAP const* cols,
- uchar *row_data, const uchar *record);
-
-#ifdef HAVE_REPLICATION
-int unpack_row_old(rpl_group_info *rgi,
- TABLE *table, uint const colcnt, uchar *record,
- uchar const *row, uchar const *row_buffer_end,
- MY_BITMAP const *cols,
- uchar const **row_end, ulong *master_reclength,
- MY_BITMAP* const rw_set,
- Log_event_type const event_type);
-#endif
-#endif
-#endif
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index 88a0e346245..edbb630c781 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -525,13 +525,7 @@ read_relay_log_description_event(IO_CACHE *cur_log, ulonglong start_pos,
Format_description_log_event *fdev;
bool found= false;
- /*
- By default the relay log is in binlog format 3 (4.0).
- Even if format is 4, this will work enough to read the first event
- (Format_desc) (remember that format 4 is just lenghtened compared to format
- 3; format 3 is a prefix of format 4).
- */
- fdev= new Format_description_log_event(3);
+ fdev= new Format_description_log_event(4);
while (!found)
{
@@ -666,14 +660,7 @@ int init_relay_log_pos(Relay_log_info* rli,const char* log,
running, say, CHANGE MASTER.
*/
delete rli->relay_log.description_event_for_exec;
- /*
- By default the relay log is in binlog format 3 (4.0).
- Even if format is 4, this will work enough to read the first event
- (Format_desc) (remember that format 4 is just lenghtened compared to format
- 3; format 3 is a prefix of format 4).
- */
- rli->relay_log.description_event_for_exec= new
- Format_description_log_event(3);
+ rli->relay_log.description_event_for_exec= new Format_description_log_event(4);
mysql_mutex_lock(log_lock);
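With format 4 now the unconditional default when the relay log is opened, every event read here is expected to start with the v4 common header. A standalone sketch of peeking at that header (the 19-byte layout and its offsets are assumptions based on the documented binlog v4 format, not values taken from this patch):

#include <cstddef>
#include <cstdint>

// Assumed v4 common-header layout: timestamp(4) type(1) server_id(4)
// event_len(4) end_log_pos(4) flags(2) = 19 bytes, all little-endian.
struct ToyEventHeader
{
  uint32_t timestamp;
  uint8_t  type_code;
  uint32_t server_id;
  uint32_t event_len;
  uint32_t end_log_pos;
  uint16_t flags;
};

static uint32_t read_le32(const unsigned char *p)
{
  return p[0] | (p[1] << 8) | (p[2] << 16) | (uint32_t(p[3]) << 24);
}

// Returns false if the buffer is too short to hold a v4 common header.
static bool toy_peek_v4_header(const unsigned char *buf, std::size_t len,
                               ToyEventHeader *hdr)
{
  if (len < 19)
    return false;
  hdr->timestamp   = read_le32(buf);
  hdr->type_code   = buf[4];
  hdr->server_id   = read_le32(buf + 5);
  hdr->event_len   = read_le32(buf + 9);
  hdr->end_log_pos = read_le32(buf + 13);
  hdr->flags       = static_cast<uint16_t>(buf[17] | (buf[18] << 8));
  return true;
}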
diff --git a/sql/slave.cc b/sql/slave.cc
index 04831e1f18b..7db5a31d439 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -160,7 +160,6 @@ failed read"
typedef enum { SLAVE_THD_IO, SLAVE_THD_SQL} SLAVE_THD_TYPE;
static int process_io_rotate(Master_info* mi, Rotate_log_event* rev);
-static int process_io_create_file(Master_info* mi, Create_file_log_event* cev);
static bool wait_for_relay_log_space(Relay_log_info* rli);
static bool io_slave_killed(Master_info* mi);
static bool sql_slave_killed(rpl_group_info *rgi);
@@ -1487,20 +1486,6 @@ bool net_request_file(NET* net, const char* fname)
(uchar*) "", 0));
}
-/*
- From other comments and tests in code, it looks like
- sometimes Query_log_event and Load_log_event can have db == 0
- (see rewrite_db() above for example)
- (cases where this happens are unclear; it may be when the master is 3.23).
-*/
-
-const char *print_slave_db_safe(const char* db)
-{
- DBUG_ENTER("*print_slave_db_safe");
-
- DBUG_RETURN((db ? db : ""));
-}
-
#endif /* HAVE_REPLICATION */
bool Sql_cmd_show_slave_status::execute(THD *thd)
@@ -1785,6 +1770,8 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
case 0:
case 1:
case 2:
+ case 3:
+ case 4:
errmsg= err_buff2;
snprintf(err_buff2, sizeof(err_buff2),
"Master reported unrecognized MariaDB version: %s",
@@ -1792,14 +1779,6 @@ static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi)
err_code= ER_SLAVE_FATAL_ERROR;
sprintf(err_buff, ER_DEFAULT(err_code), err_buff2);
break;
- case 3:
- mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(1, mysql->server_version);
- break;
- case 4:
- mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(3, mysql->server_version);
- break;
default:
/*
Master is MySQL >=5.0. Give a default Format_desc event, so that we can
@@ -4850,28 +4829,25 @@ connected:
goto connected;
}
- if (mi->rli.relay_log.description_event_for_queue->binlog_version > 1)
+ /*
+ Register ourselves with the master.
+ */
+ THD_STAGE_INFO(thd, stage_registering_slave_on_master);
+ if (register_slave_on_master(mysql, mi, &suppress_warnings))
{
- /*
- Register ourselves with the master.
- */
- THD_STAGE_INFO(thd, stage_registering_slave_on_master);
- if (register_slave_on_master(mysql, mi, &suppress_warnings))
+ if (!check_io_slave_killed(mi, "Slave I/O thread killed "
+ "while registering slave on master"))
{
- if (!check_io_slave_killed(mi, "Slave I/O thread killed "
- "while registering slave on master"))
- {
- sql_print_error("Slave I/O thread couldn't register on master");
- if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
- reconnect_messages[SLAVE_RECON_ACT_REG]))
- goto err;
- }
- else
+ sql_print_error("Slave I/O thread couldn't register on master");
+ if (try_to_reconnect(thd, mysql, mi, &retry_count, suppress_warnings,
+ reconnect_messages[SLAVE_RECON_ACT_REG]))
goto err;
- goto connected;
}
- DBUG_EXECUTE_IF("fail_com_register_slave", goto err;);
+ else
+ goto err;
+ goto connected;
}
+ DBUG_EXECUTE_IF("fail_com_register_slave", goto err;);
DBUG_PRINT("info",("Starting reading binary log from master"));
thd->set_command(COM_SLAVE_IO);
@@ -5847,115 +5823,6 @@ err_during_init:
/*
- process_io_create_file()
-*/
-
-static int process_io_create_file(Master_info* mi, Create_file_log_event* cev)
-{
- int error = 1;
- ulong num_bytes;
- bool cev_not_written;
- THD *thd = mi->io_thd;
- NET *net = &mi->mysql->net;
- DBUG_ENTER("process_io_create_file");
-
- if (unlikely(!cev->is_valid()))
- DBUG_RETURN(1);
-
- if (!mi->rpl_filter->db_ok(cev->db))
- {
- skip_load_data_infile(net);
- DBUG_RETURN(0);
- }
- DBUG_ASSERT(cev->inited_from_old);
- thd->file_id = cev->file_id = mi->file_id++;
- thd->variables.server_id = cev->server_id;
- cev_not_written = 1;
-
- if (unlikely(net_request_file(net,cev->fname)))
- {
- sql_print_error("Slave I/O: failed requesting download of '%s'",
- cev->fname);
- goto err;
- }
-
- /*
- This dummy block is so we could instantiate Append_block_log_event
- once and then modify it slightly instead of doing it multiple times
- in the loop
- */
- {
- Append_block_log_event aev(thd,0,0,0,0);
-
- for (;;)
- {
- if (unlikely((num_bytes=my_net_read(net)) == packet_error))
- {
- sql_print_error("Network read error downloading '%s' from master",
- cev->fname);
- goto err;
- }
- if (unlikely(!num_bytes)) /* eof */
- {
- /* 3.23 master wants it */
- net_write_command(net, 0, (uchar*) "", 0, (uchar*) "", 0);
- /*
- If we wrote Create_file_log_event, then we need to write
- Execute_load_log_event. If we did not write Create_file_log_event,
- then this is an empty file and we can just do as if the LOAD DATA
- INFILE had not existed, i.e. write nothing.
- */
- if (unlikely(cev_not_written))
- break;
- Execute_load_log_event xev(thd,0,0);
- xev.log_pos = cev->log_pos;
- if (unlikely(mi->rli.relay_log.append(&xev)))
- {
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
- ER_THD(thd, ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
- "error writing Exec_load event to relay log");
- goto err;
- }
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
- break;
- }
- if (unlikely(cev_not_written))
- {
- cev->block = net->read_pos;
- cev->block_len = num_bytes;
- if (unlikely(mi->rli.relay_log.append(cev)))
- {
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
- ER_THD(thd, ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
- "error writing Create_file event to relay log");
- goto err;
- }
- cev_not_written=0;
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total);
- }
- else
- {
- aev.block = net->read_pos;
- aev.block_len = num_bytes;
- aev.log_pos = cev->log_pos;
- if (unlikely(mi->rli.relay_log.append(&aev)))
- {
- mi->report(ERROR_LEVEL, ER_SLAVE_RELAY_LOG_WRITE_FAILURE, NULL,
- ER_THD(thd, ER_SLAVE_RELAY_LOG_WRITE_FAILURE),
- "error writing Append_block event to relay log");
- goto err;
- }
- mi->rli.relay_log.harvest_bytes_written(&mi->rli.log_space_total) ;
- }
- }
- }
- error=0;
-err:
- DBUG_RETURN(error);
-}
-
-
-/*
Start using a new binary log on the master
SYNOPSIS
@@ -5999,25 +5866,10 @@ static int process_io_rotate(Master_info *mi, Rotate_log_event *rev)
mi->events_till_disconnect++;
#endif
- /*
- If description_event_for_queue is format <4, there is conversion in the
- relay log to the slave's format (4). And Rotate can mean upgrade or
- nothing. If upgrade, it's to 5.0 or newer, so we will get a Format_desc, so
- no need to reset description_event_for_queue now. And if it's nothing (same
- master version as before), no need (still using the slave's format).
- */
+ /* This prevents writing a redundant Format_description event to the relay log */
if (mi->rli.relay_log.description_event_for_queue->binlog_version >= 4)
- {
- DBUG_ASSERT(mi->rli.relay_log.description_event_for_queue->checksum_alg ==
- mi->rli.relay_log.relay_log_checksum_alg);
-
- delete mi->rli.relay_log.description_event_for_queue;
- /* start from format 3 (MySQL 4.0) again */
- mi->rli.relay_log.description_event_for_queue= new
- Format_description_log_event(3);
- mi->rli.relay_log.description_event_for_queue->checksum_alg=
- mi->rli.relay_log.relay_log_checksum_alg;
- }
+ mi->rli.relay_log.description_event_for_queue->binlog_version= 3;
+
/*
Rotate the relay log makes binlog format detection easier (at next slave
start or mysqlbinlog)
@@ -6026,216 +5878,9 @@ static int process_io_rotate(Master_info *mi, Rotate_log_event *rev)
}
/*
- Reads a 3.23 event and converts it to the slave's format. This code was
- copied from MySQL 4.0.
-*/
-static int queue_binlog_ver_1_event(Master_info *mi, const uchar *buf,
- ulong event_len)
-{
- const char *errmsg = 0;
- ulong inc_pos;
- bool ignore_event= 0;
- uchar *tmp_buf = 0;
- Relay_log_info *rli= &mi->rli;
- DBUG_ENTER("queue_binlog_ver_1_event");
-
- /*
- If we get Load event, we need to pass a non-reusable buffer
- to read_log_event, so we do a trick
- */
- if ((uchar)buf[EVENT_TYPE_OFFSET] == LOAD_EVENT)
- {
- if (unlikely(!(tmp_buf= (uchar*) my_malloc(key_memory_binlog_ver_1_event,
- event_len+1, MYF(MY_WME)))))
- {
- mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL,
- ER(ER_SLAVE_FATAL_ERROR), "Memory allocation failed");
- DBUG_RETURN(1);
- }
- memcpy(tmp_buf,buf,event_len);
- /*
- Create_file constructor wants a 0 as last char of buffer, this 0 will
- serve as the string-termination char for the file's name (which is at the
- end of the buffer)
- We must increment event_len, otherwise the event constructor will not see
- this end 0, which leads to segfault.
- */
- tmp_buf[event_len++]=0;
- int4store(tmp_buf+EVENT_LEN_OFFSET, event_len);
- buf= tmp_buf;
- }
- /*
- This will transform LOAD_EVENT into CREATE_FILE_EVENT, ask the master to
- send the loaded file, and write it to the relay log in the form of
- Append_block/Exec_load (the SQL thread needs the data, as that thread is not
- connected to the master).
- */
- Log_event *ev=
- Log_event::read_log_event(buf, event_len, &errmsg,
- mi->rli.relay_log.description_event_for_queue, 0);
- if (unlikely(!ev))
- {
- sql_print_error("Read invalid event from master: '%s',\
- master could be corrupt but a more likely cause of this is a bug",
- errmsg);
- my_free(tmp_buf);
- DBUG_RETURN(1);
- }
-
- mysql_mutex_lock(&mi->data_lock);
- ev->log_pos= mi->master_log_pos; /* 3.23 events don't contain log_pos */
- switch (ev->get_type_code()) {
- case STOP_EVENT:
- ignore_event= 1;
- inc_pos= event_len;
- break;
- case ROTATE_EVENT:
- if (unlikely(process_io_rotate(mi,(Rotate_log_event*)ev)))
- {
- delete ev;
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
- }
- inc_pos= 0;
- break;
- case CREATE_FILE_EVENT:
- /*
- Yes it's possible to have CREATE_FILE_EVENT here, even if we're in
- queue_old_event() which is for 3.23 events which don't comprise
- CREATE_FILE_EVENT. This is because read_log_event() above has just
- transformed LOAD_EVENT into CREATE_FILE_EVENT.
- */
- {
- /* We come here when and only when tmp_buf != 0 */
- DBUG_ASSERT(tmp_buf != 0);
- inc_pos=event_len;
- ev->log_pos+= inc_pos;
- int error = process_io_create_file(mi,(Create_file_log_event*)ev);
- delete ev;
- mi->master_log_pos += inc_pos;
- DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
- mysql_mutex_unlock(&mi->data_lock);
- my_free(tmp_buf);
- DBUG_RETURN(error);
- }
- default:
- inc_pos= event_len;
- break;
- }
- if (likely(!ignore_event))
- {
- if (ev->log_pos)
- /*
- Don't do it for fake Rotate events (see comment in
- Log_event::Log_event(const char* buf...) in log_event.cc).
- */
- ev->log_pos+= event_len; /* make log_pos be the pos of the end of the event */
- if (unlikely(rli->relay_log.append(ev)))
- {
- delete ev;
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
- }
- rli->relay_log.harvest_bytes_written(&rli->log_space_total);
- }
- delete ev;
- mi->master_log_pos+= inc_pos;
- DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(0);
-}
-
-/*
- Reads a 4.0 event and converts it to the slave's format. This code was copied
- from queue_binlog_ver_1_event(), with some affordable simplifications.
-*/
-static int queue_binlog_ver_3_event(Master_info *mi, const uchar *buf,
- ulong event_len)
-{
- const char *errmsg = 0;
- ulong inc_pos;
- char *tmp_buf = 0;
- Relay_log_info *rli= &mi->rli;
- DBUG_ENTER("queue_binlog_ver_3_event");
-
- /* read_log_event() will adjust log_pos to be end_log_pos */
- Log_event *ev=
- Log_event::read_log_event(buf, event_len, &errmsg,
- mi->rli.relay_log.description_event_for_queue, 0);
- if (unlikely(!ev))
- {
- sql_print_error("Read invalid event from master: '%s',\
- master could be corrupt but a more likely cause of this is a bug",
- errmsg);
- my_free(tmp_buf);
- DBUG_RETURN(1);
- }
- mysql_mutex_lock(&mi->data_lock);
- switch (ev->get_type_code()) {
- case STOP_EVENT:
- goto err;
- case ROTATE_EVENT:
- if (unlikely(process_io_rotate(mi,(Rotate_log_event*)ev)))
- {
- delete ev;
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
- }
- inc_pos= 0;
- break;
- default:
- inc_pos= event_len;
- break;
- }
-
- if (unlikely(rli->relay_log.append(ev)))
- {
- delete ev;
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(1);
- }
- rli->relay_log.harvest_bytes_written(&rli->log_space_total);
- delete ev;
- mi->master_log_pos+= inc_pos;
-err:
- DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos));
- mysql_mutex_unlock(&mi->data_lock);
- DBUG_RETURN(0);
-}
-
-/*
- queue_old_event()
-
- Writes a 3.23 or 4.0 event to the relay log, after converting it to the 5.0
- (exactly, slave's) format. To do the conversion, we create a 5.0 event from
- the 3.23/4.0 bytes, then write this event to the relay log.
-
- TODO:
- Test this code before release - it has to be tested on a separate
- setup with 3.23 master or 4.0 master
-*/
-
-static int queue_old_event(Master_info *mi, const uchar *buf, ulong event_len)
-{
- DBUG_ENTER("queue_old_event");
-
- switch (mi->rli.relay_log.description_event_for_queue->binlog_version) {
- case 1:
- DBUG_RETURN(queue_binlog_ver_1_event(mi,buf,event_len));
- case 3:
- DBUG_RETURN(queue_binlog_ver_3_event(mi,buf,event_len));
- default: /* unsupported format; eg version 2 */
- DBUG_PRINT("info",("unsupported binlog format %d in queue_old_event()",
- mi->rli.relay_log.description_event_for_queue->binlog_version));
- DBUG_RETURN(1);
- }
-}
-
-/*
queue_event()
- If the event is 3.23/4.0, passes it to queue_old_event() which will convert
- it. Otherwise, writes a 5.0 (or newer) event to the relay log. Then there is
+ Writes a 5.0 (or newer) event to the relay log. There is
no format conversion, it's pure read/write of bytes.
So a 5.0.0 slave's relay log can contain events in the slave's format or in
any >=5.0.0 format.
@@ -6323,10 +5968,6 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
}
DBUG_ASSERT(((uchar) buf[FLAGS_OFFSET] & LOG_EVENT_ACCEPT_OWN_F) == 0);
- if (mi->rli.relay_log.description_event_for_queue->binlog_version<4 &&
- buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */)
- DBUG_RETURN(queue_old_event(mi,buf,event_len));
-
#ifdef ENABLED_DEBUG_SYNC
/*
A (+d,dbug.rows_events_to_delay_relay_logging)-test is supposed to
@@ -6590,7 +6231,7 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
*/
inc_pos= uint4korr(buf+LOG_POS_OFFSET) ? event_len : 0;
DBUG_PRINT("info",("binlog format is now %d",
- mi->rli.relay_log.description_event_for_queue->binlog_version));
+ mi->rli.relay_log.description_event_for_queue->binlog_version));
}
break;
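A consequence of the get_master_version_and_clock() hunk above is that a pre-5.0 master is now rejected outright instead of being handed a 3.23 or 4.0 format descriptor. A hedged, standalone sketch of that decision (the function name and message wording are illustrative, not the server's):

#include <cstdio>

// Illustrative only: mirrors the new shape of the major-version switch,
// where anything below 5 falls into the error arm and only >= 5.0 masters
// proceed with a default v4 format descriptor.
static int toy_check_master_major_version(int major, const char *version_str)
{
  switch (major)
  {
  case 0: case 1: case 2: case 3: case 4:
    std::fprintf(stderr,
                 "Master reported unrecognized MariaDB version: %s\n",
                 version_str);
    return 1;   // fatal: pre-5.0 masters are no longer supported
  default:
    return 0;   // >= 5.0: a v4 Format_description event will be used
  }
}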
diff --git a/sql/sql_class.h b/sql/sql_class.h
index e13fe9defcb..f0bf695a1c1 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -90,7 +90,6 @@ struct rpl_group_info;
struct rpl_parallel_thread;
class Rpl_filter;
class Query_log_event;
-class Load_log_event;
class Log_event_writer;
class sp_rcontext;
class sp_cache;
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index fe574db528f..73924bb9d1c 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -836,8 +836,6 @@ err:
#ifndef EMBEDDED_LIBRARY
-
-/* Not a very useful function; just to avoid duplication of code */
static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex,
const char* db_arg, /* table's database */
const char* table_name_arg,
@@ -848,27 +846,34 @@ static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex,
int errcode)
{
char *load_data_query;
- my_off_t fname_start,
- fname_end;
- List<Item> fv;
+ my_off_t fname_start, fname_end;
Item *item, *val;
int n;
- const char *tdb= (thd->db.str != NULL ? thd->db.str : db_arg);
- const char *qualify_db= NULL;
- char command_buffer[1024];
- String query_str(command_buffer, sizeof(command_buffer),
- system_charset_info);
+ StringBuffer<1024> query_str(system_charset_info);
- Load_log_event lle(thd, ex, tdb, table_name_arg, fv, is_concurrent,
- duplicates, ignore, transactional_table);
+ query_str.append(STRING_WITH_LEN("LOAD DATA "));
+
+ if (is_concurrent)
+ query_str.append(STRING_WITH_LEN("CONCURRENT "));
+
+ fname_start= query_str.length();
- /*
- force in a LOCAL if there was one in the original.
- */
if (thd->lex->local_file)
- lle.set_fname_outside_temp_buf(ex->file_name, strlen(ex->file_name));
+ query_str.append(STRING_WITH_LEN("LOCAL "));
+ query_str.append(STRING_WITH_LEN("INFILE '"));
+ query_str.append_for_single_quote(ex->file_name, strlen(ex->file_name));
+ query_str.append(STRING_WITH_LEN("' "));
- query_str.length(0);
+ if (duplicates == DUP_REPLACE)
+ query_str.append(STRING_WITH_LEN("REPLACE "));
+ else if (ignore)
+ query_str.append(STRING_WITH_LEN("IGNORE "));
+
+ query_str.append(STRING_WITH_LEN("INTO"));
+
+ fname_end= query_str.length();
+
+ query_str.append(STRING_WITH_LEN(" TABLE "));
if (!thd->db.str || strcmp(db_arg, thd->db.str))
{
/*
@@ -876,10 +881,47 @@ static bool write_execute_load_query_log_event(THD *thd, const sql_exchange* ex,
prefix table name with database name so that it
becomes a FQ name.
*/
- qualify_db= db_arg;
+ append_identifier(thd, &query_str, db_arg, strlen(db_arg));
+ query_str.append(STRING_WITH_LEN("."));
+ }
+ append_identifier(thd, &query_str, table_name_arg, strlen(table_name_arg));
+
+ if (ex->cs)
+ {
+ query_str.append(STRING_WITH_LEN(" CHARACTER SET "));
+ query_str.append(ex->cs->cs_name);
+ }
+
+ /* We have to create all optional fields as the default is not empty */
+ query_str.append(STRING_WITH_LEN(" FIELDS TERMINATED BY '"));
+ query_str.append_for_single_quote(ex->field_term);
+ query_str.append(STRING_WITH_LEN("'"));
+ if (ex->opt_enclosed)
+ query_str.append(STRING_WITH_LEN(" OPTIONALLY"));
+ query_str.append(STRING_WITH_LEN(" ENCLOSED BY '"));
+ query_str.append_for_single_quote(ex->enclosed);
+ query_str.append(STRING_WITH_LEN("'"));
+
+ query_str.append(STRING_WITH_LEN(" ESCAPED BY '"));
+ query_str.append_for_single_quote(ex->escaped);
+ query_str.append(STRING_WITH_LEN("'"));
+
+ query_str.append(STRING_WITH_LEN(" LINES TERMINATED BY '"));
+ query_str.append_for_single_quote(ex->line_term);
+ query_str.append(STRING_WITH_LEN("'"));
+ if (ex->line_start->length())
+ {
+ query_str.append(STRING_WITH_LEN(" STARTING BY '"));
+ query_str.append_for_single_quote(ex->line_start);
+ query_str.append(STRING_WITH_LEN("'"));
+ }
+
+ if (ex->skip_lines)
+ {
+ query_str.append(STRING_WITH_LEN(" IGNORE "));
+ query_str.append_ulonglong(ex->skip_lines);
+ query_str.append(STRING_WITH_LEN(" LINES "));
}
- lle.print_query(thd, FALSE, (const char*) ex->cs ? ex->cs->cs_name.str : NULL,
- &query_str, &fname_start, &fname_end, qualify_db);
/*
prepare fields-list and SET if needed; print_query won't do that for us.
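The rewritten write_execute_load_query_log_event() now assembles the logged statement text directly, with fname_start/fname_end bracketing the INFILE ... INTO span recorded in the Execute_load_query event. For a hypothetical call (file '/tmp/data.txt', table t1 in database db1, REPLACE, skip_lines=1, delimiters left at their defaults) the construction looks roughly like the sketch below; the file, table, and delimiter values are made up for illustration, and the quoting of defaults is an assumption, not lifted from the patch:

#include <cstddef>
#include <string>

// Toy single-quote escaper in the spirit of String::append_for_single_quote().
static void append_quoted(std::string &out, const std::string &s)
{
  for (char c : s)
  {
    switch (c)
    {
    case '\\': out += "\\\\"; break;
    case '\'': out += "\\'";  break;
    case '\t': out += "\\t";  break;
    case '\n': out += "\\n";  break;
    default:   out += c;      break;
    }
  }
}

// Hypothetical parameters; follows the append order used in sql_load.cc.
static std::string toy_build_load_data_query()
{
  std::string q = "LOAD DATA ";
  std::size_t fname_start = q.size();        // start of the replaceable span
  q += "INFILE '";
  append_quoted(q, "/tmp/data.txt");
  q += "' ";
  q += "REPLACE ";
  q += "INTO";
  std::size_t fname_end = q.size();          // end of the replaceable span
  q += " TABLE `db1`.`t1`";
  q += " FIELDS TERMINATED BY '";  append_quoted(q, "\t");  q += "'";
  q += " ENCLOSED BY '";           append_quoted(q, "");    q += "'";
  q += " ESCAPED BY '";            append_quoted(q, "\\");  q += "'";
  q += " LINES TERMINATED BY '";   append_quoted(q, "\n");  q += "'";
  q += " IGNORE 1 LINES ";
  (void) fname_start; (void) fname_end;      // the real code hands these to the event
  return q;
}

For these made-up inputs the result reads: LOAD DATA INFILE '/tmp/data.txt' REPLACE INTO TABLE `db1`.`t1` FIELDS TERMINATED BY '\t' ENCLOSED BY '' ESCAPED BY '\\' LINES TERMINATED BY '\n' IGNORE 1 LINES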
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index d338dc302e6..80a746f32b7 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -1467,7 +1467,7 @@ gtid_state_from_pos(const char *name, uint32 offset,
if (unlikely((file= open_binlog(&cache, name, &errormsg)) == (File)-1))
return errormsg;
- if (!(fdev= new Format_description_log_event(3)))
+ if (!(fdev= new Format_description_log_event(4)))
{
errormsg= "Out of memory initializing format_description event "
"while scanning binlog to find start position";
@@ -2273,7 +2273,7 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log,
if (info->fdev != NULL)
delete info->fdev;
- if (!(info->fdev= new Format_description_log_event(3)))
+ if (!(info->fdev= new Format_description_log_event(4)))
{
info->errmsg= "Out of memory initializing format_description event";
info->error= ER_MASTER_FATAL_ERROR_READING_BINLOG;
@@ -4163,7 +4163,7 @@ bool mysql_show_binlog_events(THD* thd)
}
Format_description_log_event *description_event= new
- Format_description_log_event(3); /* MySQL 4.0 by default */
+ Format_description_log_event(4);
if (binary_log->is_open())
{
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index fbc97ab54fb..f4586d530c9 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -1146,7 +1146,6 @@ String_copier::well_formed_copy(CHARSET_INFO *to_cs,
}
-
/*
Append characters to a single-quoted string '...', escaping special
characters with backslashes as necessary.
@@ -1164,6 +1163,8 @@ bool String::append_for_single_quote(const char *st, size_t len)
case '\\': APPEND(STRING_WITH_LEN("\\\\"));
case '\0': APPEND(STRING_WITH_LEN("\\0"));
case '\'': APPEND(STRING_WITH_LEN("\\'"));
+ case '\b': APPEND(STRING_WITH_LEN("\\b"));
+ case '\t': APPEND(STRING_WITH_LEN("\\t"));
case '\n': APPEND(STRING_WITH_LEN("\\n"));
case '\r': APPEND(STRING_WITH_LEN("\\r"));
case '\032': APPEND(STRING_WITH_LEN("\\Z"));
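The two new escapes are presumably motivated by the rewritten LOAD DATA builder in sql_load.cc, which feeds raw delimiter strings through append_for_single_quote(); the default field terminator is a tab, which would otherwise land in the binlogged statement literally. A toy mirror of the escape table (it assumes the APPEND macro appends and then breaks out of the switch):

#include <string>

// Toy version of String::append_for_single_quote(): special characters
// become backslash escapes inside a '...' literal, everything else is copied.
static std::string toy_escape_for_single_quote(const std::string &in)
{
  std::string out;
  for (char c : in)
  {
    switch (c)
    {
    case '\\':   out += "\\\\"; break;
    case '\0':   out += "\\0";  break;
    case '\'':   out += "\\'";  break;
    case '\b':   out += "\\b";  break;   // new case in this patch
    case '\t':   out += "\\t";  break;   // new case in this patch
    case '\n':   out += "\\n";  break;
    case '\r':   out += "\\r";  break;
    case '\032': out += "\\Z";  break;
    default:     out += c;      break;
    }
  }
  return out;
}

For example, toy_escape_for_single_quote("\t") yields the two characters backslash and t, so the logged statement reads FIELDS TERMINATED BY '\t' rather than containing a literal tab.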
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 371b7de27c5..c6595888b83 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -3413,7 +3413,7 @@ ignore_error:
WSREP_WARN("Ignoring error '%s' on query. "
"Default database: '%s'. Query: '%s', Error_code: %d",
thd->get_stmt_da()->message(),
- print_slave_db_safe(thd->db.str),
+ safe_str(thd->db.str),
thd->query(),
error);
return 1;