Diffstat (limited to 'storage')
-rw-r--r--  storage/archive/ha_archive.h | 2
-rw-r--r--  storage/blackhole/ha_blackhole.h | 2
-rw-r--r--  storage/columnstore/CMakeLists.txt | 4
m---------  storage/columnstore/columnstore | 0
-rw-r--r--  storage/connect/connect.cc | 1
-rw-r--r--  storage/connect/filamdbf.cpp | 199
-rw-r--r--  storage/connect/filamdbf.h | 2
-rw-r--r--  storage/connect/filamzip.cpp | 282
-rw-r--r--  storage/connect/filamzip.h | 38
-rw-r--r--  storage/connect/ha_connect.cc | 7
-rw-r--r--  storage/connect/ha_connect.h | 2
-rw-r--r--  storage/connect/mongo.cpp | 1
-rw-r--r--  storage/connect/mongo.h | 1
-rw-r--r--  storage/connect/plgxml.cpp | 2
-rw-r--r--  storage/connect/tabcmg.cpp | 2
-rw-r--r--  storage/connect/tabdos.cpp | 36
-rw-r--r--  storage/connect/tabdos.h | 1
-rw-r--r--  storage/connect/tabfix.h | 18
-rw-r--r--  storage/connect/tabjson.cpp | 1
-rw-r--r--  storage/connect/tabjson.h | 1
-rw-r--r--  storage/connect/tabzip.cpp | 13
-rw-r--r--  storage/connect/tabzip.h | 2
-rw-r--r--  storage/csv/ha_tina.h | 2
-rw-r--r--  storage/federatedx/ha_federatedx.h | 2
-rw-r--r--  storage/heap/ha_heap.h | 2
-rw-r--r--  storage/innobase/buf/buf0lru.cc | 5
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 6
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 22
-rw-r--r--  storage/innobase/handler/ha_innodb.h | 2
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 10
-rw-r--r--  storage/innobase/lock/lock0wait.cc | 4
-rw-r--r--  storage/maria/ha_maria.h | 153
-rw-r--r--  storage/maria/ha_s3.h | 32
-rw-r--r--  storage/maria/ma_check.c | 1
-rw-r--r--  storage/myisam/ha_myisam.h | 2
-rw-r--r--  storage/myisammrg/ha_myisammrg.h | 2
-rw-r--r--  storage/myisammrg/myrg_extra.c | 2
-rw-r--r--  storage/perfschema/ha_perfschema.cc | 2
-rw-r--r--  storage/perfschema/ha_perfschema.h | 2
-rw-r--r--  storage/sequence/sequence.cc | 2
-rw-r--r--  storage/sphinx/ha_sphinx.h | 2
-rw-r--r--  storage/spider/ha_spider.h | 2
-rw-r--r--  storage/tokudb/CMakeLists.txt | 3
-rw-r--r--  storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake | 23
-rw-r--r--  storage/tokudb/PerconaFT/ft/logger/logger.cc | 3
-rw-r--r--  storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h | 1
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc | 10
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc | 6
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc | 6
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc | 14
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/make-tree.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/mempool-115.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/msnfilter.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc | 6
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc | 4
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc | 8
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc | 6
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc | 8
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc | 6
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc | 6
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc | 20
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/test3884.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/ftcxx/cursor.hpp | 4
-rw-r--r--  storage/tokudb/PerconaFT/locktree/lock_request.cc | 4
-rw-r--r--  storage/tokudb/PerconaFT/locktree/lock_request.h | 1
-rw-r--r--  storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/src/tests/test.h | 4
-rw-r--r--  storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc | 2
-rw-r--r--  storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h | 6
-rw-r--r--  storage/tokudb/PerconaFT/src/ydb.cc | 4
-rw-r--r--  storage/tokudb/PerconaFT/src/ydb_db.cc | 2
-rw-r--r--  storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb-slave.opt | 4
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result | 5
-rw-r--r--  storage/tokudb/mysql-test/tokudb/r/type_ranges.result | 6
85 files changed, 737 insertions(+), 341 deletions(-)
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index b9fcf10f96f..c1b4f27e45e 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -73,7 +73,7 @@ public:
*/
#define ARCHIVE_VERSION 3
-class ha_archive: public handler
+class ha_archive final : public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
Archive_share *share; /* Shared lock info */
diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h
index 646fba6da9f..c2a36a68f45 100644
--- a/storage/blackhole/ha_blackhole.h
+++ b/storage/blackhole/ha_blackhole.h
@@ -36,7 +36,7 @@ struct st_blackhole_share {
Class definition for the blackhole storage engine
"Dumbest named feature ever"
*/
-class ha_blackhole: public handler
+class ha_blackhole final : public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
st_blackhole_share *share;
diff --git a/storage/columnstore/CMakeLists.txt b/storage/columnstore/CMakeLists.txt
index f71cce38bd8..63d9d3b7a6a 100644
--- a/storage/columnstore/CMakeLists.txt
+++ b/storage/columnstore/CMakeLists.txt
@@ -15,14 +15,14 @@ IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR
CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64" OR
CMAKE_SYSTEM_PROCESSOR STREQUAL "i386" OR
CMAKE_SYSTEM_PROCESSOR STREQUAL "i686")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCOLUMNSTORE_MATURITY=MariaDB_PLUGIN_MATURITY_BETA")
add_subdirectory(columnstore)
IF(TARGET columnstore)
# Needed to bump the component changes up to the main scope
APPEND_FOR_CPACK(CPACK_COMPONENTS_ALL)
IF (RPM)
- APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_PACKAGE_REQUIRES " binutils jemalloc net-tools python3")
+ APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_PACKAGE_REQUIRES " binutils net-tools python3")
+ APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_PACKAGE_RECOMMENDS " jemalloc")
APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_USER_FILELIST ";%ignore /var/lib;%ignore /var")
APPEND_FOR_CPACK(CPACK_RPM_columnstore-engine_PACKAGE_CONFLICTS " thrift MariaDB-columnstore-platform MariaDB-columnstore-libs")
# these three don't have the list semantics, so no append here
diff --git a/storage/columnstore/columnstore b/storage/columnstore/columnstore
-Subproject bca0d90d5537050951a6c2282411d955569a8c5
+Subproject b6b02ed516f92055127d416370799d91a82754e
diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc
index 60c10527fe9..3b58e8b5a8f 100644
--- a/storage/connect/connect.cc
+++ b/storage/connect/connect.cc
@@ -355,7 +355,6 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
} // endif mode
rcop = false;
-
} catch (int n) {
if (trace(1))
htrc("Exception %d: %s\n", n, g->Message);
diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp
index e48e40601e3..c8571cea559 100644
--- a/storage/connect/filamdbf.cpp
+++ b/storage/connect/filamdbf.cpp
@@ -49,6 +49,7 @@
#include "global.h"
#include "plgdbsem.h"
#include "filamdbf.h"
+#include "filamzip.h"
#include "tabdos.h"
#include "valblk.h"
#define NO_FUNC
@@ -139,7 +140,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
if (fread(buf, HEADLEN, 1, file) != 1) {
strcpy(g->Message, MSG(NO_READ_32));
return RC_NF;
- } // endif fread
+ } // endif fread
// Check first byte to be sure of .dbf type
if ((buf->Version & 0x03) != DBFTYPE) {
@@ -149,7 +150,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
if ((buf->Version & 0x30) == 0x30) {
strcpy(g->Message, MSG(FOXPRO_FILE));
dbc = 264; // FoxPro database container
- } // endif Version
+ } // endif Version
} else
strcpy(g->Message, MSG(DBASE_FILE));
@@ -158,12 +159,12 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
if (fseek(file, buf->Headlen() - dbc, SEEK_SET) != 0) {
sprintf(g->Message, MSG(BAD_HEADER), fn);
return RC_FX;
- } // endif fseek
+ } // endif fseek
if (fread(&endmark, 2, 1, file) != 1) {
strcpy(g->Message, MSG(BAD_HEAD_END));
return RC_FX;
- } // endif fread
+ } // endif fread
// Some files have just 1D others have 1D00 following fields
if (endmark[0] != EOH && endmark[1] != EOH) {
@@ -172,7 +173,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
if (rc == RC_OK)
return RC_FX;
- } // endif endmark
+ } // endif endmark
// Calculate here the number of fields while we have the dbc info
buf->SetFields((buf->Headlen() - dbc - 1) / 32);
@@ -180,13 +181,58 @@ static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf)
return rc;
} // end of dbfhead
+/****************************************************************************/
+/* dbfields: Analyze a DBF header and set the table fields number. */
+/* Parameters: */
+/* PGLOBAL g -- pointer to the CONNECT Global structure */
+/* DBFHEADER *hdrp -- pointer to _dbfheader structure */
+/* Returns: */
+/* RC_OK, RC_INFO, or RC_FX if error. */
+/****************************************************************************/
+static int dbfields(PGLOBAL g, DBFHEADER* hdrp)
+{
+ char* endmark;
+ int dbc = 2, rc = RC_OK;
+
+ *g->Message = '\0';
+
+ // Check first byte to be sure of .dbf type
+ if ((hdrp->Version & 0x03) != DBFTYPE) {
+ strcpy(g->Message, MSG(NOT_A_DBF_FILE));
+ rc = RC_INFO;
+
+ if ((hdrp->Version & 0x30) == 0x30) {
+ strcpy(g->Message, MSG(FOXPRO_FILE));
+ dbc = 264; // FoxPro database container
+ } // endif Version
+
+ } else
+ strcpy(g->Message, MSG(DBASE_FILE));
+
+ // Check last byte(s) of header
+ endmark = (char*)hdrp + hdrp->Headlen() - dbc;
+
+ // Some headers just have 1D others have 1D00 following fields
+ if (endmark[0] != EOH && endmark[1] != EOH) {
+ sprintf(g->Message, MSG(NO_0DH_HEAD), dbc);
+
+ if (rc == RC_OK)
+ return RC_FX;
+
+ } // endif endmark
+
+ // Calculate here the number of fields while we have the dbc info
+ hdrp->SetFields((hdrp->Headlen() - dbc - 1) / 32);
+ return rc;
+} // end of dbfields
+
/* -------------------------- Function DBFColumns ------------------------- */
/****************************************************************************/
/* DBFColumns: constructs the result blocks containing the description */
/* of all the columns of a DBF file that will be retrieved by #GetData. */
/****************************************************************************/
-PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
+PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS topt, bool info)
{
int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING,
TYPE_INT, TYPE_INT, TYPE_SHORT};
@@ -196,10 +242,12 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
char buf[2], filename[_MAX_PATH];
int ncol = sizeof(buftyp) / sizeof(int);
int rc, type, len, field, fields;
- bool bad;
- DBFHEADER mainhead;
- DESCRIPTOR thisfield;
- FILE *infile = NULL;
+ bool bad, mul;
+ PCSZ target, pwd;
+ DBFHEADER mainhead, *hp;
+ DESCRIPTOR thisfield, *tfp;
+ FILE *infile = NULL;
+ UNZIPUTL *zutp = NULL;
PQRYRES qrp;
PCOLRES crp;
@@ -217,21 +265,55 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
/************************************************************************/
PlugSetPath(filename, fn, dp);
- if (!(infile= global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb")))
- return NULL;
-
- /************************************************************************/
- /* Get the first 32 bytes of the header. */
- /************************************************************************/
- if ((rc = dbfhead(g, infile, filename, &mainhead)) == RC_FX) {
- fclose(infile);
- return NULL;
- } // endif dbfhead
-
- /************************************************************************/
- /* Allocate the structures used to refer to the result set. */
- /************************************************************************/
- fields = mainhead.Fields();
+ if (topt->zipped) {
+ target = GetStringTableOption(g, topt, "Entry", NULL);
+ mul = (target && *target) ? strchr(target, '*') || strchr(target, '?')
+ : false;
+ mul = GetBooleanTableOption(g, topt, "Mulentries", mul);
+
+ if (mul) {
+ strcpy(g->Message, "Cannot find column definition for multiple entries");
+ return NULL;
+ } // endif Multiple
+
+ pwd = GetStringTableOption(g, topt, "Password", NULL);
+ zutp = new(g) UNZIPUTL(target, pwd, mul);
+
+ if (!zutp->OpenTable(g, MODE_READ, filename))
+ hp = (DBFHEADER*)zutp->memory;
+ else
+ return NULL;
+
+ /**********************************************************************/
+ /* Set the table fields number. */
+ /**********************************************************************/
+ if ((rc = dbfields(g, hp)) == RC_FX) {
+ zutp->close();
+ return NULL;
+ } // endif dbfields
+
+ tfp = (DESCRIPTOR*)hp;
+ } else {
+ if (!(infile = global_fopen(g, MSGID_CANNOT_OPEN, filename, "rb")))
+ return NULL;
+ else
+ hp = &mainhead;
+
+ /**********************************************************************/
+ /* Get the first 32 bytes of the header. */
+ /**********************************************************************/
+ if ((rc = dbfhead(g, infile, filename, hp)) == RC_FX) {
+ fclose(infile);
+ return NULL;
+ } // endif dbfhead
+
+ tfp = &thisfield;
+ } // endif zipped
+
+ /************************************************************************/
+ /* Get the number of the table fields. */
+ /************************************************************************/
+ fields = hp->Fields();
} else
fields = 0;
@@ -241,19 +323,21 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
if (info || !qrp) {
if (infile)
fclose(infile);
+ else if (zutp)
+ zutp->close();
return qrp;
- } // endif info
+ } // endif info
if (trace(1)) {
htrc("Structure of %s\n", filename);
htrc("headlen=%hd reclen=%hd degree=%d\n",
- mainhead.Headlen(), mainhead.Reclen(), fields);
- htrc("flags(iem)=%d,%d,%d cp=%d\n", mainhead.Incompleteflag,
- mainhead.Encryptflag, mainhead.Mdxflag, mainhead.Language);
+ hp->Headlen(), hp->Reclen(), fields);
+ htrc("flags(iem)=%d,%d,%d cp=%d\n", hp->Incompleteflag,
+ hp->Encryptflag, hp->Mdxflag, hp->Language);
htrc("%hd records, last changed %02d/%02d/%d\n",
- mainhead.Records(), mainhead.Filedate[1], mainhead.Filedate[2],
- mainhead.Filedate[0] + ((mainhead.Filedate[0] <= 30) ? 2000 : 1900));
+ hp->Records(), hp->Filedate[1], hp->Filedate[2],
+ hp->Filedate[0] + ((hp->Filedate[0] <= 30) ? 2000 : 1900));
htrc("Field Type Offset Len Dec Set Mdx\n");
} // endif trace
@@ -265,21 +349,24 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
for (field = 0; field < fields; field++) {
bad = FALSE;
- if (fread(&thisfield, HEADLEN, 1, infile) != 1) {
+ if (topt->zipped) {
+ tfp = (DESCRIPTOR*)((char*)tfp + HEADLEN);
+ } else if (fread(tfp, HEADLEN, 1, infile) != 1) {
sprintf(g->Message, MSG(ERR_READING_REC), field+1, fn);
goto err;
- } else
- len = thisfield.Length;
+ } // endif fread
+
+ len = tfp->Length;
if (trace(1))
htrc("%-11s %c %6ld %3d %2d %3d %3d\n",
- thisfield.Name, thisfield.Type, thisfield.Offset, len,
- thisfield.Decimals, thisfield.Setfield, thisfield.Mdxfield);
+ tfp->Name, tfp->Type, tfp->Offset, len,
+ tfp->Decimals, tfp->Setfield, tfp->Mdxfield);
/************************************************************************/
/* Now get the results into blocks. */
/************************************************************************/
- switch (thisfield.Type) {
+ switch (tfp->Type) {
case 'C': // Characters
case 'L': // Logical 'T' or 'F' or space
type = TYPE_STRING;
@@ -294,7 +381,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
// type = TYPE_INT;
// break;
case 'N':
- type = (thisfield.Decimals) ? TYPE_DOUBLE
+ type = (tfp->Decimals) ? TYPE_DOUBLE
: (len > 10) ? TYPE_BIGINT : TYPE_INT;
break;
case 'F': // Float
@@ -306,8 +393,8 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
break;
default:
if (!info) {
- sprintf(g->Message, MSG(BAD_DBF_TYPE), thisfield.Type
- , thisfield.Name);
+ sprintf(g->Message, MSG(BAD_DBF_TYPE), tfp->Type
+ , tfp->Name);
goto err;
} // endif info
@@ -316,27 +403,31 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
} // endswitch Type
crp = qrp->Colresp; // Column Name
- crp->Kdata->SetValue(thisfield.Name, field);
+ crp->Kdata->SetValue(tfp->Name, field);
crp = crp->Next; // Data Type
crp->Kdata->SetValue((int)type, field);
crp = crp->Next; // Type Name
if (bad) {
- buf[0] = thisfield.Type;
+ buf[0] = tfp->Type;
crp->Kdata->SetValue(buf, field);
} else
crp->Kdata->SetValue(GetTypeName(type), field);
crp = crp->Next; // Precision
- crp->Kdata->SetValue((int)thisfield.Length, field);
+ crp->Kdata->SetValue((int)tfp->Length, field);
crp = crp->Next; // Length
- crp->Kdata->SetValue((int)thisfield.Length, field);
+ crp->Kdata->SetValue((int)tfp->Length, field);
crp = crp->Next; // Scale (precision)
- crp->Kdata->SetValue((int)thisfield.Decimals, field);
+ crp->Kdata->SetValue((int)tfp->Decimals, field);
} // endfor field
qrp->Nblin = field;
- fclose(infile);
+
+ if (infile)
+ fclose(infile);
+ else if (zutp)
+ zutp->close();
#if 0
if (info) {
@@ -347,9 +438,9 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
sprintf(buf,
"Ver=%02x ncol=%hu nlin=%u lrecl=%hu headlen=%hu date=%02d/%02d/%02d",
- mainhead.Version, fields, mainhead.Records, mainhead.Reclen,
- mainhead.Headlen, mainhead.Filedate[0], mainhead.Filedate[1],
- mainhead.Filedate[2]);
+ hp->Version, fields, hp->Records, hp->Reclen,
+ hp->Headlen, hp->Filedate[0], hp->Filedate[1],
+ hp->Filedate[2]);
strcat(g->Message, buf);
} // endif info
@@ -360,9 +451,13 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info)
/**************************************************************************/
return qrp;
- err:
- fclose(infile);
- return NULL;
+err:
+ if (infile)
+ fclose(infile);
+ else if (zutp)
+ zutp->close();
+
+ return NULL;
} // end of DBFColumns
/* ---------------------------- Class DBFBASE ----------------------------- */
diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h
index 640fc349b4c..dfe5cb5cfc4 100644
--- a/storage/connect/filamdbf.h
+++ b/storage/connect/filamdbf.h
@@ -19,7 +19,7 @@ typedef class DBMFAM *PDBMFAM;
/****************************************************************************/
/* Functions used externally. */
/****************************************************************************/
-PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info);
+PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, PTOS tiop, bool info);
/****************************************************************************/
/* This is the base class for dBASE file access methods. */
diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp
index fd1cf0ceff9..eb14e846120 100644
--- a/storage/connect/filamzip.cpp
+++ b/storage/connect/filamzip.cpp
@@ -1,11 +1,11 @@
/*********** File AM Zip C++ Program Source Code File (.CPP) ***********/
/* PROGRAM NAME: FILAMZIP */
/* ------------- */
-/* Version 1.3 */
+/* Version 1.4 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -45,6 +45,62 @@
#define WRITEBUFFERSIZE (16384)
+/****************************************************************************/
+/* Definitions used for DBF tables. */
+/****************************************************************************/
+#define HEADLEN 32 /* sizeof ( mainhead or thisfield ) */
+//efine MEMOLEN 10 /* length of memo field in .dbf */
+#define DBFTYPE 3 /* value of bits 0 and 1 if .dbf */
+#define EOH 0x0D /* end-of-header marker in .dbf file */
+
+/****************************************************************************/
+/* First 32 bytes of a DBF table. */
+/* Note: some reserved fields are used here to store info (Fields) */
+/****************************************************************************/
+typedef struct _dbfheader {
+ uchar Version; /* Version information flags */
+ char Filedate[3]; /* date, YYMMDD, binary. YY=year-1900 */
+private:
+ /* The following four members are stored in little-endian format on disk */
+ char m_RecordsBuf[4]; /* records in the file */
+ char m_HeadlenBuf[2]; /* bytes in the header */
+ char m_ReclenBuf[2]; /* bytes in a record */
+ char m_FieldsBuf[2]; /* Reserved but used to store fields */
+public:
+ char Incompleteflag; /* 01 if incomplete, else 00 */
+ char Encryptflag; /* 01 if encrypted, else 00 */
+ char Reserved2[12]; /* for LAN use */
+ char Mdxflag; /* 01 if production .mdx, else 00 */
+ char Language; /* Codepage */
+ char Reserved3[2];
+
+ uint Records(void) const { return uint4korr(m_RecordsBuf); }
+ ushort Headlen(void) const { return uint2korr(m_HeadlenBuf); }
+ ushort Reclen(void) const { return uint2korr(m_ReclenBuf); }
+ ushort Fields(void) const { return uint2korr(m_FieldsBuf); }
+
+ void SetHeadlen(ushort num) { int2store(m_HeadlenBuf, num); }
+ void SetReclen(ushort num) { int2store(m_ReclenBuf, num); }
+ void SetFields(ushort num) { int2store(m_FieldsBuf, num); }
+} DBFHEADER;
+
+/****************************************************************************/
+/* Column field descriptor of a .dbf file. */
+/****************************************************************************/
+typedef struct _descriptor {
+ char Name[11]; /* field name, in capitals, null filled*/
+ char Type; /* field type, C, D, F, L, M or N */
+ uint Offset; /* used in memvars, not in files. */
+ uchar Length; /* field length */
+ uchar Decimals; /* number of decimal places */
+ short Reserved4;
+ char Workarea; /* ??? */
+ char Reserved5[2];
+ char Setfield; /* ??? */
+ char Reserved6[7];
+ char Mdxfield; /* 01 if tag field in production .mdx */
+} DESCRIPTOR;
+
bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul);
/***********************************************************************/
@@ -214,10 +270,21 @@ bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul
buf = (char*)PlugSubAlloc(g, NULL, WRITEBUFFERSIZE);
- if (mul)
- err = ZipFiles(g, zutp, fn, buf);
- else
- err = ZipFile(g, zutp, fn, entry, buf);
+ if (!mul) {
+ PCSZ entp;
+
+ if (!entry) { // entry defaults to the file name
+ char* p = strrchr((char*)fn, '/');
+#if defined(__WIN__)
+ if (!p) p = strrchr((char*)fn, '\\');
+#endif // __WIN__
+ entp = (p) ? p + 1 : entry;
+ } else
+ entp = entry;
+
+ err = ZipFile(g, zutp, fn, entp, buf);
+ } else
+ err = ZipFiles(g, zutp, fn, buf);
zutp->close();
return err;
@@ -232,6 +299,7 @@ ZIPUTIL::ZIPUTIL(PCSZ tgt)
{
zipfile = NULL;
target = tgt;
+ pwd = NULL;
fp = NULL;
entryopen = false;
} // end of ZIPUTIL standard constructor
@@ -241,6 +309,7 @@ ZIPUTIL::ZIPUTIL(ZIPUTIL *zutp)
{
zipfile = zutp->zipfile;
target = zutp->target;
+ pwd = zutp->pwd;
fp = zutp->fp;
entryopen = zutp->entryopen;
} // end of UNZIPUTL copy constructor
@@ -385,11 +454,11 @@ void ZIPUTIL::closeEntry()
/***********************************************************************/
/* Constructors. */
/***********************************************************************/
-UNZIPUTL::UNZIPUTL(PCSZ tgt, bool mul)
+UNZIPUTL::UNZIPUTL(PCSZ tgt, PCSZ pw, bool mul)
{
zipfile = NULL;
target = tgt;
- pwd = NULL;
+ pwd = pw;
fp = NULL;
memory = NULL;
size = 0;
@@ -959,7 +1028,7 @@ int UZXFAM::Cardinality(PGLOBAL g)
} // end of Cardinality
/***********************************************************************/
-/* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */
+/* OpenTableFile: Open a FIX/UNIX table file from a ZIP file. */
/***********************************************************************/
bool UZXFAM::OpenTableFile(PGLOBAL g)
{
@@ -1015,6 +1084,197 @@ int UZXFAM::GetNext(PGLOBAL g)
return RC_OK;
} // end of GetNext
+/* -------------------------- class UZDFAM --------------------------- */
+
+/***********************************************************************/
+/* Constructors. */
+/***********************************************************************/
+UZDFAM::UZDFAM(PDOSDEF tdp) : DBMFAM(tdp)
+{
+ zutp = NULL;
+ tdfp = tdp;
+ //target = tdp->GetEntry();
+ //mul = tdp->GetMul();
+ //Lrecl = tdp->GetLrecl();
+} // end of UZXFAM standard constructor
+
+UZDFAM::UZDFAM(PUZDFAM txfp) : DBMFAM(txfp)
+{
+ zutp = txfp->zutp;
+ tdfp = txfp->tdfp;
+ //target = txfp->target;
+ //mul = txfp->mul;
+ //Lrecl = txfp->Lrecl;
+} // end of UZXFAM copy constructor
+
+#if 0
+/****************************************************************************/
+/* dbfhead: Routine to analyze a DBF header. */
+/* Parameters: */
+/* PGLOBAL g -- pointer to the CONNECT Global structure */
+/* DBFHEADER *hdrp -- pointer to _dbfheader structure */
+/* Returns: */
+/* RC_OK, RC_NF, RC_INFO, or RC_FX if error. */
+/* Side effects: */
+/* Set the fields number in the header. */
+/****************************************************************************/
+int UZDFAM::dbfhead(PGLOBAL g, void* buf)
+{
+ char *endmark;
+ int dbc = 2, rc = RC_OK;
+ DBFHEADER* hdrp = (DBFHEADER*)buf;
+
+ *g->Message = '\0';
+
+ // Check first byte to be sure of .dbf type
+ if ((hdrp->Version & 0x03) != DBFTYPE) {
+ strcpy(g->Message, MSG(NOT_A_DBF_FILE));
+ rc = RC_INFO;
+
+ if ((hdrp->Version & 0x30) == 0x30) {
+ strcpy(g->Message, MSG(FOXPRO_FILE));
+ dbc = 264; // FoxPro database container
+ } // endif Version
+
+ } else
+ strcpy(g->Message, MSG(DBASE_FILE));
+
+ // Check last byte(s) of header
+ endmark = (char*)hdrp + hdrp->Headlen() - dbc;
+
+ // Some headers just have 1D others have 1D00 following fields
+ if (endmark[0] != EOH && endmark[1] != EOH) {
+ sprintf(g->Message, MSG(NO_0DH_HEAD), dbc);
+
+ if (rc == RC_OK)
+ return RC_FX;
+
+ } // endif endmark
+
+ // Calculate here the number of fields while we have the dbc info
+ hdrp->SetFields((hdrp->Headlen() - dbc - 1) / 32);
+ return rc;
+} // end of dbfhead
+
+/****************************************************************************/
+/* ScanHeader: scan the DBF file header for number of records, record size,*/
+/* and header length. Set Records, check that Reclen is equal to lrecl and */
+/* return the header length or 0 in case of error. */
+/****************************************************************************/
+int UZDFAM::ScanHeader(PGLOBAL g, int* rln)
+{
+ int rc;
+ DBFHEADER header;
+
+ /************************************************************************/
+ /* Get the first 32 bytes of the header. */
+ /************************************************************************/
+ rc = dbfhead(g, &header);
+
+ if (rc == RC_FX)
+ return -1;
+
+ *rln = (int)header.Reclen();
+ Records = (int)header.Records();
+ return (int)header.Headlen();
+} // end of ScanHeader
+#endif // 0
+
+/***********************************************************************/
+/* ZIP GetFileLength: returns file size in number of bytes. */
+/***********************************************************************/
+int UZDFAM::GetFileLength(PGLOBAL g)
+{
+ int len;
+
+ if (!zutp && OpenTableFile(g))
+ return 0;
+
+ if (zutp->entryopen)
+ len = zutp->size;
+ else
+ len = 0;
+
+ return len;
+} // end of GetFileLength
+
+/***********************************************************************/
+/* ZIP Cardinality: return the number of rows if possible. */
+/***********************************************************************/
+int UZDFAM::Cardinality(PGLOBAL g)
+{
+ if (!g)
+ return 1;
+
+ int card = -1;
+ int len = GetFileLength(g);
+
+ card = Records;
+
+ // Set number of blocks for later use
+ Block = (card > 0) ? (card + Nrec - 1) / Nrec : 0;
+ return card;
+} // end of Cardinality
+
+/***********************************************************************/
+/* OpenTableFile: Open a DBF table file from a ZIP file. */
+/***********************************************************************/
+bool UZDFAM::OpenTableFile(PGLOBAL g)
+{
+ // May have been already opened in GetFileLength
+ if (!zutp || !zutp->zipfile) {
+ char filename[_MAX_PATH];
+ MODE mode = Tdbp->GetMode();
+
+ /*********************************************************************/
+ /* Allocate the ZIP utility class. */
+ /*********************************************************************/
+ if (!zutp)
+ zutp = new(g)UNZIPUTL(tdfp);
+
+ // We used the file name relative to recorded datapath
+ PlugSetPath(filename, To_File, Tdbp->GetPath());
+
+ if (!zutp->OpenTable(g, mode, filename)) {
+ // The pseudo "buffer" is here the entire real buffer
+ Memory = zutp->memory;
+ Top = Memory + zutp->size;
+ To_Fb = zutp->fp; // Useful when closing
+ return AllocateBuffer(g);
+ } else
+ return true;
+
+ } else
+ Reset();
+
+ return false;
+} // end of OpenTableFile
+
+/***********************************************************************/
+/* GetNext: go to next entry. */
+/***********************************************************************/
+int UZDFAM::GetNext(PGLOBAL g)
+{
+ int rc = zutp->nextEntry(g);
+
+ if (rc != RC_OK)
+ return rc;
+
+ int len = zutp->size;
+
+#if 0
+ if (len % Lrecl) {
+ sprintf(g->Message, MSG(NOT_FIXED_LEN), zutp->fn, len, Lrecl);
+ return RC_FX;
+ } // endif size
+#endif // 0
+
+ Memory = zutp->memory;
+ Top = Memory + len;
+ Rewind();
+ return RC_OK;
+} // end of GetNext
+
/* -------------------------- class ZIPFAM --------------------------- */
/***********************************************************************/
@@ -1045,7 +1305,7 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g)
strcpy(g->Message, "No insert into existing zip file");
return true;
} else if (append && len > 0) {
- UNZIPUTL *zutp = new(g) UNZIPUTL(target, false);
+ UNZIPUTL *zutp = new(g) UNZIPUTL(target, NULL, false);
if (!zutp->IsInsertOk(g, filename)) {
strcpy(g->Message, "No insert into existing entry");
@@ -1129,7 +1389,7 @@ bool ZPXFAM::OpenTableFile(PGLOBAL g)
strcpy(g->Message, "No insert into existing zip file");
return true;
} else if (append && len > 0) {
- UNZIPUTL *zutp = new(g) UNZIPUTL(target, false);
+ UNZIPUTL *zutp = new(g) UNZIPUTL(target, NULL, false);
if (!zutp->IsInsertOk(g, filename)) {
strcpy(g->Message, "No insert into existing entry");
diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h
index be17d954728..7ff1fb0a543 100644
--- a/storage/connect/filamzip.h
+++ b/storage/connect/filamzip.h
@@ -1,7 +1,7 @@
/************** filamzip H Declares Source Code File (.H) **************/
-/* Name: filamzip.h Version 1.2 */
+/* Name: filamzip.h Version 1.3 */
/* */
-/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */
+/* (C) Copyright to the author Olivier BERTRAND 2016-2020 */
/* */
/* This file contains the ZIP file access method classes declares. */
/***********************************************************************/
@@ -11,6 +11,7 @@
#include "block.h"
#include "filamap.h"
#include "filamfix.h"
+#include "filamdbf.h"
#include "zip.h"
#include "unzip.h"
@@ -18,6 +19,7 @@
typedef class UNZFAM *PUNZFAM;
typedef class UZXFAM *PUZXFAM;
+typedef class UZDFAM* PUZDFAM;
typedef class ZIPFAM *PZIPFAM;
typedef class ZPXFAM *PZPXFAM;
@@ -53,7 +55,7 @@ class DllExport ZIPUTIL : public BLOCK {
class DllExport UNZIPUTL : public BLOCK {
public:
// Constructor
- UNZIPUTL(PCSZ tgt, bool mul);
+ UNZIPUTL(PCSZ tgt, PCSZ pw, bool mul);
UNZIPUTL(PDOSDEF tdp);
// Implementation
@@ -144,6 +146,36 @@ class DllExport UZXFAM : public MPXFAM {
}; // end of UZXFAM
/***********************************************************************/
+/* This is the fixed unzip file access method. */
+/***********************************************************************/
+class DllExport UZDFAM : public DBMFAM {
+ //friend class UNZFAM;
+public:
+ // Constructors
+ UZDFAM(PDOSDEF tdp);
+ UZDFAM(PUZDFAM txfp);
+
+ // Implementation
+ virtual AMT GetAmType(void) { return TYPE_AM_ZIP; }
+ virtual PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UZDFAM(this); }
+
+ // Methods
+ virtual int GetFileLength(PGLOBAL g);
+ virtual int Cardinality(PGLOBAL g);
+ virtual bool OpenTableFile(PGLOBAL g);
+ virtual int GetNext(PGLOBAL g);
+ //virtual int ReadBuffer(PGLOBAL g);
+
+protected:
+ int dbfhead(PGLOBAL g, void* buf);
+ int ScanHeader(PGLOBAL g, int* rln);
+
+ // Members
+ UNZIPUTL* zutp;
+ PDOSDEF tdfp;
+}; // end of UZDFAM
+
+/***********************************************************************/
/* This is the zip file access method. */
/***********************************************************************/
class DllExport ZIPFAM : public DOSFAM {
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index b1b44085e53..a5d000d566d 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -5883,7 +5883,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
} else switch (ttp) {
case TAB_DBF:
- qrp= DBFColumns(g, dpath, fn, fnc == FNC_COL);
+ qrp= DBFColumns(g, dpath, fn, topt, fnc == FNC_COL);
break;
#if defined(ODBC_SUPPORT)
case TAB_ODBC:
@@ -6734,11 +6734,6 @@ int ha_connect::create(const char *name, TABLE *table_arg,
PCSZ m= GetListOption(g, "Mulentries", options->oplist, "NO");
bool mul= *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON");
- if (!entry && !mul) {
- my_message(ER_UNKNOWN_ERROR, "Missing entry name", MYF(0));
- DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
- } // endif entry
-
strcat(strcat(strcpy(dbpath, "./"), table->s->db.str), "/");
PlugSetPath(zbuf, options->filename, dbpath);
PlugSetPath(buf, fn, dbpath);
diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h
index 09e8c42b443..8be1fe262b6 100644
--- a/storage/connect/ha_connect.h
+++ b/storage/connect/ha_connect.h
@@ -144,7 +144,7 @@ typedef class ha_connect *PHC;
/** @brief
Class definition for the storage engine
*/
-class ha_connect: public handler
+class ha_connect final : public handler
{
THR_LOCK_DATA lock; ///< MySQL lock
CONNECT_SHARE *share; ///< Shared lock info
diff --git a/storage/connect/mongo.cpp b/storage/connect/mongo.cpp
index 53e2bf377c4..bd3d3b893c1 100644
--- a/storage/connect/mongo.cpp
+++ b/storage/connect/mongo.cpp
@@ -380,7 +380,6 @@ MGODEF::MGODEF(void)
Uri = NULL;
Colist = NULL;
Filter = NULL;
- Level = 0;
Base = 0;
Version = 0;
Pipe = false;
diff --git a/storage/connect/mongo.h b/storage/connect/mongo.h
index 97c391a217f..dcefac372c0 100644
--- a/storage/connect/mongo.h
+++ b/storage/connect/mongo.h
@@ -82,7 +82,6 @@ protected:
PSZ Wrapname; /* Java wrapper name */
PCSZ Colist; /* Options list */
PCSZ Filter; /* Filtering query */
- int Level; /* Used for catalog table */
int Base; /* The array index base */
int Version; /* The Java driver version */
bool Pipe; /* True is Colist is a pipeline */
diff --git a/storage/connect/plgxml.cpp b/storage/connect/plgxml.cpp
index f3d3a010266..8c5cc261899 100644
--- a/storage/connect/plgxml.cpp
+++ b/storage/connect/plgxml.cpp
@@ -49,7 +49,7 @@ bool XMLDOCUMENT::InitZip(PGLOBAL g, PCSZ entry)
{
#if defined(ZIP_SUPPORT)
bool mul = (entry) ? strchr(entry, '*') || strchr(entry, '?') : false;
- zip = new(g) UNZIPUTL(entry, mul);
+ zip = new(g) UNZIPUTL(entry, NULL, mul);
return zip == NULL;
#else // !ZIP_SUPPORT
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
diff --git a/storage/connect/tabcmg.cpp b/storage/connect/tabcmg.cpp
index b9b7f6e4b60..f2ff721627c 100644
--- a/storage/connect/tabcmg.cpp
+++ b/storage/connect/tabcmg.cpp
@@ -26,6 +26,8 @@
#include "tabmul.h"
#include "filter.h"
+PQRYRES MGOColumns(PGLOBAL g, PCSZ db, PCSZ uri, PTOS topt, bool info);
+
/* -------------------------- Class CMGDISC -------------------------- */
/***********************************************************************/
diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp
index 32f549b0f79..4f5a9661369 100644
--- a/storage/connect/tabdos.cpp
+++ b/storage/connect/tabdos.cpp
@@ -1,11 +1,11 @@
/************* TabDos C++ Program Source Code File (.CPP) **************/
/* PROGRAM NAME: TABDOS */
/* ------------- */
-/* Version 4.9.4 */
+/* Version 4.9.5 */
/* */
/* COPYRIGHT: */
/* ---------- */
-/* (C) Copyright to the author Olivier BERTRAND 1998-2019 */
+/* (C) Copyright to the author Olivier BERTRAND 1998-2020 */
/* */
/* WHAT THIS PROGRAM DOES: */
/* ----------------------- */
@@ -359,7 +359,26 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
/* Allocate table and file processing class of the proper type. */
/* Column blocks will be allocated only when needed. */
/*********************************************************************/
- if (Zipped) {
+ if (Recfm == RECFM_DBF) {
+ if (Catfunc == FNC_NO) {
+ if (Zipped) {
+ if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
+ txfp = new(g) UZDFAM(this);
+ } else {
+ strcpy(g->Message, "Zipped DBF tables are read only");
+ return NULL;
+ } // endif's mode
+
+ } else if (map)
+ txfp = new(g) DBMFAM(this);
+ else
+ txfp = new(g) DBFFAM(this);
+
+ tdbp = new(g) TDBFIX(this, txfp);
+ } else
+ tdbp = new(g) TDBDCL(this); // Catfunc should be 'C'
+
+ } else if (Zipped) {
#if defined(ZIP_SUPPORT)
if (Recfm == RECFM_VAR) {
if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) {
@@ -389,17 +408,6 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode)
sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP");
return NULL;
#endif // !ZIP_SUPPORT
- } else if (Recfm == RECFM_DBF) {
- if (Catfunc == FNC_NO) {
- if (map)
- txfp = new(g) DBMFAM(this);
- else
- txfp = new(g) DBFFAM(this);
-
- tdbp = new(g) TDBFIX(this, txfp);
- } else // Catfunc should be 'C'
- tdbp = new(g) TDBDCL(this);
-
} else if (Recfm != RECFM_VAR && Compressed < 2) {
if (Huge)
txfp = new(g) BGXFAM(this);
diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h
index 207a1277fce..80dfe63845d 100644
--- a/storage/connect/tabdos.h
+++ b/storage/connect/tabdos.h
@@ -30,6 +30,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */
friend class DBFBASE;
friend class UNZIPUTL;
friend class JSONCOL;
+ friend class TDBDCL;
public:
// Constructor
DOSDEF(void);
diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h
index 53c0af1c422..5f859a2bffe 100644
--- a/storage/connect/tabfix.h
+++ b/storage/connect/tabfix.h
@@ -98,18 +98,20 @@ class DllExport BINCOL : public DOSCOL {
/* This is the class declaration for the DBF columns catalog table. */
/***********************************************************************/
class TDBDCL : public TDBCAT {
- public:
- // Constructor
- TDBDCL(PDOSDEF tdp) : TDBCAT(tdp) {Fn = tdp->GetFn();}
+public:
+ // Constructor
+ TDBDCL(PDOSDEF tdp) : TDBCAT(tdp)
+ {Fn = tdp->GetFn(); Topt = tdp->GetTopt();}
- protected:
+protected:
// Specific routines
- virtual PQRYRES GetResult(PGLOBAL g)
- {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, false);}
+ virtual PQRYRES GetResult(PGLOBAL g)
+ {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, Topt, false);}
- // Members
+ // Members
PCSZ Fn; // The DBF file (path) name
- }; // end of class TDBOCL
+ PTOS Topt;
+}; // end of class TDBOCL
#endif // __TABFIX__
diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp
index a8e96e2fe8d..692ca9d0258 100644
--- a/storage/connect/tabjson.cpp
+++ b/storage/connect/tabjson.cpp
@@ -741,6 +741,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m)
/***********************************************************************/
TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp)
{
+ G = NULL;
Top = NULL;
Row = NULL;
Val = NULL;
diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h
index 8721a2a5ab7..8c3f1013919 100644
--- a/storage/connect/tabjson.h
+++ b/storage/connect/tabjson.h
@@ -104,7 +104,6 @@ public:
PCSZ Xcol; /* Name of expandable column */
int Limit; /* Limit of multiple values */
int Pretty; /* Depends on file structure */
- int Level; /* Used for catalog table */
int Base; /* The array index base */
bool Strict; /* Strict syntax checking */
char Sep; /* The Jpath separator */
diff --git a/storage/connect/tabzip.cpp b/storage/connect/tabzip.cpp
index c026744dba8..d9c13e2a58a 100644
--- a/storage/connect/tabzip.cpp
+++ b/storage/connect/tabzip.cpp
@@ -23,6 +23,7 @@
#include "filamzip.h"
#include "resource.h" // for IDS_COLUMNS
#include "tabdos.h"
+#include "tabmul.h"
#include "tabzip.h"
/* -------------------------- Class ZIPDEF --------------------------- */
@@ -41,7 +42,14 @@ bool ZIPDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
/***********************************************************************/
PTDB ZIPDEF::GetTable(PGLOBAL g, MODE m)
{
- return new(g) TDBZIP(this);
+ PTDB tdbp = NULL;
+
+ tdbp = new(g) TDBZIP(this);
+
+ if (Multiple)
+ tdbp = new(g) TDBMUL(tdbp);
+
+ return tdbp;
} // end of GetTable
/* ------------------------------------------------------------------- */
@@ -108,7 +116,7 @@ int TDBZIP::Cardinality(PGLOBAL g)
Cardinal = (err == UNZ_OK) ? (int)ginfo.number_entry : 0;
} else
- Cardinal = 0;
+ Cardinal = 10; // Dummy for multiple tables
} // endif Cardinal
@@ -187,6 +195,7 @@ int TDBZIP::DeleteDB(PGLOBAL g, int irc)
void TDBZIP::CloseDB(PGLOBAL g)
{
close();
+ nexterr = UNZ_OK; // For multiple tables
Use = USE_READY; // Just to be clean
} // end of CloseDB
diff --git a/storage/connect/tabzip.h b/storage/connect/tabzip.h
index 32b15281f81..d36e4dc01d0 100644
--- a/storage/connect/tabzip.h
+++ b/storage/connect/tabzip.h
@@ -48,6 +48,8 @@ public:
// Implementation
virtual AMT GetAmType(void) {return TYPE_AM_ZIP;}
+ virtual PCSZ GetFile(PGLOBAL) {return zfn;}
+ virtual void SetFile(PGLOBAL, PCSZ fn) {zfn = fn;}
// Methods
virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n);
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index aae535c271e..043183444da 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -57,7 +57,7 @@ struct tina_set {
my_off_t end;
};
-class ha_tina: public handler
+class ha_tina final : public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
TINA_SHARE *share; /* Shared lock info */
diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h
index 67fe5f8cc22..333028da587 100644
--- a/storage/federatedx/ha_federatedx.h
+++ b/storage/federatedx/ha_federatedx.h
@@ -260,7 +260,7 @@ public:
/*
Class definition for the storage engine
*/
-class ha_federatedx: public handler
+class ha_federatedx final : public handler
{
friend int federatedx_db_init(void *p);
diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h
index 50d3c0afb6c..3a41028c719 100644
--- a/storage/heap/ha_heap.h
+++ b/storage/heap/ha_heap.h
@@ -25,7 +25,7 @@
#include <heap.h>
#include "sql_class.h" /* THD */
-class ha_heap: public handler
+class ha_heap final : public handler
{
HP_INFO *file;
HP_SHARE *internal_share;
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 5d9a39b0c9e..26c5850d290 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -570,8 +570,9 @@ static bool buf_LRU_free_from_common_LRU_list(bool scan_all)
ever being accessed. This gives us a measure of
the effectiveness of readahead */
++buf_pool.stat.n_ra_pages_evicted;
- break;
}
+
+ break;
}
}
@@ -673,7 +674,7 @@ static void buf_LRU_check_size_of_non_data_objects()
+ UT_LIST_GET_LEN(buf_pool.LRU))
< buf_pool.curr_size / 3) {
- if (!buf_lru_switched_on_innodb_mon) {
+ if (!buf_lru_switched_on_innodb_mon && srv_monitor_timer) {
/* Over 67 % of the buffer pool is occupied by lock
heaps or the adaptive hash index. This may be a memory
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 145f5c27e5f..b5083b51061 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -1092,9 +1092,6 @@ fts_cache_clear(
index_cache->doc_stats = NULL;
}
- mem_heap_free(static_cast<mem_heap_t*>(cache->sync_heap->arg));
- cache->sync_heap->arg = NULL;
-
fts_need_sync = false;
cache->total_size = 0;
@@ -1102,6 +1099,9 @@ fts_cache_clear(
mutex_enter((ib_mutex_t*) &cache->deleted_lock);
cache->deleted_doc_ids = NULL;
mutex_exit((ib_mutex_t*) &cache->deleted_lock);
+
+ mem_heap_free(static_cast<mem_heap_t*>(cache->sync_heap->arg));
+ cache->sync_heap->arg = NULL;
}
/*********************************************************************//**
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index cf51b91af64..4742cd05bf5 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -4029,16 +4029,7 @@ innobase_commit_low(
const bool is_wsrep = trx->is_wsrep();
THD* thd = trx->mysql_thd;
if (is_wsrep) {
-#ifdef WSREP_PROC_INFO
- char info[64];
- info[sizeof(info) - 1] = '\0';
- snprintf(info, sizeof(info) - 1,
- "innobase_commit_low():trx_commit_for_mysql(%lld)",
- (long long) wsrep_thd_trx_seqno(thd));
- tmp = thd_proc_info(thd, info);
-#else
tmp = thd_proc_info(thd, "innobase_commit_low()");
-#endif /* WSREP_PROC_INFO */
}
#endif /* WITH_WSREP */
if (trx_is_started(trx)) {
@@ -18567,11 +18558,14 @@ static
void
innodb_status_output_update(THD*,st_mysql_sys_var*,void*var,const void*save)
{
- *static_cast<my_bool*>(var) = *static_cast<const my_bool*>(save);
- mysql_mutex_unlock(&LOCK_global_system_variables);
- /* Wakeup server monitor. */
- srv_monitor_timer_schedule_now();
- mysql_mutex_lock(&LOCK_global_system_variables);
+ if (srv_monitor_timer)
+ {
+ *static_cast<my_bool*>(var)= *static_cast<const my_bool*>(save);
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ /* Wakeup server monitor. */
+ srv_monitor_timer_schedule_now();
+ mysql_mutex_lock(&LOCK_global_system_variables);
+ }
}
/** Update the system variable innodb_encryption_threads.
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 499b250d880..fc4b2cf84ac 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -58,7 +58,7 @@ struct st_handler_tablename
const char *tablename;
};
/** The class defining a handle to an Innodb table */
-class ha_innobase final: public handler
+class ha_innobase final : public handler
{
public:
ha_innobase(handlerton* hton, TABLE_SHARE* table_arg);
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 253675266c9..95f3b52d41e 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -6124,9 +6124,13 @@ prepare_inplace_alter_table_dict(
user_table = ctx->new_table;
- if (ha_alter_info->inplace_supported == HA_ALTER_INPLACE_INSTANT) {
- /* If we promised ALGORITHM=INSTANT capability, we must
- retain the original ROW_FORMAT of the table. */
+ switch (ha_alter_info->inplace_supported) {
+ default: break;
+ case HA_ALTER_INPLACE_INSTANT:
+ case HA_ALTER_INPLACE_NOCOPY_LOCK:
+ case HA_ALTER_INPLACE_NOCOPY_NO_LOCK:
+ /* If we promised ALGORITHM=NOCOPY or ALGORITHM=INSTANT,
+ we must retain the original ROW_FORMAT of the table. */
flags = (user_table->flags & (DICT_TF_MASK_COMPACT
| DICT_TF_MASK_ATOMIC_BLOBS))
| (flags & ~(DICT_TF_MASK_COMPACT
diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc
index 6b6e2c04659..7278e0f353d 100644
--- a/storage/innobase/lock/lock0wait.cc
+++ b/storage/innobase/lock/lock0wait.cc
@@ -195,8 +195,8 @@ wsrep_is_BF_lock_timeout(
const trx_t* trx,
bool locked = true)
{
- if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, FALSE)
- && trx->error_state != DB_DEADLOCK) {
+ if (trx->error_state != DB_DEADLOCK && trx->is_wsrep() &&
+ srv_monitor_timer && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
ib::info() << "WSREP: BF lock wait long for trx:" << ib::hex(trx->id)
<< " query: " << wsrep_thd_query(trx->mysql_thd);
if (!locked) {
diff --git a/storage/maria/ha_maria.h b/storage/maria/ha_maria.h
index ddf8fc6f229..b2c664a072d 100644
--- a/storage/maria/ha_maria.h
+++ b/storage/maria/ha_maria.h
@@ -39,6 +39,11 @@ C_MODE_END
extern TYPELIB maria_recover_typelib;
extern ulonglong maria_recover_options;
+/*
+ In the ha_maria class there are a few virtual methods that are not marked as
+ 'final'. This is because they are re-defined by the ha_s3 engine.
+*/
+
class __attribute__((visibility("default"))) ha_maria :public handler
{
public:
@@ -60,99 +65,99 @@ private:
public:
ha_maria(handlerton *hton, TABLE_SHARE * table_arg);
~ha_maria() {}
- handler *clone(const char *name, MEM_ROOT *mem_root);
- const char *index_type(uint key_number);
- ulonglong table_flags() const
+ handler *clone(const char *name, MEM_ROOT *mem_root) override final;
+ const char *index_type(uint key_number) override final;
+ ulonglong table_flags() const override final
{ return int_table_flags; }
- ulong index_flags(uint inx, uint part, bool all_parts) const;
- uint max_supported_keys() const
+ ulong index_flags(uint inx, uint part, bool all_parts) const override final;
+ uint max_supported_keys() const override final
{ return MARIA_MAX_KEY; }
- uint max_supported_key_length() const;
- uint max_supported_key_part_length() const
+ uint max_supported_key_length() const override final;
+ uint max_supported_key_part_length() const override final
{ return max_supported_key_length(); }
- enum row_type get_row_type() const;
- void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
- virtual double scan_time();
-
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(const uchar * buf);
- int update_row(const uchar * old_data, const uchar * new_data);
- int delete_row(const uchar * buf);
+ enum row_type get_row_type() const override final;
+ void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share) override final;
+ virtual double scan_time() override final;
+
+ int open(const char *name, int mode, uint test_if_locked) override;
+ int close(void) override final;
+ int write_row(const uchar * buf) override;
+ int update_row(const uchar * old_data, const uchar * new_data) override;
+ int delete_row(const uchar * buf) override;
int index_read_map(uchar * buf, const uchar * key, key_part_map keypart_map,
- enum ha_rkey_function find_flag);
+ enum ha_rkey_function find_flag) override final;
int index_read_idx_map(uchar * buf, uint idx, const uchar * key,
key_part_map keypart_map,
- enum ha_rkey_function find_flag);
+ enum ha_rkey_function find_flag) override final;
int index_read_last_map(uchar * buf, const uchar * key,
- key_part_map keypart_map);
- int index_next(uchar * buf);
- int index_prev(uchar * buf);
- int index_first(uchar * buf);
- int index_last(uchar * buf);
- int index_next_same(uchar * buf, const uchar * key, uint keylen);
- int ft_init()
+ key_part_map keypart_map) override final;
+ int index_next(uchar * buf) override final;
+ int index_prev(uchar * buf) override final;
+ int index_first(uchar * buf) override final;
+ int index_last(uchar * buf) override final;
+ int index_next_same(uchar * buf, const uchar * key, uint keylen) override final;
+ int ft_init() override final
{
if (!ft_handler)
return 1;
ft_handler->please->reinit_search(ft_handler);
return 0;
}
- FT_INFO *ft_init_ext(uint flags, uint inx, String * key);
- int ft_read(uchar * buf);
- int index_init(uint idx, bool sorted);
- int index_end();
- int rnd_init(bool scan);
- int rnd_end(void);
- int rnd_next(uchar * buf);
- int rnd_pos(uchar * buf, uchar * pos);
- int remember_rnd_pos();
- int restart_rnd_next(uchar * buf);
- void position(const uchar * record);
- int info(uint);
+ FT_INFO *ft_init_ext(uint flags, uint inx, String * key) override final;
+ int ft_read(uchar * buf) override final;
+ int index_init(uint idx, bool sorted) override final;
+ int index_end() override final;
+ int rnd_init(bool scan) override final;
+ int rnd_end(void) override final;
+ int rnd_next(uchar * buf) override final;
+ int rnd_pos(uchar * buf, uchar * pos) override final;
+ int remember_rnd_pos() override final;
+ int restart_rnd_next(uchar * buf) override final;
+ void position(const uchar * record) override final;
+ int info(uint) override final;
int info(uint, my_bool);
- int extra(enum ha_extra_function operation);
- int extra_opt(enum ha_extra_function operation, ulong cache_size);
- int reset(void);
- int external_lock(THD * thd, int lock_type);
- int start_stmt(THD *thd, thr_lock_type lock_type);
- int delete_all_rows(void);
- int disable_indexes(uint mode);
- int enable_indexes(uint mode);
- int indexes_are_disabled(void);
- void start_bulk_insert(ha_rows rows, uint flags);
- int end_bulk_insert();
+ int extra(enum ha_extra_function operation) override final;
+ int extra_opt(enum ha_extra_function operation, ulong cache_size) override final;
+ int reset(void) override final;
+ int external_lock(THD * thd, int lock_type) override;
+ int start_stmt(THD *thd, thr_lock_type lock_type) override final;
+ int delete_all_rows(void) override final;
+ int disable_indexes(uint mode) override final;
+ int enable_indexes(uint mode) override final;
+ int indexes_are_disabled(void) override final;
+ void start_bulk_insert(ha_rows rows, uint flags) override final;
+ int end_bulk_insert() override final;
ha_rows records_in_range(uint inx, const key_range *min_key,
const key_range *max_key,
- page_range *pages);
- void update_create_info(HA_CREATE_INFO * create_info);
- int create(const char *name, TABLE * form, HA_CREATE_INFO * create_info);
+ page_range *pages) override final;
+ void update_create_info(HA_CREATE_INFO * create_info) override final;
+ int create(const char *name, TABLE * form, HA_CREATE_INFO * create_info) override;
THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to,
- enum thr_lock_type lock_type);
+ enum thr_lock_type lock_type) override final;
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
- ulonglong *nb_reserved_values);
- int rename_table(const char *from, const char *to);
- int delete_table(const char *name);
- void drop_table(const char *name);
- int check(THD * thd, HA_CHECK_OPT * check_opt);
- int analyze(THD * thd, HA_CHECK_OPT * check_opt);
- int repair(THD * thd, HA_CHECK_OPT * check_opt);
- bool check_and_repair(THD * thd);
- bool is_crashed() const;
+ ulonglong *nb_reserved_values) override final;
+ int rename_table(const char *from, const char *to) override;
+ int delete_table(const char *name) override;
+ void drop_table(const char *name) override;
+ int check(THD * thd, HA_CHECK_OPT * check_opt) override;
+ int analyze(THD * thd, HA_CHECK_OPT * check_opt) override;
+ int repair(THD * thd, HA_CHECK_OPT * check_opt) override;
+ bool check_and_repair(THD * thd) override final;
+ bool is_crashed() const override final;
bool is_changed() const;
- bool auto_repair(int error) const;
- int optimize(THD * thd, HA_CHECK_OPT * check_opt);
- int assign_to_keycache(THD * thd, HA_CHECK_OPT * check_opt);
- int preload_keys(THD * thd, HA_CHECK_OPT * check_opt);
- bool check_if_incompatible_data(HA_CREATE_INFO * info, uint table_changes);
+ bool auto_repair(int error) const override final;
+ int optimize(THD * thd, HA_CHECK_OPT * check_opt) override final;
+ int assign_to_keycache(THD * thd, HA_CHECK_OPT * check_opt) override final;
+ int preload_keys(THD * thd, HA_CHECK_OPT * check_opt) override;
+ bool check_if_incompatible_data(HA_CREATE_INFO * info, uint table_changes) override final;
#ifdef HAVE_QUERY_CACHE
my_bool register_query_cache_table(THD *thd, const char *table_key,
uint key_length,
qc_engine_callback
*engine_callback,
- ulonglong *engine_data);
+ ulonglong *engine_data) override final;
#endif
MARIA_HA *file_ptr(void)
{
@@ -164,21 +169,21 @@ public:
* Multi Range Read interface
*/
int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
- uint n_ranges, uint mode, HANDLER_BUFFER *buf);
- int multi_range_read_next(range_id_t *range_info);
+ uint n_ranges, uint mode, HANDLER_BUFFER *buf) override final;
+ int multi_range_read_next(range_id_t *range_info) override final;
ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
void *seq_init_param,
uint n_ranges, uint *bufsz,
- uint *flags, Cost_estimate *cost);
+ uint *flags, Cost_estimate *cost) override final;
ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
uint key_parts, uint *bufsz,
- uint *flags, Cost_estimate *cost);
- int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size);
+ uint *flags, Cost_estimate *cost) override final;
+ int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size) override final;
/* Index condition pushdown implementation */
- Item *idx_cond_push(uint keyno, Item* idx_cond);
+ Item *idx_cond_push(uint keyno, Item* idx_cond) override final;
- int find_unique_row(uchar *record, uint unique_idx);
+ int find_unique_row(uchar *record, uint unique_idx) override final;
/* Following functions are needed by the S3 handler */
virtual S3_INFO *s3_open_args() { return 0; }
diff --git a/storage/maria/ha_s3.h b/storage/maria/ha_s3.h
index 0777debc8d8..0dd36609a9e 100644
--- a/storage/maria/ha_s3.h
+++ b/storage/maria/ha_s3.h
@@ -19,7 +19,7 @@
#include "ha_maria.h"
-class ha_s3 :public ha_maria
+class ha_s3 final :public ha_maria
{
enum alter_table_op
{ S3_NO_ALTER, S3_ALTER_TABLE, S3_ADD_PARTITION, S3_ADD_TMP_PARTITION };
@@ -31,52 +31,52 @@ public:
~ha_s3() {}
int create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *ha_create_info) final;
- int open(const char *name, int mode, uint open_flags) final;
- int write_row(const uchar *buf) final;
- int update_row(const uchar * old_data, const uchar * new_data) final
+ HA_CREATE_INFO *ha_create_info);
+ int open(const char *name, int mode, uint open_flags);
+ int write_row(const uchar *buf);
+ int update_row(const uchar * old_data, const uchar * new_data)
{
DBUG_ENTER("update_row");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
- int delete_row(const uchar * buf) final
+ int delete_row(const uchar * buf)
{
DBUG_ENTER("delete_row");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
- int check(THD * thd, HA_CHECK_OPT * check_opt) final
+ int check(THD * thd, HA_CHECK_OPT * check_opt)
{
DBUG_ENTER("delete_row");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
- int analyze(THD * thd, HA_CHECK_OPT * check_opt) final
+ int analyze(THD * thd, HA_CHECK_OPT * check_opt)
{
DBUG_ENTER("analyze");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
- int repair(THD * thd, HA_CHECK_OPT * check_opt) final
+ int repair(THD * thd, HA_CHECK_OPT * check_opt)
{
DBUG_ENTER("repair");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
- int preload_keys(THD * thd, HA_CHECK_OPT * check_opt) final
+ int preload_keys(THD * thd, HA_CHECK_OPT * check_opt)
{
DBUG_ENTER("preload_keys");
DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
- int external_lock(THD * thd, int lock_type) final;
+ int external_lock(THD * thd, int lock_type);
/*
drop_table() is only used for internal temporary tables,
not applicable for s3
*/
- void drop_table(const char *name) final
+ void drop_table(const char *name)
{
}
- int delete_table(const char *name) final;
- int rename_table(const char *from, const char *to) final;
+ int delete_table(const char *name);
+ int rename_table(const char *from, const char *to);
int discover_check_version() override;
int rebind();
- S3_INFO *s3_open_args() final { return open_args; }
- void register_handler(MARIA_HA *file) final;
+ S3_INFO *s3_open_args() { return open_args; }
+ void register_handler(MARIA_HA *file);
};
#endif /* HA_S3_INCLUDED */
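The pattern in the two Aria hunks above: ha_maria methods that no engine re-implements are tagged `override final`, the ones that ha_s3 still re-implements get a plain `override`, and once the ha_s3 class itself is declared `final` its per-method `final` specifiers become redundant and are dropped. A minimal sketch of the same layering (class and method names are illustrative, not from the tree):

    struct handler_base {
      virtual ~handler_base() = default;
      virtual int open(const char *name) { return 0; }
      virtual int check() { return 0; }
    };

    struct engine_a : handler_base {
      int open(const char *name) override final { return 1; }  // no subclass may replace this
      int check() override { return 1; }                        // left overridable for engine_b
    };

    struct engine_b final : engine_a {      // class-level final: every virtual is implicitly final
      int check() override { return 2; }    // no per-method 'final' needed any more
    };

Besides catching signature mismatches at compile time, the `final` markers can let the compiler devirtualize calls made through the most derived types.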
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index 6840dcb76cb..45865709cd3 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -3218,6 +3218,7 @@ static int write_page(MARIA_SHARE *share, File file,
args.page= buff;
args.pageno= (pgcache_page_no_t) (pos / share->block_size);
args.data= (uchar*) share;
+ args.crypt_buf= NULL;
(* share->kfile.pre_write_hook)(&args);
res= (int)my_pwrite(file, args.page, block_size, pos, myf_rw);
(* share->kfile.post_write_hook)(res, &args);
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index 7729ee2e9e7..3843004cc6e 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -41,7 +41,7 @@ C_MODE_START
check_result_t index_cond_func_myisam(void *arg);
C_MODE_END
-class ha_myisam: public handler
+class ha_myisam final : public handler
{
MI_INFO *file;
ulonglong int_table_flags;
diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h
index d5d62a002aa..6da327ec84b 100644
--- a/storage/myisammrg/ha_myisammrg.h
+++ b/storage/myisammrg/ha_myisammrg.h
@@ -68,7 +68,7 @@ public:
};
-class ha_myisammrg: public handler
+class ha_myisammrg final : public handler
{
MYRG_INFO *file;
my_bool is_cloned; /* This instance has been cloned */
diff --git a/storage/myisammrg/myrg_extra.c b/storage/myisammrg/myrg_extra.c
index 43dfc18c710..2b3861b9f7f 100644
--- a/storage/myisammrg/myrg_extra.c
+++ b/storage/myisammrg/myrg_extra.c
@@ -31,7 +31,7 @@ int myrg_extra(MYRG_INFO *info,enum ha_extra_function function,
DBUG_PRINT("info",("function: %lu", (ulong) function));
if (!info->children_attached)
- DBUG_RETURN(1);
+ DBUG_RETURN(0);
if (function == HA_EXTRA_CACHE)
{
info->cache_in_use=1;
diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc
index f54d46ce979..da0c2c0813b 100644
--- a/storage/perfschema/ha_perfschema.cc
+++ b/storage/perfschema/ha_perfschema.cc
@@ -229,7 +229,7 @@ maria_declare_plugin(perfschema)
0x0001,
pfs_status_vars,
NULL,
- "5.6.40",
+ "5.7.31",
MariaDB_PLUGIN_MATURITY_STABLE
}
maria_declare_plugin_end;
diff --git a/storage/perfschema/ha_perfschema.h b/storage/perfschema/ha_perfschema.h
index 36ea124056d..690bf8d13a6 100644
--- a/storage/perfschema/ha_perfschema.h
+++ b/storage/perfschema/ha_perfschema.h
@@ -41,7 +41,7 @@ class PFS_engine_table;
extern const char *pfs_engine_name;
/** A handler for a PERFORMANCE_SCHEMA table. */
-class ha_perfschema : public handler
+class ha_perfschema final : public handler
{
public:
/**
diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc
index c8f3e76b873..d7f9014f691 100644
--- a/storage/sequence/sequence.cc
+++ b/storage/sequence/sequence.cc
@@ -53,7 +53,7 @@ public:
}
};
-class ha_seq: public handler
+class ha_seq final : public handler
{
private:
THR_LOCK_DATA lock;
diff --git a/storage/sphinx/ha_sphinx.h b/storage/sphinx/ha_sphinx.h
index cb46cb3dcbc..f03e9d8c797 100644
--- a/storage/sphinx/ha_sphinx.h
+++ b/storage/sphinx/ha_sphinx.h
@@ -30,7 +30,7 @@ struct CSphSEStats;
struct CSphSEThreadTable;
/// Sphinx SE handler class
-class ha_sphinx : public handler
+class ha_sphinx final : public handler
{
protected:
THR_LOCK_DATA m_tLock; ///< MySQL lock
diff --git a/storage/spider/ha_spider.h b/storage/spider/ha_spider.h
index db184baf682..847f7a8e170 100644
--- a/storage/spider/ha_spider.h
+++ b/storage/spider/ha_spider.h
@@ -49,7 +49,7 @@ struct st_spider_ft_info
String *key;
};
-class ha_spider: public handler
+class ha_spider final : public handler
{
public:
SPIDER_SHARE *share;
diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt
index a87d30d2a57..bc7e5826cac 100644
--- a/storage/tokudb/CMakeLists.txt
+++ b/storage/tokudb/CMakeLists.txt
@@ -1,4 +1,4 @@
-SET(TOKUDB_VERSION 5.6.41-84.1)
+SET(TOKUDB_VERSION 5.6.49-89.0)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(WIN32)
# tokudb never worked there
@@ -137,6 +137,7 @@ IF(DEFINED TOKUDB_NOPATCH_CONFIG)
ADD_DEFINITIONS("-DTOKUDB_NOPATCH_CONFIG=${TOKUDB_NOPATCH_CONFIG}")
ENDIF()
+MY_CHECK_AND_SET_COMPILER_FLAG(-Wno-missing-format-attribute)
MY_CHECK_AND_SET_COMPILER_FLAG(-Wno-missing-field-initializers)
IF (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/PerconaFT/")
diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
index 3c607d9eb3a..c82521db3e9 100644
--- a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
+++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake
@@ -47,20 +47,18 @@ include(CheckCCompilerFlag)
include(CheckCXXCompilerFlag)
## adds a compiler flag if the compiler supports it
-macro(set_cflags_if_supported)
+macro(prepend_cflags_if_supported)
foreach(flag ${ARGN})
MY_CHECK_AND_SET_COMPILER_FLAG(${flag})
endforeach(flag)
-endmacro(set_cflags_if_supported)
+endmacro(prepend_cflags_if_supported)
if (NOT DEFINED MYSQL_PROJECT_NAME_DOCSTRING)
set (OPTIONAL_CFLAGS "${OPTIONAL_CFLAGS} -Wmissing-format-attribute")
endif()
## disable some warnings
-## missing-format-attribute causes warnings in some MySQL include files
-## if the library is built as a part of TokuDB MySQL storage engine
-set_cflags_if_supported(
+prepend_cflags_if_supported(
-Wno-missing-field-initializers
-Wstrict-null-sentinel
-Winit-self
@@ -76,28 +74,21 @@ set_cflags_if_supported(
-fno-exceptions
-Wno-error=nonnull-compare
)
-## set_cflags_if_supported_named("-Weffc++" -Weffcpp)
## Clang has stricter POD checks. So, only enable this warning on our other builds (Linux + GCC)
if (NOT CMAKE_CXX_COMPILER_ID MATCHES Clang)
- set_cflags_if_supported(
+ prepend_cflags_if_supported(
-Wpacked
)
endif ()
option (PROFILING "Allow profiling and debug" ON)
if (PROFILING)
- set_cflags_if_supported(
+ prepend_cflags_if_supported(
-fno-omit-frame-pointer
)
endif ()
-## this hits with optimized builds somewhere in ftleaf_split, we don't
-## know why but we don't think it's a big deal
-set_cflags_if_supported(
- -Wno-error=strict-overflow
- )
-
# new flag sets in MySQL 8.0 seem to explicitly disable this
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fexceptions")
@@ -135,7 +126,7 @@ else ()
endif ()
## set warnings
-set_cflags_if_supported(
+prepend_cflags_if_supported(
-Wextra
-Wbad-function-cast
-Wno-missing-noreturn
@@ -158,7 +149,7 @@ set_cflags_if_supported(
if (NOT CMAKE_CXX_COMPILER_ID STREQUAL Clang)
# Disabling -Wcast-align with clang. TODO: fix casting and re-enable it, someday.
- set_cflags_if_supported(-Wcast-align)
+ prepend_cflags_if_supported(-Wcast-align)
endif ()
## never want these
diff --git a/storage/tokudb/PerconaFT/ft/logger/logger.cc b/storage/tokudb/PerconaFT/ft/logger/logger.cc
index d66a1deecf9..3965714e8cb 100644
--- a/storage/tokudb/PerconaFT/ft/logger/logger.cc
+++ b/storage/tokudb/PerconaFT/ft/logger/logger.cc
@@ -51,6 +51,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "util/status.h"
int writing_rollback = 0;
+extern "C" {
+ uint force_recovery = 0;
+}
static const int log_format_version = TOKU_LOG_VERSION;
diff --git a/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h
index eb8c953b08c..31ffd7e1617 100644
--- a/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h
+++ b/storage/tokudb/PerconaFT/ft/serialize/rbtree_mhs.h
@@ -193,6 +193,7 @@ namespace MhsRbTree {
BlockPair(OUUInt64 o, OUUInt64 s) : _offset(o), _size(s) {}
BlockPair(const BlockPair &o)
: _offset(o._offset), _size(o._size) {}
+ BlockPair& operator=(const BlockPair&) = default;
int operator<(const BlockPair &rhs) const {
return _offset < rhs._offset;
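BlockPair user-declares a copy constructor, so the implicitly generated copy-assignment operator is deprecated (GCC 9's -Wdeprecated-copy warns about it); explicitly defaulting `operator=`, as the hunk above does, keeps the type copy-assignable without the warning. A hedged sketch of the same shape, with illustrative names:

    struct pair_like {
      long offset;
      long size;
      pair_like(long o, long s) : offset(o), size(s) {}
      pair_like(const pair_like &o) : offset(o.offset), size(o.size) {}  // user-declared copy ctor...
      pair_like &operator=(const pair_like &) = default;                 // ...so spell the assignment out
    };

    int main() {
      pair_like a(1, 2), b(3, 4);
      b = a;                         // still works, and without -Wdeprecated-copy
      return (int)(b.offset - 1);    // 0
    }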
diff --git a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc
index c1c4cb4f16e..a13e6d26d15 100644
--- a/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/cachetable-simple-close.cc
@@ -195,13 +195,13 @@ static void test_multiple_cachefiles(bool use_same_hash) {
char fname1[strlen(TOKU_TEST_FILENAME) + sizeof("_1")];
strcpy(fname1, TOKU_TEST_FILENAME);
- strncat(fname1, "_1", sizeof("_1"));
+ strcat(fname1, "_1");
char fname2[strlen(TOKU_TEST_FILENAME) + sizeof("_2")];
strcpy(fname2, TOKU_TEST_FILENAME);
- strncat(fname2, "_2", sizeof("_2"));
+ strcat(fname2, "_2");
char fname3[strlen(TOKU_TEST_FILENAME) + sizeof("_3")];
strcpy(fname3, TOKU_TEST_FILENAME);
- strncat(fname3, "_3", sizeof("_3"));
+ strcat(fname3, "_3");
unlink(fname1);
unlink(fname2);
@@ -280,10 +280,10 @@ static void test_evictor(void) {
char fname1[strlen(TOKU_TEST_FILENAME) + sizeof("_1")];
strcpy(fname1, TOKU_TEST_FILENAME);
- strncat(fname1, "_1", sizeof("_1"));
+ strcat(fname1, "_1");
char fname2[strlen(TOKU_TEST_FILENAME) + sizeof("_2")];
strcpy(fname2, TOKU_TEST_FILENAME);
- strncat(fname2, "_2", sizeof("_2"));
+ strcat(fname2, "_2");
unlink(fname1);
unlink(fname2);
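The strncat calls replaced above passed `sizeof("_1")` as the third argument as if it were the destination size, but strncat's count only limits how many bytes are read from the source; with a three-byte literal it guards nothing, and newer GCC string-op diagnostics complain about it. Since each buffer is allocated as `strlen(TOKU_TEST_FILENAME) + sizeof("_1")`, a plain strcat is exact. A small stand-alone illustration (the path is made up):

    #include <cstdio>
    #include <cstring>

    int main() {
      const char *base = "/tmp/toku_test";                    // stand-in for TOKU_TEST_FILENAME
      char fname[sizeof("/tmp/toku_test") + sizeof("_1")];    // enough for base + "_1" + NUL
      std::strcpy(fname, base);
      // strncat(fname, "_1", sizeof("_1")) only caps the bytes taken from "_1" (3, incl. the NUL),
      // so it is not a bounds check on fname; the buffer is already sized exactly, strcat suffices.
      std::strcat(fname, "_1");
      std::puts(fname);
      return 0;
    }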
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc b/storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc
index 7abd2267a7e..1d6bc2fba7a 100644
--- a/storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-bfe-query.cc
@@ -337,7 +337,7 @@ static void test_prefetching(void) {
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 1;
sn.n_children = 3;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
uint64_t key1 = 100;
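The `sn.dirty = 1` writes in this and the following ft test hunks become `sn.set_dirty()`, and reads such as `node->dirty` become `node->dirty()`, which suggests the ftnode dirty flag was moved behind accessors rather than left as a public field. A rough sketch of that refactor (not the PerconaFT definition):

    class node_like {
      bool _dirty = false;            // no longer poked directly by callers
    public:
      void set_dirty()   { _dirty = true;  }
      void clear_dirty() { _dirty = false; }
      bool dirty() const { return _dirty;  }
    };

    int main() {
      node_like n;
      n.set_dirty();                  // was: n.dirty = 1;
      return n.dirty() ? 0 : 1;       // was: n.dirty
    }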
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc b/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc
index 00ff8cf204b..1a708b8e3cc 100644
--- a/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-clock-test.cc
@@ -133,7 +133,7 @@ static void test1(int fd, FT ft_h, FTNODE *dn) {
for (int i = 0; i < (*dn)->n_children; i++) {
invariant(BP_STATE(*dn, i) == PT_AVAIL);
}
- (*dn)->dirty = 1;
+ (*dn)->set_dirty();
toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
toku_ftnode_pe_callback(*dn, attr, ft_h, def_pe_finalize_impl, nullptr);
@@ -246,7 +246,7 @@ static void test_serialize_nonleaf(void) {
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 1;
sn.n_children = 2;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(2, sn.bp);
DBT pivotkey;
@@ -384,7 +384,7 @@ static void test_serialize_leaf(void) {
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 0;
sn.n_children = 2;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn.n_children, sn.bp);
DBT pivotkey;
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc
index d50488ae197..bd5df7862cd 100644
--- a/storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-benchmark.cc
@@ -95,7 +95,7 @@ static void test_serialize_leaf(int valsize,
sn->layout_version_original = FT_LAYOUT_VERSION;
sn->height = 0;
sn->n_children = 8;
- sn->dirty = 1;
+ sn->set_dirty();
sn->oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn->n_children, sn->bp);
sn->pivotkeys.create_empty();
@@ -173,7 +173,7 @@ static void test_serialize_leaf(int valsize,
for (int i = 0; i < ser_runs; i++) {
gettimeofday(&t[0], NULL);
ndd = NULL;
- sn->dirty = 1;
+ sn->set_dirty();
r = toku_serialize_ftnode_to(
fd, make_blocknum(20), sn, &ndd, true, ft->ft, false);
invariant(r == 0);
@@ -265,7 +265,7 @@ static void test_serialize_nonleaf(int valsize,
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 1;
sn.n_children = 8;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn.n_children, sn.bp);
sn.pivotkeys.create_empty();
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc
index 0cddaf19651..4fca8efad35 100644
--- a/storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-serialize-test.cc
@@ -238,7 +238,7 @@ static void test_serialize_leaf_check_msn(enum ftnode_verify_type bft,
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 0;
sn.n_children = 2;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn.n_children, sn.bp);
DBT pivotkey;
@@ -381,7 +381,7 @@ static void test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft,
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 0;
sn.n_children = nrows;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn.n_children, sn.bp);
@@ -538,7 +538,7 @@ static void test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft,
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 0;
sn.n_children = 1;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
XMALLOC_N(sn.n_children, sn.bp);
@@ -693,7 +693,7 @@ static void test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft,
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 0;
sn.n_children = 1;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn.n_children, sn.bp);
@@ -845,7 +845,7 @@ static void test_serialize_leaf_with_empty_basement_nodes(
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 0;
sn.n_children = 7;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn.n_children, sn.bp);
DBT pivotkeys[6];
@@ -989,7 +989,7 @@ static void test_serialize_leaf_with_multiple_empty_basement_nodes(
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 0;
sn.n_children = 4;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn.n_children, sn.bp);
DBT pivotkeys[3];
@@ -1100,7 +1100,7 @@ static void test_serialize_nonleaf(enum ftnode_verify_type bft, bool do_clone) {
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 1;
sn.n_children = 2;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(2, sn.bp);
DBT pivotkey;
diff --git a/storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc b/storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc
index a23a3a60879..c668b9410c9 100644
--- a/storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/ft-test-header.cc
@@ -57,7 +57,7 @@ static void test_header (void) {
assert(r==0);
// now insert some info into the header
FT ft = t->ft;
- ft->h->dirty = 1;
+ ft->h->set_dirty();
// cast away const because we actually want to fiddle with the header
// in this test
*((int *) &ft->h->layout_version_original) = 13;
diff --git a/storage/tokudb/PerconaFT/ft/tests/make-tree.cc b/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
index 761d672539b..fe950b60972 100644
--- a/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/make-tree.cc
@@ -88,7 +88,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
leafnode->max_msn_applied_to_node_on_disk = msn;
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ft/tests/mempool-115.cc b/storage/tokudb/PerconaFT/ft/tests/mempool-115.cc
index e3a3bfa28dc..bf9a1aa1484 100644
--- a/storage/tokudb/PerconaFT/ft/tests/mempool-115.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/mempool-115.cc
@@ -102,7 +102,7 @@ public:
sn.layout_version_original = FT_LAYOUT_VERSION;
sn.height = 0;
sn.n_children = 2;
- sn.dirty = 1;
+ sn.set_dirty();
sn.oldest_referenced_xid_known = TXNID_NONE;
MALLOC_N(sn.n_children, sn.bp);
DBT pivotkey;
diff --git a/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc b/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
index c37dcd089f8..6d13eabfd93 100644
--- a/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/msnfilter.cc
@@ -161,7 +161,7 @@ append_leaf(FT_HANDLE ft, FTNODE leafnode, void *key, uint32_t keylen, void *val
}
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc b/storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc
index 8ac1cd62c50..02dc63fca7e 100644
--- a/storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/recovery-test5123.cc
@@ -49,9 +49,9 @@ static void test_5123(void) {
test_setup(TOKU_TEST_FILENAME, &logger, &ct);
int r;
- TXNID_PAIR one = {.parent_id64 = (TXNID)1, TXNID_NONE};
- TXNID_PAIR two = {.parent_id64 = (TXNID)2, TXNID_NONE};
- TXNID_PAIR three = {.parent_id64 = (TXNID)3, TXNID_NONE};
+ TXNID_PAIR one = { (TXNID)1, TXNID_NONE};
+ TXNID_PAIR two = { (TXNID)2, TXNID_NONE};
+ TXNID_PAIR three = { (TXNID)3, TXNID_NONE};
toku_log_xbegin(logger, NULL, false, one, TXNID_PAIR_NONE);
toku_log_xbegin(logger, NULL, false, three, TXNID_PAIR_NONE);
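`{.parent_id64 = (TXNID)1, TXNID_NONE}` mixes a designated initializer with a positional one, which standard C++ rejects and newer compilers no longer accept as an extension; the replacement initializes both members positionally, so it relies on `parent_id64` being the first member of TXNID_PAIR. A compilable sketch of the constraint, with a stand-in struct:

    struct txnid_pair {              // stand-in; the real TXNID_PAIR lives in the FT headers
      unsigned long parent_id64;
      unsigned long child_id64;
    };

    int main() {
      const unsigned long TXNID_NONE_ = 0;          // illustrative stand-in for TXNID_NONE

      // Rejected in C++: designated and positional initializers mixed in one braced list.
      //   txnid_pair one = {.parent_id64 = 1, TXNID_NONE_};

      txnid_pair one = {1, TXNID_NONE_};            // purely positional form, as the patch uses
      return (int)one.parent_id64 - 1;              // 0
    }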
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc
index 06a26614885..5c73d281b98 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-flush.cc
@@ -245,7 +245,7 @@ doit (bool after_child_pin) {
true
);
assert(node->height == 1);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
if (after_child_pin) {
assert(toku_bnc_nbytesinbuf(BNC(node, 0)) == 0);
@@ -265,7 +265,7 @@ doit (bool after_child_pin) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
if (after_child_pin) {
assert(BLB_NBYTESINDATA(node,0) > 0);
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc
index 1029dfef320..cab370274cb 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-merge.cc
@@ -270,7 +270,7 @@ doit (int state) {
true
);
assert(node->height == 1);
- assert(!node->dirty);
+ assert(!node->dirty());
BLOCKNUM left_child, right_child;
// cases where we expect the checkpoint to contain the merge
if (state == ft_flush_aflter_merge || state == flt_flush_before_unpin_remove) {
@@ -301,7 +301,7 @@ doit (int state) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->num_klpairs() == 1);
toku_unpin_ftnode(c_ft->ft, node);
@@ -318,7 +318,7 @@ doit (int state) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->num_klpairs() == 1);
toku_unpin_ftnode(c_ft->ft, node);
@@ -336,7 +336,7 @@ doit (int state) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->num_klpairs() == 2);
toku_unpin_ftnode(c_ft->ft, node);
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc
index 208ebe3ca31..87f66512642 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-rebalance.cc
@@ -284,7 +284,7 @@ doit (int state) {
true
);
assert(node->height == 1);
- assert(!node->dirty);
+ assert(!node->dirty());
BLOCKNUM left_child, right_child;
assert(node->n_children == 2);
@@ -304,7 +304,7 @@ doit (int state) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->num_klpairs() == 2);
toku_unpin_ftnode(c_ft->ft, node);
@@ -319,7 +319,7 @@ doit (int state) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->num_klpairs() == 2);
toku_unpin_ftnode(c_ft->ft, node);
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc
index 2b29de409b1..d5f7fe50f46 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-checkpoint-during-split.cc
@@ -260,7 +260,7 @@ doit (bool after_split) {
true
);
assert(node->height == 1);
- assert(!node->dirty);
+ assert(!node->dirty());
BLOCKNUM left_child, right_child;
if (after_split) {
assert(node->n_children == 2);
@@ -287,7 +287,7 @@ doit (bool after_split) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->num_klpairs() == 1);
toku_unpin_ftnode(c_ft->ft, node);
@@ -302,7 +302,7 @@ doit (bool after_split) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->num_klpairs() == 1);
toku_unpin_ftnode(c_ft->ft, node);
@@ -318,7 +318,7 @@ doit (bool after_split) {
true
);
assert(node->height == 0);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 1);
assert(BLB_DATA(node, 0)->num_klpairs() == 2);
toku_unpin_ftnode(c_ft->ft, node);
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc b/storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc
index 460134ec353..e1937538471 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-dirty-flushes-on-cleaner.cc
@@ -199,7 +199,7 @@ doit (void) {
&node,
true
);
- assert(node->dirty);
+ assert(node->dirty());
assert(node->n_children == 2);
assert(BP_STATE(node,0) == PT_AVAIL);
assert(BP_STATE(node,1) == PT_AVAIL);
@@ -229,7 +229,7 @@ doit (void) {
&node,
true
);
- assert(node->dirty);
+ assert(node->dirty());
assert(node->n_children == 2);
assert(BP_STATE(node,0) == PT_AVAIL);
assert(BP_STATE(node,1) == PT_AVAIL);
@@ -250,7 +250,7 @@ doit (void) {
&node,
true
);
- assert(node->dirty);
+ assert(node->dirty());
// we expect that this flushes its buffer, that
// a merge is not done, and that the lookup
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc b/storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc
index 89d7130e5f7..f9d4d1646b8 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-flushes-on-cleaner.cc
@@ -203,7 +203,7 @@ doit (bool keep_other_bn_in_memory) {
&node,
true
);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 2);
// a hack to get the basement nodes evicted
for (int i = 0; i < 20; i++) {
@@ -249,7 +249,7 @@ doit (bool keep_other_bn_in_memory) {
&node,
true
);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(node->n_children == 2);
assert(BP_STATE(node,0) == PT_AVAIL);
if (keep_other_bn_in_memory) {
@@ -273,7 +273,7 @@ doit (bool keep_other_bn_in_memory) {
&node,
true
);
- assert(!node->dirty);
+ assert(!node->dirty());
// we expect that this flushes its buffer, that
// a merge is not done, and that the lookup
diff --git a/storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc b/storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc
index 83dfd0244f4..29d07483f99 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test-pick-child-to-flush.cc
@@ -194,7 +194,7 @@ doit (void) {
toku_pin_node_with_min_bfe(&node, node_internal, t);
toku_ftnode_assert_fully_in_memory(node);
assert(node->n_children == 2);
- assert(!node->dirty);
+ assert(!node->dirty());
assert(toku_bnc_n_entries(node->bp[0].ptr.u.nonleaf) > 0);
assert(toku_bnc_n_entries(node->bp[1].ptr.u.nonleaf) > 0);
@@ -216,7 +216,7 @@ doit (void) {
toku_pin_node_with_min_bfe(&node, node_internal, t);
toku_ftnode_assert_fully_in_memory(node);
- assert(node->dirty);
+ assert(node->dirty());
assert(node->n_children == 2);
// child 0 should have empty buffer because it flushed
// child 1 should still have message in buffer
@@ -226,14 +226,14 @@ doit (void) {
r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
toku_pin_node_with_min_bfe(&node, node_internal, t);
- assert(!node->dirty);
+ assert(!node->dirty());
curr_child_to_flush = 1;
num_flushes_called = 0;
toku_ft_flush_some_child(t->ft, node, &fa);
assert(num_flushes_called == 1);
toku_pin_node_with_min_bfe(&node, node_internal, t);
- assert(node->dirty);
+ assert(node->dirty());
toku_ftnode_assert_fully_in_memory(node);
assert(node->n_children == 2);
// both buffers should be empty now
@@ -244,14 +244,14 @@ doit (void) {
r = toku_checkpoint(cp, NULL, NULL, NULL, NULL, NULL, CLIENT_CHECKPOINT);
assert_zero(r);
toku_pin_node_with_min_bfe(&node, node_internal, t);
- assert(!node->dirty);
+ assert(!node->dirty());
curr_child_to_flush = 0;
num_flushes_called = 0;
toku_ft_flush_some_child(t->ft, node, &fa);
assert(num_flushes_called == 1);
toku_pin_node_with_min_bfe(&node, node_internal, t);
- assert(node->dirty); // nothing was flushed, but since we were trying to flush to a leaf, both become dirty
+ assert(node->dirty()); // nothing was flushed, but since we were trying to flush to a leaf, both become dirty
toku_ftnode_assert_fully_in_memory(node);
assert(node->n_children == 2);
// both buffers should be empty now
@@ -280,17 +280,17 @@ doit (void) {
assert(num_flushes_called == 2);
toku_pin_node_with_min_bfe(&node, node_internal, t);
- assert(node->dirty);
+ assert(node->dirty());
toku_unpin_ftnode(t->ft, node);
toku_pin_node_with_min_bfe(&node, node_leaf[0], t);
- assert(node->dirty);
+ assert(node->dirty());
toku_unpin_ftnode(t->ft, node);
toku_pin_node_with_min_bfe(&node, node_leaf[1], t);
if (i == 0) {
- assert(!node->dirty);
+ assert(!node->dirty());
}
else {
- assert(node->dirty);
+ assert(node->dirty());
}
toku_unpin_ftnode(t->ft, node);
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/test3884.cc b/storage/tokudb/PerconaFT/ft/tests/test3884.cc
index cfb76424668..5de55b0daff 100644
--- a/storage/tokudb/PerconaFT/ft/tests/test3884.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/test3884.cc
@@ -105,7 +105,7 @@ setup_ftnode_header(struct ftnode *node)
node->layout_version = FT_LAYOUT_VERSION;
node->layout_version_original = FT_LAYOUT_VERSION;
node->height = 0;
- node->dirty = 1;
+ node->set_dirty();
node->oldest_referenced_xid_known = TXNID_NONE;
}
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc b/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
index b10885c2e62..1ba5f1c2503 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-bad-msn.cc
@@ -93,7 +93,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
// leafnode->max_msn_applied_to_node = msn;
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
index c1d08ce41a6..42415a07765 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-bad-pivots.cc
@@ -77,7 +77,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL);
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc b/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
index 22a29c0ff69..e31b13c4f4d 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-dup-in-leaf.cc
@@ -78,7 +78,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL);
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
index 80189dd9804..009eda63999 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-dup-pivots.cc
@@ -77,7 +77,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL);
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc b/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
index a84aac1f063..5c639d8d28a 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-misrouted-msgs.cc
@@ -78,7 +78,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL);
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
index ca413f52567..d55ec7a736f 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-leaf.cc
@@ -80,7 +80,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL);
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
index 6efa06913c2..ff231001c77 100644
--- a/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
+++ b/storage/tokudb/PerconaFT/ft/tests/verify-unsorted-pivots.cc
@@ -77,7 +77,7 @@ append_leaf(FTNODE leafnode, void *key, size_t keylen, void *val, size_t vallen)
NULL);
// don't forget to dirty the node
- leafnode->dirty = 1;
+ leafnode->set_dirty();
}
static void
diff --git a/storage/tokudb/PerconaFT/ftcxx/cursor.hpp b/storage/tokudb/PerconaFT/ftcxx/cursor.hpp
index 9ecc4d173c6..bde5dbf2c19 100644
--- a/storage/tokudb/PerconaFT/ftcxx/cursor.hpp
+++ b/storage/tokudb/PerconaFT/ftcxx/cursor.hpp
@@ -398,8 +398,8 @@ namespace ftcxx {
{}
bool operator()(const DBT *key, const DBT *val) {
- _key = std::move(Slice(*key).owned());
- _val = std::move(Slice(*val).owned());
+ _key = Slice(*key).owned();
+ _val = Slice(*val).owned();
// Don't bulk fetch.
return false;
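`std::move(Slice(*key).owned())` applies std::move to an expression that is already a temporary, so the move adds nothing and recent compilers can flag it (clang's -Wpessimizing-move / GCC's -Wredundant-move family); assigning the temporary directly is equivalent. A minimal stand-alone illustration, with a hypothetical helper in place of Slice::owned():

    #include <string>
    #include <utility>

    static std::string make_owned(const char *p) { return std::string(p); }  // hypothetical helper

    int main() {
      std::string key;
      // key = std::move(make_owned("k"));  // the call already yields an rvalue; std::move is redundant
      key = make_owned("k");                // same effect, no warning
      return key.empty() ? 1 : 0;
    }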
diff --git a/storage/tokudb/PerconaFT/locktree/lock_request.cc b/storage/tokudb/PerconaFT/locktree/lock_request.cc
index 0a95dc417d6..cc6fcafcd5d 100644
--- a/storage/tokudb/PerconaFT/locktree/lock_request.cc
+++ b/storage/tokudb/PerconaFT/locktree/lock_request.cc
@@ -93,6 +93,10 @@ void lock_request::destroy(void) {
toku_cond_destroy(&m_wait_cond);
}
+void lock_request::clearmem(char c) {
+ memset(this, c, sizeof(* this));
+}
+
// set the lock request parameters. this API allows a lock request to be reused.
void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT *right_key, lock_request::type lock_type, bool big_txn, void *extra) {
invariant(m_state != state::PENDING);
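The new clearmem() member exists for the lock-request stress tests further down, which used to scribble over a destroyed request with `memset(&request, 0xab, sizeof request)` at the call site; GCC 8+ warns about raw memset on non-trivial class objects (-Wclass-memaccess), so the deliberate poisoning is wrapped in a method of the class itself. A rough sketch of the idea (types and members are illustrative):

    #include <cstring>

    class lock_request_like {
    public:
      void destroy() { /* release any resources */ }
      // Deliberately poison the object's bytes after destroy(), as the stress tests do.
      void clearmem(char c) { std::memset(static_cast<void *>(this), c, sizeof(*this)); }
    private:
      int m_state = 0;
    };

    int main() {
      lock_request_like req;
      req.destroy();
      req.clearmem(0xab);    // was: memset(&req, 0xab, sizeof req) at the call site
      return 0;
    }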
diff --git a/storage/tokudb/PerconaFT/locktree/lock_request.h b/storage/tokudb/PerconaFT/locktree/lock_request.h
index 8b22241e276..e16e77ed6f4 100644
--- a/storage/tokudb/PerconaFT/locktree/lock_request.h
+++ b/storage/tokudb/PerconaFT/locktree/lock_request.h
@@ -89,6 +89,7 @@ public:
// effect: Destroys a lock request.
void destroy(void);
+ void clearmem(char c);
// effect: Resets the lock request parameters, allowing it to be reused.
// requires: Lock request was already created at some point
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc
index 5c28701c49e..83436a651e1 100644
--- a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race.cc
@@ -83,7 +83,7 @@ namespace toku {
}
request.destroy();
- memset(&request, 0xab, sizeof request);
+ request.clearmem(0xab);
toku_pthread_yield();
if ((i % 10) == 0)
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc
index 8458bae6b8c..6748ae30ee1 100644
--- a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_race_3.cc
@@ -96,7 +96,7 @@ namespace toku {
}
request.destroy();
- memset(&request, 0xab, sizeof request);
+ request.clearmem(0xab);
toku_pthread_yield();
if ((i % 10) == 0)
diff --git a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc
index 4b6dadd440f..cd3dc7b37ef 100644
--- a/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc
+++ b/storage/tokudb/PerconaFT/locktree/tests/lock_request_start_retry_wait_race_2.cc
@@ -98,7 +98,7 @@ namespace toku {
}
request.destroy();
- memset(&request, 0xab, sizeof request);
+ request.clearmem(0xab);
toku_pthread_yield();
if ((i % 10) == 0)
diff --git a/storage/tokudb/PerconaFT/src/tests/test.h b/storage/tokudb/PerconaFT/src/tests/test.h
index ff464f55890..c5214961afd 100644
--- a/storage/tokudb/PerconaFT/src/tests/test.h
+++ b/storage/tokudb/PerconaFT/src/tests/test.h
@@ -428,14 +428,14 @@ static int env_del_multiple_test_no_array(
/* Some macros for evaluating blocks or functions within the scope of a
* transaction. */
#define IN_TXN_COMMIT(env, parent, txn, flags, expr) ({ \
- DB_TXN *(txn); \
+ DB_TXN *txn; \
{ int chk_r = (env)->txn_begin((env), (parent), &(txn), (flags)); CKERR(chk_r); } \
(expr); \
{ int chk_r = (txn)->commit((txn), 0); CKERR(chk_r); } \
})
#define IN_TXN_ABORT(env, parent, txn, flags, expr) ({ \
- DB_TXN *(txn); \
+ DB_TXN *txn; \
{ int chk_r = (env)->txn_begin((env), (parent), &(txn), (flags)); CKERR(chk_r); } \
(expr); \
{ int chk_r = (txn)->abort(txn); CKERR(chk_r); } \
diff --git a/storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc b/storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc
index ecd88f3c5fe..55b2943e67f 100644
--- a/storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc
+++ b/storage/tokudb/PerconaFT/src/tests/test_mostly_seq.cc
@@ -68,7 +68,7 @@ seqinsert (int n, float p) {
int v = i;
DBT key, val;
r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0); assert(r == 0);
- if (random() <= RAND_MAX * p) {
+ if (random() <= static_cast<float>(RAND_MAX) * p) {
k = htonl(i-1);
v = i-1;
r = db->put(db, 0, dbt_init(&key, &k, sizeof k), dbt_init(&val, &v, sizeof v), 0); assert(r == 0);
diff --git a/storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h b/storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h
index e232f327d10..1d8833adcba 100644
--- a/storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h
+++ b/storage/tokudb/PerconaFT/src/tests/threaded_stress_test_helpers.h
@@ -432,17 +432,17 @@ tsv_print_perf_totals(const struct cli_args *cli_args, uint64_t *counters[], con
}
const struct perf_formatter perf_formatters[] = {
- [HUMAN] = {
+ { /* HUMAN */
.header = human_print_perf_header,
.iteration = human_print_perf_iteration,
.totals = human_print_perf_totals
},
- [CSV] = {
+ { /* CSV */
.header = csv_print_perf_header,
.iteration = csv_print_perf_iteration,
.totals = csv_print_perf_totals
},
- [TSV] = {
+ { /* TSV */
.header = tsv_print_perf_header,
.iteration = tsv_print_perf_iteration,
.totals = tsv_print_perf_totals
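`[HUMAN] = { ... }` designates an array element by index, a C99 feature that ISO C++ does not have (GCC only ever accepted it as an extension); the new form keeps the member designators but drops the array designators, so the initializer order now has to track the enumerator values HUMAN, CSV, TSV. A small sketch of that constraint:

    enum output_format { HUMAN = 0, CSV, TSV };

    struct formatter_like { const char *name; };   // stand-in for the real formatter struct

    // C99-style array designators, not valid ISO C++:
    //   const formatter_like fmts[] = { [HUMAN] = {"human"}, [CSV] = {"csv"}, [TSV] = {"tsv"} };

    // Positional form: the slots must appear in enumerator order.
    const formatter_like fmts[] = {
      { "human" },   // HUMAN
      { "csv"   },   // CSV
      { "tsv"   },   // TSV
    };

    int main() { return fmts[TSV].name[0] == 't' ? 0 : 1; }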
diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc
index b041928b679..1378c05b4c4 100644
--- a/storage/tokudb/PerconaFT/src/ydb.cc
+++ b/storage/tokudb/PerconaFT/src/ydb.cc
@@ -91,9 +91,7 @@ extern int writing_rollback;
int toku_close_trace_file (void) { return 0; }
#endif
-extern "C" {
- uint force_recovery = 0;
-}
+extern uint force_recovery;
// Set when env is panicked, never cleared.
static int env_is_panicked = 0;
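Taken together with the logger.cc hunk earlier, force_recovery now has exactly one definition, inside the logger translation unit with C linkage, and ydb.cc merely declares it; presumably this keeps the symbol available to code and tests that link the ft layer without ydb.cc, while avoiding a duplicate definition when both are linked. A minimal sketch of the split, with the file names purely illustrative:

    // defs.cc -- the one and only definition, given C linkage
    extern "C" {
      unsigned int force_recovery = 0;
    }

    // user.cc -- every other translation unit only declares the symbol
    extern "C" unsigned int force_recovery;

    unsigned int read_force_recovery() { return force_recovery; }

    int main() { return (int)read_force_recovery(); }   // 0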
diff --git a/storage/tokudb/PerconaFT/src/ydb_db.cc b/storage/tokudb/PerconaFT/src/ydb_db.cc
index 8b2b162abd2..5707415b72d 100644
--- a/storage/tokudb/PerconaFT/src/ydb_db.cc
+++ b/storage/tokudb/PerconaFT/src/ydb_db.cc
@@ -509,7 +509,7 @@ int toku_db_open_iname(DB * db, DB_TXN * txn, const char *iname_in_env, uint32_t
struct lt_on_create_callback_extra on_create_extra = {
.txn = txn,
.ft_handle = db->i->ft_handle,
- open_rw
+ .open_rw = false
};
db->i->lt = db->dbenv->i->ltm.get_lt(db->i->dict_id,
toku_ft_get_comparator(db->i->ft_handle),
diff --git a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb-slave.opt b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb-slave.opt
index b351df53683..127c28fabfd 100644
--- a/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb-slave.opt
+++ b/storage/tokudb/mysql-test/rpl/t/rpl_parallel_tokudb-slave.opt
@@ -1,5 +1 @@
--log-warnings=0 --slave-transaction-retries=0
-
-
-
-
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result
index ad920deeda4..17229fa5956 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_newdecimal.result
@@ -1533,11 +1533,8 @@ select (1.20396873 * 0.89550000 * 0.68000000 * 1.08721696 * 0.99500000 *
1.01500000 * 1.01500000 * 0.99500000)
0.81298807395367312459230693948000000000
create table t1 as select 5.05 / 0.014;
-Warnings:
-Note 1265 Data truncated for column '5.05 / 0.014' at row 1
show warnings;
Level Code Message
-Note 1265 Data truncated for column '5.05 / 0.014' at row 1
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1652,8 +1649,6 @@ my_col
0.12345678912345678912345678912345678912
DROP TABLE t1;
CREATE TABLE t1 SELECT 1 / .123456789123456789123456789123456789123456789123456789123456789123456789123456789 AS my_col;
-Warnings:
-Note 1265 Data truncated for column 'my_col' at row 1
DESCRIBE t1;
Field Type Null Key Default Extra
my_col decimal(65,4) YES NULL
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_ranges.result b/storage/tokudb/mysql-test/tokudb/r/type_ranges.result
index 38252e870df..e915d56f21f 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_ranges.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_ranges.result
@@ -92,8 +92,6 @@ DROP INDEX test ON t1;
insert into t1 values (10, 1,1,1,1,1,1,1,1,1,1,1,1,1,NULL,0,0,0,1,1,1,1,'one','one');
insert into t1 values (NULL,2,2,2,2,2,2,2,2,2,2,2,2,2,NULL,NULL,NULL,NULL,NULL,NULL,2,2,'two','two,one');
insert into t1 values (0,1/3,3,3,3,3,3,3,3,3,3,3,3,3,NULL,'19970303','10:10:10','19970303101010','','','','3',3,3);
-Warnings:
-Warning 1265 Data truncated for column 'string' at row 1
insert into t1 values (0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,NULL,19970807,080706,19970403090807,-1,-1,-1,'-1',-1,-1);
Warnings:
Warning 1264 Out of range value for column 'utiny' at row 1
@@ -131,7 +129,7 @@ select auto,string,tiny,short,medium,long_int,longlong,real_float,real_double,ut
auto string tiny short medium long_int longlong real_float real_double utiny ushort umedium ulong ulonglong mod(floor(time_stamp/1000000),1000000)-mod(curdate(),1000000) date_field time_field date_time blob_col tinyblob_col mediumblob_col longblob_col
10 1 1 1 1 1 1 1.0 1.0000 1 00001 1 1 1 0 0000-00-00 00:00:00 0000-00-00 00:00:00 1 1 1 1
11 2 2 2 2 2 2 2.0 2.0000 2 00002 2 2 2 0 NULL NULL NULL NULL NULL 2 2
-12 0.33333333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
+12 0.3333 3 3 3 3 3 3.0 3.0000 3 00003 3 3 3 0 1997-03-03 10:10:10 1997-03-03 10:10:10 3
13 -1 -1 -1 -1 -1 -1 -1.0 -1.0000 0 00000 0 0 0 0 1997-08-07 08:07:06 1997-04-03 09:08:07 -1 -1 -1 -1
14 -429496729 -128 -32768 -8388608 -2147483648 -4294967295 -4294967296.0 -4294967295.0000 0 00000 0 0 0 0 0000-00-00 00:00:00 0000-00-00 00:00:00 -4294967295 -4294967295 -4294967295 -4294967295
15 4294967295 127 32767 8388607 2147483647 4294967295 4294967296.0 4294967295.0000 255 65535 16777215 4294967295 4294967295 0 0000-00-00 00:00:00 0000-00-00 00:00:00 4294967295 4294967295 4294967295 4294967295
@@ -183,7 +181,7 @@ Warning 1265 Data truncated for column 'new_field' at row 7
select * from t2;
auto string mediumblob_col new_field
1 2 2 ne
-2 0.33333333 ne
+2 0.3333 ne
3 -1 -1 ne
4 -429496729 -4294967295 ne
5 4294967295 4294967295 ne