From 92d283c026b66ae772b4343f366f0da6321daa28 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Mon, 6 Mar 2017 17:23:56 +0100 Subject: Fix MDEV-12142 crash when creating CSV table Was an unprepared longjmp (now throw) Also fix a wrong calculation of To_Line sometimes causing a crash because of buffer overflow. modified: storage/connect/tabdos.cpp Fix a wrong setting of USER for JDBC tables in connect_assisted_discovery. Update jdbc_new.test after that fix, which changed errors. modified: storage/connect/ha_connect.cc modified: storage/connect/mysql-test/connect/r/jdbc_new.result modified: storage/connect/mysql-test/connect/t/jdbc_new.test Make using try/catch/throw the default option modified: storage/connect/CMakeLists.txt Typo modified: storage/connect/xindex.cpp Replace setjmp-longjmp's by try_catch-throw modified: storage/connect/CMakeLists.txt modified: storage/connect/array.cpp modified: storage/connect/blkfil.cpp modified: storage/connect/colblk.cpp modified: storage/connect/connect.cc modified: storage/connect/filamtxt.cpp modified: storage/connect/filamvct.cpp modified: storage/connect/filter.cpp modified: storage/connect/global.h modified: storage/connect/ha_connect.cc modified: storage/connect/jdbconn.cpp modified: storage/connect/json.cpp modified: storage/connect/jsonudf.cpp modified: storage/connect/odbconn.cpp modified: storage/connect/osutil.c modified: storage/connect/plgdbutl.cpp deleted: storage/connect/plugutil.c added: storage/connect/plugutil.cpp modified: storage/connect/tabdos.cpp modified: storage/connect/tabfix.cpp modified: storage/connect/tabfmt.cpp modified: storage/connect/tabjdbc.cpp modified: storage/connect/tabjdbc.h modified: storage/connect/tabjson.cpp modified: storage/connect/tabmul.cpp modified: storage/connect/tabmul.h modified: storage/connect/tabmysql.cpp modified: storage/connect/tabodbc.cpp modified: storage/connect/tabodbc.h modified: storage/connect/tabpivot.cpp modified: storage/connect/tabsys.cpp modified: storage/connect/tabvct.cpp modified: storage/connect/tabvir.cpp modified: storage/connect/tabxml.cpp modified: storage/connect/valblk.cpp modified: storage/connect/value.cpp modified: storage/connect/xindex.cpp modified: storage/connect/xobject.cpp --- storage/connect/CMakeLists.txt | 8 +- storage/connect/array.cpp | 28 +- storage/connect/blkfil.cpp | 12 +- storage/connect/colblk.cpp | 24 +- storage/connect/connect.cc | 372 +++++++---- storage/connect/filamtxt.cpp | 20 +- storage/connect/filamvct.cpp | 108 ++- storage/connect/filter.cpp | 24 +- storage/connect/global.h | 7 +- storage/connect/ha_connect.cc | 734 ++++++++++++--------- storage/connect/jdbconn.cpp | 12 + storage/connect/json.cpp | 122 +++- storage/connect/jsonudf.cpp | 109 ++- .../connect/mysql-test/connect/r/jdbc_new.result | 4 +- storage/connect/mysql-test/connect/t/jdbc_new.test | 4 +- storage/connect/odbconn.cpp | 47 +- storage/connect/osutil.c | 52 -- storage/connect/plgdbutl.cpp | 89 ++- storage/connect/plugutil.c | 592 ----------------- storage/connect/plugutil.cpp | 596 +++++++++++++++++ storage/connect/tabdos.cpp | 95 ++- storage/connect/tabfix.cpp | 76 ++- storage/connect/tabfmt.cpp | 32 +- storage/connect/tabjdbc.cpp | 134 ---- storage/connect/tabjdbc.h | 40 -- storage/connect/tabjson.cpp | 30 +- storage/connect/tabmul.cpp | 16 +- storage/connect/tabmysql.cpp | 8 +- storage/connect/tabodbc.cpp | 414 ------------ storage/connect/tabodbc.h | 52 +- storage/connect/tabpivot.cpp | 52 +- storage/connect/tabsys.cpp | 76 ++- storage/connect/tabvct.cpp | 32 +- 
storage/connect/tabvir.cpp | 12 +- storage/connect/tabxml.cpp | 130 +++- storage/connect/valblk.cpp | 76 ++- storage/connect/value.cpp | 73 +- storage/connect/xindex.cpp | 12 +- storage/connect/xobject.cpp | 22 +- 39 files changed, 2254 insertions(+), 2092 deletions(-) delete mode 100644 storage/connect/plugutil.c create mode 100644 storage/connect/plugutil.cpp (limited to 'storage') diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index a602084b5bd..e4b0a372ee0 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -18,10 +18,10 @@ SET(CONNECT_PLUGIN_DYNAMIC "connect") SET(CONNECT_SOURCES ha_connect.cc connect.cc user_connect.cc mycat.cc -fmdlex.c osutil.c plugutil.c rcmsg.c rcmsg.h +fmdlex.c osutil.c rcmsg.c rcmsg.h array.cpp blkfil.cpp colblk.cpp csort.cpp -filamap.cpp filamdbf.cpp filamfix.cpp filamgz.cpp filamtxt.cpp -filter.cpp json.cpp jsonudf.cpp maputil.cpp myconn.cpp myutil.cpp plgdbutl.cpp +filamap.cpp filamdbf.cpp filamfix.cpp filamgz.cpp filamtxt.cpp filter.cpp +json.cpp jsonudf.cpp maputil.cpp myconn.cpp myutil.cpp plgdbutl.cpp plugutil.cpp reldef.cpp tabcol.cpp tabdos.cpp tabext.cpp tabfix.cpp tabfmt.cpp tabjson.cpp table.cpp tabmul.cpp tabmysql.cpp taboccur.cpp tabpivot.cpp tabsys.cpp tabtbl.cpp tabutil.cpp tabvir.cpp tabxcl.cpp valblk.cpp value.cpp xindex.cpp xobject.cpp @@ -38,7 +38,7 @@ user_connect.h valblk.h value.h xindex.h xobject.h xtable.h) # Definitions that are shared for all OSes # add_definitions( -DMARIADB -DFORCE_INIT_OF_VARS -Dconnect_EXPORTS) -add_definitions( -DHUGE_SUPPORT -DGZ_SUPPORT -DPIVOT_SUPPORT ) +add_definitions( -DHUGE_SUPPORT -DGZ_SUPPORT -DPIVOT_SUPPORT -DUSE_TRY ) # diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index 1998ab890e9..beb58baa107 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -518,8 +518,12 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm) vp = valp; } else if (opc != OP_EXIST) { - sprintf(g->Message, MSG(MISSING_ARG), opc); - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); + sprintf(g->Message, MSG(MISSING_ARG), opc); +#if defined(USE_TRY) + throw TYPE_ARRAY; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); +#endif // !USE_TRY } else // OP_EXIST return Nval > 0; @@ -681,15 +685,23 @@ void ARRAY::SetPrecision(PGLOBAL g, int p) { if (Vblp == NULL) { strcpy(g->Message, MSG(PREC_VBLP_NULL)); - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); +#if defined(USE_TRY) + throw TYPE_ARRAY; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); +#endif // !USE_TRY } // endif Vblp bool was = Vblp->IsCi(); if (was && !p) { strcpy(g->Message, MSG(BAD_SET_CASE)); - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); - } // endif Vblp +#if defined(USE_TRY) + throw TYPE_ARRAY; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); +#endif // !USE_TRY + } // endif Vblp if (was || !p) return; @@ -699,7 +711,11 @@ void ARRAY::SetPrecision(PGLOBAL g, int p) if (!was && Type == TYPE_STRING) // Must be resorted to eliminate duplicate strings if (Sort(g)) - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); +#if defined(USE_TRY) + throw TYPE_ARRAY; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); +#endif // !USE_TRY } // end of SetPrecision diff --git a/storage/connect/blkfil.cpp b/storage/connect/blkfil.cpp index 1f5a1a27ae5..4e5c7484057 100644 --- a/storage/connect/blkfil.cpp +++ b/storage/connect/blkfil.cpp @@ -1,11 +1,11 @@ /************* BlkFil C++ Program Source Code File (.CPP) 
**************/ /* PROGRAM NAME: BLKFIL */ /* ------------- */ -/* Version 2.5 */ +/* Version 2.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -595,8 +595,12 @@ BLKFILIN::BLKFILIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) if (Colp->GetResultType() != Type) { sprintf(g->Message, "BLKFILIN: %s", MSG(VALTYPE_NOMATCH)); - longjmp(g->jumper[g->jump_level], 99); - } else if (Colp->GetValue()->IsCi()) +#if defined(USE_TRY) + throw g->Message; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 99); +#endif // !USE_TRY + } else if (Colp->GetValue()->IsCi()) Arap->SetPrecision(g, 1); // Case insensitive Sorted = Colp->IsSorted() > 0; diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index 58841387249..a1169296a89 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -197,8 +197,12 @@ int COLBLK::GetLengthEx(void) void COLBLK::ReadColumn(PGLOBAL g) { sprintf(g->Message, MSG(UNDEFINED_AM), "ReadColumn"); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of ReadColumn +#if defined(USE_TRY) + throw TYPE_COLBLK; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_COLBLK); +#endif // !USE_TRY +} // end of ReadColumn /***********************************************************************/ /* WriteColumn: what this routine does is to access the last line */ @@ -208,8 +212,12 @@ void COLBLK::ReadColumn(PGLOBAL g) void COLBLK::WriteColumn(PGLOBAL g) { sprintf(g->Message, MSG(UNDEFINED_AM), "WriteColumn"); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of WriteColumn +#if defined(USE_TRY) + throw TYPE_COLBLK; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_COLBLK); +#endif // !USE_TRY +} // end of WriteColumn /***********************************************************************/ /* Make file output of a column descriptor block. */ @@ -262,8 +270,12 @@ SPCBLK::SPCBLK(PCOLUMN cp) void SPCBLK::WriteColumn(PGLOBAL g) { sprintf(g->Message, MSG(SPCOL_READONLY), Name); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of WriteColumn +#if defined(USE_TRY) + throw TYPE_COLBLK; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_COLBLK); +#endif // !USE_TRY +} // end of WriteColumn /***********************************************************************/ /* RIDBLK constructor for the ROWID special column. */ diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index a17c5dafa43..6be5bd349dc 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -1,4 +1,4 @@ -/* Copyright (C) Olivier Bertrand 2004 - 2015 +/* Copyright (C) Olivier Bertrand 2004 - 2017 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -15,10 +15,10 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /***********************************************************************/ -/* Author Olivier BERTRAND bertrandop@gmail.com 2004-2015 */ +/* Author Olivier BERTRAND bertrandop@gmail.com 2004-2017 */ /* */ -/* WHAT THIS PROGRAM DOES: */ -/* ----------------------- */ +/* WHAT THIS PROGRAM DOES: */ +/* ----------------------- */ /* This program are the CONNECT general purpose semantic routines. 
*/ /***********************************************************************/ #ifdef USE_PRAGMA_IMPLEMENTATION @@ -188,47 +188,60 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info) /* GetTDB: Get the table description block of a CONNECT table. */ /***********************************************************************/ PTDB CntGetTDB(PGLOBAL g, LPCSTR name, MODE mode, PHC h) - { - int rc; - PTDB tdbp; - PTABLE tabp; - PDBUSER dup= PlgGetUser(g); - volatile PCATLG cat= (dup) ? dup->Catalog : NULL; // Safe over longjmp - - if (trace) - printf("CntGetTDB: name=%s mode=%d cat=%p\n", name, mode, cat); - - if (!cat) - return NULL; - - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - tdbp= NULL; - goto err; - } // endif rc - - // Get table object from the catalog - tabp= new(g) XTAB(name); - - if (trace) - printf("CntGetTDB: tabp=%p\n", tabp); - - // Perhaps this should be made thread safe - ((MYCAT*)cat)->SetHandler(h); - - if (!(tdbp= cat->GetTable(g, tabp, mode))) - printf("CntGetTDB: %s\n", g->Message); +{ + PTDB tdbp; + PTABLE tabp; + PDBUSER dup = PlgGetUser(g); + volatile PCATLG cat = (dup) ? dup->Catalog : NULL; // Safe over longjmp + + if (trace) + printf("CntGetTDB: name=%s mode=%d cat=%p\n", name, mode, cat); + + if (!cat) + return NULL; + +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return NULL; + } // endif jump_level + + if (setjmp(g->jumper[++g->jump_level])) { + tdbp = NULL; + goto err; + } // endif rc +#endif // !USE_TRY + + // Get table object from the catalog + tabp = new(g) XTAB(name); + + if (trace) + printf("CntGetTDB: tabp=%p\n", tabp); + + // Perhaps this should be made thread safe + ((MYCAT*)cat)->SetHandler(h); + + if (!(tdbp = cat->GetTable(g, tabp, mode))) + printf("CntGetTDB: %s\n", g->Message); + +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch +#else // !USE_TRY +err: + g->jump_level--; +#endif // !USE_TRY - err: if (trace) printf("Returning tdbp=%p mode=%d\n", tdbp, mode); - g->jump_level--; return tdbp; } // end of CntGetTDB @@ -239,7 +252,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, bool del, PHC) { char *p; - int i, n, rc; + int i, n; bool rcop= true; PCOL colp; //PCOLUMN cp; @@ -254,15 +267,19 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, return true; } // endif tdbp - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif jump_level +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif jump_level - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - goto err; - } // endif rc + if (setjmp(g->jumper[++g->jump_level])) { + goto err; + } // endif rc +#endif // !USE_TRY if (!c1) { if (mode == MODE_INSERT) @@ -281,7 +298,11 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, if (g->Message[0] == 0) sprintf(g->Message, MSG(COL_ISNOT_TABLE), p, 
tdbp->GetName()); - goto err; +#if defined(USE_TRY) + throw 1; +#else // !USE_TRY + goto err; +#endif // !USE_TRY } // endif colp n= strlen(p) + 1; @@ -289,12 +310,20 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, for (i= 0, colp= tdbp->GetColumns(); colp; i++, colp= colp->GetNext()) { if (colp->InitValue(g)) - goto err; +#if defined(USE_TRY) + throw 2; +#else // !USE_TRY + goto err; +#endif // !USE_TRY if (mode == MODE_INSERT) // Allow type conversion if (colp->SetBuffer(g, colp->GetValue(), true, false)) - goto err; +#if defined(USE_TRY) + throw 3; +#else // !USE_TRY + goto err; +#endif // !USE_TRY colp->AddColUse(U_P); // For PLG tables } // endfor colp @@ -309,8 +338,12 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, if (!(utp= (PTDBASE)tdbp->Duplicate(g))) { sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName()); - goto err; - } // endif tp +#if defined(USE_TRY) + throw 4; +#else // !USE_TRY + goto err; +#endif // !USE_TRY + } // endif tp if (!c2) // Allocate all column blocks for that table @@ -323,10 +356,18 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, for (i= 0, colp= utp->GetColumns(); colp; i++, colp= colp->GetNext()) { if (colp->InitValue(g)) - goto err; +#if defined(USE_TRY) + throw 5; +#else // !USE_TRY + goto err; +#endif // !USE_TRY if (colp->SetBuffer(g, colp->GetValue(), true, false)) - goto err; +#if defined(USE_TRY) + throw 6; +#else // !USE_TRY + goto err; +#endif // !USE_TRY } // endfor colp @@ -358,16 +399,29 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, if (mode != MODE_ANY && mode != MODE_ALTER) { if (tdbp->OpenDB(g)) { printf("%s\n", g->Message); - goto err; - } else +#if defined(USE_TRY) + throw 7; +#else // !USE_TRY + goto err; +#endif // !USE_TRY + } else tdbp->SetNext(NULL); } // endif mode rcop= false; - err: - g->jump_level--; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch +#else // !USE_TRY +err: + g->jump_level--; +#endif // !USE_TRY return rcop; } // end of CntOpenTable @@ -391,23 +445,27 @@ RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr) RCODE rc= RC_OK; PCOL colp; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - if (trace) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - printf("EvalColumns: %s\n", g->Message); - } // endif +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + if (trace) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + printf("EvalColumns: %s\n", g->Message); + } // endif - return RC_FX; - } // endif jump_level + return RC_FX; + } // endif jump_level - if (setjmp(g->jumper[++g->jump_level]) != 0) { - if (trace) - printf("Error reading columns: %s\n", g->Message); + if (setjmp(g->jumper[++g->jump_level]) != 0) { + if (trace) + printf("Error reading columns: %s\n", g->Message); - rc= RC_FX; - goto err; - } // endif rc + rc = RC_FX; + goto err; + } // endif rc +#endif // !USE_TRY for (colp= tdbp->GetColumns(); rc == RC_OK && colp; colp= colp->GetNext()) { @@ -421,8 +479,19 @@ RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr) } // endfor colp - err: - g->jump_level--; +#if defined(USE_TRY) +} catch (int n) { + if (trace) + printf("Error %d reading columns: %s\n", n, g->Message); + + rc = RC_FX; +} catch (const char *msg) { + 
strcpy(g->Message, msg); +} // end catch +#else // !USE_TRY +err: + g->jump_level--; +#endif // !USE_TRY return rc; } // end of EvalColumns @@ -445,16 +514,20 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) ((PTDBASE)tdbp)->ResetKindex(g, NULL); } // endif index - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return RC_FX; - } // endif jump_level - - if ((setjmp(g->jumper[++g->jump_level])) != 0) { - rc= RC_FX; - goto err; - } // endif rc +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return RC_FX; + } // endif jump_level + + if ((setjmp(g->jumper[++g->jump_level])) != 0) { + rc = RC_FX; + goto err; + } // endif rc +#endif // !USE_TRY // Do it now to avoid double eval when filtering for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) @@ -470,8 +543,17 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) if (rc == RC_OK) rc= EvalColumns(g, tdbp, false); - err: - g->jump_level--; +#if defined(USE_TRY) + } catch (int) { + rc = RC_FX; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = RC_FX; + } // end catch +#else // !USE_TRY +err: + g->jump_level--; +#endif // !USE_TRY return rc; } // end of CntReadNext @@ -487,17 +569,21 @@ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp) if (!tdbp) return RC_FX; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return RC_FX; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); - rc= RC_FX; - goto err; - } // endif rc +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return RC_FX; + } // endif jump_level + + if (setjmp(g->jumper[++g->jump_level]) != 0) { + printf("%s\n", g->Message); + rc = RC_FX; + goto err; + } // endif rc +#endif // !USE_TRY // Store column values in table write buffer(s) for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext()) @@ -511,8 +597,18 @@ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp) // Return result code from write operation rc= (RCODE)tdbp->WriteDB(g); - err: - g->jump_level--; +#if defined(USE_TRY) +} catch (int n) { + printf("Exception %d: %s\n", n, g->Message); + rc = RC_FX; +} catch (const char *msg) { + strcpy(g->Message, msg); + rc = RC_FX; +} // end catch +#else // !USE_TRY +err: + g->jump_level--; +#endif // !USE_TRY return rc; } // end of CntWriteRow @@ -598,49 +694,59 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) break; } // endswitch rc - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - rc= RC_FX; - goto err; - } // endif - - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - rc= RC_FX; - g->jump_level--; - goto err; - } // endif +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + rc = RC_FX; + goto err; + } // endif + + if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { + rc = RC_FX; + goto err; + } // endif +#endif // !USE_TRY // This will close the table file(s) and also finalize write // operations such as Insert, Update, or Delete. 
tdbp->SetAbort(abort); tdbp->CloseDB(g); tdbp->SetAbort(false); - g->jump_level--; if (trace > 1) printf("Table %s closed\n", tdbp->GetName()); -//if (!((PTDBDOX)tdbp)->GetModified()) -// return 0; - - if (nox || tdbp->GetMode() == MODE_READ || tdbp->GetMode() == MODE_ANY) - return 0; - - if (trace > 1) - printf("About to reset opt\n"); - - if (!tdbp->IsRemote()) { - // Make all the eventual indexes - PTDBDOX tbxp = (PTDBDOX)tdbp; - tbxp->ResetKindex(g, NULL); - tbxp->SetKey_Col(NULL); - rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1); - } // endif remote + if (!nox && tdbp->GetMode() != MODE_READ && tdbp->GetMode() != MODE_ANY) { + if (trace > 1) + printf("About to reset opt\n"); + + if (!tdbp->IsRemote()) { + // Make all the eventual indexes + PTDBDOX tbxp = (PTDBDOX)tdbp; + tbxp->ResetKindex(g, NULL); + tbxp->SetKey_Col(NULL); + rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1); + } // endif remote + + } // endif nox + +#if defined(USE_TRY) +} catch (int) { + rc = RC_FX; +} catch (const char *msg) { + strcpy(g->Message, msg); + rc = RC_FX; +} // end catch +#else // !USE_TRY +err: + g->jump_level--; +#endif // !USE_TRY - err: if (trace > 1) - printf("Done rc=%d\n", rc); + htrc("Done rc=%d\n", rc); return (rc == RC_OK || rc == RC_INFO) ? 0 : rc; } // end of CntCloseTable diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index e53cdcd9ba9..99b51632dc9 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1,11 +1,11 @@ /*********** File AM Txt C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMTXT */ /* ------------- */ -/* Version 1.6 */ +/* Version 1.7 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -1161,13 +1161,21 @@ int DOSFAM::RenameTempFile(PGLOBAL g) if (rename(filename, filetemp)) { // Save file for security sprintf(g->Message, MSG(RENAME_ERROR), filename, filetemp, strerror(errno)); - longjmp(g->jumper[g->jump_level], 51); - } else if (rename(tempname, filename)) { +#if defined(USE_TRY) + throw 51; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 51); +#endif // !USE_TRY + } else if (rename(tempname, filename)) { sprintf(g->Message, MSG(RENAME_ERROR), tempname, filename, strerror(errno)); rc = rename(filetemp, filename); // Restore saved file - longjmp(g->jumper[g->jump_level], 52); - } else if (remove(filetemp)) { +#if defined(USE_TRY) + throw 52; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 52); +#endif // !USE_TRY + } else if (remove(filetemp)) { sprintf(g->Message, MSG(REMOVE_ERROR), filetemp, strerror(errno)); rc = RC_INFO; // Acceptable diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index fdc5433f4a4..8e5c31d6b49 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -560,6 +560,8 @@ bool VCTFAM::AllocateBuffer(PGLOBAL g) /***********************************************************************/ bool VCTFAM::InitInsert(PGLOBAL g) { + bool rc = false; + // We come here in MODE_INSERT only if (Last == Nrec) { CurBlk = Block; @@ -573,27 +575,44 @@ bool VCTFAM::InitInsert(PGLOBAL g) CurBlk = Block - 1; CurNum = Last; - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif - - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - g->jump_level--; 
- return true; - } // endif +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif + + if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { + g->jump_level--; + return true; + } // endif +#endif // !USE_TRY // Last block must be updated by new values for (; cp; cp = (PVCTCOL)cp->Next) cp->ReadBlock(g); - g->jump_level--; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + rc = true; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = true; + } // end catch +#else // !USE_TRY + g->jump_level--; +#endif // !USE_TRY } // endif Last - // We are not currently using a temporary file for Insert - T_Stream = Stream; - return false; + if (!rc) + // We are not currently using a temporary file for Insert + T_Stream = Stream; + + return rc; } // end of InitInsert /***********************************************************************/ @@ -1107,7 +1126,11 @@ void VCTFAM::CloseTableFile(PGLOBAL g, bool abort) } else if (AddBlock) { // Last block was not written rc = ResetTableSize(g, CurBlk, Nrec); - longjmp(g->jumper[g->jump_level], 44); +#if defined(USE_TRY) + throw 44; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 44); +#endif // !USE_TRY } // endif } else if (mode == MODE_UPDATE) { @@ -1528,7 +1551,7 @@ bool VCMFAM::AllocateBuffer(PGLOBAL g) /***********************************************************************/ bool VCMFAM::InitInsert(PGLOBAL g) { - int rc; + bool rc = false; volatile PVCTCOL cp = (PVCTCOL)Tdbp->GetColumns(); // We come here in MODE_INSERT only @@ -1542,23 +1565,38 @@ bool VCMFAM::InitInsert(PGLOBAL g) CurNum = Last; } // endif Last - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif - - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - g->jump_level--; - return true; - } // endif +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif + + if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { + g->jump_level--; + return true; + } // endif +#endif // !USE_TRY // Initialize the column block pointer for (; cp; cp = (PVCTCOL)cp->Next) cp->ReadBlock(g); - g->jump_level--; - return false; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + rc = true; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = true; + } // end catch +#else // !USE_TRY + g->jump_level--; +#endif // !USE_TRY + return rc; } // end of InitInsert /***********************************************************************/ @@ -2503,7 +2541,11 @@ void VECFAM::CloseTableFile(PGLOBAL g, bool abort) if (wrc != RC_FX) rc = ResetTableSize(g, Block, Last); else - longjmp(g->jumper[g->jump_level], 44); +#if defined(USE_TRY) + throw 44; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 44); +#endif // !USE_TRY } else if (mode == MODE_UPDATE) { if (UseTemp && !InitUpdate && !Abort) { @@ -4164,8 +4206,12 @@ void BGVFAM::CloseTableFile(PGLOBAL g, bool abort) } else if (AddBlock) { // Last block was not written rc = ResetTableSize(g, CurBlk, Nrec); - longjmp(g->jumper[g->jump_level], 44); - } // endif +#if defined(USE_TRY) + throw 44; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 44); +#endif // !USE_TRY + } // endif } else if (mode == MODE_UPDATE) { // 
Write back to file any pending modifications diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index 262d6b58a70..db96647f634 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -1,7 +1,7 @@ /***************** Filter C++ Class Filter Code (.CPP) *****************/ -/* Name: FILTER.CPP Version 3.9 */ +/* Name: FILTER.CPP Version 4.0 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* This file contains the class FILTER function code. */ /***********************************************************************/ @@ -87,8 +87,12 @@ BYTE OpBmp(PGLOBAL g, OPVAL opc) case OP_EXIST: bt = 0x00; break; default: sprintf(g->Message, MSG(BAD_FILTER_OP), opc); - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); - } // endswitch opc +#if defined(USE_TRY) + throw TYPE_ARRAY; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_ARRAY); +#endif // !USE_TRY + } // endswitch opc return bt; } // end of OpBmp @@ -1711,7 +1715,11 @@ PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having) break; // Remove eventual ending separator(s) // if (fp->Convert(g, having)) -// longjmp(g->jumper[g->jump_level], TYPE_FILTER); +//#if defined(USE_TRY) +// throw TYPE_ARRAY; +//#else // !USE_TRY +// longjmp(g->jumper[g->jump_level], TYPE_FILTER); +//#endif // !USE_TRY filp = fp; fp = fp->Next; @@ -1744,7 +1752,11 @@ DllExport bool ApplyFilter(PGLOBAL g, PFIL filp) // return TRUE; if (filp->Eval(g)) - longjmp(g->jumper[g->jump_level], TYPE_FILTER); +#if defined(USE_TRY) + throw TYPE_FILTER; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_FILTER); +#endif // !USE_TRY if (trace > 1) htrc("PlugFilter filp=%p result=%d\n", diff --git a/storage/connect/global.h b/storage/connect/global.h index 4d01a3ff05b..5579e8bd248 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -1,6 +1,6 @@ /***********************************************************************/ /* GLOBAL.H: Declaration file used by all CONNECT implementations. 
*/ -/* (C) Copyright Olivier Bertrand 1993-2014 */ +/* (C) Copyright Olivier Bertrand 1993-2017 */ /***********************************************************************/ /***********************************************************************/ @@ -229,9 +229,10 @@ typedef struct _parm { typedef struct _global { /* Global structure */ void *Sarea; /* Points to work area */ uint Sarea_Size; /* Work area size */ - PACTIVITY Activityp, ActivityStart; + PACTIVITY Activityp; char Message[MAX_STR]; - int Createas; /* To pass info to created table */ + ulong More; /* Used by jsonudf */ + int Createas; /* To pass info to created table */ void *Xchk; /* indexes in create/alter */ short Alchecked; /* Checked for ALTER */ short Mrr; /* True when doing mrr */ diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index d1ab18f52d5..930e7604fbd 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1895,40 +1895,62 @@ int ha_connect::OpenTable(PGLOBAL g, bool del) bool ha_connect::CheckColumnList(PGLOBAL g) { // Check the list of used fields (columns) - int rc; bool brc= false; PCOL colp; Field* *field; Field* fp; MY_BITMAP *map= table->read_set; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif jump_level +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif jump_level - if ((rc= setjmp(g->jumper[++g->jump_level])) == 0) { + if (!setjmp(g->jumper[++g->jump_level])) { +#endif // !USE_TRY for (field= table->field; fp= *field; field++) if (bitmap_is_set(map, fp->field_index)) { if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name, 0))) { sprintf(g->Message, "Column %s not found in %s", fp->field_name, tdbp->GetName()); - brc= true; - goto fin; - } // endif colp +#if defined(USE_TRY) + throw 1; +#else // !USE_TRY + brc = true; + goto fin; +#endif // !USE_TRY + } // endif colp if ((brc= colp->InitValue(g))) - goto fin; +#if defined(USE_TRY) + throw 2; +#else // !USE_TRY + goto fin; +#endif // !USE_TRY colp->AddColUse(U_P); // For PLG tables } // endif - } else - brc= true; - - fin: - g->jump_level--; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + brc = true; + } catch (const char *msg) { + strcpy(g->Message, msg); + brc = true; + } // end catch +#else // !USE_TRY + } else + brc = true; + + fin: + g->jump_level--; +#endif // !USE_TRY return brc; } // end of CheckColumnList @@ -3054,7 +3076,6 @@ const COND *ha_connect::cond_push(const COND *cond) DBUG_ENTER("ha_connect::cond_push"); if (tdbp) { - int rc; PGLOBAL& g= xp->g; AMT tty= tdbp->GetAmType(); bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC); @@ -3062,15 +3083,19 @@ const COND *ha_connect::cond_push(const COND *cond) tty == TYPE_AM_TBL || tty == TYPE_AM_MYSQL || tty == TYPE_AM_PLG || tty == TYPE_AM_JDBC || x); - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - DBUG_RETURN(cond); - } // endif jump_level + // This should never happen but is done to avoid crashing +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + 
DBUG_RETURN(cond); + } // endif jump_level - // This should never happen but is done to avoid crashing - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) - goto fin; + if (setjmp(g->jumper[++g->jump_level])) + goto fin; +#endif // !USE_TRY if (b) { PCFIL filp; @@ -3078,7 +3103,7 @@ const COND *ha_connect::cond_push(const COND *cond) if ((filp= tdbp->GetCondFil()) && filp->Cond == cond && filp->Idx == active_index && filp->Type == tty) - goto fin; // Already done + goto fin; filp= new(g) CONDFIL(cond, active_index, tty); rc = filp->Init(g, this); @@ -3111,9 +3136,20 @@ const COND *ha_connect::cond_push(const COND *cond) } else if (tty != TYPE_AM_JSN && tty != TYPE_AM_JSON) tdbp->SetFilter(CondFilter(g, (Item *)cond)); - fin: - g->jump_level--; - } // endif tdbp +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch + + fin:; +#else // !USE_TRY + fin: + g->jump_level--; +#endif // !USE_TRY + } // endif tdbp // Let MySQL do the filtering DBUG_RETURN(cond); @@ -3263,6 +3299,22 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*) PGLOBAL& g= xp->g; PDBUSER dup= PlgGetUser(g); +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif + + if (setjmp(g->jumper[++g->jump_level])) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif setjmp +#endif // !USE_TRY + // Ignore error on the opt file dup->Check &= ~CHK_OPT; tdbp= GetTDB(g); @@ -3284,6 +3336,20 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*) } else if (!tdbp) rc= HA_ERR_INTERNAL_ERROR; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + rc = HA_ERR_INTERNAL_ERROR; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = HA_ERR_INTERNAL_ERROR; + } // end catch +#else // !USE_TRY +err: + g->jump_level--; +#endif // !USE_TRY + return rc; } // end of optimize @@ -5292,7 +5358,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, if (topt->oplist) { host= GetListOption(g, "host", topt->oplist, "localhost"); user= GetListOption(g, "user", topt->oplist, - (ttp == TAB_ODBC ? NULL : "root")); + ((ttp == TAB_ODBC || ttp == TAB_JDBC) ? NULL : "root")); // Default value db can come from the DBNAME=xxx option. db= GetListOption(g, "database", topt->oplist, db); col= GetListOption(g, "colist", topt->oplist, col); @@ -5329,22 +5395,26 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #endif // ZIP_SUPPORT } else { host= "localhost"; - user= (ttp == TAB_ODBC ? NULL : "root"); + user= ((ttp == TAB_ODBC || ttp == TAB_JDBC) ? 
NULL : "root"); } // endif option_list if (!(shm= (char*)db)) db= table_s->db.str; // Default value +#if defined(USE_TRY) + try { +#else // !USE_TRY // Save stack and allocation environment and prepare error return if (g->jump_level == MAX_JUMP) { strcpy(g->Message, MSG(TOO_MANY_JUMPS)); goto jer; } // endif jump_level - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + if (setjmp(g->jumper[++g->jump_level]) != 0) { + rc = HA_ERR_INTERNAL_ERROR; goto err; } // endif rc +#endif // !USE_TRY // Check table type if (ttp == TAB_UNDEF) { @@ -5354,7 +5424,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); } else if (ttp == TAB_NIY) { sprintf(g->Message, "Unsupported table type %s", topt->type); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + rc = HA_ERR_INTERNAL_ERROR; goto err; } // endif ttp @@ -5365,8 +5435,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd, if (!tbl) { strcpy(g->Message, "Missing table list"); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; + rc = HA_ERR_INTERNAL_ERROR; + goto err; } // endif tbl tab= PlugDup(g, tbl); @@ -5558,320 +5628,320 @@ static int connect_assisted_discovery(handlerton *, THD* thd, if (src && fnc != FNC_NO) { strcpy(g->Message, "Cannot make catalog table from srcdef"); ok= false; - } // endif src + } // endif src - if (ok) { - char *cnm, *rem, *dft, *xtra, *key, *fmt; - int i, len, prec, dec, typ, flg; + if (ok) { + char *cnm, *rem, *dft, *xtra, *key, *fmt; + int i, len, prec, dec, typ, flg; -// if (cat) -// cat->SetDataPath(g, table_s->db.str); -// else -// return HA_ERR_INTERNAL_ERROR; // Should never happen + // if (cat) + // cat->SetDataPath(g, table_s->db.str); + // else + // return HA_ERR_INTERNAL_ERROR; // Should never happen - dpath= SetPath(g, table_s->db.str); + dpath = SetPath(g, table_s->db.str); if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC && ttp != TAB_JDBC) { - qrp= SrcColumns(g, host, db, user, pwd, src, port); + qrp = SrcColumns(g, host, db, user, pwd, src, port); - if (qrp && ttp == TAB_OCCUR) - if (OcrSrcCols(g, qrp, col, ocl, rnk)) { - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif OcrSrcCols + if (qrp && ttp == TAB_OCCUR) + if (OcrSrcCols(g, qrp, col, ocl, rnk)) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif OcrSrcCols - } else switch (ttp) { - case TAB_DBF: - qrp= DBFColumns(g, dpath, fn, fnc == FNC_COL); - break; + } else switch (ttp) { + case TAB_DBF: + qrp = DBFColumns(g, dpath, fn, fnc == FNC_COL); + break; #if defined(ODBC_SUPPORT) - case TAB_ODBC: - switch (fnc) { - case FNC_NO: - case FNC_COL: - if (src) { - qrp= ODBCSrcCols(g, dsn, (char*)src, sop); - src= NULL; // for next tests - } else - qrp= ODBCColumns(g, dsn, shm, tab, NULL, - mxr, fnc == FNC_COL, sop); - - break; - case FNC_TABLE: - qrp= ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop); - break; - case FNC_DSN: - qrp= ODBCDataSources(g, mxr, true); - break; - case FNC_DRIVER: - qrp= ODBCDrivers(g, mxr, true); - break; - default: - sprintf(g->Message, "invalid catfunc %s", fncn); - break; - } // endswitch info + case TAB_ODBC: + switch (fnc) { + case FNC_NO: + case FNC_COL: + if (src) { + qrp = ODBCSrcCols(g, dsn, (char*)src, sop); + src = NULL; // for next tests + } else + qrp = ODBCColumns(g, dsn, shm, tab, NULL, + mxr, fnc == FNC_COL, sop); + + break; + case FNC_TABLE: + qrp = ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop); + break; + case 
FNC_DSN: + qrp = ODBCDataSources(g, mxr, true); + break; + case FNC_DRIVER: + qrp = ODBCDrivers(g, mxr, true); + break; + default: + sprintf(g->Message, "invalid catfunc %s", fncn); + break; + } // endswitch info - break; + break; #endif // ODBC_SUPPORT #if defined(JDBC_SUPPORT) case TAB_JDBC: switch (fnc) { - case FNC_NO: - case FNC_COL: - if (src) { - qrp= JDBCSrcCols(g, (char*)src, sjp); - src= NULL; // for next tests - } else - qrp= JDBCColumns(g, shm, tab, NULL, mxr, fnc == FNC_COL, sjp); + case FNC_NO: + case FNC_COL: + if (src) { + qrp = JDBCSrcCols(g, (char*)src, sjp); + src = NULL; // for next tests + } else + qrp = JDBCColumns(g, shm, tab, NULL, mxr, fnc == FNC_COL, sjp); - break; - case FNC_TABLE: - qrp= JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp); - break; + break; + case FNC_TABLE: + qrp = JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp); + break; #if 0 - case FNC_DSN: - qrp= JDBCDataSources(g, mxr, true); - break; + case FNC_DSN: + qrp = JDBCDataSources(g, mxr, true); + break; #endif // 0 - case FNC_DRIVER: - qrp= JDBCDrivers(g, mxr, true); - break; - default: - sprintf(g->Message, "invalid catfunc %s", fncn); - break; + case FNC_DRIVER: + qrp = JDBCDrivers(g, mxr, true); + break; + default: + sprintf(g->Message, "invalid catfunc %s", fncn); + break; } // endswitch info break; #endif // JDBC_SUPPORT case TAB_MYSQL: - qrp= MyColumns(g, thd, host, db, user, pwd, tab, - NULL, port, fnc == FNC_COL); - break; - case TAB_CSV: + qrp = MyColumns(g, thd, host, db, user, pwd, tab, + NULL, port, fnc == FNC_COL); + break; + case TAB_CSV: qrp = CSVColumns(g, dpath, topt, fnc == FNC_COL); - break; + break; #if defined(__WIN__) - case TAB_WMI: - qrp= WMIColumns(g, nsp, cls, fnc == FNC_COL); - break; + case TAB_WMI: + qrp = WMIColumns(g, nsp, cls, fnc == FNC_COL); + break; #endif // __WIN__ - case TAB_PRX: - case TAB_TBL: - case TAB_XCL: - case TAB_OCCUR: - bif= fnc == FNC_COL; - qrp= TabColumns(g, thd, db, tab, bif); - - if (!qrp && bif && fnc != FNC_COL) // tab is a view - qrp= MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, false); - - if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL) - if (OcrColumns(g, qrp, col, ocl, rnk)) { - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif OcrColumns + case TAB_PRX: + case TAB_TBL: + case TAB_XCL: + case TAB_OCCUR: + bif = fnc == FNC_COL; + qrp = TabColumns(g, thd, db, tab, bif); + + if (!qrp && bif && fnc != FNC_COL) // tab is a view + qrp = MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, false); + + if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL) + if (OcrColumns(g, qrp, col, ocl, rnk)) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif OcrColumns - break; + break; #if defined(PIVOT_SUPPORT) - case TAB_PIVOT: - qrp= PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port); - break; + case TAB_PIVOT: + qrp = PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port); + break; #endif // PIVOT_SUPPORT - case TAB_VIR: - qrp= VirColumns(g, fnc == FNC_COL); - break; - case TAB_JSON: - qrp= JSONColumns(g, (char*)db, topt, fnc == FNC_COL); - break; + case TAB_VIR: + qrp = VirColumns(g, fnc == FNC_COL); + break; + case TAB_JSON: + qrp = JSONColumns(g, (char*)db, topt, fnc == FNC_COL); + break; #if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT) - case TAB_XML: - qrp= XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL); - break; + case TAB_XML: + qrp = XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL); + break; #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT - case TAB_OEM: - qrp= 
OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL); - break; - default: - strcpy(g->Message, "System error during assisted discovery"); - break; - } // endswitch ttp - - if (!qrp) { - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif !qrp - - if (fnc != FNC_NO || src || ttp == TAB_PIVOT) { - // Catalog like table - for (crp= qrp->Colresp; !rc && crp; crp= crp->Next) { - cnm= (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name); - typ= crp->Type; - len= crp->Length; - dec= crp->Prec; - flg= crp->Flag; - v= (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var; - tm= (crp->Kdata->IsNullable()) ? 0 : NOT_NULL_FLAG; - - if (!len && typ == TYPE_STRING) - len= 256; // STRBLK's have 0 length - - // Now add the field - if (add_field(&sql, cnm, typ, len, dec, NULL, tm, - NULL, NULL, NULL, NULL, flg, dbf, v)) - rc= HA_ERR_OUT_OF_MEM; - } // endfor crp + case TAB_OEM: + qrp = OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL); + break; + default: + strcpy(g->Message, "System error during assisted discovery"); + break; + } // endswitch ttp + + if (!qrp) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif !qrp + + if (fnc != FNC_NO || src || ttp == TAB_PIVOT) { + // Catalog like table + for (crp = qrp->Colresp; !rc && crp; crp = crp->Next) { + cnm = (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name); + typ = crp->Type; + len = crp->Length; + dec = crp->Prec; + flg = crp->Flag; + v = (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var; + tm = (crp->Kdata->IsNullable()) ? 0 : NOT_NULL_FLAG; + + if (!len && typ == TYPE_STRING) + len = 256; // STRBLK's have 0 length + + // Now add the field + if (add_field(&sql, cnm, typ, len, dec, NULL, tm, + NULL, NULL, NULL, NULL, flg, dbf, v)) + rc = HA_ERR_OUT_OF_MEM; + } // endfor crp - } else { - char *schem= NULL; - char *tn= NULL; + } else { + char *schem = NULL; + char *tn = NULL; - // Not a catalog table - if (!qrp->Nblin) { - if (tab) - sprintf(g->Message, "Cannot get columns from %s", tab); - else - strcpy(g->Message, "Fail to retrieve columns"); + // Not a catalog table + if (!qrp->Nblin) { + if (tab) + sprintf(g->Message, "Cannot get columns from %s", tab); + else + strcpy(g->Message, "Fail to retrieve columns"); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif !nblin - - for (i= 0; !rc && i < qrp->Nblin; i++) { - typ= len= prec= dec= 0; - tm= NOT_NULL_FLAG; - cnm= (char*)"noname"; - dft= xtra= key= fmt= tn= NULL; - v= ' '; - rem= NULL; - - for (crp= qrp->Colresp; crp; crp= crp->Next) - switch (crp->Fld) { - case FLD_NAME: - if (ttp == TAB_PRX || - (ttp == TAB_CSV && topt->data_charset && - (!stricmp(topt->data_charset, "UTF8") || - !stricmp(topt->data_charset, "UTF-8")))) - cnm= crp->Kdata->GetCharValue(i); - else - cnm= encode(g, crp->Kdata->GetCharValue(i)); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif !nblin + + for (i = 0; !rc && i < qrp->Nblin; i++) { + typ = len = prec = dec = 0; + tm = NOT_NULL_FLAG; + cnm = (char*)"noname"; + dft = xtra = key = fmt = tn = NULL; + v = ' '; + rem = NULL; + + for (crp = qrp->Colresp; crp; crp = crp->Next) + switch (crp->Fld) { + case FLD_NAME: + if (ttp == TAB_PRX || + (ttp == TAB_CSV && topt->data_charset && + (!stricmp(topt->data_charset, "UTF8") || + !stricmp(topt->data_charset, "UTF-8")))) + cnm = crp->Kdata->GetCharValue(i); + else + cnm = encode(g, crp->Kdata->GetCharValue(i)); - break; - case FLD_TYPE: - typ= crp->Kdata->GetIntValue(i); - v = (crp->Nulls) ? 
crp->Nulls[i] : 0; - break; + break; + case FLD_TYPE: + typ = crp->Kdata->GetIntValue(i); + v = (crp->Nulls) ? crp->Nulls[i] : 0; + break; case FLD_TYPENAME: - tn= crp->Kdata->GetCharValue(i); + tn = crp->Kdata->GetCharValue(i); break; case FLD_PREC: - // PREC must be always before LENGTH - len= prec= crp->Kdata->GetIntValue(i); - break; - case FLD_LENGTH: - len= crp->Kdata->GetIntValue(i); - break; - case FLD_SCALE: + // PREC must be always before LENGTH + len = prec = crp->Kdata->GetIntValue(i); + break; + case FLD_LENGTH: + len = crp->Kdata->GetIntValue(i); + break; + case FLD_SCALE: dec = (!crp->Kdata->IsNull(i)) ? crp->Kdata->GetIntValue(i) : -1; - break; - case FLD_NULL: - if (crp->Kdata->GetIntValue(i)) - tm= 0; // Nullable + break; + case FLD_NULL: + if (crp->Kdata->GetIntValue(i)) + tm = 0; // Nullable - break; - case FLD_FORMAT: - fmt= (crp->Kdata) ? crp->Kdata->GetCharValue(i) : NULL; - break; - case FLD_REM: - rem= crp->Kdata->GetCharValue(i); - break; -// case FLD_CHARSET: - // No good because remote table is already translated -// if (*(csn= crp->Kdata->GetCharValue(i))) -// cs= get_charset_by_name(csn, 0); - -// break; - case FLD_DEFAULT: - dft= crp->Kdata->GetCharValue(i); - break; - case FLD_EXTRA: - xtra= crp->Kdata->GetCharValue(i); + break; + case FLD_FORMAT: + fmt = (crp->Kdata) ? crp->Kdata->GetCharValue(i) : NULL; + break; + case FLD_REM: + rem = crp->Kdata->GetCharValue(i); + break; + // case FLD_CHARSET: + // No good because remote table is already translated + // if (*(csn= crp->Kdata->GetCharValue(i))) + // cs= get_charset_by_name(csn, 0); + + // break; + case FLD_DEFAULT: + dft = crp->Kdata->GetCharValue(i); + break; + case FLD_EXTRA: + xtra = crp->Kdata->GetCharValue(i); - // Auto_increment is not supported yet - if (!stricmp(xtra, "AUTO_INCREMENT")) - xtra= NULL; + // Auto_increment is not supported yet + if (!stricmp(xtra, "AUTO_INCREMENT")) + xtra = NULL; - break; - case FLD_KEY: - if (ttp == TAB_VIR) - key= crp->Kdata->GetCharValue(i); + break; + case FLD_KEY: + if (ttp == TAB_VIR) + key = crp->Kdata->GetCharValue(i); - break; - case FLD_SCHEM: + break; + case FLD_SCHEM: #if defined(ODBC_SUPPORT) || defined(JDBC_SUPPORT) - if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) { - if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) { - sprintf(g->Message, - "Several %s tables found, specify DBNAME", tab); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } else if (!schem) - schem= crp->Kdata->GetCharValue(i); - - } // endif ttp + if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) { + if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) { + sprintf(g->Message, + "Several %s tables found, specify DBNAME", tab); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } else if (!schem) + schem = crp->Kdata->GetCharValue(i); + + } // endif ttp #endif // ODBC_SUPPORT || JDBC_SUPPORT - default: - break; // Ignore - } // endswitch Fld + default: + break; // Ignore + } // endswitch Fld #if defined(ODBC_SUPPORT) - if (ttp == TAB_ODBC) { - int plgtyp; - bool w= false; // Wide character type - - // typ must be PLG type, not SQL type - if (!(plgtyp= TranslateSQLType(typ, dec, prec, v, w))) { - if (GetTypeConv() == TPC_SKIP) { - // Skip this column - sprintf(g->Message, "Column %s skipped (unsupported type %d)", - cnm, typ); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); - continue; - } else { - sprintf(g->Message, "Unsupported SQL type %d", typ); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - goto err; - } // endif 
type_conv + if (ttp == TAB_ODBC) { + int plgtyp; + bool w = false; // Wide character type + + // typ must be PLG type, not SQL type + if (!(plgtyp = TranslateSQLType(typ, dec, prec, v, w))) { + if (GetTypeConv() == TPC_SKIP) { + // Skip this column + sprintf(g->Message, "Column %s skipped (unsupported type %d)", + cnm, typ); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + continue; + } else { + sprintf(g->Message, "Unsupported SQL type %d", typ); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif type_conv - } else - typ= plgtyp; + } else + typ = plgtyp; - switch (typ) { - case TYPE_STRING: - if (w) { - sprintf(g->Message, "Column %s is wide characters", cnm); - push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message); - } // endif w + switch (typ) { + case TYPE_STRING: + if (w) { + sprintf(g->Message, "Column %s is wide characters", cnm); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message); + } // endif w - break; - case TYPE_DOUBLE: - // Some data sources do not count dec in length (prec) - prec += (dec + 2); // To be safe - break; - case TYPE_DECIM: - prec= len; - break; - default: - dec= 0; - } // endswitch typ + break; + case TYPE_DOUBLE: + // Some data sources do not count dec in length (prec) + prec += (dec + 2); // To be safe + break; + case TYPE_DECIM: + prec = len; + break; + default: + dec = 0; + } // endswitch typ - } else + } else #endif // ODBC_SUPPORT #if defined(JDBC_SUPPORT) if (ttp == TAB_JDBC) { int plgtyp; // typ must be PLG type, not SQL type - if (!(plgtyp= TranslateJDBCType(typ, tn, dec, prec, v))) { + if (!(plgtyp = TranslateJDBCType(typ, tn, dec, prec, v))) { if (GetTypeConv() == TPC_SKIP) { // Skip this column sprintf(g->Message, "Column %s skipped (unsupported type %d)", @@ -5880,55 +5950,73 @@ static int connect_assisted_discovery(handlerton *, THD* thd, continue; } else { sprintf(g->Message, "Unsupported SQL type %d", typ); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + rc = HA_ERR_INTERNAL_ERROR; goto err; } // endif type_conv } else - typ= plgtyp; + typ = plgtyp; switch (typ) { - case TYPE_DOUBLE: - case TYPE_DECIM: - // Some data sources do not count dec in length (prec) - prec += (dec + 2); // To be safe - break; - default: - dec= 0; + case TYPE_DOUBLE: + case TYPE_DECIM: + // Some data sources do not count dec in length (prec) + prec += (dec + 2); // To be safe + break; + default: + dec = 0; } // endswitch typ } else #endif // ODBC_SUPPORT // Make the arguments as required by add_fields - if (typ == TYPE_DOUBLE) - prec= len; + if (typ == TYPE_DOUBLE) + prec = len; - if (typ == TYPE_DATE) - prec= 0; + if (typ == TYPE_DATE) + prec = 0; - // Now add the field - if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra, - fmt, 0, dbf, v)) - rc= HA_ERR_OUT_OF_MEM; - } // endfor i + // Now add the field + if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra, + fmt, 0, dbf, v)) + rc = HA_ERR_OUT_OF_MEM; + } // endfor i - } // endif fnc + } // endif fnc - if (!rc) - rc= init_table_share(thd, table_s, create_info, &sql); - - g->jump_level--; - PopUser(xp); - return rc; - } // endif ok + if (!rc) + rc = init_table_share(thd, table_s, create_info, &sql); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + //g->jump_level--; + //PopUser(xp); + //return rc; + } else { + rc = HA_ERR_UNSUPPORTED; + } // endif ok + +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + rc = HA_ERR_INTERNAL_ERROR; + } catch (const char *msg) { + strcpy(g->Message, 
msg); + rc = HA_ERR_INTERNAL_ERROR; + } // end catch + err: +#else // !USE_TRY err: g->jump_level--; +#endif // !USE_TRY + if (rc) + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + +#if !defined(USE_TRY) jer: +#endif // !USE_TRY PopUser(xp); - return HA_ERR_INTERNAL_ERROR; + return rc; } // end of connect_assisted_discovery /** diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index c1d077406b7..02adff5d2c8 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -1200,7 +1200,11 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) if (rank == 0) if (!name || (jn = env->NewStringUTF(name)) == nullptr) { sprintf(g->Message, "Fail to allocate jstring %s", SVP(name)); +#if defined(USE_TRY) + throw TYPE_AM_JDBC; +#else // !USE_TRY longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); +#endif // !USE_TRY } // endif name // Returns 666 is case of error @@ -1208,7 +1212,11 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) if (Check((ctyp == 666) ? -1 : 1)) { sprintf(g->Message, "Getting ctyp: %s", Msg); +#if defined(USE_TRY) + throw TYPE_AM_JDBC; +#else // !USE_TRY longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); +#endif // !USE_TRY } // endif Check if (val->GetNullable()) @@ -1314,7 +1322,11 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) env->DeleteLocalRef(jn); sprintf(g->Message, "SetColumnValue: %s rank=%d ctyp=%d", Msg, rank, (int)ctyp); +#if defined(USE_TRY) + throw TYPE_AM_JDBC; +#else // !USE_TRY longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); +#endif // !USE_TRY } // endif Check if (rank == 0) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index b473871e9f7..6817439a635 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -60,7 +60,7 @@ char *GetExceptionDesc(PGLOBAL g, unsigned int e); /***********************************************************************/ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) { - int i, rc, pretty = (ptyp) ? *ptyp : 3; + int i, pretty = (ptyp) ? *ptyp : 3; bool b = false, pty[3] = {true, true, true}; PJSON jsp = NULL; STRG src; @@ -82,36 +82,40 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) pty[0] = false; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return NULL; + } // endif jump_level #if defined(SE_CATCH) - // Let's try to recover from any kind of interrupt + // Let's try to recover from any kind of interrupt _se_translator_function f = _set_se_translator(trans_func); try { #endif // SE_CATCH --------------------- try section -------------------- - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - goto err; + if (setjmp(g->jumper[++g->jump_level])) { + goto fin; } // endif rc #if defined(SE_CATCH) // ------------- end of try section ----------------- - } catch (SE_Exception e) { - sprintf(g->Message, "ParseJson: exception doing setjmp: %s (rc=%hd)", - GetExceptionDesc(g, e.nSE), e.nSE); - _set_se_translator(f); - goto err; - } catch (...) 
{ - strcpy(g->Message, "Exception doing setjmp"); - _set_se_translator(f); - goto err; - } // end of try-catches - +} catch (SE_Exception e) { + sprintf(g->Message, "ParseJson: exception doing setjmp: %s (rc=%hd)", + GetExceptionDesc(g, e.nSE), e.nSE); + _set_se_translator(f); + goto err; +} catch (...) { + strcpy(g->Message, "Exception doing setjmp"); _set_se_translator(f); + goto err; +} // end of try-catches + +_set_se_translator(f); #endif // SE_CATCH +#endif // !USE_TRY for (i = 0; i < len; i++) switch (s[i]) { @@ -119,14 +123,22 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) if (jsp) goto tryit; else if (!(jsp = ParseArray(g, ++i, src, pty))) - goto err; +#if defined(USE_TRY) + throw 1; +#else // !USE_TRY + goto fin; +#endif // !USE_TRY break; case '{': if (jsp) goto tryit; else if (!(jsp = ParseObject(g, ++i, src, pty))) - goto err; +#if defined(USE_TRY) + throw 2; +#else // !USE_TRY + goto fin; +#endif // !USE_TRY break; case ' ': @@ -144,7 +156,12 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) } // endif pretty sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); - goto err; +#if defined(USE_TRY) + throw 3; +#else // !USE_TRY + jsp = NULL; + goto fin; +#endif // !USE_TRY case '(': b = true; break; @@ -158,7 +175,11 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) if (jsp) goto tryit; else if (!(jsp = ParseValue(g, i, src, pty))) - goto err; +#if defined(USE_TRY) + throw 4; +#else // !USE_TRY + goto fin; +#endif // !USE_TRY break; }; // endswitch s[i] @@ -176,22 +197,36 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) } // endif ptyp - g->jump_level--; - return jsp; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + jsp = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + jsp = NULL; + } // end catch + + return jsp; +#else // !USE_TRY +fin: + g->jump_level--; + return jsp; +#endif // !USE_TRY tryit: if (pty[0] && (!pretty || pretty > 2)) { if ((jsp = ParseArray(g, (i = 0), src, pty)) && ptyp && pretty == 3) *ptyp = (pty[0]) ? 
0 : 3; +#if !defined(USE_TRY) g->jump_level--; +#endif // !USE_TRY return jsp; } else strcpy(g->Message, "More than one item in file"); -err: - g->jump_level--; - return NULL; + return NULL; } // end of ParseJson /***********************************************************************/ @@ -591,6 +626,9 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) g->Message[0] = 0; +#if defined(USE_TRY) + try { +#else // !USE_TRY // Save stack and allocation environment and prepare error return if (g->jump_level == MAX_JUMP) { strcpy(g->Message, MSG(TOO_MANY_JUMPS)); @@ -601,11 +639,16 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) str = NULL; goto fin; } // endif jmp +#endif // !USE_TRY if (!jsp) { strcpy(g->Message, "Null json tree"); - goto fin; - } else if (!fn) { +#if defined(USE_TRY) + throw 1; +#else // !USE_TRY + goto fin; +#endif // !USE_TRY + } else if (!fn) { // Serialize to a string jp = new(g) JOUTSTR(g); b = pretty == 1; @@ -614,7 +657,11 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) sprintf(g->Message, MSG(OPEN_MODE_ERROR), "w", (int)errno, fn); strcat(strcat(g->Message, ": "), strerror(errno)); - goto fin;; +#if defined(USE_TRY) + throw 2; +#else // !USE_TRY + goto fin; +#endif // !USE_TRY } else if (pretty >= 2) { // Serialize to a pretty file jp = new(g)JOUTPRT(g, fs); @@ -655,8 +702,19 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) } // endif's +#if defined(USE_TRY) +} catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + str = NULL; +} catch (const char *msg) { + strcpy(g->Message, msg); + str = NULL; +} // end catch +#else // !USE_TRY fin: g->jump_level--; +#endif // !USE_TRY return str; } // end of Serialize diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 0f693c3c0d6..9ea391edae9 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1104,7 +1104,7 @@ static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, } // endif g g->Mrr = (args->arg_count && args->args[0]) ? 
1 : 0; - g->ActivityStart = (PACTIVITY)more; + g->More = more; initid->maybe_null = mbn; initid->max_length = reslen; initid->ptr = (char*)g; @@ -1449,7 +1449,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, } // endif b - ml += (unsigned long)g->ActivityStart; // more + ml += g->More; if (ml > g->Sarea_Size) { free(g->Sarea); @@ -2914,7 +2914,6 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *) { char *p, *path, *str = NULL; - int rc; PJSON jsp; PJSNX jsx; PJVAL jvp; @@ -2926,6 +2925,9 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, } else if (initid->const_item) g->N = 1; +#if defined(USE_TRY) + try { +#else // !USE_TRY // Save stack and allocation environment and prepare error return if (g->jump_level == MAX_JUMP) { PUSH_WARNING(MSG(TOO_MANY_JUMPS)); @@ -2933,11 +2935,12 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, return NULL; } // endif jump_level - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { + if (setjmp(g->jumper[++g->jump_level])) { PUSH_WARNING(g->Message); str = NULL; goto err; } // endif rc +#endif // !USE_TRY if (!g->Xchk) { if (CheckMemory(g, initid, args, 1, true)) { @@ -2980,8 +2983,23 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, // Keep result of constant function g->Activityp = (PACTIVITY)str; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + str = NULL; + } // end catch + + err: +#else // !USE_TRY err: g->jump_level--; +#endif // !USE_TRY fin: if (!str) { @@ -3254,7 +3272,7 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { char *p, *path = NULL; - int k, rc; + int k; PJVAL jvp, jvp2; PJSON jsp; PJSNX jsx; @@ -3274,6 +3292,9 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, } else if (initid->const_item) g->N = 1; +#if defined(USE_TRY) + try { +#else // !USE_TRY // Save stack and allocation environment and prepare error return if (g->jump_level == MAX_JUMP) { PUSH_WARNING(MSG(TOO_MANY_JUMPS)); @@ -3282,12 +3303,13 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, return NULL; } // endif jump_level - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { + if (setjmp(g->jumper[++g->jump_level])) { PUSH_WARNING(g->Message); *error = 1; path = NULL; goto err; } // endif rc +#endif // !USE_TRY if (!g->Xchk) { if (CheckMemory(g, initid, args, 1, !g->Xchk)) { @@ -3326,8 +3348,25 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, // Keep result of constant function g->Activityp = (PACTIVITY)path; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + + err: +#else // !USE_TRY err: g->jump_level--; +#endif // !USE_TRY if (!path) { *res_length = 0; @@ -3379,7 +3418,7 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { char *p, *path = NULL; - int rc, mx = 10; + int mx = 10; PJVAL jvp, jvp2; PJSON jsp; PJSNX jsx; @@ -3400,6 +3439,9 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS 
*args, char *result, } else if (initid->const_item) g->N = 1; +#if defined(USE_TRY) + try { +#else // !USE_TRY // Save stack and allocation environment and prepare error return if (g->jump_level == MAX_JUMP) { PUSH_WARNING(MSG(TOO_MANY_JUMPS)); @@ -3408,12 +3450,13 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result, return NULL; } // endif jump_level - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { + if (setjmp(g->jumper[++g->jump_level])) { PUSH_WARNING(g->Message); *error = 1; path = NULL; goto err; } // endif rc +#endif // !USE_TRY if (!g->Xchk) { if (CheckMemory(g, initid, args, 1, true)) { @@ -3453,8 +3496,25 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result, // Keep result of constant function g->Activityp = (PACTIVITY)path; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + + err: +#else // !USE_TRY err: g->jump_level--; +#endif // !USE_TRY if (!path) { *res_length = 0; @@ -3637,7 +3697,7 @@ char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { char *p, *path, *str = NULL; - int w, rc; + int w; my_bool b = true; PJSON jsp; PJSNX jsx; @@ -3659,33 +3719,45 @@ char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, w = 2; else { PUSH_WARNING("Logical error, please contact CONNECT developer"); - goto err; + goto fin; } // endelse - // Save stack and allocation environment and prepare error return +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return if (g->jump_level == MAX_JUMP) { PUSH_WARNING(MSG(TOO_MANY_JUMPS)); *error = 1; goto fin; } // endif jump_level - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { + if (setjmp(g->jumper[++g->jump_level])) { PUSH_WARNING(g->Message); str = NULL; goto err; } // endif rc +#endif // !USE_TRY if (!g->Xchk) { if (CheckMemory(g, initid, args, 1, true, false, true)) { PUSH_WARNING("CheckMemory error"); +#if defined(USE_TRY) + throw 1; +#else // !USE_TRY goto err; +#endif // !USE_TRY } else jvp = MakeValue(g, args, 0); if ((p = jvp->GetString())) { if (!(jsp = ParseJson(g, p, strlen(p)))) { +#if defined(USE_TRY) + throw 2; +#else // !USE_TRY PUSH_WARNING(g->Message); goto err; +#endif // !USE_TRY } // endif jsp } else @@ -3729,8 +3801,21 @@ char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, // Keep result of constant function g->Activityp = (PACTIVITY)str; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + str = NULL; + } // end catch +#else // !USE_TRY err: g->jump_level--; +#endif // !USE_TRY fin: if (!str) { diff --git a/storage/connect/mysql-test/connect/r/jdbc_new.result b/storage/connect/mysql-test/connect/r/jdbc_new.result index 5cc4826213d..6f977166598 100644 --- a/storage/connect/mysql-test/connect/r/jdbc_new.result +++ b/storage/connect/mysql-test/connect/r/jdbc_new.result @@ -14,9 +14,7 @@ NULL NULL SET GLOBAL time_zone='+1:00'; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/test?user=unknown'; -SELECT * FROM t1; -ERROR HY000: Got error 174 'Connecting: java.sql.SQLException: 
Access denied for user 'unknown'@'localhost' (using password: NO) rc=-2' from CONNECT -DROP TABLE t1; +ERROR HY000: Connecting: java.sql.SQLException: Access denied for user 'unknown'@'localhost' (using password: NO) rc=-2 CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mysql://127.0.0.1:SLAVE_PORT/unknown?user=root'; ERROR HY000: Connecting: java.sql.SQLSyntaxErrorException: Unknown database 'unknown' rc=-2 diff --git a/storage/connect/mysql-test/connect/t/jdbc_new.test b/storage/connect/mysql-test/connect/t/jdbc_new.test index 5586cf8c027..86c4ad57c5f 100644 --- a/storage/connect/mysql-test/connect/t/jdbc_new.test +++ b/storage/connect/mysql-test/connect/t/jdbc_new.test @@ -24,11 +24,9 @@ SET GLOBAL time_zone='+1:00'; # Bad user name # Suppress "mysql_real_connect failed:" (printed in _DEBUG build) --replace_result $SLAVE_MYPORT SLAVE_PORT "mysql_real_connect failed: " "" +--error ER_UNKNOWN_ERROR eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:mysql://127.0.0.1:$SLAVE_MYPORT/test?user=unknown'; ---error ER_GET_ERRMSG -SELECT * FROM t1; -DROP TABLE t1; # Bad database name --replace_result $SLAVE_MYPORT SLAVE_PORT "mysql_real_connect failed: " "" diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp index 433e392eace..cf55846765f 100644 --- a/storage/connect/odbconn.cpp +++ b/storage/connect/odbconn.cpp @@ -1,7 +1,7 @@ -/************ Odbconn C++ Functions Source Code File (.CPP) ************/ -/* Name: ODBCONN.CPP Version 2.2 */ +/***********************************************************************/ +/* Name: ODBCONN.CPP Version 2.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* This file contains the ODBC connection classes functions. 
*/ /***********************************************************************/ @@ -249,17 +249,21 @@ static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, char *db, assert(qrp); #endif - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); - cap = NULL; - goto fin; - } // endif rc +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return NULL; + } // endif jump_level + + if (setjmp(g->jumper[++g->jump_level]) != 0) { + printf("%s\n", g->Message); + cap = NULL; + goto fin; + } // endif rc +#endif // !USE_TRY m = (size_t)qrp->Maxres; n = (size_t)qrp->Nbcol; @@ -276,9 +280,20 @@ static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, char *db, cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD)); +#if defined(USE_TRY) +} catch (int n) { + htrc("Exeption %d: %s\n", n, g->Message); + cap = NULL; +} catch (const char *msg) { + htrc(g->Message, msg); + printf("%s\n", g->Message); + cap = NULL; +} // end catch +#else // !USE_TRY fin: - g->jump_level--; - return cap; + g->jump_level--; +#endif // !USE_TRY + return cap; } // end of AllocCatInfo #if 0 diff --git a/storage/connect/osutil.c b/storage/connect/osutil.c index 2e9e120b0c8..66743c7403b 100644 --- a/storage/connect/osutil.c +++ b/storage/connect/osutil.c @@ -43,34 +43,6 @@ PSZ strlwr(PSZ p) return (p); } /* end of strlwr */ -#if defined(NOT_USED) /*&& !defined(sun) && !defined(LINUX) && !defined(AIX)*/ -/***********************************************************************/ -/* Define stricmp function not existing in some UNIX libraries. */ -/***********************************************************************/ -int stricmp(char *str1, char *str2) - { - register int i; - int n; - char c; - char *sup1 = malloc(strlen(str1) + 1); - char *sup2 = malloc(strlen(str2) + 1); - - for (i = 0; c = str1[i]; i++) - sup1[i] = toupper(c); - - sup1[i] = 0; - - for (i = 0; c = str2[i]; i++) - sup2[i] = toupper(c); - - sup2[i] = 0; - n = strcmp(sup1, sup2); - free(sup1); - free(sup2); - return (n); - } /* end of stricmp */ -#endif /* sun */ - /***********************************************************************/ /* Define the splitpath function not existing in the UNIX library. */ /***********************************************************************/ @@ -143,13 +115,6 @@ my_bool CloseFileHandle(HANDLE h) return (close(h)) ? TRUE : FALSE; } /* end of CloseFileHandle */ -#if 0 -void Sleep(DWORD time) - { - //FIXME: TODO - } /* end of Sleep */ -#endif - int GetLastError() { return errno; @@ -210,21 +175,4 @@ BOOL MessageBeep(uint i __attribute__((unused))) return TRUE; } /* end of MessageBeep */ -#if 0 -/* This function is ridiculous and should be revisited */ -DWORD FormatMessage(DWORD dwFlags, LPCVOID lpSource, DWORD dwMessageId, - DWORD dwLanguageId, LPSTR lpBuffer, DWORD nSize, ...) 
- { - char buff[32]; - int n; - -//if (dwFlags & FORMAT_MESSAGE_ALLOCATE_BUFFER) -// return 0; /* means error */ - - n = sprintf(buff, "Error code: %d", (int) dwMessageId); - strncpy(lpBuffer, buff, nSize); - return min(n, nSize); - } /* end of FormatMessage */ -#endif - #endif // UNIX diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index 1910cdcdec8..299332f51b4 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -244,17 +244,21 @@ PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, PCOLRES *pcrp, crp; PQRYRES qrp; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); - qrp = NULL; - goto fin; - } // endif rc +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return NULL; + } // endif jump_level + + if (setjmp(g->jumper[++g->jump_level]) != 0) { + printf("%s\n", g->Message); + qrp = NULL; + goto fin; + } // endif rc +#endif // !USE_TRY /************************************************************************/ /* Allocate the structure used to contain the result set. */ @@ -316,9 +320,20 @@ PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, *pcrp = NULL; +#if defined(USE_TRY) + } catch (int n) { + htrc("Exception %d: %s\n", n, g->Message); + qrp = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + htrc("%s\n", g->Message); + qrp = NULL; + } // end catch +#else // !USE_TRY fin: - g->jump_level--; - return qrp; + g->jump_level--; +#endif // !USE_TRY + return qrp; } // end of PlgAllocResult /***********************************************************************/ @@ -365,8 +380,12 @@ PCATLG PlgGetCatalog(PGLOBAL g, bool jump) if (!cat && jump) { // Raise exception so caller doesn't have to check return value strcpy(g->Message, MSG(NO_ACTIVE_DB)); - longjmp(g->jumper[g->jump_level], 1); - } // endif cat +#if defined(USE_TRY) + throw 1; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 1); +#endif // !USE_TRY + } // endif cat return cat; } // end of PlgGetCatalog @@ -476,8 +495,12 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) tp = g->Message; else if (!(tp = new char[strlen(pat) + strlen(strg) + 2])) { strcpy(g->Message, MSG(NEW_RETURN_NULL)); - longjmp(g->jumper[g->jump_level], OP_LIKE); - } /* endif tp */ +#if defined(USE_TRY) + throw OP_LIKE; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], OP_LIKE); +#endif // !USE_TRY + } /* endif tp */ sp = tp + strlen(pat) + 1; strlwr(strcpy(tp, pat)); /* Make a lower case copy of pat */ @@ -487,8 +510,12 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) tp = g->Message; /* Use this as temporary work space. 
*/ else if (!(tp = new char[strlen(pat) + 1])) { strcpy(g->Message, MSG(NEW_RETURN_NULL)); - longjmp(g->jumper[g->jump_level], OP_LIKE); - } /* endif tp */ +#if defined(USE_TRY) + throw OP_LIKE; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], OP_LIKE); +#endif // !USE_TRY + } /* endif tp */ strcpy(tp, pat); /* Make a copy to be worked into */ sp = (char*)strg; @@ -1520,8 +1547,12 @@ DllExport void NewPointer(PTABS t, void *oldv, void *newv) PGLOBAL g = t->G; sprintf(g->Message, "NewPointer: %s", MSG(MEM_ALLOC_ERROR)); - longjmp(g->jumper[g->jump_level], 3); - } else { +#if defined(USE_TRY) + throw 3; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 3); +#endif // !USE_TRY + } else { tp->Next = t->P1; tp->Num = 0; t->P1 = tp; @@ -1557,15 +1588,23 @@ int FileComp(PGLOBAL g, char *file1, char *file2) sprintf(g->Message, MSG(OPEN_MODE_ERROR), "rb", (int)errno, fn[i]); strcat(strcat(g->Message, ": "), strerror(errno)); - longjmp(g->jumper[g->jump_level], 666); -// } else +#if defined(USE_TRY) + throw 666; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 666); +#endif // !USE_TRY + // } else // len[i] = 0; // File does not exist yet } else { if ((len[i] = _filelength(h[i])) < 0) { sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", fn[i]); - longjmp(g->jumper[g->jump_level], 666); - } // endif len +#if defined(USE_TRY) + throw 666; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 666); +#endif // !USE_TRY + } // endif len } // endif h diff --git a/storage/connect/plugutil.c b/storage/connect/plugutil.c deleted file mode 100644 index bfac8a5fd99..00000000000 --- a/storage/connect/plugutil.c +++ /dev/null @@ -1,592 +0,0 @@ -/************** PlugUtil C Program Source Code File (.C) ***************/ -/* */ -/* PROGRAM NAME: PLUGUTIL */ -/* ------------- */ -/* Version 2.9 */ -/* */ -/* COPYRIGHT: */ -/* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1993-2015 */ -/* */ -/* WHAT THIS PROGRAM DOES: */ -/* ----------------------- */ -/* This program are initialization and utility Plug routines. */ -/* */ -/* WHAT YOU NEED TO COMPILE THIS PROGRAM: */ -/* -------------------------------------- */ -/* */ -/* REQUIRED FILES: */ -/* --------------- */ -/* See Readme.C for a list and description of required SYSTEM files. */ -/* */ -/* PLUG.C - Source code */ -/* GLOBAL.H - Global declaration file */ -/* OPTION.H - Option declaration file */ -/* */ -/* REQUIRED LIBRARIES: */ -/* ------------------- */ -/* */ -/* OS2.LIB - OS2 libray */ -/* LLIBCE.LIB - Protect mode/standard combined large model C */ -/* library */ -/* */ -/* REQUIRED PROGRAMS: */ -/* ------------------ */ -/* */ -/* IBM C Compiler */ -/* IBM Linker */ -/* */ -/***********************************************************************/ - -/***********************************************************************/ -/* */ -/* Include relevant MariaDB header file. */ -/* */ -/***********************************************************************/ -#include "my_global.h" -#if defined(__WIN__) -//#include -#else -#if defined(UNIX) || defined(UNIV_LINUX) -#include -#include -//#define __stdcall -#else -#include -#endif -#include -#endif - -#if defined(WIN) -#include -#endif -#include /* definitions of ERANGE ENOMEM */ -#if !defined(UNIX) && !defined(UNIV_LINUX) -#include /* Directory management library */ -#endif - -/***********************************************************************/ -/* */ -/* Include application header files */ -/* */ -/* global.h is header containing all global declarations. 
*/ -/* */ -/***********************************************************************/ -#define STORAGE /* Initialize global variables */ - -#include "osutil.h" -#include "global.h" -#if defined(NEWMSG) -#include "rcmsg.h" -#endif // NEWMSG - -#if defined(__WIN__) -extern HINSTANCE s_hModule; /* Saved module handle */ -#endif // __WIN__ - -#if defined(XMSG) -extern char *msg_path; -char *msglang(void); -#endif // XMSG - -/***********************************************************************/ -/* Local Definitions and static variables */ -/***********************************************************************/ -typedef struct { - ushort Segsize; - ushort Size; - } AREASIZE; - -ACTIVITY defActivity = { /* Describes activity and language */ - NULL, /* Points to user work area(s) */ - "Unknown"}; /* Application name */ - -#if defined(XMSG) || defined(NEWMSG) - static char stmsg[200]; -#endif // XMSG || NEWMSG - -#if defined(UNIX) || defined(UNIV_LINUX) -#include "rcmsg.h" -#endif // UNIX - -/**************************************************************************/ -/* Tracing output function. */ -/**************************************************************************/ -void htrc(char const *fmt, ...) - { - va_list ap; - va_start (ap, fmt); - - -//if (trace == 1) -// vfprintf(debug, fmt, ap); -//else - vfprintf(stderr, fmt, ap); - - va_end (ap); - } // end of htrc - -/***********************************************************************/ -/* Plug initialization routine. */ -/* Language points on initial language name and eventual path. */ -/* Return value is the pointer to the Global structure. */ -/***********************************************************************/ -PGLOBAL PlugInit(LPCSTR Language, uint worksize) - { - PGLOBAL g; - - if (trace > 1) - htrc("PlugInit: Language='%s'\n", - ((!Language) ? "Null" : (char*)Language)); - - if (!(g = malloc(sizeof(GLOBAL)))) { - fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL)); - return NULL; - } else { - g->Sarea = NULL; - g->Createas = 0; - g->Alchecked = 0; - g->Mrr = 0; - g->Activityp = g->ActivityStart = NULL; - g->Xchk = NULL; - g->N = 0; - strcpy(g->Message, ""); - - /*******************************************************************/ - /* Allocate the main work segment. */ - /*******************************************************************/ - if (worksize && !(g->Sarea = PlugAllocMem(g, worksize))) { - char errmsg[256]; - sprintf(errmsg, MSG(WORK_AREA), g->Message); - strcpy(g->Message, errmsg); - g->Sarea_Size = 0; - } else - g->Sarea_Size = worksize; - - } /* endif g */ - - g->jump_level = -1; /* New setting to allow recursive call of Plug */ - return(g); - } /* end of PlugInit */ - -/***********************************************************************/ -/* PlugExit: Terminate Plug operations. */ -/***********************************************************************/ -int PlugExit(PGLOBAL g) - { - int rc = 0; - - if (!g) - return rc; - - if (g->Sarea) - free(g->Sarea); - - free(g); - return rc; - } /* end of PlugExit */ - -/***********************************************************************/ -/* Remove the file type from a file name. */ -/* Note: this routine is not really implemented for Unix. 
*/ -/***********************************************************************/ -LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName) - { -#if defined(__WIN__) - char drive[_MAX_DRIVE]; -#else - char *drive = NULL; -#endif - char direc[_MAX_DIR]; - char fname[_MAX_FNAME]; - char ftype[_MAX_EXT]; - - _splitpath(FileName, drive, direc, fname, ftype); - - if (trace > 1) { - htrc("after _splitpath: FileName=%s\n", FileName); - htrc("drive=%s dir=%s fname=%s ext=%s\n", - SVP(drive), direc, fname, ftype); - } // endif trace - - _makepath(pBuff, drive, direc, fname, ""); - - if (trace > 1) - htrc("buff='%s'\n", pBuff); - - return pBuff; - } // end of PlugRemoveType - - -BOOL PlugIsAbsolutePath(LPCSTR path) -{ -#if defined(__WIN__) - return ((path[0] >= 'a' && path[0] <= 'z') || - (path[0] >= 'A' && path[0] <= 'Z')) && path[1] == ':'; -#else - return path[0] == '/'; -#endif -} - -/***********************************************************************/ -/* Set the full path of a file relatively to a given path. */ -/* Note: this routine is not really implemented for Unix. */ -/***********************************************************************/ -LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) - { - char newname[_MAX_PATH]; - char direc[_MAX_DIR], defdir[_MAX_DIR], tmpdir[_MAX_DIR]; - char fname[_MAX_FNAME]; - char ftype[_MAX_EXT]; -#if defined(__WIN__) - char drive[_MAX_DRIVE], defdrv[_MAX_DRIVE]; -#else - char *drive = NULL, *defdrv = NULL; -#endif - - if (trace > 1) - htrc("prefix=%s fn=%s path=%s\n", prefix, FileName, defpath); - - if (!strncmp(FileName, "//", 2) || !strncmp(FileName, "\\\\", 2)) { - strcpy(pBuff, FileName); // Remote file - return pBuff; - } // endif - - if (PlugIsAbsolutePath(FileName)) - { - strcpy(pBuff, FileName); // FileName includes absolute path - return pBuff; - } // endif - -#if !defined(__WIN__) - if (*FileName == '~') { - if (_fullpath(pBuff, FileName, _MAX_PATH)) { - if (trace > 1) - htrc("pbuff='%s'\n", pBuff); - - return pBuff; - } else - return FileName; // Error, return unchanged name - - } // endif FileName -#endif // !__WIN__ - - if (prefix && strcmp(prefix, ".") && !PlugIsAbsolutePath(defpath)) - { - char tmp[_MAX_PATH]; - int len= snprintf(tmp, sizeof(tmp) - 1, "%s%s%s", - prefix, defpath, FileName); - memcpy(pBuff, tmp, (size_t) len); - pBuff[len]= '\0'; - return pBuff; - } - - _splitpath(FileName, drive, direc, fname, ftype); - - if (defpath) { - char c = defpath[strlen(defpath) - 1]; - - strcpy(tmpdir, defpath); - - if (c != '/' && c != '\\') - strcat(tmpdir, "/"); - - } else - strcpy(tmpdir, "./"); - - _splitpath(tmpdir, defdrv, defdir, NULL, NULL); - - if (trace > 1) { - htrc("after _splitpath: FileName=%s\n", FileName); -#if defined(__WIN__) - htrc("drive=%s dir=%s fname=%s ext=%s\n", drive, direc, fname, ftype); - htrc("defdrv=%s defdir=%s\n", defdrv, defdir); -#else - htrc("dir=%s fname=%s ext=%s\n", direc, fname, ftype); -#endif - } // endif trace - - if (drive && !*drive) - strcpy(drive, defdrv); - - switch (*direc) { - case '\0': - strcpy(direc, defdir); - break; - case '\\': - case '/': - break; - default: - // This supposes that defdir ends with a SLASH - strcpy(direc, strcat(defdir, direc)); - } // endswitch - - _makepath(newname, drive, direc, fname, ftype); - - if (trace > 1) - htrc("newname='%s'\n", newname); - - if (_fullpath(pBuff, newname, _MAX_PATH)) { - if (trace > 1) - htrc("pbuff='%s'\n", pBuff); - - return pBuff; - } else - return FileName; // Error, return unchanged name - - } // end of PlugSetPath - 
-#if defined(XMSG) -/***********************************************************************/ -/* PlugGetMessage: get a message from the message file. */ -/***********************************************************************/ -char *PlugReadMessage(PGLOBAL g, int mid, char *m) - { - char msgfile[_MAX_PATH], msgid[32], buff[256]; - char *msg; - FILE *mfile = NULL; - -//GetPrivateProfileString("Message", msglang, "Message\\english.msg", -// msgfile, _MAX_PATH, plgini); -//strcat(strcat(strcpy(msgfile, msg_path), msglang()), ".msg"); - strcat(strcpy(buff, msglang()), ".msg"); - PlugSetPath(msgfile, NULL, buff, msg_path); - - if (!(mfile = fopen(msgfile, "rt"))) { - sprintf(stmsg, "Fail to open message file %s", msgfile); - goto err; - } // endif mfile - - for (;;) - if (!fgets(buff, 256, mfile)) { - sprintf(stmsg, "Cannot get message %d %s", mid, SVP(m)); - goto fin; - } else - if (atoi(buff) == mid) - break; - - if (sscanf(buff, " %*d %s \"%[^\"]", msgid, stmsg) < 2) { - // Old message file - if (!sscanf(buff, " %*d \"%[^\"]", stmsg)) { - sprintf(stmsg, "Bad message file for %d %s", mid, SVP(m)); - goto fin; - } else - m = NULL; - - } // endif sscanf - - if (m && strcmp(m, msgid)) { - // Message file is out of date - strcpy(stmsg, m); - goto fin; - } // endif m - - fin: - fclose(mfile); - - err: - if (g) { - // Called by STEP - msg = PlugDup(g, stmsg); - } else // Called by MSG or PlgGetErrorMsg - msg = stmsg; - - return msg; - } // end of PlugReadMessage - -#elif defined(NEWMSG) -/***********************************************************************/ -/* PlugGetMessage: get a message from the resource string table. */ -/***********************************************************************/ -char *PlugGetMessage(PGLOBAL g, int mid) - { - char *msg; - -#if 0 // was !defined(UNIX) && !defined(UNIV_LINUX) - int n = LoadString(s_hModule, (uint)mid, (LPTSTR)stmsg, 200); - - if (n == 0) { - DWORD rc = GetLastError(); - msg = (char*)PlugSubAlloc(g, NULL, 512); // Extend buf allocation - n = sprintf(msg, "Message %d, rc=%d: ", mid, rc); - FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, - (LPTSTR)(msg + n), 512 - n, NULL); - return msg; - } // endif n - -#else // ALL - if (!GetRcString(mid, stmsg, 200)) - sprintf(stmsg, "Message %d not found", mid); -#endif // ALL - - if (g) { - // Called by STEP - msg = PlugDup(g, stmsg); - } else // Called by MSG or PlgGetErrorMsg - msg = stmsg; - - return msg; - } // end of PlugGetMessage -#endif // NEWMSG - -#if defined(__WIN__) -/***********************************************************************/ -/* Return the line length of the console screen buffer. */ -/***********************************************************************/ -short GetLineLength(PGLOBAL g) - { - CONSOLE_SCREEN_BUFFER_INFO coninfo; - HANDLE hcons = GetStdHandle(STD_OUTPUT_HANDLE); - BOOL b = GetConsoleScreenBufferInfo(hcons, &coninfo); - - return (b) ? coninfo.dwSize.X : 0; - } // end of GetLineLength -#endif // __WIN__ - -/***********************************************************************/ -/* Program for memory allocation of work and language areas. */ -/***********************************************************************/ -void *PlugAllocMem(PGLOBAL g, uint size) - { - void *areap; /* Pointer to allocated area */ - - /*********************************************************************/ - /* This is the allocation routine for the WIN32/UNIX/AIX version. 
*/ - /*********************************************************************/ - if (!(areap = malloc(size))) - sprintf(g->Message, MSG(MALLOC_ERROR), "malloc"); - - if (trace > 1) { - if (areap) - htrc("Memory of %u allocated at %p\n", size, areap); - else - htrc("PlugAllocMem: %s\n", g->Message); - - } // endif trace - - return (areap); - } /* end of PlugAllocMem */ - -/***********************************************************************/ -/* Program for SubSet initialization of memory pools. */ -/* Here there should be some verification done such as validity of */ -/* the address and size not larger than memory size. */ -/***********************************************************************/ -BOOL PlugSubSet(PGLOBAL g __attribute__((unused)), void *memp, uint size) - { - PPOOLHEADER pph = memp; - - pph->To_Free = (OFFSET)sizeof(POOLHEADER); - pph->FreeBlk = size - pph->To_Free; - - return FALSE; - } /* end of PlugSubSet */ - -/***********************************************************************/ -/* Program for sub-allocating one item in a storage area. */ -/* Note: SubAlloc routines of OS/2 are no more used to increase the */ -/* code portability and avoid problems when a grammar compiled under */ -/* one version of OS/2 is used under another version. */ -/* The simple way things are done here is also based on the fact */ -/* that no freeing of suballocated blocks is permitted in Plug. */ -/***********************************************************************/ -void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) - { - PPOOLHEADER pph; /* Points on area header. */ - - if (!memp) - /*******************************************************************/ - /* Allocation is to be done in the Sarea. */ - /*******************************************************************/ - memp = g->Sarea; - -//size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */ - size = ((size + 7) / 8) * 8; /* Round up size to multiple of 8 */ - pph = (PPOOLHEADER)memp; - - if (trace > 3) - htrc("SubAlloc in %p size=%d used=%d free=%d\n", - memp, size, pph->To_Free, pph->FreeBlk); - - if ((uint)size > pph->FreeBlk) { /* Not enough memory left in pool */ - char *pname = "Work"; - - sprintf(g->Message, - "Not enough memory in %s area for request of %u (used=%d free=%d)", - pname, (uint) size, pph->To_Free, pph->FreeBlk); - - if (trace) - htrc("PlugSubAlloc: %s\n", g->Message); - - /* Nothing we can do if longjmp is not initialized. */ - assert(g->jump_level >= 0); - longjmp(g->jumper[g->jump_level], 1); - } /* endif size OS32 code */ - - /*********************************************************************/ - /* Do the suballocation the simplest way. */ - /*********************************************************************/ - memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */ - pph->To_Free += size; /* New offset of pool free block */ - pph->FreeBlk -= size; /* New size of pool free block */ - - if (trace > 3) - htrc("Done memp=%p used=%d free=%d\n", - memp, pph->To_Free, pph->FreeBlk); - - return (memp); - } /* end of PlugSubAlloc */ - -/***********************************************************************/ -/* Program for sub-allocating and copying a string in a storage area. 
*/ -/***********************************************************************/ -char *PlugDup(PGLOBAL g, const char *str) - { - if (str) { - char *sm = (char*)PlugSubAlloc(g, NULL, strlen(str) + 1); - - strcpy(sm, str); - return sm; - } else - return NULL; - - } // end of PlugDup - -#if 0 -/***********************************************************************/ -/* This routine suballocate a copy of the passed string. */ -/***********************************************************************/ -char *PlugDup(PGLOBAL g, const char *str) - { - char *buf; - size_t len; - - if (str && (len = strlen(str))) { - buf = (char*)PlugSubAlloc(g, NULL, len + 1); - strcpy(buf, str); - } else - buf = NULL; - - return(buf); - } /* end of PlugDup */ -#endif // 0 - -/***********************************************************************/ -/* This routine makes a pointer from an offset to a memory pointer. */ -/***********************************************************************/ -void *MakePtr(void *memp, OFFSET offset) - { - return ((offset == 0) ? NULL : &((char *)memp)[offset]); - } /* end of MakePtr */ - -/***********************************************************************/ -/* This routine makes an offset from a pointer new format. */ -/***********************************************************************/ -#if 0 -OFFSET MakeOff(void *memp, void *ptr) - { - return ((!ptr) ? 0 : (OFFSET)((char *)ptr - (char *)memp)); - } /* end of MakeOff */ -#endif -/*--------------------- End of PLUGUTIL program -----------------------*/ diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp new file mode 100644 index 00000000000..d34b43a63d2 --- /dev/null +++ b/storage/connect/plugutil.cpp @@ -0,0 +1,596 @@ +/************** PlugUtil C Program Source Code File (.C) ***************/ +/* */ +/* PROGRAM NAME: PLUGUTIL */ +/* ------------- */ +/* Version 2.9 */ +/* */ +/* COPYRIGHT: */ +/* ---------- */ +/* (C) Copyright to the author Olivier BERTRAND 1993-2015 */ +/* */ +/* WHAT THIS PROGRAM DOES: */ +/* ----------------------- */ +/* This program are initialization and utility Plug routines. */ +/* */ +/* WHAT YOU NEED TO COMPILE THIS PROGRAM: */ +/* -------------------------------------- */ +/* */ +/* REQUIRED FILES: */ +/* --------------- */ +/* See Readme.C for a list and description of required SYSTEM files. */ +/* */ +/* PLUG.C - Source code */ +/* GLOBAL.H - Global declaration file */ +/* OPTION.H - Option declaration file */ +/* */ +/* REQUIRED LIBRARIES: */ +/* ------------------- */ +/* */ +/* OS2.LIB - OS2 libray */ +/* LLIBCE.LIB - Protect mode/standard combined large model C */ +/* library */ +/* */ +/* REQUIRED PROGRAMS: */ +/* ------------------ */ +/* */ +/* IBM C Compiler */ +/* IBM Linker */ +/* */ +/***********************************************************************/ + +/***********************************************************************/ +/* */ +/* Include relevant MariaDB header file. 
*/ +/* */ +/***********************************************************************/ +#include "my_global.h" +#if defined(__WIN__) +//#include +#else +#if defined(UNIX) || defined(UNIV_LINUX) +#include +#include +//#define __stdcall +#else +#include +#endif +#include +#endif + +#if defined(WIN) +#include +#endif +#include /* definitions of ERANGE ENOMEM */ +#if !defined(UNIX) && !defined(UNIV_LINUX) +#include /* Directory management library */ +#endif + +/***********************************************************************/ +/* */ +/* Include application header files */ +/* */ +/* global.h is header containing all global declarations. */ +/* */ +/***********************************************************************/ +#define STORAGE /* Initialize global variables */ + +#include "osutil.h" +#include "global.h" +#if defined(NEWMSG) +#include "rcmsg.h" +#endif // NEWMSG + +#if defined(__WIN__) +extern HINSTANCE s_hModule; /* Saved module handle */ +#endif // __WIN__ + +#if defined(XMSG) +extern char *msg_path; +char *msglang(void); +#endif // XMSG + +/***********************************************************************/ +/* Local Definitions and static variables */ +/***********************************************************************/ +typedef struct { + ushort Segsize; + ushort Size; + } AREASIZE; + +ACTIVITY defActivity = { /* Describes activity and language */ + NULL, /* Points to user work area(s) */ + "Unknown"}; /* Application name */ + +#if defined(XMSG) || defined(NEWMSG) + static char stmsg[200]; +#endif // XMSG || NEWMSG + +#if defined(UNIX) || defined(UNIV_LINUX) +#include "rcmsg.h" +#endif // UNIX + +/**************************************************************************/ +/* Tracing output function. */ +/**************************************************************************/ +void htrc(char const *fmt, ...) + { + va_list ap; + va_start (ap, fmt); + + +//if (trace == 1) +// vfprintf(debug, fmt, ap); +//else + vfprintf(stderr, fmt, ap); + + va_end (ap); + } // end of htrc + +/***********************************************************************/ +/* Plug initialization routine. */ +/* Language points on initial language name and eventual path. */ +/* Return value is the pointer to the Global structure. */ +/***********************************************************************/ +PGLOBAL PlugInit(LPCSTR Language, uint worksize) + { + PGLOBAL g; + + if (trace > 1) + htrc("PlugInit: Language='%s'\n", + ((!Language) ? "Null" : (char*)Language)); + + if (!(g = (PGLOBAL)malloc(sizeof(GLOBAL)))) { + fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL)); + return NULL; + } else { + g->Sarea = NULL; + g->Createas = 0; + g->Alchecked = 0; + g->Mrr = 0; + g->Activityp = NULL; + g->Xchk = NULL; + g->N = 0; + g->More = 0; + strcpy(g->Message, ""); + + /*******************************************************************/ + /* Allocate the main work segment. */ + /*******************************************************************/ + if (worksize && !(g->Sarea = PlugAllocMem(g, worksize))) { + char errmsg[256]; + sprintf(errmsg, MSG(WORK_AREA), g->Message); + strcpy(g->Message, errmsg); + g->Sarea_Size = 0; + } else + g->Sarea_Size = worksize; + + } /* endif g */ + + g->jump_level = -1; /* New setting to allow recursive call of Plug */ + return(g); + } /* end of PlugInit */ + +/***********************************************************************/ +/* PlugExit: Terminate Plug operations. 
*/ +/***********************************************************************/ +int PlugExit(PGLOBAL g) + { + int rc = 0; + + if (!g) + return rc; + + if (g->Sarea) + free(g->Sarea); + + free(g); + return rc; + } /* end of PlugExit */ + +/***********************************************************************/ +/* Remove the file type from a file name. */ +/* Note: this routine is not really implemented for Unix. */ +/***********************************************************************/ +LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName) + { +#if defined(__WIN__) + char drive[_MAX_DRIVE]; +#else + char *drive = NULL; +#endif + char direc[_MAX_DIR]; + char fname[_MAX_FNAME]; + char ftype[_MAX_EXT]; + + _splitpath(FileName, drive, direc, fname, ftype); + + if (trace > 1) { + htrc("after _splitpath: FileName=%s\n", FileName); + htrc("drive=%s dir=%s fname=%s ext=%s\n", + SVP(drive), direc, fname, ftype); + } // endif trace + + _makepath(pBuff, drive, direc, fname, ""); + + if (trace > 1) + htrc("buff='%s'\n", pBuff); + + return pBuff; + } // end of PlugRemoveType + + +BOOL PlugIsAbsolutePath(LPCSTR path) +{ +#if defined(__WIN__) + return ((path[0] >= 'a' && path[0] <= 'z') || + (path[0] >= 'A' && path[0] <= 'Z')) && path[1] == ':'; +#else + return path[0] == '/'; +#endif +} + +/***********************************************************************/ +/* Set the full path of a file relatively to a given path. */ +/* Note: this routine is not really implemented for Unix. */ +/***********************************************************************/ +LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) + { + char newname[_MAX_PATH]; + char direc[_MAX_DIR], defdir[_MAX_DIR], tmpdir[_MAX_DIR]; + char fname[_MAX_FNAME]; + char ftype[_MAX_EXT]; +#if defined(__WIN__) + char drive[_MAX_DRIVE], defdrv[_MAX_DRIVE]; +#else + char *drive = NULL, *defdrv = NULL; +#endif + + if (trace > 1) + htrc("prefix=%s fn=%s path=%s\n", prefix, FileName, defpath); + + if (!strncmp(FileName, "//", 2) || !strncmp(FileName, "\\\\", 2)) { + strcpy(pBuff, FileName); // Remote file + return pBuff; + } // endif + + if (PlugIsAbsolutePath(FileName)) + { + strcpy(pBuff, FileName); // FileName includes absolute path + return pBuff; + } // endif + +#if !defined(__WIN__) + if (*FileName == '~') { + if (_fullpath(pBuff, FileName, _MAX_PATH)) { + if (trace > 1) + htrc("pbuff='%s'\n", pBuff); + + return pBuff; + } else + return FileName; // Error, return unchanged name + + } // endif FileName +#endif // !__WIN__ + + if (prefix && strcmp(prefix, ".") && !PlugIsAbsolutePath(defpath)) + { + char tmp[_MAX_PATH]; + int len= snprintf(tmp, sizeof(tmp) - 1, "%s%s%s", + prefix, defpath, FileName); + memcpy(pBuff, tmp, (size_t) len); + pBuff[len]= '\0'; + return pBuff; + } + + _splitpath(FileName, drive, direc, fname, ftype); + + if (defpath) { + char c = defpath[strlen(defpath) - 1]; + + strcpy(tmpdir, defpath); + + if (c != '/' && c != '\\') + strcat(tmpdir, "/"); + + } else + strcpy(tmpdir, "./"); + + _splitpath(tmpdir, defdrv, defdir, NULL, NULL); + + if (trace > 1) { + htrc("after _splitpath: FileName=%s\n", FileName); +#if defined(__WIN__) + htrc("drive=%s dir=%s fname=%s ext=%s\n", drive, direc, fname, ftype); + htrc("defdrv=%s defdir=%s\n", defdrv, defdir); +#else + htrc("dir=%s fname=%s ext=%s\n", direc, fname, ftype); +#endif + } // endif trace + + if (drive && !*drive) + strcpy(drive, defdrv); + + switch (*direc) { + case '\0': + strcpy(direc, defdir); + break; + case '\\': + case '/': + break; + default: 
+ // This supposes that defdir ends with a SLASH + strcpy(direc, strcat(defdir, direc)); + } // endswitch + + _makepath(newname, drive, direc, fname, ftype); + + if (trace > 1) + htrc("newname='%s'\n", newname); + + if (_fullpath(pBuff, newname, _MAX_PATH)) { + if (trace > 1) + htrc("pbuff='%s'\n", pBuff); + + return pBuff; + } else + return FileName; // Error, return unchanged name + + } // end of PlugSetPath + +#if defined(XMSG) +/***********************************************************************/ +/* PlugGetMessage: get a message from the message file. */ +/***********************************************************************/ +char *PlugReadMessage(PGLOBAL g, int mid, char *m) + { + char msgfile[_MAX_PATH], msgid[32], buff[256]; + char *msg; + FILE *mfile = NULL; + +//GetPrivateProfileString("Message", msglang, "Message\\english.msg", +// msgfile, _MAX_PATH, plgini); +//strcat(strcat(strcpy(msgfile, msg_path), msglang()), ".msg"); + strcat(strcpy(buff, msglang()), ".msg"); + PlugSetPath(msgfile, NULL, buff, msg_path); + + if (!(mfile = fopen(msgfile, "rt"))) { + sprintf(stmsg, "Fail to open message file %s", msgfile); + goto err; + } // endif mfile + + for (;;) + if (!fgets(buff, 256, mfile)) { + sprintf(stmsg, "Cannot get message %d %s", mid, SVP(m)); + goto fin; + } else + if (atoi(buff) == mid) + break; + + if (sscanf(buff, " %*d %s \"%[^\"]", msgid, stmsg) < 2) { + // Old message file + if (!sscanf(buff, " %*d \"%[^\"]", stmsg)) { + sprintf(stmsg, "Bad message file for %d %s", mid, SVP(m)); + goto fin; + } else + m = NULL; + + } // endif sscanf + + if (m && strcmp(m, msgid)) { + // Message file is out of date + strcpy(stmsg, m); + goto fin; + } // endif m + + fin: + fclose(mfile); + + err: + if (g) { + // Called by STEP + msg = PlugDup(g, stmsg); + } else // Called by MSG or PlgGetErrorMsg + msg = stmsg; + + return msg; + } // end of PlugReadMessage + +#elif defined(NEWMSG) +/***********************************************************************/ +/* PlugGetMessage: get a message from the resource string table. */ +/***********************************************************************/ +char *PlugGetMessage(PGLOBAL g, int mid) + { + char *msg; + +#if 0 // was !defined(UNIX) && !defined(UNIV_LINUX) + int n = LoadString(s_hModule, (uint)mid, (LPTSTR)stmsg, 200); + + if (n == 0) { + DWORD rc = GetLastError(); + msg = (char*)PlugSubAlloc(g, NULL, 512); // Extend buf allocation + n = sprintf(msg, "Message %d, rc=%d: ", mid, rc); + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, + (LPTSTR)(msg + n), 512 - n, NULL); + return msg; + } // endif n + +#else // ALL + if (!GetRcString(mid, stmsg, 200)) + sprintf(stmsg, "Message %d not found", mid); +#endif // ALL + + if (g) { + // Called by STEP + msg = PlugDup(g, stmsg); + } else // Called by MSG or PlgGetErrorMsg + msg = stmsg; + + return msg; + } // end of PlugGetMessage +#endif // NEWMSG + +#if defined(__WIN__) +/***********************************************************************/ +/* Return the line length of the console screen buffer. */ +/***********************************************************************/ +short GetLineLength(PGLOBAL g) + { + CONSOLE_SCREEN_BUFFER_INFO coninfo; + HANDLE hcons = GetStdHandle(STD_OUTPUT_HANDLE); + BOOL b = GetConsoleScreenBufferInfo(hcons, &coninfo); + + return (b) ? 
coninfo.dwSize.X : 0; + } // end of GetLineLength +#endif // __WIN__ + +/***********************************************************************/ +/* Program for memory allocation of work and language areas. */ +/***********************************************************************/ +void *PlugAllocMem(PGLOBAL g, uint size) + { + void *areap; /* Pointer to allocated area */ + + /*********************************************************************/ + /* This is the allocation routine for the WIN32/UNIX/AIX version. */ + /*********************************************************************/ + if (!(areap = malloc(size))) + sprintf(g->Message, MSG(MALLOC_ERROR), "malloc"); + + if (trace > 1) { + if (areap) + htrc("Memory of %u allocated at %p\n", size, areap); + else + htrc("PlugAllocMem: %s\n", g->Message); + + } // endif trace + + return (areap); + } /* end of PlugAllocMem */ + +/***********************************************************************/ +/* Program for SubSet initialization of memory pools. */ +/* Here there should be some verification done such as validity of */ +/* the address and size not larger than memory size. */ +/***********************************************************************/ +BOOL PlugSubSet(PGLOBAL g __attribute__((unused)), void *memp, uint size) + { + PPOOLHEADER pph = (PPOOLHEADER)memp; + + pph->To_Free = (OFFSET)sizeof(POOLHEADER); + pph->FreeBlk = size - pph->To_Free; + + return FALSE; + } /* end of PlugSubSet */ + +/***********************************************************************/ +/* Program for sub-allocating one item in a storage area. */ +/* Note: SubAlloc routines of OS/2 are no more used to increase the */ +/* code portability and avoid problems when a grammar compiled under */ +/* one version of OS/2 is used under another version. */ +/* The simple way things are done here is also based on the fact */ +/* that no freeing of suballocated blocks is permitted in Plug. */ +/***********************************************************************/ +void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) + { + PPOOLHEADER pph; /* Points on area header. */ + + if (!memp) + /*******************************************************************/ + /* Allocation is to be done in the Sarea. */ + /*******************************************************************/ + memp = g->Sarea; + + size = ((size + 7) / 8) * 8; /* Round up size to multiple of 8 */ + pph = (PPOOLHEADER)memp; + + if (trace > 3) + htrc("SubAlloc in %p size=%d used=%d free=%d\n", + memp, size, pph->To_Free, pph->FreeBlk); + + if ((uint)size > pph->FreeBlk) { /* Not enough memory left in pool */ + char *pname = "Work"; + + sprintf(g->Message, + "Not enough memory in %s area for request of %u (used=%d free=%d)", + pname, (uint)size, pph->To_Free, pph->FreeBlk); + + if (trace) + htrc("PlugSubAlloc: %s\n", g->Message); + +#if defined(USE_TRY) + throw 1234; +#else // !USE_TRY + /* Nothing we can do if longjmp is not initialized. */ + assert(g->jump_level >= 0); + longjmp(g->jumper[g->jump_level], 1); +#endif // !USE_TRY + } /* endif size OS32 code */ + + /*********************************************************************/ + /* Do the suballocation the simplest way. 
*/ + /*********************************************************************/ + memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */ + pph->To_Free += (OFFSET)size; /* New offset of pool free block */ + pph->FreeBlk -= (uint)size; /* New size of pool free block */ + + if (trace > 3) + htrc("Done memp=%p used=%d free=%d\n", + memp, pph->To_Free, pph->FreeBlk); + + return (memp); + } /* end of PlugSubAlloc */ + +/***********************************************************************/ +/* Program for sub-allocating and copying a string in a storage area. */ +/***********************************************************************/ +char *PlugDup(PGLOBAL g, const char *str) + { + if (str) { + char *sm = (char*)PlugSubAlloc(g, NULL, strlen(str) + 1); + + strcpy(sm, str); + return sm; + } else + return NULL; + + } // end of PlugDup + +#if 0 +/***********************************************************************/ +/* This routine suballocate a copy of the passed string. */ +/***********************************************************************/ +char *PlugDup(PGLOBAL g, const char *str) + { + char *buf; + size_t len; + + if (str && (len = strlen(str))) { + buf = (char*)PlugSubAlloc(g, NULL, len + 1); + strcpy(buf, str); + } else + buf = NULL; + + return(buf); + } /* end of PlugDup */ +#endif // 0 + +/***********************************************************************/ +/* This routine makes a pointer from an offset to a memory pointer. */ +/***********************************************************************/ +void *MakePtr(void *memp, OFFSET offset) + { + return ((offset == 0) ? NULL : &((char *)memp)[offset]); + } /* end of MakePtr */ + +/***********************************************************************/ +/* This routine makes an offset from a pointer new format. */ +/***********************************************************************/ +#if 0 +OFFSET MakeOff(void *memp, void *ptr) + { + return ((!ptr) ? 
0 : (OFFSET)((char *)ptr - (char *)memp)); + } /* end of MakeOff */ +#endif +/*--------------------- End of PLUGUTIL program -----------------------*/ diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index d2bb3d7a4af..f1bbef19a22 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -1,11 +1,11 @@ /************* TabDos C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABDOS */ /* ------------- */ -/* Version 4.9.2 */ +/* Version 4.9.3 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -1509,8 +1509,12 @@ PBF TDBDOS::CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv) if (n == 8 && ctype != TYPE_LIST) { // Should never happen strcpy(g->Message, "Block opt: bad constant"); - longjmp(g->jumper[g->jump_level], 99); - } // endif Conv +#if defined(USE_TRY) + throw 99; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 99); +#endif // !USE_TRY + } // endif Conv if (type[0] == 1) { // Make it always as Column-op-Value @@ -1791,7 +1795,7 @@ err: /***********************************************************************/ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) { - int k, rc; + int k; volatile bool dynamic; bool brc; PCOL colp; @@ -1861,13 +1865,17 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) } else // Column contains same values as ROWID kxp = new(g) XXROW(this); - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif - - if (!(rc = setjmp(g->jumper[++g->jump_level])) != 0) { +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return true; + } // endif + + if (!setjmp(g->jumper[++g->jump_level])) { +#endif // !USE_TRY if (dynamic) { ResetBlockFilter(g); kxp->SetDynamic(dynamic); @@ -1892,11 +1900,22 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) } // endif brc - } else - brc = true; - - g->jump_level--; - return brc; +#if defined(USE_TRY) + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + brc = true; + } catch (const char *msg) { + strcpy(g->Message, msg); + brc = true; + } // end catch +#else // !USE_TRY + } else + brc = true; + + g->jump_level--; +#endif // !USE_TRY + return brc; } // end of InitialyzeIndex /***********************************************************************/ @@ -2156,16 +2175,18 @@ bool TDBDOS::OpenDB(PGLOBAL g) To_BlkFil = InitBlockFilter(g, To_Filter); /*********************************************************************/ - /* Allocate the line buffer plus a null character. 
*/ - /*********************************************************************/ - To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1); + /* Lrecl does not include line ending */ + /*********************************************************************/ + size_t linelen = Lrecl + ((PDOSDEF)To_Def)->Ending + 1; + + To_Line = (char*)PlugSubAlloc(g, NULL, linelen); if (Mode == MODE_INSERT) { // Spaces between fields must be filled with blanks memset(To_Line, ' ', Lrecl); To_Line[Lrecl] = '\0'; } else - memset(To_Line, 0, Lrecl + 1); + memset(To_Line, 0, linelen); if (trace) htrc("OpenDos: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); @@ -2514,8 +2535,12 @@ void DOSCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); - longjmp(g->jumper[g->jump_level], 11); - } // endif +#if defined(USE_TRY) + throw 11; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 11); +#endif // !USE_TRY + } // endif p = tdbp->To_Line + Deplac; field = Long; @@ -2570,8 +2595,12 @@ void DOSCOL::ReadColumn(PGLOBAL g) break; default: sprintf(g->Message, MSG(BAD_RECFM), tdbp->Ftype); - longjmp(g->jumper[g->jump_level], 34); - } // endswitch Ftype +#if defined(USE_TRY) + throw 34; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 34); +#endif // !USE_TRY + } // endswitch Ftype // Set null when applicable if (Nullable) @@ -2679,8 +2708,12 @@ void DOSCOL::WriteColumn(PGLOBAL g) break; default: sprintf(g->Message, "Invalid field format for column %s", Name); - longjmp(g->jumper[g->jump_level], 31); - } // endswitch BufType +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } // endswitch BufType p2 = Buf; } else // Standard CONNECT format @@ -2691,8 +2724,12 @@ void DOSCOL::WriteColumn(PGLOBAL g) if ((len = strlen(p2)) > field) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p2, Name, field); - longjmp(g->jumper[g->jump_level], 31); - } else if (Dsp) +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (Dsp) for (i = 0; i < len; i++) if (p2[i] == '.') p2[i] = Dsp; diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp index bf123cd36c8..fb25ab3d5c8 100644 --- a/storage/connect/tabfix.cpp +++ b/storage/connect/tabfix.cpp @@ -1,11 +1,11 @@ /************* TabFix C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABFIX */ /* ------------- */ -/* Version 4.9.1 */ +/* Version 4.9.2 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -411,8 +411,12 @@ BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) case 'D': M = sizeof(double); break; default: sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); - longjmp(g->jumper[g->jump_level], 11); - } // endswitch Fmt +#if defined(USE_TRY) + throw 11; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 11); +#endif // !USE_TRY + } // endswitch Fmt } else if (IsTypeChar(Buf_Type)) Eds = 0; @@ -486,8 +490,12 @@ void BINCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); - longjmp(g->jumper[g->jump_level], 11); - } // endif +#if defined(USE_TRY) + throw 11; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 11); +#endif // !USE_TRY + } // endif p = tdbp->To_Line + Deplac; @@ -545,8 +553,12 @@ void BINCOL::ReadColumn(PGLOBAL g) break; default: 
sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); - longjmp(g->jumper[g->jump_level], 11); - } // endswitch Fmt +#if defined(USE_TRY) + throw 11; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 11); +#endif // !USE_TRY + } // endswitch Fmt // Set null when applicable if (Nullable) @@ -595,8 +607,12 @@ void BINCOL::WriteColumn(PGLOBAL g) } else if (Value->GetBinValue(p, Long, Status)) { sprintf(g->Message, MSG(BIN_F_TOO_LONG), Name, Value->GetSize(), Long); - longjmp(g->jumper[g->jump_level], 31); - } // endif p +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } // endif p break; case 'S': // Short integer @@ -604,8 +620,12 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > 32767LL || n < -32768LL) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); - longjmp(g->jumper[g->jump_level], 31); - } else if (Status) +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (Status) Value->GetValueNonAligned(p, (short)n); break; @@ -614,8 +634,12 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > 255LL || n < -256LL) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); - longjmp(g->jumper[g->jump_level], 31); - } else if (Status) +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (Status) *p = (char)n; break; @@ -624,8 +648,12 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > INT_MAX || n < INT_MIN) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); - longjmp(g->jumper[g->jump_level], 31); - } else if (Status) +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (Status) Value->GetValueNonAligned(p, (int)n); break; @@ -648,8 +676,12 @@ void BINCOL::WriteColumn(PGLOBAL g) case 'C': // Characters if ((n = (signed)strlen(Value->GetCharString(Buf))) > Long) { sprintf(g->Message, MSG(BIN_F_TOO_LONG), Name, (int) n, Long); - longjmp(g->jumper[g->jump_level], 31); - } // endif n +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } // endif n if (Status) { s = Value->GetCharString(Buf); @@ -660,8 +692,12 @@ void BINCOL::WriteColumn(PGLOBAL g) break; default: sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); - longjmp(g->jumper[g->jump_level], 11); - } // endswitch Fmt +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } // endswitch Fmt if (Eds && Status) { p = tdbp->To_Line + Deplac; diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 0da67ef5e7f..be288b3858d 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -1435,8 +1435,12 @@ void CSVCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); - longjmp(g->jumper[g->jump_level], 34); - } // endif +#if defined(USE_TRY) + throw 34; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 34); +#endif // !USE_TRY + } // endif if (tdbp->Mode != MODE_UPDATE) { int colen = Long; // Column length @@ -1453,8 +1457,12 @@ void CSVCOL::ReadColumn(PGLOBAL g) Long = colen; // Restore column length sprintf(g->Message, MSG(FLD_TOO_LNG_FOR), Fldnum + 1, Name, To_Tdb->RowNumber(g), tdbp->GetFile(g)); - longjmp(g->jumper[g->jump_level], 34); - } // endif Long +#if defined(USE_TRY) + throw 34; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 34); +#endif // !USE_TRY + } // endif Long // Now 
do the reading DOSCOL::ReadColumn(g); @@ -1516,8 +1524,12 @@ void CSVCOL::WriteColumn(PGLOBAL g) if ((signed)strlen(p) > flen) { sprintf(g->Message, MSG(BAD_FLD_LENGTH), Name, p, flen, tdbp->RowNumber(g), tdbp->GetFile(g)); - longjmp(g->jumper[g->jump_level], 34); - } else if (Dsp) +#if defined(USE_TRY) + throw 34; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 34); +#endif // !USE_TRY + } else if (Dsp) for (int i = 0; p[i]; i++) if (p[i] == '.') p[i] = Dsp; @@ -1532,8 +1544,12 @@ void CSVCOL::WriteColumn(PGLOBAL g) if (Fldnum < 0) { // This can happen for wrong offset value in XDB files sprintf(g->Message, MSG(BAD_FIELD_RANK), Fldnum + 1, Name); - longjmp(g->jumper[g->jump_level], 34); - } else +#if defined(USE_TRY) + throw 34; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 34); +#endif // !USE_TRY + } else strncpy(tdbp->Field[Fldnum], p, flen); if (trace > 1) diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index 5431e35e0ec..48427b9620c 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -227,41 +227,8 @@ bool JDBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (rc == RC_FX) // Error return true; -//else if (rc == RC_OK) { // Url was not a server name -// Tabname = GetStringCatInfo(g, "Name", -// (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name); -// Tabname = GetStringCatInfo(g, "Tabname", Tabname); -// Username = GetStringCatInfo(g, "User", NULL); -// Password = GetStringCatInfo(g, "Password", NULL); -//} // endif rc - -//if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) -// Read_Only = true; Wrapname = GetStringCatInfo(g, "Wrapper", NULL); -//Prop = GetStringCatInfo(g, "Properties", NULL); -//Tabcat = GetStringCatInfo(g, "Qualifier", NULL); -//Tabcat = GetStringCatInfo(g, "Catalog", Tabcat); -//Tabschema = GetStringCatInfo(g, "Dbname", NULL); -//Tabschema = GetStringCatInfo(g, "Schema", Tabschema); - -//if (Catfunc == FNC_COL) -// Colpat = GetStringCatInfo(g, "Colpat", NULL); - -//if (Catfunc == FNC_TABLE) -// Tabtyp = GetStringCatInfo(g, "Tabtype", NULL); - -//Qrystr = GetStringCatInfo(g, "Query_String", "?"); -//Sep = GetStringCatInfo(g, "Separator", NULL); -//Xsrc = GetBoolCatInfo("Execsrc", FALSE); -//Maxerr = GetIntCatInfo("Maxerr", 0); -//Maxres = GetIntCatInfo("Maxres", 0); -//Quoted = GetIntCatInfo("Quoted", 0); -// Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT); -// Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT); -//Scrollable = GetBoolCatInfo("Scrollable", false); -//Memory = GetIntCatInfo("Memory", 0); -//Pseudo = 2; // FILID is Ok but not ROWID return false; } // end of DefineAM @@ -341,9 +308,6 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBEXT(tdp) WrapName = tdp->Wrapname; Ops.User = tdp->Username; Ops.Pwd = tdp->Password; -// Ops.Properties = tdp->Prop; -// Ops.Cto = tdp->Cto; -// Ops.Qto = tdp->Qto; Ops.Scrollable = tdp->Scrollable; } else { WrapName = NULL; @@ -351,13 +315,9 @@ TDBJDBC::TDBJDBC(PJDBCDEF tdp) : TDBEXT(tdp) Ops.Url = NULL; Ops.User = NULL; Ops.Pwd = NULL; -// Ops.Properties = NULL; -// Ops.Cto = DEFAULT_LOGIN_TIMEOUT; -// Ops.Qto = DEFAULT_QUERY_TIMEOUT; Ops.Scrollable = false; } // endif tdp -//Ncol = 0; Prepared = false; Werr = false; Rerr = false; @@ -370,7 +330,6 @@ TDBJDBC::TDBJDBC(PTDBJDBC tdbp) : TDBEXT(tdbp) Cnp = tdbp->Cnp; WrapName = tdbp->WrapName; Ops = tdbp->Ops; -//Ncol = tdbp->Ncol; Prepared = tdbp->Prepared; Werr = tdbp->Werr; Rerr = tdbp->Rerr; @@ -737,18 +696,12 @@ bool TDBJDBC::SetRecpos(PGLOBAL g, int recpos) { if (Jcp->m_Full) { Fpos = 0; -// CurNum 
= 0; CurNum = 1; } else if (Memory == 3) { -// Fpos = recpos; -// CurNum = -1; Fpos = 0; CurNum = recpos; } else if (Ops.Scrollable) { // Is new position in the current row set? -// if (recpos >= Curpos && recpos < Curpos + Rbuf) { -// CurNum = recpos - Curpos; -// Fpos = 0; if (recpos > 0 && recpos <= Rbuf) { CurNum = recpos; Fpos = recpos; @@ -990,11 +943,6 @@ int TDBJDBC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ void TDBJDBC::CloseDB(PGLOBAL g) { - //if (To_Kindex) { - // To_Kindex->Close(); - // To_Kindex = NULL; - // } // endif - if (Jcp) Jcp->Close(); @@ -1039,54 +987,6 @@ JDBCCOL::JDBCCOL(JDBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp) { } // end of JDBCCOL copy constructor -#if 0 -/***********************************************************************/ -/* SetBuffer: prepare a column block for write operation. */ -/***********************************************************************/ -bool JDBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) -{ - if (!(To_Val = value)) { - sprintf(g->Message, MSG(VALUE_ERROR), Name); - return true; - } else if (Buf_Type == value->GetType()) { - // Values are of the (good) column type - if (Buf_Type == TYPE_DATE) { - // If any of the date values is formatted - // output format must be set for the receiving table - if (GetDomain() || ((DTVAL *)value)->IsFormatted()) - goto newval; // This will make a new value; - - } else if (Buf_Type == TYPE_DOUBLE) - // Float values must be written with the correct (column) precision - // Note: maybe this should be forced by ShowValue instead of this ? - value->SetPrec(GetScale()); - - Value = value; // Directly access the external value - } else { - // Values are not of the (good) column type - if (check) { - sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name, - GetTypeName(Buf_Type), GetTypeName(value->GetType())); - return true; - } // endif check - - newval: - if (InitValue(g)) // Allocate the matching value block - return true; - - } // endif's Value, Buf_Type - - // Because Colblk's have been made from a copy of the original TDB in - // case of Update, we must reset them to point to the original one. - if (To_Tdb->GetOrig()) - To_Tdb = (PTDB)To_Tdb->GetOrig(); - - // Set the Column - Status = (ok) ? BUF_EMPTY : BUF_NO; - return false; -} // end of SetBuffer -#endif // 0 - /***********************************************************************/ /* ReadColumn: when SQLFetch is used there is nothing to do as the */ /* column buffer was bind to the record set. This is also the case */ @@ -1196,26 +1096,6 @@ PCMD TDBXJDC::MakeCMD(PGLOBAL g) return xcmd; } // end of MakeCMD -#if 0 -/***********************************************************************/ -/* JDBC Bind Parameter function. */ -/***********************************************************************/ -bool TDBXJDC::BindParameters(PGLOBAL g) -{ - PJDBCCOL colp; - - for (colp = (PJDBCCOL)Columns; colp; colp = (PJDBCCOL)colp->Next) { - colp->AllocateBuffers(g, 0); - - if (Jcp->BindParam(colp)) - return true; - - } // endfor colp - - return false; -} // end of BindParameters -#endif // 0 - /***********************************************************************/ /* XDBC GetMaxSize: returns table size (not always one row). 
*/ /***********************************************************************/ @@ -1416,17 +1296,3 @@ PQRYRES TDBJDBCL::GetResult(PGLOBAL g) { return JDBCColumns(g, Schema, Tab, Colpat, Maxres, false, &Ops); } // end of GetResult - -#if 0 -/* ---------------------------TDBJSRC class -------------------------- */ - -/***********************************************************************/ -/* GetResult: Get the list of JDBC data sources. */ -/***********************************************************************/ -PQRYRES TDBJSRC::GetResult(PGLOBAL g) -{ - return JDBCDataSources(g, Maxres, false); -} // end of GetResult - -/* ------------------------ End of TabJDBC --------------------------- */ -#endif // 0 diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h index 46d2073e923..212f2a58b58 100644 --- a/storage/connect/tabjdbc.h +++ b/storage/connect/tabjdbc.h @@ -14,9 +14,6 @@ typedef class TDBJDBC *PTDBJDBC; typedef class JDBCCOL *PJDBCCOL; typedef class TDBXJDC *PTDBXJDC; typedef class JSRCCOL *PJSRCCOL; -//typedef class TDBOIF *PTDBOIF; -//typedef class OIFCOL *POIFCOL; -//typedef class TDBJSRC *PTDBJSRC; /***********************************************************************/ /* JDBC table. */ @@ -68,20 +65,14 @@ public: // Methods virtual PTDB Clone(PTABS t); -//virtual int GetRecpos(void); virtual bool SetRecpos(PGLOBAL g, int recpos); -//virtual PSZ GetFile(PGLOBAL g); -//virtual void SetFile(PGLOBAL g, PSZ fn); virtual void ResetSize(void); -//virtual int GetAffectedRows(void) {return AftRows;} virtual PSZ GetServer(void) { return "JDBC"; } virtual int Indexable(void) { return 2; } // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); virtual int Cardinality(PGLOBAL g); -//virtual int GetMaxSize(PGLOBAL g); -//virtual int GetProgMax(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); virtual int WriteDB(PGLOBAL g); @@ -91,21 +82,14 @@ public: protected: // Internal functions -//int Decode(char *utf, char *buf, size_t n); -//bool MakeSQL(PGLOBAL g, bool cnt); bool MakeInsert(PGLOBAL g); -//virtual bool MakeCommand(PGLOBAL g); -//bool MakeFilter(PGLOBAL g, bool c); bool SetParameters(PGLOBAL g); -//char *MakeUpdate(PGLOBAL g); -//char *MakeDelete(PGLOBAL g); // Members JDBConn *Jcp; // Points to a JDBC connection class JDBCCOL *Cnp; // Points to count(*) column JDBCPARM Ops; // Additional parameters char *WrapName; // Points to Java wrapper name -//int Ncol; // The column number bool Prepared; // True when using prepared statement bool Werr; // Write error bool Rerr; // Rewind error @@ -152,12 +136,6 @@ public: virtual AMT GetAmType(void) {return TYPE_AM_XDBC;} // Methods - //virtual int GetRecpos(void); - //virtual PSZ GetFile(PGLOBAL g); - //virtual void SetFile(PGLOBAL g, PSZ fn); - //virtual void ResetSize(void); - //virtual int GetAffectedRows(void) {return AftRows;} - //virtual PSZ GetServer(void) {return "JDBC";} // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); @@ -172,7 +150,6 @@ public: protected: // Internal functions PCMD MakeCMD(PGLOBAL g); - //bool BindParameters(PGLOBAL g); // Members PCMD Cmdlist; // The commands to execute @@ -255,21 +232,4 @@ protected: char *Colpat; // Points to catalog column pattern }; // end of class TDBJDBCL -#if 0 -/***********************************************************************/ -/* This is the class declaration for the Data Sources catalog table. 
*/ -/***********************************************************************/ -class TDBJSRC : public TDBJDRV { -public: - // Constructor - TDBJSRC(PJDBCDEF tdp) : TDBJDRV(tdp) {} - -protected: - // Specific routines - virtual PQRYRES GetResult(PGLOBAL g); - - // No additional Members -}; // end of class TDBJSRC -#endif // 0 - #endif // !NJDBC diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 1e11d454cfc..4b2ab4fd240 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1,6 +1,6 @@ /************* tabjson C++ Program Source Code File (.CPP) *************/ -/* PROGRAM NAME: tabjson Version 1.3 */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2016 */ +/* PROGRAM NAME: tabjson Version 1.4 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */ /* This program are the JSON class DB execution routines. */ /***********************************************************************/ @@ -168,7 +168,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) G->Sarea_Size = tdp->Lrecl * 10; G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); PlugSubSet(G, G->Sarea, G->Sarea_Size); - G->jump_level = -1; + G->jump_level = 0; tjnp->SetG(G); #else tjnp->SetG(g); @@ -478,7 +478,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) G->Sarea_Size = Lrecl * 10; G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); PlugSubSet(G, G->Sarea, G->Sarea_Size); - G->jump_level = -1; + G->jump_level = 0; ((TDBJSN*)tdbp)->G = G; #else ((TDBJSN*)tdbp)->G = g; @@ -1289,8 +1289,12 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) { strcpy(g->Message, "Logical error expanding array"); - longjmp(g->jumper[g->jump_level], 666); - } // endif jvp +#if defined(USE_TRY) + throw 666; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 666); +#endif // !USE_TRY + } // endif jvp if (n < Nod - 1 && jvp->GetJson()) { jval.SetValue(GetColumnValue(g, jvp->GetJson(), n + 1)); @@ -1475,8 +1479,12 @@ void JSONCOL::WriteColumn(PGLOBAL g) { if (Xpd && Tjp->Pretty < 2) { strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); +#if defined(USE_TRY) + throw 666; +#else // !USE_TRY longjmp(g->jumper[g->jump_level], 666); - } // endif Xpd +#endif // !USE_TRY + } // endif Xpd /*********************************************************************/ /* Check whether this node must be written. */ @@ -1510,8 +1518,12 @@ void JSONCOL::WriteColumn(PGLOBAL g) if (!(jsp = ParseJson(G, s, (int)strlen(s)))) { strcpy(g->Message, s); - longjmp(g->jumper[g->jump_level], 666); - } // endif jsp +#if defined(USE_TRY) + throw 666; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 666); +#endif // !USE_TRY + } // endif jsp if (arp) { if (Nod > 1 && Nodes[Nod-2].Op == OP_EQ) diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index 78adde81d12..8f1e6f3819d 100644 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -958,8 +958,12 @@ void DIRCOL::ReadColumn(PGLOBAL g) #endif // !__WIN__ default: sprintf(g->Message, MSG(INV_DIRCOL_OFST), N); - longjmp(g->jumper[g->jump_level], GetAmType()); - } // endswitch N +#if defined(USE_TRY) + throw GetAmType(); +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], GetAmType()); +#endif // !USE_TRY + } // endswitch N } // end of ReadColumn @@ -1423,8 +1427,12 @@ void TDBDHR::CloseDB(PGLOBAL g) // Close the search handle. 
if (!FindClose(Hsearch)) { strcpy(g->Message, MSG(SRCH_CLOSE_ERR)); - longjmp(g->jumper[g->jump_level], GetAmType()); - } // endif FindClose +#if defined(USE_TRY) + throw GetAmType(); +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], GetAmType()); +#endif // !USE_TRY + } // endif FindClose iFile = 0; Hsearch = INVALID_HANDLE_VALUE; diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index 1a715819fc8..3ce1ea1f7d8 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -1407,8 +1407,12 @@ void MYSQLCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); - longjmp(g->jumper[g->jump_level], 11); - } else +#if defined(USE_TRY) + throw 11; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 11); +#endif // !USE_TRY + } else tdbp->Fetched = true; if ((buf = ((PTDBMY)To_Tdb)->Myc.GetCharField(Rank))) { diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index 488acdd330d..b8c8a4d8cb4 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -116,47 +116,12 @@ bool ODBCDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (EXTDEF::DefineAM(g, am, poff)) return true; - // Tabname = GetStringCatInfo(g, "Name", - // (Catfunc & (FNC_TABLE | FNC_COL)) ? NULL : Name); - // Tabname = GetStringCatInfo(g, "Tabname", Tabname); - // Tabschema = GetStringCatInfo(g, "Dbname", NULL); - // Tabschema = GetStringCatInfo(g, "Schema", Tabschema); - // Tabcat = GetStringCatInfo(g, "Qualifier", NULL); - // Tabcat = GetStringCatInfo(g, "Catalog", Tabcat); - //Username = GetStringCatInfo(g, "User", NULL); - // Password = GetStringCatInfo(g, "Password", NULL); - - // if ((Srcdef = GetStringCatInfo(g, "Srcdef", NULL))) - // Read_Only = true; - - // Qrystr = GetStringCatInfo(g, "Query_String", "?"); - // Sep = GetStringCatInfo(g, "Separator", NULL); Catver = GetIntCatInfo("Catver", 2); - //Xsrc = GetBoolCatInfo("Execsrc", FALSE); - //Maxerr = GetIntCatInfo("Maxerr", 0); - //Maxres = GetIntCatInfo("Maxres", 0); - //Quoted = GetIntCatInfo("Quoted", 0); Options = ODBConn::noOdbcDialog; //Options = ODBConn::noOdbcDialog | ODBConn::useCursorLib; Cto= GetIntCatInfo("ConnectTimeout", DEFAULT_LOGIN_TIMEOUT); Qto= GetIntCatInfo("QueryTimeout", DEFAULT_QUERY_TIMEOUT); - - //if ((Scrollable = GetBoolCatInfo("Scrollable", false)) && !Elemt) - // Elemt = 1; // Cannot merge SQLFetch and SQLExtendedFetch - - //if (Catfunc == FNC_COL) - // Colpat = GetStringCatInfo(g, "Colpat", NULL); - - //if (Catfunc == FNC_TABLE) - // Tabtyp = GetStringCatInfo(g, "Tabtype", NULL); - UseCnc = GetBoolCatInfo("UseDSN", false); - - // Memory was Boolean, it is now integer - //if (!(Memory = GetIntCatInfo("Memory", 0))) - // Memory = GetBoolCatInfo("Memory", false) ? 
1 : 0; - - //Pseudo = 2; // FILID is Ok but not ROWID return false; } // end of DefineAM @@ -210,59 +175,22 @@ TDBODBC::TDBODBC(PODEF tdp) : TDBEXT(tdp) if (tdp) { Connect = tdp->Connect; - //TableName = tdp->Tabname; - //Schema = tdp->Tabschema; Ops.User = tdp->Username; Ops.Pwd = tdp->Password; - //Catalog = tdp->Tabcat; - //Srcdef = tdp->Srcdef; - //Qrystr = tdp->Qrystr; - //Sep = tdp->GetSep(); - //Options = tdp->Options; Ops.Cto = tdp->Cto; Ops.Qto = tdp->Qto; - //Quoted = MY_MAX(0, tdp->GetQuoted()); - //Rows = tdp->GetElemt(); Catver = tdp->Catver; - //Memory = tdp->Memory; - //Scrollable = tdp->Scrollable; Ops.UseCnc = tdp->UseCnc; } else { Connect = NULL; - //TableName = NULL; - //Schema = NULL; Ops.User = NULL; Ops.Pwd = NULL; - //Catalog = NULL; - //Srcdef = NULL; - //Qrystr = NULL; - //Sep = 0; - //Options = 0; Ops.Cto = DEFAULT_LOGIN_TIMEOUT; Ops.Qto = DEFAULT_QUERY_TIMEOUT; - //Quoted = 0; - //Rows = 0; Catver = 0; - //Memory = 0; - //Scrollable = false; Ops.UseCnc = false; } // endif tdp - //Quote = NULL; - //Query = NULL; - //Count = NULL; -//Where = NULL; - //MulConn = NULL; - //DBQ = NULL; - //Qrp = NULL; - //Fpos = 0; - //Curpos = 0; - //AftRows = 0; - //CurNum = 0; - //Rbuf = 0; - //BufSize = 0; - //Nparm = 0; - //Placed = false; } // end of TDBODBC standard constructor TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp) @@ -270,32 +198,7 @@ TDBODBC::TDBODBC(PTDBODBC tdbp) : TDBEXT(tdbp) Ocp = tdbp->Ocp; // is that right ? Cnp = tdbp->Cnp; Connect = tdbp->Connect; - //TableName = tdbp->TableName; - //Schema = tdbp->Schema; Ops = tdbp->Ops; - //Catalog = tdbp->Catalog; - //Srcdef = tdbp->Srcdef; - //Qrystr = tdbp->Qrystr; - //Memory = tdbp->Memory; - //Scrollable = tdbp->Scrollable; - //Quote = tdbp->Quote; - //Query = tdbp->Query; - //Count = tdbp->Count; -//Where = tdbp->Where; - //MulConn = tdbp->MulConn; - //DBQ = tdbp->DBQ; - //Options = tdbp->Options; - //Quoted = tdbp->Quoted; - //Rows = tdbp->Rows; - //Fpos = 0; - //Curpos = 0; - //AftRows = 0; - //CurNum = 0; - //Rbuf = 0; - //BufSize = tdbp->BufSize; - //Nparm = tdbp->Nparm; - //Qrp = tdbp->Qrp; - //Placed = false; } // end of TDBODBC copy constructor // Method @@ -389,144 +292,6 @@ void TDBODBC::SetFile(PGLOBAL g, PSZ fn) DBQ = fn; } // end of SetFile -#if 0 -/******************************************************************/ -/* Convert an UTF-8 string to latin characters. */ -/******************************************************************/ -int TDBODBC::Decode(char *txt, char *buf, size_t n) -{ - uint dummy_errors; - uint32 len= copy_and_convert(buf, n, &my_charset_latin1, - txt, strlen(txt), - &my_charset_utf8_general_ci, - &dummy_errors); - buf[len]= '\0'; - return 0; -} // end of Decode - -/***********************************************************************/ -/* MakeSQL: make the SQL statement use with ODBC connection. */ -/* Note: when implementing EOM filtering, column only used in local */ -/* filter should be removed from column list. */ -/***********************************************************************/ -bool TDBODBC::MakeSQL(PGLOBAL g, bool cnt) - { - char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3]; - int len; - bool oom = false, first = true; - PTABLE tablep = To_Table; - PCOL colp; - - if (Srcdef) { - if (strstr(Srcdef, "%s")) { - char *fil; - - fil = (To_CondFil) ? 
To_CondFil->Body : PlugDup(g, "1=1"); - Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil)); - Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil)); - } else - Query = new(g)STRING(g, 0, Srcdef); - - return false; - } // endif Srcdef - - // Allocate the string used to contain the Query - Query = new(g)STRING(g, 1023, "SELECT "); - - if (!cnt) { - if (Columns) { - // Normal SQL statement to retrieve results - for (colp = Columns; colp; colp = colp->GetNext()) - if (!colp->IsSpecial()) { - if (!first) - oom |= Query->Append(", "); - else - first = false; - - // Column name can be encoded in UTF-8 - Decode(colp->GetName(), buf, sizeof(buf)); - - if (Quote) { - // Put column name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); - } else - oom |= Query->Append(buf); - - ((PEXTCOL)colp)->SetRank(++Ncol); - } // endif colp - - } else - // !Columns can occur for queries such that sql count(*) from... - // for which we will count the rows from sql * from... - oom |= Query->Append('*'); - - } else - // SQL statement used to retrieve the size of the result - oom |= Query->Append("count(*)"); - - oom |= Query->Append(" FROM "); - - if (Catalog && *Catalog) - catp = Catalog; - - //if (tablep->GetSchema()) - // schmp = (char*)tablep->GetSchema(); - //else - if (Schema && *Schema) - schmp = Schema; - - if (catp) { - oom |= Query->Append(catp); - - if (schmp) { - oom |= Query->Append('.'); - oom |= Query->Append(schmp); - } // endif schmp - - oom |= Query->Append('.'); - } else if (schmp) { - oom |= Query->Append(schmp); - oom |= Query->Append('.'); - } // endif schmp - - // Table name can be encoded in UTF-8 - Decode(TableName, buf, sizeof(buf)); - - if (Quote) { - // Put table name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); - } else - oom |= Query->Append(buf); - - len = Query->GetLength(); - - if (To_CondFil) { - if (Mode == MODE_READ) { - oom |= Query->Append(" WHERE "); - oom |= Query->Append(To_CondFil->Body); - len = Query->GetLength() + 1; - } else - len += (strlen(To_CondFil->Body) + 256); - - } else - len += ((Mode == MODE_READX) ? 256 : 1); - - if (oom || Query->Resize(len)) { - strcpy(g->Message, "MakeSQL: Out of memory"); - return true; - } // endif oom - - if (trace) - htrc("Query=%s\n", Query->GetStr()); - - return false; - } // end of MakeSQL -#endif // 0 - /***********************************************************************/ /* MakeInsert: make the Insert statement used with ODBC connection. */ /***********************************************************************/ @@ -645,73 +410,6 @@ bool TDBODBC::BindParameters(PGLOBAL g) } // end of BindParameters #if 0 -/***********************************************************************/ -/* MakeCommand: make the Update or Delete statement to send to the */ -/* MySQL server. Limited to remote values and filtering. */ -/***********************************************************************/ -bool TDBODBC::MakeCommand(PGLOBAL g) - { - char *p, *stmt, name[68], *body = NULL, *qc = Ocp->GetQuoteChar(); - char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1); - bool qtd = Quoted > 0; - int i = 0, k = 0; - - // Make a lower case copy of the originale query and change - // back ticks to the data source identifier quoting character - do { - qrystr[i] = (Qrystr[i] == '`') ? 
*qc : tolower(Qrystr[i]); - } while (Qrystr[i++]); - - if (To_CondFil && (p = strstr(qrystr, " where "))) { - p[7] = 0; // Remove where clause - Qrystr[(p - qrystr) + 7] = 0; - body = To_CondFil->Body; - stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr) - + strlen(body) + 64); - } else - stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64); - - // Check whether the table name is equal to a keyword - // If so, it must be quoted in the original query - strlwr(strcat(strcat(strcpy(name, " "), Name), " ")); - - if (strstr(" update delete low_priority ignore quick from ", name)) { - strlwr(strcat(strcat(strcpy(name, qc), Name), qc)); - k += 2; - } else - strlwr(strcpy(name, Name)); // Not a keyword - - if ((p = strstr(qrystr, name))) { - for (i = 0; i < p - qrystr; i++) - stmt[i] = (Qrystr[i] == '`') ? *qc : Qrystr[i]; - - stmt[i] = 0; - k += i + (int)strlen(Name); - - if (qtd && *(p - 1) == ' ') - strcat(strcat(strcat(stmt, qc), TableName), qc); - else - strcat(stmt, TableName); - - i = (int)strlen(stmt); - - do { - stmt[i++] = (Qrystr[k] == '`') ? *qc : Qrystr[k]; - } while (Qrystr[k++]); - - if (body) - strcat(stmt, body); - - } else { - sprintf(g->Message, "Cannot use this %s command", - (Mode == MODE_UPDATE) ? "UPDATE" : "DELETE"); - return true; - } // endif p - - Query = new(g) STRING(g, 0, stmt); - return (!Query->GetSize()); - } // end of MakeCommand - /***********************************************************************/ /* MakeUpdate: make the SQL statement to send to ODBC connection. */ /***********************************************************************/ @@ -829,35 +527,6 @@ int TDBODBC::Cardinality(PGLOBAL g) return Cardinal; } // end of Cardinality -#if 0 -/***********************************************************************/ -/* ODBC GetMaxSize: returns table size estimate in number of lines. */ -/***********************************************************************/ -int TDBODBC::GetMaxSize(PGLOBAL g) - { - if (MaxSize < 0) { - if (Mode == MODE_DELETE) - // Return 0 in mode DELETE in case of delete all. - MaxSize = 0; - else if (!Cardinality(NULL)) - MaxSize = 10; // To make MySQL happy - else if ((MaxSize = Cardinality(g)) < 0) - MaxSize = 12; // So we can see an error occurred - - } // endif MaxSize - - return MaxSize; - } // end of GetMaxSize - -/***********************************************************************/ -/* Return max size value. */ -/***********************************************************************/ -int TDBODBC::GetProgMax(PGLOBAL g) - { - return GetMaxSize(g); - } // end of GetProgMax -#endif // 0 - /***********************************************************************/ /* ODBC Access Method opening routine. */ /* New method now that this routine is called recursively (last table */ @@ -1097,8 +766,6 @@ int TDBODBC::ReadDB(PGLOBAL g) if (trace > 1) htrc("ODBC ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode); - //htrc("ODBC ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p\n", - // GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex); if (Mode == MODE_UPDATE || Mode == MODE_DELETE) { if (!Query && MakeCommand(g)) @@ -1118,12 +785,6 @@ int TDBODBC::ReadDB(PGLOBAL g) } // endif Mode - //if (To_Kindex) { - // // Direct access of ODBC tables is not implemented yet - // strcpy(g->Message, MSG(NO_ODBC_DIRECT)); - // return RC_FX; - // } // endif To_Kindex - /*********************************************************************/ /* Now start the reading process. */ /* Here is the place to fetch the line(s). 
*/ @@ -1208,11 +869,6 @@ int TDBODBC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ void TDBODBC::CloseDB(PGLOBAL g) { -//if (To_Kindex) { -// To_Kindex->Close(); -// To_Kindex = NULL; -// } // endif - if (Ocp) Ocp->Close(); @@ -1231,16 +887,9 @@ ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) : EXTCOL(cdp, tdbp, cprec, i, am) { // Set additional ODBC access method information for column. -//Crp = NULL; -//Long = Precision; -//strcpy(F_Date, cdp->F_Date); -//To_Val = NULL; Slen = 0; StrLen = &Slen; Sqlbuf = NULL; -//Bufp = NULL; -//Blkp = NULL; -//Rank = 0; // Not known yet } // end of ODBCCOL constructor /***********************************************************************/ @@ -1248,17 +897,9 @@ ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) /***********************************************************************/ ODBCCOL::ODBCCOL(void) : EXTCOL() { -//Crp = NULL; -//Buf_Type = TYPE_INT; // This is a count(*) column -//// Set additional Dos access method information for column. -//Long = sizeof(int); -//To_Val = NULL; Slen = 0; StrLen = &Slen; Sqlbuf = NULL; -//Bufp = NULL; -//Blkp = NULL; -//Rank = 1; } // end of ODBCCOL constructor /***********************************************************************/ @@ -1267,66 +908,11 @@ ODBCCOL::ODBCCOL(void) : EXTCOL() /***********************************************************************/ ODBCCOL::ODBCCOL(ODBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp) { -//Crp = col1->Crp; -//Long = col1->Long; -//strcpy(F_Date, col1->F_Date); -//To_Val = col1->To_Val; Slen = col1->Slen; StrLen = col1->StrLen; Sqlbuf = col1->Sqlbuf; -//Bufp = col1->Bufp; -//Blkp = col1->Blkp; -//Rank = col1->Rank; } // end of ODBCCOL copy constructor -#if 0 -/***********************************************************************/ -/* SetBuffer: prepare a column block for write operation. */ -/***********************************************************************/ -bool ODBCCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) - { - if (!(To_Val = value)) { - sprintf(g->Message, MSG(VALUE_ERROR), Name); - return true; - } else if (Buf_Type == value->GetType()) { - // Values are of the (good) column type - if (Buf_Type == TYPE_DATE) { - // If any of the date values is formatted - // output format must be set for the receiving table - if (GetDomain() || ((DTVAL *)value)->IsFormatted()) - goto newval; // This will make a new value; - - } else if (Buf_Type == TYPE_DOUBLE) - // Float values must be written with the correct (column) precision - // Note: maybe this should be forced by ShowValue instead of this ? - value->SetPrec(GetScale()); - - Value = value; // Directly access the external value - } else { - // Values are not of the (good) column type - if (check) { - sprintf(g->Message, MSG(TYPE_VALUE_ERR), Name, - GetTypeName(Buf_Type), GetTypeName(value->GetType())); - return true; - } // endif check - - newval: - if (InitValue(g)) // Allocate the matching value block - return true; - - } // endif's Value, Buf_Type - - // Because Colblk's have been made from a copy of the original TDB in - // case of Update, we must reset them to point to the original one. - if (To_Tdb->GetOrig()) - To_Tdb = (PTDB)To_Tdb->GetOrig(); - - // Set the Column - Status = (ok) ? 
BUF_EMPTY : BUF_NO; - return false; - } // end of SetBuffer -#endif // 0 - /***********************************************************************/ /* ReadColumn: when SQLFetch is used there is nothing to do as the */ /* column buffer was bind to the record set. This is also the case */ diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h index fcefad5647b..7c65a8ea7e5 100644 --- a/storage/connect/tabodbc.h +++ b/storage/connect/tabodbc.h @@ -1,7 +1,7 @@ /*************** Tabodbc H Declares Source Code File (.H) **************/ -/* Name: TABODBC.H Version 1.8 */ +/* Name: TABODBC.H Version 1.9 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2000-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2000-2017 */ /* */ /* This file contains the TDBODBC classes declares. */ /***********************************************************************/ @@ -33,14 +33,7 @@ public: // Implementation virtual const char *GetType(void) {return "ODBC";} PSZ GetConnect(void) {return Connect;} - //PSZ GetTabname(void) {return Tabname;} - //PSZ GetTabschema(void) {return Tabschema;} - //PSZ GetTabcat(void) {return Tabcat;} - //PSZ GetSrcdef(void) {return Srcdef;} - //char GetSep(void) {return (Sep) ? *Sep : 0;} - //int GetQuoted(void) {return Quoted;} int GetCatver(void) {return Catver;} - //int GetOptions(void) {return Options;} // Methods virtual int Indexable(void) {return 2;} @@ -50,27 +43,7 @@ public: protected: // Members PSZ Connect; /* ODBC connection string */ - //PSZ Tabname; /* External table name */ - //PSZ Tabschema; /* External table schema */ - //PSZ Username; /* User connect name */ - //PSZ Password; /* Password connect info */ - //PSZ Tabcat; /* External table catalog */ - //PSZ Tabtyp; /* Catalog table type */ - //PSZ Colpat; /* Catalog column pattern */ - //PSZ Srcdef; /* The source table SQL definition */ - //PSZ Qchar; /* Identifier quoting character */ - //PSZ Qrystr; /* The original query */ - //PSZ Sep; /* Decimal separator */ int Catver; /* ODBC version for catalog functions */ - //int Options; /* Open connection options */ - //int Cto; /* Open connection timeout */ - //int Qto; /* Query (command) timeout */ - //int Quoted; /* Identifier quoting level */ - //int Maxerr; /* Maxerr for an Exec table */ - //int Maxres; /* Maxres for a catalog table */ - //int Memory; /* Put result set in memory */ - //bool Scrollable; /* Use scrollable cursor */ - //bool Xsrc; /* Execution type */ bool UseCnc; /* Use SQLConnect (!SQLDriverConnect) */ }; // end of ODBCDEF @@ -96,20 +69,16 @@ class TDBODBC : public TDBEXT { // Methods virtual PTDB Clone(PTABS t); -//virtual int GetRecpos(void); virtual bool SetRecpos(PGLOBAL g, int recpos); virtual PSZ GetFile(PGLOBAL g); virtual void SetFile(PGLOBAL g, PSZ fn); virtual void ResetSize(void); -//virtual int GetAffectedRows(void) {return AftRows;} virtual PSZ GetServer(void) {return "ODBC";} virtual int Indexable(void) {return 2;} // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); virtual int Cardinality(PGLOBAL g); -//virtual int GetMaxSize(PGLOBAL g); -//virtual int GetProgMax(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); virtual int WriteDB(PGLOBAL g); @@ -119,14 +88,8 @@ class TDBODBC : public TDBEXT { protected: // Internal functions -//int Decode(char *utf, char *buf, size_t n); -//bool MakeSQL(PGLOBAL g, bool cnt); bool MakeInsert(PGLOBAL g); -//virtual bool MakeCommand(PGLOBAL g); -//bool MakeFilter(PGLOBAL g, bool c); bool BindParameters(PGLOBAL g); -//char 
*MakeUpdate(PGLOBAL g); -//char *MakeDelete(PGLOBAL g); // Members ODBConn *Ocp; // Points to an ODBC connection class @@ -151,9 +114,6 @@ class ODBCCOL : public EXTCOL { // Implementation virtual int GetAmType(void) {return TYPE_AM_ODBC;} SQLLEN *GetStrLen(void) {return StrLen;} -// int GetRank(void) {return Rank;} -// PVBLK GetBlkp(void) {return Blkp;} -// void SetCrp(PCOLRES crp) {Crp = crp;} // Methods //virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); @@ -162,7 +122,6 @@ class ODBCCOL : public EXTCOL { void AllocateBuffers(PGLOBAL g, int rows); void *GetBuffer(DWORD rows); SWORD GetBuflen(void); -// void Print(PGLOBAL g, FILE *, uint); protected: // Constructor for count(*) column @@ -170,14 +129,8 @@ class ODBCCOL : public EXTCOL { // Members TIMESTAMP_STRUCT *Sqlbuf; // To get SQL_TIMESTAMP's -//PCOLRES Crp; // To storage result -//void *Bufp; // To extended buffer -//PVBLK Blkp; // To Value Block -//char F_Date[12]; // Internal Date format -//PVAL To_Val; // To value used for Insert SQLLEN *StrLen; // As returned by ODBC SQLLEN Slen; // Used with Fetch -//int Rank; // Rank (position) number in the query }; // end of class ODBCCOL /***********************************************************************/ @@ -230,7 +183,6 @@ class XSRCCOL : public ODBCCOL { XSRCCOL(XSRCCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation -//virtual int GetAmType(void) {return TYPE_AM_ODBC;} // Methods virtual void ReadColumn(PGLOBAL g); diff --git a/storage/connect/tabpivot.cpp b/storage/connect/tabpivot.cpp index c6d32884417..f24929eb2bb 100644 --- a/storage/connect/tabpivot.cpp +++ b/storage/connect/tabpivot.cpp @@ -108,21 +108,25 @@ bool PIVAID::SkipColumn(PCOLRES crp, char *skc) PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) { char *p, *query, *colname, *skc, buf[64]; - int rc, ndif, nblin, w = 0; + int ndif, nblin, w = 0; bool b = false; PVAL valp; PQRYRES qrp; PCOLRES *pcrp, crp, fncrp = NULL; - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level +#if defined(USE_TRY) + try { +#else // !USE_TRY + // Save stack and allocation environment and prepare error return + if (g->jump_level == MAX_JUMP) { + strcpy(g->Message, MSG(TOO_MANY_JUMPS)); + return NULL; + } // endif jump_level - if ((rc= setjmp(g->jumper[++g->jump_level])) != 0) { - goto err; - } // endif rc + if (setjmp(g->jumper[++g->jump_level])) { + goto err; + } // endif rc +#endif // !USE_TRY // Are there columns to skip? 
if (Skcol) { @@ -145,7 +149,7 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) sprintf(query, "SELECT * FROM `%s` LIMIT 1", Tabname); } else if (!Tabsrc) { strcpy(g->Message, MSG(SRC_TABLE_UNDEF)); - return NULL; + goto err; } else query = Tabsrc; @@ -160,7 +164,7 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) Myc.FreeResult(); } else - return NULL; + goto err; // Send the source command to MySQL if (Myc.ExecSQL(g, query, &w) == RC_FX) @@ -233,18 +237,18 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) Index.Sub = TRUE; // Should be small enough if (!PlgDBalloc(g, NULL, Index)) - return NULL; + goto err; Offset.Size = (nblin + 1) * sizeof(int); Offset.Sub = TRUE; // Should be small enough if (!PlgDBalloc(g, NULL, Offset)) - return NULL; + goto err; ndif = Qsort(g, nblin); if (ndif < 0) // error - return NULL; + goto err; } else { // The query was limited, we must get pivot column values @@ -278,7 +282,7 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) if (!(valp = AllocateValue(g, Rblkp->GetType(), Rblkp->GetVlen(), Rblkp->GetPrec()))) - return NULL; + goto err; // Now make the functional columns for (int i = 0; i < ndif; i++) { @@ -306,9 +310,25 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) // We added ndif columns and removed 2 (picol and fncol) Qryp->Nbcol += (ndif - 2); - return Qryp; +#if defined(USE_TRY) + return Qryp; + + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch err: +#else // !USE_TRY + g->jump_level--; + return Qryp; + +err: + g->jump_level--; +#endif // !USE_TRY + if (b) Myc.Close(); diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp index 2ddd1c3c753..d2f5cdb2780 100644 --- a/storage/connect/tabsys.cpp +++ b/storage/connect/tabsys.cpp @@ -1,9 +1,9 @@ /************* TabSys C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABSYS */ /* ------------- */ -/* Version 2.3 */ +/* Version 2.4 */ /* */ -/* Author Olivier BERTRAND 2004-2015 */ +/* Author Olivier BERTRAND 2004-2017 */ /* */ /* This program are the INI/CFG tables classes. */ /***********************************************************************/ @@ -511,12 +511,20 @@ void INICOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], 31); - } else if (Flag == 1) { +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (Flag == 1) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_SEC_UPDATE)); - longjmp(g->jumper[g->jump_level], 31); - } else if (*p) { +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (*p) { tdbp->Section = p; } else tdbp->Section = NULL; @@ -524,8 +532,12 @@ void INICOL::WriteColumn(PGLOBAL g) return; } else if (!tdbp->Section) { strcpy(g->Message, MSG(SEC_NAME_FIRST)); - longjmp(g->jumper[g->jump_level], 31); - } // endif's +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } // endif's /*********************************************************************/ /* Updating must be done only when not in checking pass. 
*/ @@ -536,8 +548,12 @@ void INICOL::WriteColumn(PGLOBAL g) if (!rc) { sprintf(g->Message, "Error %d writing to %s", GetLastError(), tdbp->Ifile); - longjmp(g->jumper[g->jump_level], 31); - } // endif rc +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } // endif rc } // endif Status @@ -837,12 +853,20 @@ void XINCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], 31); - } else if (Flag == 1) { +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (Flag == 1) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_SEC_UPDATE)); - longjmp(g->jumper[g->jump_level], 31); - } else if (*p) { +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (*p) { tdbp->Section = p; } else tdbp->Section = NULL; @@ -851,8 +875,12 @@ void XINCOL::WriteColumn(PGLOBAL g) } else if (Flag == 2) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_KEY_UPDATE)); - longjmp(g->jumper[g->jump_level], 31); - } else if (*p) { +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } else if (*p) { tdbp->Keycur = p; } else tdbp->Keycur = NULL; @@ -860,8 +888,12 @@ void XINCOL::WriteColumn(PGLOBAL g) return; } else if (!tdbp->Section || !tdbp->Keycur) { strcpy(g->Message, MSG(SEC_KEY_FIRST)); - longjmp(g->jumper[g->jump_level], 31); - } // endif's +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } // endif's /*********************************************************************/ /* Updating must be done only when not in checking pass. */ @@ -872,8 +904,12 @@ void XINCOL::WriteColumn(PGLOBAL g) if (!rc) { sprintf(g->Message, "Error %d writing to %s", GetLastError(), tdbp->Ifile); - longjmp(g->jumper[g->jump_level], 31); - } // endif rc +#if defined(USE_TRY) + throw 31; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 31); +#endif // !USE_TRY + } // endif rc } // endif Status diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp index 282fb55a43c..902137ead2e 100644 --- a/storage/connect/tabvct.cpp +++ b/storage/connect/tabvct.cpp @@ -1,11 +1,11 @@ /************* TabVct C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABVCT */ /* ------------- */ -/* Version 3.8 */ +/* Version 3.9 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1999-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 1999-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -490,15 +490,23 @@ void VCTCOL::ReadBlock(PGLOBAL g) #if defined(_DEBUG) if (!Blk) { strcpy(g->Message, MSG(TO_BLK_IS_NULL)); - longjmp(g->jumper[g->jump_level], 58); - } // endif +#if defined(USE_TRY) + throw 58; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 58); +#endif // !USE_TRY + } // endif #endif /*********************************************************************/ /* Read column block according to used access method. 
*/ /*********************************************************************/ if (txfp->ReadBlock(g, this)) - longjmp(g->jumper[g->jump_level], 6); +#if defined(USE_TRY) + throw 6; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 6); +#endif // !USE_TRY ColBlk = txfp->CurBlk; ColPos = -1; // Any invalid position @@ -518,15 +526,23 @@ void VCTCOL::WriteBlock(PGLOBAL g) #if defined(_DEBUG) if (!Blk) { strcpy(g->Message, MSG(BLK_IS_NULL)); - longjmp(g->jumper[g->jump_level], 56); - } // endif +#if defined(USE_TRY) + throw 56; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 56); +#endif // !USE_TRY + } // endif #endif /*******************************************************************/ /* Write column block according to used access method. */ /*******************************************************************/ if (txfp->WriteBlock(g, this)) - longjmp(g->jumper[g->jump_level], 6); +#if defined(USE_TRY) + throw 6; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 6); +#endif // !USE_TRY Modif = 0; } // endif Modif diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp index 155c71fe268..dc57c9f3538 100644 --- a/storage/connect/tabvir.cpp +++ b/storage/connect/tabvir.cpp @@ -1,6 +1,6 @@ /************* tdbvir C++ Program Source Code File (.CPP) **************/ -/* PROGRAM NAME: tdbvir.cpp Version 1.1 */ -/* (C) Copyright to the author Olivier BERTRAND 2014 */ +/* PROGRAM NAME: tdbvir.cpp Version 1.2 */ +/* (C) Copyright to the author Olivier BERTRAND 2014-2017 */ /* This program are the VIR classes DB execution routines. */ /***********************************************************************/ @@ -289,8 +289,12 @@ void VIRCOL::ReadColumn(PGLOBAL g) { // This should never be called sprintf(g->Message, "ReadColumn: Column %s is not virtual", Name); - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); - } // end of ReadColumn +#if defined(USE_TRY) + throw TYPE_COLBLK; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_COLBLK); +#endif // !USE_TRY +} // end of ReadColumn /* ---------------------------TDBVICL class -------------------------- */ diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index 52cf3d3812f..a895ffbd839 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -1,9 +1,9 @@ /************* Tabxml C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABXML */ /* ------------- */ -/* Version 2.9 */ +/* Version 3.0 */ /* */ -/* Author Olivier BERTRAND 2007 - 2016 */ +/* Author Olivier BERTRAND 2007 - 2017 */ /* */ /* This program are the XML tables classes using MS-DOM or libxml2. 
*/ /***********************************************************************/ @@ -1314,8 +1314,12 @@ void TDBXML::CloseDB(PGLOBAL g) Docp->CloseDoc(g, To_Xb); // This causes a crash in Diagnostics_area::set_error_status -// longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif DumpDoc +//#if defined(USE_TRY) +// throw TYPE_AM_XML; +//#else // !USE_TRY +// longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +//#endif // !USE_TRY + } // endif DumpDoc } // endif Changed @@ -1637,8 +1641,12 @@ void XMLCOL::ReadColumn(PGLOBAL g) if (ValNode->GetType() != XML_ELEMENT_NODE && ValNode->GetType() != XML_ATTRIBUTE_NODE) { sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif type +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } // endif type // Get the Xname value from the XML file switch (ValNode->GetContent(g, Valbuf, Long + 1)) { @@ -1648,8 +1656,12 @@ void XMLCOL::ReadColumn(PGLOBAL g) PushWarning(g, Tdbp); break; default: - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endswitch +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } // endswitch Value->SetValue_psz(Valbuf); } else { @@ -1699,7 +1711,11 @@ void XMLCOL::WriteColumn(PGLOBAL g) /* For columns having an Xpath, the Clist must be updated. */ /*********************************************************************/ if (Tdbp->CheckRow(g, Nod || Tdbp->Colname)) - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY /*********************************************************************/ /* Null values are represented by no node. */ @@ -1771,8 +1787,16 @@ void XMLCOL::WriteColumn(PGLOBAL g) if (ColNode == NULL) { strcpy(g->Message, MSG(COL_ALLOC_ERR)); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif ColNode +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY +#endif // !USE_TRY + } // endif ColNode } // endif ColNode @@ -1800,8 +1824,12 @@ void XMLCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } else +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } else strcpy(Valbuf, p); /*********************************************************************/ @@ -1850,8 +1878,12 @@ void XMULCOL::ReadColumn(PGLOBAL g) if (ValNode->GetType() != XML_ELEMENT_NODE && ValNode->GetType() != XML_ATTRIBUTE_NODE) { sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif type +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } // endif type // Get the Xname value from the XML file switch (ValNode->GetContent(g, p, (b ? Long : len))) { @@ -1936,7 +1968,11 @@ void XMULCOL::WriteColumn(PGLOBAL g) /* For columns having an Xpath, the Clist must be updated. 
*/ /*********************************************************************/ if (Tdbp->CheckRow(g, Nod)) - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY /*********************************************************************/ /* Find the column and value nodes to update or insert. */ @@ -1985,8 +2021,12 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (len > 1 && !Tdbp->Xpand) { sprintf(g->Message, MSG(BAD_VAL_UPDATE), Name); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } else +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } else ValNode = Nlx->GetItem(g, Tdbp->Nsub, Vxnp); } else // Inod != Nod @@ -2027,8 +2067,12 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (ColNode == NULL) { strcpy(g->Message, MSG(COL_ALLOC_ERR)); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif ColNode +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } // endif ColNode } // endif ColNode @@ -2056,8 +2100,12 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } else +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } else strcpy(Valbuf, p); /*********************************************************************/ @@ -2088,8 +2136,12 @@ void XPOSCOL::ReadColumn(PGLOBAL g) if (Tdbp->Clist == NULL) { strcpy(g->Message, MSG(MIS_TAG_LIST)); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif Clist +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } // endif Clist if ((ValNode = Tdbp->Clist->GetItem(g, Rank, Vxnp))) { // Get the column value from the XML file @@ -2100,8 +2152,12 @@ void XPOSCOL::ReadColumn(PGLOBAL g) PushWarning(g, Tdbp); break; default: - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endswitch +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } // endswitch Value->SetValue_psz(Valbuf); } else { @@ -2151,15 +2207,23 @@ void XPOSCOL::WriteColumn(PGLOBAL g) /* For all columns the Clist must be updated. */ /*********************************************************************/ if (Tdbp->CheckRow(g, true)) - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY /*********************************************************************/ /* Find the column and value nodes to update or insert. 
*/ /*********************************************************************/ if (Tdbp->Clist == NULL) { strcpy(g->Message, MSG(MIS_TAG_LIST)); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } // endif Clist +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } // endif Clist n = Tdbp->Clist->GetLength(); k = Rank; @@ -2183,8 +2247,12 @@ void XPOSCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); - } else +#if defined(USE_TRY) + throw TYPE_AM_XML; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_AM_XML); +#endif // !USE_TRY + } else strcpy(Valbuf, p); /*********************************************************************/ diff --git a/storage/connect/valblk.cpp b/storage/connect/valblk.cpp index 5fefcba5856..4331962e0ad 100644 --- a/storage/connect/valblk.cpp +++ b/storage/connect/valblk.cpp @@ -1,7 +1,7 @@ /************ Valblk C++ Functions Source Code File (.CPP) *************/ -/* Name: VALBLK.CPP Version 2.1 */ +/* Name: VALBLK.CPP Version 2.2 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ /* */ /* This file contains the VALBLK and derived classes functions. */ /* Second family is VALBLK, representing simple suballocated arrays */ @@ -138,8 +138,12 @@ PSZ VALBLK::GetCharValue(int) assert(g); sprintf(g->Message, MSG(NO_CHAR_FROM), Type); - longjmp(g->jumper[g->jump_level], Type); - return NULL; +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + return NULL; } // end of GetCharValue /***********************************************************************/ @@ -206,8 +210,12 @@ void VALBLK::ChkIndx(int n) if (n < 0 || n >= Nval) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_VALBLK_INDX)); - longjmp(g->jumper[g->jump_level], Type); - } // endif n +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + } // endif n } // end of ChkIndx @@ -216,8 +224,12 @@ void VALBLK::ChkTyp(PVAL v) if (Check && (Type != v->GetType() || Unsigned != v->IsUnsigned())) { PGLOBAL& g = Global; strcpy(g->Message, MSG(VALTYPE_NOMATCH)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Type +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + } // endif Type } // end of ChkTyp @@ -226,8 +238,12 @@ void VALBLK::ChkTyp(PVBLK vb) if (Check && (Type != vb->GetType() || Unsigned != vb->IsUnsigned())) { PGLOBAL& g = Global; strcpy(g->Message, MSG(VALTYPE_NOMATCH)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Type +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + } // endif Type } // end of ChkTyp @@ -342,8 +358,12 @@ void TYPBLK::SetValue(PSZ p, int n) if (Check) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_SET_STRING)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Check +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + } // endif Check bool minus; ulonglong maxval = MaxVal(); @@ -392,8 +412,12 @@ void TYPBLK::SetValue(PSZ p, int n) if (Check) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_SET_STRING)); - longjmp(g->jumper[g->jump_level], Type); - } // 
endif Check +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + } // endif Check Typp[n] = atof(p); SetNull(n, false); @@ -795,8 +819,12 @@ void CHRBLK::SetValue(char *sp, uint len, int n) if (Check && (signed)len > Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(SET_STR_TRUNC)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Check +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + } // endif Check #endif // _DEBUG if (sp) @@ -823,8 +851,12 @@ void CHRBLK::SetValue(PVBLK pv, int n1, int n2) if (Type != pv->GetType() || Long != ((CHRBLK*)pv)->Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BLKTYPLEN_MISM)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Type +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + } // endif Type if (!(b = pv->IsNull(n2))) memcpy(Chrp + n1 * Long, ((CHRBLK*)pv)->Chrp + n2 * Long, Long); @@ -874,8 +906,12 @@ void CHRBLK::SetValues(PVBLK pv, int k, int n) if (Type != pv->GetType() || Long != ((CHRBLK*)pv)->Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BLKTYPLEN_MISM)); - longjmp(g->jumper[g->jump_level], Type); - } // endif Type +#if defined(USE_TRY) + throw Type; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY + } // endif Type #endif // _DEBUG char *p = ((CHRBLK*)pv)->Chrp; diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index ced690e77c0..fec38217b0b 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -1,7 +1,7 @@ /************* Value C++ Functions Source Code File (.CPP) *************/ -/* Name: VALUE.CPP Version 2.6 */ +/* Name: VALUE.CPP Version 2.7 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2001-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2001-2017 */ /* */ /* This file contains the VALUE and derived classes family functions. */ /* These classes contain values of different types. They are used so */ @@ -57,10 +57,17 @@ /* Check macro's. 
*/ /***********************************************************************/ #if defined(_DEBUG) +#if defined(USE_TRY) #define CheckType(V) if (Type != V->GetType()) { \ PGLOBAL& g = Global; \ strcpy(g->Message, MSG(VALTYPE_NOMATCH)); \ - longjmp(g->jumper[g->jump_level], Type); } + throw Type; +#else // !USE_TRY +#define CheckType(V) if (Type != V->GetType()) { \ + PGLOBAL& g = Global; \ + strcpy(g->Message, MSG(VALTYPE_NOMATCH)); \ + longjmp(g->jumper[g->jump_level], Type); +#endif // !USE_TRY #else #define CheckType(V) #endif @@ -1019,12 +1026,20 @@ TYPE TYPVAL::SafeAdd(TYPE n1, TYPE n2) if ((n2 > 0) && (n < n1)) { // Overflow strcpy(g->Message, MSG(FIX_OVFLW_ADD)); - longjmp(g->jumper[g->jump_level], 138); - } else if ((n2 < 0) && (n > n1)) { +#if defined(USE_TRY) + throw 138; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 138); +#endif // !USE_TRY + } else if ((n2 < 0) && (n > n1)) { // Underflow strcpy(g->Message, MSG(FIX_UNFLW_ADD)); - longjmp(g->jumper[g->jump_level], 138); - } // endif's n2 +#if defined(USE_TRY) + throw 138; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 138); +#endif // !USE_TRY + } // endif's n2 return n; } // end of SafeAdd @@ -1047,12 +1062,20 @@ TYPE TYPVAL::SafeMult(TYPE n1, TYPE n2) if (n > MinMaxVal(true)) { // Overflow strcpy(g->Message, MSG(FIX_OVFLW_TIMES)); - longjmp(g->jumper[g->jump_level], 138); - } else if (n < MinMaxVal(false)) { +#if defined(USE_TRY) + throw 138; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 138); +#endif // !USE_TRY + } else if (n < MinMaxVal(false)) { // Underflow strcpy(g->Message, MSG(FIX_UNFLW_TIMES)); - longjmp(g->jumper[g->jump_level], 138); - } // endif's n2 +#if defined(USE_TRY) + throw 138; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 138); +#endif // !USE_TRY + } // endif's n2 return (TYPE)n; } // end of SafeMult @@ -1432,8 +1455,16 @@ void TYPVAL::SetValue(int n) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); - longjmp(g->jumper[g->jump_level], 138); - } else +#if defined(USE_TRY) + throw 138; +#else // !USE_TRY +#if defined(USE_TRY) + throw 138; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 138); +#endif // !USE_TRY +#endif // !USE_TRY + } else SetValue_psz(buf); Null = false; @@ -1486,8 +1517,12 @@ void TYPVAL::SetValue(longlong n) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); - longjmp(g->jumper[g->jump_level], 138); - } else +#if defined(USE_TRY) + throw 138; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 138); +#endif // !USE_TRY + } else SetValue_psz(buf); Null = false; @@ -1529,8 +1564,12 @@ void TYPVAL::SetValue(double f) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); - longjmp(g->jumper[g->jump_level], 138); - } else +#if defined(USE_TRY) + throw 138; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 138); +#endif // !USE_TRY + } else SetValue_psz(buf); Null = false; diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 15fb71ab88a..a2ddf468e5a 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -1,7 +1,7 @@ /***************** Xindex C++ Class Xindex Code (.CPP) *****************/ -/* Name: XINDEX.CPP Version 2.9 */ +/* Name: XINDEX.CPP Version 3.0 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2017 */ /* */ /* This file contains the class XINDEX implementation code. 
*/ /***********************************************************************/ @@ -446,8 +446,12 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) #if 0 if (!dup->Step) { strcpy(g->Message, MSG(QUERY_CANCELLED)); - longjmp(g->jumper[g->jump_level], 99); - } // endif Step +#if defined(USE_TRY) + throw 99; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], 99); +#endif // !USE_TRY + } // endif Step #endif // 0 /*******************************************************************/ diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index a0b7849543d..7d1b6b9c992 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -1,7 +1,7 @@ /************ Xobject C++ Functions Source Code File (.CPP) ************/ -/* Name: XOBJECT.CPP Version 2.4 */ +/* Name: XOBJECT.CPP Version 2.5 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2014 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ /* */ /* This file contains base XOBJECT class functions. */ /* Also here is the implementation of the CONSTANT class. */ @@ -84,7 +84,11 @@ double XOBJECT::GetFloatValue(void) CONSTANT::CONSTANT(PGLOBAL g, void *value, short type) { if (!(Value = AllocateValue(g, value, (int)type))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); +#if defined(USE_TRY) + throw TYPE_CONST; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_CONST); +#endif // !USE_TRY Constant = true; } // end of CONSTANT constructor @@ -95,7 +99,11 @@ CONSTANT::CONSTANT(PGLOBAL g, void *value, short type) CONSTANT::CONSTANT(PGLOBAL g, int n) { if (!(Value = AllocateValue(g, &n, TYPE_INT))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); +#if defined(USE_TRY) + throw TYPE_CONST; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_CONST); +#endif // !USE_TRY Constant = true; } // end of CONSTANT constructor @@ -117,7 +125,11 @@ void CONSTANT::Convert(PGLOBAL g, int newtype) { if (Value->GetType() != newtype) if (!(Value = AllocateValue(g, Value, newtype))) - longjmp(g->jumper[g->jump_level], TYPE_CONST); +#if defined(USE_TRY) + throw TYPE_CONST; +#else // !USE_TRY + longjmp(g->jumper[g->jump_level], TYPE_CONST); +#endif // !USE_TRY } // end of Convert -- cgit v1.2.1 From 5bc538dd8548e669480c91b7510ef914c442d1f0 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 11 Mar 2017 19:35:03 +0100 Subject: Commit the 2 last commits merged from 10.1 --- storage/connect/ha_connect.cc | 4 +- storage/connect/myconn.cpp | 39 ++- storage/connect/myutil.cpp | 13 +- storage/connect/tabmul.cpp | 632 +++++++++++++++++++++++++----------------- storage/connect/tabmul.h | 58 ++-- 5 files changed, 455 insertions(+), 291 deletions(-) (limited to 'storage') diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 930e7604fbd..2cd581c5c64 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -172,7 +172,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.05.0003 February 27, 2017"; + char version[]= "Version 1.05.0003 March 7, 2017"; #if defined(__WIN__) char compver[]= "Version 1.05.0003 " __DATE__ " " __TIME__; char slash= '\\'; @@ -509,7 +509,7 @@ ha_create_table_option connect_table_option_list[]= HA_TOPTION_NUMBER("LRECL", lrecl, 0, 0, INT_MAX32, 1), HA_TOPTION_NUMBER("BLOCK_SIZE", elements, 0, 0, INT_MAX32, 1), //HA_TOPTION_NUMBER("ESTIMATE", estimate, 0, 0, INT_MAX32, 1), - HA_TOPTION_NUMBER("MULTIPLE", multiple, 0, 0, 2, 1), + HA_TOPTION_NUMBER("MULTIPLE", multiple, 0, 0, 3, 1), 
HA_TOPTION_NUMBER("HEADER", header, 0, 0, 3, 1), HA_TOPTION_NUMBER("QUOTED", quoted, (ulonglong) -1, 0, 3, 1), HA_TOPTION_NUMBER("ENDING", ending, (ulonglong) -1, 0, INT_MAX32, 1), diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp index d05254a32a6..f7cd245df59 100644 --- a/storage/connect/myconn.cpp +++ b/storage/connect/myconn.cpp @@ -135,10 +135,12 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, FLD_KEY, FLD_SCALE, FLD_RADIX, FLD_NULL, FLD_REM, FLD_NO, FLD_DEFAULT, FLD_EXTRA, FLD_CHARSET}; - unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0}; - char *fld, *colname, *chset, *fmt, v, buf[128], uns[16], zero[16]; + //unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0}; + unsigned int length[] = {0, 4, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0}; + char *fld, *colname, *chset, *fmt, v, buf[128], uns[16], zero[16]; int i, n, nf, ncol = sizeof(buftyp) / sizeof(int); int len, type, prec, rc, k = 0; + bool b; PQRYRES qrp; PCOLRES crp; MYSQLC myc; @@ -157,7 +159,7 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, /* Do an evaluation of the result size. */ /********************************************************************/ STRING cmd(g, 64, "SHOW FULL COLUMNS FROM "); - bool b = cmd.Append((PSZ)table); + b = cmd.Append((PSZ)table); b |= cmd.Append(" FROM "); b |= cmd.Append((PSZ)(db ? db : PlgGetUser(g)->DBName)); @@ -232,11 +234,31 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, fld = myc.GetCharField(1); prec = 0; len = 0; - v = (chset && !strcmp(chset, "binary")) ? 'B' : 0; +// v = (chset && !strcmp(chset, "binary")) ? 'B' : 0; + v = 0; *uns = 0; *zero = 0; - - switch ((nf = sscanf(fld, "%[^(](%d,%d", buf, &len, &prec))) { + b = false; + + if (!strnicmp(fld, "enum", 4)) { + char *p2, *p1 = fld + 6; // to skip enum(' + + while (true) { + p2 = strchr(p1, '\''); + len = MY_MAX(len, p2 - p1); + if (*++p2 != ',') break; + p1 = p2 + 2; + } // endwhile + + v = (len > 255) ? 
'V' : 0; + strcpy(buf, "enum"); + b = true; + } else if (!strnicmp(fld, "set", 3)) { + len = (int)strlen(fld) - 2; + v = 'V'; + strcpy(buf, "set"); + b = true; + } else switch ((nf = sscanf(fld, "%[^(](%d,%d", buf, &len, &prec))) { case 3: nf = sscanf(fld, "%[^(](%d,%d) %s %s", buf, &len, &prec, uns, zero); break; @@ -271,7 +293,7 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, colname, len); PushWarning(g, thd); v = 'V'; - } else + } else len = MY_MIN(len, 4096); } // endif type @@ -286,6 +308,9 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, default: crp->Nulls[i] = v; break; } // endswitch nf + if (b) // enum or set + nf = sscanf(fld, "%s ", buf); // get values + crp = crp->Next; // Type_Name crp->Kdata->SetValue(buf, i); diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp index d4416e188c8..5fcc914f632 100644 --- a/storage/connect/myutil.cpp +++ b/storage/connect/myutil.cpp @@ -42,7 +42,8 @@ int MYSQLtoPLG(char *typname, char *var) type = TYPE_INT; else if (!stricmp(typname, "smallint")) type = TYPE_SHORT; - else if (!stricmp(typname, "char") || !stricmp(typname, "varchar")) + else if (!stricmp(typname, "char") || !stricmp(typname, "varchar") || + !stricmp(typname, "enum") || !stricmp(typname, "set")) type = TYPE_STRING; else if (!stricmp(typname, "double") || !stricmp(typname, "float") || !stricmp(typname, "real")) @@ -87,10 +88,12 @@ int MYSQLtoPLG(char *typname, char *var) else if (!stricmp(typname, "year")) *var = 'Y'; - } else if (type == TYPE_STRING && !stricmp(typname, "varchar")) - // This is to make the difference between CHAR and VARCHAR - *var = 'V'; - else if (type == TYPE_ERROR && xconv == TPC_SKIP) + } else if (type == TYPE_STRING) { + if (!stricmp(typname, "varchar")) + // This is to make the difference between CHAR and VARCHAR + *var = 'V'; + + } else if (type == TYPE_ERROR && xconv == TPC_SKIP) *var = 'K'; else *var = 0; diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index 8f1e6f3819d..4128fea6afc 100644 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -1,7 +1,7 @@ /************* TabMul C++ Program Source Code File (.CPP) **************/ /* PROGRAM NAME: TABMUL */ /* ------------- */ -/* Version 1.8 */ +/* Version 1.9 */ /* */ /* COPYRIGHT: */ /* ---------- */ @@ -44,6 +44,11 @@ #define __MFC_COMPAT__ // To define min/max as macro #endif //#include +#if defined(PATHMATCHSPEC) +#include "Shlwapi.h" +//using namespace std; +#pragma comment(lib,"shlwapi.lib") +#endif // PATHMATCHSPEC #else #if defined(UNIX) #include @@ -124,9 +129,10 @@ bool TDBMUL::InitFileNames(PGLOBAL g) { #define PFNZ 4096 #define FNSZ (_MAX_DRIVE+_MAX_DIR+_MAX_FNAME+_MAX_EXT) - char *pfn[PFNZ]; - char *filename; - int rc, n = 0; + PTDBDIR dirp; + PSZ pfn[PFNZ]; + PSZ filename; + int rc, n = 0; if (trace) htrc("in InitFileName: fn[]=%d\n", FNSZ); @@ -141,115 +147,39 @@ bool TDBMUL::InitFileNames(PGLOBAL g) if (trace) htrc("InitFileName: fn='%s'\n", filename); - if (Mul == 1) { + if (Mul != 2) { /*******************************************************************/ /* To_File is a multiple name with special characters */ /*******************************************************************/ -#if defined(__WIN__) - char drive[_MAX_DRIVE], direc[_MAX_DIR]; - WIN32_FIND_DATA FileData; - HANDLE hSearch; - - _splitpath(filename, drive, direc, NULL, NULL); - - // Start searching files in the target directory. 
- hSearch = FindFirstFile(filename, &FileData); - - if (hSearch == INVALID_HANDLE_VALUE) { - rc = GetLastError(); - - if (rc != ERROR_FILE_NOT_FOUND) { - FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, GetLastError(), 0, - (LPTSTR)&filename, sizeof(filename), NULL); - sprintf(g->Message, MSG(BAD_FILE_HANDLE), filename); - return true; - } // endif rc + if (Mul == 1) + dirp = new(g) TDBDIR(PlugDup(g, filename)); + else // Mul == 3 (Subdir) + dirp = new(g) TDBSDR(PlugDup(g, filename)); - goto suite; - } // endif hSearch + if (dirp->OpenDB(g)) + return true; - while (n < PFNZ) { - if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) { - strcat(strcat(strcpy(filename, drive), direc), FileData.cFileName); - pfn[n++] = PlugDup(g, filename); - } // endif dwFileAttributes - - if (!FindNextFile(hSearch, &FileData)) { - rc = GetLastError(); - - if (rc != ERROR_NO_MORE_FILES) { - sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc); - FindClose(hSearch); - return true; - } // endif rc - - break; - } // endif FindNextFile - - } // endwhile n - - // Close the search handle. - if (!FindClose(hSearch)) { - strcpy(g->Message, MSG(SRCH_CLOSE_ERR)); - return true; - } // endif FindClose + if (trace && Mul == 3) { + int nf = ((PTDBSDR)dirp)->FindInDir(g); + htrc("Number of files = %d\n", nf); + } // endif trace + while (true) + if ((rc = dirp->ReadDB(g)) == RC_OK) { +#if defined(__WIN__) + strcat(strcpy(filename, dirp->Drive), dirp->Direc); #else // !__WIN__ - struct stat fileinfo; - char fn[FN_REFLEN], direc[FN_REFLEN], pattern[FN_HEADLEN], ftype[FN_EXTLEN]; - DIR *dir; - struct dirent *entry; - - _splitpath(filename, NULL, direc, pattern, ftype); - strcat(pattern, ftype); - - if (trace) - htrc("direc=%s pattern=%s ftype=%s\n", direc, pattern, ftype); - - // Start searching files in the target directory. - if (!(dir = opendir(direc))) { - sprintf(g->Message, MSG(BAD_DIRECTORY), direc, strerror(errno)); - - if (trace) - htrc("%s\n", g->Message); - - return true; - } // endif dir - - if (trace) - htrc("dir opened: reading files\n"); - - while ((entry = readdir(dir)) && n < PFNZ) { - strcat(strcpy(fn, direc), entry->d_name); - - if (trace) - htrc("%s read\n", fn); - - if (lstat(fn, &fileinfo) < 0) { - sprintf(g->Message, "%s: %s", fn, strerror(errno)); - return true; - } else if (!S_ISREG(fileinfo.st_mode)) - continue; // Not a regular file (should test for links) - - /*******************************************************************/ - /* Test whether the file name matches the table name filter. */ - /*******************************************************************/ - if (fnmatch(pattern, entry->d_name, 0)) - continue; // Not a match - - strcat(strcpy(filename, direc), entry->d_name); - pfn[n++] = PlugDup(g, filename); - - if (trace) - htrc("Adding pfn[%d] %s\n", n, filename); + strcpy(filename, dirp->Direc); +#endif // !__WIN__ + strcat(strcat(filename, dirp->Fname), dirp->Ftype); + pfn[n++] = PlugDup(g, filename); + } else + break; - } // endwhile readdir + dirp->CloseDB(g); - // Close the dir handle. 
- closedir(dir); -#endif // !__WIN__ + if (rc == RC_FX) + return true; } else { /*******************************************************************/ @@ -297,10 +227,6 @@ bool TDBMUL::InitFileNames(PGLOBAL g) } // endif Mul -#if defined(__WIN__) - suite: -#endif - if (n) { Filenames = (char**)PlugSubAlloc(g, NULL, n * sizeof(char*)); @@ -581,7 +507,95 @@ void TDBMUL::CloseDB(PGLOBAL g) } // end of CloseDB -/* --------------------------- Class DIRDEF -------------------------- */ +#if 0 +/* ------------------------- Class TDBMSD ---------------------------- */ + + // Method +PTDB TDBMSD::Clone(PTABS t) +{ + PTDBMSD tp; + PGLOBAL g = t->G; // Is this really useful ??? + + tp = new(g) TDBMSD(this); + tp->Tdbp = Tdbp->Clone(t); + tp->Columns = tp->Tdbp->GetColumns(); + return tp; +} // end of Clone + +PTDB TDBMSD::Duplicate(PGLOBAL g) +{ + PTDBMSD tmup = new(g) TDBMSD(this); + + tmup->Tdbp = Tdbp->Duplicate(g); + return tmup; +} // end of Duplicate + +/***********************************************************************/ +/* Initializes the table filename list. */ +/* Note: tables created by concatenating the file components without */ +/* specifying the LRECL value (that should be restricted to _MAX_PATH)*/ +/* have a LRECL that is the sum of the lengths of all components. */ +/* This is why we use a big filename array to take care of that. */ +/***********************************************************************/ +bool TDBMSD::InitFileNames(PGLOBAL g) +{ +#define PFNZ 4096 +#define FNSZ (_MAX_DRIVE+_MAX_DIR+_MAX_FNAME+_MAX_EXT) + PTDBSDR dirp; + PSZ pfn[PFNZ]; + PSZ filename; + int rc, n = 0; + + if (trace) + htrc("in InitFileName: fn[]=%d\n", FNSZ); + + filename = (char*)PlugSubAlloc(g, NULL, FNSZ); + + // The sub table may need to refer to the Table original block + Tdbp->SetTable(To_Table); // Was not set at construction + + PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath()); + + if (trace) + htrc("InitFileName: fn='%s'\n", filename); + + dirp = new(g) TDBSDR(filename); + + if (dirp->OpenDB(g)) + return true; + + while (true) + if ((rc = dirp->ReadDB(g)) == RC_OK) { +#if defined(__WIN__) + strcat(strcpy(filename, dirp->Drive), dirp->Direc); +#else // !__WIN__ + strcpy(filename, dirp->Direc); +#endif // !__WIN__ + strcat(strcat(filename, dirp->Fname), dirp->Ftype); + pfn[n++] = PlugDup(g, filename); + } else + break; + + if (rc == RC_FX) + return true; + + if (n) { + Filenames = (char**)PlugSubAlloc(g, NULL, n * sizeof(char*)); + + for (int i = 0; i < n; i++) + Filenames[i] = pfn[i]; + + } else { + Filenames = (char**)PlugSubAlloc(g, NULL, sizeof(char*)); + Filenames[0] = NULL; + } // endif n + + NumFiles = n; + return false; +} // end of InitFileNames +#endif // 0 + + /* --------------------------- Class DIRDEF -------------------------- */ /***********************************************************************/ /* DefineAM: define specific AM block values from XDB file. */ @@ -616,57 +630,38 @@ PTDB DIRDEF::GetTable(PGLOBAL g, MODE) /***********************************************************************/ /* TABDIR constructors. 
*/ /***********************************************************************/ -TDBDIR::TDBDIR(PDIRDEF tdp) : TDBASE(tdp) - { - To_File = tdp->Fn; - iFile = 0; -#if defined(__WIN__) - memset(&FileData, 0, sizeof(_finddata_t)); - Hsearch = -1; - *Drive = '\0'; -#else // !__WIN__ - memset(&Fileinfo, 0, sizeof(struct stat)); - Entry = NULL; - Dir = NULL; - Done = false; - *Pattern = '\0'; -#endif // !__WIN__ - *Fpath = '\0'; - *Direc = '\0'; - *Fname = '\0'; - *Ftype = '\0'; - } // end of TDBDIR standard constructor - -TDBDIR::TDBDIR(PTDBDIR tdbp) : TDBASE(tdbp) - { - To_File = tdbp->To_File; - iFile = tdbp->iFile; +void TDBDIR::Init(void) +{ + iFile = 0; #if defined(__WIN__) - FileData = tdbp->FileData; - Hsearch = tdbp->Hsearch; - strcpy(Drive, tdbp->Drive); + Dvalp = NULL; + memset(&FileData, 0, sizeof(_finddata_t)); + hSearch = INVALID_HANDLE_VALUE; + *Drive = '\0'; #else // !__WIN__ - Fileinfo = tdbp->Fileinfo; - Entry = tdbp->Entry; - Dir = tdbp->Dir; - Done = tdbp->Done; - strcpy(Pattern, tdbp->Pattern); + memset(&Fileinfo, 0, sizeof(struct stat)); + Entry = NULL; + Dir = NULL; + Done = false; + *Pattern = '\0'; #endif // !__WIN__ - strcpy(Direc, tdbp->Direc); - strcpy(Fname, tdbp->Fname); - strcpy(Ftype, tdbp->Ftype); - } // end of TDBDIR copy constructor + *Fpath = '\0'; + *Direc = '\0'; + *Fname = '\0'; + *Ftype = '\0'; +} // end of Init -// Method -PTDB TDBDIR::Clone(PTABS t) - { - PTDB tp; - PGLOBAL g = t->G; // Is this really useful ??? +TDBDIR::TDBDIR(PDIRDEF tdp) : TDBASE(tdp) +{ + To_File = tdp->Fn; + Init(); +} // end of TDBDIR standard constructor - tp = new(g) TDBDIR(this); - tp->SetColumns(Columns); - return tp; - } // end of Clone +TDBDIR::TDBDIR(PSZ fpat) : TDBASE((PTABDEF)NULL) +{ + To_File = fpat; + Init(); +} // end of TDBDIR constructor /***********************************************************************/ /* Initialize/get the components of the search file pattern. */ @@ -674,18 +669,19 @@ PTDB TDBDIR::Clone(PTABS t) char* TDBDIR::Path(PGLOBAL g) { PCATLG cat = PlgGetCatalog(g); + PTABDEF defp = (PTABDEF)To_Def; #if defined(__WIN__) if (!*Drive) { - PlugSetPath(Fpath, To_File, ((PTABDEF)To_Def)->GetPath()); + PlugSetPath(Fpath, To_File, defp ? defp->GetPath() : NULL); _splitpath(Fpath, Drive, Direc, Fname, Ftype); } else - _makepath(Fpath, Drive, Direc, Fname, Ftype); // Usefull ??? + _makepath(Fpath, Drive, Direc, Fname, Ftype); // Usefull for TDBSDR return Fpath; #else // !__WIN__ if (!Done) { - PlugSetPath(Fpath, To_File, ((PTABDEF)To_Def)->GetPath()); + PlugSetPath(Fpath, To_File, defp ? defp->GetPath() : NULL); _splitpath(Fpath, NULL, Direc, Fname, Ftype); strcat(strcpy(Pattern, Fname), Ftype); Done = true; @@ -709,23 +705,48 @@ PCOL TDBDIR::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) int TDBDIR::GetMaxSize(PGLOBAL g) { if (MaxSize < 0) { - int n = -1; + int rc, n = -1; #if defined(__WIN__) - int h; // Start searching files in the target directory. - h = _findfirst(Path(g), &FileData); + hSearch = FindFirstFile(Path(g), &FileData); - if (h != -1) { - for (n = 1;; n++) - if (_findnext(h, &FileData)) - break; + if (hSearch == INVALID_HANDLE_VALUE) { + rc = GetLastError(); - // Close the search handle. 
- _findclose(h); - } else - n = 0; + if (rc != ERROR_FILE_NOT_FOUND) { + char buf[512]; + + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL); + sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf); + return -1; + } // endif rc + + return 0; + } // endif hSearch + + while (true) { + if (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) + n++; + + if (!FindNextFile(hSearch, &FileData)) { + rc = GetLastError(); + if (rc != ERROR_NO_MORE_FILES) { + sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc); + FindClose(hSearch); + return -1; + } // endif rc + + break; + } // endif Next + + } // endwhile + + // Close the search handle. + FindClose(hSearch); #else // !__WIN__ Path(g); @@ -791,30 +812,30 @@ int TDBDIR::ReadDB(PGLOBAL g) int rc = RC_OK; #if defined(__WIN__) - if (Hsearch == -1) { + if (hSearch == INVALID_HANDLE_VALUE) { /*******************************************************************/ /* Start searching files in the target directory. The use of the */ /* Path function is required when called from TDBSDR. */ /*******************************************************************/ - Hsearch = _findfirst(Path(g), &FileData); + hSearch = FindFirstFile(Path(g), &FileData); - if (Hsearch == -1) + if (hSearch == INVALID_HANDLE_VALUE) rc = RC_EF; else iFile++; } else { - if (_findnext(Hsearch, &FileData)) { + if (!FindNextFile(hSearch, &FileData)) { // Restore file name and type pattern _splitpath(To_File, NULL, NULL, Fname, Ftype); rc = RC_EF; } else iFile++; - } // endif Hsearch + } // endif hSearch if (rc == RC_OK) - _splitpath(FileData.name, NULL, NULL, Fname, Ftype); + _splitpath(FileData.cFileName, NULL, NULL, Fname, Ftype); #else // !Win32 rc = RC_NF; @@ -878,8 +899,8 @@ void TDBDIR::CloseDB(PGLOBAL) { #if defined(__WIN__) // Close the search handle. - _findclose(Hsearch); - Hsearch = -1; + FindClose(hSearch); + hSearch = INVALID_HANDLE_VALUE; #else // !__WIN__ // Close the DIR handle if (Dir) { @@ -907,6 +928,7 @@ DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) } // endif cprec // Set additional DIR access method information for column. + Tdbp = (PTDBDIR)tdbp; N = cdp->GetOffset(); } // end of DIRCOL constructor @@ -916,45 +938,73 @@ DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) /***********************************************************************/ DIRCOL::DIRCOL(DIRCOL *col1, PTDB tdbp) : COLBLK(col1, tdbp) { - N = col1->N; + Tdbp = (PTDBDIR)tdbp; + N = col1->N; } // end of DIRCOL copy constructor +#if defined(__WIN__) +/***********************************************************************/ +/* Retrieve time information from FileData. 
*/ +/***********************************************************************/ +void DIRCOL::SetTimeValue(PGLOBAL g, FILETIME& ftime) +{ + char tsp[24]; + SYSTEMTIME stp; + + if (FileTimeToSystemTime(&ftime, &stp)) { + sprintf(tsp, "%04d-%02d-%02d %02d:%02d:%02d", + stp.wYear, stp.wMonth, stp.wDay, stp.wHour, stp.wMinute, stp.wSecond); + + if (Value->GetType() != TYPE_STRING) { + if (!Tdbp->Dvalp) + Tdbp->Dvalp = AllocateValue(g, TYPE_DATE, 20, 0, false, + "YYYY-MM-DD hh:mm:ss"); + + Tdbp->Dvalp->SetValue_psz(tsp); + Value->SetValue_pval(Tdbp->Dvalp); + } else + Value->SetValue_psz(tsp); + + } else + Value->Reset(); + +} // end of SetTimeValue +#endif // __WIN__ + /***********************************************************************/ /* ReadColumn: what this routine does is to access the information */ /* corresponding to this column and convert it to buffer type. */ /***********************************************************************/ void DIRCOL::ReadColumn(PGLOBAL g) - { - PTDBDIR tdbp = (PTDBDIR)To_Tdb; - + { if (trace) htrc("DIR ReadColumn: col %s R%d use=%.4X status=%.4X type=%d N=%d\n", - Name, tdbp->GetTdb_No(), ColUse, Status, Buf_Type, N); + Name, Tdbp->GetTdb_No(), ColUse, Status, Buf_Type, N); /*********************************************************************/ /* Retrieve the information corresponding to the column number. */ /*********************************************************************/ switch (N) { #if defined(__WIN__) - case 0: Value->SetValue_psz(tdbp->Drive); break; + case 0: Value->SetValue_psz(Tdbp->Drive); break; #endif // __WIN__ - case 1: Value->SetValue_psz(tdbp->Direc); break; - case 2: Value->SetValue_psz(tdbp->Fname); break; - case 3: Value->SetValue_psz(tdbp->Ftype); break; + case 1: Value->SetValue_psz(Tdbp->Direc); break; + case 2: Value->SetValue_psz(Tdbp->Fname); break; + case 3: Value->SetValue_psz(Tdbp->Ftype); break; #if defined(__WIN__) - case 4: Value->SetValue((int)tdbp->FileData.attrib); break; - case 5: Value->SetValue((int)tdbp->FileData.size); break; - case 6: Value->SetValue((int)tdbp->FileData.time_write); break; - case 7: Value->SetValue((int)tdbp->FileData.time_create); break; - case 8: Value->SetValue((int)tdbp->FileData.time_access); break; + case 4: Value->SetValue((int)Tdbp->FileData.dwFileAttributes); break; + case 5: Value->SetValue((int)Tdbp->FileData.nFileSizeLow); break; + case 6: SetTimeValue(g, Tdbp->FileData.ftLastWriteTime); break; + case 7: SetTimeValue(g, Tdbp->FileData.ftCreationTime); break; + case 8: SetTimeValue(g, Tdbp->FileData.ftLastAccessTime); break; #else // !__WIN__ - case 4: Value->SetValue((int)tdbp->Fileinfo.st_mode); break; - case 5: Value->SetValue((int)tdbp->Fileinfo.st_size); break; - case 6: Value->SetValue((int)tdbp->Fileinfo.st_mtime); break; - case 7: Value->SetValue((int)tdbp->Fileinfo.st_ctime); break; - case 8: Value->SetValue((int)tdbp->Fileinfo.st_atime); break; - case 9: Value->SetValue((int)tdbp->Fileinfo.st_uid); break; - case 10: Value->SetValue((int)tdbp->Fileinfo.st_gid); break; + case 4: Value->SetValue((int)Tdbp->Fileinfo.st_mode); break; + case 5: Value->SetValue((int)Tdbp->Fileinfo.st_size); break; + case 6: Value->SetValue((int)Tdbp->Fileinfo.st_mtime); break; + case 7: Value->SetValue((int)Tdbp->Fileinfo.st_ctime); break; + case 8: Value->SetValue((int)Tdbp->Fileinfo.st_atime); break; + case 9: Value->SetValue((int)Tdbp->Fileinfo.st_uid); break; + case 10: Value->SetValue((int)Tdbp->Fileinfo.st_gid); break; #endif // !__WIN__ default: sprintf(g->Message, 
MSG(INV_DIRCOL_OFST), N); @@ -969,25 +1019,6 @@ void DIRCOL::ReadColumn(PGLOBAL g) /* ------------------------- Class TDBSDR ---------------------------- */ -/***********************************************************************/ -/* TABSDR copy constructors. */ -/***********************************************************************/ -TDBSDR::TDBSDR(PTDBSDR tdbp) : TDBDIR(tdbp) - { - Sub = tdbp->Sub; - } // end of TDBSDR copy constructor - -// Method -PTDB TDBSDR::Clone(PTABS t) - { - PTDB tp; - PGLOBAL g = t->G; // Is this really useful ??? - - tp = new(g) TDBSDR(this); - tp->SetColumns(Columns); - return tp; - } // end of Clone - /***********************************************************************/ /* SDR GetMaxSize: returns the number of retrieved files. */ /***********************************************************************/ @@ -1002,47 +1033,124 @@ int TDBSDR::GetMaxSize(PGLOBAL g) } // end of GetMaxSize /***********************************************************************/ -/* SDR GetMaxSize: returns the number of retrieved files. */ +/* SDR FindInDir: returns the number of retrieved files. */ /***********************************************************************/ int TDBSDR::FindInDir(PGLOBAL g) { - int n = 0; + int rc, n = 0; size_t m = strlen(Direc); // Start searching files in the target directory. #if defined(__WIN__) - int h = _findfirst(Path(g), &FileData); + HANDLE h; - if (h != -1) { - for (n = 1;; n++) - if (_findnext(h, &FileData)) - break; +#if defined(PATHMATCHSPEC) + if (!*Drive) + Path(g); - // Close the search handle. - _findclose(h); - } // endif h + _makepath(Fpath, Drive, Direc, "*", "*"); - // Now search files in sub-directories. - _makepath(Fpath, Drive, Direc, "*", ""); - h = _findfirst(Fpath, &FileData); + h = FindFirstFile(Fpath, &FileData); - if (h != -1) { - while (true) { - if (FileData.attrib & _A_SUBDIR && *FileData.name != '.') { - // Look in the name sub-directory - strcat(strcat(Direc, FileData.name), "\\"); - n += FindInDir(g); - Direc[m] = '\0'; // Restore path - } // endif SUBDIR + if (h == INVALID_HANDLE_VALUE) { + rc = GetLastError(); - if (_findnext(h, &FileData)) - break; + if (rc != ERROR_FILE_NOT_FOUND) { + char buf[512]; - } // endwhile + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL); + sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf); + return -1; + } // endif rc - // Close the search handle. 
- _findclose(h); - } // endif h + return 0; + } // endif h + + while (true) { + if ((FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) && + *FileData.cFileName != '.') { + // Look in the name sub-directory + strcat(strcat(Direc, FileData.cFileName), "/"); + n += FindInDir(g); + Direc[m] = '\0'; // Restore path + } else if (PathMatchSpec(FileData.cFileName, Fpath)) + n++; + + if (!FindNextFile(h, &FileData)) { + rc = GetLastError(); + + if (rc != ERROR_NO_MORE_FILES) { + sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc); + FindClose(h); + return -1; + } // endif rc + + break; + } // endif Next + + } // endwhile +#else // !PATHMATCHSPEC + h = FindFirstFile(Path(g), &FileData); + + if (h == INVALID_HANDLE_VALUE) { + rc = GetLastError(); + + if (rc != ERROR_FILE_NOT_FOUND) { + char buf[512]; + + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, GetLastError(), 0, (LPTSTR)&buf, sizeof(buf), NULL); + sprintf(g->Message, MSG(BAD_FILE_HANDLE), buf); + return -1; + } // endif rc + + return 0; + } // endif hSearch + + while (true) { + n++; + + if (!FindNextFile(h, &FileData)) { + rc = GetLastError(); + + if (rc != ERROR_NO_MORE_FILES) { + sprintf(g->Message, MSG(NEXT_FILE_ERROR), rc); + FindClose(h); + return -1; + } // endif rc + + break; + } // endif Next + + } // endwhile + + // Now search files in sub-directories. + _makepath(Fpath, Drive, Direc, "*", "."); + h = FindFirstFile(Fpath, &FileData); + + if (h != INVALID_HANDLE_VALUE) { + while (true) { + if ((FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) && + *FileData.cFileName != '.') { + // Look in the name sub-directory + strcat(strcat(Direc, FileData.cFileName), "/"); + n += FindInDir(g); + Direc[m] = '\0'; // Restore path + } // endif SUBDIR + + if (!FindNextFile(h, &FileData)) + break; + + } // endwhile + + } // endif h +#endif // !PATHMATCHSPEC + + // Close the search handle. + FindClose(h); #else // !__WIN__ int k; DIR *dir = opendir(Direc); @@ -1094,7 +1202,7 @@ bool TDBSDR::OpenDB(PGLOBAL g) Sub->Next = NULL; Sub->Prev = NULL; #if defined(__WIN__) - Sub->H = -1; + Sub->H = INVALID_HANDLE_VALUE; Sub->Len = strlen(Direc); #else // !__WIN__ Sub->D = NULL; @@ -1120,18 +1228,20 @@ int TDBSDR::ReadDB(PGLOBAL g) // Are there more files in sub-directories retry: do { - if (Sub->H == -1) { - _makepath(Fpath, Drive, Direc, "*", ""); - Sub->H = _findfirst(Fpath, &FileData); - } else if (_findnext(Sub->H, &FileData)) { - _findclose(Sub->H); - Sub->H = -1; - *FileData.name = '\0'; - } // endif findnext - - } while(*FileData.name == '.'); - - if (Sub->H == -1) { + if (Sub->H == INVALID_HANDLE_VALUE) { + _makepath(Fpath, Drive, Direc, "*", "."); + Sub->H = FindFirstFile(Fpath, &FileData); + } else if (!FindNextFile(Sub->H, &FileData)) { + FindClose(Sub->H); + Sub->H = INVALID_HANDLE_VALUE; + *FileData.cFileName= '\0'; + break; + } // endif findnext + + } while(!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) + || *FileData.cFileName== '.'); + + if (Sub->H == INVALID_HANDLE_VALUE) { // No more sub-directories. Are we in a sub-directory? 
if (!Sub->Prev) return rc; // No, all is finished @@ -1149,17 +1259,17 @@ int TDBSDR::ReadDB(PGLOBAL g) sup = (PSUBDIR)PlugSubAlloc(g, NULL, sizeof(SUBDIR)); sup->Next = NULL; sup->Prev = Sub; - sup->H = -1; + sup->H = INVALID_HANDLE_VALUE; Sub->Next = sup; } // endif Next Sub = Sub->Next; - strcat(strcat(Direc, FileData.name), "\\"); + strcat(strcat(Direc, FileData.cFileName), "/"); Sub->Len = strlen(Direc); // Reset Hsearch used by TDBDIR::ReadDB - _findclose(Hsearch); - Hsearch = -1; + FindClose(hSearch); + hSearch = INVALID_HANDLE_VALUE; goto again; } // endif H diff --git a/storage/connect/tabmul.h b/storage/connect/tabmul.h index 51fa7f9000a..3c0ab1a4aa5 100644 --- a/storage/connect/tabmul.h +++ b/storage/connect/tabmul.h @@ -69,6 +69,34 @@ class DllExport TDBMUL : public TDBASE { int iFile; // Index of currently processed file }; // end of class TDBMUL +#if 0 +/***********************************************************************/ +/* This is the MSD Access Method class declaration for files that are */ +/* physically split in multiple files having the same format. */ +/* This sub-class also include files of the sub-directories. */ +/***********************************************************************/ +class DllExport TDBMSD : public TDBMUL { + //friend class MULCOL; +public: + // Constructor + TDBMSD(PTDB tdbp) : TDBMUL(tdbp) {} + TDBMSD(PTDBMSD tdbp) : TDBMUL(tdbp) {} + + // Implementation + virtual PTDB Duplicate(PGLOBAL g); + + // Methods + virtual PTDB Clone(PTABS t); + bool InitFileNames(PGLOBAL g); + + // Database routines + +protected: + + // Members +}; // end of class TDBMSD +#endif + /***********************************************************************/ /* Directory listing table. */ /***********************************************************************/ @@ -101,18 +129,16 @@ class DllExport DIRDEF : public TABDEF { /* Directory listing table */ /***********************************************************************/ class TDBDIR : public TDBASE { friend class DIRCOL; - public: + friend class TDBMUL; +public: // Constructor TDBDIR(PDIRDEF tdp); - TDBDIR(PTDBDIR tdbp); + TDBDIR(PSZ fpat); // Implementation virtual AMT GetAmType(void) {return TYPE_AM_DIR;} - virtual PTDB Duplicate(PGLOBAL g) - {return (PTDB)new(g) TDBDIR(this);} // Methods - virtual PTDB Clone(PTABS t); virtual int GetRecpos(void) {return iFile;} // Database routines @@ -127,14 +153,16 @@ class TDBDIR : public TDBASE { virtual void CloseDB(PGLOBAL g); protected: + void Init(void); char *Path(PGLOBAL g); // Members PSZ To_File; // Points to file search pathname int iFile; // Index of currently retrieved file #if defined(__WIN__) - _finddata_t FileData; // Find data structure - intptr_t Hsearch; // Search handle + PVAL Dvalp; // Used to retrieve file date values + WIN32_FIND_DATA FileData; // Find data structure + HANDLE hSearch; // Search handle char Drive[_MAX_DRIVE]; // Drive name #else // !__WIN__ struct stat Fileinfo; // File info structure @@ -158,17 +186,11 @@ class TDBDIR : public TDBASE { /***********************************************************************/ class TDBSDR : public TDBDIR { friend class DIRCOL; + friend class TDBMUL; public: // Constructors TDBSDR(PDIRDEF tdp) : TDBDIR(tdp) {Sub = NULL;} - TDBSDR(PTDBSDR tdbp); - - // Implementation - virtual PTDB Duplicate(PGLOBAL g) - {return (PTDB)new(g) TDBSDR(this);} - - // Methods - virtual PTDB Clone(PTABS t); + TDBSDR(PSZ fpat) : TDBDIR(fpat) {Sub = NULL;} // Database routines virtual int GetMaxSize(PGLOBAL g); @@ -184,7 +206,7 
@@ class TDBSDR : public TDBDIR { struct _Sub_Dir *Next; struct _Sub_Dir *Prev; #if defined(__WIN__) - intptr_t H; // Search handle + HANDLE H; // Search handle #else // !__WIN__ DIR *D; #endif // !__WIN__ @@ -214,7 +236,11 @@ class DIRCOL : public COLBLK { protected: // Default constructor not to be used DIRCOL(void) {} +#if defined(__WIN__) + void SetTimeValue(PGLOBAL g, FILETIME& ftime); +#endif // __WIN__ // Members + PTDBDIR Tdbp; // To DIR table int N; // Column number }; // end of class DIRCOL -- cgit v1.2.1 From 0e0ae0bb06f75d8d745d35b2470e01671ddbe7a8 Mon Sep 17 00:00:00 2001 From: Sachin Setiya Date: Tue, 14 Mar 2017 13:13:22 +0530 Subject: MW-28, codership/mysql-wsrep#28 Fix sync_thread_levels debug assert Introduced a new wsrep_trx_print_locking() which may be called under lock_sys->mutex if the trx has locks. Signed-off-by: Sachin Setiya --- storage/innobase/include/trx0trx.h | 17 ++++++ storage/innobase/lock/lock0lock.cc | 15 ++--- storage/innobase/trx/trx0trx.cc | 112 +++++++++++++++++++++++++++++++++++++ 3 files changed, 134 insertions(+), 10 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 07123cd6d86..c7e0dc5a551 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -333,6 +333,23 @@ trx_print_latched( or 0 to use the default max length */ MY_ATTRIBUTE((nonnull)); +#ifdef WITH_WSREP +/**********************************************************************//** +Prints info about a transaction. +Transaction information may be retrieved without having trx_sys->mutex acquired +so it may not be completely accurate. The caller must own lock_sys->mutex +and the trx must have some locks to make sure that it does not escape +without locking lock_sys->mutex. */ +UNIV_INTERN +void +wsrep_trx_print_locking( +/*==============*/ + FILE* f, /*!< in: output stream */ + const trx_t* trx, /*!< in: transaction */ + ulint max_query_len) /*!< in: max query length to print, + or 0 to use the default max length */ + MY_ATTRIBUTE((nonnull)); +#endif /* WITH_WSREP */ /**********************************************************************//** Prints info about a transaction. Acquires and releases lock_sys->mutex and trx_sys->mutex. 
*/ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index c44aa490a81..c1f5d71e1b6 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1704,16 +1704,14 @@ wsrep_kill_victim( is in the queue*/ } else if (lock->trx != trx) { if (wsrep_log_conflicts) { - mutex_enter(&trx_sys->mutex); - if (bf_this) { - fputs("\n*** Priority TRANSACTION:\n", + if (bf_this) + fputs("\n*** Priority TRANSACTION:\n", stderr); - } else { + else { fputs("\n*** Victim TRANSACTION:\n", stderr); } - - trx_print_latched(stderr, trx, 3000); + wsrep_trx_print_locking(stderr, trx, 3000); if (bf_other) { fputs("\n*** Priority TRANSACTION:\n", @@ -1722,10 +1720,7 @@ wsrep_kill_victim( fputs("\n*** Victim TRANSACTION:\n", stderr); } - - trx_print_latched(stderr, lock->trx, 3000); - - mutex_exit(&trx_sys->mutex); + wsrep_trx_print_locking(stderr, lock->trx, 3000); fputs("*** WAITING FOR THIS LOCK TO BE GRANTED:\n", stderr); diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index de3afcdd8b0..3b39f685e6d 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -1876,6 +1876,118 @@ trx_print_latched( mem_heap_get_size(trx->lock.lock_heap)); } +#ifdef WITH_WSREP +/**********************************************************************//** +Prints info about a transaction. +Transaction information may be retrieved without having trx_sys->mutex acquired +so it may not be completely accurate. The caller must own lock_sys->mutex +and the trx must have some locks to make sure that it does not escape +without locking lock_sys->mutex. */ +UNIV_INTERN +void +wsrep_trx_print_locking( +/*==========*/ + FILE* f, + /*!< in: output stream */ + const trx_t* trx, + /*!< in: transaction */ + ulint max_query_len) + /*!< in: max query length to print, + or 0 to use the default max length */ +{ + ibool newline; + const char* op_info; + + ut_ad(lock_mutex_own()); + ut_ad(trx->lock.trx_locks.count > 0); + + fprintf(f, "TRANSACTION " TRX_ID_FMT, trx->id); + + /* trx->state may change since trx_sys->mutex is not required */ + switch (trx->state) { + case TRX_STATE_NOT_STARTED: + fputs(", not started", f); + goto state_ok; + case TRX_STATE_ACTIVE: + fprintf(f, ", ACTIVE %lu sec", + (ulong) difftime(time(NULL), trx->start_time)); + goto state_ok; + case TRX_STATE_PREPARED: + fprintf(f, ", ACTIVE (PREPARED) %lu sec", + (ulong) difftime(time(NULL), trx->start_time)); + goto state_ok; + case TRX_STATE_COMMITTED_IN_MEMORY: + fputs(", COMMITTED IN MEMORY", f); + goto state_ok; + } + fprintf(f, ", state %lu", (ulong) trx->state); + ut_ad(0); +state_ok: + + /* prevent a race condition */ + op_info = trx->op_info; + + if (*op_info) { + putc(' ', f); + fputs(op_info, f); + } + + if (trx->is_recovered) { + fputs(" recovered trx", f); + } + + if (trx->declared_to_be_inside_innodb) { + fprintf(f, ", thread declared inside InnoDB %lu", + (ulong) trx->n_tickets_to_enter_innodb); + } + + putc('\n', f); + + if (trx->n_mysql_tables_in_use > 0 || trx->mysql_n_tables_locked > 0) { + fprintf(f, "mysql tables in use %lu, locked %lu\n", + (ulong) trx->n_mysql_tables_in_use, + (ulong) trx->mysql_n_tables_locked); + } + + newline = TRUE; + + /* trx->lock.que_state of an ACTIVE transaction may change + while we are not holding trx->mutex. We perform a dirty read + for performance reasons. 
*/ + + switch (trx->lock.que_state) { + case TRX_QUE_RUNNING: + newline = FALSE; break; + case TRX_QUE_LOCK_WAIT: + fputs("LOCK WAIT ", f); break; + case TRX_QUE_ROLLING_BACK: + fputs("ROLLING BACK ", f); break; + case TRX_QUE_COMMITTING: + fputs("COMMITTING ", f); break; + default: + fprintf(f, "que state %lu ", (ulong) trx->lock.que_state); + } + + if (trx->has_search_latch) { + newline = TRUE; + fputs(", holds adaptive hash latch", f); + } + + if (trx->undo_no != 0) { + newline = TRUE; + fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no); + } + + if (newline) { + putc('\n', f); + } + + if (trx->mysql_thd != NULL) { + innobase_mysql_print_thd( + f, trx->mysql_thd, static_cast(max_query_len)); + } +} +#endif /* WITH_WSREP */ /**********************************************************************//** Prints info about a transaction. Acquires and releases lock_sys->mutex and trx_sys->mutex. */ -- cgit v1.2.1 From 5de5daa74e1ff3d3b002801c22a94c556e80d942 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 18 Mar 2017 12:49:14 +0100 Subject: Fix MDEV-12220: Crash when doing UPDATE or DELETE on an external table (ODBC, JDBC, MYSQL) with a WHERE clause on an indexed column. Also fix a bugs in TDBEXT::MakeCommand (use of uninitialised Quote) Add in this function the eventual Schema (database) prefixing. modified: storage/connect/connect.cc modified: storage/connect/tabext.cpp Typo modified: storage/connect/tabjdbc.h --- storage/connect/connect.cc | 5 +++-- storage/connect/tabext.cpp | 40 ++++++++++++++++++++++++++++++++-------- storage/connect/tabjdbc.h | 4 ++-- 3 files changed, 37 insertions(+), 12 deletions(-) (limited to 'storage') diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 6be5bd349dc..9ec93bfe1ba 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -858,8 +858,9 @@ RCODE CntIndexRead(PGLOBAL g, PTDB ptdb, OPVAL op, sprintf(g->Message, MSG(TABLE_NO_INDEX), ptdb->GetName()); return RC_FX; } else if (x == 2) { - // Remote index - if (op != OP_SAME && ptdb->ReadKey(g, op, kr)) + // Remote index. Only used in read mode + if ((ptdb->GetMode() == MODE_READ || ptdb->GetMode() == MODE_READX) + && op != OP_SAME && ptdb->ReadKey(g, op, kr)) return RC_FX; goto rnd; diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp index c3403ca0081..ad9716b6a10 100644 --- a/storage/connect/tabext.cpp +++ b/storage/connect/tabext.cpp @@ -434,15 +434,16 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) /***********************************************************************/ bool TDBEXT::MakeCommand(PGLOBAL g) { - char *p, *stmt, name[68], *body = NULL; + char *p, *stmt, name[132], *body = NULL, *schmp = NULL; char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1); bool qtd = Quoted > 0; + char q = qtd ? *Quote : ' '; int i = 0, k = 0; // Make a lower case copy of the originale query and change // back ticks to the data source identifier quoting character do { - qrystr[i] = (Qrystr[i] == '`') ? *Quote : tolower(Qrystr[i]); + qrystr[i] = (Qrystr[i] == '`') ? 
q : tolower(Qrystr[i]); } while (Qrystr[i++]); if (To_CondFil && (p = strstr(qrystr, " where "))) { @@ -459,27 +460,50 @@ bool TDBEXT::MakeCommand(PGLOBAL g) strlwr(strcat(strcat(strcpy(name, " "), Name), " ")); if (strstr(" update delete low_priority ignore quick from ", name)) { - strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote)); - k += 2; + if (Quote) { + strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote)); + k += 2; + } else { + strcpy(g->Message, "Quoted must be specified"); + return true; + } // endif Quote + } else strlwr(strcpy(name, Name)); // Not a keyword if ((p = strstr(qrystr, name))) { for (i = 0; i < p - qrystr; i++) - stmt[i] = (Qrystr[i] == '`') ? *Quote : Qrystr[i]; + stmt[i] = (Qrystr[i] == '`') ? q : Qrystr[i]; stmt[i] = 0; + k += i + (int)strlen(Name); - if (qtd && *(p - 1) == ' ') + if (Schema && *Schema) + schmp = Schema; + + if (qtd && *(p - 1) == ' ') { + if (schmp) + strcat(strcat(stmt, schmp), "."); + strcat(strcat(strcat(stmt, Quote), TableName), Quote); - else + } else { + if (schmp) { + if (qtd && *(p - 1) != ' ') { + stmt[i - 1] = 0; + strcat(strcat(strcat(stmt, schmp), "."), Quote); + } else + strcat(strcat(stmt, schmp), "."); + + } // endif schmp + strcat(stmt, TableName); + } // endif's i = (int)strlen(stmt); do { - stmt[i++] = (Qrystr[k] == '`') ? *Quote : Qrystr[k]; + stmt[i++] = (Qrystr[k] == '`') ? q : Qrystr[k]; } while (Qrystr[k++]); if (body) diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h index 212f2a58b58..5a59b6c2df8 100644 --- a/storage/connect/tabjdbc.h +++ b/storage/connect/tabjdbc.h @@ -1,7 +1,7 @@ /*************** Tabjdbc H Declares Source Code File (.H) **************/ -/* Name: TABJDBC.H Version 1.0 */ +/* Name: TABJDBC.H Version 1.1 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */ /* */ /* This file contains the TDBJDBC classes declares. */ /***********************************************************************/ -- cgit v1.2.1 From 53c6195eede2b9959331bf11fa7679a108896bde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Mon, 20 Mar 2017 10:17:13 +0200 Subject: Fixed test failure on galere_wsrep_log_conflicts on XtraDB. Problem was that trx_sys->mutex was acquired to print trx info even when we already hold trx_sys->mutex. Fixed similarly as in InnoDB, i.e. with wsrep_trx_print_locking() function that does not acquire trx_sys->mutex. --- storage/innobase/include/trx0trx.h | 1 + storage/innobase/lock/lock0lock.cc | 10 ++-- storage/innobase/trx/trx0trx.cc | 1 + storage/xtradb/include/trx0trx.h | 20 ++++++- storage/xtradb/lock/lock0lock.cc | 9 +-- storage/xtradb/trx/trx0trx.cc | 114 +++++++++++++++++++++++++++++++++++++ 6 files changed, 144 insertions(+), 11 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index c7e0dc5a551..6db87efaf91 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2015, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index c1f5d71e1b6..b927efaa286 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2016, MariaDB Corporation +Copyright (c) 2014, 2017, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1704,13 +1704,14 @@ wsrep_kill_victim( is in the queue*/ } else if (lock->trx != trx) { if (wsrep_log_conflicts) { - if (bf_this) - fputs("\n*** Priority TRANSACTION:\n", + if (bf_this) { + fputs("\n*** Priority TRANSACTION:\n", stderr); - else { + } else { fputs("\n*** Victim TRANSACTION:\n", stderr); } + wsrep_trx_print_locking(stderr, trx, 3000); if (bf_other) { @@ -1720,6 +1721,7 @@ wsrep_kill_victim( fputs("\n*** Victim TRANSACTION:\n", stderr); } + wsrep_trx_print_locking(stderr, lock->trx, 3000); fputs("*** WAITING FOR THIS LOCK TO BE GRANTED:\n", diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index bfc1c546510..5fa176d99e1 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2015, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h index f9917c8448d..1bc1e07f547 100644 --- a/storage/xtradb/include/trx0trx.h +++ b/storage/xtradb/include/trx0trx.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, MariaDB Corporation +Copyright (c) 2015, 2017, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -333,6 +333,24 @@ trx_print_low( /*!< in: mem_heap_get_size(trx->lock.lock_heap) */ MY_ATTRIBUTE((nonnull)); +#ifdef WITH_WSREP +/**********************************************************************//** +Prints info about a transaction. +Transaction information may be retrieved without having trx_sys->mutex acquired +so it may not be completely accurate. The caller must own lock_sys->mutex +and the trx must have some locks to make sure that it does not escape +without locking lock_sys->mutex. */ +UNIV_INTERN +void +wsrep_trx_print_locking( +/*==============*/ + FILE* f, /*!< in: output stream */ + const trx_t* trx, /*!< in: transaction */ + ulint max_query_len) /*!< in: max query length to print, + or 0 to use the default max length */ + MY_ATTRIBUTE((nonnull)); +#endif /* WITH_WSREP */ + /**********************************************************************//** Prints info about a transaction. The caller must hold lock_sys->mutex and trx_sys->mutex. 
diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index 9be03e78977..c09a3ac2550 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2015, MariaDB Corporation +Copyright (c) 2014, 2017, MariaDB Corporation This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1714,7 +1714,6 @@ wsrep_kill_victim( is in the queue*/ } else if (lock->trx != trx) { if (wsrep_log_conflicts) { - mutex_enter(&trx_sys->mutex); if (bf_this) { fputs("\n*** Priority TRANSACTION:\n", stderr); @@ -1723,7 +1722,7 @@ wsrep_kill_victim( stderr); } - trx_print_latched(stderr, trx, 3000); + wsrep_trx_print_locking(stderr, trx, 3000); if (bf_other) { fputs("\n*** Priority TRANSACTION:\n", @@ -1733,9 +1732,7 @@ wsrep_kill_victim( stderr); } - trx_print_latched(stderr, lock->trx, 3000); - - mutex_exit(&trx_sys->mutex); + wsrep_trx_print_locking(stderr, lock->trx, 3000); fputs("*** WAITING FOR THIS LOCK TO BE GRANTED:\n", stderr); diff --git a/storage/xtradb/trx/trx0trx.cc b/storage/xtradb/trx/trx0trx.cc index 07e122e54ee..1670837df59 100644 --- a/storage/xtradb/trx/trx0trx.cc +++ b/storage/xtradb/trx/trx0trx.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2015, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2156,6 +2157,119 @@ trx_print_latched( mem_heap_get_size(trx->lock.lock_heap)); } +#ifdef WITH_WSREP +/**********************************************************************//** +Prints info about a transaction. +Transaction information may be retrieved without having trx_sys->mutex acquired +so it may not be completely accurate. The caller must own lock_sys->mutex +and the trx must have some locks to make sure that it does not escape +without locking lock_sys->mutex. 
*/ +UNIV_INTERN +void +wsrep_trx_print_locking( +/*==========*/ + FILE* f, + /*!< in: output stream */ + const trx_t* trx, + /*!< in: transaction */ + ulint max_query_len) + /*!< in: max query length to print, + or 0 to use the default max length */ +{ + ibool newline; + const char* op_info; + + ut_ad(lock_mutex_own()); + ut_ad(trx->lock.trx_locks.count > 0); + + fprintf(f, "TRANSACTION " TRX_ID_FMT, trx->id); + + /* trx->state may change since trx_sys->mutex is not required */ + switch (trx->state) { + case TRX_STATE_NOT_STARTED: + fputs(", not started", f); + goto state_ok; + case TRX_STATE_ACTIVE: + fprintf(f, ", ACTIVE %lu sec", + (ulong) difftime(time(NULL), trx->start_time)); + goto state_ok; + case TRX_STATE_PREPARED: + fprintf(f, ", ACTIVE (PREPARED) %lu sec", + (ulong) difftime(time(NULL), trx->start_time)); + goto state_ok; + case TRX_STATE_COMMITTED_IN_MEMORY: + fputs(", COMMITTED IN MEMORY", f); + goto state_ok; + } + fprintf(f, ", state %lu", (ulong) trx->state); + ut_ad(0); +state_ok: + + /* prevent a race condition */ + op_info = trx->op_info; + + if (*op_info) { + putc(' ', f); + fputs(op_info, f); + } + + if (trx->is_recovered) { + fputs(" recovered trx", f); + } + + if (trx->declared_to_be_inside_innodb) { + fprintf(f, ", thread declared inside InnoDB %lu", + (ulong) trx->n_tickets_to_enter_innodb); + } + + putc('\n', f); + + if (trx->n_mysql_tables_in_use > 0 || trx->mysql_n_tables_locked > 0) { + fprintf(f, "mysql tables in use %lu, locked %lu\n", + (ulong) trx->n_mysql_tables_in_use, + (ulong) trx->mysql_n_tables_locked); + } + + newline = TRUE; + + /* trx->lock.que_state of an ACTIVE transaction may change + while we are not holding trx->mutex. We perform a dirty read + for performance reasons. */ + + switch (trx->lock.que_state) { + case TRX_QUE_RUNNING: + newline = FALSE; break; + case TRX_QUE_LOCK_WAIT: + fputs("LOCK WAIT ", f); break; + case TRX_QUE_ROLLING_BACK: + fputs("ROLLING BACK ", f); break; + case TRX_QUE_COMMITTING: + fputs("COMMITTING ", f); break; + default: + fprintf(f, "que state %lu ", (ulong) trx->lock.que_state); + } + + if (trx->has_search_latch) { + newline = TRUE; + fputs(", holds adaptive hash latch", f); + } + + if (trx->undo_no != 0) { + newline = TRUE; + fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no); + } + + if (newline) { + putc('\n', f); + } + + if (trx->mysql_thd != NULL) { + innobase_mysql_print_thd( + f, trx->mysql_thd, static_cast(max_query_len)); + } +} +#endif /* WITH_WSREP */ + /**********************************************************************//** Prints info about a transaction. Acquires and releases lock_sys->mutex and trx_sys->mutex. */ -- cgit v1.2.1 From 3ac35bb05906df8561cd9f37c98e6b70ae149140 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 28 Mar 2017 10:25:21 +0200 Subject: Fix crash when a line is not ended by \n. modified: storage/connect/filamap.cpp Add specifying a password when reading zipped tables. 
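The crash fix itself is in the filamap.cpp hunks below: the scan for the line terminator has to stop at Top (the end of the mapped file) instead of running past it when the last line is not ended by \n. As a stand-alone sketch (helper name and signature are illustrative only):

    // Advance p just past the next '\n', but never beyond end.
    // Returns false when the buffer ends before a newline is found,
    // i.e. the unterminated-last-line case that used to crash.
    static bool skip_past_eol(const char *&p, const char *end)
    {
      while (p < end)
        if (*p++ == '\n')
          return true;        // p is now on the first byte of the next line

      return false;           // reached end of mapping: treat as EOF
    } // end of skip_past_eol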
modified: storage/connect/filamzip.cpp modified: storage/connect/filamzip.h modified: storage/connect/tabdos.cpp modified: storage/connect/tabdos.h Try Vaintroub suggestion modified: storage/connect/mysql-test/connect/t/jdbc.test --- storage/connect/filamap.cpp | 21 +++++++++++-------- storage/connect/filamzip.cpp | 49 +++++++++++++++++++++++++++++++++----------- storage/connect/filamzip.h | 16 +++++++++------ storage/connect/tabdos.cpp | 4 +++- storage/connect/tabdos.h | 1 + 5 files changed, 64 insertions(+), 27 deletions(-) (limited to 'storage') diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 8fffaca3d06..84dff422db7 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -301,10 +301,9 @@ int MAPFAM::SkipRecord(PGLOBAL g, bool header) PDBUSER dup = (PDBUSER)g->Activityp->Aptr; // Skip this record - while (*Mempos++ != '\n') ; // What about Unix ??? - - if (Mempos >= Top) - return RC_EF; + while (*Mempos++ != '\n') // What about Unix ??? + if (Mempos == Top) + return RC_EF; // Update progress information dup->ProgCur = GetPos(); @@ -320,7 +319,7 @@ int MAPFAM::SkipRecord(PGLOBAL g, bool header) /***********************************************************************/ int MAPFAM::ReadBuffer(PGLOBAL g) { - int rc, len; + int rc, len, n = 1; // Are we at the end of the memory if (Mempos >= Top) { @@ -362,10 +361,14 @@ int MAPFAM::ReadBuffer(PGLOBAL g) Placed = false; // Immediately calculate next position (Used by DeleteDB) - while (*Mempos++ != '\n') ; // What about Unix ??? + while (*Mempos++ != '\n') // What about Unix ??? + if (Mempos == Top) { + n = 0; + break; + } // endif Mempos // Set caller line buffer - len = (Mempos - Fpos) - 1; + len = (Mempos - Fpos) - n; // Don't rely on ENDING setting if (len > 0 && *(Mempos - 2) == '\r') @@ -619,7 +622,9 @@ int MBKFAM::ReadBuffer(PGLOBAL g) } // endif's // Immediately calculate next position (Used by DeleteDB) - while (*Mempos++ != '\n') ; // What about Unix ??? + while (*Mempos++ != '\n') // What about Unix ??? + if (Mempos == Top) + break; // Set caller line buffer len = (Mempos - Fpos) - Ending; diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 9abb2602b56..1a27c974f30 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -386,6 +386,7 @@ UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul) { zipfile = NULL; target = tgt; + pwd = NULL; fp = NULL; memory = NULL; size = 0; @@ -401,6 +402,26 @@ UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul) #endif } // end of UNZIPUTL standard constructor +UNZIPUTL::UNZIPUTL(PDOSDEF tdp) +{ + zipfile = NULL; + target = tdp->GetEntry(); + pwd = tdp->Pwd; + fp = NULL; + memory = NULL; + size = 0; + entryopen = false; + multiple = tdp->GetMul(); + memset(fn, 0, sizeof(fn)); + + // Init the case mapping table. 
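// Aside (an illustrative sketch, not part of the patch): the Pwd string
// captured from the DOSDEF above is what openEntry() later hands to minizip
// through unzOpenCurrentFilePassword(zipfile, pwd) in place of
// unzOpenCurrentFile(zipfile); a NULL password keeps the old behaviour for
// unencrypted archives. The helper name open_zip_entry below is hypothetical.
static bool open_zip_entry(PGLOBAL g, unzFile zf, const char *entry,
                           const char *pwd)
{
  int rc;

  if ((rc = unzLocateFile(zf, entry, 0)) != UNZ_OK) {
    sprintf(g->Message, "Entry %s not found, rc=%d", entry, rc);
    return true;
  } else if ((rc = unzOpenCurrentFilePassword(zf, pwd)) != UNZ_OK) {
    sprintf(g->Message, "unzOpen %s rc=%d", entry, rc);
    return true;
  } // endif rc

  return false;        // success: caller reads, then unzCloseCurrentFile
} // end of open_zip_entry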
+#if defined(__WIN__) + for (int i = 0; i < 256; ++i) mapCaseTable[i] = toupper(i); +#else + for (int i = 0; i < 256; ++i) mapCaseTable[i] = i; +#endif +} // end of UNZIPUTL standard constructor + #if 0 UNZIPUTL::UNZIPUTL(PZIPUTIL zutp) { @@ -625,7 +646,7 @@ bool UNZIPUTL::openEntry(PGLOBAL g) if (rc != UNZ_OK) { sprintf(g->Message, "unzGetCurrentFileInfo64 rc=%d", rc); return true; - } else if ((rc = unzOpenCurrentFile(zipfile)) != UNZ_OK) { + } else if ((rc = unzOpenCurrentFilePassword(zipfile, pwd)) != UNZ_OK) { sprintf(g->Message, "unzOpen fn=%s rc=%d", fn, rc); return true; } // endif rc @@ -675,15 +696,17 @@ void UNZIPUTL::closeEntry() UNZFAM::UNZFAM(PDOSDEF tdp) : MAPFAM(tdp) { zutp = NULL; - target = tdp->GetEntry(); - mul = tdp->GetMul(); + tdfp = tdp; + //target = tdp->GetEntry(); + //mul = tdp->GetMul(); } // end of UNZFAM standard constructor UNZFAM::UNZFAM(PUNZFAM txfp) : MAPFAM(txfp) { zutp = txfp->zutp; - target = txfp->target; - mul = txfp->mul; + tdfp = txfp->tdfp; + //target = txfp->target; + //mul = txfp->mul; } // end of UNZFAM copy constructor /***********************************************************************/ @@ -726,7 +749,7 @@ bool UNZFAM::OpenTableFile(PGLOBAL g) /*********************************************************************/ /* Allocate the ZIP utility class. */ /*********************************************************************/ - zutp = new(g) UNZIPUTL(target, mul); + zutp = new(g) UNZIPUTL(tdfp); // We used the file name relative to recorded datapath PlugSetPath(filename, To_File, Tdbp->GetPath()); @@ -841,17 +864,19 @@ void UNZFAM::CloseTableFile(PGLOBAL g, bool) UZXFAM::UZXFAM(PDOSDEF tdp) : MPXFAM(tdp) { zutp = NULL; - target = tdp->GetEntry(); - mul = tdp->GetMul(); + tdfp = tdp; + //target = tdp->GetEntry(); + //mul = tdp->GetMul(); //Lrecl = tdp->GetLrecl(); } // end of UZXFAM standard constructor UZXFAM::UZXFAM(PUZXFAM txfp) : MPXFAM(txfp) { zutp = txfp->zutp; - target = txfp->target; - mul = txfp->mul; -//Lrecl = txfp->Lrecl; + tdfp = txfp->tdfp; + //target = txfp->target; + //mul = txfp->mul; + //Lrecl = txfp->Lrecl; } // end of UZXFAM copy constructor /***********************************************************************/ @@ -907,7 +932,7 @@ bool UZXFAM::OpenTableFile(PGLOBAL g) /* Allocate the ZIP utility class. 
*/ /*********************************************************************/ if (!zutp) - zutp = new(g)UNZIPUTL(target, mul); + zutp = new(g)UNZIPUTL(tdfp); // We used the file name relative to recorded datapath PlugSetPath(filename, To_File, Tdbp->GetPath()); diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h index 3160703bd20..2f9e491f621 100644 --- a/storage/connect/filamzip.h +++ b/storage/connect/filamzip.h @@ -45,6 +45,7 @@ class DllExport ZIPUTIL : public BLOCK { // Members zipFile zipfile; // The ZIP container file PSZ target; // The target file name + PSZ pwd; // The ZIP file password //unz_file_info finfo; // The current file info PFBLOCK fp; //char *memory; @@ -61,8 +62,8 @@ class DllExport ZIPUTIL : public BLOCK { class DllExport UNZIPUTL : public BLOCK { public: // Constructor - UNZIPUTL(PSZ tgt, bool mul); -//UNZIPUTL(UNZIPUTL *zutp); + UNZIPUTL(PSZ tgt, bool mul); + UNZIPUTL(PDOSDEF tdp); // Implementation //PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); } @@ -80,6 +81,7 @@ class DllExport UNZIPUTL : public BLOCK { // Members unzFile zipfile; // The ZIP container file PSZ target; // The target file name + PSZ pwd; // The ZIP file password unz_file_info finfo; // The current file info PFBLOCK fp; char *memory; @@ -119,8 +121,9 @@ class DllExport UNZFAM : public MAPFAM { protected: // Members UNZIPUTL *zutp; - PSZ target; - bool mul; + PDOSDEF tdfp; +//PSZ target; +//bool mul; }; // end of UNZFAM /***********************************************************************/ @@ -147,8 +150,9 @@ class DllExport UZXFAM : public MPXFAM { protected: // Members UNZIPUTL *zutp; - PSZ target; - bool mul; + PDOSDEF tdfp; +//PSZ target; +//bool mul; }; // end of UZXFAM /***********************************************************************/ diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index f1bbef19a22..02d3fc176ce 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -98,6 +98,7 @@ DOSDEF::DOSDEF(void) Ofn = NULL; Entry = NULL; To_Indx = NULL; + Pwd = NULL; Recfm = RECFM_VAR; Mapped = false; Zipped = false; @@ -139,7 +140,8 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) : false; Mulentries = GetBoolCatInfo("Mulentries", Mulentries); Append = GetBoolCatInfo("Append", false); - } + Pwd = GetStringCatInfo(g, "Password", NULL); + } // endif Zipped Desc = Fn = GetStringCatInfo(g, "Filename", NULL); Ofn = GetStringCatInfo(g, "Optname", Fn); diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h index 922d52ee399..c404328a675 100644 --- a/storage/connect/tabdos.h +++ b/storage/connect/tabdos.h @@ -77,6 +77,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ PSZ Fn; /* Path/Name of corresponding file */ PSZ Ofn; /* Base Path/Name of matching index files*/ PSZ Entry; /* Zip entry name or pattern */ + PSZ Pwd; /* Zip password */ PIXDEF To_Indx; /* To index definitions blocks */ RECFM Recfm; /* 0:VAR, 1:FIX, 2:BIN, 3:VCT, 6:DBF */ bool Mapped; /* 0: disk file, 1: memory mapped file */ -- cgit v1.2.1 From a091314d2775e0d33f2476953a7c5ab9647b2f37 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 22 Apr 2017 14:14:11 +0200 Subject: Fix MDEV-12520: Decimal values can be truncated for JDBC tables modified: storage/connect/jdbconn.cpp Fix bug. Date value was null when retrieved from a json expanded array. 
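Of these two fixes, the date one simply adds TYPE_DATE to the pass-through cases of JSONCOL::SetJsonValue (see the tabjson.cpp hunk below). The decimal one works by fetching java.sql DECIMAL columns (JDBC type code 3) through the string accessor instead of the double accessor, so no digits are lost to double rounding before CONNECT converts the value itself. Reduced to a sketch (the accessor wrappers string_field/double_field are illustrative, the real code is in JDBConn::SetColumnValue):

    switch (ctyp) {
      case 12:           // VARCHAR
      case -1:           // LONGVARCHAR
      case  1:           // CHAR
      case  3:           // DECIMAL: keep every digit, no double rounding
        val->SetValue_psz(string_field(rank, name));
        break;
      case  8:           // DOUBLE
      case  2:           // NUMERIC
        val->SetValue(double_field(rank, name));
        break;
    } // endswitch ctyp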
modified: storage/connect/tabjson.cpp --- storage/connect/jdbconn.cpp | 5 +++-- storage/connect/tabjson.cpp | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index 02adff5d2c8..e8a260c8be9 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -1235,7 +1235,8 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) case 12: // VARCHAR case -1: // LONGVARCHAR case 1: // CHAR - if (jb) + case 3: // DECIMAL + if (jb && ctyp != 3) cn = (jstring)jb; else if (!gmID(g, chrfldid, "StringField", "(ILjava/lang/String;)Ljava/lang/String;")) cn = (jstring)env->CallObjectMethod(job, chrfldid, (jint)rank, jn); @@ -1261,7 +1262,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) break; case 8: // DOUBLE case 2: // NUMERIC - case 3: // DECIMAL +//case 3: // DECIMAL if (!gmID(g, dblfldid, "DoubleField", "(ILjava/lang/String;)D")) val->SetValue((double)env->CallDoubleMethod(job, dblfldid, rank, jn)); else diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 4b2ab4fd240..f451c73b665 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1171,6 +1171,7 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) case TYPE_INTG: case TYPE_BINT: case TYPE_DBL: + case TYPE_DATE: vp->SetValue_pval(val->GetValue()); break; case TYPE_BOOL: -- cgit v1.2.1 From 63b7d9d1586336a61d6de7fbdcdfbc72e6117f93 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 29 Apr 2017 19:20:51 +0200 Subject: Fix MDEV-12631 valgrind warning for zipped tables modified: storage/connect/filamzip.cpp --- storage/connect/filamzip.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'storage') diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 1a27c974f30..66e628abb14 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -1,7 +1,7 @@ /*********** File AM Zip C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMZIP */ /* ------------- */ -/* Version 1.1 */ +/* Version 1.2 */ /* */ /* COPYRIGHT: */ /* ---------- */ @@ -652,12 +652,18 @@ bool UNZIPUTL::openEntry(PGLOBAL g) } // endif rc size = finfo.uncompressed_size; - memory = new char[size + 1]; + + try { + memory = new char[size + 1]; + } catch (...) 
{ + strcpy(g->Message, "Out of memory"); + return true; + } // end try/catch if ((rc = unzReadCurrentFile(zipfile, memory, size)) < 0) { sprintf(g->Message, "unzReadCurrentFile rc = %d", rc); unzCloseCurrentFile(zipfile); - free(memory); + delete[] memory; memory = NULL; entryopen = false; } else { @@ -682,7 +688,7 @@ void UNZIPUTL::closeEntry() } // endif entryopen if (memory) { - free(memory); + delete[] memory; memory = NULL; } // endif memory -- cgit v1.2.1 From 1de6440a2e441e7aeffe85ee9c72e1e001fcf38d Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 3 May 2017 10:32:01 +0200 Subject: Fix MDEV-12587 MariaDB CONNECT DIR Type - Subfolder Option: SELECT Query Never Ends modified: storage/connect/tabmul.cpp modified: storage/connect/tabmul.h Work on MDEV-12667 Crash when using JSON tables modified: storage/connect/connect.cc modified: storage/connect/ha_connect.cc modified: storage/connect/ha_connect.h modified: storage/connect/plgdbutl.cpp Change Base offset for DIR tables on Linux modified: storage/connect/reldef.cpp --- storage/connect/connect.cc | 3 +- storage/connect/ha_connect.cc | 27 +++++++++++------- storage/connect/ha_connect.h | 2 +- storage/connect/plgdbutl.cpp | 5 ++-- storage/connect/reldef.cpp | 11 ++++++-- storage/connect/tabjson.cpp | 6 ++-- storage/connect/tabmul.cpp | 65 +++++++++++++++++++++++++------------------ storage/connect/tabmul.h | 6 ++-- storage/connect/tabxml.cpp | 7 +++-- 9 files changed, 82 insertions(+), 50 deletions(-) (limited to 'storage') diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 9ec93bfe1ba..5d5eca80275 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -117,7 +117,8 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) handler); // Set the database path for this table - handler->SetDataPath(g, pathname); + if (handler->SetDataPath(g, pathname)) + return true; if (dbuserp->Catalog) { // ((MYCAT *)dbuserp->Catalog)->SetHandler(handler); done later diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 2cd581c5c64..9f0b62c73b3 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1739,9 +1739,9 @@ void ha_connect::AddColName(char *cp, Field *fp) /***********************************************************************/ /* This function sets the current database path. 
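// Aside on the MDEV-12631 hunks above (illustrative, not part of the patch):
// the valgrind warning came from releasing a buffer allocated with new[]
// through free(), which do not pair. The rule the fix applies, in isolation:
//
//   char *memory = new char[size + 1];   // C++ array new ...
//   // ... use the buffer ...
//   delete[] memory;                     // ... must be freed with delete[]
//
// and wrapping the allocation in try/catch turns an allocation failure into
// the usual "Out of memory" text in g->Message instead of an unhandled throw.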
*/ /***********************************************************************/ -void ha_connect::SetDataPath(PGLOBAL g, const char *path) +bool ha_connect::SetDataPath(PGLOBAL g, const char *path) { - datapath= SetPath(g, path); + return (!(datapath = SetPath(g, path))); } // end of SetDataPath /****************************************************************************/ @@ -2721,6 +2721,8 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) if (x) return NULL; + else + pb0 = pb1 = pb2 = ph0 = ph1 = ph2 = NULL; if (trace) htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(), @@ -4110,10 +4112,14 @@ int ha_connect::info(uint flag) } // endif xmod // This is necessary for getting file length - if (table) - SetDataPath(g, table->s->db.str); - else - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen + if (table) { + if (SetDataPath(g, table->s->db.str)) { + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); + } // endif SetDataPath + + } else + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen if (!(tdbp= GetTDB(g))) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen @@ -6563,9 +6569,10 @@ int ha_connect::create(const char *name, TABLE *table_arg, PDBUSER dup= PlgGetUser(g); PCATLG cat= (dup) ? dup->Catalog : NULL; - SetDataPath(g, table_arg->s->db.str); - - if (cat) { + if (SetDataPath(g, table_arg->s->db.str)) { + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + rc = HA_ERR_INTERNAL_ERROR; + } else if (cat) { // cat->SetDataPath(g, table_arg->s->db.str); #if defined(WITH_PARTITION_STORAGE_ENGINE) @@ -7137,6 +7144,6 @@ maria_declare_plugin(connect) NULL, /* status variables */ connect_system_variables, /* system variables */ "1.05.0003", /* string version */ - MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */ + MariaDB_PLUGIN_MATURITY_STABLE /* maturity */ } maria_declare_plugin_end; diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h index 3d9ff967618..155bec3c966 100644 --- a/storage/connect/ha_connect.h +++ b/storage/connect/ha_connect.h @@ -199,7 +199,7 @@ public: bool IsUnique(uint n); char *GetDataPath(void) {return (char*)datapath;} - void SetDataPath(PGLOBAL g, const char *path); + bool SetDataPath(PGLOBAL g, const char *path); PTDB GetTDB(PGLOBAL g); int OpenTable(PGLOBAL g, bool del= false); bool CheckColumnList(PGLOBAL g); diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index 299332f51b4..eef9fba44f5 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -412,8 +412,9 @@ char *SetPath(PGLOBAL g, const char *path) if (path) { size_t len= strlen(path) + (*path != '.' ? 
4 : 1); - buf= (char*)PlugSubAlloc(g, NULL, len); - + if (!(buf = (char*)PlgDBSubAlloc(g, NULL, len))) + return NULL; + if (PlugIsAbsolutePath(path)) { strcpy(buf, path); return buf; diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 5bb7848ab1c..c0f3074769a 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -1,11 +1,11 @@ /************* RelDef CPP Program Source Code File (.CPP) **************/ /* PROGRAM NAME: RELDEF */ /* ------------- */ -/* Version 1.6 */ +/* Version 1.7 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -277,8 +277,13 @@ int TABDEF::GetColCatInfo(PGLOBAL g) // Take care of the column definitions i= poff= nof= nlg= 0; +#if defined(__WIN__) // Offsets of HTML and DIR tables start from 0, DBF at 1 - loff= (tc == TAB_DBF) ? 1 : (tc == TAB_XML || tc == TAB_DIR) ? -1 : 0; + loff = (tc == TAB_DBF) ? 1 : (tc == TAB_XML || tc == TAB_DIR) ? -1 : 0; +#else // !__WIN__ + // Offsets of HTML tables start from 0, DIR and DBF at 1 + loff = (tc == TAB_DBF || tc == TAB_DIR) ? 1 : (tc == TAB_XML) ? -1 : 0; +#endif // !__WIN__ while (true) { // Default Offset depends on table type diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index f451c73b665..e074232600b 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -117,8 +117,10 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) return NULL; } // endif Fn - tdp->Database = SetPath(g, db); - tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); + if (!(tdp->Database = SetPath(g, db))) + return NULL; + + tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0; tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2); diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index 4128fea6afc..b5f07db8413 100644 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -603,9 +603,10 @@ bool TDBMSD::InitFileNames(PGLOBAL g) bool DIRDEF::DefineAM(PGLOBAL g, LPCSTR, int) { Desc = Fn = GetStringCatInfo(g, "Filename", NULL); - Incl = (GetIntCatInfo("Subdir", 0) != 0); - Huge = (GetIntCatInfo("Huge", 0) != 0); - return false; + Incl = GetBoolCatInfo("Subdir", false); + Huge = GetBoolCatInfo("Huge", false); + Nodir = GetBoolCatInfo("Nodir", true); + return false; } // end of DefineAM /***********************************************************************/ @@ -654,12 +655,14 @@ void TDBDIR::Init(void) TDBDIR::TDBDIR(PDIRDEF tdp) : TDBASE(tdp) { To_File = tdp->Fn; + Nodir = tdp->Nodir; Init(); } // end of TDBDIR standard constructor TDBDIR::TDBDIR(PSZ fpat) : TDBASE((PTABDEF)NULL) { To_File = fpat; + Nodir = true; Init(); } // end of TDBDIR constructor @@ -812,27 +815,32 @@ int TDBDIR::ReadDB(PGLOBAL g) int rc = RC_OK; #if defined(__WIN__) - if (hSearch == INVALID_HANDLE_VALUE) { - /*******************************************************************/ - /* Start searching files in the target directory. The use of the */ - /* Path function is required when called from TDBSDR. */ - /*******************************************************************/ - hSearch = FindFirstFile(Path(g), &FileData); + do { + if (hSearch == INVALID_HANDLE_VALUE) { + /*****************************************************************/ + /* Start searching files in the target directory. 
The use of */ + /* the Path function is required when called from TDBSDR. */ + /*****************************************************************/ + hSearch = FindFirstFile(Path(g), &FileData); + + if (hSearch == INVALID_HANDLE_VALUE) { + rc = RC_EF; + break; + } else + iFile++; - if (hSearch == INVALID_HANDLE_VALUE) - rc = RC_EF; - else - iFile++; + } else { + if (!FindNextFile(hSearch, &FileData)) { + // Restore file name and type pattern + _splitpath(To_File, NULL, NULL, Fname, Ftype); + rc = RC_EF; + break; + } else + iFile++; - } else { - if (!FindNextFile(hSearch, &FileData)) { - // Restore file name and type pattern - _splitpath(To_File, NULL, NULL, Fname, Ftype); - rc = RC_EF; - } else - iFile++; + } // endif hSearch - } // endif hSearch + } while (Nodir && FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY); if (rc == RC_OK) _splitpath(FileData.cFileName, NULL, NULL, Fname, Ftype); @@ -1229,8 +1237,9 @@ int TDBSDR::ReadDB(PGLOBAL g) retry: do { if (Sub->H == INVALID_HANDLE_VALUE) { - _makepath(Fpath, Drive, Direc, "*", "."); - Sub->H = FindFirstFile(Fpath, &FileData); +// _makepath(Fpath, Drive, Direc, "*", "."); why was this made? + _makepath(Fpath, Drive, Direc, "*", NULL); + Sub->H = FindFirstFile(Fpath, &FileData); } else if (!FindNextFile(Sub->H, &FileData)) { FindClose(Sub->H); Sub->H = INVALID_HANDLE_VALUE; @@ -1238,8 +1247,9 @@ int TDBSDR::ReadDB(PGLOBAL g) break; } // endif findnext - } while(!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) - || *FileData.cFileName== '.'); + } while (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) || + (*FileData.cFileName == '.' && + (!FileData.cFileName[1] || FileData.cFileName[1] == '.'))); if (Sub->H == INVALID_HANDLE_VALUE) { // No more sub-directories. Are we in a sub-directory? 
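In both the Windows branch above and the Linux branch below, the test that decides whether to descend into an entry now names the two pseudo-entries explicitly: recursion happens only for a real sub-directory whose name is neither "." nor ".." (the previous test also skipped any hidden name starting with a dot, and the _makepath() search pattern drops its stray "." extension). The check, reduced to a stand-alone helper (name is illustrative):

    // True for the two pseudo-entries present in every directory.
    static bool is_dot_dir(const char *name)
    {
      return name[0] == '.' &&
             (name[1] == '\0' || (name[1] == '.' && name[2] == '\0'));
    } // end of is_dot_dir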
@@ -1293,8 +1303,9 @@ int TDBSDR::ReadDB(PGLOBAL g) if (lstat(Fpath, &Fileinfo) < 0) { sprintf(g->Message, "%s: %s", Fpath, strerror(errno)); rc = RC_FX; - } else if (S_ISDIR(Fileinfo.st_mode) && *Entry->d_name != '.') { - // Look in the name sub-directory + } else if (S_ISDIR(Fileinfo.st_mode) && strcmp(Entry->d_name, ".") + && strcmp(Entry->d_name, "..")) { + // Look in the name sub-directory if (!Sub->Next) { PSUBDIR sup; diff --git a/storage/connect/tabmul.h b/storage/connect/tabmul.h index 3c0ab1a4aa5..f26d982d49b 100644 --- a/storage/connect/tabmul.h +++ b/storage/connect/tabmul.h @@ -119,7 +119,8 @@ class DllExport DIRDEF : public TABDEF { /* Directory listing table */ PSZ Fn; /* Path/Name of file search */ bool Incl; /* true to include sub-directories */ bool Huge; /* true if files can be larger than 2GB */ - }; // end of DIRDEF + bool Nodir; /* true to exclude directories */ +}; // end of DIRDEF /***********************************************************************/ /* This is the DIR Access Method class declaration for tables that */ @@ -175,7 +176,8 @@ public: char Direc[_MAX_DIR]; // Search path char Fname[_MAX_FNAME]; // File name char Ftype[_MAX_EXT]; // File extention - }; // end of class TDBDIR + bool Nodir; // Exclude directories from file list +}; // end of class TDBDIR /***********************************************************************/ /* This is the DIR Access Method class declaration for tables that */ diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index a895ffbd839..386d016d082 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -157,8 +157,11 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) tdp = new(g) XMLDEF; tdp->Fn = fn; - tdp->Database = SetPath(g, db); - tdp->Tabname = tab; + + if (!(tdp->Database = SetPath(g, db))) + return NULL; + + tdp->Tabname = tab; tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false); tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL); -- cgit v1.2.1 From 6525dc63366f751fcfa45a586f378a1c81458657 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 3 May 2017 12:05:05 +0200 Subject: Disable json tests --- storage/connect/mysql-test/connect/disabled.def | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def index 5e15e0806ba..4e07b5c0576 100644 --- a/storage/connect/mysql-test/connect/disabled.def +++ b/storage/connect/mysql-test/connect/disabled.def @@ -9,7 +9,7 @@ # Do not use any TAB characters for whitespace. 
# ############################################################################## -#jdbc : Variable settings depend on machine configuration -#jdbc_new : Variable settings depend on machine configuration +jdbc : Variable settings depend on machine configuration +jdbc_new : Variable settings depend on machine configuration jdbc_oracle : Variable settings depend on machine configuration jdbc_postgresql : Variable settings depend on machine configuration -- cgit v1.2.1 From a2af3c0d44d919051c08b63cb10bed546d5f92a3 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 4 May 2017 18:51:19 +0200 Subject: Fix MDEV-12653 Cannot add index for ZIP CONNECT table modified: storage/connect/filamzip.cpp modified: storage/connect/ha_connect.cc modified: storage/connect/tabdos.cpp modified: storage/connect/tabfmt.cpp modified: storage/connect/tabjson.cpp modified: storage/connect/xindex.cpp --- storage/connect/filamzip.cpp | 8 +++++++- storage/connect/ha_connect.cc | 10 ++++++---- storage/connect/tabdos.cpp | 4 ++-- storage/connect/tabfmt.cpp | 2 +- storage/connect/tabjson.cpp | 2 +- storage/connect/xindex.cpp | 4 ++-- 6 files changed, 19 insertions(+), 11 deletions(-) (limited to 'storage') diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 66e628abb14..17ca84d9d5c 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -740,7 +740,13 @@ int UNZFAM::Cardinality(PGLOBAL g) int card = -1; int len = GetFileLength(g); - card = (len / (int)Lrecl) * 2; // Estimated ??? + if (len) { + // Estimated ??? + card = (len / (int)Lrecl) * 2; + card = card ? card : 10; // Lrecl can be too big + } else + card = 0; + return card; } // end of Cardinality diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 9f0b62c73b3..5245b2559c5 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -4601,10 +4601,12 @@ int ha_connect::external_lock(THD *thd, int lock_type) DBUG_RETURN(0); } else if (g->Xchk) { if (!tdbp) { - if (!(tdbp= GetTDB(g))) - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); - else if (!tdbp->GetDef()->Indexable()) { - sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName()); + if (!(tdbp = GetTDB(g))) { +// DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + DBUG_RETURN(0); + } else if (!tdbp->GetDef()->Indexable()) { + sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName()); // DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); DBUG_RETURN(0); diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index 02d3fc176ce..d6651fb21a9 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -353,7 +353,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) if (Zipped) { #if defined(ZIP_SUPPORT) if (Recfm == RECFM_VAR) { - if (mode == MODE_READ || mode == MODE_ANY) { + if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (mode == MODE_INSERT) { txfp = new(g) ZIPFAM(this); @@ -364,7 +364,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) tdbp = new(g) TDBDOS(this, txfp); } else { - if (mode == MODE_READ || mode == MODE_ANY) { + if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { txfp = new(g) UZXFAM(this); } else if (mode == MODE_INSERT) { txfp = new(g) ZPXFAM(this); diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 
be288b3858d..ac85165d99f 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -517,7 +517,7 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode) /*******************************************************************/ if (Zipped) { #if defined(ZIP_SUPPORT) - if (mode == MODE_READ || mode == MODE_ANY) { + if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (mode == MODE_INSERT) { txfp = new(g) ZIPFAM(this); diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index e074232600b..9450f8d76aa 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -443,7 +443,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) if (Zipped) { #if defined(ZIP_SUPPORT) - if (m == MODE_READ || m == MODE_UPDATE) { + if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (m == MODE_INSERT) { txfp = new(g) ZIPFAM(this); diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index a2ddf468e5a..48edd12b3d8 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -969,8 +969,8 @@ bool XINDEX::Init(PGLOBAL g) // For DBF tables, Cardinality includes bad or soft deleted lines // that are not included in the index, and can be larger then the // index size. - estim = (Tdbp->Ftype == RECFM_DBF); - n = Tdbp->Cardinality(g); // n is exact table size + estim = (Tdbp->Ftype == RECFM_DBF || Tdbp->Txfp->GetAmType() == TYPE_AM_ZIP); + n = Tdbp->Cardinality(g); // n is exact table size } else { // Variable table not optimized estim = true; // n is an estimate of the size -- cgit v1.2.1 From 531698e0da307943dff5ee6830a241d6829345cd Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 6 May 2017 00:08:20 +0200 Subject: Fix MDEV-12603 Insert replaces values in ZIP file modified: storage/connect/filamzip.cpp modified: storage/connect/filamzip.h Fix MDEV-12686 Handle null in json Fix MDEV-12688 Insert does not handle type TINYINT modified: storage/connect/json.cpp modified: storage/connect/tabjson.cpp --- storage/connect/filamzip.cpp | 65 +++++++++++++++++++++++++++++++++++++++++--- storage/connect/filamzip.h | 3 +- storage/connect/json.cpp | 24 ++++++++-------- storage/connect/tabjson.cpp | 9 +++--- 4 files changed, 80 insertions(+), 21 deletions(-) (limited to 'storage') diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 17ca84d9d5c..92de5117638 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -1,7 +1,7 @@ /*********** File AM Zip C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMZIP */ /* ------------- */ -/* Version 1.2 */ +/* Version 1.3 */ /* */ /* COPYRIGHT: */ /* ---------- */ @@ -633,6 +633,28 @@ bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn) return false; } // end of OpenTableFile +/***********************************************************************/ +/* Insert only if the entry does not exist. 
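Concretely, appending is refused as soon as the archive already holds what the insert would write into: with no entry name given, any existing entry blocks the append; with an entry name, only an entry of that same name does. Reduced to a stand-alone sketch (helper name illustrative, error reporting omitted):

    // True when inserting into an already existing archive is acceptable.
    static bool may_append(unzFile zf, const char *entry)
    {
      if (!entry || !*entry) {
        unz_global_info64 gi;
        int err = unzGetGlobalInfo64(zf, &gi);

        // Acceptable only if the archive currently stores no entry at all
        return !(err == UNZ_OK && gi.number_entry > 0);
      } else
        // Acceptable only if no entry of that name is stored yet
        return unzLocateFile(zf, entry, 0) != UNZ_OK;
    } // end of may_append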
*/ +/***********************************************************************/ +bool UNZIPUTL::IsInsertOk(PGLOBAL g, char *fn) +{ + bool ok = true, b = open(g, fn); + + if (!b) { + if (!target || *target == 0) { + unz_global_info64 ginfo; + int err = unzGetGlobalInfo64(zipfile, &ginfo); + + ok = !(err == UNZ_OK && ginfo.number_entry > 0); + } else // Check if the target exist + ok = (unzLocateFile(zipfile, target, 0) != UNZ_OK); + + unzClose(zipfile); + } // endif b + + return ok; +} // end of IsInsertOk + /***********************************************************************/ /* Open target in zip file. */ /***********************************************************************/ @@ -1006,6 +1028,25 @@ bool ZIPFAM::OpenTableFile(PGLOBAL g) { char filename[_MAX_PATH]; MODE mode = Tdbp->GetMode(); + int len = TXTFAM::GetFileLength(g); + + // We used the file name relative to recorded datapath + PlugSetPath(filename, To_File, Tdbp->GetPath()); + + if (len < 0) + return true; + else if (!append && len > 0) { + strcpy(g->Message, "No insert into existing zip file"); + return true; + } else if (append && len > 0) { + UNZIPUTL *zutp = new(g) UNZIPUTL(target, false); + + if (!zutp->IsInsertOk(g, filename)) { + strcpy(g->Message, "No insert into existing entry"); + return true; + } // endif Ok + + } // endif's /*********************************************************************/ /* Allocate the ZIP utility class. */ @@ -1065,15 +1106,31 @@ ZPXFAM::ZPXFAM(PDOSDEF tdp) : FIXFAM(tdp) target = tdp->GetEntry(); append = tdp->GetAppend(); //Lrecl = tdp->GetLrecl(); -} // end of UZXFAM standard constructor +} // end of ZPXFAM standard constructor /***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */ /***********************************************************************/ bool ZPXFAM::OpenTableFile(PGLOBAL g) { - char filename[_MAX_PATH]; - MODE mode = Tdbp->GetMode(); + char filename[_MAX_PATH]; + MODE mode = Tdbp->GetMode(); + int len = TXTFAM::GetFileLength(g); + + if (len < 0) + return true; + else if (!append && len > 0) { + strcpy(g->Message, "No insert into existing zip file"); + return true; + } else if (append && len > 0) { + UNZIPUTL *zutp = new(g) UNZIPUTL(target, false); + + if (!zutp->IsInsertOk(g, filename)) { + strcpy(g->Message, "No insert into existing entry"); + return true; + } // endif Ok + + } // endif's /*********************************************************************/ /* Allocate the ZIP utility class. 
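A related note on the MDEV-12653 hunks earlier in this series: index creation runs the table in MODE_ALTER, which is why the zipped-table branches of the GetTable() methods now route that mode to the same unzip file access method as reads, and why xindex.cpp treats the zip Cardinality() as an estimate rather than an exact row count. The dispatch, reduced to a sketch (error message text omitted):

    if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER)
      txfp = new(g) UNZFAM(this);   // reads and index builds unzip the data
    else if (mode == MODE_INSERT)
      txfp = new(g) ZIPFAM(this);   // inserts go through the zip writer
    else
      return NULL;                  // remaining modes are rejected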
*/ diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h index 2f9e491f621..ba1d8658dc4 100644 --- a/storage/connect/filamzip.h +++ b/storage/connect/filamzip.h @@ -1,5 +1,5 @@ /************** filamzip H Declares Source Code File (.H) **************/ -/* Name: filamzip.h Version 1.1 */ +/* Name: filamzip.h Version 1.2 */ /* */ /* (C) Copyright to the author Olivier BERTRAND 2016-2017 */ /* */ @@ -77,6 +77,7 @@ class DllExport UNZIPUTL : public BLOCK { bool WildMatch(PSZ pat, PSZ str); int findEntry(PGLOBAL g, bool next); int nextEntry(PGLOBAL g); + bool IsInsertOk(PGLOBAL g, char *fn); // Members unzFile zipfile; // The ZIP container file diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 6817439a635..2aca1377d69 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -370,16 +370,16 @@ PJVAL ParseValue(PGLOBAL g, int& i, STRG& src, bool *pty) PJVAL jvp = new(g) JVALUE; for (; i < len; i++) - switch (s[i]) { - case '\n': - pty[0] = pty[1] = false; - case '\r': - case ' ': - case '\t': - break; - default: - goto suite; - } // endswitch + switch (s[i]) { + case '\n': + pty[0] = pty[1] = false; + case '\r': + case ' ': + case '\t': + break; + default: + goto suite; + } // endswitch suite: switch (s[i]) { @@ -1432,7 +1432,7 @@ void JVALUE::SetTiny(PGLOBAL g, char n) { Value = AllocateValue(g, &n, TYPE_TINY); Jsp = NULL; -} // end of SetInteger +} // end of SetTiny /***********************************************************************/ /* Set the Value's value as the given big integer. */ @@ -1466,6 +1466,6 @@ void JVALUE::SetString(PGLOBAL g, PSZ s, short c) /***********************************************************************/ bool JVALUE::IsNull(void) { - return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsZero() : true; + return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsNull() : true; } // end of IsNull diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 9450f8d76aa..22f9b592952 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1193,11 +1193,11 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) // } // endif Type default: - vp->Reset(); - } // endswitch Type + vp->SetNull(true); + } // endswitch Type } else - vp->Reset(); + vp->SetNull(true); } // end of SetJsonValue @@ -1211,7 +1211,7 @@ void JSONCOL::ReadColumn(PGLOBAL g) // Set null when applicable if (Nullable) - Value->SetNull(Value->IsZero()); + Value->SetNull(Value->IsNull()); } // end of ReadColumn @@ -1548,6 +1548,7 @@ void JSONCOL::WriteColumn(PGLOBAL g) // Passthru case TYPE_DATE: case TYPE_INT: + case TYPE_TINY: case TYPE_SHORT: case TYPE_BIGINT: case TYPE_DOUBLE: -- cgit v1.2.1 From 1c88b9a8d3200bdf554ece09316cacf2146877ee Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 11 May 2017 21:57:21 +0200 Subject: Fix wrong value of JSON column When null and the column is NOT NULL the value was not reset. 
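The MDEV-12686 hunks just above show the direction this commit keeps refining: a missing item or a JSON null now marks the CONNECT value as null through SetNull(true) instead of merely resetting it, and nullability is afterwards derived from IsNull() rather than IsZero(), so a legitimate 0 or empty string is no longer reported as NULL. In sketch form (vp is the column's value object, val the JSON item, as in JSONCOL::SetJsonValue):

    if (val && !val->IsNull())
      vp->SetValue_pval(val->GetValue());   // real value, including 0 and ""
    else
      vp->SetNull(true);                    // no item or JSON null => SQL NULL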
modified: storage/connect/tabjson.cpp Fix converting bstr_t string to set error message modified: storage/connect/domdoc.cpp Fix MDEV-12768: -Wformat-overflow compile warnings modified: storage/connect/global.h modified: storage/connect/jsonudf.cpp modified: storage/connect/plugutil.cpp modified: storage/connect/tabvct.cpp Typo (in external_lock) modified: storage/connect/ha_connect.cc Remove some warnings modified: storage/connect/odbconn.cpp modified: storage/connect/tabmysql.cpp Add MEM_RESERVE flag to virtualAlloc modified: storage/connect/plgdbutl.cpp Fix MDEV-12573: Accept=1 may show incorrect value for NULL column in CONNECT TBL modified: storage/connect/tabutil.cpp Fix wrong setting of key size when greater than 2G modified: storage/connect/xindex.cpp Fixing MDEV-12149: compile errors on Windows with /Zc:strictStrings Introduce typedef PCSZ and replace PSZ by it where it matters All done on CONNECT but compile still fails because of an included system file modified: storage/connect/array.cpp modified: storage/connect/catalog.h modified: storage/connect/colblk.cpp modified: storage/connect/colblk.h modified: storage/connect/connect.cc modified: storage/connect/domdoc.cpp modified: storage/connect/domdoc.h modified: storage/connect/filamdbf.cpp modified: storage/connect/filamdbf.h modified: storage/connect/filamfix.cpp modified: storage/connect/filamgz.cpp modified: storage/connect/filamtxt.h modified: storage/connect/filamvct.cpp modified: storage/connect/filamvct.h modified: storage/connect/filamzip.cpp modified: storage/connect/filamzip.h modified: storage/connect/ha_connect.cc modified: storage/connect/ha_connect.h modified: storage/connect/jdbccat.h modified: storage/connect/jdbconn.cpp modified: storage/connect/jdbconn.h modified: storage/connect/json.cpp modified: storage/connect/json.h modified: storage/connect/jsonudf.cpp modified: storage/connect/jsonudf.h modified: storage/connect/libdoc.cpp modified: storage/connect/macutil.cpp modified: storage/connect/myconn.cpp modified: storage/connect/myutil.cpp modified: storage/connect/myutil.h modified: storage/connect/odbccat.h modified: storage/connect/odbconn.cpp modified: storage/connect/odbconn.h modified: storage/connect/os.h modified: storage/connect/plgdbsem.h modified: storage/connect/plgdbutl.cpp modified: storage/connect/plgxml.cpp modified: storage/connect/plgxml.h modified: storage/connect/plugutil.cpp modified: storage/connect/preparse.h modified: storage/connect/reldef.cpp modified: storage/connect/reldef.h modified: storage/connect/tabdos.cpp modified: storage/connect/tabdos.h modified: storage/connect/tabext.cpp modified: storage/connect/tabext.h modified: storage/connect/tabfix.cpp modified: storage/connect/tabfix.h modified: storage/connect/tabfmt.cpp modified: storage/connect/tabfmt.h modified: storage/connect/tabjdbc.cpp modified: storage/connect/tabjdbc.h modified: storage/connect/tabjson.cpp modified: storage/connect/tabjson.h modified: storage/connect/table.cpp modified: storage/connect/tabmac.cpp modified: storage/connect/tabmul.cpp modified: storage/connect/tabmul.h modified: storage/connect/tabmysql.cpp modified: storage/connect/tabmysql.h modified: storage/connect/tabodbc.cpp modified: storage/connect/tabodbc.h modified: storage/connect/tabpivot.cpp modified: storage/connect/tabpivot.h modified: storage/connect/tabsys.cpp modified: storage/connect/tabsys.h modified: storage/connect/tabutil.cpp modified: storage/connect/tabutil.h modified: storage/connect/tabvir.cpp modified: storage/connect/tabvir.h 
modified: storage/connect/tabwmi.cpp modified: storage/connect/tabwmi.h modified: storage/connect/tabxml.cpp modified: storage/connect/tabxml.h modified: storage/connect/tabzip.cpp modified: storage/connect/tabzip.h modified: storage/connect/valblk.cpp modified: storage/connect/valblk.h modified: storage/connect/value.cpp modified: storage/connect/value.h modified: storage/connect/xindex.cpp modified: storage/connect/xobject.cpp modified: storage/connect/xobject.h modified: storage/connect/xtable.h Fix MDEV-12603 Insert replaces values in ZIP file modified: storage/connect/filamzip.cpp modified: storage/connect/filamzip.h Fix MDEV-12686 Handle null in json Fix MDEV-12688 Insert does not handle type TINYINT modified: storage/connect/json.cpp modified: storage/connect/tabjson.cpp Fix MDEV-12653 Cannot add index for ZIP CONNECT table modified: storage/connect/filamzip.cpp modified: storage/connect/ha_connect.cc modified: storage/connect/tabdos.cpp modified: storage/connect/tabfmt.cpp modified: storage/connect/tabjson.cpp modified: storage/connect/xindex.cpp --- storage/connect/array.cpp | 18 +- storage/connect/blkfil.cpp | 4 - storage/connect/catalog.h | 17 +- storage/connect/colblk.cpp | 22 +- storage/connect/colblk.h | 8 +- storage/connect/connect.cc | 589 ++++++----------- storage/connect/connect.h | 2 +- storage/connect/domdoc.cpp | 20 +- storage/connect/domdoc.h | 8 +- storage/connect/filamdbf.cpp | 6 +- storage/connect/filamdbf.h | 4 +- storage/connect/filamfix.cpp | 3 +- storage/connect/filamgz.cpp | 4 +- storage/connect/filamtxt.cpp | 31 +- storage/connect/filamtxt.h | 2 +- storage/connect/filamvct.cpp | 98 +-- storage/connect/filamvct.h | 6 +- storage/connect/filamzip.cpp | 30 +- storage/connect/filamzip.h | 44 +- storage/connect/filter.cpp | 12 - storage/connect/filter.h | 2 +- storage/connect/global.h | 2 +- storage/connect/ha_connect.cc | 1391 +++++++++++++++++++-------------------- storage/connect/ha_connect.h | 36 +- storage/connect/inihandl.c | 2 +- storage/connect/ioapi.c | 21 +- storage/connect/ioapi.h | 5 +- storage/connect/jdbccat.h | 20 +- storage/connect/jdbconn.cpp | 42 +- storage/connect/jdbconn.h | 30 +- storage/connect/json.cpp | 343 ++++------ storage/connect/json.h | 20 +- storage/connect/jsonudf.cpp | 372 ++++------- storage/connect/jsonudf.h | 2 +- storage/connect/libdoc.cpp | 27 +- storage/connect/macutil.cpp | 2 +- storage/connect/mycat.cc | 37 +- storage/connect/mycat.h | 7 +- storage/connect/myconn.cpp | 11 +- storage/connect/myutil.cpp | 8 +- storage/connect/myutil.h | 8 +- storage/connect/odbccat.h | 18 +- storage/connect/odbconn.cpp | 115 ++-- storage/connect/odbconn.h | 32 +- storage/connect/os.h | 3 + storage/connect/plgdbsem.h | 19 +- storage/connect/plgdbutl.cpp | 184 ++---- storage/connect/plgxml.cpp | 4 +- storage/connect/plgxml.h | 12 +- storage/connect/plugutil.cpp | 29 +- storage/connect/preparse.h | 4 +- storage/connect/reldef.cpp | 43 +- storage/connect/reldef.h | 18 +- storage/connect/tabdos.cpp | 63 +- storage/connect/tabdos.h | 20 +- storage/connect/tabext.cpp | 65 +- storage/connect/tabext.h | 36 +- storage/connect/tabfix.cpp | 38 +- storage/connect/tabfix.h | 4 +- storage/connect/tabfmt.cpp | 22 +- storage/connect/tabfmt.h | 6 +- storage/connect/tabjdbc.cpp | 68 +- storage/connect/tabjdbc.h | 14 +- storage/connect/tabjson.cpp | 132 ++-- storage/connect/tabjson.h | 58 +- storage/connect/table.cpp | 4 +- storage/connect/tabmac.cpp | 2 +- storage/connect/tabmul.cpp | 28 +- storage/connect/tabmul.h | 10 +- storage/connect/tabmysql.cpp | 150 
++--- storage/connect/tabmysql.h | 24 +- storage/connect/tabodbc.cpp | 55 +- storage/connect/tabodbc.h | 16 +- storage/connect/tabpivot.cpp | 375 +++++------ storage/connect/tabpivot.h | 18 +- storage/connect/tabsys.cpp | 40 +- storage/connect/tabsys.h | 6 +- storage/connect/tabutil.cpp | 18 +- storage/connect/tabutil.h | 4 +- storage/connect/tabvct.cpp | 18 +- storage/connect/tabvir.cpp | 6 +- storage/connect/tabvir.h | 2 +- storage/connect/tabwmi.cpp | 4 +- storage/connect/tabwmi.h | 2 +- storage/connect/tabxml.cpp | 82 +-- storage/connect/tabxml.h | 12 +- storage/connect/tabzip.cpp | 4 +- storage/connect/tabzip.h | 6 +- storage/connect/user_connect.cc | 2 +- storage/connect/user_connect.h | 2 +- storage/connect/valblk.cpp | 58 +- storage/connect/valblk.h | 24 +- storage/connect/value.cpp | 330 +++------- storage/connect/value.h | 51 +- storage/connect/xindex.cpp | 22 +- storage/connect/xobject.cpp | 27 +- storage/connect/xobject.h | 10 +- storage/connect/xtable.h | 70 +- storage/connect/zip.c | 14 +- 99 files changed, 2406 insertions(+), 3413 deletions(-) (limited to 'storage') diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index beb58baa107..8eaeeea2d8a 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -519,11 +519,7 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm) } else if (opc != OP_EXIST) { sprintf(g->Message, MSG(MISSING_ARG), opc); -#if defined(USE_TRY) throw TYPE_ARRAY; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); -#endif // !USE_TRY } else // OP_EXIST return Nval > 0; @@ -685,22 +681,14 @@ void ARRAY::SetPrecision(PGLOBAL g, int p) { if (Vblp == NULL) { strcpy(g->Message, MSG(PREC_VBLP_NULL)); -#if defined(USE_TRY) throw TYPE_ARRAY; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); -#endif // !USE_TRY } // endif Vblp bool was = Vblp->IsCi(); if (was && !p) { strcpy(g->Message, MSG(BAD_SET_CASE)); -#if defined(USE_TRY) throw TYPE_ARRAY; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); -#endif // !USE_TRY } // endif Vblp if (was || !p) @@ -711,11 +699,7 @@ void ARRAY::SetPrecision(PGLOBAL g, int p) if (!was && Type == TYPE_STRING) // Must be resorted to eliminate duplicate strings if (Sort(g)) -#if defined(USE_TRY) throw TYPE_ARRAY; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); -#endif // !USE_TRY } // end of SetPrecision @@ -993,7 +977,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) size_t z, len = 2; if (Type == TYPE_LIST) - return "(?" "?" "?)"; // To be implemented + return (PSZ)("(?" "?" "?)"); // To be implemented z = MY_MAX(24, GetTypeSize(Type, Len) + 4); tp = (char*)PlugSubAlloc(g, NULL, z); diff --git a/storage/connect/blkfil.cpp b/storage/connect/blkfil.cpp index 4e5c7484057..e438e185e5d 100644 --- a/storage/connect/blkfil.cpp +++ b/storage/connect/blkfil.cpp @@ -595,11 +595,7 @@ BLKFILIN::BLKFILIN(PGLOBAL g, PTDBDOS tdbp, int op, int opm, PXOB *xp) if (Colp->GetResultType() != Type) { sprintf(g->Message, "BLKFILIN: %s", MSG(VALTYPE_NOMATCH)); -#if defined(USE_TRY) throw g->Message; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 99); -#endif // !USE_TRY } else if (Colp->GetValue()->IsCi()) Arap->SetPrecision(g, 1); // Case insensitive diff --git a/storage/connect/catalog.h b/storage/connect/catalog.h index 5fa38277832..48347d7519e 100644 --- a/storage/connect/catalog.h +++ b/storage/connect/catalog.h @@ -36,7 +36,7 @@ typedef struct _curtab { /* Defines the structure used to get column catalog info. 
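A recurring change of this commit starts right here: members and parameters that only ever point to read-only text (literals, catalog strings) move from PSZ to the PCSZ typedef, so the code stays valid once MSVC's /Zc:strictStrings makes string literals const (MDEV-12149). In essence (typedefs as declared in the CONNECT headers, modulo the exact file):

    typedef char       *PSZ;     // writable string, unchanged
    typedef const char *PCSZ;    // read-only string: literals bind here

    PCSZ Name = "special";       // fine under /Zc:strictStrings
    // PSZ  Name = "special";    // would need a const-dropping conversion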
*/ /***********************************************************************/ typedef struct _colinfo { - char *Name; + PCSZ Name; int Type; int Offset; int Length; @@ -45,9 +45,9 @@ typedef struct _colinfo { int Scale; int Opt; int Freq; - char *Remark; - char *Datefmt; - char *Fieldfmt; + PCSZ Remark; + PCSZ Datefmt; + PCSZ Fieldfmt; ushort Flags; // Used by MariaDB CONNECT handlers } COLINFO, *PCOLINFO; @@ -68,17 +68,15 @@ class DllExport CATALOG { bool GetDefHuge(void) {return DefHuge;} void SetDefHuge(bool b) {DefHuge = b;} char *GetCbuf(void) {return Cbuf;} -//char *GetDataPath(void) {return (char*)DataPath;} // Methods virtual void Reset(void) {} -//virtual void SetDataPath(PGLOBAL g, const char *path) {} virtual bool CheckName(PGLOBAL, char*) {return true;} virtual bool ClearName(PGLOBAL, PSZ) {return true;} virtual PRELDEF MakeOneTableDesc(PGLOBAL, LPCSTR, LPCSTR) {return NULL;} virtual PRELDEF GetTableDescEx(PGLOBAL, PTABLE) {return NULL;} - /*virtual PRELDEF GetTableDesc(PGLOBAL, LPCSTR, LPCSTR, - PRELDEF* = NULL) {return NULL;}*/ + //virtual PRELDEF GetTableDesc(PGLOBAL, LPCSTR, LPCSTR, + // PRELDEF* = NULL) {return NULL;} virtual PRELDEF GetFirstTable(PGLOBAL) {return NULL;} virtual PRELDEF GetNextTable(PGLOBAL) {return NULL;} virtual bool TestCond(PGLOBAL, const char*, const char*) {return true;} @@ -95,14 +93,13 @@ class DllExport CATALOG { protected: virtual bool ClearSection(PGLOBAL, const char*, const char*) {return true;} - /*virtual PRELDEF MakeTableDesc(PGLOBAL, LPCSTR, LPCSTR) {return NULL;}*/ + //virtual PRELDEF MakeTableDesc(PGLOBAL, LPCSTR, LPCSTR) {return NULL;} // Members char *Cbuf; /* Buffer used for col section */ int Cblen; /* Length of suballoc. buffer */ CURTAB Ctb; /* Used to enumerate tables */ bool DefHuge; /* true: tables default to huge */ -//LPCSTR DataPath; /* Is the Path of DB data dir */ }; // end of class CATALOG #endif // __CATALOG__H diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index a1169296a89..fa205b493a2 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -195,13 +195,9 @@ int COLBLK::GetLengthEx(void) /* corresponding to this column and convert it to buffer type. */ /***********************************************************************/ void COLBLK::ReadColumn(PGLOBAL g) - { +{ sprintf(g->Message, MSG(UNDEFINED_AM), "ReadColumn"); -#if defined(USE_TRY) throw TYPE_COLBLK; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); -#endif // !USE_TRY } // end of ReadColumn /***********************************************************************/ @@ -210,13 +206,9 @@ void COLBLK::ReadColumn(PGLOBAL g) /* corresponding to this column from the column buffer and type. */ /***********************************************************************/ void COLBLK::WriteColumn(PGLOBAL g) - { +{ sprintf(g->Message, MSG(UNDEFINED_AM), "WriteColumn"); -#if defined(USE_TRY) throw TYPE_COLBLK; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); -#endif // !USE_TRY } // end of WriteColumn /***********************************************************************/ @@ -268,13 +260,9 @@ SPCBLK::SPCBLK(PCOLUMN cp) /* corresponding to this column from the column buffer and type. 
*/ /***********************************************************************/ void SPCBLK::WriteColumn(PGLOBAL g) - { +{ sprintf(g->Message, MSG(SPCOL_READONLY), Name); -#if defined(USE_TRY) throw TYPE_COLBLK; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); -#endif // !USE_TRY } // end of WriteColumn /***********************************************************************/ @@ -389,7 +377,7 @@ PRTBLK::PRTBLK(PCOLUMN cp) : SPCBLK(cp) void PRTBLK::ReadColumn(PGLOBAL g) { if (Pname == NULL) { - char *p; + const char *p; Pname = To_Tdb->GetDef()->GetStringCatInfo(g, "partname", "?"); p = strrchr(Pname, '#'); @@ -419,7 +407,7 @@ SIDBLK::SIDBLK(PCOLUMN cp) : SPCBLK(cp) void SIDBLK::ReadColumn(PGLOBAL) { //if (Sname == NULL) { - Sname = (char*)To_Tdb->GetServer(); + Sname = To_Tdb->GetServer(); Value->SetValue_psz(Sname); // } // endif Sname diff --git a/storage/connect/colblk.h b/storage/connect/colblk.h index c64f9d95129..02c4f2361b7 100644 --- a/storage/connect/colblk.h +++ b/storage/connect/colblk.h @@ -154,7 +154,7 @@ class DllExport FIDBLK : public SPCBLK { virtual void ReadColumn(PGLOBAL g); protected: - PSZ Fn; // The current To_File of the table + PCSZ Fn; // The current To_File of the table OPVAL Op; // The file part operator }; // end of class FIDBLK @@ -178,7 +178,7 @@ class DllExport TIDBLK : public SPCBLK { TIDBLK(void) {} // Members - PSZ Tname; // The current table name + PCSZ Tname; // The current table name }; // end of class TIDBLK /***********************************************************************/ @@ -201,7 +201,7 @@ class DllExport PRTBLK : public SPCBLK { PRTBLK(void) {} // Members - PSZ Pname; // The current partition name + PCSZ Pname; // The current partition name }; // end of class PRTBLK /***********************************************************************/ @@ -224,7 +224,7 @@ class DllExport SIDBLK : public SPCBLK { SIDBLK(void) {} // Members - PSZ Sname; // The current server name + PCSZ Sname; // The current server name }; // end of class SIDBLK #endif // __COLBLK__H diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 5d5eca80275..e15cc724b85 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -12,7 +12,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /***********************************************************************/ /* Author Olivier BERTRAND bertrandop@gmail.com 2004-2017 */ @@ -121,8 +121,6 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) return true; if (dbuserp->Catalog) { -// ((MYCAT *)dbuserp->Catalog)->SetHandler(handler); done later -// ((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); return false; // Nothing else to do } // endif Catalog @@ -139,9 +137,6 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) if (!(dbuserp->Catalog= new MYCAT(handler))) return true; -//((MYCAT *)dbuserp->Catalog)->SetDataPath(g, pathname); -//dbuserp->UseTemp= TMP_AUTO; - /*********************************************************************/ /* All is correct. */ /*********************************************************************/ @@ -173,7 +168,7 @@ bool CntInfo(PGLOBAL g, PTDB tp, PXF info) // info->mean_rec_length= tdbp->GetLrecl(); info->mean_rec_length= 0; - info->data_file_name= (b) ? 
NULL : tdbp->GetFile(g); + info->data_file_name= (b) ? NULL : (char*)tdbp->GetFile(g); return true; } else { info->data_file_length= 0; @@ -201,50 +196,31 @@ PTDB CntGetTDB(PGLOBAL g, LPCSTR name, MODE mode, PHC h) if (!cat) return NULL; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level])) { - tdbp = NULL; - goto err; - } // endif rc -#endif // !USE_TRY + // Get table object from the catalog + tabp = new(g) XTAB(name); - // Get table object from the catalog - tabp = new(g) XTAB(name); - - if (trace) - printf("CntGetTDB: tabp=%p\n", tabp); + if (trace) + printf("CntGetTDB: tabp=%p\n", tabp); - // Perhaps this should be made thread safe - ((MYCAT*)cat)->SetHandler(h); + // Perhaps this should be made thread safe + ((MYCAT*)cat)->SetHandler(h); - if (!(tdbp = cat->GetTable(g, tabp, mode))) - printf("CntGetTDB: %s\n", g->Message); + if (!(tdbp = cat->GetTable(g, tabp, mode))) + printf("CntGetTDB: %s\n", g->Message); -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); } catch (const char *msg) { strcpy(g->Message, msg); } // end catch -#else // !USE_TRY -err: - g->jump_level--; -#endif // !USE_TRY if (trace) printf("Returning tdbp=%p mode=%d\n", tdbp, mode); return tdbp; - } // end of CntGetTDB +} // end of CntGetTDB /***********************************************************************/ /* OPENTAB: Open a Table. */ @@ -268,161 +244,116 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, return true; } // endif tdbp -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level])) { - goto err; - } // endif rc -#endif // !USE_TRY - - if (!c1) { - if (mode == MODE_INSERT) - // Allocate all column blocks for that table - tdbp->ColDB(g, NULL, 0); - - } else for (p= c1; *p; p+= n) { - // Allocate only used column blocks - if (trace) - printf("Allocating column %s\n", p); - - g->Message[0] = 0; // To check whether ColDB made an error message - colp= tdbp->ColDB(g, p, 0); - - if (!colp && !(mode == MODE_INSERT && tdbp->IsSpecial(p))) { - if (g->Message[0] == 0) - sprintf(g->Message, MSG(COL_ISNOT_TABLE), p, tdbp->GetName()); - -#if defined(USE_TRY) - throw 1; -#else // !USE_TRY - goto err; -#endif // !USE_TRY - } // endif colp - - n= strlen(p) + 1; - } // endfor p - - for (i= 0, colp= tdbp->GetColumns(); colp; i++, colp= colp->GetNext()) { - if (colp->InitValue(g)) -#if defined(USE_TRY) - throw 2; -#else // !USE_TRY - goto err; -#endif // !USE_TRY - - if (mode == MODE_INSERT) - // Allow type conversion - if (colp->SetBuffer(g, colp->GetValue(), true, false)) -#if defined(USE_TRY) - throw 3; -#else // !USE_TRY - goto err; -#endif // !USE_TRY - - colp->AddColUse(U_P); // For PLG tables - } // endfor colp - - /*********************************************************************/ - /* In Update mode, the updated column blocks must be distinct from */ - /* the read column blocks. So make a copy of the TDB and allocate */ - /* its column blocks in mode write (required by XML tables). 
*/ - /*********************************************************************/ - if (mode == MODE_UPDATE) { - PTDBASE utp; - - if (!(utp= (PTDBASE)tdbp->Duplicate(g))) { - sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName()); -#if defined(USE_TRY) - throw 4; -#else // !USE_TRY - goto err; -#endif // !USE_TRY - } // endif tp - - if (!c2) - // Allocate all column blocks for that table - utp->ColDB(g, NULL, 0); - else for (p= c2; *p; p+= n) { - // Allocate only used column blocks - colp= utp->ColDB(g, p, 0); - n= strlen(p) + 1; - } // endfor p - - for (i= 0, colp= utp->GetColumns(); colp; i++, colp= colp->GetNext()) { - if (colp->InitValue(g)) -#if defined(USE_TRY) - throw 5; -#else // !USE_TRY - goto err; -#endif // !USE_TRY - - if (colp->SetBuffer(g, colp->GetValue(), true, false)) -#if defined(USE_TRY) - throw 6; -#else // !USE_TRY - goto err; -#endif // !USE_TRY - - } // endfor colp - - // Attach the updated columns list to the main table - tdbp->SetSetCols(utp->GetColumns()); - } else if (tdbp && mode == MODE_INSERT) - tdbp->SetSetCols(tdbp->GetColumns()); - - // Now do open the physical table - if (trace) - printf("Opening table %s in mode %d tdbp=%p\n", - tdbp->GetName(), mode, tdbp); + if (!c1) { + if (mode == MODE_INSERT) + // Allocate all column blocks for that table + tdbp->ColDB(g, NULL, 0); + + } else for (p = c1; *p; p += n) { + // Allocate only used column blocks + if (trace) + printf("Allocating column %s\n", p); + + g->Message[0] = 0; // To check whether ColDB made an error message + colp = tdbp->ColDB(g, p, 0); + + if (!colp && !(mode == MODE_INSERT && tdbp->IsSpecial(p))) { + if (g->Message[0] == 0) + sprintf(g->Message, MSG(COL_ISNOT_TABLE), p, tdbp->GetName()); + + throw 1; + } // endif colp + + n = strlen(p) + 1; + } // endfor p + + for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) { + if (colp->InitValue(g)) + throw 2; + + if (mode == MODE_INSERT) + // Allow type conversion + if (colp->SetBuffer(g, colp->GetValue(), true, false)) + throw 3; + + colp->AddColUse(U_P); // For PLG tables + } // endfor colp + + /*******************************************************************/ + /* In Update mode, the updated column blocks must be distinct from */ + /* the read column blocks. So make a copy of the TDB and allocate */ + /* its column blocks in mode write (required by XML tables). 
*/ + /*******************************************************************/ + if (mode == MODE_UPDATE) { + PTDBASE utp; + + if (!(utp = (PTDBASE)tdbp->Duplicate(g))) { + sprintf(g->Message, MSG(INV_UPDT_TABLE), tdbp->GetName()); + throw 4; + } // endif tp + + if (!c2) + // Allocate all column blocks for that table + utp->ColDB(g, NULL, 0); + else for (p = c2; *p; p += n) { + // Allocate only used column blocks + colp = utp->ColDB(g, p, 0); + n = strlen(p) + 1; + } // endfor p + + for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) { + if (colp->InitValue(g)) + throw 5; + + if (colp->SetBuffer(g, colp->GetValue(), true, false)) + throw 6; + + } // endfor colp + + // Attach the updated columns list to the main table + tdbp->SetSetCols(utp->GetColumns()); + } else if (tdbp && mode == MODE_INSERT) + tdbp->SetSetCols(tdbp->GetColumns()); + + // Now do open the physical table + if (trace) + printf("Opening table %s in mode %d tdbp=%p\n", + tdbp->GetName(), mode, tdbp); -//tdbp->SetMode(mode); + //tdbp->SetMode(mode); - if (del/* && (tdbp->GetFtype() != RECFM_NAF*/) { - // To avoid erasing the table when doing a partial delete - // make a fake Next + if (del/* && (tdbp->GetFtype() != RECFM_NAF*/) { + // To avoid erasing the table when doing a partial delete + // make a fake Next // PDOSDEF ddp= new(g) DOSDEF; // PTDB tp= new(g) TDBDOS(ddp, NULL); - tdbp->SetNext((PTDB)1); - dup->Check &= ~CHK_DELETE; - } // endif del + tdbp->SetNext((PTDB)1); + dup->Check &= ~CHK_DELETE; + } // endif del - if (trace) - printf("About to open the table: tdbp=%p\n", tdbp); + if (trace) + printf("About to open the table: tdbp=%p\n", tdbp); - if (mode != MODE_ANY && mode != MODE_ALTER) { - if (tdbp->OpenDB(g)) { - printf("%s\n", g->Message); -#if defined(USE_TRY) - throw 7; -#else // !USE_TRY - goto err; -#endif // !USE_TRY - } else - tdbp->SetNext(NULL); + if (mode != MODE_ANY && mode != MODE_ALTER) { + if (tdbp->OpenDB(g)) { + printf("%s\n", g->Message); + throw 7; + } else + tdbp->SetNext(NULL); - } // endif mode + } // endif mode - rcop= false; + rcop = false; -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); } catch (const char *msg) { strcpy(g->Message, msg); } // end catch -#else // !USE_TRY -err: - g->jump_level--; -#endif // !USE_TRY + return rcop; } // end of CntOpenTable @@ -442,65 +373,40 @@ bool CntRewindTable(PGLOBAL g, PTDB tdbp) /* Evaluate all columns after a record is read. 
*/ /***********************************************************************/ RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr) - { +{ RCODE rc= RC_OK; PCOL colp; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - if (trace) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - printf("EvalColumns: %s\n", g->Message); - } // endif + for (colp = tdbp->GetColumns(); rc == RC_OK && colp; + colp = colp->GetNext()) { + if (reset) + colp->Reset(); - return RC_FX; - } // endif jump_level + // Virtual columns are computed by MariaDB + if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol())) + if (colp->Eval(g)) + rc = RC_FX; - if (setjmp(g->jumper[++g->jump_level]) != 0) { + } // endfor colp + + } catch (int n) { if (trace) - printf("Error reading columns: %s\n", g->Message); + printf("Error %d reading columns: %s\n", n, g->Message); rc = RC_FX; - goto err; - } // endif rc -#endif // !USE_TRY - - for (colp= tdbp->GetColumns(); rc == RC_OK && colp; - colp= colp->GetNext()) { - if (reset) - colp->Reset(); - - // Virtual columns are computed by MariaDB - if (!colp->GetColUse(U_VIRTUAL) && (!mrr || colp->GetKcol())) - if (colp->Eval(g)) - rc= RC_FX; - - } // endfor colp + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch -#if defined(USE_TRY) -} catch (int n) { - if (trace) - printf("Error %d reading columns: %s\n", n, g->Message); - - rc = RC_FX; -} catch (const char *msg) { - strcpy(g->Message, msg); -} // end catch -#else // !USE_TRY -err: - g->jump_level--; -#endif // !USE_TRY return rc; - } // end of EvalColumns +} // end of EvalColumns /***********************************************************************/ /* ReadNext: Read next record sequentially. */ /***********************************************************************/ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) - { +{ RCODE rc; if (!tdbp) @@ -515,103 +421,66 @@ RCODE CntReadNext(PGLOBAL g, PTDB tdbp) ((PTDBASE)tdbp)->ResetKindex(g, NULL); } // endif index -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return RC_FX; - } // endif jump_level - - if ((setjmp(g->jumper[++g->jump_level])) != 0) { - rc = RC_FX; - goto err; - } // endif rc -#endif // !USE_TRY + // Do it now to avoid double eval when filtering + for (PCOL colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) + colp->Reset(); - // Do it now to avoid double eval when filtering - for (PCOL colp= tdbp->GetColumns(); colp; colp= colp->GetNext()) - colp->Reset(); + do { + if ((rc = (RCODE)tdbp->ReadDB(g)) == RC_OK) + if (!ApplyFilter(g, tdbp->GetFilter())) + rc = RC_NF; - do { - if ((rc= (RCODE)tdbp->ReadDB(g)) == RC_OK) - if (!ApplyFilter(g, tdbp->GetFilter())) - rc= RC_NF; + } while (rc == RC_NF); - } while (rc == RC_NF); + if (rc == RC_OK) + rc = EvalColumns(g, tdbp, false); - if (rc == RC_OK) - rc= EvalColumns(g, tdbp, false); - -#if defined(USE_TRY) - } catch (int) { + } catch (int) { rc = RC_FX; } catch (const char *msg) { strcpy(g->Message, msg); rc = RC_FX; } // end catch -#else // !USE_TRY -err: - g->jump_level--; -#endif // !USE_TRY + return rc; - } // end of CntReadNext +} // end of CntReadNext /***********************************************************************/ /* WriteRow: Insert a new row into a table. 
*/ /***********************************************************************/ RCODE CntWriteRow(PGLOBAL g, PTDB tdbp) - { - RCODE rc; - PCOL colp; -//PTDBASE tp= (PTDBASE)tdbp; +{ + RCODE rc; + PCOL colp; + //PTDBASE tp= (PTDBASE)tdbp; - if (!tdbp) - return RC_FX; + if (!tdbp) + return RC_FX; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return RC_FX; - } // endif jump_level + // Store column values in table write buffer(s) + for (colp = tdbp->GetSetCols(); colp; colp = colp->GetNext()) + if (!colp->GetColUse(U_VIRTUAL)) + colp->WriteColumn(g); + + if (tdbp->IsIndexed()) + // Index values must be sorted before updating + rc = (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, true); + else + // Return result code from write operation + rc = (RCODE)tdbp->WriteDB(g); - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); + } catch (int n) { + printf("Exception %d: %s\n", n, g->Message); rc = RC_FX; - goto err; - } // endif rc -#endif // !USE_TRY - - // Store column values in table write buffer(s) - for (colp= tdbp->GetSetCols(); colp; colp= colp->GetNext()) - if (!colp->GetColUse(U_VIRTUAL)) - colp->WriteColumn(g); + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = RC_FX; + } // end catch - if (tdbp->IsIndexed()) - // Index values must be sorted before updating - rc= (RCODE)((PTDBDOS)tdbp)->GetTxfp()->StoreValues(g, true); - else - // Return result code from write operation - rc= (RCODE)tdbp->WriteDB(g); - -#if defined(USE_TRY) -} catch (int n) { - printf("Exception %d: %s\n", n, g->Message); - rc = RC_FX; -} catch (const char *msg) { - strcpy(g->Message, msg); - rc = RC_FX; -} // end catch -#else // !USE_TRY -err: - g->jump_level--; -#endif // !USE_TRY - return rc; - } // end of CntWriteRow + return rc; +} // end of CntWriteRow /***********************************************************************/ /* UpdateRow: Update a row into a table. */ @@ -659,98 +528,78 @@ RCODE CntDeleteRow(PGLOBAL g, PTDB tdbp, bool all) /* CLOSETAB: Close a table. */ /***********************************************************************/ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) - { - int rc= RC_OK; -//TDBASE *tbxp= (PTDBASE)tdbp; +{ + int rc = RC_OK; + //TDBASE *tbxp= (PTDBASE)tdbp; - if (!tdbp) - return rc; // Nothing to do - else if (tdbp->GetUse() != USE_OPEN) { - if (tdbp->GetAmType() == TYPE_AM_XML) - tdbp->CloseDB(g); // Opened by GetMaxSize + if (!tdbp) + return rc; // Nothing to do + else if (tdbp->GetUse() != USE_OPEN) { + if (tdbp->GetAmType() == TYPE_AM_XML) + tdbp->CloseDB(g); // Opened by GetMaxSize - return rc; - } // endif !USE_OPEN + return rc; + } // endif !USE_OPEN - if (trace) - printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n", - tdbp, tdbp->GetMode(), nox, abort); + if (trace) + printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n", + tdbp, tdbp->GetMode(), nox, abort); - if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) { - if (tdbp->IsIndexed()) - rc= ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g); + if (tdbp->GetMode() == MODE_DELETE && tdbp->GetUse() == USE_OPEN) { + if (tdbp->IsIndexed()) + rc = ((PTDBDOS)tdbp)->GetTxfp()->DeleteSortedRows(g); - if (!rc) - rc= tdbp->DeleteDB(g, RC_EF); // Specific A.M. delete routine + if (!rc) + rc = tdbp->DeleteDB(g, RC_EF); // Specific A.M. 
delete routine - } else if (tdbp->GetMode() == MODE_UPDATE && tdbp->IsIndexed()) - rc= ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g); + } else if (tdbp->GetMode() == MODE_UPDATE && tdbp->IsIndexed()) + rc = ((PTDBDOX)tdbp)->Txfp->UpdateSortedRows(g); - switch(rc) { - case RC_FX: - abort= true; - break; - case RC_INFO: - PushWarning(g, tdbp); - break; - } // endswitch rc + switch (rc) { + case RC_FX: + abort = true; + break; + case RC_INFO: + PushWarning(g, tdbp); + break; + } // endswitch rc -#if defined(USE_TRY) try { -#else // !USE_TRY - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - rc = RC_FX; - goto err; - } // endif + // This will close the table file(s) and also finalize write + // operations such as Insert, Update, or Delete. + tdbp->SetAbort(abort); + tdbp->CloseDB(g); + tdbp->SetAbort(false); - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - rc = RC_FX; - goto err; - } // endif -#endif // !USE_TRY + if (trace > 1) + printf("Table %s closed\n", tdbp->GetName()); - // This will close the table file(s) and also finalize write - // operations such as Insert, Update, or Delete. - tdbp->SetAbort(abort); - tdbp->CloseDB(g); - tdbp->SetAbort(false); + if (!nox && tdbp->GetMode() != MODE_READ && tdbp->GetMode() != MODE_ANY) { + if (trace > 1) + printf("About to reset opt\n"); - if (trace > 1) - printf("Table %s closed\n", tdbp->GetName()); + if (!tdbp->IsRemote()) { + // Make all the eventual indexes + PTDBDOX tbxp = (PTDBDOX)tdbp; + tbxp->ResetKindex(g, NULL); + tbxp->SetKey_Col(NULL); + rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1); + } // endif remote - if (!nox && tdbp->GetMode() != MODE_READ && tdbp->GetMode() != MODE_ANY) { - if (trace > 1) - printf("About to reset opt\n"); - - if (!tdbp->IsRemote()) { - // Make all the eventual indexes - PTDBDOX tbxp = (PTDBDOX)tdbp; - tbxp->ResetKindex(g, NULL); - tbxp->SetKey_Col(NULL); - rc = tbxp->ResetTableOpt(g, true, tbxp->GetDef()->Indexable() == 1); - } // endif remote - - } // endif nox - -#if defined(USE_TRY) -} catch (int) { - rc = RC_FX; -} catch (const char *msg) { - strcpy(g->Message, msg); - rc = RC_FX; -} // end catch -#else // !USE_TRY -err: - g->jump_level--; -#endif // !USE_TRY - - if (trace > 1) - htrc("Done rc=%d\n", rc); - - return (rc == RC_OK || rc == RC_INFO) ? 0 : rc; - } // end of CntCloseTable + } // endif nox + + } catch (int) { + rc = RC_FX; + } catch (const char *msg) { + strcpy(g->Message, msg); + rc = RC_FX; + } // end catch + + if (trace > 1) + htrc("Done rc=%d\n", rc); + + return (rc == RC_OK || rc == RC_INFO) ? 0 : rc; +} // end of CntCloseTable /***********************************************************************/ /* Load and initialize the use of an index. 
*/ diff --git a/storage/connect/connect.h b/storage/connect/connect.h index ce4cf9bf8b9..128561b80f3 100644 --- a/storage/connect/connect.h +++ b/storage/connect/connect.h @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /**************** Cnt H Declares Source Code File (.H) *****************/ /* Name: CONNECT.H Version 2.4 */ diff --git a/storage/connect/domdoc.cpp b/storage/connect/domdoc.cpp index 1622ec16c68..e24e10835c1 100644 --- a/storage/connect/domdoc.cpp +++ b/storage/connect/domdoc.cpp @@ -58,13 +58,15 @@ void CloseXMLFile(PGLOBAL g, PFBLOCK fp, bool all) if (xp && xp->Count > 1 && !all) { xp->Count--; } else if (xp && xp->Count > 0) { - try { + try { if (xp->Docp) xp->Docp->Release(); - } catch(_com_error e) { - sprintf(g->Message, "%s %s", MSG(COM_ERROR), e.Description()); - } catch(...) {} + } catch(_com_error e) { + char *p = _com_util::ConvertBSTRToString(e.Description()); + sprintf(g->Message, "%s %s", MSG(COM_ERROR), p); + delete[] p; + } catch(...) {} CoUninitialize(); xp->Count = 0; @@ -89,7 +91,7 @@ DOMDOC::DOMDOC(char *nsl, char *nsdf, char *enc, PFBLOCK fp) /******************************************************************/ /* Initialize XML parser and check library compatibility. */ /******************************************************************/ -bool DOMDOC::Initialize(PGLOBAL g, char *entry, bool zipped) +bool DOMDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped) { if (zipped && InitZip(g, entry)) return true; @@ -155,7 +157,7 @@ PFBLOCK DOMDOC::LinkXblock(PGLOBAL g, MODE m, int rc, char *fn) /******************************************************************/ /* Create the XML node. */ /******************************************************************/ -bool DOMDOC::NewDoc(PGLOBAL g, char *ver) +bool DOMDOC::NewDoc(PGLOBAL g, PCSZ ver) { char buf[64]; MSXML2::IXMLDOMProcessingInstructionPtr pip; @@ -490,9 +492,9 @@ PXATTR DOMNODE::GetAttribute(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ /* Add a new element child node to this node and return it. */ /******************************************************************/ -PXNODE DOMNODE::AddChildNode(PGLOBAL g, char *name, PXNODE np) +PXNODE DOMNODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np) { - char *p, *pn; + const char *p, *pn; // char *p, *pn, *epf, *pf = NULL; MSXML2::IXMLDOMNodePtr ep; // _bstr_t uri((wchar_t*)NULL); @@ -585,7 +587,7 @@ PXATTR DOMNODE::AddProperty(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ /* Add a new text node to this node. 
*/ /******************************************************************/ -void DOMNODE::AddText(PGLOBAL g, char *txtp) +void DOMNODE::AddText(PGLOBAL g, PCSZ txtp) { MSXML2::IXMLDOMTextPtr tp= Docp->createTextNode((_bstr_t)txtp); diff --git a/storage/connect/domdoc.h b/storage/connect/domdoc.h index 7f269002d59..dd8936097e2 100644 --- a/storage/connect/domdoc.h +++ b/storage/connect/domdoc.h @@ -37,9 +37,9 @@ class DOMDOC : public XMLDOCUMENT { virtual void SetNofree(bool b) {} // Only libxml2 // Methods - virtual bool Initialize(PGLOBAL g, char *entry, bool zipped); + virtual bool Initialize(PGLOBAL g, PCSZ entry, bool zipped); virtual bool ParseFile(PGLOBAL g, char *fn); - virtual bool NewDoc(PGLOBAL g, char *ver); + virtual bool NewDoc(PGLOBAL g, PCSZ ver); virtual void AddComment(PGLOBAL g, char *com); virtual PXNODE GetRoot(PGLOBAL g); virtual PXNODE NewRoot(PGLOBAL g, char *name); @@ -78,9 +78,9 @@ class DOMNODE : public XMLNODE { virtual PXLIST SelectNodes(PGLOBAL g, char *xp, PXLIST lp); virtual PXNODE SelectSingleNode(PGLOBAL g, char *xp, PXNODE np); virtual PXATTR GetAttribute(PGLOBAL g, char *name, PXATTR ap); - virtual PXNODE AddChildNode(PGLOBAL g, char *name, PXNODE np); + virtual PXNODE AddChildNode(PGLOBAL g, PCSZ name, PXNODE np); virtual PXATTR AddProperty(PGLOBAL g, char *name, PXATTR ap); - virtual void AddText(PGLOBAL g, char *txtp); + virtual void AddText(PGLOBAL g, PCSZ txtp); virtual void DeleteChild(PGLOBAL g, PXNODE dnp); protected: diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 9feb61d7d61..8878f2c922b 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -128,7 +128,7 @@ typedef struct _descriptor { /* Moves file pointer to byte 32; fills buffer at buf with */ /* first 32 bytes of file. */ /****************************************************************************/ -static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf) +static int dbfhead(PGLOBAL g, FILE *file, PCSZ fn, DBFHEADER *buf) { char endmark[2]; int dbc = 2, rc = RC_OK; @@ -186,7 +186,7 @@ static int dbfhead(PGLOBAL g, FILE *file, PSZ fn, DBFHEADER *buf) /* DBFColumns: constructs the result blocks containing the description */ /* of all the columns of a DBF file that will be retrieved by #GetData. */ /****************************************************************************/ -PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info) +PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) { int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; @@ -393,7 +393,7 @@ DBFBASE::DBFBASE(DBFBASE *txfp) /* and header length. Set Records, check that Reclen is equal to lrecl and */ /* return the header length or 0 in case of error. */ /****************************************************************************/ -int DBFBASE::ScanHeader(PGLOBAL g, PSZ fn, int lrecl, int *rln, char *defpath) +int DBFBASE::ScanHeader(PGLOBAL g, PCSZ fn, int lrecl, int *rln, PCSZ defpath) { int rc; char filename[_MAX_PATH]; diff --git a/storage/connect/filamdbf.h b/storage/connect/filamdbf.h index 66458a10eaa..640fc349b4c 100644 --- a/storage/connect/filamdbf.h +++ b/storage/connect/filamdbf.h @@ -19,7 +19,7 @@ typedef class DBMFAM *PDBMFAM; /****************************************************************************/ /* Functions used externally. 
*/ /****************************************************************************/ -PQRYRES DBFColumns(PGLOBAL g, char *dp, const char *fn, bool info); +PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info); /****************************************************************************/ /* This is the base class for dBASE file access methods. */ @@ -31,7 +31,7 @@ class DllExport DBFBASE { DBFBASE(PDBF txfp); // Implementation - int ScanHeader(PGLOBAL g, PSZ fname, int lrecl, int *rlen, char *defpath); + int ScanHeader(PGLOBAL g, PCSZ fname, int lrecl, int *rlen, PCSZ defpath); protected: // Default constructor, not to be used diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp index cd25429318a..962a4516bf2 100644 --- a/storage/connect/filamfix.cpp +++ b/storage/connect/filamfix.cpp @@ -761,7 +761,8 @@ bool BGXFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) htrc("after write req=%d brc=%d nbw=%d\n", req, brc, nbw); if (!brc || nbw != len) { - char buf[256], *fn = (h == Hfile) ? To_File : "Tempfile"; + char buf[256]; + PCSZ fn = (h == Hfile) ? To_File : "Tempfile"; if (brc) strcpy(buf, MSG(BAD_BYTE_NUM)); diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp index dc6f277ee27..df366ef15f9 100644 --- a/storage/connect/filamgz.cpp +++ b/storage/connect/filamgz.cpp @@ -920,8 +920,8 @@ int ZLBFAM::GetFileLength(PGLOBAL g) /***********************************************************************/ bool ZLBFAM::AllocateBuffer(PGLOBAL g) { - char *msg; - int n, zrc; + PCSZ msg; + int n, zrc; #if 0 if (!Optimized && Tdbp->NeedIndexing(g)) { diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index 99b51632dc9..382af51aae4 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -71,8 +71,23 @@ TXTFAM::TXTFAM(PDOSDEF tdp) { Tdbp = NULL; To_Fb = NULL; - To_File = tdp->Fn; - Lrecl = tdp->Lrecl; + + if (tdp) { + To_File = tdp->Fn; + Lrecl = tdp->Lrecl; + Eof = tdp->Eof; + Ending = tdp->Ending; + } else { + To_File = NULL; + Lrecl = 0; + Eof = false; +#if defined(__WIN__) + Ending = 2; +#else + Ending = 1; +#endif + } // endif tdp + Placed = false; IsRead = true; Blocked = false; @@ -103,8 +118,6 @@ TXTFAM::TXTFAM(PDOSDEF tdp) Blksize = 0; Fpos = Spos = Tpos = 0; Padded = false; - Eof = tdp->Eof; - Ending = tdp->Ending; Abort = false; CrLf = (char*)(Ending == 1 ? "\n" : "\r\n"); } // end of TXTFAM standard constructor @@ -973,7 +986,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) } else { /*****************************************************************/ - /* Move of eventual preceding lines is not required here. */ + /* Move of eventual preceding lines is not required here. */ /* Set the target file as being the source file itself. */ /* Set the future Tpos, and give Spos a value to block copying. 
*/ /*****************************************************************/ @@ -1161,20 +1174,12 @@ int DOSFAM::RenameTempFile(PGLOBAL g) if (rename(filename, filetemp)) { // Save file for security sprintf(g->Message, MSG(RENAME_ERROR), filename, filetemp, strerror(errno)); -#if defined(USE_TRY) throw 51; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 51); -#endif // !USE_TRY } else if (rename(tempname, filename)) { sprintf(g->Message, MSG(RENAME_ERROR), tempname, filename, strerror(errno)); rc = rename(filetemp, filename); // Restore saved file -#if defined(USE_TRY) throw 52; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 52); -#endif // !USE_TRY } else if (remove(filetemp)) { sprintf(g->Message, MSG(REMOVE_ERROR), filetemp, strerror(errno)); diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h index ae8f74a9830..1fdae8fcd37 100644 --- a/storage/connect/filamtxt.h +++ b/storage/connect/filamtxt.h @@ -80,7 +80,7 @@ class DllExport TXTFAM : public BLOCK { protected: // Members PTDBDOS Tdbp; // To table class - PSZ To_File; // Points to table file name + PCSZ To_File; // Points to table file name PFBLOCK To_Fb; // Pointer to file block PPARM To_Pos; // Pointer to position list PPARM To_Sos; // Pointer to start position list diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index 8e5c31d6b49..9b4ef43eb8b 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -143,9 +143,9 @@ int VCTFAM::GetFileLength(PGLOBAL g) { if (Split) { // Get the total file length - char filename[_MAX_PATH]; - char *savfile = To_File; - int i, len = 0; + char filename[_MAX_PATH]; + PCSZ savfile = To_File; + int i, len = 0; // Initialize the array of file structures if (!Colfn) { @@ -313,8 +313,8 @@ int VCTFAM::Cardinality(PGLOBAL g) // and Last must be set from the file cardinality. // Only happens when called by sub classes. char filename[_MAX_PATH]; - PSZ savfn = To_File; - int len, clen, card = -1; + PCSZ savfn = To_File; + int len, clen, card = -1; PCOLDEF cdp = Tdbp->GetDef()->GetCols(); if (!Colfn) { @@ -368,7 +368,7 @@ int VCTFAM::GetRowID(void) /***********************************************************************/ /* VCT Create an empty file for Vector formatted tables. */ /***********************************************************************/ -bool VCTFAM::MakeEmptyFile(PGLOBAL g, char *fn) +bool VCTFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn) { // Vector formatted file: this will create an empty file of the // required length if it does not exists yet. @@ -559,7 +559,7 @@ bool VCTFAM::AllocateBuffer(PGLOBAL g) /* Do initial action when inserting. 
*/ /***********************************************************************/ bool VCTFAM::InitInsert(PGLOBAL g) - { +{ bool rc = false; // We come here in MODE_INSERT only @@ -575,26 +575,11 @@ bool VCTFAM::InitInsert(PGLOBAL g) CurBlk = Block - 1; CurNum = Last; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif - - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - g->jump_level--; - return true; - } // endif -#endif // !USE_TRY - - // Last block must be updated by new values - for (; cp; cp = (PVCTCOL)cp->Next) - cp->ReadBlock(g); + // Last block must be updated by new values + for (; cp; cp = (PVCTCOL)cp->Next) + cp->ReadBlock(g); -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); @@ -603,9 +588,7 @@ bool VCTFAM::InitInsert(PGLOBAL g) strcpy(g->Message, msg); rc = true; } // end catch -#else // !USE_TRY - g->jump_level--; -#endif // !USE_TRY + } // endif Last if (!rc) @@ -897,8 +880,9 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) /***********************************************************************/ bool VCTFAM::OpenTempFile(PGLOBAL g) { - char *opmode, tempname[_MAX_PATH]; - bool rc = false; + PCSZ opmode; + char tempname[_MAX_PATH]; + bool rc = false; /*********************************************************************/ /* Open the temporary file, Spos is at the beginning of file. */ @@ -1126,11 +1110,7 @@ void VCTFAM::CloseTableFile(PGLOBAL g, bool abort) } else if (AddBlock) { // Last block was not written rc = ResetTableSize(g, CurBlk, Nrec); -#if defined(USE_TRY) throw 44; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 44); -#endif // !USE_TRY } // endif } else if (mode == MODE_UPDATE) { @@ -1550,7 +1530,7 @@ bool VCMFAM::AllocateBuffer(PGLOBAL g) /* Do initial action when inserting. */ /***********************************************************************/ bool VCMFAM::InitInsert(PGLOBAL g) - { +{ bool rc = false; volatile PVCTCOL cp = (PVCTCOL)Tdbp->GetColumns(); @@ -1565,27 +1545,12 @@ bool VCMFAM::InitInsert(PGLOBAL g) CurNum = Last; } // endif Last -#if defined(USE_TRY) try { -#else // !USE_TRY - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif - - if ((rc = setjmp(g->jumper[++g->jump_level])) != 0) { - g->jump_level--; - return true; - } // endif -#endif // !USE_TRY - - // Initialize the column block pointer - for (; cp; cp = (PVCTCOL)cp->Next) - cp->ReadBlock(g); - -#if defined(USE_TRY) - } catch (int n) { + // Initialize the column block pointer + for (; cp; cp = (PVCTCOL)cp->Next) + cp->ReadBlock(g); + + } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); rc = true; @@ -1593,11 +1558,9 @@ bool VCMFAM::InitInsert(PGLOBAL g) strcpy(g->Message, msg); rc = true; } // end catch -#else // !USE_TRY - g->jump_level--; -#endif // !USE_TRY + return rc; - } // end of InitInsert +} // end of InitInsert /***********************************************************************/ /* Data Base write routine for VMP access method. */ @@ -2036,7 +1999,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g) /***********************************************************************/ /* Open the file corresponding to one column. 
*/ /***********************************************************************/ -bool VECFAM::OpenColumnFile(PGLOBAL g, char *opmode, int i) +bool VECFAM::OpenColumnFile(PGLOBAL g, PCSZ opmode, int i) { char filename[_MAX_PATH]; PDBUSER dup = PlgGetUser(g); @@ -2541,11 +2504,7 @@ void VECFAM::CloseTableFile(PGLOBAL g, bool abort) if (wrc != RC_FX) rc = ResetTableSize(g, Block, Last); else -#if defined(USE_TRY) throw 44; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 44); -#endif // !USE_TRY } else if (mode == MODE_UPDATE) { if (UseTemp && !InitUpdate && !Abort) { @@ -3185,7 +3144,8 @@ bool BGVFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) htrc("after write req=%d brc=%d nbw=%d\n", req, brc, nbw); if (!brc || nbw != len) { - char buf[256], *fn = (h == Hfile) ? To_File : "Tempfile"; + char buf[256]; + PCSZ fn = (h == Hfile) ? To_File : "Tempfile"; if (brc) strcpy(buf, MSG(BAD_BYTE_NUM)); @@ -3361,7 +3321,7 @@ bool BGVFAM::SetBlockInfo(PGLOBAL g) /***********************************************************************/ /* VEC Create an empty file for new Vector formatted tables. */ /***********************************************************************/ -bool BGVFAM::MakeEmptyFile(PGLOBAL g, char *fn) +bool BGVFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn) { // Vector formatted file this will create an empty file of the // required length if it does not exists yet. @@ -3371,7 +3331,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, char *fn) PlugSetPath(filename, fn, Tdbp->GetPath()); #if defined(__WIN__) - char *p; + PCSZ p; DWORD rc; bool brc; LARGE_INTEGER of; @@ -4206,11 +4166,7 @@ void BGVFAM::CloseTableFile(PGLOBAL g, bool abort) } else if (AddBlock) { // Last block was not written rc = ResetTableSize(g, CurBlk, Nrec); -#if defined(USE_TRY) throw 44; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 44); -#endif // !USE_TRY } // endif } else if (mode == MODE_UPDATE) { diff --git a/storage/connect/filamvct.h b/storage/connect/filamvct.h index be66232acfb..85982403270 100644 --- a/storage/connect/filamvct.h +++ b/storage/connect/filamvct.h @@ -61,7 +61,7 @@ class DllExport VCTFAM : public FIXFAM { virtual bool WriteBlock(PGLOBAL g, PVCTCOL colp); protected: - virtual bool MakeEmptyFile(PGLOBAL g, char *fn); + virtual bool MakeEmptyFile(PGLOBAL g, PCSZ fn); virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveLines(PGLOBAL g) {return false;} virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); @@ -160,7 +160,7 @@ class DllExport VECFAM : public VCTFAM { virtual bool MoveLines(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual int RenameTempFile(PGLOBAL g); - bool OpenColumnFile(PGLOBAL g, char *opmode, int i); + bool OpenColumnFile(PGLOBAL g, PCSZ opmode, int i); // Members FILE* *Streams; // Points to Dos file structure array @@ -235,7 +235,7 @@ class BGVFAM : public VCTFAM { bool BigSeek(PGLOBAL g, HANDLE h, BIGINT pos, bool b = false); bool BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req); bool BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req); - virtual bool MakeEmptyFile(PGLOBAL g, char *fn); + virtual bool MakeEmptyFile(PGLOBAL g, PCSZ fn); virtual bool OpenTempFile(PGLOBAL g); virtual bool MoveIntermediateLines(PGLOBAL g, bool *b = NULL); virtual bool CleanUnusedSpace(PGLOBAL g); diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 92de5117638..81511214bfe 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -45,12 +45,12 @@ #define WRITEBUFFERSIZE (16384) -bool 
ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul); +bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul); /***********************************************************************/ /* Compress a file in zip when creating a table. */ /***********************************************************************/ -static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, char *fn, char *entry, char *buf) +static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, PCSZ fn, PCSZ entry, char *buf) { int rc = RC_OK, size_read, size_buf = WRITEBUFFERSIZE; FILE *fin; @@ -88,7 +88,7 @@ static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, char *fn, char *entry, char *buf) /***********************************************************************/ /* Find and Compress several files in zip when creating a table. */ /***********************************************************************/ -static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, char *pat, char *buf) +static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf) { char filename[_MAX_PATH]; int rc; @@ -203,7 +203,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, char *pat, char *buf) /***********************************************************************/ /* Load and Compress a file in zip when creating a table. */ /***********************************************************************/ -bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool mul) +bool ZipLoadFile(PGLOBAL g, PCSZ zfn, PCSZ fn, PCSZ entry, bool append, bool mul) { char *buf; bool err; @@ -228,7 +228,7 @@ bool ZipLoadFile(PGLOBAL g, char *zfn, char *fn, char *entry, bool append, bool /***********************************************************************/ /* Constructors. */ /***********************************************************************/ -ZIPUTIL::ZIPUTIL(PSZ tgt) +ZIPUTIL::ZIPUTIL(PCSZ tgt) { zipfile = NULL; target = tgt; @@ -269,7 +269,7 @@ void ZIPUTIL::getTime(tm_zip& tmZip) /* append: set true to append the zip file */ /* return: true if open, false otherwise. */ /***********************************************************************/ -bool ZIPUTIL::open(PGLOBAL g, char *filename, bool append) +bool ZIPUTIL::open(PGLOBAL g, PCSZ filename, bool append) { if (!zipfile && !(zipfile = zipOpen64(filename, append ? APPEND_STATUS_ADDINZIP @@ -295,7 +295,7 @@ void ZIPUTIL::close() /***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */ /***********************************************************************/ -bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append) +bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, PCSZ fn, bool append) { /*********************************************************************/ /* The file will be compressed. */ @@ -338,7 +338,7 @@ bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append) /***********************************************************************/ /* Add target in zip file. */ /***********************************************************************/ -bool ZIPUTIL::addEntry(PGLOBAL g, char *entry) +bool ZIPUTIL::addEntry(PGLOBAL g, PCSZ entry) { //?? we dont need the stinking time zip_fileinfo zi = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; @@ -382,7 +382,7 @@ void ZIPUTIL::closeEntry() /***********************************************************************/ /* Constructors. 
*/ /***********************************************************************/ -UNZIPUTL::UNZIPUTL(PSZ tgt, bool mul) +UNZIPUTL::UNZIPUTL(PCSZ tgt, bool mul) { zipfile = NULL; target = tgt; @@ -439,8 +439,8 @@ UNZIPUTL::UNZIPUTL(PZIPUTIL zutp) /* This code is the copyright property of Alessandro Felice Cantatore. */ /* http://xoomer.virgilio.it/acantato/dev/wildcard/wildmatch.html */ /***********************************************************************/ -bool UNZIPUTL::WildMatch(PSZ pat, PSZ str) { - PSZ s, p; +bool UNZIPUTL::WildMatch(PCSZ pat, PCSZ str) { + PCSZ s, p; bool star = FALSE; loopStart: @@ -474,7 +474,7 @@ starCheck: /* param: filename path and the filename of the zip file to open. */ /* return: true if open, false otherwise. */ /***********************************************************************/ -bool UNZIPUTL::open(PGLOBAL g, char *filename) +bool UNZIPUTL::open(PGLOBAL g, PCSZ filename) { if (!zipfile && !(zipfile = unzOpen64(filename))) sprintf(g->Message, "Zipfile open error on %s", filename); @@ -564,7 +564,7 @@ int UNZIPUTL::nextEntry(PGLOBAL g) /***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file from a ZIP file. */ /***********************************************************************/ -bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn) +bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, PCSZ fn) { /*********************************************************************/ /* The file will be decompressed into virtual memory. */ @@ -602,7 +602,7 @@ bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn) if (openEntry(g)) return true; - if (size > 0) { + if (size > 0) { /*******************************************************************/ /* Link a Fblock. This make possible to automatically close it */ /* in case of error g->jump. */ @@ -636,7 +636,7 @@ bool UNZIPUTL::OpenTable(PGLOBAL g, MODE mode, char *fn) /***********************************************************************/ /* Insert only if the entry does not exist. 
*/ /***********************************************************************/ -bool UNZIPUTL::IsInsertOk(PGLOBAL g, char *fn) +bool UNZIPUTL::IsInsertOk(PGLOBAL g, PCSZ fn) { bool ok = true, b = open(g, fn); diff --git a/storage/connect/filamzip.h b/storage/connect/filamzip.h index ba1d8658dc4..be17d954728 100644 --- a/storage/connect/filamzip.h +++ b/storage/connect/filamzip.h @@ -27,16 +27,13 @@ typedef class ZPXFAM *PZPXFAM; class DllExport ZIPUTIL : public BLOCK { public: // Constructor - ZIPUTIL(PSZ tgt); + ZIPUTIL(PCSZ tgt); //ZIPUTIL(ZIPUTIL *zutp); - // Implementation - //PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); } - // Methods - bool OpenTable(PGLOBAL g, MODE mode, char *fn, bool append); - bool open(PGLOBAL g, char *fn, bool append); - bool addEntry(PGLOBAL g, char *entry); + bool OpenTable(PGLOBAL g, MODE mode, PCSZ fn, bool append); + bool open(PGLOBAL g, PCSZ fn, bool append); + bool addEntry(PGLOBAL g, PCSZ entry); void close(void); void closeEntry(void); int writeEntry(PGLOBAL g, char *buf, int len); @@ -44,16 +41,10 @@ class DllExport ZIPUTIL : public BLOCK { // Members zipFile zipfile; // The ZIP container file - PSZ target; // The target file name - PSZ pwd; // The ZIP file password -//unz_file_info finfo; // The current file info + PCSZ target; // The target file name + PCSZ pwd; // The ZIP file password PFBLOCK fp; -//char *memory; -//uint size; -//int multiple; // Multiple targets bool entryopen; // True when open current entry -//char fn[FILENAME_MAX]; // The current entry file name -//char mapCaseTable[256]; }; // end of ZIPUTIL /***********************************************************************/ @@ -62,27 +53,27 @@ class DllExport ZIPUTIL : public BLOCK { class DllExport UNZIPUTL : public BLOCK { public: // Constructor - UNZIPUTL(PSZ tgt, bool mul); + UNZIPUTL(PCSZ tgt, bool mul); UNZIPUTL(PDOSDEF tdp); // Implementation //PTXF Duplicate(PGLOBAL g) { return (PTXF) new(g)UNZFAM(this); } // Methods - bool OpenTable(PGLOBAL g, MODE mode, char *fn); - bool open(PGLOBAL g, char *fn); + bool OpenTable(PGLOBAL g, MODE mode, PCSZ fn); + bool open(PGLOBAL g, PCSZ fn); bool openEntry(PGLOBAL g); void close(void); void closeEntry(void); - bool WildMatch(PSZ pat, PSZ str); + bool WildMatch(PCSZ pat, PCSZ str); int findEntry(PGLOBAL g, bool next); int nextEntry(PGLOBAL g); - bool IsInsertOk(PGLOBAL g, char *fn); + bool IsInsertOk(PGLOBAL g, PCSZ fn); // Members unzFile zipfile; // The ZIP container file - PSZ target; // The target file name - PSZ pwd; // The ZIP file password + PCSZ target; // The target file name + PCSZ pwd; // The ZIP file password unz_file_info finfo; // The current file info PFBLOCK fp; char *memory; @@ -123,8 +114,6 @@ class DllExport UNZFAM : public MAPFAM { // Members UNZIPUTL *zutp; PDOSDEF tdfp; -//PSZ target; -//bool mul; }; // end of UNZFAM /***********************************************************************/ @@ -152,8 +141,6 @@ class DllExport UZXFAM : public MPXFAM { // Members UNZIPUTL *zutp; PDOSDEF tdfp; -//PSZ target; -//bool mul; }; // end of UZXFAM /***********************************************************************/ @@ -180,8 +167,9 @@ class DllExport ZIPFAM : public DOSFAM { protected: // Members ZIPUTIL *zutp; - PSZ target; + PCSZ target; bool append; +//bool replace; }; // end of ZIPFAM /***********************************************************************/ @@ -205,7 +193,7 @@ class DllExport ZPXFAM : public FIXFAM { protected: // Members ZIPUTIL *zutp; - PSZ target; + PCSZ target; bool append; }; // end 
of ZPXFAM diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index db96647f634..0c38d10a578 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -87,11 +87,7 @@ BYTE OpBmp(PGLOBAL g, OPVAL opc) case OP_EXIST: bt = 0x00; break; default: sprintf(g->Message, MSG(BAD_FILTER_OP), opc); -#if defined(USE_TRY) throw TYPE_ARRAY; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_ARRAY); -#endif // !USE_TRY } // endswitch opc return bt; @@ -1715,11 +1711,7 @@ PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having) break; // Remove eventual ending separator(s) // if (fp->Convert(g, having)) -//#if defined(USE_TRY) // throw TYPE_ARRAY; -//#else // !USE_TRY -// longjmp(g->jumper[g->jump_level], TYPE_FILTER); -//#endif // !USE_TRY filp = fp; fp = fp->Next; @@ -1752,11 +1744,7 @@ DllExport bool ApplyFilter(PGLOBAL g, PFIL filp) // return TRUE; if (filp->Eval(g)) -#if defined(USE_TRY) throw TYPE_FILTER; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_FILTER); -#endif // !USE_TRY if (trace > 1) htrc("PlugFilter filp=%p result=%d\n", diff --git a/storage/connect/filter.h b/storage/connect/filter.h index 15730e2cc44..f4835d23a7c 100644 --- a/storage/connect/filter.h +++ b/storage/connect/filter.h @@ -61,7 +61,7 @@ class DllExport FILTER : public XOBJECT { /* Filter description block */ //virtual PXOB CheckSubQuery(PGLOBAL, PSQL); //virtual bool CheckLocal(PTDB); //virtual int CheckSpcCol(PTDB tdbp, int n); - virtual void Print(PGLOBAL g, FILE *f, uint n); + virtual void Print(PGLOBAL g, FILE *f, uint n); virtual void Print(PGLOBAL g, char *ps, uint z); // PFIL Linearize(bool nosep); // PFIL Link(PGLOBAL g, PFIL fil2); diff --git a/storage/connect/global.h b/storage/connect/global.h index 5579e8bd248..a2030fdb5d0 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -59,7 +59,7 @@ #define NO_IVAL -95684275 /* Used by GetIntegerOption */ #define VMLANG 370 /* Size of olf VM lang blocks */ #define MAX_JUMP 24 /* Maximum jump level number */ -#define MAX_STR 1024 /* Maximum string length */ +#define MAX_STR 4160 /* Maximum message length */ #define STR_SIZE 501 /* Length of char strings. 
*/ #define STD_INPUT 0 /* Standard language input */ #define STD_OUTPUT 1 /* Standard language output */ diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 5245b2559c5..240625d03fb 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /** @file ha_connect.cc @@ -172,9 +172,9 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.05.0003 March 7, 2017"; + char version[]= "Version 1.06.0001 April 17, 2017"; #if defined(__WIN__) - char compver[]= "Version 1.05.0003 " __DATE__ " " __TIME__; + char compver[]= "Version 1.06.0001 " __DATE__ " " __TIME__; char slash= '\\'; #else // !__WIN__ char slash= '/'; @@ -209,14 +209,15 @@ pthread_mutex_t parmut = PTHREAD_MUTEX_INITIALIZER; /***********************************************************************/ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); PQRYRES VirColumns(PGLOBAL g, bool info); -PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info); +PQRYRES JSONColumns(PGLOBAL g, char *db, char *dsn, PTOS topt, bool info); PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info); int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v); void PushWarning(PGLOBAL g, THD *thd, int level); -bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, - const char *db, char *tab, const char *src, int port); -bool ZipLoadFile(PGLOBAL, char*, char*, char*, bool, bool); +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host, PCSZ db, + PCSZ tab, PCSZ src, int port); +bool ZipLoadFile(PGLOBAL, PCSZ, PCSZ, PCSZ, bool, bool); bool ExactInfo(void); +void mongo_init(bool); USETEMP UseTemp(void); int GetConvSize(void); TYPCONV GetTypeConv(void); @@ -670,7 +671,7 @@ static int connect_init_func(void *p) sql_print_information("CONNECT: %s", version); #endif // !__WIN__ -#ifdef LIBXML2_SUPPORT +#if defined(LIBXML2_SUPPORT) XmlInitParserLib(); #endif // LIBXML2_SUPPORT @@ -1018,15 +1019,14 @@ ulonglong ha_connect::table_flags() const /****************************************************************************/ /* Return the value of an option specified in an option list. */ /****************************************************************************/ -char *GetListOption(PGLOBAL g, const char *opname, - const char *oplist, const char *def) +PCSZ GetListOption(PGLOBAL g, PCSZ opname, PCSZ oplist, PCSZ def) { if (!oplist) return (char*)def; char key[16], val[256]; char *pk, *pv, *pn; - char *opval= (char*)def; + PCSZ opval= def; int n; for (pk= (char*)oplist; pk; pk= ++pn) { @@ -1062,9 +1062,9 @@ char *GetListOption(PGLOBAL g, const char *opname, /****************************************************************************/ /* Return the value of a string option or NULL if not specified. 
*/ /****************************************************************************/ -char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef) +PCSZ GetStringTableOption(PGLOBAL g, PTOS options, PCSZ opname, PCSZ sdef) { - const char *opval= NULL; + PCSZ opval= NULL; if (!options) return sdef; @@ -1107,10 +1107,10 @@ char *GetStringTableOption(PGLOBAL g, PTOS options, char *opname, char *sdef) /****************************************************************************/ /* Return the value of a Boolean option or bdef if not specified. */ /****************************************************************************/ -bool GetBooleanTableOption(PGLOBAL g, PTOS options, char *opname, bool bdef) +bool GetBooleanTableOption(PGLOBAL g, PTOS options, PCSZ opname, bool bdef) { - bool opval= bdef; - char *pv; + bool opval= bdef; + PCSZ pv; if (!options) return bdef; @@ -1138,7 +1138,7 @@ bool GetBooleanTableOption(PGLOBAL g, PTOS options, char *opname, bool bdef) /****************************************************************************/ /* Return the value of an integer option or NO_IVAL if not specified. */ /****************************************************************************/ -int GetIntegerTableOption(PGLOBAL g, PTOS options, char *opname, int idef) +int GetIntegerTableOption(PGLOBAL g, PTOS options, PCSZ opname, int idef) { ulonglong opval= (ulonglong) NO_IVAL; @@ -1160,10 +1160,10 @@ int GetIntegerTableOption(PGLOBAL g, PTOS options, char *opname, int idef) opval= (options->compressed); if ((ulonglong) opval == (ulonglong)NO_IVAL) { - char *pv; + PCSZ pv; if ((pv= GetListOption(g, opname, options->oplist))) - opval= CharToNumber(pv, strlen(pv), ULONGLONG_MAX, true); + opval= CharToNumber((char*)pv, strlen(pv), ULONGLONG_MAX, true); else return idef; @@ -1188,7 +1188,7 @@ PTOS ha_connect::GetTableOptionStruct(TABLE_SHARE *s) /****************************************************************************/ /* Return the string eventually formatted with partition name. */ /****************************************************************************/ -char *ha_connect::GetRealString(const char *s) +char *ha_connect::GetRealString(PCSZ s) { char *sv; @@ -1205,10 +1205,10 @@ char *ha_connect::GetRealString(const char *s) /****************************************************************************/ /* Return the value of a string option or sdef if not specified. */ /****************************************************************************/ -char *ha_connect::GetStringOption(char *opname, char *sdef) +PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef) { - char *opval= NULL; - PTOS options= GetTableOptionStruct(); + PCSZ opval= NULL; + PTOS options= GetTableOptionStruct(); if (!stricmp(opname, "Connect")) { LEX_STRING cnc= (tshp) ? tshp->connect_string @@ -1267,7 +1267,7 @@ char *ha_connect::GetStringOption(char *opname, char *sdef) /****************************************************************************/ /* Return the value of a Boolean option or bdef if not specified. */ /****************************************************************************/ -bool ha_connect::GetBooleanOption(char *opname, bool bdef) +bool ha_connect::GetBooleanOption(PCSZ opname, bool bdef) { bool opval; PTOS options= GetTableOptionStruct(); @@ -1284,7 +1284,7 @@ bool ha_connect::GetBooleanOption(char *opname, bool bdef) /* Set the value of the opname option (does not work for oplist options) */ /* Currently used only to set the Sepindex value. 
*/ /****************************************************************************/ -bool ha_connect::SetBooleanOption(char *opname, bool b) +bool ha_connect::SetBooleanOption(PCSZ opname, bool b) { PTOS options= GetTableOptionStruct(); @@ -1302,7 +1302,7 @@ bool ha_connect::SetBooleanOption(char *opname, bool b) /****************************************************************************/ /* Return the value of an integer option or NO_IVAL if not specified. */ /****************************************************************************/ -int ha_connect::GetIntegerOption(char *opname) +int ha_connect::GetIntegerOption(PCSZ opname) { int opval; PTOS options= GetTableOptionStruct(); @@ -1322,7 +1322,7 @@ int ha_connect::GetIntegerOption(char *opname) /* Set the value of the opname option (does not work for oplist options) */ /* Currently used only to set the Lrecl value. */ /****************************************************************************/ -bool ha_connect::SetIntegerOption(char *opname, int n) +bool ha_connect::SetIntegerOption(PCSZ opname, int n) { PTOS options= GetTableOptionStruct(); @@ -1522,7 +1522,7 @@ PXOS ha_connect::GetIndexOptionStruct(KEY *kp) /****************************************************************************/ /* Return a Boolean index option or false if not specified. */ /****************************************************************************/ -bool ha_connect::GetIndexOption(KEY *kp, char *opname) +bool ha_connect::GetIndexOption(KEY *kp, PCSZ opname) { bool opval= false; PXOS options= GetIndexOptionStruct(kp); @@ -1534,7 +1534,7 @@ bool ha_connect::GetIndexOption(KEY *kp, char *opname) opval= options->mapped; } else if (kp->comment.str && kp->comment.length) { - char *pv, *oplist= strz(xp->g, kp->comment); + PCSZ pv, oplist= strz(xp->g, kp->comment); if ((pv= GetListOption(xp->g, opname, oplist))) opval= (!*pv || *pv == 'y' || *pv == 'Y' || atoi(pv) != 0); @@ -1739,9 +1739,9 @@ void ha_connect::AddColName(char *cp, Field *fp) /***********************************************************************/ /* This function sets the current database path. 
*/ /***********************************************************************/ -bool ha_connect::SetDataPath(PGLOBAL g, const char *path) +bool ha_connect::SetDataPath(PGLOBAL g, const char *path) { - return (!(datapath = SetPath(g, path))); + return (!(datapath= SetPath(g, path))); } // end of SetDataPath /****************************************************************************/ @@ -1901,41 +1901,21 @@ bool ha_connect::CheckColumnList(PGLOBAL g) Field* fp; MY_BITMAP *map= table->read_set; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif jump_level - - if (!setjmp(g->jumper[++g->jump_level])) { -#endif // !USE_TRY for (field= table->field; fp= *field; field++) if (bitmap_is_set(map, fp->field_index)) { if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name, 0))) { sprintf(g->Message, "Column %s not found in %s", fp->field_name, tdbp->GetName()); -#if defined(USE_TRY) throw 1; -#else // !USE_TRY - brc = true; - goto fin; -#endif // !USE_TRY } // endif colp if ((brc= colp->InitValue(g))) -#if defined(USE_TRY) throw 2; -#else // !USE_TRY - goto fin; -#endif // !USE_TRY colp->AddColUse(U_P); // For PLG tables } // endif -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); @@ -1944,13 +1924,7 @@ bool ha_connect::CheckColumnList(PGLOBAL g) strcpy(g->Message, msg); brc = true; } // end catch -#else // !USE_TRY - } else - brc = true; - fin: - g->jump_level--; -#endif // !USE_TRY return brc; } // end of CheckColumnList @@ -1987,7 +1961,8 @@ int ha_connect::CloseTable(PGLOBAL g) /***********************************************************************/ int ha_connect::MakeRecord(char *buf) { - char *p, *fmt, val[32]; + PCSZ fmt; + char *p, val[32]; int rc= 0; Field* *field; Field *fp; @@ -2123,7 +2098,7 @@ int ha_connect::ScanRecord(PGLOBAL g, uchar *) { char attr_buffer[1024]; char data_buffer[1024]; - char *fmt; + PCSZ fmt; int rc= 0; PCOL colp; PVAL value, sdvalin; @@ -2297,7 +2272,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, const uchar *ptr; //uint i, rem, len, klen, stlen; uint i, rem, len, stlen; - bool nq, both, oom= false; + bool nq, both, oom; OPVAL op; Field *fp; const key_range *ranges[2]; @@ -2325,9 +2300,9 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, continue; if (both && i > 0) - oom|= qry->Append(") AND ("); + qry->Append(") AND ("); else - oom|= qry->Append(" WHERE ("); + qry->Append(" WHERE ("); // klen= len= ranges[i]->length; len= ranges[i]->length; @@ -2340,14 +2315,14 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, nq= fp->str_needs_quotes(); if (kpart != kfp->key_part) - oom|= qry->Append(" AND "); + qry->Append(" AND "); if (q) { - oom|= qry->Append(q); - oom|= qry->Append((PSZ)fp->field_name); - oom|= qry->Append(q); + qry->Append(q); + qry->Append((PSZ)fp->field_name); + qry->Append(q); } else - oom|= qry->Append((PSZ)fp->field_name); + qry->Append((PSZ)fp->field_name); switch (ranges[i]->flag) { case HA_READ_KEY_EXACT: @@ -2372,10 +2347,10 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, goto err; } // endswitch flag - oom|= qry->Append((PSZ)GetValStr(op, false)); + qry->Append((PSZ)GetValStr(op, false)); if (nq) - oom|= qry->Append('\''); + qry->Append('\''); if (kpart->key_part_flag & HA_VAR_LENGTH_PART) { String varchar; @@ -2383,17 +2358,17 @@ bool 
ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, varchar.set_quick((char*)ptr + HA_KEY_BLOB_LENGTH, var_length, &my_charset_bin); - oom|= qry->Append(varchar.ptr(), varchar.length(), nq); + qry->Append(varchar.ptr(), varchar.length(), nq); } else { char strbuff[MAX_FIELD_WIDTH]; String str(strbuff, sizeof(strbuff), kpart->field->charset()), *res; res= fp->val_str(&str, ptr); - oom|= qry->Append(res->ptr(), res->length(), nq); + qry->Append(res->ptr(), res->length(), nq); } // endif flag if (nq) - oom |= qry->Append('\''); + qry->Append('\''); if (stlen >= len) break; @@ -2408,7 +2383,9 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, } // endfor i - if ((oom|= qry->Append(")"))) + qry->Append(')'); + + if ((oom= qry->IsTruncated())) strcpy(g->Message, "Out of memory"); dbug_tmp_restore_column_map(table->write_set, old_map); @@ -2722,7 +2699,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) if (x) return NULL; else - pb0 = pb1 = pb2 = ph0 = ph1 = ph2 = NULL; + pb0= pb1= pb2= ph0= ph1= ph2= NULL; if (trace) htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(), @@ -3086,59 +3063,56 @@ const COND *ha_connect::cond_push(const COND *cond) tty == TYPE_AM_PLG || tty == TYPE_AM_JDBC || x); // This should never happen but is done to avoid crashing -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - DBUG_RETURN(cond); - } // endif jump_level + if (b) { + PCFIL filp; + int rc; - if (setjmp(g->jumper[++g->jump_level])) - goto fin; -#endif // !USE_TRY + if ((filp = tdbp->GetCondFil()) && tdbp->GetCond() == cond && + filp->Idx == active_index && filp->Type == tty) + goto fin; - if (b) { - PCFIL filp; - int rc; + filp = new(g) CONDFIL(active_index, tty); + rc = filp->Init(g, this); - if ((filp= tdbp->GetCondFil()) && filp->Cond == cond && - filp->Idx == active_index && filp->Type == tty) - goto fin; + if (rc == RC_INFO) { + filp->Having = (char*)PlugSubAlloc(g, NULL, 256); + *filp->Having = 0; + } else if (rc == RC_FX) + goto fin; - filp= new(g) CONDFIL(cond, active_index, tty); - rc = filp->Init(g, this); + filp->Body = (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0); + *filp->Body = 0; - if (rc == RC_INFO) { - filp->Having = (char*)PlugSubAlloc(g, NULL, 256); - *filp->Having = 0; - } else if (rc == RC_FX) - goto fin; + if (CheckCond(g, filp, cond)) { + if (filp->Having && strlen(filp->Having) > 255) + goto fin; // Memory collapse - filp->Body = (char*)PlugSubAlloc(g, NULL, (x) ? 128 : 0); - *filp->Body = 0; + if (trace) + htrc("cond_push: %s\n", filp->Body); - if (CheckCond(g, filp, cond)) { - if (filp->Having && strlen(filp->Having) > 255) - goto fin; // Memory collapse + tdbp->SetCond(cond); - if (trace) - htrc("cond_push: %s\n", filp->Body); + if (!x) + PlugSubAlloc(g, NULL, strlen(filp->Body) + 1); + else + cond = NULL; // Does this work? - if (!x) - PlugSubAlloc(g, NULL, strlen(filp->Body) + 1); - else - cond= NULL; // Does this work? 
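Review note on the MakeKeyWhere hunks earlier in this file: instead of OR-ing the boolean result of every Append call into oom, the query string is now built unconditionally and truncation is tested once at the end through qry->IsTruncated(). A small stand-alone sketch of that pattern; the Strg class below is only a toy stand-in for CONNECT's own STRING/PSTRG type:

#include <cstdio>
#include <cstring>

class Strg {                       // illustrative stand-in, not the real class
  char        buf[64];
  std::size_t len;
  bool        trunc;
public:
  Strg() : len(0), trunc(false) { buf[0] = 0; }

  void Append(const char *s) {
    std::size_t n = std::strlen(s);

    if (len + n >= sizeof(buf)) {  // remember the overflow but keep going
      trunc = true;
      n = sizeof(buf) - 1 - len;
    } // endif overflow

    std::memcpy(buf + len, s, n);
    len += n;
    buf[len] = 0;
  } // end of Append

  bool IsTruncated() const { return trunc; }
  const char *Ptr() const { return buf; }
}; // end of class Strg

int main() {
  Strg qry;

  qry.Append(" WHERE (");          // no per-call oom |= ... any more
  qry.Append("id=1");
  qry.Append(")");

  if (qry.IsTruncated())           // one check at the end is enough
    std::puts("Out of memory");
  else
    std::puts(qry.Ptr());

  return 0;
} // end of main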
+ tdbp->SetCondFil(filp); + } else if (x && cond) + tdbp->SetCondFil(filp); // Wrong filter + + } else if (tdbp->CanBeFiltered()) { + if (!tdbp->GetCond() || tdbp->GetCond() != cond) { + tdbp->SetFilter(CondFilter(g, (Item *)cond)); - tdbp->SetCondFil(filp); - } else if (x && cond) - tdbp->SetCondFil(filp); // Wrong filter + if (tdbp->GetFilter()) + tdbp->SetCond(cond); - } else if (tty != TYPE_AM_JSN && tty != TYPE_AM_JSON) - tdbp->SetFilter(CondFilter(g, (Item *)cond)); + } // endif cond + + } // endif tty -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); @@ -3147,10 +3121,6 @@ const COND *ha_connect::cond_push(const COND *cond) } // end catch fin:; -#else // !USE_TRY - fin: - g->jump_level--; -#endif // !USE_TRY } // endif tdbp // Let MySQL do the filtering @@ -3301,44 +3271,28 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*) PGLOBAL& g= xp->g; PDBUSER dup= PlgGetUser(g); -#if defined(USE_TRY) try { -#else // !USE_TRY - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif - - if (setjmp(g->jumper[++g->jump_level])) { - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif setjmp -#endif // !USE_TRY + // Ignore error on the opt file + dup->Check &= ~CHK_OPT; + tdbp = GetTDB(g); + dup->Check |= CHK_OPT; - // Ignore error on the opt file - dup->Check &= ~CHK_OPT; - tdbp= GetTDB(g); - dup->Check |= CHK_OPT; + if (tdbp && !tdbp->IsRemote()) { + bool dop = IsTypeIndexable(GetRealType(NULL)); + bool dox = (tdbp->GetDef()->Indexable() == 1); - if (tdbp && !tdbp->IsRemote()) { - bool dop= IsTypeIndexable(GetRealType(NULL)); - bool dox= (tdbp->GetDef()->Indexable() == 1); - - if ((rc= ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) { - if (rc == RC_INFO) { - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); - rc= 0; - } else - rc= HA_ERR_INTERNAL_ERROR; + if ((rc = ((PTDBASE)tdbp)->ResetTableOpt(g, dop, dox))) { + if (rc == RC_INFO) { + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + rc = 0; + } else + rc = HA_ERR_INTERNAL_ERROR; - } // endif rc + } // endif rc - } else if (!tdbp) - rc= HA_ERR_INTERNAL_ERROR; + } else if (!tdbp) + rc = HA_ERR_INTERNAL_ERROR; -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); @@ -3347,10 +3301,6 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*) strcpy(g->Message, msg); rc = HA_ERR_INTERNAL_ERROR; } // end catch -#else // !USE_TRY -err: - g->jump_level--; -#endif // !USE_TRY return rc; } // end of optimize @@ -4045,8 +3995,12 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos) tdbp->SetFilter(NULL); rc= rnd_next(buf); - } else - rc= HA_ERR_KEY_NOT_FOUND; + } else { + PGLOBAL g = GetPlug((table) ? 
table->in_use : NULL, xp); + strcpy(g->Message, "Not supported by this table type"); + my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + rc= HA_ERR_INTERNAL_ERROR; + } // endif SetRecpos DBUG_RETURN(rc); } // end of rnd_pos @@ -4112,14 +4066,14 @@ int ha_connect::info(uint flag) } // endif xmod // This is necessary for getting file length - if (table) { + if (table) { if (SetDataPath(g, table->s->db.str)) { my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } // endif SetDataPath } else - DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen if (!(tdbp= GetTDB(g))) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); // Should never happen @@ -4268,35 +4222,35 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) case TAB_INI: case TAB_VEC: case TAB_JSON: - if (options->filename && *options->filename) { - if (!quick) { - char *s, path[FN_REFLEN], dbpath[FN_REFLEN]; + if (options->filename && *options->filename) { + if (!quick) { + char path[FN_REFLEN], dbpath[FN_REFLEN]; + + strcpy(dbpath, mysql_real_data_home); + + if (db) #if defined(__WIN__) - s= "\\"; + strcat(strcat(dbpath, db), "\\"); #else // !__WIN__ - s= "/"; + strcat(strcat(dbpath, db), "/"); #endif // !__WIN__ - strcpy(dbpath, mysql_real_data_home); - - if (db) - strcat(strcat(dbpath, db), s); - (void) fn_format(path, options->filename, dbpath, "", - MY_RELATIVE_PATH | MY_UNPACK_FILENAME); + (void)fn_format(path, options->filename, dbpath, "", + MY_RELATIVE_PATH | MY_UNPACK_FILENAME); - if (!is_secure_file_path(path)) { - my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); - return true; - } // endif path - } - } else + if (!is_secure_file_path(path)) { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); + return true; + } // endif path + } + } else return false; /* Fall through to check FILE_ACL */ case TAB_ODBC: case TAB_JDBC: case TAB_MYSQL: - case TAB_DIR: + case TAB_DIR: case TAB_MAC: case TAB_WMI: case TAB_ZIP: @@ -4606,7 +4560,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); DBUG_RETURN(0); } else if (!tdbp->GetDef()->Indexable()) { - sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName()); + sprintf(g->Message, "external_lock: Table %s is not indexable", tdbp->GetName()); // DBUG_RETURN(HA_ERR_INTERNAL_ERROR); causes assert error push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); DBUG_RETURN(0); @@ -4689,7 +4643,9 @@ int ha_connect::external_lock(THD *thd, int lock_type) push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); rc= 0; - } // endif MakeIndex + //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + //rc= HA_ERR_INTERNAL_ERROR; + } // endif MakeIndex } else if (tdbp->GetDef()->Indexable() == 3) { if (CheckVirtualIndex(NULL)) { @@ -4710,9 +4666,12 @@ int ha_connect::external_lock(THD *thd, int lock_type) // Make it a warning to avoid crash push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); rc= 0; - } // endif Close + //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + //rc = HA_ERR_INTERNAL_ERROR; + } // endif Close locked= 0; +// m_lock_type= lock_type; xmod= MODE_ANY; // For info commands DBUG_RETURN(rc); } // endif MODE_ANY @@ -5070,8 +5029,8 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key, } // end of records_in_range // Used to check whether a MYSQL table is created on itself -bool 
CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, - const char *db, char *tab, const char *src, int port) +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host, + PCSZ db, PCSZ tab, PCSZ src, int port) { if (src) return false; @@ -5307,41 +5266,41 @@ static int connect_assisted_discovery(handlerton *, THD* thd, TABLE_SHARE *table_s, HA_CREATE_INFO *create_info) { - char v=0; - const char *fncn= "?"; - const char *user, *fn, *db, *host, *pwd, *sep, *tbl, *src; - const char *col, *ocl, *rnk, *pic, *fcl, *skc, *zfn; - char *tab, *dsn, *shm, *dpath; + char v=0; + PCSZ fncn= "?"; + PCSZ user, fn, db, host, pwd, sep, tbl, src; + PCSZ col, ocl, rnk, pic, fcl, skc, zfn; + char *tab, *dsn, *shm, *dpath; #if defined(__WIN__) - char *nsp= NULL, *cls= NULL; + PCSZ nsp= NULL, cls= NULL; #endif // __WIN__ -//int hdr, mxe; - int port = 0, mxr = 0, rc = 0, mul = 0, lrecl = 0; +//int hdr, mxe; + int port = 0, mxr = 0, rc = 0, mul = 0, lrecl = 0; #if defined(ODBC_SUPPORT) - POPARM sop= NULL; - char *ucnc= NULL; - bool cnc= false; - int cto= -1, qto= -1; + POPARM sop= NULL; + PCSZ ucnc= NULL; + bool cnc= false; + int cto= -1, qto= -1; #endif // ODBC_SUPPORT #if defined(JDBC_SUPPORT) - PJPARM sjp= NULL; - char *driver= NULL; - char *url= NULL; -//char *prop= NULL; - char *tabtyp= NULL; + PJPARM sjp= NULL; + PCSZ driver= NULL; + char *url= NULL; +//char *prop= NULL; + PCSZ tabtyp= NULL; #endif // JDBC_SUPPORT - uint tm, fnc= FNC_NO, supfnc= (FNC_NO | FNC_COL); - bool bif, ok= false, dbf= false; - TABTYPE ttp= TAB_UNDEF; - PQRYRES qrp= NULL; - PCOLRES crp; - PCONNECT xp= NULL; - PGLOBAL g= GetPlug(thd, xp); - PDBUSER dup= PlgGetUser(g); - PCATLG cat= (dup) ? dup->Catalog : NULL; - PTOS topt= table_s->option_struct; - char buf[1024]; - String sql(buf, sizeof(buf), system_charset_info); + uint tm, fnc= FNC_NO, supfnc= (FNC_NO | FNC_COL); + bool bif, ok= false, dbf= false; + TABTYPE ttp= TAB_UNDEF; + PQRYRES qrp= NULL; + PCOLRES crp; + PCONNECT xp= NULL; + PGLOBAL g= GetPlug(thd, xp); + PDBUSER dup= PlgGetUser(g); + PCATLG cat= (dup) ? dup->Catalog : NULL; + PTOS topt= table_s->option_struct; + char buf[1024]; + String sql(buf, sizeof(buf), system_charset_info); sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info); @@ -5409,547 +5368,492 @@ static int connect_assisted_discovery(handlerton *, THD* thd, if (!(shm= (char*)db)) db= table_s->db.str; // Default value -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - goto jer; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif rc -#endif // !USE_TRY - - // Check table type - if (ttp == TAB_UNDEF) { - topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS"; - ttp= GetTypeID(topt->type); - sprintf(g->Message, "No table_type. Was set to %s", topt->type); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); - } else if (ttp == TAB_NIY) { - sprintf(g->Message, "Unsupported table type %s", topt->type); - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif ttp + // Check table type + if (ttp == TAB_UNDEF) { + topt->type = (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS"; + ttp = GetTypeID(topt->type); + sprintf(g->Message, "No table_type. 
Was set to %s", topt->type); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + } else if (ttp == TAB_NIY) { + sprintf(g->Message, "Unsupported table type %s", topt->type); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif ttp - if (!tab) { - if (ttp == TAB_TBL) { - // Make tab the first table of the list - char *p; + if (!tab) { + if (ttp == TAB_TBL) { + // Make tab the first table of the list + char *p; - if (!tbl) { - strcpy(g->Message, "Missing table list"); - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif tbl + if (!tbl) { + strcpy(g->Message, "Missing table list"); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif tbl - tab= PlugDup(g, tbl); + tab = PlugDup(g, tbl); - if ((p= strchr(tab, ','))) - *p= 0; + if ((p = strchr(tab, ','))) + *p = 0; - if ((p=strchr(tab, '.'))) { - *p= 0; - db= tab; - tab= p + 1; - } // endif p + if ((p = strchr(tab, '.'))) { + *p = 0; + db = tab; + tab = p + 1; + } // endif p - } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL))) - tab= table_s->table_name.str; // Default value + } else if (ttp != TAB_ODBC || !(fnc & (FNC_TABLE | FNC_COL))) + tab = table_s->table_name.str; // Default value - } // endif tab + } // endif tab - switch (ttp) { + switch (ttp) { #if defined(ODBC_SUPPORT) - case TAB_ODBC: - dsn= strz(g, create_info->connect_string); + case TAB_ODBC: + dsn = strz(g, create_info->connect_string); - if (fnc & (FNC_DSN | FNC_DRIVER)) { - ok= true; + if (fnc & (FNC_DSN | FNC_DRIVER)) { + ok = true; #if defined(PROMPT_OK) - } else if (!stricmp(thd->main_security_ctx.host, "localhost") - && cop == 1) { - if ((dsn = ODBCCheckConnection(g, dsn, cop)) != NULL) { - thd->make_lex_string(&create_info->connect_string, dsn, strlen(dsn)); - ok= true; - } // endif dsn + } else if (!stricmp(thd->main_security_ctx.host, "localhost") + && cop == 1) { + if ((dsn = ODBCCheckConnection(g, dsn, cop)) != NULL) { + thd->make_lex_string(&create_info->connect_string, dsn, strlen(dsn)); + ok = true; + } // endif dsn #endif // PROMPT_OK - } else if (!dsn) { - sprintf(g->Message, "Missing %s connection string", topt->type); - } else { - // Store ODBC additional parameters - sop= (POPARM)PlugSubAlloc(g, NULL, sizeof(ODBCPARM)); - sop->User= (char*)user; - sop->Pwd= (char*)pwd; - sop->Cto= cto; - sop->Qto= qto; - sop->UseCnc= cnc; - ok= true; - } // endif's - - supfnc |= (FNC_TABLE | FNC_DSN | FNC_DRIVER); - break; + } else if (!dsn) { + sprintf(g->Message, "Missing %s connection string", topt->type); + } else { + // Store ODBC additional parameters + sop = (POPARM)PlugSubAlloc(g, NULL, sizeof(ODBCPARM)); + sop->User = (char*)user; + sop->Pwd = (char*)pwd; + sop->Cto = cto; + sop->Qto = qto; + sop->UseCnc = cnc; + ok = true; + } // endif's + + supfnc |= (FNC_TABLE | FNC_DSN | FNC_DRIVER); + break; #endif // ODBC_SUPPORT #if defined(JDBC_SUPPORT) - case TAB_JDBC: - if (fnc & FNC_DRIVER) { - ok= true; - } else if (!(url= strz(g, create_info->connect_string))) { - strcpy(g->Message, "Missing URL"); - } else { - // Store JDBC additional parameters - int rc; - PJDBCDEF jdef= new(g) JDBCDEF(); - - jdef->SetName(create_info->alias); - sjp= (PJPARM)PlugSubAlloc(g, NULL, sizeof(JDBCPARM)); - sjp->Driver= driver; -// sjp->Properties = prop; - sjp->Fsize= 0; - sjp->Scrollable= false; - - if ((rc = jdef->ParseURL(g, url, false)) == RC_OK) { - sjp->Url= url; - sjp->User= (char*)user; - sjp->Pwd= (char*)pwd; - ok= true; - } else if (rc == RC_NF) { - if (jdef->GetTabname()) - tab= jdef->GetTabname(); - - ok= jdef->SetParms(sjp); - } // endif rc - - } 
// endif's - - supfnc |= (FNC_DRIVER | FNC_TABLE); - break; + case TAB_JDBC: + if (fnc & FNC_DRIVER) { + ok = true; + } else if (!(url = strz(g, create_info->connect_string))) { + strcpy(g->Message, "Missing URL"); + } else { + // Store JDBC additional parameters + int rc; + PJDBCDEF jdef = new(g) JDBCDEF(); + + jdef->SetName(create_info->alias); + sjp = (PJPARM)PlugSubAlloc(g, NULL, sizeof(JDBCPARM)); + sjp->Driver = driver; + // sjp->Properties = prop; + sjp->Fsize = 0; + sjp->Scrollable = false; + + if ((rc = jdef->ParseURL(g, url, false)) == RC_OK) { + sjp->Url = url; + sjp->User = (char*)user; + sjp->Pwd = (char*)pwd; + ok = true; + } else if (rc == RC_NF) { + if (jdef->GetTabname()) + tab = (char*)jdef->GetTabname(); + + ok = jdef->SetParms(sjp); + } // endif rc + + } // endif's + + supfnc |= (FNC_DRIVER | FNC_TABLE); + break; #endif // JDBC_SUPPORT - case TAB_DBF: - dbf= true; - // Passthru - case TAB_CSV: - if (!fn && fnc != FNC_NO) - sprintf(g->Message, "Missing %s file name", topt->type); - else if (sep && strlen(sep) > 1) - sprintf(g->Message, "Invalid separator %s", sep); - else - ok= true; + case TAB_DBF: + dbf = true; + // Passthru + case TAB_CSV: + if (!fn && fnc != FNC_NO) + sprintf(g->Message, "Missing %s file name", topt->type); + else if (sep && strlen(sep) > 1) + sprintf(g->Message, "Invalid separator %s", sep); + else + ok = true; - break; - case TAB_MYSQL: - ok= true; + break; + case TAB_MYSQL: + ok = true; - if (create_info->connect_string.str && - create_info->connect_string.length) { - PMYDEF mydef= new(g) MYSQLDEF(); + if (create_info->connect_string.str && + create_info->connect_string.length) { + PMYDEF mydef = new(g) MYSQLDEF(); - dsn= strz(g, create_info->connect_string); - mydef->SetName(create_info->alias); + dsn = strz(g, create_info->connect_string); + mydef->SetName(create_info->alias); - if (!mydef->ParseURL(g, dsn, false)) { - if (mydef->GetHostname()) - host= mydef->GetHostname(); + if (!mydef->ParseURL(g, dsn, false)) { + if (mydef->GetHostname()) + host = mydef->GetHostname(); - if (mydef->GetUsername()) - user= mydef->GetUsername(); + if (mydef->GetUsername()) + user = mydef->GetUsername(); - if (mydef->GetPassword()) - pwd= mydef->GetPassword(); + if (mydef->GetPassword()) + pwd = mydef->GetPassword(); - if (mydef->GetTabschema()) - db = mydef->GetTabschema(); + if (mydef->GetTabschema()) + db = mydef->GetTabschema(); - if (mydef->GetTabname()) - tab= mydef->GetTabname(); + if (mydef->GetTabname()) + tab = (char*)mydef->GetTabname(); - if (mydef->GetPortnumber()) - port= mydef->GetPortnumber(); + if (mydef->GetPortnumber()) + port = mydef->GetPortnumber(); - } else - ok= false; + } else + ok = false; - } else if (!user) - user= "root"; + } else if (!user) + user = "root"; - if (ok && CheckSelf(g, table_s, host, db, tab, src, port)) - ok= false; + if (ok && CheckSelf(g, table_s, host, db, tab, src, port)) + ok = false; - break; + break; #if defined(__WIN__) - case TAB_WMI: - ok= true; - break; + case TAB_WMI: + ok = true; + break; #endif // __WIN__ #if defined(PIVOT_SUPPORT) - case TAB_PIVOT: - supfnc= FNC_NO; + case TAB_PIVOT: + supfnc = FNC_NO; #endif // PIVOT_SUPPORT - case TAB_PRX: - case TAB_TBL: - case TAB_XCL: - case TAB_OCCUR: - if (!src && !stricmp(tab, create_info->alias) && - (!db || !stricmp(db, table_s->db.str))) - sprintf(g->Message, "A %s table cannot refer to itself", topt->type); - else - ok= true; + case TAB_PRX: + case TAB_TBL: + case TAB_XCL: + case TAB_OCCUR: + if (!src && !stricmp(tab, create_info->alias) && + (!db || 
!stricmp(db, table_s->db.str))) + sprintf(g->Message, "A %s table cannot refer to itself", topt->type); + else + ok = true; - break; - case TAB_OEM: - if (topt->module && topt->subtype) - ok= true; - else - strcpy(g->Message, "Missing OEM module or subtype"); + break; + case TAB_OEM: + if (topt->module && topt->subtype) + ok = true; + else + strcpy(g->Message, "Missing OEM module or subtype"); - break; + break; #if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT) - case TAB_XML: + case TAB_XML: #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT - case TAB_JSON: - if (!fn && !zfn && !mul) - sprintf(g->Message, "Missing %s file name", topt->type); - else - ok= true; - - break; - case TAB_VIR: - ok= true; - break; - default: - sprintf(g->Message, "Cannot get column info for table type %s", topt->type); - break; - } // endif ttp - - // Check for supported catalog function - if (ok && !(supfnc & fnc)) { - sprintf(g->Message, "Unsupported catalog function %s for table type %s", - fncn, topt->type); - ok= false; - } // endif supfnc - - if (src && fnc != FNC_NO) { - strcpy(g->Message, "Cannot make catalog table from srcdef"); - ok= false; - } // endif src - - if (ok) { - char *cnm, *rem, *dft, *xtra, *key, *fmt; - int i, len, prec, dec, typ, flg; + case TAB_JSON: + dsn = strz(g, create_info->connect_string); - // if (cat) - // cat->SetDataPath(g, table_s->db.str); - // else - // return HA_ERR_INTERNAL_ERROR; // Should never happen + if (!fn && !zfn && !mul && !dsn) + sprintf(g->Message, "Missing %s file name", topt->type); + else + ok = true; - dpath = SetPath(g, table_s->db.str); + break; + case TAB_VIR: + ok = true; + break; + default: + sprintf(g->Message, "Cannot get column info for table type %s", topt->type); + break; + } // endif ttp + + // Check for supported catalog function + if (ok && !(supfnc & fnc)) { + sprintf(g->Message, "Unsupported catalog function %s for table type %s", + fncn, topt->type); + ok = false; + } // endif supfnc + + if (src && fnc != FNC_NO) { + strcpy(g->Message, "Cannot make catalog table from srcdef"); + ok = false; + } // endif src + + if (ok) { + const char *cnm, *rem; + char *dft, *xtra, *key, *fmt; + int i, len, prec, dec, typ, flg; + + if (!(dpath = SetPath(g, table_s->db.str))) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif dpath - if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC && ttp != TAB_JDBC) { - qrp = SrcColumns(g, host, db, user, pwd, src, port); + if (src && ttp != TAB_PIVOT && ttp != TAB_ODBC && ttp != TAB_JDBC) { + qrp = SrcColumns(g, host, db, user, pwd, src, port); - if (qrp && ttp == TAB_OCCUR) - if (OcrSrcCols(g, qrp, col, ocl, rnk)) { - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif OcrSrcCols + if (qrp && ttp == TAB_OCCUR) + if (OcrSrcCols(g, qrp, col, ocl, rnk)) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif OcrSrcCols - } else switch (ttp) { - case TAB_DBF: - qrp = DBFColumns(g, dpath, fn, fnc == FNC_COL); - break; + } else switch (ttp) { + case TAB_DBF: + qrp = DBFColumns(g, dpath, fn, fnc == FNC_COL); + break; #if defined(ODBC_SUPPORT) - case TAB_ODBC: - switch (fnc) { - case FNC_NO: - case FNC_COL: - if (src) { - qrp = ODBCSrcCols(g, dsn, (char*)src, sop); - src = NULL; // for next tests - } else - qrp = ODBCColumns(g, dsn, shm, tab, NULL, - mxr, fnc == FNC_COL, sop); - - break; - case FNC_TABLE: - qrp = ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop); - break; - case FNC_DSN: - qrp = ODBCDataSources(g, mxr, true); - break; - case FNC_DRIVER: - qrp = ODBCDrivers(g, mxr, true); - break; - default: - 
sprintf(g->Message, "invalid catfunc %s", fncn); - break; - } // endswitch info + case TAB_ODBC: + switch (fnc) { + case FNC_NO: + case FNC_COL: + if (src) { + qrp = ODBCSrcCols(g, dsn, (char*)src, sop); + src = NULL; // for next tests + } else + qrp = ODBCColumns(g, dsn, shm, tab, NULL, + mxr, fnc == FNC_COL, sop); - break; + break; + case FNC_TABLE: + qrp = ODBCTables(g, dsn, shm, tab, NULL, mxr, true, sop); + break; + case FNC_DSN: + qrp = ODBCDataSources(g, mxr, true); + break; + case FNC_DRIVER: + qrp = ODBCDrivers(g, mxr, true); + break; + default: + sprintf(g->Message, "invalid catfunc %s", fncn); + break; + } // endswitch info + + break; #endif // ODBC_SUPPORT #if defined(JDBC_SUPPORT) - case TAB_JDBC: - switch (fnc) { - case FNC_NO: - case FNC_COL: - if (src) { - qrp = JDBCSrcCols(g, (char*)src, sjp); - src = NULL; // for next tests - } else - qrp = JDBCColumns(g, shm, tab, NULL, mxr, fnc == FNC_COL, sjp); + case TAB_JDBC: + switch (fnc) { + case FNC_NO: + case FNC_COL: + if (src) { + qrp = JDBCSrcCols(g, (char*)src, sjp); + src = NULL; // for next tests + } else + qrp = JDBCColumns(g, shm, tab, NULL, mxr, fnc == FNC_COL, sjp); - break; - case FNC_TABLE: - qrp = JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp); - break; + break; + case FNC_TABLE: + qrp = JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp); + break; #if 0 - case FNC_DSN: - qrp = JDBCDataSources(g, mxr, true); - break; + case FNC_DSN: + qrp = JDBCDataSources(g, mxr, true); + break; #endif // 0 - case FNC_DRIVER: - qrp = JDBCDrivers(g, mxr, true); - break; - default: - sprintf(g->Message, "invalid catfunc %s", fncn); - break; - } // endswitch info + case FNC_DRIVER: + qrp = JDBCDrivers(g, mxr, true); + break; + default: + sprintf(g->Message, "invalid catfunc %s", fncn); + break; + } // endswitch info - break; + break; #endif // JDBC_SUPPORT - case TAB_MYSQL: - qrp = MyColumns(g, thd, host, db, user, pwd, tab, - NULL, port, fnc == FNC_COL); - break; - case TAB_CSV: - qrp = CSVColumns(g, dpath, topt, fnc == FNC_COL); - break; + case TAB_MYSQL: + qrp = MyColumns(g, thd, host, db, user, pwd, tab, + NULL, port, fnc == FNC_COL); + break; + case TAB_CSV: + qrp = CSVColumns(g, dpath, topt, fnc == FNC_COL); + break; #if defined(__WIN__) - case TAB_WMI: - qrp = WMIColumns(g, nsp, cls, fnc == FNC_COL); - break; + case TAB_WMI: + qrp = WMIColumns(g, nsp, cls, fnc == FNC_COL); + break; #endif // __WIN__ - case TAB_PRX: - case TAB_TBL: - case TAB_XCL: - case TAB_OCCUR: - bif = fnc == FNC_COL; - qrp = TabColumns(g, thd, db, tab, bif); - - if (!qrp && bif && fnc != FNC_COL) // tab is a view - qrp = MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, false); - - if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL) - if (OcrColumns(g, qrp, col, ocl, rnk)) { - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif OcrColumns + case TAB_PRX: + case TAB_TBL: + case TAB_XCL: + case TAB_OCCUR: + bif = fnc == FNC_COL; + qrp = TabColumns(g, thd, db, tab, bif); + + if (!qrp && bif && fnc != FNC_COL) // tab is a view + qrp = MyColumns(g, thd, host, db, user, pwd, tab, NULL, port, false); + + if (qrp && ttp == TAB_OCCUR && fnc != FNC_COL) + if (OcrColumns(g, qrp, col, ocl, rnk)) { + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif OcrColumns - break; + break; #if defined(PIVOT_SUPPORT) - case TAB_PIVOT: - qrp = PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port); - break; + case TAB_PIVOT: + qrp = PivotColumns(g, tab, src, pic, fcl, skc, host, db, user, pwd, port); + break; #endif // PIVOT_SUPPORT - case TAB_VIR: - qrp = 
VirColumns(g, fnc == FNC_COL); - break; - case TAB_JSON: - qrp = JSONColumns(g, (char*)db, topt, fnc == FNC_COL); - break; + case TAB_VIR: + qrp = VirColumns(g, fnc == FNC_COL); + break; + case TAB_JSON: + qrp = JSONColumns(g, (char*)db, dsn, topt, fnc == FNC_COL); + break; #if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT) - case TAB_XML: - qrp = XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL); - break; + case TAB_XML: + qrp = XMLColumns(g, (char*)db, tab, topt, fnc == FNC_COL); + break; #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT - case TAB_OEM: - qrp = OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL); - break; - default: - strcpy(g->Message, "System error during assisted discovery"); - break; - } // endswitch ttp - - if (!qrp) { - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif !qrp - - if (fnc != FNC_NO || src || ttp == TAB_PIVOT) { - // Catalog like table - for (crp = qrp->Colresp; !rc && crp; crp = crp->Next) { - cnm = (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name); - typ = crp->Type; - len = crp->Length; - dec = crp->Prec; - flg = crp->Flag; - v = (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var; - tm = (crp->Kdata->IsNullable()) ? 0 : NOT_NULL_FLAG; - - if (!len && typ == TYPE_STRING) - len = 256; // STRBLK's have 0 length - - // Now add the field - if (add_field(&sql, cnm, typ, len, dec, NULL, tm, - NULL, NULL, NULL, NULL, flg, dbf, v)) - rc = HA_ERR_OUT_OF_MEM; - } // endfor crp - - } else { - char *schem = NULL; - char *tn = NULL; - - // Not a catalog table - if (!qrp->Nblin) { - if (tab) - sprintf(g->Message, "Cannot get columns from %s", tab); - else - strcpy(g->Message, "Fail to retrieve columns"); + case TAB_OEM: + qrp = OEMColumns(g, topt, tab, (char*)db, fnc == FNC_COL); + break; + default: + strcpy(g->Message, "System error during assisted discovery"); + break; + } // endswitch ttp + if (!qrp) { rc = HA_ERR_INTERNAL_ERROR; goto err; - } // endif !nblin - - for (i = 0; !rc && i < qrp->Nblin; i++) { - typ = len = prec = dec = 0; - tm = NOT_NULL_FLAG; - cnm = (char*)"noname"; - dft = xtra = key = fmt = tn = NULL; - v = ' '; - rem = NULL; - - for (crp = qrp->Colresp; crp; crp = crp->Next) - switch (crp->Fld) { - case FLD_NAME: - if (ttp == TAB_PRX || - (ttp == TAB_CSV && topt->data_charset && - (!stricmp(topt->data_charset, "UTF8") || - !stricmp(topt->data_charset, "UTF-8")))) - cnm = crp->Kdata->GetCharValue(i); - else - cnm = encode(g, crp->Kdata->GetCharValue(i)); + } // endif !qrp + + if (fnc != FNC_NO || src || ttp == TAB_PIVOT) { + // Catalog like table + for (crp = qrp->Colresp; !rc && crp; crp = crp->Next) { + cnm = (ttp == TAB_PIVOT) ? crp->Name : encode(g, crp->Name); + typ = crp->Type; + len = crp->Length; + dec = crp->Prec; + flg = crp->Flag; + v = (crp->Kdata->IsUnsigned()) ? 'U' : crp->Var; + tm = (crp->Kdata->IsNullable()) ? 0 : NOT_NULL_FLAG; + + if (!len && typ == TYPE_STRING) + len = 256; // STRBLK's have 0 length - break; - case FLD_TYPE: - typ = crp->Kdata->GetIntValue(i); - v = (crp->Nulls) ? crp->Nulls[i] : 0; - break; - case FLD_TYPENAME: - tn = crp->Kdata->GetCharValue(i); - break; - case FLD_PREC: - // PREC must be always before LENGTH - len = prec = crp->Kdata->GetIntValue(i); - break; - case FLD_LENGTH: - len = crp->Kdata->GetIntValue(i); - break; - case FLD_SCALE: - dec = (!crp->Kdata->IsNull(i)) ? crp->Kdata->GetIntValue(i) : -1; - break; - case FLD_NULL: - if (crp->Kdata->GetIntValue(i)) - tm = 0; // Nullable - - break; - case FLD_FORMAT: - fmt = (crp->Kdata) ? 
crp->Kdata->GetCharValue(i) : NULL; - break; - case FLD_REM: - rem = crp->Kdata->GetCharValue(i); - break; - // case FLD_CHARSET: - // No good because remote table is already translated - // if (*(csn= crp->Kdata->GetCharValue(i))) - // cs= get_charset_by_name(csn, 0); - - // break; - case FLD_DEFAULT: - dft = crp->Kdata->GetCharValue(i); - break; - case FLD_EXTRA: - xtra = crp->Kdata->GetCharValue(i); + // Now add the field + if (add_field(&sql, cnm, typ, len, dec, NULL, tm, + NULL, NULL, NULL, NULL, flg, dbf, v)) + rc = HA_ERR_OUT_OF_MEM; + } // endfor crp - // Auto_increment is not supported yet - if (!stricmp(xtra, "AUTO_INCREMENT")) - xtra = NULL; + } else { + char *schem = NULL; + char *tn = NULL; - break; - case FLD_KEY: - if (ttp == TAB_VIR) - key = crp->Kdata->GetCharValue(i); + // Not a catalog table + if (!qrp->Nblin) { + if (tab) + sprintf(g->Message, "Cannot get columns from %s", tab); + else + strcpy(g->Message, "Fail to retrieve columns"); - break; - case FLD_SCHEM: -#if defined(ODBC_SUPPORT) || defined(JDBC_SUPPORT) - if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) { - if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) { - sprintf(g->Message, - "Several %s tables found, specify DBNAME", tab); - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } else if (!schem) - schem = crp->Kdata->GetCharValue(i); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif !nblin + + for (i = 0; !rc && i < qrp->Nblin; i++) { + typ = len = prec = dec = 0; + tm = NOT_NULL_FLAG; + cnm = (char*)"noname"; + dft = xtra = key = fmt = tn = NULL; + v = ' '; + rem = NULL; + + for (crp = qrp->Colresp; crp; crp = crp->Next) + switch (crp->Fld) { + case FLD_NAME: + if (ttp == TAB_PRX || + (ttp == TAB_CSV && topt->data_charset && + (!stricmp(topt->data_charset, "UTF8") || + !stricmp(topt->data_charset, "UTF-8")))) + cnm = crp->Kdata->GetCharValue(i); + else + cnm = encode(g, crp->Kdata->GetCharValue(i)); - } // endif ttp -#endif // ODBC_SUPPORT || JDBC_SUPPORT - default: - break; // Ignore - } // endswitch Fld + break; + case FLD_TYPE: + typ = crp->Kdata->GetIntValue(i); + v = (crp->Nulls) ? crp->Nulls[i] : 0; + break; + case FLD_TYPENAME: + tn = crp->Kdata->GetCharValue(i); + break; + case FLD_PREC: + // PREC must be always before LENGTH + len = prec = crp->Kdata->GetIntValue(i); + break; + case FLD_LENGTH: + len = crp->Kdata->GetIntValue(i); + break; + case FLD_SCALE: + dec = (!crp->Kdata->IsNull(i)) ? crp->Kdata->GetIntValue(i) : -1; + break; + case FLD_NULL: + if (crp->Kdata->GetIntValue(i)) + tm = 0; // Nullable -#if defined(ODBC_SUPPORT) - if (ttp == TAB_ODBC) { - int plgtyp; - bool w = false; // Wide character type - - // typ must be PLG type, not SQL type - if (!(plgtyp = TranslateSQLType(typ, dec, prec, v, w))) { - if (GetTypeConv() == TPC_SKIP) { - // Skip this column - sprintf(g->Message, "Column %s skipped (unsupported type %d)", - cnm, typ); - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); - continue; - } else { - sprintf(g->Message, "Unsupported SQL type %d", typ); - rc = HA_ERR_INTERNAL_ERROR; - goto err; - } // endif type_conv + break; + case FLD_FORMAT: + fmt = (crp->Kdata) ? 
crp->Kdata->GetCharValue(i) : NULL; + break; + case FLD_REM: + rem = crp->Kdata->GetCharValue(i); + break; + // case FLD_CHARSET: + // No good because remote table is already translated + // if (*(csn= crp->Kdata->GetCharValue(i))) + // cs= get_charset_by_name(csn, 0); + + // break; + case FLD_DEFAULT: + dft = crp->Kdata->GetCharValue(i); + break; + case FLD_EXTRA: + xtra = crp->Kdata->GetCharValue(i); - } else - typ = plgtyp; + // Auto_increment is not supported yet + if (!stricmp(xtra, "AUTO_INCREMENT")) + xtra = NULL; - switch (typ) { - case TYPE_STRING: - if (w) { - sprintf(g->Message, "Column %s is wide characters", cnm); - push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message); - } // endif w + break; + case FLD_KEY: + if (ttp == TAB_VIR) + key = crp->Kdata->GetCharValue(i); - break; - case TYPE_DOUBLE: - // Some data sources do not count dec in length (prec) - prec += (dec + 2); // To be safe - break; - case TYPE_DECIM: - prec = len; - break; - default: - dec = 0; - } // endswitch typ + break; + case FLD_SCHEM: +#if defined(ODBC_SUPPORT) || defined(JDBC_SUPPORT) + if ((ttp == TAB_ODBC || ttp == TAB_JDBC) && crp->Kdata) { + if (schem && stricmp(schem, crp->Kdata->GetCharValue(i))) { + sprintf(g->Message, + "Several %s tables found, specify DBNAME", tab); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } else if (!schem) + schem = crp->Kdata->GetCharValue(i); + + } // endif ttp +#endif // ODBC_SUPPORT || JDBC_SUPPORT + default: + break; // Ignore + } // endswitch Fld - } else -#endif // ODBC_SUPPORT -#if defined(JDBC_SUPPORT) - if (ttp == TAB_JDBC) { +#if defined(ODBC_SUPPORT) + if (ttp == TAB_ODBC) { int plgtyp; + bool w = false; // Wide character type // typ must be PLG type, not SQL type - if (!(plgtyp = TranslateJDBCType(typ, tn, dec, prec, v))) { + if (!(plgtyp = TranslateSQLType(typ, dec, prec, v, w))) { if (GetTypeConv() == TPC_SKIP) { // Skip this column sprintf(g->Message, "Column %s skipped (unsupported type %d)", @@ -5966,43 +5870,84 @@ static int connect_assisted_discovery(handlerton *, THD* thd, typ = plgtyp; switch (typ) { + case TYPE_STRING: + if (w) { + sprintf(g->Message, "Column %s is wide characters", cnm); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, 0, g->Message); + } // endif w + + break; case TYPE_DOUBLE: - case TYPE_DECIM: // Some data sources do not count dec in length (prec) prec += (dec + 2); // To be safe break; + case TYPE_DECIM: + prec = len; + break; default: dec = 0; } // endswitch typ } else #endif // ODBC_SUPPORT - // Make the arguments as required by add_fields - if (typ == TYPE_DOUBLE) - prec = len; +#if defined(JDBC_SUPPORT) + if (ttp == TAB_JDBC) { + int plgtyp; + + // typ must be PLG type, not SQL type + if (!(plgtyp = TranslateJDBCType(typ, tn, dec, prec, v))) { + if (GetTypeConv() == TPC_SKIP) { + // Skip this column + sprintf(g->Message, "Column %s skipped (unsupported type %d)", + cnm, typ); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + continue; + } else { + sprintf(g->Message, "Unsupported SQL type %d", typ); + rc = HA_ERR_INTERNAL_ERROR; + goto err; + } // endif type_conv - if (typ == TYPE_DATE) - prec = 0; + } else + typ = plgtyp; - // Now add the field - if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra, - fmt, 0, dbf, v)) - rc = HA_ERR_OUT_OF_MEM; - } // endfor i + switch (typ) { + case TYPE_DOUBLE: + case TYPE_DECIM: + // Some data sources do not count dec in length (prec) + prec += (dec + 2); // To be safe + break; + default: + dec = 0; + } // endswitch typ + + } else +#endif // 
ODBC_SUPPORT + // Make the arguments as required by add_fields + if (typ == TYPE_DOUBLE) + prec = len; - } // endif fnc + if (typ == TYPE_DATE) + prec = 0; - if (!rc) - rc = init_table_share(thd, table_s, create_info, &sql); + // Now add the field + if (add_field(&sql, cnm, typ, prec, dec, key, tm, rem, dft, xtra, + fmt, 0, dbf, v)) + rc = HA_ERR_OUT_OF_MEM; + } // endfor i - //g->jump_level--; - //PopUser(xp); - //return rc; - } else { - rc = HA_ERR_UNSUPPORTED; - } // endif ok + } // endif fnc + + if (!rc) + rc = init_table_share(thd, table_s, create_info, &sql); + + //g->jump_level--; + //PopUser(xp); + //return rc; + } else { + rc = HA_ERR_UNSUPPORTED; + } // endif ok -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); @@ -6013,16 +5958,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } // end catch err: -#else // !USE_TRY - err: - g->jump_level--; -#endif // !USE_TRY if (rc) my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); -#if !defined(USE_TRY) - jer: -#endif // !USE_TRY PopUser(xp); return rc; } // end of connect_assisted_discovery @@ -6189,8 +6127,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (!part_info) #endif // WITH_PARTITION_STORAGE_ENGINE {const char *src= options->srcdef; - char *host, *db, *tab= (char*)options->tabname; - int port; + PCSZ host, db, tab= options->tabname; + int port; host= GetListOption(g, "host", options->oplist, NULL); db= GetStringOption("database", NULL); @@ -6234,8 +6172,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endswitch ttp if (type == TAB_XML) { - bool dom; // True: MS-DOM, False libxml2 - char *xsup= GetListOption(g, "Xmlsup", options->oplist, "*"); + bool dom; // True: MS-DOM, False libxml2 + PCSZ xsup= GetListOption(g, "Xmlsup", options->oplist, "*"); // Note that if no support is specified, the default is MS-DOM // on Windows and libxml2 otherwise @@ -6495,15 +6433,15 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (options->zipped) { // Check whether the zip entry must be made from a file - char *fn = GetListOption(g, "Load", options->oplist, NULL); + PCSZ fn = GetListOption(g, "Load", options->oplist, NULL); if (fn) { - char zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH]; - char *entry = GetListOption(g, "Entry", options->oplist, NULL); - char *a = GetListOption(g, "Append", options->oplist, "NO"); - bool append = *a == '1' || *a == 'Y' || *a == 'y' || !stricmp(a, "ON"); - char *m = GetListOption(g, "Mulentries", options->oplist, "NO"); - bool mul = *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON"); + char zbuf[_MAX_PATH], buf[_MAX_PATH], dbpath[_MAX_PATH]; + PCSZ entry = GetListOption(g, "Entry", options->oplist, NULL); + PCSZ a = GetListOption(g, "Append", options->oplist, "NO"); + bool append = *a == '1' || *a == 'Y' || *a == 'y' || !stricmp(a, "ON"); + PCSZ m = GetListOption(g, "Mulentries", options->oplist, "NO"); + bool mul = *m == '1' || *m == 'Y' || *m == 'y' || !stricmp(m, "ON"); if (!entry && !mul) { my_message(ER_UNKNOWN_ERROR, "Missing entry name", MYF(0)); @@ -6575,8 +6513,6 @@ int ha_connect::create(const char *name, TABLE *table_arg, my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); rc = HA_ERR_INTERNAL_ERROR; } else if (cat) { -// cat->SetDataPath(g, table_arg->s->db.str); - #if defined(WITH_PARTITION_STORAGE_ENGINE) if (part_info) strncpy(partname, @@ -6632,8 +6568,9 @@ bool ha_connect::FileExists(const char *fn, bool bf) return true; if (table) { - char *s, tfn[_MAX_PATH], 
filename[_MAX_PATH], path[_MAX_PATH]; - bool b= false; + const char *s; + char tfn[_MAX_PATH], filename[_MAX_PATH], path[_MAX_PATH]; + bool b= false; int n; struct stat info; @@ -6690,9 +6627,9 @@ bool ha_connect::CheckString(const char *str1, const char *str2) /** check whether a string option have changed */ -bool ha_connect::SameString(TABLE *tab, char *opn) +bool ha_connect::SameString(TABLE *tab, PCSZ opn) { - char *str1, *str2; + PCSZ str1, str2; tshp= tab->s; // The altered table str1= GetStringOption(opn); @@ -6704,7 +6641,7 @@ bool ha_connect::SameString(TABLE *tab, char *opn) /** check whether a Boolean option have changed */ -bool ha_connect::SameBool(TABLE *tab, char *opn) +bool ha_connect::SameBool(TABLE *tab, PCSZ opn) { bool b1, b2; @@ -6718,7 +6655,7 @@ bool ha_connect::SameBool(TABLE *tab, char *opn) /** check whether an integer option have changed */ -bool ha_connect::SameInt(TABLE *tab, char *opn) +bool ha_connect::SameInt(TABLE *tab, PCSZ opn) { int i1, i2; @@ -6897,7 +6834,7 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, // Conversion to outward table is only allowed for file based // tables whose file does not exist. tshp= altered_table->s; - char *fn= GetStringOption("filename"); + PCSZ fn= GetStringOption("filename"); tshp= NULL; if (FileExists(fn, false)) { @@ -7142,10 +7079,10 @@ maria_declare_plugin(connect) PLUGIN_LICENSE_GPL, connect_init_func, /* Plugin Init */ connect_done_func, /* Plugin Deinit */ - 0x0105, /* version number (1.05) */ + 0x0106, /* version number (1.05) */ NULL, /* status variables */ connect_system_variables, /* system variables */ - "1.05.0003", /* string version */ - MariaDB_PLUGIN_MATURITY_STABLE /* maturity */ + "1.06.0001", /* string version */ + MariaDB_PLUGIN_MATURITY_BETA /* maturity */ } maria_declare_plugin_end; diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h index 155bec3c966..8d8307b4bd1 100644 --- a/storage/connect/ha_connect.h +++ b/storage/connect/ha_connect.h @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /** @file ha_connect.h @@ -61,7 +61,7 @@ public: oldopn= newopn= NULL; oldpix= newpix= NULL;} - inline char *SetName(PGLOBAL g, char *name) {return PlugDup(g, name);} + inline char *SetName(PGLOBAL g, PCSZ name) {return PlugDup(g, name);} bool oldsep; // Sepindex before create/alter bool newsep; // Sepindex after create/alter @@ -168,18 +168,18 @@ public: static bool connect_init(void); static bool connect_end(void); TABTYPE GetRealType(PTOS pos= NULL); - char *GetRealString(const char *s); - char *GetStringOption(char *opname, char *sdef= NULL); + char *GetRealString(PCSZ s); + PCSZ GetStringOption(PCSZ opname, PCSZ sdef= NULL); PTOS GetTableOptionStruct(TABLE_SHARE *s= NULL); - bool GetBooleanOption(char *opname, bool bdef); - bool SetBooleanOption(char *opname, bool b); - int GetIntegerOption(char *opname); - bool GetIndexOption(KEY *kp, char *opname); - bool CheckString(const char *str1, const char *str2); - bool SameString(TABLE *tab, char *opn); - bool SetIntegerOption(char *opname, int n); - bool SameInt(TABLE *tab, char *opn); - bool SameBool(TABLE *tab, char *opn); + bool GetBooleanOption(PCSZ opname, bool bdef); + bool SetBooleanOption(PCSZ opname, bool b); + int GetIntegerOption(PCSZ opname); + bool 
GetIndexOption(KEY *kp, PCSZ opname); + bool CheckString(PCSZ str1, PCSZ str2); + bool SameString(TABLE *tab, PCSZ opn); + bool SetIntegerOption(PCSZ opname, int n); + bool SameInt(TABLE *tab, PCSZ opn); + bool SameBool(TABLE *tab, PCSZ opn); bool FileExists(const char *fn, bool bf); bool NoFieldOptionChange(TABLE *tab); PFOS GetFieldOptionStruct(Field *fp); @@ -187,8 +187,8 @@ public: PXOS GetIndexOptionStruct(KEY *kp); PIXDEF GetIndexInfo(TABLE_SHARE *s= NULL); bool CheckVirtualIndex(TABLE_SHARE *s); - const char *GetDBName(const char *name); - const char *GetTableName(void); + PCSZ GetDBName(PCSZ name); + PCSZ GetTableName(void); char *GetPartName(void); //int GetColNameLen(Field *fp); //char *GetColName(Field *fp); @@ -197,9 +197,9 @@ public: bool IsSameIndex(PIXDEF xp1, PIXDEF xp2); bool IsPartitioned(void); bool IsUnique(uint n); - char *GetDataPath(void) {return (char*)datapath;} + PCSZ GetDataPath(void) {return datapath;} - bool SetDataPath(PGLOBAL g, const char *path); + bool SetDataPath(PGLOBAL g, PCSZ path); PTDB GetTDB(PGLOBAL g); int OpenTable(PGLOBAL g, bool del= false); bool CheckColumnList(PGLOBAL g); @@ -513,7 +513,7 @@ protected: ulong hnum; // The number of this handler query_id_t valid_query_id; // The one when tdbp was allocated query_id_t creat_query_id; // The one when handler was allocated - char *datapath; // Is the Path of DB data directory + PCSZ datapath; // Is the Path of DB data directory PTDB tdbp; // To table class object PVAL sdvalin1; // Used to convert date values PVAL sdvalin2; // Used to convert date values diff --git a/storage/connect/inihandl.c b/storage/connect/inihandl.c index 46102557b20..0ce0eb9fa0d 100644 --- a/storage/connect/inihandl.c +++ b/storage/connect/inihandl.c @@ -16,7 +16,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ #include "my_global.h" diff --git a/storage/connect/ioapi.c b/storage/connect/ioapi.c index 7f5c191b2af..a49da91f7f0 100644 --- a/storage/connect/ioapi.c +++ b/storage/connect/ioapi.c @@ -27,6 +27,7 @@ #include "ioapi.h" +#include "my_attribute.h" voidpf call_zopen64 (const zlib_filefunc64_32_def* pfilefunc,const void*filename,int mode) { @@ -92,7 +93,7 @@ static long ZCALLBACK fseek64_file_func OF((voidpf opaque, voidpf stream, ZPO static int ZCALLBACK fclose_file_func OF((voidpf opaque, voidpf stream)); static int ZCALLBACK ferror_file_func OF((voidpf opaque, voidpf stream)); -static voidpf ZCALLBACK fopen_file_func (voidpf opaque, const char* filename, int mode) +static voidpf ZCALLBACK fopen_file_func (voidpf opaque __attribute__((unused)), const char* filename, int mode) { FILE* file = NULL; const char* mode_fopen = NULL; @@ -110,7 +111,7 @@ static voidpf ZCALLBACK fopen_file_func (voidpf opaque, const char* filename, in return file; } -static voidpf ZCALLBACK fopen64_file_func (voidpf opaque, const void* filename, int mode) +static voidpf ZCALLBACK fopen64_file_func (voidpf opaque __attribute__((unused)), const void* filename, int mode) { FILE* file = NULL; const char* mode_fopen = NULL; @@ -129,21 +130,21 @@ static voidpf ZCALLBACK fopen64_file_func (voidpf opaque, const void* filename, } -static uLong ZCALLBACK fread_file_func (voidpf opaque, voidpf stream, void* buf, uLong size) +static uLong ZCALLBACK fread_file_func (voidpf opaque __attribute__((unused)), 
voidpf stream, void* buf, uLong size) { uLong ret; ret = (uLong)fread(buf, 1, (size_t)size, (FILE *)stream); return ret; } -static uLong ZCALLBACK fwrite_file_func (voidpf opaque, voidpf stream, const void* buf, uLong size) +static uLong ZCALLBACK fwrite_file_func (voidpf opaque __attribute__((unused)), voidpf stream, const void* buf, uLong size) { uLong ret; ret = (uLong)fwrite(buf, 1, (size_t)size, (FILE *)stream); return ret; } -static long ZCALLBACK ftell_file_func (voidpf opaque, voidpf stream) +static long ZCALLBACK ftell_file_func (voidpf opaque __attribute__((unused)), voidpf stream) { long ret; ret = ftell((FILE *)stream); @@ -151,14 +152,14 @@ static long ZCALLBACK ftell_file_func (voidpf opaque, voidpf stream) } -static ZPOS64_T ZCALLBACK ftell64_file_func (voidpf opaque, voidpf stream) +static ZPOS64_T ZCALLBACK ftell64_file_func (voidpf opaque __attribute__((unused)), voidpf stream) { ZPOS64_T ret; ret = FTELLO_FUNC((FILE *)stream); return ret; } -static long ZCALLBACK fseek_file_func (voidpf opaque, voidpf stream, uLong offset, int origin) +static long ZCALLBACK fseek_file_func (voidpf opaque __attribute__((unused)), voidpf stream, uLong offset, int origin) { int fseek_origin=0; long ret; @@ -181,7 +182,7 @@ static long ZCALLBACK fseek_file_func (voidpf opaque, voidpf stream, uLong offs return ret; } -static long ZCALLBACK fseek64_file_func (voidpf opaque, voidpf stream, ZPOS64_T offset, int origin) +static long ZCALLBACK fseek64_file_func (voidpf opaque __attribute__((unused)), voidpf stream, ZPOS64_T offset, int origin) { int fseek_origin=0; long ret; @@ -207,14 +208,14 @@ static long ZCALLBACK fseek64_file_func (voidpf opaque, voidpf stream, ZPOS64_T } -static int ZCALLBACK fclose_file_func (voidpf opaque, voidpf stream) +static int ZCALLBACK fclose_file_func (voidpf opaque __attribute__((unused)), voidpf stream) { int ret; ret = fclose((FILE *)stream); return ret; } -static int ZCALLBACK ferror_file_func (voidpf opaque, voidpf stream) +static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), voidpf stream) { int ret; ret = ferror((FILE *)stream); diff --git a/storage/connect/ioapi.h b/storage/connect/ioapi.h index 8dcbdb06e35..4fa73002053 100644 --- a/storage/connect/ioapi.h +++ b/storage/connect/ioapi.h @@ -129,8 +129,9 @@ extern "C" { #endif #endif - - +#ifndef OF +#define OF(args) args +#endif typedef voidpf (ZCALLBACK *open_file_func) OF((voidpf opaque, const char* filename, int mode)); typedef uLong (ZCALLBACK *read_file_func) OF((voidpf opaque, voidpf stream, void* buf, uLong size)); diff --git a/storage/connect/jdbccat.h b/storage/connect/jdbccat.h index 7108aa376ce..0b87df8bb51 100644 --- a/storage/connect/jdbccat.h +++ b/storage/connect/jdbccat.h @@ -4,10 +4,10 @@ typedef struct jdbc_parms { int CheckSize(int rows); - char *Driver; // JDBC driver - char *Url; // Driver URL - char *User; // User connect info - char *Pwd; // Password connect info + PCSZ Driver; // JDBC driver + PCSZ Url; // Driver URL + PCSZ User; // User connect info + PCSZ Pwd; // Password connect info //char *Properties; // Connection property list //int Cto; // Connect timeout //int Qto; // Query timeout @@ -19,12 +19,12 @@ typedef struct jdbc_parms { /* JDBC catalog function prototypes. 
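Review note on the ioapi hunks above: the patch marks the unused opaque callback parameters with __attribute__((unused)) (a GCC/clang extension) and gives ioapi.h a fallback definition of OF(), zlib's old K&R-prototype wrapper that modern builds expand to the plain argument list. A minimal sketch of both, compiled in isolation with illustrative names:

#include <cstdio>

#ifndef OF
#define OF(args) args            // expands to the ordinary argument list
#endif

typedef unsigned long (*read_file_func) OF((void *opaque, void *stream,
                                            void *buf, unsigned long size));

static unsigned long fread_cb(void *opaque __attribute__((unused)),
                              void *stream, void *buf, unsigned long size) {
  // opaque must stay in the signature to match the callback typedef,
  // the attribute only silences the unused-parameter warning.
  return (unsigned long)std::fread(buf, 1, (std::size_t)size, (FILE*)stream);
} // end of fread_cb

int main() {
  read_file_func rf = fread_cb;   // the typedef and the callback agree
  (void)rf;
  return 0;
} // end of main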
*/ /***********************************************************************/ #if defined(PROMPT_OK) -char *JDBCCheckConnection(PGLOBAL g, char *dsn, int cop); +char *JDBCCheckConnection(PGLOBAL g, PCSZ dsn, int cop); #endif // PROMPT_OK //PQRYRES JDBCDataSources(PGLOBAL g, int maxres, bool info); -PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, - char *colpat, int maxres, bool info, PJPARM sop); -PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sop); -PQRYRES JDBCTables(PGLOBAL g, char *db, char *tabpat, - char *tabtyp, int maxres, bool info, PJPARM sop); +PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table, + PCSZ colpat, int maxres, bool info, PJPARM sop); +PQRYRES JDBCSrcCols(PGLOBAL g, PCSZ src, PJPARM sop); +PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat, + PCSZ tabtyp, int maxres, bool info, PJPARM sop); PQRYRES JDBCDrivers(PGLOBAL g, int maxres, bool info); diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index e8a260c8be9..f162a7ae645 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -189,8 +189,8 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v) /***********************************************************************/ /* Allocate the structure used to refer to the result set. */ /***********************************************************************/ -static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, char *db, - char *tab, PQRYRES qrp) +static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, PCSZ db, + PCSZ tab, PQRYRES qrp) { JCATPARM *cap; @@ -213,7 +213,7 @@ static JCATPARM *AllocCatInfo(PGLOBAL g, JCATINFO fid, char *db, /* JDBCColumns: constructs the result blocks containing all columns */ /* of a JDBC table that will be retrieved by GetData commands. */ /***********************************************************************/ -PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat, +PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table, PCSZ colpat, int maxres, bool info, PJPARM sjp) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING, @@ -316,7 +316,7 @@ PQRYRES JDBCColumns(PGLOBAL g, char *db, char *table, char *colpat, /* JDBCSrcCols: constructs the result blocks containing the */ /* description of all the columns of a Srcdef option. */ /**************************************************************************/ -PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp) +PQRYRES JDBCSrcCols(PGLOBAL g, PCSZ src, PJPARM sjp) { char *sqry; PQRYRES qrp; @@ -330,7 +330,7 @@ PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp) sqry = (char*)PlugSubAlloc(g, NULL, strlen(src) + 2); sprintf(sqry, src, "1=1"); // dummy where clause } else - sqry = src; + sqry = (char*)src; qrp = jcp->GetMetaData(g, sqry); jcp->Close(); @@ -341,7 +341,7 @@ PQRYRES JDBCSrcCols(PGLOBAL g, char *src, PJPARM sjp) /* JDBCTables: constructs the result blocks containing all tables in */ /* an JDBC database that will be retrieved by GetData commands. */ /**************************************************************************/ -PQRYRES JDBCTables(PGLOBAL g, char *db, char *tabpat, char *tabtyp, +PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat, PCSZ tabtyp, int maxres, bool info, PJPARM sjp) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, @@ -1059,7 +1059,7 @@ int JDBConn::Open(PJPARM sop) /***********************************************************************/ /* Execute an SQL command. 
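Review note on the JDBCSrcCols hunk above: once src is a PCSZ, a writable buffer is only built when the source definition needs its placeholder substituted; otherwise the const pointer is cast for the legacy char* interface, which never writes to it. The placeholder test itself sits outside the hunk, so the strstr call and the names below are illustrative only:

#include <cstdio>
#include <cstring>

typedef const char *PCSZ;              // assumption: CONNECT's const-string typedef

static void GetMetaData(char *sql) {   // legacy interface, reads the string only
  std::puts(sql);
} // end of GetMetaData

static void SrcCols(PCSZ src) {
  char sqry[256];

  if (std::strstr(src, "%s")) {        // placeholder: format a writable copy
    std::snprintf(sqry, sizeof(sqry), src, "1=1");   // dummy where clause
    GetMetaData(sqry);
  } else
    GetMetaData((char*)src);           // cast is safe, callee never writes
} // end of SrcCols

int main() {
  SrcCols("SELECT * FROM t1 WHERE %s");
  SrcCols("SELECT * FROM t1");
  return 0;
} // end of main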
*/ /***********************************************************************/ -int JDBConn::ExecSQLcommand(char *sql) +int JDBConn::ExecSQLcommand(PCSZ sql) { int rc; jint n; @@ -1142,7 +1142,7 @@ int JDBConn::Fetch(int pos) /***********************************************************************/ /* Restart from beginning of result set */ /***********************************************************************/ -int JDBConn::Rewind(char *sql) +int JDBConn::Rewind(PCSZ sql) { int rbuf = -1; @@ -1200,11 +1200,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) if (rank == 0) if (!name || (jn = env->NewStringUTF(name)) == nullptr) { sprintf(g->Message, "Fail to allocate jstring %s", SVP(name)); -#if defined(USE_TRY) throw TYPE_AM_JDBC; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); -#endif // !USE_TRY } // endif name // Returns 666 is case of error @@ -1212,11 +1208,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) if (Check((ctyp == 666) ? -1 : 1)) { sprintf(g->Message, "Getting ctyp: %s", Msg); -#if defined(USE_TRY) throw TYPE_AM_JDBC; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); -#endif // !USE_TRY } // endif Check if (val->GetNullable()) @@ -1235,7 +1227,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) case 12: // VARCHAR case -1: // LONGVARCHAR case 1: // CHAR - case 3: // DECIMAL + case 3: // DECIMAL if (jb && ctyp != 3) cn = (jstring)jb; else if (!gmID(g, chrfldid, "StringField", "(ILjava/lang/String;)Ljava/lang/String;")) @@ -1323,11 +1315,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) env->DeleteLocalRef(jn); sprintf(g->Message, "SetColumnValue: %s rank=%d ctyp=%d", Msg, rank, (int)ctyp); -#if defined(USE_TRY) throw TYPE_AM_JDBC; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_JDBC); -#endif // !USE_TRY } // endif Check if (rank == 0) @@ -1338,7 +1326,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) /***********************************************************************/ /* Prepare an SQL statement for insert. */ /***********************************************************************/ -bool JDBConn::PrepareSQL(char *sql) +bool JDBConn::PrepareSQL(PCSZ sql) { bool b = true; PGLOBAL& g = m_G; @@ -1361,7 +1349,7 @@ bool JDBConn::PrepareSQL(char *sql) /***********************************************************************/ /* Execute an SQL query that returns a result set. */ /***********************************************************************/ -int JDBConn::ExecuteQuery(char *sql) +int JDBConn::ExecuteQuery(PCSZ sql) { int rc = RC_FX; jint ncol; @@ -1389,7 +1377,7 @@ int JDBConn::ExecuteQuery(char *sql) /***********************************************************************/ /* Execute an SQL query and get the affected rows. */ /***********************************************************************/ -int JDBConn::ExecuteUpdate(char *sql) +int JDBConn::ExecuteUpdate(PCSZ sql) { int rc = RC_FX; jint n; @@ -1417,7 +1405,7 @@ int JDBConn::ExecuteUpdate(char *sql) /***********************************************************************/ /* Get the number of lines of the result set. 
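Review note on the SetColumnValue hunks above, and on the same conversion applied throughout this patch: every longjmp(g->jumper[g->jump_level], n) becomes throw n, and the code that used to sit after the setjmp test becomes a catch (int n) block, so no jump_level bookkeeping is needed on the way out. A minimal stand-alone sketch of the pattern, with illustrative names rather than the real CONNECT types:

#include <cstdio>
#include <cstring>

struct Global { char Message[300]; };   // stand-in for PGLOBAL

static void SetValue(Global *g, bool fail) {
  if (fail) {
    std::strcpy(g->Message, "Getting ctyp failed");
    throw 7;                    // was: longjmp(g->jumper[g->jump_level], 7)
  } // endif fail
} // end of SetValue

static bool Fetch(Global *g) {
  bool brc = false;

  try {                         // was: if (!setjmp(g->jumper[++g->jump_level]))
    SetValue(g, true);
  } catch (int n) {             // error path, no g->jump_level-- required
    std::printf("Exception %d: %s\n", n, g->Message);
    brc = true;
  } // end catch

  return brc;
} // end of Fetch

int main() {
  Global g;
  return Fetch(&g) ? 1 : 0;
} // end of main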
*/ /***********************************************************************/ -int JDBConn::GetResultSize(char *sql, JDBCCOL *colp) +int JDBConn::GetResultSize(PCSZ sql, JDBCCOL *colp) { int rc, n = 0; @@ -1655,7 +1643,7 @@ bool JDBConn::SetParam(JDBCCOL *colp) /* GetMetaData: constructs the result blocks containing the */ /* description of all the columns of an SQL command. */ /**************************************************************************/ - PQRYRES JDBConn::GetMetaData(PGLOBAL g, char *src) + PQRYRES JDBConn::GetMetaData(PGLOBAL g, PCSZ src) { static int buftyp[] = {TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_INT, TYPE_INT}; @@ -1857,7 +1845,7 @@ bool JDBConn::SetParam(JDBCCOL *colp) PGLOBAL& g = m_G; // void *buffer; int i, ncol; - PSZ fnc = "Unknown"; + PCSZ fnc = "Unknown"; uint n; short len, tp; int crow = 0; diff --git a/storage/connect/jdbconn.h b/storage/connect/jdbconn.h index 9d428142839..73271c8f5be 100644 --- a/storage/connect/jdbconn.h +++ b/storage/connect/jdbconn.h @@ -46,9 +46,9 @@ enum JCATINFO { typedef struct tagJCATPARM { JCATINFO Id; // Id to indicate function PQRYRES Qrp; // Result set pointer - char *DB; // Database (Schema) - char *Tab; // Table name or pattern - char *Pat; // Table type or column pattern + PCSZ DB; // Database (Schema) + PCSZ Tab; // Table name or pattern + PCSZ Pat; // Table type or column pattern } JCATPARM; typedef jint(JNICALL *CRTJVM) (JavaVM **, void **, void *); @@ -77,7 +77,7 @@ public: JDBConn(PGLOBAL g, TDBJDBC *tdbp); int Open(PJPARM sop); - int Rewind(char *sql); + int Rewind(PCSZ sql); void Close(void); PQRYRES AllocateResult(PGLOBAL g); @@ -96,19 +96,19 @@ public: //void SetQueryTimeout(DWORD sec) {m_QueryTimeout = sec;} //void SetUserName(PSZ user) {m_User = user;} //void SetUserPwd(PSZ pwd) {m_Pwd = pwd;} - int GetResultSize(char *sql, JDBCCOL *colp); - int ExecuteQuery(char *sql); - int ExecuteUpdate(char *sql); + int GetResultSize(PCSZ sql, JDBCCOL *colp); + int ExecuteQuery(PCSZ sql); + int ExecuteUpdate(PCSZ sql); int Fetch(int pos = 0); - bool PrepareSQL(char *sql); + bool PrepareSQL(PCSZ sql); int ExecuteSQL(void); bool SetParam(JDBCCOL *colp); - int ExecSQLcommand(char *sql); + int ExecSQLcommand(PCSZ sql); void SetColumnValue(int rank, PSZ name, PVAL val); int GetCatInfo(JCATPARM *cap); //bool GetDataSources(PQRYRES qrp); bool GetDrivers(PQRYRES qrp); - PQRYRES GetMetaData(PGLOBAL g, char *src); + PQRYRES GetMetaData(PGLOBAL g, PCSZ src); public: // Set static variables @@ -174,16 +174,10 @@ protected: jmethodID timfldid; // The TimeField method ID jmethodID tspfldid; // The TimestampField method ID jmethodID bigfldid; // The BigintField method ID - //DWORD m_LoginTimeout; -//DWORD m_QueryTimeout; -//DWORD m_UpdateOptions; - char *Msg; + PCSZ Msg; char *m_Wrap; char m_IDQuoteChar[2]; -//PSZ m_Driver; -//PSZ m_Url; -//PSZ m_User; - PSZ m_Pwd; + PCSZ m_Pwd; int m_Ncol; int m_Aff; int m_Rows; diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 2aca1377d69..bcea0ec85aa 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -81,123 +81,70 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) pty[0] = false; - -#if defined(USE_TRY) - try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - -#if defined(SE_CATCH) - // Let's try to recover from any kind 
of interrupt - _se_translator_function f = _set_se_translator(trans_func); - try { -#endif // SE_CATCH --------------------- try section -------------------- - if (setjmp(g->jumper[++g->jump_level])) { - goto fin; - } // endif rc - -#if defined(SE_CATCH) // ------------- end of try section ----------------- -} catch (SE_Exception e) { - sprintf(g->Message, "ParseJson: exception doing setjmp: %s (rc=%hd)", - GetExceptionDesc(g, e.nSE), e.nSE); - _set_se_translator(f); - goto err; -} catch (...) { - strcpy(g->Message, "Exception doing setjmp"); - _set_se_translator(f); - goto err; -} // end of try-catches - -_set_se_translator(f); -#endif // SE_CATCH -#endif // !USE_TRY - - for (i = 0; i < len; i++) - switch (s[i]) { - case '[': - if (jsp) - goto tryit; - else if (!(jsp = ParseArray(g, ++i, src, pty))) -#if defined(USE_TRY) - throw 1; -#else // !USE_TRY - goto fin; -#endif // !USE_TRY - - break; - case '{': - if (jsp) - goto tryit; - else if (!(jsp = ParseObject(g, ++i, src, pty))) -#if defined(USE_TRY) - throw 2; -#else // !USE_TRY - goto fin; -#endif // !USE_TRY - - break; - case ' ': - case '\t': - case '\n': - case '\r': - break; - case ',': - if (jsp && (pretty == 1 || pretty == 3)) { - if (comma) - *comma = true; - - pty[0] = pty[2] = false; - break; - } // endif pretty - - sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); -#if defined(USE_TRY) - throw 3; -#else // !USE_TRY - jsp = NULL; - goto fin; -#endif // !USE_TRY - case '(': - b = true; - break; - case ')': - if (b) { - b = false; - break; - } // endif b + for (i = 0; i < len; i++) + switch (s[i]) { + case '[': + if (jsp) + goto tryit; + else if (!(jsp = ParseArray(g, ++i, src, pty))) + throw 1; + + break; + case '{': + if (jsp) + goto tryit; + else if (!(jsp = ParseObject(g, ++i, src, pty))) + throw 2; + + break; + case ' ': + case '\t': + case '\n': + case '\r': + break; + case ',': + if (jsp && (pretty == 1 || pretty == 3)) { + if (comma) + *comma = true; + + pty[0] = pty[2] = false; + break; + } // endif pretty + + sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); + throw 3; + case '(': + b = true; + break; + case ')': + if (b) { + b = false; + break; + } // endif b + + default: + if (jsp) + goto tryit; + else if (!(jsp = ParseValue(g, i, src, pty))) + throw 4; + + break; + }; // endswitch s[i] + + if (!jsp) + sprintf(g->Message, "Invalid Json string '%.*s'", 50, s); + else if (ptyp && pretty == 3) { + *ptyp = 3; // Not recognized pretty + + for (i = 0; i < 3; i++) + if (pty[i]) { + *ptyp = i; + break; + } // endif pty + + } // endif ptyp - default: - if (jsp) - goto tryit; - else if (!(jsp = ParseValue(g, i, src, pty))) -#if defined(USE_TRY) - throw 4; -#else // !USE_TRY - goto fin; -#endif // !USE_TRY - - break; - }; // endswitch s[i] - - if (!jsp) - sprintf(g->Message, "Invalid Json string '%.*s'", 50, s); - else if (ptyp && pretty == 3) { - *ptyp = 3; // Not recognized pretty - - for (i = 0; i < 3; i++) - if (pty[i]) { - *ptyp = i; - break; - } // endif pty - - } // endif ptyp - -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); @@ -208,20 +155,12 @@ _set_se_translator(f); } // end catch return jsp; -#else // !USE_TRY -fin: - g->jump_level--; - return jsp; -#endif // !USE_TRY tryit: if (pty[0] && (!pretty || pretty > 2)) { if ((jsp = ParseArray(g, (i = 0), src, pty)) && ptyp && pretty == 3) *ptyp = (pty[0]) ? 
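For readers comparing the removed and added lines of this ParseJson hunk, a reduced sketch of the conversion it performs: the setjmp guard and numbered jump levels give way to one try block that throws small int codes. GLOBAL below is a stand-in holding only the fields needed here, not the real CONNECT block.

  #include <csetjmp>
  #include <cstdio>
  #include <cstring>

  #define MAX_JUMP 24

  struct GLOBAL {                      // stand-in for PGLOBAL
    char    Message[512];
    int     jump_level;
    jmp_buf jumper[MAX_JUMP + 2];
  };

  // Before: errors travel back through longjmp to the setjmp site.
  static bool parse_old(GLOBAL *g, const char *s)
  {
    if (g->jump_level == MAX_JUMP) {
      std::strcpy(g->Message, "Too many jumps");
      return false;
    } // endif jump_level

    if (setjmp(g->jumper[++g->jump_level])) {
      g->jump_level--;                 // error path lands here
      return false;
    } // endif setjmp

    if (*s != '[' && *s != '{') {
      std::strcpy(g->Message, "Invalid Json string");
      longjmp(g->jumper[g->jump_level], 3);
    } // endif s

    g->jump_level--;
    return true;
  } // end of parse_old

  // After: errors travel back as C++ exceptions, no jump_level bookkeeping.
  static bool parse_new(GLOBAL *g, const char *s)
  {
    try {
      if (*s != '[' && *s != '{') {
        std::strcpy(g->Message, "Invalid Json string");
        throw 3;
      } // endif s

      return true;
    } catch (int n) {
      std::printf("Exception %d: %s\n", n, g->Message);
      return false;
    } // end catch

  } // end of parse_new

  int main()
  {
    GLOBAL g = {};
    std::printf("%d %d\n", parse_old(&g, "x"), parse_new(&g, "{"));
    return 0;
  } // end of main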
0 : 3; -#if !defined(USE_TRY) - g->jump_level--; -#endif // !USE_TRY return jsp; } else strcpy(g->Message, "More than one item in file"); @@ -620,101 +559,75 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src) PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) { PSZ str = NULL; - bool b = false, err = true; - JOUT *jp; + bool b = false, err = true; + JOUT *jp; FILE *fs = NULL; g->Message[0] = 0; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level])) { - str = NULL; - goto fin; - } // endif jmp -#endif // !USE_TRY - - if (!jsp) { - strcpy(g->Message, "Null json tree"); -#if defined(USE_TRY) - throw 1; -#else // !USE_TRY - goto fin; -#endif // !USE_TRY - } else if (!fn) { - // Serialize to a string - jp = new(g) JOUTSTR(g); - b = pretty == 1; - } else { - if (!(fs = fopen(fn, "wb"))) { - sprintf(g->Message, MSG(OPEN_MODE_ERROR), - "w", (int)errno, fn); - strcat(strcat(g->Message, ": "), strerror(errno)); -#if defined(USE_TRY) - throw 2; -#else // !USE_TRY - goto fin; -#endif // !USE_TRY - } else if (pretty >= 2) { - // Serialize to a pretty file - jp = new(g)JOUTPRT(g, fs); + if (!jsp) { + strcpy(g->Message, "Null json tree"); + throw 1; + } else if (!fn) { + // Serialize to a string + jp = new(g) JOUTSTR(g); + b = pretty == 1; } else { - // Serialize to a flat file - b = true; - jp = new(g)JOUTFILE(g, fs, pretty); - } // endif's - - } // endif's - - switch (jsp->GetType()) { - case TYPE_JAR: - err = SerializeArray(jp, (PJAR)jsp, b); - break; - case TYPE_JOB: - err = ((b && jp->Prty()) && jp->WriteChr('\t')); - err |= SerializeObject(jp, (PJOB)jsp); - break; - case TYPE_JVAL: - err = SerializeValue(jp, (PJVAL)jsp); - break; - default: - strcpy(g->Message, "Invalid json tree"); - } // endswitch Type + if (!(fs = fopen(fn, "wb"))) { + sprintf(g->Message, MSG(OPEN_MODE_ERROR), + "w", (int)errno, fn); + strcat(strcat(g->Message, ": "), strerror(errno)); + throw 2; + } else if (pretty >= 2) { + // Serialize to a pretty file + jp = new(g)JOUTPRT(g, fs); + } else { + // Serialize to a flat file + b = true; + jp = new(g)JOUTFILE(g, fs, pretty); + } // endif's + + } // endif's + + switch (jsp->GetType()) { + case TYPE_JAR: + err = SerializeArray(jp, (PJAR)jsp, b); + break; + case TYPE_JOB: + err = ((b && jp->Prty()) && jp->WriteChr('\t')); + err |= SerializeObject(jp, (PJOB)jsp); + break; + case TYPE_JVAL: + err = SerializeValue(jp, (PJVAL)jsp); + break; + default: + strcpy(g->Message, "Invalid json tree"); + } // endswitch Type + + if (fs) { + fputs(EL, fs); + fclose(fs); + str = (err) ? NULL : strcpy(g->Message, "Ok"); + } else if (!err) { + str = ((JOUTSTR*)jp)->Strp; + jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); + } else { + if (!g->Message[0]) + strcpy(g->Message, "Error in Serialize"); - if (fs) { - fputs(EL, fs); - fclose(fs); - str = (err) ? 
NULL : strcpy(g->Message, "Ok"); - } else if (!err) { - str = ((JOUTSTR*)jp)->Strp; - jp->WriteChr('\0'); - PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); - } else { - if (!g->Message[0]) - strcpy(g->Message, "Error in Serialize"); + } // endif's - } // endif's + } catch (int n) { + if (trace) + htrc("Exception %d: %s\n", n, g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + str = NULL; + } // end catch -#if defined(USE_TRY) -} catch (int n) { - if (trace) - htrc("Exception %d: %s\n", n, g->Message); - str = NULL; -} catch (const char *msg) { - strcpy(g->Message, msg); - str = NULL; -} // end catch -#else // !USE_TRY -fin: - g->jump_level--; -#endif // !USE_TRY return str; } // end of Serialize @@ -1023,7 +936,7 @@ return false; /***********************************************************************/ /* Add a new pair to an Object. */ /***********************************************************************/ -PJPR JOBJECT::AddPair(PGLOBAL g, PSZ key) +PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) { PJPR jpp = new(g) JPAIR(key); @@ -1109,7 +1022,7 @@ bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) /***********************************************************************/ /* Set or add a value corresponding to the given key. */ /***********************************************************************/ -void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PSZ key) +void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) { PJPR jp; @@ -1129,7 +1042,7 @@ void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PSZ key) /***********************************************************************/ /* Delete a value corresponding to the given key. */ /***********************************************************************/ -void JOBJECT::DeleteKey(PSZ key) +void JOBJECT::DeleteKey(PCSZ key) { PJPR jp, *pjp = &First; @@ -1308,10 +1221,10 @@ JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() /***********************************************************************/ /* Constructor for a given string. */ /***********************************************************************/ -JVALUE::JVALUE(PGLOBAL g, PSZ strp) : JSON() +JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON() { Jsp = NULL; - Value = AllocateValue(g, strp, TYPE_STRING); + Value = AllocateValue(g, (void*)strp, TYPE_STRING); Next = NULL; Del = false; } // end of JVALUE constructor @@ -1466,6 +1379,6 @@ void JVALUE::SetString(PGLOBAL g, PSZ s, short c) /***********************************************************************/ bool JVALUE::IsNull(void) { - return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsNull() : true; + return (Jsp) ? Jsp->IsNull() : (Value) ? 
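Serialize above (and AllocCatInfo and PlgAllocResult later in this patch) settle on the same pair of handlers: an int for errors whose text was already formatted into g->Message before the throw, and a const char* for code that throws the message itself. A self-contained illustration with a stand-in global block:

  #include <cstdio>
  #include <cstring>

  struct GLOBAL { char Message[512]; };           // stand-in for PGLOBAL

  static void DoStep(GLOBAL *g, int mode)
  {
    if (mode == 1) {
      std::strcpy(g->Message, "Null json tree");  // text prepared first ...
      throw 1;                                    // ... then a numeric code
    } else if (mode == 2)
      throw "unexpected state";                   // or the text itself
  } // end of DoStep

  static const char *RunStep(GLOBAL *g, int mode)
  {
    const char *str = "Ok";

    try {
      DoStep(g, mode);
    } catch (int n) {
      std::printf("Exception %d: %s\n", n, g->Message);
      str = nullptr;
    } catch (const char *msg) {
      std::strcpy(g->Message, msg);               // normalize into g->Message
      str = nullptr;
    } // end catch

    return str;
  } // end of RunStep

  int main()
  {
    GLOBAL g = {};
    for (int m = 0; m < 3; m++)
      std::printf("mode %d -> %s\n", m, RunStep(&g, m) ? "Ok" : g.Message);

    return 0;
  } // end of main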
Value->IsNull() : true; } // end of IsNull diff --git a/storage/connect/json.h b/storage/connect/json.h index 4ea169e1b18..49675ce8559 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -125,14 +125,14 @@ class JPAIR : public BLOCK { friend PJOB ParseObject(PGLOBAL, int&, STRG&, bool*); friend bool SerializeObject(JOUT *, PJOB); public: - JPAIR(PSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;} + JPAIR(PCSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;} - inline PSZ GetKey(void) {return Key;} + inline PCSZ GetKey(void) {return Key;} inline PJVAL GetVal(void) {return Val;} inline PJPR GetNext(void) {return Next;} protected: - PSZ Key; // This pair key name + PCSZ Key; // This pair key name PJVAL Val; // To the value of the pair PJPR Next; // To the next pair }; // end of class JPAIR @@ -150,7 +150,7 @@ class JSON : public BLOCK { virtual JTYP GetValType(void) {X return TYPE_JSON;} virtual void InitArray(PGLOBAL g) {X} //virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;} - virtual PJPR AddPair(PGLOBAL g, PSZ key) {X return NULL;} + virtual PJPR AddPair(PGLOBAL g, PCSZ key) {X return NULL;} virtual PJAR GetKeyList(PGLOBAL g) {X return NULL;} virtual PJVAL GetValue(const char *key) {X return NULL;} virtual PJOB GetObject(void) {return NULL;} @@ -166,13 +166,13 @@ class JSON : public BLOCK { virtual PSZ GetText(PGLOBAL g, PSZ text) {X return NULL;} virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } - virtual void SetValue(PGLOBAL g, PJVAL jvp, PSZ key) {X} + virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) {X} virtual void SetValue(PVAL valp) {X} virtual void SetValue(PJSON jsp) {X} virtual void SetString(PGLOBAL g, PSZ s, short c) {X} virtual void SetInteger(PGLOBAL g, int n) {X} virtual void SetFloat(PGLOBAL g, double f) {X} - virtual void DeleteKey(char *k) {X} + virtual void DeleteKey(PCSZ k) {X} virtual bool DeleteValue(int i) {X return true;} virtual bool IsNull(void) {X return true;} @@ -195,14 +195,14 @@ class JOBJECT : public JSON { virtual void Clear(void) {First = Last = NULL; Size = 0;} virtual JTYP GetType(void) {return TYPE_JOB;} virtual PJPR GetFirst(void) {return First;} - virtual PJPR AddPair(PGLOBAL g, PSZ key); + virtual PJPR AddPair(PGLOBAL g, PCSZ key); virtual PJOB GetObject(void) {return this;} virtual PJVAL GetValue(const char* key); virtual PJAR GetKeyList(PGLOBAL g); virtual PSZ GetText(PGLOBAL g, PSZ text); virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual void SetValue(PGLOBAL g, PJVAL jvp, PSZ key); - virtual void DeleteKey(char *k); + virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key); + virtual void DeleteKey(PCSZ k); virtual bool IsNull(void); protected: @@ -253,7 +253,7 @@ class JVALUE : public JSON { JVALUE(PJSON jsp) : JSON() {Jsp = jsp; Value = NULL; Next = NULL; Del = false;} JVALUE(PGLOBAL g, PVAL valp); - JVALUE(PGLOBAL g, PSZ strp); + JVALUE(PGLOBAL g, PCSZ strp); using JSON::GetValue; using JSON::SetValue; diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 9ea391edae9..4e00703d9ef 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1455,7 +1455,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, free(g->Sarea); if (!(g->Sarea = PlugAllocMem(g, ml))) { - char errmsg[256]; + char errmsg[MAX_STR]; sprintf(errmsg, MSG(WORK_AREA), g->Message); strcpy(g->Message, errmsg); @@ -1496,7 +1496,7 @@ static PSZ MakePSZ(PGLOBAL g, 
UDF_ARGS *args, int i) /*********************************************************************************/ /* Make a valid key from the passed argument. */ /*********************************************************************************/ -static PSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) +static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) { if (args->arg_count > (unsigned)i) { int j = 0, n = args->attribute_lengths[i]; @@ -2253,7 +2253,8 @@ my_bool json_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { - char *key, *str = NULL; + PCSZ key; + char *str = NULL; PGLOBAL g = (PGLOBAL)initid->ptr; if (g->Xchk) { @@ -2358,7 +2359,7 @@ char *json_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif Xchk if (!CheckMemory(g, initid, args, 1, false, true, true)) { - char *key; + PCSZ key; PJOB jobp; PJSON jsp, top; PJVAL jvp = MakeValue(g, args, 0, &top); @@ -2921,70 +2922,53 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, if (g->N) { str = (char*)g->Activityp; - goto fin; + goto err; } else if (initid->const_item) g->N = 1; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - PUSH_WARNING(MSG(TOO_MANY_JUMPS)); - *is_null = 1; - return NULL; - } // endif jump_level + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + goto err; + } else + jvp = MakeValue(g, args, 0); - if (setjmp(g->jumper[++g->jump_level])) { - PUSH_WARNING(g->Message); - str = NULL; - goto err; - } // endif rc -#endif // !USE_TRY + if ((p = jvp->GetString())) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, true)) { - PUSH_WARNING("CheckMemory error"); - goto err; - } else - jvp = MakeValue(g, args, 0); + } else + jsp = jvp->GetJson(); - if ((p = jvp->GetString())) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto err; - } // endif jsp + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr } else - jsp = jvp->GetJson(); + jsp = (PJSON)g->Xchk; - if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; - JsonMemSave(g); - } // endif Mrr - - } else - jsp = (PJSON)g->Xchk; + path = MakePSZ(g, args, 1); + jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length); - path = MakePSZ(g, args, 1); - jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length); - - if (jsx->SetJpath(g, path)) { - PUSH_WARNING(g->Message); - goto err; - } // endif SetJpath + if (jsx->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + goto err; + } // endif SetJpath - jsx->ReadValue(g); + jsx->ReadValue(g); - if (!jsx->GetValue()->IsNull()) - str = jsx->GetValue()->GetCharValue(); + if (!jsx->GetValue()->IsNull()) + str = jsx->GetValue()->GetCharValue(); - if (initid->const_item) - // Keep result of constant function - g->Activityp = (PACTIVITY)str; + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; -#if defined(USE_TRY) - } catch (int n) { + } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); @@ -2996,12 +2980,6 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, } // end catch err: -#else // !USE_TRY - err: - 
g->jump_level--; -#endif // !USE_TRY - - fin: if (!str) { *is_null = 1; *res_length = 0; @@ -3292,64 +3270,45 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, } else if (initid->const_item) g->N = 1; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - PUSH_WARNING(MSG(TOO_MANY_JUMPS)); - *error = 1; - *is_null = 1; - return NULL; - } // endif jump_level + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, !g->Xchk)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + jvp = MakeValue(g, args, 0); - if (setjmp(g->jumper[++g->jump_level])) { - PUSH_WARNING(g->Message); - *error = 1; - path = NULL; - goto err; - } // endif rc -#endif // !USE_TRY + if ((p = jvp->GetString())) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, !g->Xchk)) { - PUSH_WARNING("CheckMemory error"); - *error = 1; - goto err; - } else - jvp = MakeValue(g, args, 0); + } else + jsp = jvp->GetJson(); - if ((p = jvp->GetString())) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto err; - } // endif jsp + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr } else - jsp = jvp->GetJson(); - - if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; - JsonMemSave(g); - } // endif Mrr - - } else - jsp = (PJSON)g->Xchk; + jsp = (PJSON)g->Xchk; - // The item to locate - jvp2 = MakeValue(g, args, 1); + // The item to locate + jvp2 = MakeValue(g, args, 1); - k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1; + k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1; - jsx = new(g) JSNX(g, jsp, TYPE_STRING); - path = jsx->Locate(g, jsp, jvp2, k); + jsx = new(g) JSNX(g, jsp, TYPE_STRING); + path = jsx->Locate(g, jsp, jvp2, k); - if (initid->const_item) - // Keep result of constant function - g->Activityp = (PACTIVITY)path; + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; -#if defined(USE_TRY) - } catch (int n) { + } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); @@ -3363,11 +3322,6 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, } // end catch err: -#else // !USE_TRY - err: - g->jump_level--; -#endif // !USE_TRY - if (!path) { *res_length = 0; *is_null = 1; @@ -3439,65 +3393,46 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result, } else if (initid->const_item) g->N = 1; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - PUSH_WARNING(MSG(TOO_MANY_JUMPS)); - *error = 1; - *is_null = 1; - return NULL; - } // endif jump_level + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + jvp = MakeValue(g, args, 0); - if (setjmp(g->jumper[++g->jump_level])) { - PUSH_WARNING(g->Message); - *error = 1; - path = NULL; - goto err; - } // endif rc -#endif // !USE_TRY + if ((p = jvp->GetString())) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, true)) { - PUSH_WARNING("CheckMemory error"); - *error = 1; - goto err; - } else - jvp = MakeValue(g, args, 0); + } else + jsp = 
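The JSON UDFs being converted here all end up with the same shape: the work sits in one try block, every failure throws, and a single exit point decides whether the SQL result is NULL. A reduced, stand-alone sketch; PUSH_WARNING and the GLOBAL block are modelled, not the real server macros, and the patched UDFs additionally catch (const char *msg) and copy it into g->Message in the same way.

  #include <cstdio>
  #include <cstring>

  struct GLOBAL { char Message[512]; };           // stand-in for PGLOBAL

  static void PUSH_WARNING(const char *m)         // stand-in for the macro
  { std::fprintf(stderr, "warning: %s\n", m); }

  static char *udf_like(GLOBAL *g, const char *arg,
                        char *is_null, unsigned long *res_length)
  {
    char *str = nullptr;

    try {
      if (!arg || !*arg) {
        std::strcpy(g->Message, "Invalid argument");
        throw 1;
      } // endif arg

      str = std::strcpy(g->Message, arg);         // pretend this is the value
    } catch (int n) {
      std::printf("Exception %d: %s\n", n, g->Message);
      PUSH_WARNING(g->Message);
      str = nullptr;
    } // end catch

    if (!str) {
      *is_null = 1;                               // NULL result for SQL
      *res_length = 0;
    } else
      *res_length = (unsigned long)std::strlen(str);

    return str;
  } // end of udf_like

  int main()
  {
    GLOBAL g = {};
    char is_null = 0;
    unsigned long len = 0;
    char *res = udf_like(&g, "[1,2,3]", &is_null, &len);
    std::printf("res=%s null=%d len=%lu\n", res ? res : "(null)", is_null, len);
    return 0;
  } // end of main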
jvp->GetJson(); - if ((p = jvp->GetString())) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto err; - } // endif jsp + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr } else - jsp = jvp->GetJson(); - - if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; - JsonMemSave(g); - } // endif Mrr - - } else - jsp = (PJSON)g->Xchk; + jsp = (PJSON)g->Xchk; - // The item to locate - jvp2 = MakeValue(g, args, 1); + // The item to locate + jvp2 = MakeValue(g, args, 1); - if (args->arg_count > 2) - mx = (int)*(long long*)args->args[2]; + if (args->arg_count > 2) + mx = (int)*(long long*)args->args[2]; - jsx = new(g) JSNX(g, jsp, TYPE_STRING); - path = jsx->LocateAll(g, jsp, jvp2, mx); + jsx = new(g) JSNX(g, jsp, TYPE_STRING); + path = jsx->LocateAll(g, jsp, jvp2, mx); - if (initid->const_item) - // Keep result of constant function - g->Activityp = (PACTIVITY)path; + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; -#if defined(USE_TRY) - } catch (int n) { + } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); @@ -3511,11 +3446,6 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result, } // end catch err: -#else // !USE_TRY - err: - g->jump_level--; -#endif // !USE_TRY - if (!path) { *res_length = 0; *is_null = 1; @@ -3722,87 +3652,61 @@ char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, goto fin; } // endelse -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - PUSH_WARNING(MSG(TOO_MANY_JUMPS)); - *error = 1; - goto fin; - } // endif jump_level + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true, false, true)) { + PUSH_WARNING("CheckMemory error"); + throw 1; + } else + jvp = MakeValue(g, args, 0); - if (setjmp(g->jumper[++g->jump_level])) { - PUSH_WARNING(g->Message); - str = NULL; - goto err; - } // endif rc -#endif // !USE_TRY + if ((p = jvp->GetString())) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + throw 2; + } // endif jsp - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, true, false, true)) { - PUSH_WARNING("CheckMemory error"); -#if defined(USE_TRY) - throw 1; -#else // !USE_TRY - goto err; -#endif // !USE_TRY - } else - jvp = MakeValue(g, args, 0); + } else + jsp = jvp->GetJson(); - if ((p = jvp->GetString())) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { -#if defined(USE_TRY) - throw 2; -#else // !USE_TRY - PUSH_WARNING(g->Message); - goto err; -#endif // !USE_TRY - } // endif jsp + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr } else - jsp = jvp->GetJson(); - - if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; - JsonMemSave(g); - } // endif Mrr + jsp = (PJSON)g->Xchk; - } else - jsp = (PJSON)g->Xchk; + jsx = new(g)JSNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); - jsx = new(g)JSNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); + for (uint i = 1; i + 1 < args->arg_count; i += 2) { + jvp = MakeValue(gb, args, i); + path = MakePSZ(g, args, i + 1); - for (uint i = 1; i+1 < args->arg_count; i += 2) { - jvp = MakeValue(gb, args, i); - path = MakePSZ(g, args, i+1); - - if (jsx->SetJpath(g, path, false)) { - PUSH_WARNING(g->Message); - continue; - } // endif SetJpath + if (jsx->SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath - if (w) { - jsx->ReadValue(g); 
- b = jsx->GetValue()->IsNull(); - b = (w == 1) ? b : !b; - } // endif w + if (w) { + jsx->ReadValue(g); + b = jsx->GetValue()->IsNull(); + b = (w == 1) ? b : !b; + } // endif w - if (b && jsx->WriteValue(gb, jvp)) - PUSH_WARNING(g->Message); + if (b && jsx->WriteValue(gb, jvp)) + PUSH_WARNING(g->Message); - } // endfor i + } // endfor i - // In case of error or file, return unchanged argument - if (!(str = MakeResult(g, args, jsp, INT_MAX32))) - str = MakePSZ(g, args, 0); + // In case of error or file, return unchanged argument + if (!(str = MakeResult(g, args, jsp, INT_MAX32))) + str = MakePSZ(g, args, 0); - if (g->N) - // Keep result of constant function - g->Activityp = (PACTIVITY)str; + if (g->N) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; -#if defined(USE_TRY) - } catch (int n) { + } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); @@ -3812,10 +3716,6 @@ char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, PUSH_WARNING(g->Message); str = NULL; } // end catch -#else // !USE_TRY -err: - g->jump_level--; -#endif // !USE_TRY fin: if (!str) { @@ -4642,7 +4542,7 @@ char *jbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif bsp if (!CheckMemory(g, initid, args, 2, false, true, true)) { - char *key; + PCSZ key; PJOB jobp; PJVAL jvp = MakeValue(g, args, 0, &top); PJSON jsp = jvp->GetJson(); @@ -4722,7 +4622,7 @@ char *jbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif bsp if (!CheckMemory(g, initid, args, 1, false, true, true)) { - char *key; + PCSZ key; PJOB jobp; PJVAL jvp = MakeValue(g, args, 0, &top); PJSON jsp = jvp->GetJson(); diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index d2890421c62..5f4b98a0652 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -232,7 +232,7 @@ extern "C" { /*********************************************************************************/ typedef struct _jpn { enum JTYP Type; - PSZ Key; + PCSZ Key; int N; } JPN, *PJPN; diff --git a/storage/connect/libdoc.cpp b/storage/connect/libdoc.cpp index 2470d37c353..700d247da38 100644 --- a/storage/connect/libdoc.cpp +++ b/storage/connect/libdoc.cpp @@ -68,9 +68,9 @@ class LIBXMLDOC : public XMLDOCUMENT { virtual void SetNofree(bool b) {Nofreelist = b;} // Methods - virtual bool Initialize(PGLOBAL g, char *entry, bool zipped); + virtual bool Initialize(PGLOBAL g, PCSZ entry, bool zipped); virtual bool ParseFile(PGLOBAL g, char *fn); - virtual bool NewDoc(PGLOBAL g, char *ver); + virtual bool NewDoc(PGLOBAL g, PCSZ ver); virtual void AddComment(PGLOBAL g, char *com); virtual PXNODE GetRoot(PGLOBAL g); virtual PXNODE NewRoot(PGLOBAL g, char *name); @@ -119,9 +119,9 @@ class XML2NODE : public XMLNODE { virtual PXLIST SelectNodes(PGLOBAL g, char *xp, PXLIST lp); virtual PXNODE SelectSingleNode(PGLOBAL g, char *xp, PXNODE np); virtual PXATTR GetAttribute(PGLOBAL g, char *name, PXATTR ap); - virtual PXNODE AddChildNode(PGLOBAL g, char *name, PXNODE np); + virtual PXNODE AddChildNode(PGLOBAL g, PCSZ name, PXNODE np); virtual PXATTR AddProperty(PGLOBAL g, char *name, PXATTR ap); - virtual void AddText(PGLOBAL g, char *txtp); + virtual void AddText(PGLOBAL g, PCSZ txtp); virtual void DeleteChild(PGLOBAL g, PXNODE dnp); protected: @@ -373,7 +373,7 @@ LIBXMLDOC::LIBXMLDOC(char *nsl, char *nsdf, char *enc, PFBLOCK fp) /******************************************************************/ /* Initialize XML parser and check library compatibility. 
*/ /******************************************************************/ -bool LIBXMLDOC::Initialize(PGLOBAL g, char *entry, bool zipped) +bool LIBXMLDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped) { if (zipped && InitZip(g, entry)) return true; @@ -434,7 +434,7 @@ PFBLOCK LIBXMLDOC::LinkXblock(PGLOBAL g, MODE m, int rc, char *fn) /******************************************************************/ /* Construct and add the XML processing instruction node. */ /******************************************************************/ -bool LIBXMLDOC::NewDoc(PGLOBAL g, char *ver) +bool LIBXMLDOC::NewDoc(PGLOBAL g, PCSZ ver) { if (trace) htrc("NewDoc\n"); @@ -863,14 +863,13 @@ RCODE XML2NODE::GetContent(PGLOBAL g, char *buf, int len) xmlFree(Content); if ((Content = xmlNodeGetContent(Nodep))) { - char *extra = " \t\r\n"; char *p1 = (char*)Content, *p2 = buf; bool b = false; // Copy content eliminating extra characters for (; *p1; p1++) if ((p2 - buf) < len) { - if (strchr(extra, *p1)) { + if (strchr(" \t\r\n", *p1)) { if (b) { // This to have one blank between sub-nodes *p2++ = ' '; @@ -1020,19 +1019,19 @@ PXATTR XML2NODE::GetAttribute(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ /* Add a new child node to this node and return it. */ /******************************************************************/ -PXNODE XML2NODE::AddChildNode(PGLOBAL g, char *name, PXNODE np) +PXNODE XML2NODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np) { - char *p, *pn, *pf = NULL; + char *p, *pn, *pf = NULL, *nmp = PlugDup(g, name); if (trace) htrc("AddChildNode: %s\n", name); // Is a prefix specified - if ((pn = strchr(name, ':'))) { - pf = name; + if ((pn = strchr(nmp, ':'))) { + pf = nmp; *pn++ = '\0'; // Separate name from prefix } else - pn = name; + pn = nmp; // If name has the format m[n] only m is taken as node name if ((p = strchr(pn, '['))) @@ -1096,7 +1095,7 @@ PXATTR XML2NODE::AddProperty(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ /* Add a new text node to this node. */ /******************************************************************/ -void XML2NODE::AddText(PGLOBAL g, char *txtp) +void XML2NODE::AddText(PGLOBAL g, PCSZ txtp) { if (trace) htrc("AddText: %s\n", txtp); diff --git a/storage/connect/macutil.cpp b/storage/connect/macutil.cpp index f5d3bb11fe9..b9600bdac2e 100644 --- a/storage/connect/macutil.cpp +++ b/storage/connect/macutil.cpp @@ -192,7 +192,7 @@ bool MACINFO::GetOneInfo(PGLOBAL g, int flag, void *v, int lv) case 23: break; default: - p = ""; + p = PlugDup(g, ""); } // endswitch flag } else switch (flag) { diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index 30ac7613dd6..750cf3c0639 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /*************** Mycat CC Program Source Code File (.CC) ***************/ /* PROGRAM NAME: MYCAT */ @@ -161,7 +161,7 @@ TABTYPE GetTypeID(const char *type) #ifdef ZIP_SUPPORT : (!stricmp(type, "ZIP")) ? TAB_ZIP #endif - : (!stricmp(type, "OEM")) ? TAB_OEM : TAB_NIY; + : (!stricmp(type, "OEM")) ? 
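AddChildNode above can no longer cut its name argument in place now that the parameter is const, so the patch duplicates it first with PlugDup and splits the copy. A generic sketch of that idiom; plain new[]/delete[] stands in here for the CONNECT work-area allocator:

  #include <cstdio>
  #include <cstring>

  typedef const char *PCSZ;

  // Split an optional "prefix:name" pair without touching the caller's text.
  static char *SplitName(PCSZ name, char **prefix, char **local)
  {
    char *nmp = new char[std::strlen(name) + 1];  // writable private copy
    std::strcpy(nmp, name);

    char *pn = std::strchr(nmp, ':');

    if (pn) {
      *prefix = nmp;
      *pn++ = '\0';                               // cut the copy only
      *local = pn;
    } else {
      *prefix = nullptr;
      *local = nmp;
    } // endif pn

    return nmp;                                   // caller frees this
  } // end of SplitName

  int main()
  {
    char *pf, *pn;
    char *buf = SplitName("xsi:node", &pf, &pn);
    std::printf("prefix=%s name=%s\n", pf ? pf : "(none)", pn);
    delete[] buf;
    return 0;
  } // end of main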
TAB_OEM : TAB_NIY; } // end of GetTypeID /***********************************************************************/ @@ -477,39 +477,6 @@ void MYCAT::Reset(void) { } // end of Reset -#if 0 -/***********************************************************************/ -/* This function sets the current database path. */ -/***********************************************************************/ -void MYCAT::SetPath(PGLOBAL g, LPCSTR *datapath, const char *path) - { - if (path) { - size_t len= strlen(path) + (*path != '.' ? 4 : 1); - char *buf= (char*)PlugSubAlloc(g, NULL, len); - - if (PlugIsAbsolutePath(path)) - { - strcpy(buf, path); - *datapath= buf; - return; - } - - if (*path != '.') { -#if defined(__WIN__) - char *s= "\\"; -#else // !__WIN__ - char *s= "/"; -#endif // !__WIN__ - strcat(strcat(strcat(strcpy(buf, "."), s), path), s); - } else - strcpy(buf, path); - - *datapath= buf; - } // endif path - - } // end of SetDataPath -#endif // 0 - /***********************************************************************/ /* GetTableDesc: retrieve a table descriptor. */ /* Look for a table descriptor matching the name and type. */ diff --git a/storage/connect/mycat.h b/storage/connect/mycat.h index 663b68fd4b9..b6bdd5e5e11 100644 --- a/storage/connect/mycat.h +++ b/storage/connect/mycat.h @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /**************** MYCAT H Declares Source Code File (.H) ***************/ /* Name: MYCAT.H Version 2.3 */ @@ -98,10 +98,7 @@ class MYCAT : public CATALOG { // Methods void Reset(void); -//void SetDataPath(PGLOBAL g, const char *path) -// {SetPath(g, &DataPath, path);} bool StoreIndex(PGLOBAL, PTABDEF) {return false;} // Temporary -// PRELDEF GetTableDesc(PGLOBAL g, LPCSTR name, PRELDEF GetTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR type, PRELDEF *prp = NULL); PTDB GetTable(PGLOBAL g, PTABLE tablep, @@ -109,9 +106,7 @@ class MYCAT : public CATALOG { void ClearDB(PGLOBAL g); protected: -// PRELDEF MakeTableDesc(PGLOBAL g, LPCSTR name, LPCSTR am); PRELDEF MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am); - //void SetPath(PGLOBAL g, LPCSTR *datapath, const char *path); // Members ha_connect *Hc; // The Connect handler diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp index f7cd245df59..e68489faad5 100644 --- a/storage/connect/myconn.cpp +++ b/storage/connect/myconn.cpp @@ -137,7 +137,8 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, FLD_CHARSET}; //unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0}; unsigned int length[] = {0, 4, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0}; - char *fld, *colname, *chset, *fmt, v, buf[128], uns[16], zero[16]; + PCSZ fmt; + char *fld, *colname, *chset, v, buf[128], uns[16], zero[16]; int i, n, nf, ncol = sizeof(buftyp) / sizeof(int); int len, type, prec, rc, k = 0; bool b; @@ -874,7 +875,8 @@ MYSQL_FIELD *MYSQLC::GetNextField(void) /***********************************************************************/ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb) { - char *fmt, v; + PCSZ fmt; + char *name, v; int n; bool uns; PCOLRES *pcrp, crp; @@ -912,8 +914,9 @@ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb) memset(crp, 0, sizeof(COLRES)); crp->Ncol = ++qrp->Nbcol; - crp->Name = (char*)PlugSubAlloc(g, NULL, fld->name_length + 1); - strcpy(crp->Name, 
fld->name); + name = (char*)PlugSubAlloc(g, NULL, fld->name_length + 1); + strcpy(name, fld->name); + crp->Name = name; if ((crp->Type = MYSQLtoPLG(fld->type, &v)) == TYPE_ERROR) { sprintf(g->Message, "Type %d not supported for column %s", diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp index 5fcc914f632..c2053f1c832 100644 --- a/storage/connect/myutil.cpp +++ b/storage/connect/myutil.cpp @@ -269,9 +269,9 @@ int MYSQLtoPLG(int mytype, char *var) /************************************************************************/ /* Returns the format corresponding to a MySQL date type number. */ /************************************************************************/ -char *MyDateFmt(int mytype) +PCSZ MyDateFmt(int mytype) { - char *fmt; + PCSZ fmt; switch (mytype) { case MYSQL_TYPE_TIMESTAMP: @@ -297,9 +297,9 @@ char *MyDateFmt(int mytype) /************************************************************************/ /* Returns the format corresponding to a MySQL date type name. */ /************************************************************************/ -char *MyDateFmt(char *typname) +PCSZ MyDateFmt(char *typname) { - char *fmt; + PCSZ fmt; if (!stricmp(typname, "datetime") || !stricmp(typname, "timestamp")) fmt = "YYYY-MM-DD hh:mm:ss"; diff --git a/storage/connect/myutil.h b/storage/connect/myutil.h index 9c22cfef118..6991172b39e 100644 --- a/storage/connect/myutil.h +++ b/storage/connect/myutil.h @@ -6,9 +6,9 @@ enum enum_field_types PLGtoMYSQL(int type, bool dbf, char var = 0); const char *PLGtoMYSQLtype(int type, bool dbf, char var = 0); -int MYSQLtoPLG(char *typname, char *var = NULL); -int MYSQLtoPLG(int mytype, char *var = NULL); -char *MyDateFmt(int mytype); -char *MyDateFmt(char *typname); +int MYSQLtoPLG(char *typname, char *var = NULL); +int MYSQLtoPLG(int mytype, char *var = NULL); +PCSZ MyDateFmt(int mytype); +PCSZ MyDateFmt(char *typname); #endif // __MYUTIL__H diff --git a/storage/connect/odbccat.h b/storage/connect/odbccat.h index 3b729bcb4bb..05b82e49727 100644 --- a/storage/connect/odbccat.h +++ b/storage/connect/odbccat.h @@ -3,11 +3,11 @@ #define DEFAULT_QUERY_TIMEOUT -1 // means do not set typedef struct odbc_parms { - char *User; // User connect info - char *Pwd; // Password connect info - int Cto; // Connect timeout - int Qto; // Query timeout - bool UseCnc; // Use SQLConnect (!SQLDriverConnect) + PCSZ User; // User connect info + PCSZ Pwd; // Password connect info + int Cto; // Connect timeout + int Qto; // Query timeout + bool UseCnc; // Use SQLConnect (!SQLDriverConnect) } ODBCPARM, *POPARM; /***********************************************************************/ @@ -17,9 +17,9 @@ typedef struct odbc_parms { char *ODBCCheckConnection(PGLOBAL g, char *dsn, int cop); #endif // PROMPT_OK PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info); -PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table, - char *colpat, int maxres, bool info, POPARM sop); +PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ table, + PCSZ colpat, int maxres, bool info, POPARM sop); PQRYRES ODBCSrcCols(PGLOBAL g, char *dsn, char *src, POPARM sop); -PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, - char *tabtyp, int maxres, bool info, POPARM sop); +PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat, + PCSZ tabtyp, int maxres, bool info, POPARM sop); PQRYRES ODBCDrivers(PGLOBAL g, int maxres, bool info); diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp index cf55846765f..3dbc2d577d5 100644 --- 
a/storage/connect/odbconn.cpp +++ b/storage/connect/odbconn.cpp @@ -239,62 +239,43 @@ char *ODBCCheckConnection(PGLOBAL g, char *dsn, int cop) /***********************************************************************/ /* Allocate the structure used to refer to the result set. */ /***********************************************************************/ -static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, char *db, - char *tab, PQRYRES qrp) - { - size_t i, m, n; - CATPARM *cap; +static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, PCSZ db, + PCSZ tab, PQRYRES qrp) +{ + size_t i, m, n; + CATPARM *cap; #if defined(_DEBUG) - assert(qrp); + assert(qrp); #endif -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { + m = (size_t)qrp->Maxres; + n = (size_t)qrp->Nbcol; + cap = (CATPARM *)PlugSubAlloc(g, NULL, sizeof(CATPARM)); + memset(cap, 0, sizeof(CATPARM)); + cap->Id = fid; + cap->Qrp = qrp; + cap->DB = db; + cap->Tab = tab; + cap->Vlen = (SQLLEN* *)PlugSubAlloc(g, NULL, n * sizeof(SQLLEN *)); + + for (i = 0; i < n; i++) + cap->Vlen[i] = (SQLLEN *)PlugSubAlloc(g, NULL, m * sizeof(SQLLEN)); + + cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD)); + + } catch (int n) { + htrc("Exeption %d: %s\n", n, g->Message); + cap = NULL; + } catch (const char *msg) { + htrc(g->Message, msg); printf("%s\n", g->Message); cap = NULL; - goto fin; - } // endif rc -#endif // !USE_TRY - - m = (size_t)qrp->Maxres; - n = (size_t)qrp->Nbcol; - cap = (CATPARM *)PlugSubAlloc(g, NULL, sizeof(CATPARM)); - memset(cap, 0, sizeof(CATPARM)); - cap->Id = fid; - cap->Qrp = qrp; - cap->DB = (PUCHAR)db; - cap->Tab = (PUCHAR)tab; - cap->Vlen = (SQLLEN* *)PlugSubAlloc(g, NULL, n * sizeof(SQLLEN *)); - - for (i = 0; i < n; i++) - cap->Vlen[i] = (SQLLEN *)PlugSubAlloc(g, NULL, m * sizeof(SQLLEN)); - - cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD)); - -#if defined(USE_TRY) -} catch (int n) { - htrc("Exeption %d: %s\n", n, g->Message); - cap = NULL; -} catch (const char *msg) { - htrc(g->Message, msg); - printf("%s\n", g->Message); - cap = NULL; -} // end catch -#else // !USE_TRY - fin: - g->jump_level--; -#endif // !USE_TRY + } // end catch + return cap; - } // end of AllocCatInfo +} // end of AllocCatInfo #if 0 /***********************************************************************/ @@ -324,8 +305,8 @@ static void ResetNullValues(CATPARM *cap) /* ODBCColumns: constructs the result blocks containing all columns */ /* of an ODBC table that will be retrieved by GetData commands. */ /***********************************************************************/ -PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table, - char *colpat, int maxres, bool info, POPARM sop) +PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ table, + PCSZ colpat, int maxres, bool info, POPARM sop) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, @@ -398,7 +379,7 @@ PQRYRES ODBCColumns(PGLOBAL g, char *dsn, char *db, char *table, if (!(cap = AllocCatInfo(g, CAT_COL, db, table, qrp))) return NULL; - cap->Pat = (PUCHAR)colpat; + cap->Pat = colpat; /************************************************************************/ /* Now get the results into blocks. 
*/ @@ -633,8 +614,8 @@ PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info) /* ODBCTables: constructs the result blocks containing all tables in */ /* an ODBC database that will be retrieved by GetData commands. */ /**************************************************************************/ -PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, - char *tabtyp, int maxres, bool info, POPARM sop) +PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat, PCSZ tabtyp, + int maxres, bool info, POPARM sop) { int buftyp[] = {TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING, TYPE_STRING}; @@ -696,7 +677,7 @@ PQRYRES ODBCTables(PGLOBAL g, char *dsn, char *db, char *tabpat, if (!(cap = AllocCatInfo(g, CAT_TAB, db, tabpat, qrp))) return NULL; - cap->Pat = (PUCHAR)tabtyp; + cap->Pat = tabtyp; if (trace) htrc("Getting table results ncol=%d\n", cap->Qrp->Nbcol); @@ -894,7 +875,7 @@ PQRYRES ODBCStatistics(PGLOBAL g, ODBConn *op, char *dsn, char *pat, /***********************************************************************/ /* Implementation of DBX class. */ /***********************************************************************/ -DBX::DBX(RETCODE rc, PSZ msg) +DBX::DBX(RETCODE rc, PCSZ msg) { m_RC = rc; m_Msg = msg; @@ -1035,7 +1016,7 @@ bool ODBConn::Check(RETCODE rc) /***********************************************************************/ /* DB exception throw routines. */ /***********************************************************************/ -void ODBConn::ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt) +void ODBConn::ThrowDBX(RETCODE rc, PCSZ msg, HSTMT hstmt) { DBX* xp = new(m_G) DBX(rc, msg); @@ -1045,7 +1026,7 @@ void ODBConn::ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt) } // end of ThrowDBX -void ODBConn::ThrowDBX(PSZ msg) +void ODBConn::ThrowDBX(PCSZ msg) { DBX* xp = new(m_G) DBX(0, "Error"); @@ -1125,7 +1106,7 @@ void ODBConn::OnSetOptions(HSTMT hstmt) /***********************************************************************/ /* Open: connect to a data source. */ /***********************************************************************/ -int ODBConn::Open(PSZ ConnectString, POPARM sop, DWORD options) +int ODBConn::Open(PCSZ ConnectString, POPARM sop, DWORD options) { PGLOBAL& g = m_G; //ASSERT_VALID(this); @@ -1207,7 +1188,7 @@ void ODBConn::AllocConnect(DWORD Options) #if defined(_DEBUG) if (Options & traceSQL) { - SQLSetConnectOption(m_hdbc, SQL_OPT_TRACEFILE, (DWORD)"xodbc.out"); + SQLSetConnectOption(m_hdbc, SQL_OPT_TRACEFILE, (SQLULEN)"xodbc.out"); SQLSetConnectOption(m_hdbc, SQL_OPT_TRACE, 1); } // endif #endif // _DEBUG @@ -1230,7 +1211,7 @@ void ODBConn::AllocConnect(DWORD Options) // Turn on cursor lib support if (Options & useCursorLib) - rc = SQLSetConnectOption(m_hdbc, SQL_ODBC_CURSORS, SQL_CUR_USE_ODBC); + rc = SQLSetConnectOption(m_hdbc, SQL_ODBC_CURSORS, SQL_CUR_USE_DRIVER); return; } // end of AllocConnect @@ -1936,7 +1917,7 @@ bool ODBConn::ExecSQLcommand(char *sql) /* GetMetaData: constructs the result blocks containing the */ /* description of all the columns of an SQL command. 
*/ /**************************************************************************/ -PQRYRES ODBConn::GetMetaData(PGLOBAL g, char *dsn, char *src) +PQRYRES ODBConn::GetMetaData(PGLOBAL g, PCSZ dsn, PCSZ src) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_INT, TYPE_SHORT, TYPE_SHORT}; @@ -2259,7 +2240,7 @@ int ODBConn::GetCatInfo(CATPARM *cap) void *buffer; int i, irc; bool b; - PSZ fnc = "Unknown"; + PCSZ fnc = "Unknown"; UWORD n; SWORD ncol, len, tp; SQLULEN crow = 0; @@ -2298,22 +2279,20 @@ int ODBConn::GetCatInfo(CATPARM *cap) // Now do call the proper ODBC API switch (cap->Id) { case CAT_TAB: -// rc = SQLSetStmtAttr(hstmt, SQL_ATTR_METADATA_ID, -// (SQLPOINTER)false, 0); fnc = "SQLTables"; rc = SQLTables(hstmt, name.ptr(2), name.length(2), name.ptr(1), name.length(1), name.ptr(0), name.length(0), - cap->Pat, cap->Pat ? SQL_NTS : 0); + (SQLCHAR *)cap->Pat, + cap->Pat ? SQL_NTS : 0); break; case CAT_COL: -// rc = SQLSetStmtAttr(hstmt, SQL_ATTR_METADATA_ID, -// (SQLPOINTER)true, 0); fnc = "SQLColumns"; rc = SQLColumns(hstmt, name.ptr(2), name.length(2), name.ptr(1), name.length(1), name.ptr(0), name.length(0), - cap->Pat, cap->Pat ? SQL_NTS : 0); + (SQLCHAR *)cap->Pat, + cap->Pat ? SQL_NTS : 0); break; case CAT_KEY: fnc = "SQLPrimaryKeys"; diff --git a/storage/connect/odbconn.h b/storage/connect/odbconn.h index 063985218ec..5abb8354160 100644 --- a/storage/connect/odbconn.h +++ b/storage/connect/odbconn.h @@ -54,9 +54,9 @@ enum CATINFO {CAT_TAB = 1, /* SQLTables */ typedef struct tagCATPARM { CATINFO Id; // Id to indicate function PQRYRES Qrp; // Result set pointer - PUCHAR DB; // Database (Schema) - PUCHAR Tab; // Table name or pattern - PUCHAR Pat; // Table type or column pattern + PCSZ DB; // Database (Schema) + PCSZ Tab; // Table name or pattern + PCSZ Pat; // Table type or column pattern SQLLEN* *Vlen; // To array of indicator values UWORD *Status; // To status block // For SQLStatistics @@ -80,23 +80,23 @@ class DBX : public BLOCK { friend class ODBConn; // Construction (by ThrowDBX only) -- destruction protected: - DBX(RETCODE rc, PSZ msg = NULL); + DBX(RETCODE rc, PCSZ msg = NULL); public: //virtual ~DBX() {} //void operator delete(void*, PGLOBAL, void*) {}; // Implementation (use ThrowDBX to create) RETCODE GetRC(void) {return m_RC;} - PSZ GetMsg(void) {return m_Msg;} - const char *GetErrorMessage(int i); + PCSZ GetMsg(void) {return m_Msg;} + PCSZ GetErrorMessage(int i); protected: bool BuildErrorMessage(ODBConn* pdb, HSTMT hstmt = SQL_NULL_HSTMT); // Attributes RETCODE m_RC; - PSZ m_Msg; - PSZ m_ErrMsg[MAX_NUM_OF_MSG]; + PCSZ m_Msg; + PCSZ m_ErrMsg[MAX_NUM_OF_MSG]; }; // end of DBX class definition /***********************************************************************/ @@ -119,7 +119,7 @@ class ODBConn : public BLOCK { noOdbcDialog = 0x0008, // Don't display ODBC Connect dialog forceOdbcDialog = 0x0010}; // Always display ODBC connect dialog - int Open(PSZ ConnectString, POPARM sop, DWORD Options = 0); + int Open(PCSZ ConnectString, POPARM sop, DWORD Options = 0); int Rewind(char *sql, ODBCCOL *tocols); void Close(void); PQRYRES AllocateResult(PGLOBAL g); @@ -131,7 +131,7 @@ class ODBConn : public BLOCK { bool IsOpen(void) {return m_hdbc != SQL_NULL_HDBC;} PSZ GetStringInfo(ushort infotype); int GetMaxValue(ushort infotype); - PSZ GetConnect(void) {return m_Connect;} + PCSZ GetConnect(void) {return m_Connect;} public: // Operations @@ -149,7 +149,7 @@ class ODBConn : public BLOCK { int GetCatInfo(CATPARM *cap); bool GetDataSources(PQRYRES qrp); bool GetDrivers(PQRYRES 
qrp); - PQRYRES GetMetaData(PGLOBAL g, char *dsn, char *src); + PQRYRES GetMetaData(PGLOBAL g, PCSZ dsn, PCSZ src); public: // Set special options @@ -162,8 +162,8 @@ class ODBConn : public BLOCK { // ODBC operations protected: bool Check(RETCODE rc); - void ThrowDBX(RETCODE rc, PSZ msg, HSTMT hstmt = SQL_NULL_HSTMT); - void ThrowDBX(PSZ msg); + void ThrowDBX(RETCODE rc, PCSZ msg, HSTMT hstmt = SQL_NULL_HSTMT); + void ThrowDBX(PCSZ msg); void AllocConnect(DWORD dwOptions); void Connect(void); bool DriverConnect(DWORD Options); @@ -187,9 +187,9 @@ class ODBConn : public BLOCK { DWORD m_UpdateOptions; DWORD m_RowsetSize; char m_IDQuoteChar[2]; - PSZ m_Connect; - PSZ m_User; - PSZ m_Pwd; + PCSZ m_Connect; + PCSZ m_User; + PCSZ m_Pwd; int m_Catver; int m_Rows; int m_Fetch; diff --git a/storage/connect/os.h b/storage/connect/os.h index 2dc603fdcda..8056a272990 100644 --- a/storage/connect/os.h +++ b/storage/connect/os.h @@ -17,13 +17,16 @@ typedef off_t off64_t; #if defined(__WIN__) typedef __int64 BIGINT; +typedef _Null_terminated_ const char *PCSZ; #else // !__WIN__ typedef longlong BIGINT; #define FILE_BEGIN SEEK_SET #define FILE_CURRENT SEEK_CUR #define FILE_END SEEK_END +typedef const char *PCSZ; #endif // !__WIN__ + #if !defined(__WIN__) typedef const void *LPCVOID; typedef const char *LPCTSTR; diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index 800b1098d50..2198c44c200 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -80,7 +80,8 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */ TAB_DMY = 25, /* DMY Dummy tables NIY */ TAB_JDBC = 26, /* Table accessed via JDBC */ TAB_ZIP = 27, /* ZIP file info table */ - TAB_NIY = 28}; /* Table not implemented yet */ +// TAB_MONGO = 28, /* Table retrieved from MongoDB */ + TAB_NIY = 30}; /* Table not implemented yet */ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */ TYPE_AM_ROWID = 1, /* ROWID type (special column) */ @@ -143,7 +144,8 @@ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */ TYPE_AM_MYX = 193, /* MYSQL EXEC access method type */ TYPE_AM_CAT = 195, /* Catalog access method type no */ TYPE_AM_ZIP = 198, /* ZIP access method type no */ - TYPE_AM_OUT = 200}; /* Output relations (storage) */ + TYPE_AM_MGO = 199, /* MGO access method type no */ + TYPE_AM_OUT = 200}; /* Output relations (storage) */ enum RECFM {RECFM_NAF = -2, /* Not a file */ RECFM_OEM = -1, /* OEM file access method */ @@ -553,7 +555,7 @@ typedef struct _qryres { typedef struct _colres { PCOLRES Next; /* To next result column */ PCOL Colp; /* To matching column block */ - PSZ Name; /* Column header */ + PCSZ Name; /* Column header */ PVBLK Kdata; /* Column block of values */ char *Nulls; /* Column null value array */ int Type; /* Internal type */ @@ -583,7 +585,7 @@ void PlugLineDB(PGLOBAL, PSZ, short, void *, uint); char *SetPath(PGLOBAL g, const char *path); char *ExtractFromPath(PGLOBAL, char *, char *, OPVAL); void AddPointer(PTABS, void *); -PDTP MakeDateFormat(PGLOBAL, PSZ, bool, bool, int); +PDTP MakeDateFormat(PGLOBAL, PCSZ, bool, bool, int); int ExtractDate(char *, PDTP, int, int val[6]); /**************************************************************************/ @@ -615,11 +617,10 @@ DllExport void *PlgDBrealloc(PGLOBAL, void *, MBLOCK&, size_t); DllExport void NewPointer(PTABS, void *, void *); //lExport char *GetIni(int n= 0); // Not used anymore DllExport void SetTrc(void); -DllExport char *GetListOption(PGLOBAL, const char *, const char *, - const char *def=NULL); -DllExport char 
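The os.h hunk just above is where PCSZ itself is introduced; a condensed, stand-alone restatement (this sketch tests _WIN32 and pulls in sal.h for the _Null_terminated_ annotation, whereas the server code uses its own __WIN__ macro), together with the kind of read-only members and return values it is used for elsewhere in the patch:

  #if defined(_WIN32)
  #include <sal.h>                                // for _Null_terminated_
  typedef _Null_terminated_ const char *PCSZ;     // SAL-annotated on Windows
  #else
  typedef const char *PCSZ;                       // plain const string alias
  #endif

  // Typical uses after this patch: text the holder never writes through,
  // such as COLRES::Name, CATPARM::DB/Tab/Pat or a returned date format.
  struct colres_like {
    PCSZ Name;                                    // column header
  };

  static PCSZ DateFmtFor(bool timestamp)
  {
    return timestamp ? "YYYY-MM-DD hh:mm:ss" : "YYYY-MM-DD";
  } // end of DateFmtFor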
*GetStringTableOption(PGLOBAL, PTOS, char *, char *); -DllExport bool GetBooleanTableOption(PGLOBAL, PTOS, char *, bool); -DllExport int GetIntegerTableOption(PGLOBAL, PTOS, char *, int); +DllExport PCSZ GetListOption(PGLOBAL, PCSZ, PCSZ, PCSZ def=NULL); +DllExport PCSZ GetStringTableOption(PGLOBAL, PTOS, PCSZ, PCSZ); +DllExport bool GetBooleanTableOption(PGLOBAL, PTOS, PCSZ, bool); +DllExport int GetIntegerTableOption(PGLOBAL, PTOS, PCSZ, int); #define MSGID_NONE 0 #define MSGID_CANNOT_OPEN 1 diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index eef9fba44f5..4035e79e892 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -238,90 +238,74 @@ void ptrc(char const *fmt, ...) PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, int *buftyp, XFLD *fldtyp, unsigned int *length, bool blank, bool nonull) - { +{ char cname[NAM_LEN+1]; int i; PCOLRES *pcrp, crp; PQRYRES qrp; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level]) != 0) { - printf("%s\n", g->Message); - qrp = NULL; - goto fin; - } // endif rc -#endif // !USE_TRY - - /************************************************************************/ - /* Allocate the structure used to contain the result set. */ - /************************************************************************/ - qrp = (PQRYRES)PlugSubAlloc(g, NULL, sizeof(QRYRES)); - pcrp = &qrp->Colresp; - qrp->Continued = false; - qrp->Truncated = false; - qrp->Info = false; - qrp->Suball = true; - qrp->Maxres = maxres; - qrp->Maxsize = 0; - qrp->Nblin = 0; - qrp->Nbcol = 0; // will be ncol - qrp->Cursor = 0; - qrp->BadLines = 0; - - for (i = 0; i < ncol; i++) { - *pcrp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES)); - crp = *pcrp; - pcrp = &crp->Next; - memset(crp, 0, sizeof(COLRES)); - crp->Colp = NULL; - crp->Ncol = ++qrp->Nbcol; - crp->Type = buftyp[i]; - crp->Length = length[i]; - crp->Clen = GetTypeSize(crp->Type, length[i]); - crp->Prec = 0; - - if (ids > 0) { + /**********************************************************************/ + /* Allocate the structure used to contain the result set. 
*/ + /**********************************************************************/ + qrp = (PQRYRES)PlugSubAlloc(g, NULL, sizeof(QRYRES)); + pcrp = &qrp->Colresp; + qrp->Continued = false; + qrp->Truncated = false; + qrp->Info = false; + qrp->Suball = true; + qrp->Maxres = maxres; + qrp->Maxsize = 0; + qrp->Nblin = 0; + qrp->Nbcol = 0; // will be ncol + qrp->Cursor = 0; + qrp->BadLines = 0; + + for (i = 0; i < ncol; i++) { + *pcrp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES)); + crp = *pcrp; + pcrp = &crp->Next; + memset(crp, 0, sizeof(COLRES)); + crp->Colp = NULL; + crp->Ncol = ++qrp->Nbcol; + crp->Type = buftyp[i]; + crp->Length = length[i]; + crp->Clen = GetTypeSize(crp->Type, length[i]); + crp->Prec = 0; + + if (ids > 0) { #if defined(XMSG) - // Get header from message file - strncpy(cname, PlugReadMessage(g, ids + crp->Ncol, NULL), NAM_LEN); - cname[NAM_LEN] = 0; // for truncated long names + // Get header from message file + strncpy(cname, PlugReadMessage(g, ids + crp->Ncol, NULL), NAM_LEN); + cname[NAM_LEN] = 0; // for truncated long names #else // !XMSG - GetRcString(ids + crp->Ncol, cname, sizeof(cname)); + GetRcString(ids + crp->Ncol, cname, sizeof(cname)); #endif // !XMSG - crp->Name = (PSZ)PlugDup(g, cname); - } else - crp->Name = NULL; // Will be set by caller + crp->Name = (PSZ)PlugDup(g, cname); + } else + crp->Name = NULL; // Will be set by caller - if (fldtyp) - crp->Fld = fldtyp[i]; - else - crp->Fld = FLD_NO; + if (fldtyp) + crp->Fld = fldtyp[i]; + else + crp->Fld = FLD_NO; - // Allocate the Value Block that will contain data - if (crp->Length || nonull) - crp->Kdata = AllocValBlock(g, NULL, crp->Type, maxres, - crp->Length, 0, true, blank, false); - else - crp->Kdata = NULL; + // Allocate the Value Block that will contain data + if (crp->Length || nonull) + crp->Kdata = AllocValBlock(g, NULL, crp->Type, maxres, + crp->Length, 0, true, blank, false); + else + crp->Kdata = NULL; - if (trace) - htrc("Column(%d) %s type=%d len=%d value=%p\n", - crp->Ncol, crp->Name, crp->Type, crp->Length, crp->Kdata); + if (trace) + htrc("Column(%d) %s type=%d len=%d value=%p\n", + crp->Ncol, crp->Name, crp->Type, crp->Length, crp->Kdata); - } // endfor i + } // endfor i - *pcrp = NULL; + *pcrp = NULL; -#if defined(USE_TRY) - } catch (int n) { + } catch (int n) { htrc("Exception %d: %s\n", n, g->Message); qrp = NULL; } catch (const char *msg) { @@ -329,12 +313,9 @@ PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, htrc("%s\n", g->Message); qrp = NULL; } // end catch -#else // !USE_TRY - fin: - g->jump_level--; -#endif // !USE_TRY + return qrp; - } // end of PlgAllocResult +} // end of PlgAllocResult /***********************************************************************/ /* Allocate and initialize the new DB User Block. */ @@ -380,11 +361,7 @@ PCATLG PlgGetCatalog(PGLOBAL g, bool jump) if (!cat && jump) { // Raise exception so caller doesn't have to check return value strcpy(g->Message, MSG(NO_ACTIVE_DB)); -#if defined(USE_TRY) throw 1; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 1); -#endif // !USE_TRY } // endif cat return cat; @@ -410,27 +387,27 @@ char *SetPath(PGLOBAL g, const char *path) char *buf= NULL; if (path) { - size_t len= strlen(path) + (*path != '.' ? 4 : 1); + size_t len = strlen(path) + (*path != '.' ? 
4 : 1); if (!(buf = (char*)PlgDBSubAlloc(g, NULL, len))) return NULL; if (PlugIsAbsolutePath(path)) { - strcpy(buf, path); - return buf; - } // endif path + strcpy(buf, path); + return buf; + } // endif path if (*path != '.') { #if defined(__WIN__) - char *s= "\\"; + const char *s = "\\"; #else // !__WIN__ - char *s= "/"; + const char *s = "/"; #endif // !__WIN__ strcat(strcat(strcat(strcpy(buf, "."), s), path), s); } else strcpy(buf, path); - } // endif path + } // endif path return buf; } // end of SetPath @@ -468,7 +445,7 @@ char *ExtractFromPath(PGLOBAL g, char *pBuff, char *FileName, OPVAL op) static bool PlugCheckPattern(PGLOBAL g, LPCSTR string, LPCSTR pat) { if (pat && strlen(pat)) { - // This leaves 512 bytes (MAX_STR / 2) for each components + // This leaves 2048 bytes (MAX_STR / 2) for each components LPSTR name = g->Message + MAX_STR / 2; strlwr(strcpy(name, string)); @@ -496,11 +473,7 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) tp = g->Message; else if (!(tp = new char[strlen(pat) + strlen(strg) + 2])) { strcpy(g->Message, MSG(NEW_RETURN_NULL)); -#if defined(USE_TRY) throw OP_LIKE; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], OP_LIKE); -#endif // !USE_TRY } /* endif tp */ sp = tp + strlen(pat) + 1; @@ -511,11 +484,7 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) tp = g->Message; /* Use this as temporary work space. */ else if (!(tp = new char[strlen(pat) + 1])) { strcpy(g->Message, MSG(NEW_RETURN_NULL)); -#if defined(USE_TRY) throw OP_LIKE; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], OP_LIKE); -#endif // !USE_TRY } /* endif tp */ strcpy(tp, pat); /* Make a copy to be worked into */ @@ -704,7 +673,7 @@ void PlugConvertConstant(PGLOBAL g, void* & value, short& type) /* format and a Strftime output format. Flag if not 0 indicates that */ /* non quoted blanks are not included in the output format. */ /***********************************************************************/ -PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag) +PDTP MakeDateFormat(PGLOBAL g, PCSZ dfmt, bool in, bool out, int flag) { int rc; PDTP pdp = (PDTP)PlugSubAlloc(g, NULL, sizeof(DATPAR)); @@ -713,7 +682,7 @@ PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag) htrc("MakeDateFormat: dfmt=%s\n", dfmt); memset(pdp, 0, sizeof(DATPAR)); - pdp->Format = pdp->Curp = dfmt; + pdp->Format = pdp->Curp = PlugDup(g, dfmt); pdp->Outsize = 2 * strlen(dfmt) + 1; if (in) @@ -755,10 +724,11 @@ PDTP MakeDateFormat(PGLOBAL g, PSZ dfmt, bool in, bool out, int flag) /***********************************************************************/ int ExtractDate(char *dts, PDTP pdp, int defy, int val[6]) { - char *fmt, c, d, e, W[8][12]; - int i, k, m, numval; - int n, y = 30; - bool b = true; // true for null dates + PCSZ fmt; + char c, d, e, W[8][12]; + int i, k, m, numval; + int n, y = 30; + bool b = true; // true for null dates if (pdp) fmt = pdp->InFmt; @@ -1283,7 +1253,7 @@ void *PlgDBalloc(PGLOBAL g, void *area, MBLOCK& mp) // in the area, do allocate from virtual storage. 
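The plgdbutl.cpp hunks around this point all apply the mechanical conversion announced in the commit message: the setjmp/longjmp error returns formerly guarded by USE_TRY become plain C++ exceptions, with callers catching either an int code or a const char * message (as PlgAllocResult now does). The standalone sketch below contrasts the two styles; the GLOBAL fields mirror names visible in this patch, while the functions, error text and codes are invented for illustration only.

// Schematic illustration only -- g->Message, g->jumper and g->jump_level
// follow the CONNECT conventions seen in this patch, everything else is
// made up for the example.
#include <csetjmp>
#include <cstdio>
#include <cstring>

struct GLOBAL { char Message[512]; jmp_buf jumper[16]; int jump_level; };

// Old style: the callee reports errors by longjmp'ing to the caller's setjmp.
void OldCallee(GLOBAL *g) {
  strcpy(g->Message, "something failed");
  longjmp(g->jumper[g->jump_level], 1);     // never returns
}

int OldCaller(GLOBAL *g) {
  if (setjmp(g->jumper[++g->jump_level]) != 0) {
    g->jump_level--;                        // unwind the jumper stack
    return -1;                              // error path
  }
  OldCallee(g);
  g->jump_level--;
  return 0;
}

// New style (this patch): the callee throws, the caller catches.
void NewCallee(GLOBAL *g) {
  strcpy(g->Message, "something failed");
  throw 1;
}

int NewCaller(GLOBAL *g) {
  try {
    NewCallee(g);
    return 0;
  } catch (int n) {                         // numeric error codes
    printf("Exception %d: %s\n", n, g->Message);
  } catch (const char *msg) {               // textual errors
    strcpy(g->Message, msg);
  }
  return -1;
}

int main() {
  GLOBAL g;
  g.jump_level = -1;                        // as set by PlugInit in the patch
  OldCaller(&g);                            // silently takes the error path
  return NewCaller(&g) == -1 ? 0 : 1;       // prints "Exception 1: ..."
}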
#if defined(__WIN__) if (mp.Size >= BIGMEM) - mp.Memp = VirtualAlloc(NULL, mp.Size, MEM_COMMIT, PAGE_READWRITE); + mp.Memp = VirtualAlloc(NULL, mp.Size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); else #endif mp.Memp = malloc(mp.Size); @@ -1548,11 +1518,7 @@ DllExport void NewPointer(PTABS t, void *oldv, void *newv) PGLOBAL g = t->G; sprintf(g->Message, "NewPointer: %s", MSG(MEM_ALLOC_ERROR)); -#if defined(USE_TRY) throw 3; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 3); -#endif // !USE_TRY } else { tp->Next = t->P1; tp->Num = 0; @@ -1589,22 +1555,14 @@ int FileComp(PGLOBAL g, char *file1, char *file2) sprintf(g->Message, MSG(OPEN_MODE_ERROR), "rb", (int)errno, fn[i]); strcat(strcat(g->Message, ": "), strerror(errno)); -#if defined(USE_TRY) throw 666; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 666); -#endif // !USE_TRY // } else // len[i] = 0; // File does not exist yet } else { if ((len[i] = _filelength(h[i])) < 0) { sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", fn[i]); -#if defined(USE_TRY) throw 666; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 666); -#endif // !USE_TRY } // endif len } // endif h diff --git a/storage/connect/plgxml.cpp b/storage/connect/plgxml.cpp index eb31e24235b..f3d3a010266 100644 --- a/storage/connect/plgxml.cpp +++ b/storage/connect/plgxml.cpp @@ -45,7 +45,7 @@ XMLDOCUMENT::XMLDOCUMENT(char *nsl, char *nsdf, char *enc) /******************************************************************/ /* Initialize zipped file processing. */ /******************************************************************/ -bool XMLDOCUMENT::InitZip(PGLOBAL g, char *entry) +bool XMLDOCUMENT::InitZip(PGLOBAL g, PCSZ entry) { #if defined(ZIP_SUPPORT) bool mul = (entry) ? strchr(entry, '*') || strchr(entry, '?') : false; @@ -173,7 +173,7 @@ void XMLNODE::Delete(PXNODE dnp) /******************************************************************/ /* Store a string in Buf, enventually reallocating it. 
*/ /******************************************************************/ -char *XMLNODE::BufAlloc(PGLOBAL g, char *p, int n) +char *XMLNODE::BufAlloc(PGLOBAL g, const char *p, int n) { if (Len < n) { Len = n; diff --git a/storage/connect/plgxml.h b/storage/connect/plgxml.h index 6870764c503..82629e4c7db 100644 --- a/storage/connect/plgxml.h +++ b/storage/connect/plgxml.h @@ -76,9 +76,9 @@ class XMLDOCUMENT : public BLOCK { virtual void SetNofree(bool b) = 0; // Methods - virtual bool Initialize(PGLOBAL, char *, bool) = 0; + virtual bool Initialize(PGLOBAL, PCSZ, bool) = 0; virtual bool ParseFile(PGLOBAL, char *) = 0; - virtual bool NewDoc(PGLOBAL, char *) = 0; + virtual bool NewDoc(PGLOBAL, PCSZ) = 0; virtual void AddComment(PGLOBAL, char *) = 0; virtual PXNODE GetRoot(PGLOBAL) = 0; virtual PXNODE NewRoot(PGLOBAL, char *) = 0; @@ -95,7 +95,7 @@ class XMLDOCUMENT : public BLOCK { // Utility bool MakeNSlist(PGLOBAL g); - bool InitZip(PGLOBAL g, char *entry); + bool InitZip(PGLOBAL g, PCSZ entry); char *GetMemDoc(PGLOBAL g, char *fn); void CloseZip(void); @@ -131,15 +131,15 @@ class XMLNODE : public BLOCK { virtual PXLIST SelectNodes(PGLOBAL, char *, PXLIST = NULL) = 0; virtual PXNODE SelectSingleNode(PGLOBAL, char *, PXNODE = NULL) = 0; virtual PXATTR GetAttribute(PGLOBAL, char *, PXATTR = NULL) = 0; - virtual PXNODE AddChildNode(PGLOBAL, char *, PXNODE = NULL) = 0; + virtual PXNODE AddChildNode(PGLOBAL, PCSZ, PXNODE = NULL) = 0; virtual PXATTR AddProperty(PGLOBAL, char *, PXATTR = NULL) = 0; - virtual void AddText(PGLOBAL, char *) = 0; + virtual void AddText(PGLOBAL, PCSZ) = 0; virtual void DeleteChild(PGLOBAL, PXNODE) = 0; protected: PXNODE NewChild(PXNODE ncp); void Delete(PXNODE dnp); - char *BufAlloc(PGLOBAL g, char *p, int n); + char *BufAlloc(PGLOBAL g, const char *p, int n); // Constructor XMLNODE(PXDOC dp); diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index d34b43a63d2..f0822526b98 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -139,10 +139,17 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) htrc("PlugInit: Language='%s'\n", ((!Language) ? "Null" : (char*)Language)); - if (!(g = (PGLOBAL)malloc(sizeof(GLOBAL)))) { - fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL)); - return NULL; - } else { + try { + g = new GLOBAL; + } catch (...) { + fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL)); + return NULL; + } // end try/catch + + //if (!(g = (PGLOBAL)malloc(sizeof(GLOBAL)))) { + // fprintf(stderr, MSG(GLOBAL_ERROR), (int)sizeof(GLOBAL)); + // return NULL; + // } else { g->Sarea = NULL; g->Createas = 0; g->Alchecked = 0; @@ -157,14 +164,14 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) /* Allocate the main work segment. 
*/ /*******************************************************************/ if (worksize && !(g->Sarea = PlugAllocMem(g, worksize))) { - char errmsg[256]; + char errmsg[MAX_STR]; sprintf(errmsg, MSG(WORK_AREA), g->Message); strcpy(g->Message, errmsg); g->Sarea_Size = 0; } else g->Sarea_Size = worksize; - } /* endif g */ + //} /* endif g */ g->jump_level = -1; /* New setting to allow recursive call of Plug */ return(g); @@ -183,7 +190,7 @@ int PlugExit(PGLOBAL g) if (g->Sarea) free(g->Sarea); - free(g); + delete g; return rc; } /* end of PlugExit */ @@ -510,7 +517,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) memp, size, pph->To_Free, pph->FreeBlk); if ((uint)size > pph->FreeBlk) { /* Not enough memory left in pool */ - char *pname = "Work"; + PCSZ pname = "Work"; sprintf(g->Message, "Not enough memory in %s area for request of %u (used=%d free=%d)", @@ -519,13 +526,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) if (trace) htrc("PlugSubAlloc: %s\n", g->Message); -#if defined(USE_TRY) throw 1234; -#else // !USE_TRY - /* Nothing we can do if longjmp is not initialized. */ - assert(g->jump_level >= 0); - longjmp(g->jumper[g->jump_level], 1); -#endif // !USE_TRY } /* endif size OS32 code */ /*********************************************************************/ diff --git a/storage/connect/preparse.h b/storage/connect/preparse.h index 2892a958bdd..f16624548fb 100644 --- a/storage/connect/preparse.h +++ b/storage/connect/preparse.h @@ -7,14 +7,14 @@ /* Struct of variables used by the date format pre-parser. */ /***********************************************************************/ typedef struct _datpar { - char *Format; // Points to format to decode + const char *Format; // Points to format to decode char *Curp; // Points to current parsing position char *InFmt; // Start of input format char *OutFmt; // Start of output format int Index[8]; // Indexes of date values int Num; // Number of values to retrieve int Flag; // 1: Input, 2: Output, 4: no output blank - int Outsize; // Size of output buffers + int Outsize; // Size of output buffers } DATPAR, *PDTP; /***********************************************************************/ diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index c0f3074769a..26cc27b1b01 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -1,11 +1,11 @@ /************* RelDef CPP Program Source Code File (.CPP) **************/ /* PROGRAM NAME: RELDEF */ /* ------------- */ -/* Version 1.7 */ +/* Version 1.6 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2004-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2004-2016 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -88,7 +88,7 @@ PTOS RELDEF::GetTopt(void) /***********************************************************************/ /* This function sets an integer table information. */ /***********************************************************************/ -bool RELDEF::SetIntCatInfo(PSZ what, int n) +bool RELDEF::SetIntCatInfo(PCSZ what, int n) { return Hc->SetIntegerOption(what, n); } // end of SetIntCatInfo @@ -96,7 +96,7 @@ bool RELDEF::SetIntCatInfo(PSZ what, int n) /***********************************************************************/ /* This function returns integer table information. 
*/ /***********************************************************************/ -int RELDEF::GetIntCatInfo(PSZ what, int idef) +int RELDEF::GetIntCatInfo(PCSZ what, int idef) { int n= Hc->GetIntegerOption(what); @@ -106,7 +106,7 @@ int RELDEF::GetIntCatInfo(PSZ what, int idef) /***********************************************************************/ /* This function returns Boolean table information. */ /***********************************************************************/ -bool RELDEF::GetBoolCatInfo(PSZ what, bool bdef) +bool RELDEF::GetBoolCatInfo(PCSZ what, bool bdef) { bool b= Hc->GetBooleanOption(what, bdef); @@ -116,9 +116,10 @@ bool RELDEF::GetBoolCatInfo(PSZ what, bool bdef) /***********************************************************************/ /* This function returns size catalog information. */ /***********************************************************************/ -int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef) +int RELDEF::GetSizeCatInfo(PCSZ what, PCSZ sdef) { - char * s, c; + char c; + PCSZ s; int i, n= 0; if (!(s= Hc->GetStringOption(what))) @@ -138,9 +139,9 @@ int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef) /***********************************************************************/ /* This function sets char table information in buf. */ /***********************************************************************/ -int RELDEF::GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size) +int RELDEF::GetCharCatInfo(PCSZ what, PCSZ sdef, char *buf, int size) { - char *s= Hc->GetStringOption(what); + PCSZ s= Hc->GetStringOption(what); strncpy(buf, ((s) ? s : sdef), size); return size; @@ -158,9 +159,10 @@ bool RELDEF::Partitioned(void) /* This function returns string table information. */ /* Default parameter is "*" to get the handler default. */ /***********************************************************************/ -char *RELDEF::GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef) +char *RELDEF::GetStringCatInfo(PGLOBAL g, PCSZ what, PCSZ sdef) { - char *name, *sval= NULL, *s= Hc->GetStringOption(what, sdef); + char *sval = NULL; + PCSZ name, s= Hc->GetStringOption(what, sdef); if (s) { if (!Hc->IsPartitioned() || @@ -168,12 +170,12 @@ char *RELDEF::GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef) && stricmp(what, "connect"))) sval= PlugDup(g, s); else - sval= s; + sval= (char*)s; } else if (!stricmp(what, "filename")) { // Return default file name - char *ftype= Hc->GetStringOption("Type", "*"); - int i, n; + PCSZ ftype= Hc->GetStringOption("Type", "*"); + int i, n; if (IsFileType(GetTypeID(ftype))) { name= Hc->GetPartName(); @@ -251,9 +253,9 @@ bool TABDEF::Define(PGLOBAL g, PCATLG cat, /***********************************************************************/ /* This function returns the database data path. */ /***********************************************************************/ -PSZ TABDEF::GetPath(void) +PCSZ TABDEF::GetPath(void) { - return (Database) ? (PSZ)Database : (Hc) ? Hc->GetDataPath() : NULL; + return (Database) ? Database : (Hc) ? Hc->GetDataPath() : NULL; } // end of GetPath /***********************************************************************/ @@ -279,7 +281,7 @@ int TABDEF::GetColCatInfo(PGLOBAL g) #if defined(__WIN__) // Offsets of HTML and DIR tables start from 0, DBF at 1 - loff = (tc == TAB_DBF) ? 1 : (tc == TAB_XML || tc == TAB_DIR) ? -1 : 0; + loff= (tc == TAB_DBF) ? 1 : (tc == TAB_XML || tc == TAB_DIR) ? -1 : 0; #else // !__WIN__ // Offsets of HTML tables start from 0, DIR and DBF at 1 loff = (tc == TAB_DBF || tc == TAB_DIR) ? 1 : (tc == TAB_XML) ? 
-1 : 0; @@ -615,9 +617,10 @@ bool OEMDEF::DefineAM(PGLOBAL g, LPCSTR, int) if (!*Module) Module = Subtype; - Desc = (char*)PlugSubAlloc(g, NULL, strlen(Module) - + strlen(Subtype) + 3); - sprintf(Desc, "%s(%s)", Module, Subtype); + char *desc = (char*)PlugSubAlloc(g, NULL, strlen(Module) + + strlen(Subtype) + 3); + sprintf(desc, "%s(%s)", Module, Subtype); + Desc = desc; return false; } // end of DefineAM diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h index 52a131dbf3d..8b19a413ade 100644 --- a/storage/connect/reldef.h +++ b/storage/connect/reldef.h @@ -42,13 +42,13 @@ class DllExport RELDEF : public BLOCK { // Relation definition block // Methods PTOS GetTopt(void); - bool GetBoolCatInfo(PSZ what, bool bdef); - bool SetIntCatInfo(PSZ what, int ival); + bool GetBoolCatInfo(PCSZ what, bool bdef); + bool SetIntCatInfo(PCSZ what, int ival); bool Partitioned(void); - int GetIntCatInfo(PSZ what, int idef); - int GetSizeCatInfo(PSZ what, PSZ sdef); - int GetCharCatInfo(PSZ what, PSZ sdef, char *buf, int size); - char *GetStringCatInfo(PGLOBAL g, PSZ what, PSZ sdef); + int GetIntCatInfo(PCSZ what, int idef); + int GetSizeCatInfo(PCSZ what, PCSZ sdef); + int GetCharCatInfo(PCSZ what, PCSZ sdef, char *buf, int size); + char *GetStringCatInfo(PGLOBAL g, PCSZ what, PCSZ sdef); virtual int Indexable(void) {return 0;} virtual bool Define(PGLOBAL g, PCATLG cat, LPCSTR name, LPCSTR schema, LPCSTR am) = 0; @@ -84,7 +84,7 @@ public: void SetNext(PTABDEF tdfp) {Next = tdfp;} int GetMultiple(void) {return Multiple;} int GetPseudo(void) {return Pseudo;} - PSZ GetPath(void); + PCSZ GetPath(void); //PSZ GetPath(void) // {return (Database) ? (PSZ)Database : Cat->GetDataPath();} bool SepIndex(void) {return GetBoolCatInfo("SepIndex", false);} @@ -105,8 +105,8 @@ public: protected: // Members - PSZ Schema; /* Table schema (for ODBC) */ - PSZ Desc; /* Table description */ + PCSZ Schema; /* Table schema (for ODBC) */ + PCSZ Desc; /* Table description */ uint Catfunc; /* Catalog function ID */ int Card; /* (max) number of rows in table */ int Elemt; /* Number of rows in blocks or rowset */ diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index d6651fb21a9..6f95eafe838 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -132,7 +132,8 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) bool map = (am && (*am == 'M' || *am == 'm')); LPCSTR dfm = (am && (*am == 'F' || *am == 'f')) ? "F" : (am && (*am == 'B' || *am == 'b')) ? "B" - : (am && !stricmp(am, "DBF")) ? "D" : "V"; + : (am && (*am == 'X' || *am == 'x')) ? "X" + : (am && !stricmp(am, "DBF")) ? "D" : "V"; if ((Zipped = GetBoolCatInfo("Zipped", false))) { Entry = GetStringCatInfo(g, "Entry", NULL); @@ -148,7 +149,8 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) GetCharCatInfo("Recfm", (PSZ)dfm, buf, sizeof(buf)); Recfm = (toupper(*buf) == 'F') ? RECFM_FIX : (toupper(*buf) == 'B') ? RECFM_BIN : - (toupper(*buf) == 'D') ? RECFM_DBF : RECFM_VAR; + (toupper(*buf) == 'X') ? RECFM_NAF : // MGO + (toupper(*buf) == 'D') ? 
RECFM_DBF : RECFM_VAR; Lrecl = GetIntCatInfo("Lrecl", 0); if (Recfm != RECFM_DBF) @@ -182,7 +184,7 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) /***********************************************************************/ bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename) { - char *ftype; + PCSZ ftype; switch (Recfm) { case RECFM_VAR: ftype = ".dop"; break; @@ -239,9 +241,9 @@ void DOSDEF::RemoveOptValues(PGLOBAL g) /***********************************************************************/ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) { - char *ftype; - char filename[_MAX_PATH]; - bool sep, rc = false; + PCSZ ftype; + char filename[_MAX_PATH]; + bool sep, rc = false; if (!To_Indx) return false; // No index @@ -1511,11 +1513,7 @@ PBF TDBDOS::CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv) if (n == 8 && ctype != TYPE_LIST) { // Should never happen strcpy(g->Message, "Block opt: bad constant"); -#if defined(USE_TRY) throw 99; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 99); -#endif // !USE_TRY } // endif Conv if (type[0] == 1) { @@ -1796,7 +1794,7 @@ err: /* Make a dynamic index. */ /***********************************************************************/ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) - { +{ int k; volatile bool dynamic; bool brc; @@ -1867,17 +1865,7 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) } else // Column contains same values as ROWID kxp = new(g) XXROW(this); -#if defined(USE_TRY) try { -#else // !USE_TRY - // Prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return true; - } // endif - - if (!setjmp(g->jumper[++g->jump_level])) { -#endif // !USE_TRY if (dynamic) { ResetBlockFilter(g); kxp->SetDynamic(dynamic); @@ -1902,7 +1890,6 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) } // endif brc -#if defined(USE_TRY) } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); @@ -1911,14 +1898,9 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) strcpy(g->Message, msg); brc = true; } // end catch -#else // !USE_TRY - } else - brc = true; - g->jump_level--; -#endif // !USE_TRY return brc; - } // end of InitialyzeIndex +} // end of InitialyzeIndex /***********************************************************************/ /* DOS GetProgMax: get the max value for progress information. */ @@ -2139,7 +2121,8 @@ bool TDBDOS::OpenDB(PGLOBAL g) return false; } // endif use - if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS) { + if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS + && Txfp->GetAmType() != TYPE_AM_MGO) { // Delete all lines. Not handled in MAP or block mode Txfp = new(g) DOSFAM((PDOSDEF)To_Def); Txfp->SetTdbp(this); @@ -2327,8 +2310,8 @@ void TDBDOS::CloseDB(PGLOBAL g) /***********************************************************************/ /* DOSCOL public constructor (also called by MAPCOL). 
*/ /***********************************************************************/ -DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) - : COLBLK(cdp, tp, i) +DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am) + : COLBLK(cdp, tp, i) { char *p; int prec = Format.Prec; @@ -2358,7 +2341,7 @@ DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) Dval = NULL; Buf = NULL; - if (txfp->Blocked && Opt && (cdp->GetMin() || cdp->GetDval())) { + if (txfp && txfp->Blocked && Opt && (cdp->GetMin() || cdp->GetDval())) { int nblk = txfp->GetBlock(); Clustered = (cdp->GetXdb2()) ? 2 : 1; @@ -2537,11 +2520,7 @@ void DOSCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); -#if defined(USE_TRY) throw 11; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 11); -#endif // !USE_TRY } // endif p = tdbp->To_Line + Deplac; @@ -2597,11 +2576,7 @@ void DOSCOL::ReadColumn(PGLOBAL g) break; default: sprintf(g->Message, MSG(BAD_RECFM), tdbp->Ftype); -#if defined(USE_TRY) throw 34; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 34); -#endif // !USE_TRY } // endswitch Ftype // Set null when applicable @@ -2710,11 +2685,7 @@ void DOSCOL::WriteColumn(PGLOBAL g) break; default: sprintf(g->Message, "Invalid field format for column %s", Name); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } // endswitch BufType p2 = Buf; @@ -2726,11 +2697,7 @@ void DOSCOL::WriteColumn(PGLOBAL g) if ((len = strlen(p2)) > field) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p2, Name, field); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (Dsp) for (i = 0; i < len; i++) if (p2[i] == '.') diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h index c404328a675..d175cc2da4d 100644 --- a/storage/connect/tabdos.h +++ b/storage/connect/tabdos.h @@ -39,9 +39,9 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ virtual PIXDEF GetIndx(void) {return To_Indx;} virtual void SetIndx(PIXDEF xdp) {To_Indx = xdp;} virtual bool IsHuge(void) {return Huge;} - PSZ GetFn(void) {return Fn;} - PSZ GetOfn(void) {return Ofn;} - PSZ GetEntry(void) {return Entry;} + PCSZ GetFn(void) {return Fn;} + PCSZ GetOfn(void) {return Ofn;} + PCSZ GetEntry(void) {return Entry;} bool GetMul(void) {return Mulentries;} bool GetAppend(void) {return Append;} void SetBlock(int block) { Block = block; } @@ -74,10 +74,10 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ //virtual bool Erase(char *filename); // Members - PSZ Fn; /* Path/Name of corresponding file */ - PSZ Ofn; /* Base Path/Name of matching index files*/ - PSZ Entry; /* Zip entry name or pattern */ - PSZ Pwd; /* Zip password */ + PCSZ Fn; /* Path/Name of corresponding file */ + PCSZ Ofn; /* Base Path/Name of matching index files*/ + PCSZ Entry; /* Zip entry name or pattern */ + PCSZ Pwd; /* Zip password */ PIXDEF To_Indx; /* To index definitions blocks */ RECFM Recfm; /* 0:VAR, 1:FIX, 2:BIN, 3:VCT, 6:DBF */ bool Mapped; /* 0: disk file, 1: memory mapped file */ @@ -133,8 +133,8 @@ class DllExport TDBDOS : public TDBASE { // Implementation virtual AMT GetAmType(void) {return Txfp->GetAmType();} - virtual PSZ GetFile(PGLOBAL) {return Txfp->To_File;} - virtual void SetFile(PGLOBAL, PSZ fn) {Txfp->To_File = fn;} + virtual PCSZ GetFile(PGLOBAL) {return Txfp->To_File;} + virtual void SetFile(PGLOBAL, PCSZ fn) {Txfp->To_File = fn;} 
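A second theme of the patch, visible in tabdos.h here and in the other headers, is narrowing string members and accessors from PSZ (char *) to PCSZ (const char *), so catalog strings and literals can be passed around without casts or accidental mutation. A minimal sketch of the idea follows; the typedefs are spelled out because their definitions are outside this diff, and the Table class is purely hypothetical.

// Minimal const-correctness sketch. The typedef names follow the CONNECT
// convention used in this patch; the Table class itself is invented.
#include <cstdio>

typedef char       *PSZ;     // writable string pointer
typedef const char *PCSZ;    // read-only string pointer

class Table {
public:
  explicit Table(PCSZ fn) : To_File(fn) {}

  // Returning PCSZ instead of PSZ stops callers from mutating a name
  // that may point into a catalog buffer or a string literal.
  PCSZ GetFile() const { return To_File; }
  void SetFile(PCSZ fn) { To_File = fn; }

private:
  PCSZ To_File;               // was PSZ before the conversion
};

int main() {
  Table t("customer.csv");    // a string literal now binds without a cast
  std::printf("%s\n", t.GetFile());
  // t.GetFile()[0] = 'X';    // would no longer compile: read-only access
  return 0;
}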
virtual void SetAbort(bool b) {Abort = b;} virtual RECFM GetFtype(void) {return Ftype;} virtual bool SkipHeader(PGLOBAL) {return false;} @@ -214,7 +214,7 @@ class DllExport DOSCOL : public COLBLK { friend class TDBFIX; public: // Constructors - DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am = "DOS"); + DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am = "DOS"); DOSCOL(DOSCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp index ad9716b6a10..8f6e610dfc2 100644 --- a/storage/connect/tabext.cpp +++ b/storage/connect/tabext.cpp @@ -35,9 +35,9 @@ /***********************************************************************/ /* CONDFIL Constructor. */ /***********************************************************************/ -CONDFIL::CONDFIL(const Item *cond, uint idx, AMT type) +CONDFIL::CONDFIL(uint idx, AMT type) { - Cond = cond; +//Cond = cond; Idx = idx; Type = type; Op = OP_XX; @@ -61,7 +61,7 @@ int CONDFIL::Init(PGLOBAL g, PHC hc) bool h; if (options) - alt = GetListOption(g, "Alias", options->oplist, NULL); + alt = (char*)GetListOption(g, "Alias", options->oplist, NULL); while (alt) { if (!(p = strchr(alt, '='))) { @@ -267,7 +267,7 @@ TDBEXT::TDBEXT(PTDBEXT tdbp) : TDB(tdbp) /******************************************************************/ /* Convert an UTF-8 string to latin characters. */ /******************************************************************/ -int TDBEXT::Decode(char *txt, char *buf, size_t n) +int TDBEXT::Decode(PCSZ txt, char *buf, size_t n) { uint dummy_errors; uint32 len = copy_and_convert(buf, n, &my_charset_latin1, @@ -285,16 +285,17 @@ int TDBEXT::Decode(char *txt, char *buf, size_t n) /***********************************************************************/ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) { - char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3]; + PCSZ schmp = NULL; + char *catp = NULL, buf[NAM_LEN * 3]; int len; - bool oom = false, first = true; + bool first = true; PTABLE tablep = To_Table; PCOL colp; if (Srcdef) { if ((catp = strstr(Srcdef, "%s"))) { char *fil1, *fil2; - PSZ ph = ((EXTDEF*)To_Def)->Phpos; + PCSZ ph = ((EXTDEF*)To_Def)->Phpos; if (!ph) ph = (strstr(catp + 2, "%s")) ? "WH" : "W"; @@ -341,7 +342,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) for (colp = Columns; colp; colp = colp->GetNext()) if (!colp->IsSpecial()) { if (!first) - oom |= Query->Append(", "); + Query->Append(", "); else first = false; @@ -350,11 +351,11 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) if (Quote) { // Put column name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); ((PEXTCOL)colp)->SetRank(++Ncol); } // endif colp @@ -362,13 +363,13 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) } else // !Columns can occur for queries such that sql count(*) from... // for which we will count the rows from sql * from... 
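These TDBEXT::MakeSQL hunks also drop the oom accumulator: each STRING::Append call now records overflow internally, and the builder asks IsTruncated() once at the end. The class below is a simplified, hypothetical stand-in for CONNECT's STRING, kept only to illustrate that "append freely, check truncation once" design.

// Hypothetical stand-in for the CONNECT STRING class; the fixed capacity
// and the example query are arbitrary.
#include <cstdio>
#include <cstring>

class QueryString {
public:
  explicit QueryString(size_t max) : Max(max), Len(0), Trunc(false) {
    Buf = new char[Max + 1];
    Buf[0] = '\0';
  }
  ~QueryString() { delete[] Buf; }

  // Append never reports an error to the caller; it just remembers
  // that the buffer overflowed.
  void Append(const char *s) {
    size_t n = std::strlen(s);
    if (Len + n > Max) { n = Max - Len; Trunc = true; }
    std::memcpy(Buf + Len, s, n);
    Len += n;
    Buf[Len] = '\0';
  }

  bool IsTruncated() const { return Trunc; }
  const char *GetStr() const { return Buf; }

private:
  char  *Buf;
  size_t Max, Len;
  bool   Trunc;
};

int main() {
  QueryString q(64);
  const char *quote = "`";            // identifier quoting, as in MakeSQL
  q.Append("SELECT ");
  q.Append(quote); q.Append("my column"); q.Append(quote);
  q.Append(" FROM ");
  q.Append(quote); q.Append("my table"); q.Append(quote);

  if (q.IsTruncated())                // one check replaces all the oom |= ...
    std::printf("MakeSQL: Out of memory\n");
  else
    std::printf("%s\n", q.GetStr());

  return 0;
}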
- oom |= Query->Append('*'); + Query->Append('*'); } else // SQL statement used to retrieve the size of the result - oom |= Query->Append("count(*)"); + Query->Append("count(*)"); - oom |= Query->Append(" FROM "); + Query->Append(" FROM "); if (Catalog && *Catalog) catp = Catalog; @@ -380,17 +381,17 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) schmp = Schema; if (catp) { - oom |= Query->Append(catp); + Query->Append(catp); if (schmp) { - oom |= Query->Append('.'); - oom |= Query->Append(schmp); + Query->Append('.'); + Query->Append(schmp); } // endif schmp - oom |= Query->Append('.'); + Query->Append('.'); } else if (schmp) { - oom |= Query->Append(schmp); - oom |= Query->Append('.'); + Query->Append(schmp); + Query->Append('.'); } // endif schmp // Table name can be encoded in UTF-8 @@ -398,18 +399,18 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) if (Quote) { // Put table name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); len = Query->GetLength(); if (To_CondFil) { if (Mode == MODE_READ) { - oom |= Query->Append(" WHERE "); - oom |= Query->Append(To_CondFil->Body); + Query->Append(" WHERE "); + Query->Append(To_CondFil->Body); len = Query->GetLength() + 1; } else len += (strlen(To_CondFil->Body) + 256); @@ -417,10 +418,11 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) } else len += ((Mode == MODE_READX) ? 256 : 1); - if (oom || Query->Resize(len)) { + if (Query->IsTruncated()) { strcpy(g->Message, "MakeSQL: Out of memory"); return true; - } // endif oom + } else + Query->Resize(len); if (trace) htrc("Query=%s\n", Query->GetStr()); @@ -434,7 +436,8 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) /***********************************************************************/ bool TDBEXT::MakeCommand(PGLOBAL g) { - char *p, *stmt, name[132], *body = NULL, *schmp = NULL; + PCSZ schmp = NULL; + char *p, *stmt, name[132], *body = NULL; char *qrystr = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 1); bool qtd = Quoted > 0; char q = qtd ? *Quote : ' '; @@ -562,7 +565,7 @@ int TDBEXT::GetProgMax(PGLOBAL g) /***********************************************************************/ /* EXTCOL public constructor. 
*/ /***********************************************************************/ -EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : COLBLK(cdp, tdbp, i) { if (cprec) { diff --git a/storage/connect/tabext.h b/storage/connect/tabext.h index 2ef20c89f2c..a7f5fb9d856 100644 --- a/storage/connect/tabext.h +++ b/storage/connect/tabext.h @@ -28,14 +28,14 @@ class ALIAS : public BLOCK { class CONDFIL : public BLOCK { public: // Constructor - CONDFIL(const Item *cond, uint idx, AMT type); + CONDFIL(uint idx, AMT type); // Functions int Init(PGLOBAL g, PHC hc); const char *Chk(const char *cln, bool *h); // Members - const Item *Cond; +//const Item *Cond; AMT Type; uint Idx; OPVAL Op; @@ -60,10 +60,10 @@ public: // Implementation virtual const char *GetType(void) { return "EXT"; } - inline PSZ GetTabname(void) { return Tabname; } - inline PSZ GetTabschema(void) { return Tabschema; } - inline PSZ GetUsername(void) { return Username; }; - inline PSZ GetPassword(void) { return Password; }; + inline PCSZ GetTabname(void) { return Tabname; } + inline PCSZ GetTabschema(void) { return Tabschema; } + inline PCSZ GetUsername(void) { return Username; }; + inline PCSZ GetPassword(void) { return Password; }; inline PSZ GetTabcat(void) { return Tabcat; } inline PSZ GetSrcdef(void) { return Srcdef; } inline char GetSep(void) { return (Sep) ? *Sep : 0; } @@ -76,10 +76,10 @@ public: protected: // Members - PSZ Tabname; /* External table name */ - PSZ Tabschema; /* External table schema */ - PSZ Username; /* User connect name */ - PSZ Password; /* Password connect info */ + PCSZ Tabname; /* External table name */ + PCSZ Tabschema; /* External table schema */ + PCSZ Username; /* User connect name */ + PCSZ Password; /* Password connect info */ PSZ Tabcat; /* External table catalog */ PSZ Tabtyp; /* Catalog table type */ PSZ Colpat; /* Catalog column pattern */ @@ -115,7 +115,7 @@ public: virtual bool IsRemote(void) { return true; } // Methods - virtual PSZ GetServer(void) { return "Remote"; } + virtual PCSZ GetServer(void) { return "Remote"; } virtual int GetRecpos(void); // Database routines @@ -127,19 +127,19 @@ protected: virtual bool MakeSQL(PGLOBAL g, bool cnt); //virtual bool MakeInsert(PGLOBAL g); virtual bool MakeCommand(PGLOBAL g); - int Decode(char *utf, char *buf, size_t n); + int Decode(PCSZ utf, char *buf, size_t n); // Members PQRYRES Qrp; // Points to storage result PSTRG Query; // Constructed SQL query - char *TableName; // Points to ODBC table name - char *Schema; // Points to ODBC table Schema - char *User; // User connect info - char *Pwd; // Password connect info + PCSZ TableName; // Points to ODBC table name + PCSZ Schema; // Points to ODBC table Schema + PCSZ User; // User connect info + PCSZ Pwd; // Password connect info char *Catalog; // Points to ODBC table Catalog char *Srcdef; // The source table SQL definition char *Count; // Points to count(*) SQL statement - //char *Where; // Points to local where clause + //char *Where; // Points to local where clause char *Quote; // The identifier quoting character char *MulConn; // Used for multiple ODBC tables char *DBQ; // The address part of Connect string @@ -170,7 +170,7 @@ class DllExport EXTCOL : public COLBLK { friend class TDBEXT; public: // Constructor - EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am); + EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am); EXTCOL(PEXTCOL colp, PTDB tdbp); // Constructor used in copy process // Implementation 
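In the tabfix.cpp and tabfmt.cpp hunks that follow, every column Read/Write error that used to longjmp now simply throws a small integer code after formatting g->Message, and a single catch site reports it. Below is a self-contained sketch of that convention; the Column class, the message text and the code value are illustrative, not the real CONNECT classes.

// Standalone sketch of the error-signalling convention used by the column
// Read/Write methods after this patch: build a message, then throw an int.
#include <cstdio>
#include <cstring>

struct GLOBAL { char Message[512]; };

class Column {
public:
  Column(const char *name, int width) : Name(name), Width(width) {}

  void WriteColumn(GLOBAL *g, const char *value) {
    if ((int)std::strlen(value) > Width) {
      std::snprintf(g->Message, sizeof(g->Message),
                    "Value %s too long for column %s", value, Name);
      throw 31;                       // same style as the patch's "throw 31"
    }
    std::printf("writing %s=%s\n", Name, value);
  }

private:
  const char *Name;
  int Width;
};

int main() {
  GLOBAL g;
  Column col("NAME", 8);

  try {
    col.WriteColumn(&g, "short");
    col.WriteColumn(&g, "definitely too long");
  } catch (int rc) {                  // one catch point replaces the jumper
    std::printf("Error %d: %s\n", rc, g.Message);
    return rc;
  }

  return 0;
}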
diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp index fb25ab3d5c8..a78d5861e53 100644 --- a/storage/connect/tabfix.cpp +++ b/storage/connect/tabfix.cpp @@ -373,7 +373,7 @@ int TDBFIX::WriteDB(PGLOBAL g) /***********************************************************************/ /* BINCOL public constructor. */ /***********************************************************************/ -BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) +BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am) : DOSCOL(g, cdp, tp, cp, i, am) { char c, *fmt = cdp->GetFmt(); @@ -411,11 +411,7 @@ BINCOL::BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am) case 'D': M = sizeof(double); break; default: sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); -#if defined(USE_TRY) throw 11; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 11); -#endif // !USE_TRY } // endswitch Fmt } else if (IsTypeChar(Buf_Type)) @@ -490,11 +486,7 @@ void BINCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); -#if defined(USE_TRY) throw 11; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 11); -#endif // !USE_TRY } // endif p = tdbp->To_Line + Deplac; @@ -553,11 +545,7 @@ void BINCOL::ReadColumn(PGLOBAL g) break; default: sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); -#if defined(USE_TRY) throw 11; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 11); -#endif // !USE_TRY } // endswitch Fmt // Set null when applicable @@ -607,11 +595,7 @@ void BINCOL::WriteColumn(PGLOBAL g) } else if (Value->GetBinValue(p, Long, Status)) { sprintf(g->Message, MSG(BIN_F_TOO_LONG), Name, Value->GetSize(), Long); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } // endif p break; @@ -620,11 +604,7 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > 32767LL || n < -32768LL) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (Status) Value->GetValueNonAligned(p, (short)n); @@ -634,11 +614,7 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > 255LL || n < -256LL) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (Status) *p = (char)n; @@ -648,11 +624,7 @@ void BINCOL::WriteColumn(PGLOBAL g) if (n > INT_MAX || n < INT_MIN) { sprintf(g->Message, MSG(VALUE_TOO_BIG), n, Name); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (Status) Value->GetValueNonAligned(p, (int)n); @@ -676,11 +648,7 @@ void BINCOL::WriteColumn(PGLOBAL g) case 'C': // Characters if ((n = (signed)strlen(Value->GetCharString(Buf))) > Long) { sprintf(g->Message, MSG(BIN_F_TOO_LONG), Name, (int) n, Long); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } // endif n if (Status) { @@ -692,11 +660,7 @@ void BINCOL::WriteColumn(PGLOBAL g) break; default: sprintf(g->Message, MSG(BAD_BIN_FMT), Fmt, Name); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } // endswitch Fmt if (Eds && Status) { diff --git a/storage/connect/tabfix.h b/storage/connect/tabfix.h index 4b9f9689992..53c0af1c422 100644 --- a/storage/connect/tabfix.h +++ b/storage/connect/tabfix.h @@ -65,7 +65,7 @@ class DllExport 
BINCOL : public DOSCOL { friend class TDBFIX; public: // Constructors - BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PSZ am = "BIN"); + BINCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am = "BIN"); BINCOL(BINCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -108,7 +108,7 @@ class TDBDCL : public TDBCAT { {return DBFColumns(g, ((PTABDEF)To_Def)->GetPath(), Fn, false);} // Members - char *Fn; // The DBF file (path) name + PCSZ Fn; // The DBF file (path) name }; // end of class TDBOCL diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index ac85165d99f..516601a5eb4 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -81,7 +81,7 @@ USETEMP UseTemp(void); /* of types (TYPE_STRING < TYPE_DOUBLE < TYPE_INT) (1 < 2 < 7). */ /* If these values are changed, this will have to be revisited. */ /***********************************************************************/ -PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info) +PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; @@ -153,7 +153,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info) tdp->Lrecl = 4096; tdp->Multiple = GetIntegerTableOption(g, topt, "Multiple", 0); - p = GetStringTableOption(g, topt, "Separator", ","); + p = (char*)GetStringTableOption(g, topt, "Separator", ","); tdp->Sep = (strlen(p) == 2 && p[0] == '\\' && p[1] == 't') ? '\t' : *p; #if defined(__WIN__) @@ -167,7 +167,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info) sep = tdp->Sep; tdp->Quoted = GetIntegerTableOption(g, topt, "Quoted", -1); - p = GetStringTableOption(g, topt, "Qchar", ""); + p = (char*)GetStringTableOption(g, topt, "Qchar", ""); tdp->Qot = *p; if (tdp->Qot && tdp->Quoted < 0) @@ -1435,11 +1435,7 @@ void CSVCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); -#if defined(USE_TRY) throw 34; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 34); -#endif // !USE_TRY } // endif if (tdbp->Mode != MODE_UPDATE) { @@ -1457,11 +1453,7 @@ void CSVCOL::ReadColumn(PGLOBAL g) Long = colen; // Restore column length sprintf(g->Message, MSG(FLD_TOO_LNG_FOR), Fldnum + 1, Name, To_Tdb->RowNumber(g), tdbp->GetFile(g)); -#if defined(USE_TRY) throw 34; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 34); -#endif // !USE_TRY } // endif Long // Now do the reading @@ -1524,11 +1516,7 @@ void CSVCOL::WriteColumn(PGLOBAL g) if ((signed)strlen(p) > flen) { sprintf(g->Message, MSG(BAD_FLD_LENGTH), Name, p, flen, tdbp->RowNumber(g), tdbp->GetFile(g)); -#if defined(USE_TRY) throw 34; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 34); -#endif // !USE_TRY } else if (Dsp) for (int i = 0; p[i]; i++) if (p[i] == '.') @@ -1544,11 +1532,7 @@ void CSVCOL::WriteColumn(PGLOBAL g) if (Fldnum < 0) { // This can happen for wrong offset value in XDB files sprintf(g->Message, MSG(BAD_FIELD_RANK), Fldnum + 1, Name); -#if defined(USE_TRY) throw 34; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 34); -#endif // !USE_TRY } else strncpy(tdbp->Field[Fldnum], p, flen); diff --git a/storage/connect/tabfmt.h b/storage/connect/tabfmt.h index e5655435be7..396bba568ff 100644 --- a/storage/connect/tabfmt.h +++ b/storage/connect/tabfmt.h @@ -13,7 +13,7 @@ typedef class TDBFMT *PTDBFMT; /***********************************************************************/ /* Functions used externally. 
*/ /***********************************************************************/ -PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info); +PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info); /***********************************************************************/ /* CSV table. */ @@ -21,7 +21,7 @@ PQRYRES CSVColumns(PGLOBAL g, char *dp, PTOS topt, bool info); class DllExport CSVDEF : public DOSDEF { /* Logical table description */ friend class TDBCSV; friend class TDBCCL; - friend PQRYRES CSVColumns(PGLOBAL, char *, PTOS, bool); + friend PQRYRES CSVColumns(PGLOBAL, PCSZ, PTOS, bool); public: // Constructor CSVDEF(void); @@ -53,7 +53,7 @@ public: class DllExport TDBCSV : public TDBDOS { friend class CSVCOL; friend class MAPFAM; - friend PQRYRES CSVColumns(PGLOBAL, char *, PTOS, bool); + friend PQRYRES CSVColumns(PGLOBAL, PCSZ, PTOS, bool); public: // Constructor TDBCSV(PCSVDEF tdp, PTXF txfp); diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index 48427b9620c..7c82a2fc138 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -365,10 +365,11 @@ PCOL TDBJDBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /***********************************************************************/ bool TDBJDBC::MakeInsert(PGLOBAL g) { - char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3]; + PCSZ schmp = NULL; + char *catp = NULL, buf[NAM_LEN * 3]; int len = 0; uint pos; - bool b = false, oom = false; + bool b = false; PTABLE tablep = To_Table; PCOL colp; @@ -405,32 +406,32 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) Query = new(g)STRING(g, len, "INSERT INTO "); if (catp) { - oom |= Query->Append(catp); + Query->Append(catp); if (schmp) { - oom |= Query->Append('.'); - oom |= Query->Append(schmp); + Query->Append('.'); + Query->Append(schmp); } // endif schmp - oom |= Query->Append('.'); + Query->Append('.'); } else if (schmp) { - oom |= Query->Append(schmp); - oom |= Query->Append('.'); + Query->Append(schmp); + Query->Append('.'); } // endif schmp if (Quote) { // Put table name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); - oom |= Query->Append('('); + Query->Append('('); for (colp = Columns; colp; colp = colp->GetNext()) { if (b) - oom |= Query->Append(", "); + Query->Append(", "); else b = true; @@ -439,15 +440,15 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) if (Quote) { // Put column name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); } // endfor colp - if ((oom |= Query->Append(") VALUES ("))) { + if ((Query->Append(") VALUES ("))) { strcpy(g->Message, "MakeInsert: Out of memory"); return true; } else // in case prepared statement fails @@ -455,9 +456,9 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) // Make prepared statement for (int i = 0; i < Nparm; i++) - oom |= Query->Append("?,"); + Query->Append("?,"); - if (oom) { + if (Query->IsTruncated()) { strcpy(g->Message, "MakeInsert: Out of memory"); return true; } else @@ -750,7 +751,7 @@ bool TDBJDBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0); *To_CondFil->Body= 0; - if ((To_CondFil = hc->CheckCond(g, To_CondFil, 
To_CondFil->Cond))) + if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond))) PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1); } // endif active_index @@ -864,7 +865,6 @@ int TDBJDBC::WriteDB(PGLOBAL g) // an insert query for each line to insert uint len = Query->GetLength(); char buf[64]; - bool oom = false; // Make the Insert command value list for (PCOL colp = Columns; colp; colp = colp->GetNext()) { @@ -872,28 +872,28 @@ int TDBJDBC::WriteDB(PGLOBAL g) char *s = colp->GetValue()->GetCharString(buf); if (colp->GetResultType() == TYPE_STRING) - oom |= Query->Append_quoted(s); + Query->Append_quoted(s); else if (colp->GetResultType() == TYPE_DATE) { DTVAL *dtv = (DTVAL*)colp->GetValue(); if (dtv->IsFormatted()) - oom |= Query->Append_quoted(s); + Query->Append_quoted(s); else - oom |= Query->Append(s); + Query->Append(s); } else - oom |= Query->Append(s); + Query->Append(s); } else - oom |= Query->Append("NULL"); + Query->Append("NULL"); - oom |= Query->Append(','); + Query->Append(','); } // endfor colp - if (unlikely(oom)) { + if (unlikely(Query->IsTruncated())) { strcpy(g->Message, "WriteDB: Out of memory"); return RC_FX; - } // endif oom + } // endif Query Query->RepLast(')'); @@ -967,7 +967,7 @@ void TDBJDBC::CloseDB(PGLOBAL g) /***********************************************************************/ /* JDBCCOL public constructor. */ /***********************************************************************/ -JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : EXTCOL(cdp, tdbp, cprec, i, am) { } // end of JDBCCOL constructor @@ -1212,8 +1212,8 @@ int TDBXJDC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ /* JSRCCOL public constructor. */ /***********************************************************************/ -JSRCCOL::JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) - : JDBCCOL(cdp, tdbp, cprec, i, am) +JSRCCOL::JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) + : JDBCCOL(cdp, tdbp, cprec, i, am) { // Set additional JDBC access method information for column. 
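The TDBJDBC::MakeInsert hunk above assembles a prepared-statement template of the form INSERT INTO tab(c1, c2) VALUES (?, ?), quoting identifiers that may contain blanks and replacing the trailing comma with a closing parenthesis. The sketch below reproduces that shape with std::string; it mimics what the Query/RepLast calls do but is not the actual CONNECT code.

// Illustrative only: builds the same kind of INSERT template that
// TDBJDBC::MakeInsert produces, using std::string instead of STRING.
#include <cstdio>
#include <string>
#include <vector>

static std::string MakeInsertTemplate(const std::string &table,
                                      const std::vector<std::string> &cols,
                                      char quote = '`') {
  std::string q = "INSERT INTO ";
  q += quote; q += table; q += quote;   // quote names that may contain blanks
  q += '(';

  for (size_t i = 0; i < cols.size(); i++) {
    if (i) q += ", ";
    q += quote; q += cols[i]; q += quote;
  }

  q += ") VALUES (";

  for (size_t i = 0; i < cols.size(); i++)
    q += "?,";                          // one placeholder per column

  if (!cols.empty())
    q.back() = ')';                     // replace the last ',' like RepLast(')')
  else
    q += ')';

  return q;
}

int main() {
  std::vector<std::string> cols = {"id", "customer name", "amount"};
  std::printf("%s\n", MakeInsertTemplate("orders", cols).c_str());
  // prints: INSERT INTO `orders`(`id`, `customer name`, `amount`) VALUES (?,?,?)
  return 0;
}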
Flag = cdp->GetOffset(); diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h index 5a59b6c2df8..7c14783285f 100644 --- a/storage/connect/tabjdbc.h +++ b/storage/connect/tabjdbc.h @@ -67,7 +67,7 @@ public: virtual PTDB Clone(PTABS t); virtual bool SetRecpos(PGLOBAL g, int recpos); virtual void ResetSize(void); - virtual PSZ GetServer(void) { return "JDBC"; } + virtual PCSZ GetServer(void) { return "JDBC"; } virtual int Indexable(void) { return 2; } // Database routines @@ -103,7 +103,7 @@ class JDBCCOL : public EXTCOL { friend class TDBJDBC; public: // Constructors - JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC"); + JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "JDBC"); JDBCCOL(JDBCCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -165,7 +165,7 @@ class JSRCCOL : public JDBCCOL { friend class TDBXJDC; public: // Constructors - JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "JDBC"); + JSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "JDBC"); // Implementation virtual int GetAmType(void) {return TYPE_AM_JDBC;} @@ -210,9 +210,9 @@ protected: virtual PQRYRES GetResult(PGLOBAL g); // Members - char *Schema; // Points to schema name or NULL - char *Tab; // Points to JDBC table name or pattern - char *Tabtype; // Points to JDBC table type + PCSZ Schema; // Points to schema name or NULL + PCSZ Tab; // Points to JDBC table name or pattern + PCSZ Tabtype; // Points to JDBC table type JDBCPARM Ops; // Additional parameters }; // end of class TDBJTB @@ -229,7 +229,7 @@ protected: virtual PQRYRES GetResult(PGLOBAL g); // Members - char *Colpat; // Points to catalog column pattern + PCSZ Colpat; // Points to catalog column pattern }; // end of class TDBJDBCL #endif // !NJDBC diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 22f9b592952..edfe710bc92 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -63,7 +63,7 @@ typedef struct _jncol { /* JSONColumns: construct the result blocks containing the description */ /* of all the columns of a table contained inside a JSON file. */ /***********************************************************************/ -PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) +PQRYRES JSONColumns(PGLOBAL g, char *db, char *dsn, PTOS topt, bool info) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT, TYPE_SHORT, TYPE_STRING}; @@ -112,7 +112,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) #endif // ZIP_SUPPORT tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL); - if (!tdp->Fn) { + if (!tdp->Fn && !dsn) { strcpy(g->Message, MSG(MISSING_FNAME)); return NULL; } // endif Fn @@ -120,7 +120,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) if (!(tdp->Database = SetPath(g, db))) return NULL; - tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); + tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 
1 : 0; tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2); @@ -153,7 +153,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) if (tdp->Zipped) { #if defined(ZIP_SUPPORT) - tjnp = new(g)TDBJSN(tdp, new(g)UNZFAM(tdp)); + tjnp = new(g)TDBJSN(tdp, new(g) UNZFAM(tdp)); #else // !ZIP_SUPPORT sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); return NULL; @@ -264,8 +264,13 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) break; if (jcp) { - if (jcp->Type != jcol.Type) - jcp->Type = TYPE_STRING; + if (jcp->Type != jcol.Type) { + if (jcp->Type == TYPE_UNKNOWN) + jcp->Type = jcol.Type; + else if (jcol.Type != TYPE_UNKNOWN) + jcp->Type = TYPE_STRING; + + } // endif Type if (*fmt && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) { jcp->Fmt = PlugDup(g, fmt); @@ -338,7 +343,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) skipit: if (trace) - htrc("CSVColumns: n=%d len=%d\n", n, length[0]); + htrc("JSONColumns: n=%d len=%d\n", n, length[0]); /*********************************************************************/ /* Allocate the structures used to refer to the result set. */ @@ -419,7 +424,7 @@ bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff) Pretty = GetIntCatInfo("Pretty", 2); Limit = GetIntCatInfo("Limit", 10); Base = GetIntCatInfo("Base", 0) ? 1 : 0; - return DOSDEF::DefineAM(g, "DOS", poff); + return DOSDEF::DefineAM(g, (Uri ? "XMGO" : "DOS"), poff); } // end of DefineAM /***********************************************************************/ @@ -465,7 +470,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ"); return NULL; #endif // !GZ_SUPPORT - } else if (map) + } else if (map) txfp = new(g) MAPFAM(this); else txfp = new(g) DOSFAM(this); @@ -488,7 +493,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) } else { if (Zipped) { #if defined(ZIP_SUPPORT) - if (m == MODE_READ || m == MODE_UPDATE) { + if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (m == MODE_INSERT) { strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0"); @@ -537,7 +542,7 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) } else { Jmode = MODE_OBJECT; Objname = NULL; - Xcol = NULL; + Xcol = NULL; Limit = 1; Pretty = 0; B = 0; @@ -697,6 +702,9 @@ bool TDBJSN::OpenDB(PGLOBAL g) return true; } // endswitch Jmode + if (Xcol && Txfp->GetAmType() != TYPE_AM_MGO) + To_Filter = NULL; // Imcompatible + } // endif Use return TDBDOS::OpenDB(g); @@ -867,24 +875,21 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) } // end of PrepareWriting - /***********************************************************************/ - /* WriteDB: Data Base write routine for DOS access method. */ - /***********************************************************************/ - int TDBJSN::WriteDB(PGLOBAL g) +/***********************************************************************/ +/* WriteDB: Data Base write routine for DOS access method. */ +/***********************************************************************/ +int TDBJSN::WriteDB(PGLOBAL g) { int rc = TDBDOS::WriteDB(g); #if USE_G - if (rc == RC_FX) - strcpy(g->Message, G->Message); - PlugSubSet(G, G->Sarea, G->Sarea_Size); #endif Row->Clear(); return rc; } // end of WriteDB - /* ---------------------------- JSONCOL ------------------------------ */ +/* ---------------------------- JSONCOL ------------------------------ */ /***********************************************************************/ /* JSONCOL public constructor. 
*/ @@ -1148,12 +1153,62 @@ bool JSONCOL::ParseJpath(PGLOBAL g) return false; } // end of ParseJpath +/***********************************************************************/ +/* Get Jpath converted to Mongo path. */ +/***********************************************************************/ +char *JSONCOL::GetJpath(PGLOBAL g, bool proj) +{ + if (Jpath) { + char *p1, *p2, *mgopath; + int i = 0; + + if (strcmp(Jpath, "*")) + mgopath = PlugDup(g, Jpath); + else + return NULL; + + for (p1 = p2 = mgopath; *p1; p1++) + if (i) { // Inside [] + if (isdigit(*p1)) { + if (!proj) + *p2++ = *p1; + + i = 2; + } else if (*p1 == ']' && i == 2) { + if (proj && *(p1 + 1) == ':') + p1++; + + i = 0; + } else if (proj) + i = 2; + else + return NULL; + + } else switch (*p1) { + case ':': *p2++ = '.'; break; + case '[': i = 1; break; + case '*': + if (*(p2 - 1) == '.' && !*(p1 + 1)) { + p2--; // Suppress last :* + break; + } // endif p2 + + default: *p2++ = *p1; break; + } // endswitch p1; + + *p2 = 0; + return mgopath; + } else + return NULL; + +} // end of GetJpath + /***********************************************************************/ /* MakeJson: Serialize the json item and set value to it. */ /***********************************************************************/ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) - { - if (Value->IsTypeNum()) { +{ + if (Value->IsTypeNum()) { strcpy(g->Message, "Cannot make Json for a numeric column"); Value->Reset(); } else @@ -1174,7 +1229,7 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) case TYPE_BINT: case TYPE_DBL: case TYPE_DATE: - vp->SetValue_pval(val->GetValue()); + vp->SetValue_pval(val->GetValue()); break; case TYPE_BOOL: if (vp->IsTypeNum()) @@ -1193,11 +1248,14 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) // } // endif Type default: + vp->Reset(); vp->SetNull(true); } // endswitch Type - } else + } else { + vp->Reset(); vp->SetNull(true); + } // endif val } // end of SetJsonValue @@ -1210,8 +1268,8 @@ void JSONCOL::ReadColumn(PGLOBAL g) Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0)); // Set null when applicable - if (Nullable) - Value->SetNull(Value->IsNull()); + if (!Nullable) + Value->SetNull(false); } // end of ReadColumn @@ -1292,11 +1350,7 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) { strcpy(g->Message, "Logical error expanding array"); -#if defined(USE_TRY) throw 666; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 666); -#endif // !USE_TRY } // endif jvp if (n < Nod - 1 && jvp->GetJson()) { @@ -1482,11 +1536,7 @@ void JSONCOL::WriteColumn(PGLOBAL g) { if (Xpd && Tjp->Pretty < 2) { strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); -#if defined(USE_TRY) throw 666; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 666); -#endif // !USE_TRY } // endif Xpd /*********************************************************************/ @@ -1521,11 +1571,7 @@ void JSONCOL::WriteColumn(PGLOBAL g) if (!(jsp = ParseJson(G, s, (int)strlen(s)))) { strcpy(g->Message, s); -#if defined(USE_TRY) throw 666; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 666); -#endif // !USE_TRY } // endif jsp if (arp) { @@ -1876,8 +1922,11 @@ bool TDBJSON::OpenDB(PGLOBAL g) return true; } // endswitch Jmode - Use = USE_OPEN; - return false; + if (Xcol) + To_Filter = NULL; // Imcompatible + + Use = USE_OPEN; + return false; } // end of OpenDB /***********************************************************************/ @@ -1887,7 +1936,7 
@@ int TDBJSON::ReadDB(PGLOBAL) { int rc; - N++; + N++; if (NextSame) { SameRow = NextSame; @@ -1999,6 +2048,7 @@ TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp) { Topt = tdp->GetTopt(); Db = (char*)tdp->GetDB(); + Dsn = (char*)tdp->Uri; } // end of TDBJCL constructor /***********************************************************************/ @@ -2006,7 +2056,7 @@ TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp) /***********************************************************************/ PQRYRES TDBJCL::GetResult(PGLOBAL g) { - return JSONColumns(g, Db, Topt, false); + return JSONColumns(g, Db, Dsn, Topt, false); } // end of GetResult /* --------------------------- End of json --------------------------- */ diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 924ce387900..2c8f226b5ca 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -1,7 +1,7 @@ /*************** tabjson H Declares Source Code File (.H) **************/ -/* Name: tabjson.h Version 1.1 */ +/* Name: tabjson.h Version 1.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2015 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */ /* */ /* This file contains the JSON classes declares. */ /***********************************************************************/ @@ -32,12 +32,12 @@ typedef struct _jnode { /***********************************************************************/ /* JSON table. */ /***********************************************************************/ -class JSONDEF : public DOSDEF { /* Table description */ +class DllExport JSONDEF : public DOSDEF { /* Table description */ friend class TDBJSON; friend class TDBJSN; friend class TDBJCL; - friend PQRYRES JSONColumns(PGLOBAL, char*, PTOS, bool); - public: + friend PQRYRES JSONColumns(PGLOBAL, char*, char*, PTOS, bool); +public: // Constructor JSONDEF(void); @@ -51,13 +51,14 @@ class JSONDEF : public DOSDEF { /* Table description */ protected: // Members JMODE Jmode; /* MODE_OBJECT by default */ - char *Objname; /* Name of first level object */ - char *Xcol; /* Name of expandable column */ + PCSZ Objname; /* Name of first level object */ + PCSZ Xcol; /* Name of expandable column */ int Limit; /* Limit of multiple values */ int Pretty; /* Depends on file structure */ int Level; /* Used for catalog table */ - int Base; /* Tne array index base */ + int Base; /* The array index base */ bool Strict; /* Strict syntax checking */ + const char *Uri; /* MongoDB connection URI */ }; // end of JSONDEF /* -------------------------- TDBJSN class --------------------------- */ @@ -66,7 +67,7 @@ class JSONDEF : public DOSDEF { /* Table description */ /* This is the JSN Access Method class declaration. */ /* The table is a DOS file, each record being a JSON object. */ /***********************************************************************/ -class TDBJSN : public TDBDOS { +class DllExport TDBJSN : public TDBDOS { friend class JSONCOL; friend class JSONDEF; public: @@ -87,6 +88,8 @@ public: virtual PCOL InsertSpecialColumn(PCOL colp); virtual int RowNumber(PGLOBAL g, bool b = FALSE) {return (b) ? 
M : N;} + virtual bool CanBeFiltered(void) + {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;} // Database routines virtual int Cardinality(PGLOBAL g); @@ -107,8 +110,8 @@ public: PJSON Val; // The value of the current row PJCOL Colp; // The multiple column JMODE Jmode; // MODE_OBJECT by default - char *Objname; // The table object name - char *Xcol; // Name of expandable column + PCSZ Objname; // The table object name + PCSZ Xcol; // Name of expandable column int Fpos; // The current row index int N; // The current Rownum int M; // Index of multiple value @@ -127,9 +130,10 @@ public: /***********************************************************************/ /* Class JSONCOL: JSON access method column descriptor. */ /***********************************************************************/ -class JSONCOL : public DOSCOL { +class DllExport JSONCOL : public DOSCOL { friend class TDBJSN; friend class TDBJSON; + friend class MGOFAM; public: // Constructors JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i); @@ -139,20 +143,21 @@ class JSONCOL : public DOSCOL { virtual int GetAmType(void) {return Tjp->GetAmType();} // Methods - virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); - bool ParseJpath(PGLOBAL g); - virtual void ReadColumn(PGLOBAL g); - virtual void WriteColumn(PGLOBAL g); + virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); + bool ParseJpath(PGLOBAL g); + char *GetJpath(PGLOBAL g, bool proj); + virtual void ReadColumn(PGLOBAL g); + virtual void WriteColumn(PGLOBAL g); protected: - bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b); - bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm); - PVAL GetColumnValue(PGLOBAL g, PJSON row, int i); - PVAL ExpandArray(PGLOBAL g, PJAR arp, int n); - PVAL CalculateArray(PGLOBAL g, PJAR arp, int n); - PVAL MakeJson(PGLOBAL g, PJSON jsp); - void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); - PJSON GetRow(PGLOBAL g); + bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b); + bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm); + PVAL GetColumnValue(PGLOBAL g, PJSON row, int i); + PVAL ExpandArray(PGLOBAL g, PJAR arp, int n); + PVAL CalculateArray(PGLOBAL g, PJAR arp, int n); + PVAL MakeJson(PGLOBAL g, PJSON jsp); + void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); + PJSON GetRow(PGLOBAL g); // Default constructor not to be used JSONCOL(void) {} @@ -174,7 +179,7 @@ class JSONCOL : public DOSCOL { /***********************************************************************/ /* This is the JSON Access Method class declaration. */ /***********************************************************************/ -class TDBJSON : public TDBJSN { +class DllExport TDBJSON : public TDBJSN { friend class JSONDEF; friend class JSONCOL; public: @@ -221,7 +226,7 @@ class TDBJSON : public TDBJSN { /***********************************************************************/ /* This is the class declaration for the JSON catalog table. 
*/ /***********************************************************************/ -class TDBJCL : public TDBCAT { +class DllExport TDBJCL : public TDBCAT { public: // Constructor TDBJCL(PJDEF tdp); @@ -233,4 +238,5 @@ class TDBJCL : public TDBCAT { // Members PTOS Topt; char *Db; + char *Dsn; }; // end of class TDBJCL diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp index 916449be6c6..22fb09dbb86 100644 --- a/storage/connect/table.cpp +++ b/storage/connect/table.cpp @@ -47,6 +47,7 @@ TDB::TDB(PTABDEF tdp) : Tdb_No(++Tnum) To_Orig = NULL; To_Filter = NULL; To_CondFil = NULL; + Cond = NULL; Next = NULL; Name = (tdp) ? tdp->GetName() : NULL; To_Table = NULL; @@ -68,6 +69,7 @@ TDB::TDB(PTDB tdbp) : Tdb_No(++Tnum) To_Orig = tdbp; To_Filter = NULL; To_CondFil = NULL; + Cond = NULL; Next = NULL; Name = tdbp->Name; To_Table = tdbp->To_Table; @@ -97,7 +99,7 @@ CHARSET_INFO *TDB::data_charset(void) /***********************************************************************/ /* Return the datapath of the DB this table belongs to. */ /***********************************************************************/ -PSZ TDB::GetPath(void) +PCSZ TDB::GetPath(void) { return To_Def->GetPath(); } // end of GetPath diff --git a/storage/connect/tabmac.cpp b/storage/connect/tabmac.cpp index bbaba591540..a28b5d7108c 100644 --- a/storage/connect/tabmac.cpp +++ b/storage/connect/tabmac.cpp @@ -329,7 +329,7 @@ void MACCOL::ReadColumn(PGLOBAL g) n = 0; break; default: - p = ""; + p = PlugDup(g, ""); } // endswitch Flag } else switch (Flag) { diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index b5f07db8413..5c41f9094ac 100644 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -603,10 +603,10 @@ bool TDBMSD::InitFileNames(PGLOBAL g) bool DIRDEF::DefineAM(PGLOBAL g, LPCSTR, int) { Desc = Fn = GetStringCatInfo(g, "Filename", NULL); - Incl = GetBoolCatInfo("Subdir", false); + Incl = GetBoolCatInfo("Subdir", false); Huge = GetBoolCatInfo("Huge", false); Nodir = GetBoolCatInfo("Nodir", true); - return false; + return false; } // end of DefineAM /***********************************************************************/ @@ -924,7 +924,7 @@ void TDBDIR::CloseDB(PGLOBAL) /***********************************************************************/ /* DIRCOL public constructor. */ /***********************************************************************/ -DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) +DIRCOL::DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -1016,11 +1016,7 @@ void DIRCOL::ReadColumn(PGLOBAL g) #endif // !__WIN__ default: sprintf(g->Message, MSG(INV_DIRCOL_OFST), N); -#if defined(USE_TRY) throw GetAmType(); -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], GetAmType()); -#endif // !USE_TRY } // endswitch N } // end of ReadColumn @@ -1247,8 +1243,8 @@ int TDBSDR::ReadDB(PGLOBAL g) break; } // endif findnext - } while (!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) || - (*FileData.cFileName == '.' && + } while(!(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) || + (*FileData.cFileName == '.' 
&& (!FileData.cFileName[1] || FileData.cFileName[1] == '.'))); if (Sub->H == INVALID_HANDLE_VALUE) { @@ -1303,9 +1299,9 @@ int TDBSDR::ReadDB(PGLOBAL g) if (lstat(Fpath, &Fileinfo) < 0) { sprintf(g->Message, "%s: %s", Fpath, strerror(errno)); rc = RC_FX; - } else if (S_ISDIR(Fileinfo.st_mode) && strcmp(Entry->d_name, ".") - && strcmp(Entry->d_name, "..")) { - // Look in the name sub-directory + } else if (S_ISDIR(Fileinfo.st_mode) && strcmp(Entry->d_name, ".") + && strcmp(Entry->d_name, "..")) { + // Look in the name sub-directory if (!Sub->Next) { PSUBDIR sup; @@ -1548,11 +1544,7 @@ void TDBDHR::CloseDB(PGLOBAL g) // Close the search handle. if (!FindClose(Hsearch)) { strcpy(g->Message, MSG(SRCH_CLOSE_ERR)); -#if defined(USE_TRY) throw GetAmType(); -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], GetAmType()); -#endif // !USE_TRY } // endif FindClose iFile = 0; @@ -1564,8 +1556,8 @@ void TDBDHR::CloseDB(PGLOBAL g) /***********************************************************************/ /* DHRCOL public constructor. */ /***********************************************************************/ -DHRCOL::DHRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) - : COLBLK(cdp, tdbp, i) +DHRCOL::DHRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) + : COLBLK(cdp, tdbp, i) { if (cprec) { Next = cprec->GetNext(); diff --git a/storage/connect/tabmul.h b/storage/connect/tabmul.h index f26d982d49b..8a95a772c41 100644 --- a/storage/connect/tabmul.h +++ b/storage/connect/tabmul.h @@ -39,7 +39,7 @@ class DllExport TDBMUL : public TDBASE { virtual void ResetDB(void); virtual PTDB Clone(PTABS t); virtual bool IsSame(PTDB tp) {return tp == (PTDB)Tdbp;} - virtual PSZ GetFile(PGLOBAL g) {return Tdbp->GetFile(g);} + virtual PCSZ GetFile(PGLOBAL g) {return Tdbp->GetFile(g);} virtual int GetRecpos(void) {return 0;} virtual PCOL ColDB(PGLOBAL g, PSZ name, int num); bool InitFileNames(PGLOBAL g); @@ -118,9 +118,9 @@ class DllExport DIRDEF : public TABDEF { /* Directory listing table */ // Members PSZ Fn; /* Path/Name of file search */ bool Incl; /* true to include sub-directories */ - bool Huge; /* true if files can be larger than 2GB */ + bool Huge; /* true if files can be larger than 2GB */ bool Nodir; /* true to exclude directories */ -}; // end of DIRDEF + }; // end of DIRDEF /***********************************************************************/ /* This is the DIR Access Method class declaration for tables that */ @@ -177,7 +177,7 @@ public: char Fname[_MAX_FNAME]; // File name char Ftype[_MAX_EXT]; // File extention bool Nodir; // Exclude directories from file list -}; // end of class TDBDIR + }; // end of class TDBDIR /***********************************************************************/ /* This is the DIR Access Method class declaration for tables that */ @@ -226,7 +226,7 @@ class TDBSDR : public TDBDIR { class DIRCOL : public COLBLK { public: // Constructors - DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "DIR"); + DIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "DIR"); DIRCOL(DIRCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index 3ce1ea1f7d8..bdddcf64ca8 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -68,8 +68,8 @@ void PrintResult(PGLOBAL, PSEM, PQRYRES); #endif // _CONSOLE // Used to check whether a MYSQL table is created on itself -bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, const char *host, - const char *db, char *tab, 
const char *src, int port); +bool CheckSelf(PGLOBAL g, TABLE_SHARE *s, PCSZ host, PCSZ db, + PCSZ tab, PCSZ src, int port); /***********************************************************************/ /* External function. */ @@ -125,7 +125,7 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name) } // endif server DBUG_PRINT("info", ("get_server_by_name returned server at %lx", - (long unsigned int) server)); + (size_t) server)); // TODO: We need to examine which of these can really be NULL Hostname = PlugDup(g, server->host); @@ -183,19 +183,22 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name) /***********************************************************************/ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) { + char *tabn, *pwd, *schema; + if ((!strstr(url, "://") && (!strchr(url, '@')))) { // No :// or @ in connection string. Must be a straight // connection name of either "server" or "server/table" // ok, so we do a little parsing, but not completely! - if ((Tabname= strchr(url, '/'))) { + if ((tabn= strchr(url, '/'))) { // If there is a single '/' in the connection string, // this means the user is specifying a table name - *Tabname++= '\0'; + *tabn++= '\0'; // there better not be any more '/'s ! - if (strchr(Tabname, '/')) + if (strchr(tabn, '/')) return true; + Tabname = tabn; } else // Otherwise, straight server name, Tabname = (b) ? GetStringCatInfo(g, "Tabname", Name) : NULL; @@ -223,7 +226,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) Username += 3; - if (!(Hostname = strchr(Username, '@'))) { + if (!(Hostname = (char*)strchr(Username, '@'))) { strcpy(g->Message, "No host specified in URL"); return true; } else { @@ -231,11 +234,11 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) Server = Hostname; } // endif Hostname - if ((Password = strchr(Username, ':'))) { - *Password++ = 0; // End username + if ((pwd = (char*)strchr(Username, ':'))) { + *pwd++ = 0; // End username - // Make sure there isn't an extra / or @ - if ((strchr(Password, '/') || strchr(Hostname, '@'))) { + // Make sure there isn't an extra / + if (strchr(pwd, '/')) { strcpy(g->Message, "Syntax error in URL"); return true; } // endif @@ -243,8 +246,10 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) // Found that if the string is: // user:@hostname:port/db/table // Then password is a null string, so set to NULL - if ((Password[0] == 0)) - Password = NULL; + if ((pwd[0] == 0)) + Password = NULL; + else + Password = pwd; } // endif password @@ -254,21 +259,23 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) return true; } // endif - if ((Tabschema = strchr(Hostname, '/'))) { - *Tabschema++ = 0; + if ((schema = strchr(Hostname, '/'))) { + *schema++ = 0; - if ((Tabname = strchr(Tabschema, '/'))) { - *Tabname++ = 0; + if ((tabn = strchr(schema, '/'))) { + *tabn++ = 0; // Make sure there's not an extra / - if ((strchr(Tabname, '/'))) { + if ((strchr(tabn, '/'))) { strcpy(g->Message, "Syntax error in URL"); return true; } // endif / + Tabname = tabn; } // endif TableName - } // endif database + Tabschema = schema; + } // endif database if ((sport = strchr(Hostname, ':'))) *sport++ = 0; @@ -349,7 +356,7 @@ bool MYSQLDEF::DefineAM(PGLOBAL g, LPCSTR am, int) Portnumber = GetIntCatInfo("Port", GetDefaultPort()); Server = Hostname; } else { - char *locdb = Tabschema; + PCSZ locdb = Tabschema; if (ParseURL(g, url)) return true; @@ -495,11 +502,11 @@ PCOL TDBMYSQL::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /* filter should be removed from column 
list. */ /***********************************************************************/ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) - { +{ //char *tk = "`"; char tk = '`'; int len = 0, rank = 0; - bool b = false, oom = false; + bool b = false; PCOL colp; //PDBUSER dup = PlgGetUser(g); @@ -526,13 +533,13 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) for (colp = Columns; colp; colp = colp->GetNext()) if (!colp->IsSpecial()) { if (b) - oom |= Query->Append(", "); + Query->Append(", "); else b = true; - oom |= Query->Append(tk); - oom |= Query->Append(colp->GetName()); - oom |= Query->Append(tk); + Query->Append(tk); + Query->Append(colp->GetName()); + Query->Append(tk); ((PMYCOL)colp)->Rank = rank++; } // endif colp @@ -542,22 +549,22 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) // Query '*' from... // (the use of a char constant minimize the result storage) if (Isview) - oom |= Query->Append('*'); + Query->Append('*'); else - oom |= Query->Append("'*'"); + Query->Append("'*'"); } // endif ncol - oom |= Query->Append(" FROM "); - oom |= Query->Append(tk); - oom |= Query->Append(TableName); - oom |= Query->Append(tk); + Query->Append(" FROM "); + Query->Append(tk); + Query->Append(TableName); + Query->Append(tk); len = Query->GetLength(); if (To_CondFil) { if (!mx) { - oom |= Query->Append(" WHERE "); - oom |= Query->Append(To_CondFil->Body); + Query->Append(" WHERE "); + Query->Append(To_CondFil->Body); len = Query->GetLength() + 1; } else len += (strlen(To_CondFil->Body) + 256); @@ -565,25 +572,25 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) } else len += (mx ? 256 : 1); - if (oom || Query->Resize(len)) { + if (Query->IsTruncated() || Query->Resize(len)) { strcpy(g->Message, "MakeSelect: Out of memory"); return true; - } // endif oom + } // endif Query if (trace) htrc("Query=%s\n", Query->GetStr()); return false; - } // end of MakeSelect +} // end of MakeSelect /***********************************************************************/ /* MakeInsert: make the Insert statement used with MySQL connection. 
*/ /***********************************************************************/ bool TDBMYSQL::MakeInsert(PGLOBAL g) { - char *tk = "`"; + const char *tk = "`"; uint len = 0; - bool b = false, oom; + bool oom, b = false; PCOL colp; if (Query) @@ -622,38 +629,38 @@ bool TDBMYSQL::MakeInsert(PGLOBAL g) Query = new(g) STRING(g, len); if (Delayed) - oom = Query->Set("INSERT DELAYED INTO "); + Query->Set("INSERT DELAYED INTO "); else - oom = Query->Set("INSERT INTO "); + Query->Set("INSERT INTO "); - oom |= Query->Append(tk); - oom |= Query->Append(TableName); - oom |= Query->Append("` ("); + Query->Append(tk); + Query->Append(TableName); + Query->Append("` ("); for (colp = Columns; colp; colp = colp->GetNext()) { if (b) - oom |= Query->Append(", "); + Query->Append(", "); else b = true; - oom |= Query->Append(tk); - oom |= Query->Append(colp->GetName()); - oom |= Query->Append(tk); + Query->Append(tk); + Query->Append(colp->GetName()); + Query->Append(tk); } // endfor colp - oom |= Query->Append(") VALUES ("); + Query->Append(") VALUES ("); #if defined(MYSQL_PREPARED_STATEMENTS) if (Prep) { for (int i = 0; i < Nparm; i++) - oom |= Query->Append("?,"); + Query->Append("?,"); Query->RepLast(')'); Query->Trim(); } // endif Prep #endif // MYSQL_PREPARED_STATEMENTS - if (oom) + if ((oom = Query->IsTruncated())) strcpy(g->Message, "MakeInsert: Out of memory"); return oom; @@ -684,18 +691,18 @@ bool TDBMYSQL::MakeCommand(PGLOBAL g) strlwr(strcpy(name, Name)); // Not a keyword if ((p = strstr(qrystr, name))) { - bool oom = Query->Set(Qrystr, p - qrystr); + Query->Set(Qrystr, p - qrystr); if (qtd && *(p-1) == ' ') { - oom |= Query->Append('`'); - oom |= Query->Append(TableName); - oom |= Query->Append('`'); + Query->Append('`'); + Query->Append(TableName); + Query->Append('`'); } else - oom |= Query->Append(TableName); + Query->Append(TableName); - oom |= Query->Append(Qrystr + (p - qrystr) + strlen(name)); + Query->Append(Qrystr + (p - qrystr) + strlen(name)); - if (oom) { + if (Query->IsTruncated()) { strcpy(g->Message, "MakeCommand: Out of memory"); return true; } else @@ -1096,7 +1103,7 @@ bool TDBMYSQL::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0); *To_CondFil->Body= 0; - if ((To_CondFil = hc->CheckCond(g, To_CondFil, To_CondFil->Cond))) + if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond))) PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1); } // endif active_index @@ -1161,24 +1168,23 @@ int TDBMYSQL::WriteDB(PGLOBAL g) int rc; uint len = Query->GetLength(); char buf[64]; - bool oom = false; // Make the Insert command value list for (PCOL colp = Columns; colp; colp = colp->GetNext()) { if (!colp->GetValue()->IsNull()) { if (colp->GetResultType() == TYPE_STRING || colp->GetResultType() == TYPE_DATE) - oom |= Query->Append_quoted(colp->GetValue()->GetCharString(buf)); + Query->Append_quoted(colp->GetValue()->GetCharString(buf)); else - oom |= Query->Append(colp->GetValue()->GetCharString(buf)); + Query->Append(colp->GetValue()->GetCharString(buf)); } else - oom |= Query->Append("NULL"); + Query->Append("NULL"); - oom |= Query->Append(','); + Query->Append(','); } // endfor colp - if (unlikely(oom)) { + if (unlikely(Query->IsTruncated())) { strcpy(g->Message, "WriteDB: Out of memory"); rc = RC_FX; } else { @@ -1186,7 +1192,7 @@ int TDBMYSQL::WriteDB(PGLOBAL g) Myc.m_Rows = -1; // To execute the query rc = Myc.ExecSQL(g, Query->GetStr()); Query->Truncate(len); // Restore query - } // endif oom + } // endif Query return (rc == RC_NF) ? 
RC_OK : rc; // RC_NF is Ok } // end of WriteDB @@ -1234,7 +1240,7 @@ void TDBMYSQL::CloseDB(PGLOBAL g) /***********************************************************************/ /* MYSQLCOL public constructor. */ /***********************************************************************/ -MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -1260,7 +1266,7 @@ MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) /***********************************************************************/ /* MYSQLCOL public constructor. */ /***********************************************************************/ -MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am) +MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am) : COLBLK(NULL, tdbp, i) { const char *chset = get_charset_name(fld->charsetnr); @@ -1407,11 +1413,7 @@ void MYSQLCOL::ReadColumn(PGLOBAL g) if (rc == RC_EF) sprintf(g->Message, MSG(INV_DEF_READ), rc); -#if defined(USE_TRY) throw 11; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 11); -#endif // !USE_TRY } else tdbp->Fetched = true; @@ -1673,7 +1675,7 @@ int TDBMYEXC::WriteDB(PGLOBAL g) /***********************************************************************/ /* MYXCOL public constructor. */ /***********************************************************************/ -MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : MYSQLCOL(cdp, tdbp, cprec, i, am) { // Set additional EXEC MYSQL access method information for column. @@ -1683,7 +1685,7 @@ MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) /***********************************************************************/ /* MYSQLCOL public constructor. 
*/ /***********************************************************************/ -MYXCOL::MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am) +MYXCOL::MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am) : MYSQLCOL(fld, tdbp, i, am) { if (trace) diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h index 050fa59259b..3c37ae5bf3b 100644 --- a/storage/connect/tabmysql.h +++ b/storage/connect/tabmysql.h @@ -86,7 +86,7 @@ class TDBMYSQL : public TDBEXT { virtual void ResetDB(void) {N = 0;} virtual int RowNumber(PGLOBAL g, bool b = false); virtual bool IsView(void) {return Isview;} - virtual PSZ GetServer(void) {return Server;} + virtual PCSZ GetServer(void) {return Server;} void SetDatabase(LPCSTR db) {Schema = (char*)db;} // Schema routines @@ -109,7 +109,7 @@ class TDBMYSQL : public TDBEXT { // Internal functions bool MakeSelect(PGLOBAL g, bool mx); bool MakeInsert(PGLOBAL g); - int BindColumns(PGLOBAL g); + int BindColumns(PGLOBAL g __attribute__((unused))); virtual bool MakeCommand(PGLOBAL g); //int MakeUpdate(PGLOBAL g); //int MakeDelete(PGLOBAL g); @@ -146,8 +146,8 @@ class MYSQLCOL : public COLBLK { friend class TDBMYSQL; public: // Constructors - MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "MYSQL"); - MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am = "MYSQL"); + MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "MYSQL"); + MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am = "MYSQL"); MYSQLCOL(MYSQLCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -215,8 +215,8 @@ class MYXCOL : public MYSQLCOL { friend class TDBMYEXC; public: // Constructors - MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "MYSQL"); - MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PSZ am = "MYSQL"); + MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "MYSQL"); + MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am = "MYSQL"); MYXCOL(MYXCOL *colp, PTDB tdbp); // Constructor used in copy process // Methods @@ -242,10 +242,10 @@ class TDBMCL : public TDBCAT { virtual PQRYRES GetResult(PGLOBAL g); // Members - PSZ Host; // Host machine to use - PSZ Db; // Database to be used by server - PSZ Tab; // External table name - PSZ User; // User logon name - PSZ Pwd; // Password logon info - int Port; // MySQL port number (0 = default) + PCSZ Host; // Host machine to use + PCSZ Db; // Database to be used by server + PCSZ Tab; // External table name + PCSZ User; // User logon name + PCSZ Pwd; // Password logon info + int Port; // MySQL port number (0 = default) }; // end of class TDBMCL diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index b8c8a4d8cb4..34711d584f1 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -1,11 +1,11 @@ /************* Tabodbc C++ Program Source Code File (.CPP) *************/ /* PROGRAM NAME: TABODBC */ /* ------------- */ -/* Version 3.1 */ +/* Version 3.2 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2000-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2000-2017 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -231,7 +231,7 @@ PCOL TDBODBC::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /* This used for Multiple(1) tables. Also prepare a connect string */ /* with a place holder to be used by SetFile. 
*/ /***********************************************************************/ -PSZ TDBODBC::GetFile(PGLOBAL g) +PCSZ TDBODBC::GetFile(PGLOBAL g) { if (Connect) { char *p1, *p2; @@ -297,9 +297,10 @@ void TDBODBC::SetFile(PGLOBAL g, PSZ fn) /***********************************************************************/ bool TDBODBC::MakeInsert(PGLOBAL g) { - char *schmp = NULL, *catp = NULL, buf[NAM_LEN * 3]; + PCSZ schmp = NULL; + char *catp = NULL, buf[NAM_LEN * 3]; int len = 0; - bool b = false, oom = false; + bool oom, b = false; PTABLE tablep = To_Table; PCOL colp; @@ -336,32 +337,32 @@ bool TDBODBC::MakeInsert(PGLOBAL g) Query = new(g) STRING(g, len, "INSERT INTO "); if (catp) { - oom |= Query->Append(catp); + Query->Append(catp); if (schmp) { - oom |= Query->Append('.'); - oom |= Query->Append(schmp); + Query->Append('.'); + Query->Append(schmp); } // endif schmp - oom |= Query->Append('.'); + Query->Append('.'); } else if (schmp) { - oom |= Query->Append(schmp); - oom |= Query->Append('.'); + Query->Append(schmp); + Query->Append('.'); } // endif schmp if (Quote) { // Put table name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); - oom |= Query->Append('('); + Query->Append('('); for (colp = Columns; colp; colp = colp->GetNext()) { if (b) - oom |= Query->Append(", "); + Query->Append(", "); else b = true; @@ -370,20 +371,20 @@ bool TDBODBC::MakeInsert(PGLOBAL g) if (Quote) { // Put column name between identifier quotes in case in contains blanks - oom |= Query->Append(Quote); - oom |= Query->Append(buf); - oom |= Query->Append(Quote); + Query->Append(Quote); + Query->Append(buf); + Query->Append(Quote); } else - oom |= Query->Append(buf); + Query->Append(buf); } // endfor colp - oom |= Query->Append(") VALUES ("); + Query->Append(") VALUES ("); for (int i = 0; i < Nparm; i++) - oom |= Query->Append("?,"); + Query->Append("?,"); - if (oom) + if ((oom = Query->IsTruncated())) strcpy(g->Message, "MakeInsert: Out of memory"); else Query->RepLast(')'); @@ -733,7 +734,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) To_CondFil->Body= (char*)PlugSubAlloc(g, NULL, 0); *To_CondFil->Body= 0; - if ((To_CondFil = hc->CheckCond(g, To_CondFil, To_CondFil->Cond))) + if ((To_CondFil = hc->CheckCond(g, To_CondFil, Cond))) PlugSubAlloc(g, NULL, strlen(To_CondFil->Body) + 1); } // endif active_index @@ -883,7 +884,7 @@ void TDBODBC::CloseDB(PGLOBAL g) /***********************************************************************/ /* ODBCCOL public constructor. */ /***********************************************************************/ -ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +ODBCCOL::ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : EXTCOL(cdp, tdbp, cprec, i, am) { // Set additional ODBC access method information for column. @@ -1301,7 +1302,7 @@ int TDBXDBC::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ /* XSRCCOL public constructor. */ /***********************************************************************/ -XSRCCOL::XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +XSRCCOL::XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : ODBCCOL(cdp, tdbp, cprec, i, am) { // Set additional ODBC access method information for column. 
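The TDBMYSQL::MakeSelect/MakeInsert/WriteDB and TDBODBC::MakeInsert hunks above all drop the per-call "oom |= Query->Append(...)" bookkeeping and instead test Query->IsTruncated() once, after the whole statement has been assembled, before reporting "Out of memory". The sketch below only illustrates the shape of that pattern; SqlBuf and MakeSelect here are hypothetical stand-ins, not the CONNECT STRING class, which allocates from the plugin work area (PGLOBAL) and has a richer interface (Set, Resize, RepLast, Truncate).

    // Minimal sketch, assuming a stand-in buffer class: Append() never
    // reports failure to the caller, it only records that truncation
    // happened, and the builder checks IsTruncated() once at the end.
    #include <cstring>
    #include <string>
    #include <iostream>

    class SqlBuf {                        // hypothetical stand-in for STRING
    public:
      explicit SqlBuf(size_t max) : Max(max), Trunc(false) {}

      void Append(const char *s) {        // keep appending; remember overflow
        if (Buf.size() + std::strlen(s) > Max)
          Trunc = true;
        else
          Buf += s;
      }

      void Append(char c) { char s[2] = {c, 0}; Append(s); }

      bool IsTruncated() const { return Trunc; }
      const char *GetStr() const { return Buf.c_str(); }

    private:
      std::string Buf;
      size_t Max;
      bool Trunc;
    };

    // Mirrors the shape of the patched MakeSelect: build the whole
    // statement, then test for truncation at a single point.
    static bool MakeSelect(SqlBuf &q, const char *tab) {
      q.Append("SELECT * FROM `");
      q.Append(tab);
      q.Append('`');
      return q.IsTruncated();             // one error check instead of oom |=
    }

    int main() {
      SqlBuf q(64);

      if (MakeSelect(q, "t1"))
        std::cerr << "MakeSelect: Out of memory\n";
      else
        std::cout << q.GetStr() << '\n';  // prints: SELECT * FROM `t1`

      return 0;
    }

Deferring the check keeps the statement builders readable while still surfacing truncation before the query is executed, which is exactly what the "MakeSelect: Out of memory" and "MakeInsert: Out of memory" branches in the hunks above do.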
diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h index 7c65a8ea7e5..487a5073559 100644 --- a/storage/connect/tabodbc.h +++ b/storage/connect/tabodbc.h @@ -70,10 +70,10 @@ class TDBODBC : public TDBEXT { // Methods virtual PTDB Clone(PTABS t); virtual bool SetRecpos(PGLOBAL g, int recpos); - virtual PSZ GetFile(PGLOBAL g); + virtual PCSZ GetFile(PGLOBAL g); virtual void SetFile(PGLOBAL g, PSZ fn); virtual void ResetSize(void); - virtual PSZ GetServer(void) {return "ODBC";} + virtual PCSZ GetServer(void) {return "ODBC";} virtual int Indexable(void) {return 2;} // Database routines @@ -108,7 +108,7 @@ class ODBCCOL : public EXTCOL { friend class TDBODBC; public: // Constructors - ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ODBC"); + ODBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ODBC"); ODBCCOL(ODBCCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -179,7 +179,7 @@ class XSRCCOL : public ODBCCOL { friend class TDBXDBC; public: // Constructors - XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ODBC"); + XSRCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ODBC"); XSRCCOL(XSRCCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -239,10 +239,10 @@ class TDBOTB : public TDBDRV { virtual PQRYRES GetResult(PGLOBAL g); // Members - char *Dsn; // Points to connection string - char *Schema; // Points to schema name or NULL - char *Tab; // Points to ODBC table name or pattern - char *Tabtyp; // Points to ODBC table type + PCSZ Dsn; // Points to connection string + PCSZ Schema; // Points to schema name or NULL + PCSZ Tab; // Points to ODBC table name or pattern + PCSZ Tabtyp; // Points to ODBC table type ODBCPARM Ops; // Additional parameters }; // end of class TDBOTB diff --git a/storage/connect/tabpivot.cpp b/storage/connect/tabpivot.cpp index f24929eb2bb..76a46e6899b 100644 --- a/storage/connect/tabpivot.cpp +++ b/storage/connect/tabpivot.cpp @@ -106,7 +106,7 @@ bool PIVAID::SkipColumn(PCOLRES crp, char *skc) /* Make the Pivot table column list. */ /***********************************************************************/ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) - { +{ char *p, *query, *colname, *skc, buf[64]; int ndif, nblin, w = 0; bool b = false; @@ -114,205 +114,190 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) PQRYRES qrp; PCOLRES *pcrp, crp, fncrp = NULL; -#if defined(USE_TRY) try { -#else // !USE_TRY - // Save stack and allocation environment and prepare error return - if (g->jump_level == MAX_JUMP) { - strcpy(g->Message, MSG(TOO_MANY_JUMPS)); - return NULL; - } // endif jump_level - - if (setjmp(g->jumper[++g->jump_level])) { - goto err; - } // endif rc -#endif // !USE_TRY - - // Are there columns to skip? 
- if (Skcol) { - uint n = strlen(Skcol); - - skc = (char*)PlugSubAlloc(g, NULL, n + 2); - strcpy(skc, Skcol); - skc[n + 1] = 0; - - // Replace ; by nulls in skc - for (p = strchr(skc, ';'); p; p = strchr(p, ';')) - *p++ = 0; - - } else - skc = NULL; - - if (!Tabsrc && Tabname) { - // Locate the query - query = (char*)PlugSubAlloc(g, NULL, strlen(Tabname) + 26); - sprintf(query, "SELECT * FROM `%s` LIMIT 1", Tabname); - } else if (!Tabsrc) { - strcpy(g->Message, MSG(SRC_TABLE_UNDEF)); - goto err; - } else - query = Tabsrc; - - // Open a MySQL connection for this table - if (!Myc.Open(g, Host, Database, User, Pwd, Port)) { - b = true; - - // Returned values must be in their original character set - if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX) - goto err; - else - Myc.FreeResult(); - - } else - goto err; - - // Send the source command to MySQL - if (Myc.ExecSQL(g, query, &w) == RC_FX) - goto err; - - // We must have a storage query to get pivot column values - if (!(Qryp = Myc.GetResult(g, true))) - goto err; - - if (!Fncol) { - for (crp = Qryp->Colresp; crp; crp = crp->Next) - if ((!Picol || stricmp(Picol, crp->Name)) && !SkipColumn(crp, skc)) - Fncol = crp->Name; - - if (!Fncol) { - strcpy(g->Message, MSG(NO_DEF_FNCCOL)); - goto err; - } // endif Fncol - - } // endif Fncol - - if (!Picol) { - // Find default Picol as the last one not equal to Fncol - for (crp = Qryp->Colresp; crp; crp = crp->Next) - if (stricmp(Fncol, crp->Name) && !SkipColumn(crp, skc)) - Picol = crp->Name; - - if (!Picol) { - strcpy(g->Message, MSG(NO_DEF_PIVOTCOL)); - goto err; - } // endif Picol - - } // endif picol - - // Prepare the column list - for (pcrp = &Qryp->Colresp; crp = *pcrp; ) - if (SkipColumn(crp, skc)) { - // Ignore this column - *pcrp = crp->Next; - } else if (!stricmp(Picol, crp->Name)) { - if (crp->Nulls) { - sprintf(g->Message, "Pivot column %s cannot be nullable", Picol); - goto err; - } // endif Nulls - - Rblkp = crp->Kdata; - *pcrp = crp->Next; - } else if (!stricmp(Fncol, crp->Name)) { - fncrp = crp; - *pcrp = crp->Next; - } else - pcrp = &crp->Next; - - if (!Rblkp) { - strcpy(g->Message, MSG(NO_DEF_PIVOTCOL)); - goto err; - } else if (!fncrp) { - strcpy(g->Message, MSG(NO_DEF_FNCCOL)); - goto err; - } // endif - - if (Tabsrc) { - Myc.Close(); - b = false; + // Are there columns to skip? 
+ if (Skcol) { + uint n = strlen(Skcol); + + skc = (char*)PlugSubAlloc(g, NULL, n + 2); + strcpy(skc, Skcol); + skc[n + 1] = 0; + + // Replace ; by nulls in skc + for (p = strchr(skc, ';'); p; p = strchr(p, ';')) + *p++ = 0; + + } else + skc = NULL; + + if (!Tabsrc && Tabname) { + // Locate the query + query = (char*)PlugSubAlloc(g, NULL, strlen(Tabname) + 26); + sprintf(query, "SELECT * FROM `%s` LIMIT 1", Tabname); + } else if (!Tabsrc) { + strcpy(g->Message, MSG(SRC_TABLE_UNDEF)); + goto err; + } else + query = (char*)Tabsrc; - // Before calling sort, initialize all - nblin = Qryp->Nblin; + // Open a MySQL connection for this table + if (!Myc.Open(g, Host, Database, User, Pwd, Port)) { + b = true; - Index.Size = nblin * sizeof(int); - Index.Sub = TRUE; // Should be small enough + // Returned values must be in their original character set + if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX) + goto err; + else + Myc.FreeResult(); - if (!PlgDBalloc(g, NULL, Index)) + } else goto err; - Offset.Size = (nblin + 1) * sizeof(int); - Offset.Sub = TRUE; // Should be small enough - - if (!PlgDBalloc(g, NULL, Offset)) + // Send the source command to MySQL + if (Myc.ExecSQL(g, query, &w) == RC_FX) goto err; - ndif = Qsort(g, nblin); - - if (ndif < 0) // error + // We must have a storage query to get pivot column values + if (!(Qryp = Myc.GetResult(g, true))) goto err; - } else { - // The query was limited, we must get pivot column values - // Returned values must be in their original character set -// if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX) -// goto err; - - query = (char*)PlugSubAlloc(g, NULL, 0); - sprintf(query, "SELECT DISTINCT `%s` FROM `%s`", Picol, Tabname); - PlugSubAlloc(g, NULL, strlen(query) + 1); - Myc.FreeResult(); - - // Send the source command to MySQL - if (Myc.ExecSQL(g, query, &w) == RC_FX) - goto err; - - // We must have a storage query to get pivot column values - if (!(qrp = Myc.GetResult(g, true))) - goto err; - - Myc.Close(); - b = false; - - // Get the column list - crp = qrp->Colresp; - Rblkp = crp->Kdata; - ndif = qrp->Nblin; - } // endif Tabsrc - - // Allocate the Value used to retieve column names - if (!(valp = AllocateValue(g, Rblkp->GetType(), - Rblkp->GetVlen(), - Rblkp->GetPrec()))) - goto err; - - // Now make the functional columns - for (int i = 0; i < ndif; i++) { - if (i) { - crp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES)); - memcpy(crp, fncrp, sizeof(COLRES)); - } else - crp = fncrp; - - // Get the value that will be the generated column name - if (Tabsrc) - valp->SetValue_pvblk(Rblkp, Pex[Pof[i]]); - else - valp->SetValue_pvblk(Rblkp, i); - - colname = valp->GetCharString(buf); - crp->Name = PlugDup(g, colname); - crp->Flag = 1; - - // Add this column - *pcrp = crp; - crp->Next = NULL; - pcrp = &crp->Next; - } // endfor i - - // We added ndif columns and removed 2 (picol and fncol) - Qryp->Nbcol += (ndif - 2); -#if defined(USE_TRY) - return Qryp; - + if (!Fncol) { + for (crp = Qryp->Colresp; crp; crp = crp->Next) + if ((!Picol || stricmp(Picol, crp->Name)) && !SkipColumn(crp, skc)) + Fncol = crp->Name; + + if (!Fncol) { + strcpy(g->Message, MSG(NO_DEF_FNCCOL)); + goto err; + } // endif Fncol + + } // endif Fncol + + if (!Picol) { + // Find default Picol as the last one not equal to Fncol + for (crp = Qryp->Colresp; crp; crp = crp->Next) + if (stricmp(Fncol, crp->Name) && !SkipColumn(crp, skc)) + Picol = crp->Name; + + if (!Picol) { + strcpy(g->Message, MSG(NO_DEF_PIVOTCOL)); + goto err; + } // endif Picol + + } // 
endif picol + + // Prepare the column list + for (pcrp = &Qryp->Colresp; crp = *pcrp; ) + if (SkipColumn(crp, skc)) { + // Ignore this column + *pcrp = crp->Next; + } else if (!stricmp(Picol, crp->Name)) { + if (crp->Nulls) { + sprintf(g->Message, "Pivot column %s cannot be nullable", Picol); + goto err; + } // endif Nulls + + Rblkp = crp->Kdata; + *pcrp = crp->Next; + } else if (!stricmp(Fncol, crp->Name)) { + fncrp = crp; + *pcrp = crp->Next; + } else + pcrp = &crp->Next; + + if (!Rblkp) { + strcpy(g->Message, MSG(NO_DEF_PIVOTCOL)); + goto err; + } else if (!fncrp) { + strcpy(g->Message, MSG(NO_DEF_FNCCOL)); + goto err; + } // endif + + if (Tabsrc) { + Myc.Close(); + b = false; + + // Before calling sort, initialize all + nblin = Qryp->Nblin; + + Index.Size = nblin * sizeof(int); + Index.Sub = TRUE; // Should be small enough + + if (!PlgDBalloc(g, NULL, Index)) + goto err; + + Offset.Size = (nblin + 1) * sizeof(int); + Offset.Sub = TRUE; // Should be small enough + + if (!PlgDBalloc(g, NULL, Offset)) + goto err; + + ndif = Qsort(g, nblin); + + if (ndif < 0) // error + goto err; + + } else { + // The query was limited, we must get pivot column values + // Returned values must be in their original character set + // if (Myc.ExecSQL(g, "SET character_set_results=NULL", &w) == RC_FX) + // goto err; + + query = (char*)PlugSubAlloc(g, NULL, 0); + sprintf(query, "SELECT DISTINCT `%s` FROM `%s`", Picol, Tabname); + PlugSubAlloc(g, NULL, strlen(query) + 1); + Myc.FreeResult(); + + // Send the source command to MySQL + if (Myc.ExecSQL(g, query, &w) == RC_FX) + goto err; + + // We must have a storage query to get pivot column values + if (!(qrp = Myc.GetResult(g, true))) + goto err; + + Myc.Close(); + b = false; + + // Get the column list + crp = qrp->Colresp; + Rblkp = crp->Kdata; + ndif = qrp->Nblin; + } // endif Tabsrc + + // Allocate the Value used to retieve column names + if (!(valp = AllocateValue(g, Rblkp->GetType(), + Rblkp->GetVlen(), + Rblkp->GetPrec()))) + goto err; + + // Now make the functional columns + for (int i = 0; i < ndif; i++) { + if (i) { + crp = (PCOLRES)PlugSubAlloc(g, NULL, sizeof(COLRES)); + memcpy(crp, fncrp, sizeof(COLRES)); + } else + crp = fncrp; + + // Get the value that will be the generated column name + if (Tabsrc) + valp->SetValue_pvblk(Rblkp, Pex[Pof[i]]); + else + valp->SetValue_pvblk(Rblkp, i); + + colname = valp->GetCharString(buf); + crp->Name = PlugDup(g, colname); + crp->Flag = 1; + + // Add this column + *pcrp = crp; + crp->Next = NULL; + pcrp = &crp->Next; + } // endfor i + + // We added ndif columns and removed 2 (picol and fncol) + Qryp->Nbcol += (ndif - 2); + return Qryp; } catch (int n) { if (trace) htrc("Exception %d: %s\n", n, g->Message); @@ -321,19 +306,11 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) } // end catch err: -#else // !USE_TRY - g->jump_level--; - return Qryp; - -err: - g->jump_level--; -#endif // !USE_TRY - if (b) Myc.Close(); return NULL; - } // end of MakePivotColumns +} // end of MakePivotColumns /***********************************************************************/ /* PIVAID: Compare routine for sorting pivot column values. 
*/ diff --git a/storage/connect/tabpivot.h b/storage/connect/tabpivot.h index 07d5c3e456b..6c2d53e9527 100644 --- a/storage/connect/tabpivot.h +++ b/storage/connect/tabpivot.h @@ -32,16 +32,16 @@ class PIVAID : public CSORT { protected: // Members MYSQLC Myc; // MySQL connection class - char *Host; // Host machine to use - char *User; // User logon info - char *Pwd; // Password logon info - char *Database; // Database to be used by server + PCSZ Host; // Host machine to use + PCSZ User; // User logon info + PCSZ Pwd; // Password logon info + PCSZ Database; // Database to be used by server PQRYRES Qryp; // Points to Query result block - char *Tabname; // Name of source table - char *Tabsrc; // SQL of source table - char *Picol; // Pivot column name - char *Fncol; // Function column name - char *Skcol; // Skipped columns + PCSZ Tabname; // Name of source table + PCSZ Tabsrc; // SQL of source table + PCSZ Picol; // Pivot column name + PCSZ Fncol; // Function column name + PCSZ Skcol; // Skipped columns PVBLK Rblkp; // The value block of the pivot column int Port; // MySQL port number }; // end of class PIVAID diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp index d2f5cdb2780..7f0d9881298 100644 --- a/storage/connect/tabsys.cpp +++ b/storage/connect/tabsys.cpp @@ -355,7 +355,7 @@ void TDBINI::CloseDB(PGLOBAL) /***********************************************************************/ /* INICOL public constructor. */ /***********************************************************************/ -INICOL::INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) +INICOL::INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -511,19 +511,11 @@ void INICOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (Flag == 1) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_SEC_UPDATE)); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (*p) { tdbp->Section = p; } else @@ -532,11 +524,7 @@ void INICOL::WriteColumn(PGLOBAL g) return; } else if (!tdbp->Section) { strcpy(g->Message, MSG(SEC_NAME_FIRST)); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } // endif's /*********************************************************************/ @@ -548,11 +536,7 @@ void INICOL::WriteColumn(PGLOBAL g) if (!rc) { sprintf(g->Message, "Error %d writing to %s", GetLastError(), tdbp->Ifile); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } // endif rc } // endif Status @@ -785,7 +769,7 @@ int TDBXIN::DeleteDB(PGLOBAL g, int irc) /***********************************************************************/ /* XINCOL public constructor. 
*/ /***********************************************************************/ -XINCOL::XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +XINCOL::XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : INICOL(cdp, tdbp, cprec, i, am) { } // end of XINCOL constructor @@ -853,19 +837,11 @@ void XINCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (Flag == 1) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_SEC_UPDATE)); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (*p) { tdbp->Section = p; } else @@ -875,11 +851,7 @@ void XINCOL::WriteColumn(PGLOBAL g) } else if (Flag == 2) { if (tdbp->Mode == MODE_UPDATE) { strcpy(g->Message, MSG(NO_KEY_UPDATE)); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } else if (*p) { tdbp->Keycur = p; } else @@ -888,11 +860,7 @@ void XINCOL::WriteColumn(PGLOBAL g) return; } else if (!tdbp->Section || !tdbp->Keycur) { strcpy(g->Message, MSG(SEC_KEY_FIRST)); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } // endif's /*********************************************************************/ @@ -904,11 +872,7 @@ void XINCOL::WriteColumn(PGLOBAL g) if (!rc) { sprintf(g->Message, "Error %d writing to %s", GetLastError(), tdbp->Ifile); -#if defined(USE_TRY) throw 31; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 31); -#endif // !USE_TRY } // endif rc } // endif Status diff --git a/storage/connect/tabsys.h b/storage/connect/tabsys.h index ff1b8335690..44b5a137a70 100644 --- a/storage/connect/tabsys.h +++ b/storage/connect/tabsys.h @@ -61,7 +61,7 @@ class TDBINI : public TDBASE { virtual int GetRecpos(void) {return N;} virtual int GetProgCur(void) {return N;} //virtual int GetAffectedRows(void) {return 0;} - virtual PSZ GetFile(PGLOBAL g) {return Ifile;} + virtual PCSZ GetFile(PGLOBAL g) {return Ifile;} virtual void SetFile(PGLOBAL g, PSZ fn) {Ifile = fn;} virtual void ResetDB(void) {Seclist = Section = NULL; N = 0;} virtual void ResetSize(void) {MaxSize = -1; Seclist = NULL;} @@ -93,7 +93,7 @@ class TDBINI : public TDBASE { class INICOL : public COLBLK { public: // Constructors - INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI"); + INICOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "INI"); INICOL(INICOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation @@ -165,7 +165,7 @@ class TDBXIN : public TDBINI { class XINCOL : public INICOL { public: // Constructors - XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "INI"); + XINCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "INI"); XINCOL(XINCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp index 762c61bd1a1..574cef28ea3 100644 --- a/storage/connect/tabutil.cpp +++ b/storage/connect/tabutil.cpp @@ -119,7 +119,8 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db, FLD_LENGTH, FLD_SCALE, FLD_RADIX, FLD_NULL, FLD_REM, FLD_NO, FLD_CHARSET}; unsigned int length[] = {0, 4, 16, 4, 4, 4, 4, 4, 0, 32, 32}; - char *pn, *tn, *fld, *colname, *chset, *fmt, v; + PCSZ fmt; + char *pn, *tn, *fld, *colname, *chset, v; int i, n, ncol = sizeof(buftyp) / 
sizeof(int); int prec, len, type, scale; int zconv = GetConvSize(); @@ -227,7 +228,7 @@ PQRYRES TabColumns(PGLOBAL g, THD *thd, const char *db, fmt = MyDateFmt(fp->type()); prec = len = strlen(fmt); } else { - fmt = (char*)fp->option_struct->dateformat; + fmt = (PCSZ)fp->option_struct->dateformat; prec = len = fp->field_length; } // endif mysql @@ -314,7 +315,7 @@ bool PRXDEF::DefineAM(PGLOBAL g, LPCSTR, int) strcpy(g->Message, "Missing object table definition"); return true; } else - tab = "Noname"; + tab = PlugDup(g, "Noname"); } else // Analyze the table name, it may have the format: [dbname.]tabname @@ -626,7 +627,7 @@ void TDBPRX::RemoveNext(PTABLE tp) /***********************************************************************/ /* PRXCOL public constructor. */ /***********************************************************************/ -PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) +PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -741,7 +742,14 @@ void PRXCOL::ReadColumn(PGLOBAL g) if (Nullable) Value->SetNull(Value->IsNull()); - } // endif Colp + } else { + Value->Reset(); + + // Set null when applicable + if (Nullable) + Value->SetNull(true); + + } // endif Colp } // end of ReadColumn diff --git a/storage/connect/tabutil.h b/storage/connect/tabutil.h index 8e56aecff86..62678508ca1 100644 --- a/storage/connect/tabutil.h +++ b/storage/connect/tabutil.h @@ -71,7 +71,7 @@ class DllExport TDBPRX : public TDBASE { virtual int GetRecpos(void) {return Tdbp->GetRecpos();} virtual void ResetDB(void) {Tdbp->ResetDB();} virtual int RowNumber(PGLOBAL g, bool b = FALSE); - virtual PSZ GetServer(void) {return (Tdbp) ? Tdbp->GetServer() : (PSZ)"?";} + virtual PCSZ GetServer(void) {return (Tdbp) ? Tdbp->GetServer() : (PSZ)"?";} // Database routines virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); @@ -101,7 +101,7 @@ class DllExport PRXCOL : public COLBLK { friend class TDBOCCUR; public: // Constructors - PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "PRX"); + PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "PRX"); PRXCOL(PRXCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp index 902137ead2e..533986e44da 100644 --- a/storage/connect/tabvct.cpp +++ b/storage/connect/tabvct.cpp @@ -174,7 +174,7 @@ bool VCTDEF::Erase(char *filename) /***********************************************************************/ int VCTDEF::MakeFnPattern(char *fpat) { - char pat[8]; + char pat[16]; #if defined(__WIN__) char drive[_MAX_DRIVE]; #else @@ -490,11 +490,7 @@ void VCTCOL::ReadBlock(PGLOBAL g) #if defined(_DEBUG) if (!Blk) { strcpy(g->Message, MSG(TO_BLK_IS_NULL)); -#if defined(USE_TRY) throw 58; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 58); -#endif // !USE_TRY } // endif #endif @@ -502,11 +498,7 @@ void VCTCOL::ReadBlock(PGLOBAL g) /* Read column block according to used access method. 
*/ /*********************************************************************/ if (txfp->ReadBlock(g, this)) -#if defined(USE_TRY) throw 6; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 6); -#endif // !USE_TRY ColBlk = txfp->CurBlk; ColPos = -1; // Any invalid position @@ -526,11 +518,7 @@ void VCTCOL::WriteBlock(PGLOBAL g) #if defined(_DEBUG) if (!Blk) { strcpy(g->Message, MSG(BLK_IS_NULL)); -#if defined(USE_TRY) throw 56; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 56); -#endif // !USE_TRY } // endif #endif @@ -538,11 +526,7 @@ void VCTCOL::WriteBlock(PGLOBAL g) /* Write column block according to used access method. */ /*******************************************************************/ if (txfp->WriteBlock(g, this)) -#if defined(USE_TRY) throw 6; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 6); -#endif // !USE_TRY Modif = 0; } // endif Modif diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp index dc57c9f3538..84b3dd1787b 100644 --- a/storage/connect/tabvir.cpp +++ b/storage/connect/tabvir.cpp @@ -269,7 +269,7 @@ int TDBVIR::DeleteDB(PGLOBAL g, int) /***********************************************************************/ /* VIRCOL public constructor. */ /***********************************************************************/ -VIRCOL::VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ) +VIRCOL::VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ) : COLBLK(cdp, tdbp, i) { if (cprec) { @@ -289,11 +289,7 @@ void VIRCOL::ReadColumn(PGLOBAL g) { // This should never be called sprintf(g->Message, "ReadColumn: Column %s is not virtual", Name); -#if defined(USE_TRY) throw TYPE_COLBLK; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_COLBLK); -#endif // !USE_TRY } // end of ReadColumn /* ---------------------------TDBVICL class -------------------------- */ diff --git a/storage/connect/tabvir.h b/storage/connect/tabvir.h index a53aceaeceb..e7313bbae67 100644 --- a/storage/connect/tabvir.h +++ b/storage/connect/tabvir.h @@ -76,7 +76,7 @@ class VIRCOL : public COLBLK { friend class TDBVIR; public: // Constructors - VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "VIRTUAL"); + VIRCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "VIRTUAL"); // Implementation virtual int GetAmType(void) {return TYPE_AM_VIR;} diff --git a/storage/connect/tabwmi.cpp b/storage/connect/tabwmi.cpp index 4871a1d66dc..335ffce5d7f 100644 --- a/storage/connect/tabwmi.cpp +++ b/storage/connect/tabwmi.cpp @@ -27,7 +27,7 @@ /***********************************************************************/ /* Initialize WMI operations. */ /***********************************************************************/ -PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname) +PWMIUT InitWMI(PGLOBAL g, PCSZ nsp, PCSZ classname) { IWbemLocator *loc; char *p; @@ -132,7 +132,7 @@ PWMIUT InitWMI(PGLOBAL g, char *nsp, char *classname) /* WMIColumns: constructs the result blocks containing the description */ /* of all the columns of a WMI table of a specified class. 
*/ /***********************************************************************/ -PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info) +PQRYRES WMIColumns(PGLOBAL g, PCSZ nsp, PCSZ cls, bool info) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT}; diff --git a/storage/connect/tabwmi.h b/storage/connect/tabwmi.h index 6abb85453a1..7a18453374e 100644 --- a/storage/connect/tabwmi.h +++ b/storage/connect/tabwmi.h @@ -27,7 +27,7 @@ typedef struct _WMIutil { /***********************************************************************/ /* Functions used externally. */ /***********************************************************************/ -PQRYRES WMIColumns(PGLOBAL g, char *nsp, char *cls, bool info); +PQRYRES WMIColumns(PGLOBAL g, PCSZ nsp, PCSZ cls, bool info); /* -------------------------- WMI classes ---------------------------- */ diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index 386d016d082..80d4395058e 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -118,10 +118,11 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) static XFLD fldtyp[] = {FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC, FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT}; static unsigned int length[] = {0, 6, 8, 10, 10, 6, 6, 0}; - char *fn, *op, colname[65], fmt[129], buf[512]; + char colname[65], fmt[129], buf[512]; int i, j, lvl, n = 0; int ncol = sizeof(buftyp) / sizeof(int); bool ok = true; + PCSZ fn, op; PXCL xcol, xcp, fxcp = NULL, pxcp = NULL; PLVL *lvlp, vp; PXNODE node = NULL; @@ -161,7 +162,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) if (!(tdp->Database = SetPath(g, db))) return NULL; - tdp->Tabname = tab; + tdp->Tabname = tab; tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false); tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL); @@ -362,7 +363,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) skipit: if (trace) - htrc("CSVColumns: n=%d len=%d\n", n, length[0]); + htrc("XMLColumns: n=%d len=%d\n", n, length[0]); /*********************************************************************/ /* Allocate the structures used to refer to the result set. */ @@ -451,7 +452,8 @@ XMLDEF::XMLDEF(void) /***********************************************************************/ bool XMLDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { - char *defrow, *defcol, buf[10]; + PCSZ defrow, defcol; + char buf[10]; Fn = GetStringCatInfo(g, "Filename", NULL); Encoding = GetStringCatInfo(g, "Encoding", "UTF-8"); @@ -1317,11 +1319,7 @@ void TDBXML::CloseDB(PGLOBAL g) Docp->CloseDoc(g, To_Xb); // This causes a crash in Diagnostics_area::set_error_status -//#if defined(USE_TRY) // throw TYPE_AM_XML; -//#else // !USE_TRY -// longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -//#endif // !USE_TRY } // endif DumpDoc } // endif Changed @@ -1364,8 +1362,8 @@ void TDBXML::CloseDB(PGLOBAL g) /***********************************************************************/ /* XMLCOL public constructor. 
*/ /***********************************************************************/ -XMLCOL::XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) - : COLBLK(cdp, tdbp, i) +XMLCOL::XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) + : COLBLK(cdp, tdbp, i) { if (cprec) { Next = cprec->GetNext(); @@ -1644,11 +1642,7 @@ void XMLCOL::ReadColumn(PGLOBAL g) if (ValNode->GetType() != XML_ELEMENT_NODE && ValNode->GetType() != XML_ATTRIBUTE_NODE) { sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } // endif type // Get the Xname value from the XML file @@ -1659,11 +1653,7 @@ void XMLCOL::ReadColumn(PGLOBAL g) PushWarning(g, Tdbp); break; default: -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } // endswitch Value->SetValue_psz(Valbuf); @@ -1714,11 +1704,7 @@ void XMLCOL::WriteColumn(PGLOBAL g) /* For columns having an Xpath, the Clist must be updated. */ /*********************************************************************/ if (Tdbp->CheckRow(g, Nod || Tdbp->Colname)) -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY /*********************************************************************/ /* Null values are represented by no node. */ @@ -1790,15 +1776,7 @@ void XMLCOL::WriteColumn(PGLOBAL g) if (ColNode == NULL) { strcpy(g->Message, MSG(COL_ALLOC_ERR)); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY -#if defined(USE_TRY) - throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY -#endif // !USE_TRY } // endif ColNode } // endif ColNode @@ -1827,11 +1805,7 @@ void XMLCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } else strcpy(Valbuf, p); @@ -1881,11 +1855,7 @@ void XMULCOL::ReadColumn(PGLOBAL g) if (ValNode->GetType() != XML_ELEMENT_NODE && ValNode->GetType() != XML_ATTRIBUTE_NODE) { sprintf(g->Message, MSG(BAD_VALNODE), ValNode->GetType(), Name); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } // endif type // Get the Xname value from the XML file @@ -1971,11 +1941,7 @@ void XMULCOL::WriteColumn(PGLOBAL g) /* For columns having an Xpath, the Clist must be updated. */ /*********************************************************************/ if (Tdbp->CheckRow(g, Nod)) -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY /*********************************************************************/ /* Find the column and value nodes to update or insert. 
*/ @@ -2024,11 +1990,7 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (len > 1 && !Tdbp->Xpand) { sprintf(g->Message, MSG(BAD_VAL_UPDATE), Name); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } else ValNode = Nlx->GetItem(g, Tdbp->Nsub, Vxnp); @@ -2070,11 +2032,7 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (ColNode == NULL) { strcpy(g->Message, MSG(COL_ALLOC_ERR)); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } // endif ColNode } // endif ColNode @@ -2103,11 +2061,7 @@ void XMULCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } else strcpy(Valbuf, p); @@ -2139,11 +2093,7 @@ void XPOSCOL::ReadColumn(PGLOBAL g) if (Tdbp->Clist == NULL) { strcpy(g->Message, MSG(MIS_TAG_LIST)); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } // endif Clist if ((ValNode = Tdbp->Clist->GetItem(g, Rank, Vxnp))) { @@ -2155,11 +2105,7 @@ void XPOSCOL::ReadColumn(PGLOBAL g) PushWarning(g, Tdbp); break; default: -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } // endswitch Value->SetValue_psz(Valbuf); @@ -2210,22 +2156,14 @@ void XPOSCOL::WriteColumn(PGLOBAL g) /* For all columns the Clist must be updated. */ /*********************************************************************/ if (Tdbp->CheckRow(g, true)) -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY /*********************************************************************/ /* Find the column and value nodes to update or insert. 
*/ /*********************************************************************/ if (Tdbp->Clist == NULL) { strcpy(g->Message, MSG(MIS_TAG_LIST)); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } // endif Clist n = Tdbp->Clist->GetLength(); @@ -2250,11 +2188,7 @@ void XPOSCOL::WriteColumn(PGLOBAL g) if (strlen(p) > (unsigned)Long) { sprintf(g->Message, MSG(VALUE_TOO_LONG), p, Name, Long); -#if defined(USE_TRY) throw TYPE_AM_XML; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_AM_XML); -#endif // !USE_TRY } else strcpy(Valbuf, p); diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h index 65b353072cb..6b18eb645f0 100644 --- a/storage/connect/tabxml.h +++ b/storage/connect/tabxml.h @@ -31,7 +31,7 @@ class DllExport XMLDEF : public TABDEF { /* Logical table description */ protected: // Members - char *Fn; /* Path/Name of corresponding file */ + PCSZ Fn; /* Path/Name of corresponding file */ char *Encoding; /* New XML table file encoding */ char *Tabname; /* Name of Table node */ char *Rowname; /* Name of first level nodes */ @@ -42,7 +42,7 @@ class DllExport XMLDEF : public TABDEF { /* Logical table description */ char *DefNs; /* Dummy name of default namespace */ char *Attrib; /* Table node attributes */ char *Hdattr; /* Header node attributes */ - char *Entry; /* Zip entry name or pattern */ + PCSZ Entry; /* Zip entry name or pattern */ int Coltype; /* Default column type */ int Limit; /* Limit of multiple values */ int Header; /* n first rows are header rows */ @@ -74,7 +74,7 @@ class DllExport TDBXML : public TDBASE { virtual PTDB Clone(PTABS t); virtual int GetRecpos(void); virtual int GetProgCur(void) {return N;} - virtual PSZ GetFile(PGLOBAL g) {return Xfile;} + virtual PCSZ GetFile(PGLOBAL g) {return Xfile;} virtual void SetFile(PGLOBAL g, PSZ fn) {Xfile = fn;} virtual void ResetDB(void) {N = 0;} virtual void ResetSize(void) {MaxSize = -1;} @@ -127,7 +127,7 @@ class DllExport TDBXML : public TDBASE { bool Void; // True if the file does not exist bool Zipped; // True if Zipped XML file(s) bool Mulentries; // True if multiple entries in zip file - char *Xfile; // The XML file + PCSZ Xfile; // The XML file char *Enc; // New XML table file encoding char *Tabname; // Name of Table node char *Rowname; // Name of first level nodes @@ -138,7 +138,7 @@ class DllExport TDBXML : public TDBASE { char *DefNs; // Dummy name of default namespace char *Attrib; // Table node attribut(s) char *Hdattr; // Header node attribut(s) - char *Entry; // Zip entry name or pattern + PCSZ Entry; // Zip entry name or pattern int Coltype; // Default column type int Limit; // Limit of multiple values int Header; // n first rows are header rows @@ -155,7 +155,7 @@ class DllExport TDBXML : public TDBASE { class XMLCOL : public COLBLK { public: // Constructors - XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "XML"); + XMLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "XML"); XMLCOL(XMLCOL *colp, PTDB tdbp); // Constructor used in copy process // Implementation diff --git a/storage/connect/tabzip.cpp b/storage/connect/tabzip.cpp index b91059a3843..c026744dba8 100644 --- a/storage/connect/tabzip.cpp +++ b/storage/connect/tabzip.cpp @@ -195,8 +195,8 @@ void TDBZIP::CloseDB(PGLOBAL g) /***********************************************************************/ /* ZIPCOL public constructor. 
*/ /***********************************************************************/ -ZIPCOL::ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am) - : COLBLK(cdp, tdbp, i) +ZIPCOL::ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) + : COLBLK(cdp, tdbp, i) { if (cprec) { Next = cprec->GetNext(); diff --git a/storage/connect/tabzip.h b/storage/connect/tabzip.h index dcec3475371..32b15281f81 100644 --- a/storage/connect/tabzip.h +++ b/storage/connect/tabzip.h @@ -34,7 +34,7 @@ public: protected: // Members - PSZ target; // The inside file to query + PCSZ target; // The inside file to query }; // end of ZIPDEF /***********************************************************************/ @@ -68,7 +68,7 @@ protected: // Members unzFile zipfile; // The ZIP container file - PSZ zfn; // The ZIP file name + PCSZ zfn; // The ZIP file name //PSZ target; unz_file_info64 finfo; // The current file info char fn[FILENAME_MAX]; // The current file name @@ -82,7 +82,7 @@ class DllExport ZIPCOL : public COLBLK { friend class TDBZIP; public: // Constructors - ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PSZ am = "ZIP"); + ZIPCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "ZIP"); // Implementation virtual int GetAmType(void) { return TYPE_AM_ZIP; } diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc index 34d192361a5..ca3557666a4 100644 --- a/storage/connect/user_connect.cc +++ b/storage/connect/user_connect.cc @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /** @file user_connect.cc diff --git a/storage/connect/user_connect.h b/storage/connect/user_connect.h index 7f37973f378..a883eb85934 100644 --- a/storage/connect/user_connect.h +++ b/storage/connect/user_connect.h @@ -11,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ /** @file user_connect.h diff --git a/storage/connect/valblk.cpp b/storage/connect/valblk.cpp index 4331962e0ad..5b98f3eb425 100644 --- a/storage/connect/valblk.cpp +++ b/storage/connect/valblk.cpp @@ -1,5 +1,5 @@ /************ Valblk C++ Functions Source Code File (.CPP) *************/ -/* Name: VALBLK.CPP Version 2.2 */ +/* Name: VALBLK.CPP Version 2.3 */ /* */ /* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ /* */ @@ -138,18 +138,14 @@ PSZ VALBLK::GetCharValue(int) assert(g); sprintf(g->Message, MSG(NO_CHAR_FROM), Type); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY return NULL; } // end of GetCharValue /***********************************************************************/ /* Set format so formatted dates can be converted on input. 
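Most of the signature changes in these files are PSZ -> PCSZ (or char * -> const char *). Assuming the usual CONNECT typedefs, where PSZ is char * and PCSZ is const char *, the const form is what lets default arguments such as am = "XML" or am = "ZIP" stay plain string literals without relying on the old literal-to-char* conversion, and it documents that the callee will not modify the text. A small illustration, with the typedefs written out here as an assumption rather than copied from the engine's headers:

#include <cstdio>

typedef char       *PSZ;     // assumed: writable string
typedef const char *PCSZ;    // assumed: read-only string, binds to literals

// A PCSZ parameter accepts both literals and ordinary buffers; a PSZ
// parameter would reject the literal (ill-formed in C++11, deprecated before that).
static void describe(PCSZ am) { std::printf("access method: %s\n", am); }

int main() {
  char buf[8] = "XML";
  describe("ZIP");           // literal binds directly to const char *
  describe(buf);             // a mutable buffer still converts implicitly
  return 0;
}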
*/ /***********************************************************************/ -bool VALBLK::SetFormat(PGLOBAL g, PSZ, int, int) +bool VALBLK::SetFormat(PGLOBAL g, PCSZ, int, int) { sprintf(g->Message, MSG(NO_DATE_FMT), Type); return true; @@ -210,11 +206,7 @@ void VALBLK::ChkIndx(int n) if (n < 0 || n >= Nval) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_VALBLK_INDX)); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY } // endif n } // end of ChkIndx @@ -224,11 +216,7 @@ void VALBLK::ChkTyp(PVAL v) if (Check && (Type != v->GetType() || Unsigned != v->IsUnsigned())) { PGLOBAL& g = Global; strcpy(g->Message, MSG(VALTYPE_NOMATCH)); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY } // endif Type } // end of ChkTyp @@ -238,11 +226,7 @@ void VALBLK::ChkTyp(PVBLK vb) if (Check && (Type != vb->GetType() || Unsigned != vb->IsUnsigned())) { PGLOBAL& g = Global; strcpy(g->Message, MSG(VALTYPE_NOMATCH)); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY } // endif Type } // end of ChkTyp @@ -351,18 +335,14 @@ uchar TYPBLK::GetTypedValue(PVAL valp) /* Set one value in a block from a zero terminated string. */ /***********************************************************************/ template -void TYPBLK::SetValue(PSZ p, int n) +void TYPBLK::SetValue(PCSZ p, int n) { ChkIndx(n); if (Check) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_SET_STRING)); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY } // endif Check bool minus; @@ -405,18 +385,14 @@ template <> ulonglong TYPBLK::MaxVal(void) {return ULONGLONG_MAX;} template <> -void TYPBLK::SetValue(PSZ p, int n) +void TYPBLK::SetValue(PCSZ p, int n) { ChkIndx(n); if (Check) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BAD_SET_STRING)); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY } // endif Check Typp[n] = atof(p); @@ -427,7 +403,7 @@ void TYPBLK::SetValue(PSZ p, int n) /* Set one value in a block from an array of characters. */ /***********************************************************************/ template -void TYPBLK::SetValue(char *sp, uint len, int n) +void TYPBLK::SetValue(PCSZ sp, uint len, int n) { PGLOBAL& g = Global; PSZ spz = (PSZ)PlugSubAlloc(g, NULL, 0); // Temporary @@ -802,7 +778,7 @@ void CHRBLK::SetValue(PVAL valp, int n) /***********************************************************************/ /* Set one value in a block from a zero terminated string. */ /***********************************************************************/ -void CHRBLK::SetValue(PSZ sp, int n) +void CHRBLK::SetValue(PCSZ sp, int n) { uint len = (sp) ? strlen(sp) : 0; SetValue(sp, len, n); @@ -811,7 +787,7 @@ void CHRBLK::SetValue(PSZ sp, int n) /***********************************************************************/ /* Set one value in a block from an array of characters. 
*/ /***********************************************************************/ -void CHRBLK::SetValue(char *sp, uint len, int n) +void CHRBLK::SetValue(const char *sp, uint len, int n) { char *p = Chrp + n * Long; @@ -819,11 +795,7 @@ void CHRBLK::SetValue(char *sp, uint len, int n) if (Check && (signed)len > Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(SET_STR_TRUNC)); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY } // endif Check #endif // _DEBUG @@ -851,11 +823,7 @@ void CHRBLK::SetValue(PVBLK pv, int n1, int n2) if (Type != pv->GetType() || Long != ((CHRBLK*)pv)->Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BLKTYPLEN_MISM)); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY } // endif Type if (!(b = pv->IsNull(n2))) @@ -906,11 +874,7 @@ void CHRBLK::SetValues(PVBLK pv, int k, int n) if (Type != pv->GetType() || Long != ((CHRBLK*)pv)->Long) { PGLOBAL& g = Global; strcpy(g->Message, MSG(BLKTYPLEN_MISM)); -#if defined(USE_TRY) throw Type; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY } // endif Type #endif // _DEBUG char *p = ((CHRBLK*)pv)->Chrp; @@ -1188,7 +1152,7 @@ void STRBLK::SetValue(PVAL valp, int n) /***********************************************************************/ /* Set one value in a block from a zero terminated string. */ /***********************************************************************/ -void STRBLK::SetValue(PSZ p, int n) +void STRBLK::SetValue(PCSZ p, int n) { if (p) { if (!Sorted || !n || !Strp[n-1] || strcmp(p, Strp[n-1])) @@ -1204,7 +1168,7 @@ void STRBLK::SetValue(PSZ p, int n) /***********************************************************************/ /* Set one value in a block from an array of characters. */ /***********************************************************************/ -void STRBLK::SetValue(char *sp, uint len, int n) +void STRBLK::SetValue(const char *sp, uint len, int n) { PSZ p; @@ -1352,7 +1316,7 @@ DATBLK::DATBLK(void *mp, int nval) : TYPBLK(mp, nval, TYPE_INT) /***********************************************************************/ /* Set format so formatted dates can be converted on input. */ /***********************************************************************/ -bool DATBLK::SetFormat(PGLOBAL g, PSZ fmt, int len, int year) +bool DATBLK::SetFormat(PGLOBAL g, PCSZ fmt, int len, int year) { if (!(Dvalp = AllocateValue(g, TYPE_DATE, len, year, false, fmt))) return true; @@ -1379,7 +1343,7 @@ char *DATBLK::GetCharString(char *p, int n) /***********************************************************************/ /* Set one value in a block from a char string. 
*/ /***********************************************************************/ -void DATBLK::SetValue(PSZ p, int n) +void DATBLK::SetValue(PCSZ p, int n) { if (Dvalp) { // Decode the string according to format diff --git a/storage/connect/valblk.h b/storage/connect/valblk.h index c3cad79b234..38a73424985 100644 --- a/storage/connect/valblk.h +++ b/storage/connect/valblk.h @@ -91,7 +91,7 @@ class VALBLK : public BLOCK { virtual char *GetCharString(char *p, int n) = 0; virtual void ReAlloc(void *mp, int n) {Blkp = mp; Nval = n;} virtual void Reset(int n) = 0; - virtual bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0); + virtual bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0); virtual void SetPrec(int p) {} virtual bool IsCi(void) {return false;} @@ -105,8 +105,8 @@ class VALBLK : public BLOCK { virtual void SetValue(double, int) {assert(false);} virtual void SetValue(char, int) {assert(false);} virtual void SetValue(uchar, int) {assert(false);} - virtual void SetValue(PSZ, int) {assert(false);} - virtual void SetValue(char *, uint, int) {assert(false);} + virtual void SetValue(PCSZ, int) {assert(false);} + virtual void SetValue(const char *, uint, int) {assert(false);} virtual void SetValue(PVAL valp, int n) = 0; virtual void SetValue(PVBLK pv, int n1, int n2) = 0; virtual void SetMin(PVAL valp, int n) = 0; @@ -165,8 +165,8 @@ class TYPBLK : public VALBLK { // Methods using VALBLK::SetValue; - virtual void SetValue(PSZ sp, int n); - virtual void SetValue(char *sp, uint len, int n); + virtual void SetValue(PCSZ sp, int n); + virtual void SetValue(const char *sp, uint len, int n); virtual void SetValue(short sval, int n) {Typp[n] = (TYPE)sval; SetNull(n, false);} virtual void SetValue(ushort sval, int n) @@ -236,8 +236,8 @@ class CHRBLK : public VALBLK { // Methods using VALBLK::SetValue; - virtual void SetValue(PSZ sp, int n); - virtual void SetValue(char *sp, uint len, int n); + virtual void SetValue(PCSZ sp, int n); + virtual void SetValue(const char *sp, uint len, int n); virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); virtual void SetMin(PVAL valp, int n); @@ -290,8 +290,8 @@ class STRBLK : public VALBLK { // Methods using VALBLK::SetValue; - virtual void SetValue(PSZ sp, int n); - virtual void SetValue(char *sp, uint len, int n); + virtual void SetValue(PCSZ sp, int n); + virtual void SetValue(const char *sp, uint len, int n); virtual void SetValue(PVAL valp, int n); virtual void SetValue(PVBLK pv, int n1, int n2); virtual void SetMin(PVAL valp, int n); @@ -322,12 +322,12 @@ class DATBLK : public TYPBLK { DATBLK(void *mp, int size); // Implementation - virtual bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0); + virtual bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0); virtual char *GetCharString(char *p, int n); // Methods using TYPBLK::SetValue; - virtual void SetValue(PSZ sp, int n); + virtual void SetValue(PCSZ sp, int n); protected: // Members @@ -352,7 +352,7 @@ class PTRBLK : public STRBLK { // Methods using STRBLK::SetValue; using STRBLK::CompVal; - virtual void SetValue(PSZ p, int n) {Strp[n] = p;} + virtual void SetValue(PCSZ p, int n) {Strp[n] = (char*)p;} virtual int CompVal(int i1, int i2); protected: diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index fec38217b0b..1e215c4df69 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -1,5 +1,5 @@ /************* Value C++ Functions Source Code File (.CPP) *************/ -/* Name: VALUE.CPP Version 2.7 */ +/* 
Name: VALUE.CPP Version 2.8 */ /* */ /* (C) Copyright to the author Olivier BERTRAND 2001-2017 */ /* */ @@ -57,17 +57,10 @@ /* Check macro's. */ /***********************************************************************/ #if defined(_DEBUG) -#if defined(USE_TRY) #define CheckType(V) if (Type != V->GetType()) { \ PGLOBAL& g = Global; \ strcpy(g->Message, MSG(VALTYPE_NOMATCH)); \ throw Type; -#else // !USE_TRY -#define CheckType(V) if (Type != V->GetType()) { \ - PGLOBAL& g = Global; \ - strcpy(g->Message, MSG(VALTYPE_NOMATCH)); \ - longjmp(g->jumper[g->jump_level], Type); -#endif // !USE_TRY #else #define CheckType(V) #endif @@ -101,12 +94,12 @@ PSZ strlwr(PSZ s); /* OUT minus: Set to true if the number is negative */ /* Returned val: The resulting number */ /***********************************************************************/ -ulonglong CharToNumber(char *p, int n, ulonglong maxval, +ulonglong CharToNumber(const char *p, int n, ulonglong maxval, bool un, bool *minus, bool *rc) { - char *p2; - uchar c; - ulonglong val; + const char *p2; + uchar c; + ulonglong val; if (minus) *minus = false; if (rc) *rc = false; @@ -145,9 +138,9 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval, /***********************************************************************/ /* GetTypeName: returns the PlugDB internal type name. */ /***********************************************************************/ -PSZ GetTypeName(int type) +PCSZ GetTypeName(int type) { - PSZ name; + PCSZ name; switch (type) { case TYPE_STRING: name = "CHAR"; break; @@ -191,9 +184,9 @@ int GetTypeSize(int type, int len) /***********************************************************************/ /* GetFormatType: returns the FORMAT character(s) according to type. */ /***********************************************************************/ -char *GetFormatType(int type) +const char *GetFormatType(int type) { - char *c = "X"; + const char *c = "X"; switch (type) { case TYPE_STRING: c = "C"; break; @@ -377,7 +370,7 @@ PVAL AllocateValue(PGLOBAL g, void *value, short type, short prec) /* Allocate a variable Value according to type, length and precision. */ /***********************************************************************/ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec, - bool uns, PSZ fmt) + bool uns, PCSZ fmt) { PVAL valp; @@ -565,6 +558,38 @@ bool VALUE::Compute(PGLOBAL g, PVAL *, int, OPVAL) return true; } // end of Compute +/***********************************************************************/ +/* Make file output of an object value. */ +/***********************************************************************/ +void VALUE::Print(PGLOBAL g, FILE *f, uint n) +{ + char m[64], buf[64]; + + memset(m, ' ', n); /* Make margin string */ + m[n] = '\0'; + + if (Null) + fprintf(f, "%s\n", m); + else + fprintf(f, strcat(strcat(GetCharString(buf), "\n"), m)); + +} /* end of Print */ + +/***********************************************************************/ +/* Make string output of an object value. 
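GetTypeName and GetFormatType now return PCSZ / const char * instead of PSZ / char *. Both functions hand back string literals ("CHAR", "X", ...), and writing through a char * that aliases a literal is undefined behaviour, so the const return type closes that door for callers. A standalone sketch; the codes and names below are placeholders, not CONNECT's real TYPE_* values:

#include <cstdio>

typedef const char *PCSZ;

enum { TYPE_STRING_X = 1, TYPE_INT_X = 7 };   // placeholder codes only

// Returning a literal through a const pointer documents that the storage is
// read-only; callers can print or copy it, but cannot modify it by mistake.
static PCSZ type_name(int type) {
  switch (type) {
    case TYPE_STRING_X: return "CHAR";
    case TYPE_INT_X:    return "INTEGER";
    default:            return "UNKNOWN";
  }
}

int main() {
  std::printf("%s / %s\n", type_name(TYPE_STRING_X), type_name(42));
  return 0;
}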
*/ +/***********************************************************************/ +void VALUE::Print(PGLOBAL g, char *ps, uint z) +{ + char *p, buf[64]; + + if (Null) + p = strcpy(buf, ""); + else + p = GetCharString(buf); + + strncpy(ps, p, z); +} // end of Print + /* -------------------------- Class TYPVAL ---------------------------- */ /***********************************************************************/ @@ -689,7 +714,7 @@ uchar TYPVAL::GetTypedValue(PVAL valp) /* TYPVAL SetValue: convert chars extracted from a line to TYPE value.*/ /***********************************************************************/ template -bool TYPVAL::SetValue_char(char *p, int n) +bool TYPVAL::SetValue_char(const char *p, int n) { bool rc, minus; ulonglong maxval = MaxVal(); @@ -711,7 +736,7 @@ bool TYPVAL::SetValue_char(char *p, int n) } // end of SetValue template <> -bool TYPVAL::SetValue_char(char *p, int n) +bool TYPVAL::SetValue_char(const char *p, int n) { if (p && n > 0) { char buf[64]; @@ -739,7 +764,7 @@ bool TYPVAL::SetValue_char(char *p, int n) /* TYPVAL SetValue: fill a typed value from a string. */ /***********************************************************************/ template -void TYPVAL::SetValue_psz(PSZ s) +void TYPVAL::SetValue_psz(PCSZ s) { if (s) { SetValue_char(s, (int)strlen(s)); @@ -1026,19 +1051,11 @@ TYPE TYPVAL::SafeAdd(TYPE n1, TYPE n2) if ((n2 > 0) && (n < n1)) { // Overflow strcpy(g->Message, MSG(FIX_OVFLW_ADD)); -#if defined(USE_TRY) throw 138; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 138); -#endif // !USE_TRY } else if ((n2 < 0) && (n > n1)) { // Underflow strcpy(g->Message, MSG(FIX_UNFLW_ADD)); -#if defined(USE_TRY) throw 138; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 138); -#endif // !USE_TRY } // endif's n2 return n; @@ -1062,19 +1079,11 @@ TYPE TYPVAL::SafeMult(TYPE n1, TYPE n2) if (n > MinMaxVal(true)) { // Overflow strcpy(g->Message, MSG(FIX_OVFLW_TIMES)); -#if defined(USE_TRY) throw 138; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 138); -#endif // !USE_TRY } else if (n < MinMaxVal(false)) { // Underflow strcpy(g->Message, MSG(FIX_UNFLW_TIMES)); -#if defined(USE_TRY) throw 138; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 138); -#endif // !USE_TRY } // endif's n2 return (TYPE)n; @@ -1193,7 +1202,7 @@ bool TYPVAL::Compall(PGLOBAL g, PVAL *vp, int np, OPVAL op) /* This function assumes that the format matches the value type. */ /***********************************************************************/ template -bool TYPVAL::FormatValue(PVAL vp, char *fmt) +bool TYPVAL::FormatValue(PVAL vp, PCSZ fmt) { char *buf = (char*)vp->GetTo_Val(); // Should be big enough int n = sprintf(buf, fmt, Tval); @@ -1215,37 +1224,6 @@ bool TYPVAL::SetConstFormat(PGLOBAL g, FORMAT& fmt) return false; } // end of SetConstFormat -/***********************************************************************/ -/* Make file output of a typed object. */ -/***********************************************************************/ -template -void TYPVAL::Print(PGLOBAL g, FILE *f, uint n) - { - char m[64], buf[12]; - - memset(m, ' ', n); /* Make margin string */ - m[n] = '\0'; - - if (Null) - fprintf(f, "%s\n", m); - else - fprintf(f, strcat(strcat(strcpy(buf, "%s"), Fmt), "\n"), m, Tval); - - } /* end of Print */ - -/***********************************************************************/ -/* Make string output of a int object. 
*/ -/***********************************************************************/ -template -void TYPVAL::Print(PGLOBAL g, char *ps, uint z) - { - if (Null) - strcpy(ps, ""); - else - sprintf(ps, Fmt, Tval); - - } /* end of Print */ - /* -------------------------- Class STRING --------------------------- */ /***********************************************************************/ @@ -1384,25 +1362,25 @@ bool TYPVAL::SetValue_pval(PVAL valp, bool chktype) /***********************************************************************/ /* STRING SetValue: fill string with chars extracted from a line. */ /***********************************************************************/ -bool TYPVAL::SetValue_char(char *p, int n) +bool TYPVAL::SetValue_char(const char *cp, int n) { bool rc = false; - if (!p || n == 0) { + if (!cp || n == 0) { Reset(); Null = Nullable; - } else if (p != Strp) { - rc = n > Len; + } else if (cp != Strp) { + const char *p = cp + n - 1; - if ((n = MY_MIN(n, Len))) { - strncpy(Strp, p, n); + for (p; p >= cp; p--, n--) + if (*p && *p != ' ') + break; -// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; - for (p = Strp + n - 1; p >= Strp; p--) - if (*p && *p != ' ') - break; + rc = n > Len; - *(++p) = '\0'; + if ((n = MY_MIN(n, Len))) { + strncpy(Strp, cp, n); + Strp[n] = '\0'; if (trace > 1) htrc(" Setting string to: '%s'\n", Strp); @@ -1419,7 +1397,7 @@ bool TYPVAL::SetValue_char(char *p, int n) /***********************************************************************/ /* STRING SetValue: fill string with another string. */ /***********************************************************************/ -void TYPVAL::SetValue_psz(PSZ s) +void TYPVAL::SetValue_psz(PCSZ s) { if (!s) { Reset(); @@ -1455,15 +1433,7 @@ void TYPVAL::SetValue(int n) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); -#if defined(USE_TRY) throw 138; -#else // !USE_TRY -#if defined(USE_TRY) - throw 138; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 138); -#endif // !USE_TRY -#endif // !USE_TRY } else SetValue_psz(buf); @@ -1517,11 +1487,7 @@ void TYPVAL::SetValue(longlong n) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); -#if defined(USE_TRY) throw 138; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 138); -#endif // !USE_TRY } else SetValue_psz(buf); @@ -1564,11 +1530,7 @@ void TYPVAL::SetValue(double f) if (k > Len) { sprintf(g->Message, MSG(VALSTR_TOO_LONG), buf, Len); -#if defined(USE_TRY) throw 138; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 138); -#endif // !USE_TRY } else SetValue_psz(buf); @@ -1598,7 +1560,7 @@ void TYPVAL::SetValue(uchar c) /***********************************************************************/ void TYPVAL::SetBinValue(void *p) { - SetValue_char((char *)p, Len); + SetValue_char((const char *)p, Len); } // end of SetBinValue /***********************************************************************/ @@ -1728,7 +1690,7 @@ bool TYPVAL::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op) /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. 
*/ /***********************************************************************/ -bool TYPVAL::FormatValue(PVAL vp, char *fmt) +bool TYPVAL::FormatValue(PVAL vp, PCSZ fmt) { char *buf = (char*)vp->GetTo_Val(); // Should be big enough int n = sprintf(buf, fmt, Strp); @@ -1747,6 +1709,18 @@ bool TYPVAL::SetConstFormat(PGLOBAL, FORMAT& fmt) return false; } // end of SetConstFormat +/***********************************************************************/ +/* Make string output of an object value. */ +/***********************************************************************/ +void TYPVAL::Print(PGLOBAL g, char *ps, uint z) +{ + if (Null) + strncpy(ps, "null", z); + else + strcat(strncat(strncpy(ps, "\"", z), Strp, z-2), "\""); + +} // end of Print + /* -------------------------- Class DECIMAL -------------------------- */ /***********************************************************************/ @@ -1836,102 +1810,6 @@ bool DECVAL::GetBinValue(void *buf, int buflen, bool go) return false; } // end of GetBinValue -#if 0 -/***********************************************************************/ -/* DECIMAL SetValue: copy the value of another Value object. */ -/***********************************************************************/ -bool DECVAL::SetValue_pval(PVAL valp, bool chktype) - { - if (chktype && (valp->GetType() != Type || valp->GetSize() > Len)) - return true; - - char buf[64]; - - if (!(Null = valp->IsNull() && Nullable)) - strncpy(Strp, valp->GetCharString(buf), Len); - else - Reset(); - - return false; - } // end of SetValue_pval - -/***********************************************************************/ -/* DECIMAL SetValue: fill string with chars extracted from a line. */ -/***********************************************************************/ -bool DECVAL::SetValue_char(char *p, int n) - { - bool rc; - - if (p && n > 0) { - rc = n > Len; - - if ((n = MY_MIN(n, Len))) { - strncpy(Strp, p, n); - -// for (p = Strp + n - 1; p >= Strp && (*p == ' ' || *p == '\0'); p--) ; - for (p = Strp + n - 1; p >= Strp; p--) - if (*p && *p != ' ') - break; - - *(++p) = '\0'; - - if (trace > 1) - htrc(" Setting string to: '%s'\n", Strp); - - } else - Reset(); - - Null = false; - } else { - rc = false; - Reset(); - Null = Nullable; - } // endif p - - return rc; - } // end of SetValue_char - -/***********************************************************************/ -/* DECIMAL SetValue: fill string with another string. */ -/***********************************************************************/ -void DECVAL::SetValue_psz(PSZ s) - { - if (s) { - strncpy(Strp, s, Len); - Null = false; - } else { - Reset(); - Null = Nullable; - } // endif s - - } // end of SetValue_psz - -/***********************************************************************/ -/* DECIMAL SetValue: fill string with a string extracted from a block.*/ -/***********************************************************************/ -void DECVAL::SetValue_pvblk(PVBLK blk, int n) - { - // STRBLK's can return a NULL pointer - SetValue_psz(blk->GetCharValue(n)); - } // end of SetValue_pvblk - -/***********************************************************************/ -/* DECIMAL SetBinValue: fill string with chars extracted from a line. 
*/ -/***********************************************************************/ -void DECVAL::SetBinValue(void *p) - { - SetValue_char((char *)p, Len); - } // end of SetBinValue - -/***********************************************************************/ -/* DECIMAL GetCharString: get string representation of a char value. */ -/***********************************************************************/ -char *DECVAL::GetCharString(char *p) - { - return Strp; - } // end of GetCharString -#endif // 0 - /***********************************************************************/ /* DECIMAL compare value with another Value. */ /***********************************************************************/ @@ -1966,32 +1844,6 @@ int DECVAL::CompareValue(PVAL vp) return (f > n) ? 1 : (f < n) ? (-1) : 0; } // end of CompareValue -#if 0 -/***********************************************************************/ -/* FormatValue: This function set vp (a STRING value) to the string */ -/* constructed from its own value formated using the fmt format. */ -/* This function assumes that the format matches the value type. */ -/***********************************************************************/ -bool DECVAL::FormatValue(PVAL vp, char *fmt) - { - char *buf = (char*)vp->GetTo_Val(); // Should be big enough - int n = sprintf(buf, fmt, Strp); - - return (n > vp->GetValLen()); - } // end of FormatValue - -/***********************************************************************/ -/* DECIMAL SetFormat function (used to set SELECT output format). */ -/***********************************************************************/ -bool DECVAL::SetConstFormat(PGLOBAL g, FORMAT& fmt) - { - fmt.Type[0] = 'C'; - fmt.Length = Len; - fmt.Prec = 0; - return false; - } // end of SetConstFormat -#endif // 0 - /* -------------------------- Class BINVAL --------------------------- */ /***********************************************************************/ @@ -2149,7 +2001,7 @@ bool BINVAL::SetValue_pval(PVAL valp, bool chktype) /***********************************************************************/ /* BINVAL SetValue: fill value with chars extracted from a line. */ /***********************************************************************/ -bool BINVAL::SetValue_char(char *p, int n) +bool BINVAL::SetValue_char(const char *p, int n) { bool rc; @@ -2170,7 +2022,7 @@ bool BINVAL::SetValue_char(char *p, int n) /***********************************************************************/ /* BINVAL SetValue: fill value with another string. */ /***********************************************************************/ -void BINVAL::SetValue_psz(PSZ s) +void BINVAL::SetValue_psz(PCSZ s) { if (s) { Len = MY_MIN(Clen, (signed)strlen(s)); @@ -2396,7 +2248,7 @@ bool BINVAL::IsEqual(PVAL vp, bool chktype) /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. */ /***********************************************************************/ -bool BINVAL::FormatValue(PVAL vp, char *fmt) +bool BINVAL::FormatValue(PVAL vp, PCSZ fmt) { char *buf = (char*)vp->GetTo_Val(); // Should be big enough int n = sprintf(buf, fmt, Len, Binp); @@ -2420,7 +2272,7 @@ bool BINVAL::SetConstFormat(PGLOBAL, FORMAT& fmt) /***********************************************************************/ /* DTVAL public constructor for new void values. 
*/ /***********************************************************************/ -DTVAL::DTVAL(PGLOBAL g, int n, int prec, PSZ fmt) +DTVAL::DTVAL(PGLOBAL g, int n, int prec, PCSZ fmt) : TYPVAL((int)0, TYPE_DATE) { if (!fmt) { @@ -2449,7 +2301,7 @@ DTVAL::DTVAL(int n) : TYPVAL(n, TYPE_DATE) /***********************************************************************/ /* Set format so formatted dates can be converted on input/output. */ /***********************************************************************/ -bool DTVAL::SetFormat(PGLOBAL g, PSZ fmt, int len, int year) +bool DTVAL::SetFormat(PGLOBAL g, PCSZ fmt, int len, int year) { Pdtp = MakeDateFormat(g, fmt, true, true, (year > 9999) ? 1 : 0); Sdate = (char*)PlugSubAlloc(g, NULL, len + 1); @@ -2707,7 +2559,11 @@ bool DTVAL::SetValue_pval(PVAL valp, bool chktype) ndv = ExtractDate(valp->GetCharValue(), Pdtp, DefYear, dval); MakeDate(NULL, dval, ndv); - } else + } else if (valp->GetType() == TYPE_BIGINT && + !(valp->GetBigintValue() % 1000)) { + // Assuming that this timestamp is in milliseconds + Tval = valp->GetBigintValue() / 1000; + } else Tval = valp->GetIntValue(); } else @@ -2721,14 +2577,14 @@ bool DTVAL::SetValue_pval(PVAL valp, bool chktype) /***********************************************************************/ /* SetValue: convert chars extracted from a line to date value. */ /***********************************************************************/ -bool DTVAL::SetValue_char(char *p, int n) +bool DTVAL::SetValue_char(const char *p, int n) { bool rc= 0; if (Pdtp) { - char *p2; - int ndv; - int dval[6]; + const char *p2; + int ndv; + int dval[6]; if (n > 0) { // Trim trailing blanks @@ -2760,11 +2616,11 @@ bool DTVAL::SetValue_char(char *p, int n) /***********************************************************************/ /* SetValue: convert a char string to date value. */ /***********************************************************************/ -void DTVAL::SetValue_psz(PSZ p) +void DTVAL::SetValue_psz(PCSZ p) { if (Pdtp) { - int ndv; - int dval[6]; + int ndv; + int dval[6]; strncpy(Sdate, p, Len); Sdate[Len] = '\0'; @@ -2854,8 +2710,10 @@ char *DTVAL::ShowValue(char *buf, int len) strncat(p, "Error", m); } // endif n - } else - p = ""; // DEFAULT VALUE ??? + } else { + p = buf; + *p = '\0'; // DEFAULT VALUE ??? + } // endif Null return p; } else @@ -2920,7 +2778,7 @@ bool DTVAL::WeekNum(PGLOBAL g, int& nval) /* constructed from its own value formated using the fmt format. */ /* This function assumes that the format matches the value type. */ /***********************************************************************/ -bool DTVAL::FormatValue(PVAL vp, char *fmt) +bool DTVAL::FormatValue(PVAL vp, PCSZ fmt) { char *buf = (char*)vp->GetTo_Val(); // Should be big enough struct tm tm, *ptm = GetGmTime(&tm); diff --git a/storage/connect/value.h b/storage/connect/value.h index 14a568c3549..cf6682f56f2 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -1,7 +1,7 @@ /**************** Value H Declares Source Code File (.H) ***************/ -/* Name: VALUE.H Version 2.2 */ +/* Name: VALUE.H Version 2.3 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2001-2016 */ +/* (C) Copyright to the author Olivier BERTRAND 2001-2017 */ /* */ /* This file contains the VALUE and derived classes declares. */ /***********************************************************************/ @@ -40,14 +40,14 @@ typedef struct _datpar *PDTP; // For DTVAL /* Utilities used to test types and to allocated values. 
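One behavioural addition in DTVAL::SetValue_pval above: when the source value is a BIGINT that is an exact multiple of 1000, it is now taken to be a millisecond epoch timestamp and divided down to seconds before being stored. The heuristic in isolation, as plain epoch arithmetic with none of the Pdtp/format handling (the function and variable names here are illustrative only):

#include <cstdio>

typedef long long longlong;

// Assumed simplification of the new rule: bigints that are exact multiples of
// 1000 are treated as milliseconds since the epoch, everything else as seconds.
static int to_epoch_seconds(longlong v) {
  if (v % 1000 == 0)
    return (int)(v / 1000);        // milliseconds -> seconds
  return (int)v;                   // already seconds, keep as-is
}

int main() {
  std::printf("%d\n", to_epoch_seconds(1494547200000LL));  // millisecond input
  std::printf("%d\n", to_epoch_seconds(1494547200LL));     // second input, unchanged
  return 0;
}

The % 1000 test is a guess about units rather than a type tag, so a genuine seconds value that happens to end in 000 would be divided as well; that trade-off is inherent to the heuristic.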
*/ /***********************************************************************/ // Exported functions -DllExport PSZ GetTypeName(int); +DllExport PCSZ GetTypeName(int); DllExport int GetTypeSize(int, int); #ifdef ODBC_SUPPORT /* This function is exported for use in OEM table type DLLs */ DllExport int TranslateSQLType(int stp, int prec, int& len, char& v, bool& w); #endif -DllExport char *GetFormatType(int); +DllExport const char *GetFormatType(int); DllExport int GetFormatType(char); DllExport bool IsTypeChar(int type); DllExport bool IsTypeNum(int type); @@ -55,8 +55,8 @@ DllExport int ConvertType(int, int, CONV, bool match = false); DllExport PVAL AllocateValue(PGLOBAL, void *, short, short = 2); DllExport PVAL AllocateValue(PGLOBAL, PVAL, int = TYPE_VOID, int = 0); DllExport PVAL AllocateValue(PGLOBAL, int, int len = 0, int prec = 0, - bool uns = false, PSZ fmt = NULL); -DllExport ulonglong CharToNumber(char *, int, ulonglong, bool, + bool uns = false, PCSZ fmt = NULL); +DllExport ulonglong CharToNumber(PCSZ, int, ulonglong, bool, bool *minus = NULL, bool *rc = NULL); DllExport BYTE OpBmp(PGLOBAL g, OPVAL opc); @@ -100,8 +100,8 @@ class DllExport VALUE : public BLOCK { // Methods virtual bool SetValue_pval(PVAL valp, bool chktype = false) = 0; - virtual bool SetValue_char(char *p, int n) = 0; - virtual void SetValue_psz(PSZ s) = 0; + virtual bool SetValue_char(const char *p, int n) = 0; + virtual void SetValue_psz(PCSZ s) = 0; virtual void SetValue_bool(bool) {assert(FALSE);} virtual int CompareValue(PVAL vp) = 0; virtual BYTE TestValue(PVAL vp); @@ -121,7 +121,9 @@ class DllExport VALUE : public BLOCK { virtual char *GetCharString(char *p) = 0; virtual bool IsEqual(PVAL vp, bool chktype) = 0; virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op); - virtual bool FormatValue(PVAL vp, char *fmt) = 0; + virtual bool FormatValue(PVAL vp, PCSZ fmt) = 0; + virtual void Print(PGLOBAL g, FILE *, uint); + virtual void Print(PGLOBAL g, char *ps, uint z); /** Set value from a non-aligned in-memory value in the machine byte order. @@ -211,8 +213,8 @@ class DllExport TYPVAL : public VALUE { // Methods virtual bool SetValue_pval(PVAL valp, bool chktype); - virtual bool SetValue_char(char *p, int n); - virtual void SetValue_psz(PSZ s); + virtual bool SetValue_char(const char *p, int n); + virtual void SetValue_psz(PCSZ s); virtual void SetValue_bool(bool b) {Tval = (b) ? 
1 : 0;} virtual int CompareValue(PVAL vp); virtual void SetValue(char c) {Tval = (TYPE)c; Null = false;} @@ -232,9 +234,7 @@ class DllExport TYPVAL : public VALUE { virtual bool IsEqual(PVAL vp, bool chktype); virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op); virtual bool SetConstFormat(PGLOBAL, FORMAT&); - virtual bool FormatValue(PVAL vp, char *fmt); - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *, uint); + virtual bool FormatValue(PVAL vp, PCSZ fmt); protected: static TYPE MinMaxVal(bool b); @@ -287,8 +287,8 @@ class DllExport TYPVAL: public VALUE { // Methods virtual bool SetValue_pval(PVAL valp, bool chktype); - virtual bool SetValue_char(char *p, int n); - virtual void SetValue_psz(PSZ s); + virtual bool SetValue_char(const char *p, int n); + virtual void SetValue_psz(PCSZ s); virtual void SetValue_pvblk(PVBLK blk, int n); virtual void SetValue(char c); virtual void SetValue(uchar c); @@ -306,8 +306,9 @@ class DllExport TYPVAL: public VALUE { virtual char *GetCharString(char *p); virtual bool IsEqual(PVAL vp, bool chktype); virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op); - virtual bool FormatValue(PVAL vp, char *fmt); + virtual bool FormatValue(PVAL vp, PCSZ fmt); virtual bool SetConstFormat(PGLOBAL, FORMAT&); + virtual void Print(PGLOBAL g, char *ps, uint z); protected: // Members @@ -371,8 +372,8 @@ class DllExport BINVAL: public VALUE { // Methods virtual bool SetValue_pval(PVAL valp, bool chktype); - virtual bool SetValue_char(char *p, int n); - virtual void SetValue_psz(PSZ s); + virtual bool SetValue_char(const char *p, int n); + virtual void SetValue_psz(PCSZ s); virtual void SetValue_pvblk(PVBLK blk, int n); virtual void SetValue(char c); virtual void SetValue(uchar c); @@ -389,7 +390,7 @@ class DllExport BINVAL: public VALUE { virtual char *ShowValue(char *buf, int); virtual char *GetCharString(char *p); virtual bool IsEqual(PVAL vp, bool chktype); - virtual bool FormatValue(PVAL vp, char *fmt); + virtual bool FormatValue(PVAL vp, PCSZ fmt); virtual bool SetConstFormat(PGLOBAL, FORMAT&); protected: @@ -405,18 +406,18 @@ class DllExport BINVAL: public VALUE { class DllExport DTVAL : public TYPVAL { public: // Constructors - DTVAL(PGLOBAL g, int n, int p, PSZ fmt); + DTVAL(PGLOBAL g, int n, int p, PCSZ fmt); DTVAL(int n); // Implementation virtual bool SetValue_pval(PVAL valp, bool chktype); - virtual bool SetValue_char(char *p, int n); - virtual void SetValue_psz(PSZ s); + virtual bool SetValue_char(const char *p, int n); + virtual void SetValue_psz(PCSZ s); virtual void SetValue_pvblk(PVBLK blk, int n); virtual char *GetCharString(char *p); virtual char *ShowValue(char *buf, int); - virtual bool FormatValue(PVAL vp, char *fmt); - bool SetFormat(PGLOBAL g, PSZ fmt, int len, int year = 0); + virtual bool FormatValue(PVAL vp, PCSZ fmt); + bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0); bool SetFormat(PGLOBAL g, PVAL valp); bool IsFormatted(void) {return Pdtp != NULL;} bool MakeTime(struct tm *ptm); diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 48edd12b3d8..e6e530c4dc2 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -446,11 +446,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) #if 0 if (!dup->Step) { strcpy(g->Message, MSG(QUERY_CANCELLED)); -#if defined(USE_TRY) throw 99; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], 99); -#endif // !USE_TRY } // endif Step #endif // 0 @@ -823,7 +819,7 @@ bool XINDEX::Reorder(PGLOBAL g __attribute__((unused))) 
/***********************************************************************/ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) { - char *ftype; + PCSZ ftype; char fn[_MAX_PATH]; int n[NZ], nof = (Mul) ? (Ndif + 1) : 0; int id = -1, size = 0; @@ -952,7 +948,7 @@ bool XINDEX::Init(PGLOBAL g) /* Table will be accessed through an index table. */ /* If sorting is required, this will be done later. */ /*********************************************************************/ - char *ftype; + PCSZ ftype; char fn[_MAX_PATH]; int k, n, nv[NZ], id = -1; bool estim = false; @@ -969,8 +965,8 @@ bool XINDEX::Init(PGLOBAL g) // For DBF tables, Cardinality includes bad or soft deleted lines // that are not included in the index, and can be larger then the // index size. - estim = (Tdbp->Ftype == RECFM_DBF || Tdbp->Txfp->GetAmType() == TYPE_AM_ZIP); - n = Tdbp->Cardinality(g); // n is exact table size + estim = (Tdbp->Ftype == RECFM_DBF || Tdbp->Txfp->GetAmType() == TYPE_AM_ZIP); + n = Tdbp->Cardinality(g); // n is exact table size } else { // Variable table not optimized estim = true; // n is an estimate of the size @@ -1416,7 +1412,7 @@ err: /***********************************************************************/ bool XINDEX::GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk) { - char *ftype; + PCSZ ftype; char fn[_MAX_PATH]; int nv[NZ], id = -1; // n //bool estim = false; @@ -2324,9 +2320,9 @@ XFILE::XFILE(void) : XLOAD() /***********************************************************************/ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) { - char *pmod; - bool rc; - IOFF noff[MAX_INDX]; + PCSZ pmod; + bool rc; + IOFF noff[MAX_INDX]; /*********************************************************************/ /* Open the index file according to mode. */ @@ -3036,7 +3032,7 @@ bool KXYCOL::Init(PGLOBAL g, PCOL colp, int n, bool sm, int kln) return true; Klen = Valp->GetClen(); - Keys.Size = n * Klen; + Keys.Size = (size_t)n * (size_t)Klen; if (!PlgDBalloc(g, NULL, Keys)) { sprintf(g->Message, MSG(KEY_ALLOC_ERROR), Klen, n); diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index 7d1b6b9c992..cdffcb2fa44 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -84,11 +84,7 @@ double XOBJECT::GetFloatValue(void) CONSTANT::CONSTANT(PGLOBAL g, void *value, short type) { if (!(Value = AllocateValue(g, value, (int)type))) -#if defined(USE_TRY) throw TYPE_CONST; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_CONST); -#endif // !USE_TRY Constant = true; } // end of CONSTANT constructor @@ -99,11 +95,7 @@ CONSTANT::CONSTANT(PGLOBAL g, void *value, short type) CONSTANT::CONSTANT(PGLOBAL g, int n) { if (!(Value = AllocateValue(g, &n, TYPE_INT))) -#if defined(USE_TRY) throw TYPE_CONST; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_CONST); -#endif // !USE_TRY Constant = true; } // end of CONSTANT constructor @@ -125,11 +117,7 @@ void CONSTANT::Convert(PGLOBAL g, int newtype) { if (Value->GetType() != newtype) if (!(Value = AllocateValue(g, Value, newtype))) -#if defined(USE_TRY) throw TYPE_CONST; -#else // !USE_TRY - longjmp(g->jumper[g->jump_level], TYPE_CONST); -#endif // !USE_TRY } // end of Convert @@ -204,7 +192,7 @@ void CONSTANT::Print(PGLOBAL g, char *ps, uint z) /* STRING public constructor for new char values. Alloc Size must be */ /* calculated because PlugSubAlloc rounds up size to multiple of 8. 
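The small change in KXYCOL::Init above, Keys.Size = (size_t)n * (size_t)Klen, is an overflow fix: with two plain ints the multiplication is performed in int and can overflow before the result is ever widened, which matters once an index needs more than 2 GB of key storage. A minimal demonstration of the difference (sizes chosen only to exceed INT_MAX, assuming a 64-bit size_t):

#include <cstdio>
#include <cstddef>

int main() {
  int n = 70000, Klen = 40000;               // 2.8e9 key bytes, larger than INT_MAX

  // size_t size = n * Klen;                 // would multiply in int and overflow
  size_t size = (size_t)n * (size_t)Klen;    // widen first, multiply in size_t

  std::printf("allocating %zu bytes\n", size);
  return 0;
}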
*/ /***********************************************************************/ -STRING::STRING(PGLOBAL g, uint n, char *str) +STRING::STRING(PGLOBAL g, uint n, PCSZ str) { G = g; Length = (str) ? strlen(str) : 0; @@ -217,10 +205,12 @@ STRING::STRING(PGLOBAL g, uint n, char *str) Next = GetNext(); Size = Next - Strp; + Trc = false; } else { // This should normally never happen Next = NULL; Size = 0; + Trc = true; } // endif Strp } // end of STRING constructor @@ -241,6 +231,7 @@ char *STRING::Realloc(uint len) if (!p) { // No more room in Sarea; this is very unlikely strcpy(G->Message, "No more room in work area"); + Trc = true; return NULL; } // endif p @@ -255,7 +246,7 @@ char *STRING::Realloc(uint len) /***********************************************************************/ /* Set a STRING new PSZ value. */ /***********************************************************************/ -bool STRING::Set(PSZ s) +bool STRING::Set(PCSZ s) { if (!s) return false; @@ -345,9 +336,9 @@ bool STRING::Append(const char *s, uint ln, bool nq) } // end of Append /***********************************************************************/ -/* Append a PSZ to a STRING. */ +/* Append a PCSZ to a STRING. */ /***********************************************************************/ -bool STRING::Append(PSZ s) +bool STRING::Append(PCSZ s) { if (!s) return false; @@ -404,11 +395,11 @@ bool STRING::Append(char c) /***********************************************************************/ /* Append a quoted PSZ to a STRING. */ /***********************************************************************/ -bool STRING::Append_quoted(PSZ s) +bool STRING::Append_quoted(PCSZ s) { bool b = Append('\''); - if (s) for (char *p = s; !b && *p; p++) + if (s) for (const char *p = s; !b && *p; p++) switch (*p) { case '\'': case '\\': diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h index 8f6c23c4aeb..204144182c8 100644 --- a/storage/connect/xobject.h +++ b/storage/connect/xobject.h @@ -123,24 +123,25 @@ class DllExport CONSTANT : public XOBJECT { class DllExport STRING : public BLOCK { public: // Constructor - STRING(PGLOBAL g, uint n, PSZ str = NULL); + STRING(PGLOBAL g, uint n, PCSZ str = NULL); // Implementation inline int GetLength(void) {return (int)Length;} inline void SetLength(uint n) {Length = n;} inline PSZ GetStr(void) {return Strp;} inline uint32 GetSize(void) {return Size;} + inline bool IsTruncated(void) {return Trc;} // Methods inline void Reset(void) {*Strp = 0;} - bool Set(PSZ s); + bool Set(PCSZ s); bool Set(char *s, uint n); bool Append(const char *s, uint ln, bool nq = false); - bool Append(PSZ s); + bool Append(PCSZ s); bool Append(STRING &str); bool Append(char c); bool Resize(uint n); - bool Append_quoted(PSZ s); + bool Append_quoted(PCSZ s); inline void Trim(void) {(void)Resize(Length + 1);} inline void Chop(void) {if (Length) Strp[--Length] = 0;} inline void RepLast(char c) {if (Length) Strp[Length-1] = c;} @@ -156,6 +157,7 @@ class DllExport STRING : public BLOCK { PSZ Strp; // The char string uint Length; // String length uint Size; // Allocated size + bool Trc; // When truncated char *Next; // Next alloc position }; // end of class STRING diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h index 4aeea05946a..3da0c17bdfe 100644 --- a/storage/connect/xtable.h +++ b/storage/connect/xtable.h @@ -33,29 +33,6 @@ class CMD : public BLOCK { char *Cmd; }; // end of class CMD -#if 0 -// Condition filter structure -class CONDFIL : public BLOCK { - public: - // Constructor - CONDFIL(const 
Item *cond, uint idx, AMT type) - { - Cond = cond; Idx = idx; Type = type; Op = OP_XX; - Cmds = NULL; All = true; Body = NULL, Having = NULL; - } - - // Members - const Item *Cond; - AMT Type; - uint Idx; - OPVAL Op; - PCMD Cmds; - bool All; - char *Body; - char *Having; -}; // end of class CONDFIL -#endif // 0 - typedef class EXTCOL *PEXTCOL; typedef class CONDFIL *PCFIL; typedef class TDBCAT *PTDBCAT; @@ -84,7 +61,6 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. inline PFIL GetFilter(void) {return To_Filter;} inline PCOL GetSetCols(void) {return To_SetCols;} inline void SetSetCols(PCOL colp) {To_SetCols = colp;} - inline void SetFilter(PFIL fp) {To_Filter = fp;} inline void SetOrig(PTDB txp) {To_Orig = txp;} inline void SetUse(TUSE n) {Use = n;} inline void SetCondFil(PCFIL cfp) {To_CondFil = cfp;} @@ -94,11 +70,14 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. inline void SetColumns(PCOL colp) {Columns = colp;} inline void SetDegree(int degree) {Degree = degree;} inline void SetMode(MODE mode) {Mode = mode;} + inline const Item *GetCond(void) {return Cond;} + inline void SetCond(const Item *cond) {Cond = cond;} // Properties virtual AMT GetAmType(void) {return TYPE_AM_ERROR;} virtual bool IsRemote(void) {return false;} virtual bool IsIndexed(void) {return false;} + virtual void SetFilter(PFIL fp) {To_Filter = fp;} virtual int GetTdb_No(void) {return Tdb_No;} virtual PTDB GetNext(void) {return Next;} virtual PCATLG GetCat(void) {return NULL;} @@ -110,7 +89,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. virtual bool IsSpecial(PSZ name); virtual bool IsReadOnly(void) {return Read_Only;} virtual bool IsView(void) {return FALSE;} - virtual PSZ GetPath(void); + virtual PCSZ GetPath(void); virtual RECFM GetFtype(void) {return RECFM_NAF;} virtual bool GetBlockValues(PGLOBAL) { return false; } virtual int Cardinality(PGLOBAL) {return 0;} @@ -119,11 +98,12 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. virtual int GetMaxSize(PGLOBAL) = 0; virtual int GetProgMax(PGLOBAL) = 0; virtual int GetProgCur(void) {return GetRecpos();} - virtual PSZ GetFile(PGLOBAL) {return "Not a file";} - virtual void SetFile(PGLOBAL, PSZ) {} + virtual PCSZ GetFile(PGLOBAL) {return "Not a file";} + virtual void SetFile(PGLOBAL, PCSZ) {} virtual void ResetDB(void) {} virtual void ResetSize(void) {MaxSize = -1;} virtual int RowNumber(PGLOBAL g, bool b = false); + virtual bool CanBeFiltered(void) {return true;} virtual PTDB Duplicate(PGLOBAL) {return NULL;} virtual PTDB Clone(PTABS) {return this;} virtual PTDB Copy(PTABS t); @@ -131,7 +111,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. {fprintf(f, "%s AM(%d)\n", m, GetAmType());} virtual void Print(PGLOBAL g, FILE *f, uint n); virtual void Print(PGLOBAL g, char *ps, uint z); - virtual PSZ GetServer(void) = 0; + virtual PCSZ GetServer(void) = 0; virtual int GetBadLines(void) {return 0;} virtual CHARSET_INFO *data_charset(void); @@ -157,6 +137,7 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. 
TUSE Use; PFIL To_Filter; PCFIL To_CondFil; // To condition filter structure + const Item *Cond; // The condition used to make filters static int Tnum; // Used to generate Tdb_no's const int Tdb_No; // GetTdb_No() is always 0 for OPJOIN PTDB Next; // Next in linearized queries @@ -187,9 +168,6 @@ class DllExport TDBASE : public TDB { // Implementation inline int GetKnum(void) {return Knum;} -//inline PTABDEF GetDef(void) {return To_Def;} -//inline PCOL GetSetCols(void) {return To_SetCols;} -//inline void SetSetCols(PCOL colp) {To_SetCols = colp;} inline void SetKey_Col(PCOL *cpp) {To_Key_Col = cpp;} inline void SetXdp(PIXDEF xdp) {To_Xdp = xdp;} inline void SetKindex(PKXBASE kxp) {To_Kindex = kxp;} @@ -201,36 +179,14 @@ class DllExport TDBASE : public TDB { // Methods virtual bool IsUsingTemp(PGLOBAL) {return false;} -//virtual bool IsIndexed(void) {return false;} -//virtual bool IsSpecial(PSZ name); virtual PCATLG GetCat(void); -//virtual PSZ GetPath(void); virtual void PrintAM(FILE *f, char *m); -//virtual RECFM GetFtype(void) {return RECFM_NAF;} -//virtual int GetAffectedRows(void) {return -1;} -//virtual int GetRecpos(void) = 0; -//virtual bool SetRecpos(PGLOBAL g, int recpos); -//virtual bool IsReadOnly(void) {return Read_Only;} -//virtual bool IsView(void) {return FALSE;} -//virtual CHARSET_INFO *data_charset(void); virtual int GetProgMax(PGLOBAL g) {return GetMaxSize(g);} -//virtual int GetProgCur(void) {return GetRecpos();} -//virtual PSZ GetFile(PGLOBAL) {return "Not a file";} -//virtual int GetRemote(void) {return 0;} -//virtual void SetFile(PGLOBAL, PSZ) {} -//virtual void ResetDB(void) {} -//virtual void ResetSize(void) {MaxSize = -1;} virtual void RestoreNrec(void) {} virtual int ResetTableOpt(PGLOBAL g, bool dop, bool dox); - virtual PSZ GetServer(void) {return "Current";} + virtual PCSZ GetServer(void) {return "Current";} // Database routines -//virtual PCOL ColDB(PGLOBAL g, PSZ name, int num); -//virtual PCOL MakeCol(PGLOBAL, PCOLDEF, PCOL, int) -// {assert(false); return NULL;} -//virtual PCOL InsertSpecialColumn(PCOL colp); -//virtual PCOL InsertSpcBlk(PGLOBAL g, PCOLDEF cdp); -//virtual void MarkDB(PGLOBAL g, PTDB tdb2); virtual int MakeIndex(PGLOBAL g, PIXDEF, bool) {strcpy(g->Message, "Remote index"); return RC_INFO;} virtual bool ReadKey(PGLOBAL, OPVAL, const key_range *) @@ -241,18 +197,12 @@ class DllExport TDBASE : public TDB { "This function should not be called for this table"); return true;} // Members -//PTABDEF To_Def; // Points to catalog description block PXOB *To_Link; // Points to column of previous relations PCOL *To_Key_Col; // Points to key columns in current file PKXBASE To_Kindex; // Points to table key index PIXDEF To_Xdp; // To the index definition block -//PCOL To_SetCols; // Points to updated columns RECFM Ftype; // File type: 0-var 1-fixed 2-binary (VCT) -//int MaxSize; // Max size in number of lines int Knum; // Size of key arrays -//bool Read_Only; // True for read only tables -//const CHARSET_INFO *m_data_charset; -//const char *csname; // Table charset name }; // end of class TDBASE /***********************************************************************/ diff --git a/storage/connect/zip.c b/storage/connect/zip.c index ea54853e858..4bbe31ab7dd 100644 --- a/storage/connect/zip.c +++ b/storage/connect/zip.c @@ -637,7 +637,7 @@ local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib return relativeOffset; } -int LoadCentralDirectoryRecord(zip64_internal* pziinit) +static int LoadCentralDirectoryRecord(zip64_internal* 
pziinit) { int err=ZIP_OK; ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ @@ -846,7 +846,7 @@ int LoadCentralDirectoryRecord(zip64_internal* pziinit) /************************************************************/ -extern zipFile ZEXPORT zipOpen3 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_32_def* pzlib_filefunc64_32_def) +static zipFile zipOpen3 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_32_def* pzlib_filefunc64_32_def) { zip64_internal ziinit; zip64_internal* zi; @@ -955,7 +955,7 @@ extern zipFile ZEXPORT zipOpen64 (const void* pathname, int append) return zipOpen3(pathname,append,NULL,NULL); } -int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_extrafield_local, const void* extrafield_local) +static int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_extrafield_local, const void* extrafield_local) { /* write the local header */ int err; @@ -1752,7 +1752,7 @@ extern int ZEXPORT zipCloseFileInZip (zipFile file) return zipCloseFileInZipRaw (file,0,0); } -int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eocd_pos_inzip) +static int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eocd_pos_inzip) { int err = ZIP_OK; ZPOS64_T pos = zip64eocd_pos_inzip - zi->add_position_when_writting_offset; @@ -1774,7 +1774,7 @@ int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eo return err; } -int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) +static int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) { int err = ZIP_OK; @@ -1813,7 +1813,7 @@ int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centra } return err; } -int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) +static int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) { int err = ZIP_OK; @@ -1861,7 +1861,7 @@ int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, return err; } -int Write_GlobalComment(zip64_internal* zi, const char* global_comment) +static int Write_GlobalComment(zip64_internal* zi, const char* global_comment) { int err = ZIP_OK; uInt size_global_comment = 0; -- cgit v1.2.1 From 436070c6e106d22f2afce3a28fc15e33a2241ef9 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 12 May 2017 00:33:33 +0200 Subject: Fix failing test connect.json for MariaDB 10.0 Suppressing Uri and dsn from json tables (was MGO) modified: storage/connect/ha_connect.cc modified: storage/connect/tabdos.cpp modified: storage/connect/tabjson.cpp modified: storage/connect/tabjson.h --- storage/connect/ha_connect.cc | 8 +++----- storage/connect/tabdos.cpp | 2 -- storage/connect/tabjson.cpp | 9 ++++----- storage/connect/tabjson.h | 3 +-- 4 files changed, 8 insertions(+), 14 deletions(-) (limited to 'storage') diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 240625d03fb..d20fa05a463 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -209,7 +209,7 @@ pthread_mutex_t parmut = PTHREAD_MUTEX_INITIALIZER; /***********************************************************************/ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); PQRYRES VirColumns(PGLOBAL g, 
bool info); -PQRYRES JSONColumns(PGLOBAL g, char *db, char *dsn, PTOS topt, bool info); +PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info); PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info); int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v); void PushWarning(PGLOBAL g, THD *thd, int level); @@ -5557,9 +5557,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, case TAB_XML: #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT case TAB_JSON: - dsn = strz(g, create_info->connect_string); - - if (!fn && !zfn && !mul && !dsn) + if (!fn && !zfn && !mul) sprintf(g->Message, "Missing %s file name", topt->type); else ok = true; @@ -5705,7 +5703,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, qrp = VirColumns(g, fnc == FNC_COL); break; case TAB_JSON: - qrp = JSONColumns(g, (char*)db, dsn, topt, fnc == FNC_COL); + qrp = JSONColumns(g, (char*)db, topt, fnc == FNC_COL); break; #if defined(LIBXML2_SUPPORT) || defined(DOMDOC_SUPPORT) case TAB_XML: diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index 6f95eafe838..87925161320 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -132,7 +132,6 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) bool map = (am && (*am == 'M' || *am == 'm')); LPCSTR dfm = (am && (*am == 'F' || *am == 'f')) ? "F" : (am && (*am == 'B' || *am == 'b')) ? "B" - : (am && (*am == 'X' || *am == 'x')) ? "X" : (am && !stricmp(am, "DBF")) ? "D" : "V"; if ((Zipped = GetBoolCatInfo("Zipped", false))) { @@ -149,7 +148,6 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) GetCharCatInfo("Recfm", (PSZ)dfm, buf, sizeof(buf)); Recfm = (toupper(*buf) == 'F') ? RECFM_FIX : (toupper(*buf) == 'B') ? RECFM_BIN : - (toupper(*buf) == 'X') ? RECFM_NAF : // MGO (toupper(*buf) == 'D') ? RECFM_DBF : RECFM_VAR; Lrecl = GetIntCatInfo("Lrecl", 0); diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index edfe710bc92..5853425a4e5 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -63,7 +63,7 @@ typedef struct _jncol { /* JSONColumns: construct the result blocks containing the description */ /* of all the columns of a table contained inside a JSON file. */ /***********************************************************************/ -PQRYRES JSONColumns(PGLOBAL g, char *db, char *dsn, PTOS topt, bool info) +PQRYRES JSONColumns(PGLOBAL g, char *db, PTOS topt, bool info) { static int buftyp[] = {TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, TYPE_INT, TYPE_SHORT, TYPE_SHORT, TYPE_STRING}; @@ -112,7 +112,7 @@ PQRYRES JSONColumns(PGLOBAL g, char *db, char *dsn, PTOS topt, bool info) #endif // ZIP_SUPPORT tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL); - if (!tdp->Fn && !dsn) { + if (!tdp->Fn) { strcpy(g->Message, MSG(MISSING_FNAME)); return NULL; } // endif Fn @@ -424,7 +424,7 @@ bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff) Pretty = GetIntCatInfo("Pretty", 2); Limit = GetIntCatInfo("Limit", 10); Base = GetIntCatInfo("Base", 0) ? 1 : 0; - return DOSDEF::DefineAM(g, (Uri ? 
"XMGO" : "DOS"), poff); + return DOSDEF::DefineAM(g, "DOS", poff); } // end of DefineAM /***********************************************************************/ @@ -2048,7 +2048,6 @@ TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp) { Topt = tdp->GetTopt(); Db = (char*)tdp->GetDB(); - Dsn = (char*)tdp->Uri; } // end of TDBJCL constructor /***********************************************************************/ @@ -2056,7 +2055,7 @@ TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp) /***********************************************************************/ PQRYRES TDBJCL::GetResult(PGLOBAL g) { - return JSONColumns(g, Db, Dsn, Topt, false); + return JSONColumns(g, Db, Topt, false); } // end of GetResult /* --------------------------- End of json --------------------------- */ diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 2c8f226b5ca..c16cf6846b6 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -36,7 +36,7 @@ class DllExport JSONDEF : public DOSDEF { /* Table description */ friend class TDBJSON; friend class TDBJSN; friend class TDBJCL; - friend PQRYRES JSONColumns(PGLOBAL, char*, char*, PTOS, bool); + friend PQRYRES JSONColumns(PGLOBAL, char*, PTOS, bool); public: // Constructor JSONDEF(void); @@ -58,7 +58,6 @@ public: int Level; /* Used for catalog table */ int Base; /* The array index base */ bool Strict; /* Strict syntax checking */ - const char *Uri; /* MongoDB connection URI */ }; // end of JSONDEF /* -------------------------- TDBJSN class --------------------------- */ -- cgit v1.2.1 From fd0335686b050dc9ba091fe664aa5cd99bd20739 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 12 May 2017 11:35:57 +0200 Subject: MDEV-12651: change error code to ER_ILLEGAL_HA in rnd_pos (ha_connect.cc) --- storage/connect/ha_connect.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index d20fa05a463..a0dfbaceca0 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -3998,7 +3998,7 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos) } else { PGLOBAL g = GetPlug((table) ? table->in_use : NULL, xp); strcpy(g->Message, "Not supported by this table type"); - my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); + my_message(ER_ILLEGAL_HA, g->Message, MYF(0)); rc= HA_ERR_INTERNAL_ERROR; } // endif SetRecpos -- cgit v1.2.1 From 8417252b04404496c33cf049c1914a92653acd5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 15 May 2017 10:26:42 +0300 Subject: Fix the Solaris compilation after MDEV-12674 simple_counter::add(): Add a type cast to the os_atomic_increment_ulint() call, because GCC would check the type compatibility even when the code branch is not being instantiated (atomic=false). On Solaris, os_atomic_increment_ulint() actually needs a compatible parameter type, and an error would be emitted due to an incompatible 64-bit type, for srv_stats.n_lock_wait_time.add(diff_time). 
--- storage/innobase/include/os0sync.h | 9 ++++++++- storage/xtradb/include/os0sync.h | 9 ++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h index 54032b5ad31..bb225c53dfe 100644 --- a/storage/innobase/include/os0sync.h +++ b/storage/innobase/include/os0sync.h @@ -908,7 +908,14 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) simple_counter { compile_time_assert(!atomic || sizeof(Type) == sizeof(ulint)); if (atomic) { - return os_atomic_increment_ulint(&m_counter, i); + /* GCC would perform a type check in this code + also in case the template is instantiated with + simple_counter. + On Solaris, os_atomic_increment_ulint() maps + to atomic_add_long_nv(), which expects the + parameter to be correctly typed. */ + return os_atomic_increment_ulint( + reinterpret_cast(&m_counter), i); } else { return m_counter += i; } diff --git a/storage/xtradb/include/os0sync.h b/storage/xtradb/include/os0sync.h index b152ab53e68..7bc591b2911 100644 --- a/storage/xtradb/include/os0sync.h +++ b/storage/xtradb/include/os0sync.h @@ -960,7 +960,14 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) simple_counter { compile_time_assert(!atomic || sizeof(Type) == sizeof(ulint)); if (atomic) { - return os_atomic_increment_ulint(&m_counter, i); + /* GCC would perform a type check in this code + also in case the template is instantiated with + simple_counter. + On Solaris, os_atomic_increment_ulint() maps + to atomic_add_long_nv(), which expects the + parameter to be correctly typed. */ + return os_atomic_increment_ulint( + reinterpret_cast(&m_counter), i); } else { return m_counter += i; } -- cgit v1.2.1 From 0af9818240a0745aca3fd94891664fe008de24fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Mon, 15 May 2017 17:17:16 +0300 Subject: 5.6.36 --- storage/innobase/btr/btr0sea.cc | 2 +- storage/innobase/buf/buf0buf.cc | 2 +- storage/innobase/buf/buf0dblwr.cc | 4 +- storage/innobase/buf/buf0dump.cc | 4 +- storage/innobase/buf/buf0flu.cc | 4 +- storage/innobase/dict/dict0boot.cc | 7 +- storage/innobase/dict/dict0stats.cc | 15 +-- storage/innobase/dict/dict0stats_bg.cc | 4 +- storage/innobase/fil/fil0fil.cc | 20 +-- storage/innobase/handler/ha_innodb.cc | 7 +- storage/innobase/include/buf0dblwr.h | 4 +- storage/innobase/include/fil0fil.h | 14 +- storage/innobase/include/ha0ha.h | 6 +- storage/innobase/include/os0file.h | 134 ++++++++++++++----- storage/innobase/include/os0file.ic | 130 ++++++++++++++---- storage/innobase/include/row0mysql.h | 8 ++ storage/innobase/include/srv0srv.h | 9 +- storage/innobase/include/srv0start.h | 4 +- storage/innobase/include/trx0xa.h | 15 +-- storage/innobase/include/univ.i | 10 +- storage/innobase/log/log0log.cc | 9 +- storage/innobase/log/log0recv.cc | 4 +- storage/innobase/os/os0file.cc | 128 +++++++++--------- storage/innobase/row/row0log.cc | 17 ++- storage/innobase/row/row0merge.cc | 35 +++-- storage/innobase/row/row0sel.cc | 235 ++++++++++++++++++++++++++------- storage/innobase/srv/srv0srv.cc | 26 +++- storage/innobase/srv/srv0start.cc | 66 ++++++--- storage/innobase/trx/trx0purge.cc | 5 +- storage/innobase/trx/trx0roll.cc | 4 +- storage/innobase/trx/trx0sys.cc | 18 +-- 31 files changed, 658 insertions(+), 292 deletions(-) (limited to 'storage') diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc index dd28f50f4f6..01f03b2eaf7 100644 --- a/storage/innobase/btr/btr0sea.cc +++ b/storage/innobase/btr/btr0sea.cc @@ -175,7 +175,7 @@ 
btr_search_sys_create( btr_search_sys = (btr_search_sys_t*) mem_alloc(sizeof(btr_search_sys_t)); - btr_search_sys->hash_index = ha_create(hash_size, 0, + btr_search_sys->hash_index = ib_create(hash_size, 0, MEM_HEAP_FOR_BTR_SEARCH, 0); #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG btr_search_sys->hash_index->adaptive = TRUE; diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 6f206918212..fa73e80b358 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -1366,7 +1366,7 @@ buf_pool_init_instance( ut_a(srv_n_page_hash_locks != 0); ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS); - buf_pool->page_hash = ha_create(2 * buf_pool->curr_size, + buf_pool->page_hash = ib_create(2 * buf_pool->curr_size, srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH, SYNC_BUF_PAGE_HASH); diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 62222993622..a752c3dfda5 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -355,7 +355,7 @@ recovery, this function loads the pages from double write buffer into memory. */ void buf_dblwr_init_or_load_pages( /*=========================*/ - os_file_t file, + pfs_os_file_t file, char* path, bool load_corrupt_pages) { diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index ed27a70307d..6c830205e2b 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -597,6 +597,7 @@ DECLARE_THREAD(buf_dump_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_ad(!srv_read_only_mode); srv_buf_dump_thread_active = TRUE; @@ -632,6 +633,7 @@ DECLARE_THREAD(buf_dump_thread)( srv_buf_dump_thread_active = FALSE; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 1cdd1610c4f..6550f916805 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2388,6 +2388,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ulint next_loop_time = ut_time_ms() + 1000; ulint n_flushed = 0; ulint last_activity = srv_get_activity_count(); @@ -2506,6 +2507,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( thread_exit: buf_page_cleaner_is_active = FALSE; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc index 1a1dd29a202..7873da30207 100644 --- a/storage/innobase/dict/dict0boot.cc +++ b/storage/innobase/dict/dict0boot.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -454,7 +454,10 @@ dict_boot(void) dberr_t err = DB_SUCCESS; - if (srv_read_only_mode && !ibuf_is_empty()) { + /** If innodb_force_recovery is set to 6 then allow + the server to start even though ibuf is not empty. */ + if (srv_force_recovery != SRV_FORCE_NO_LOG_REDO + && srv_read_only_mode && !ibuf_is_empty()) { ib_logf(IB_LOG_LEVEL_ERROR, "Change buffer must be empty when --innodb-read-only " diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index b0ba98308be..55a26268579 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2009, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2009, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -690,6 +690,9 @@ dict_stats_copy( && (src_idx = dict_table_get_next_index(src_idx)))) { if (dict_stats_should_ignore_index(dst_idx)) { + if (!(dst_idx->type & DICT_FTS)) { + dict_stats_empty_index(dst_idx); + } continue; } @@ -1096,10 +1099,10 @@ dict_stats_analyze_index_level( leaf-level delete marks because delete marks on non-leaf level do not make sense. */ - if (level == 0 && srv_stats_include_delete_marked? 0: + if (level == 0 && (srv_stats_include_delete_marked ? 0: rec_get_deleted_flag( rec, - page_is_comp(btr_pcur_get_page(&pcur)))) { + page_is_comp(btr_pcur_get_page(&pcur))))) { if (rec_is_last_on_page && !prev_rec_is_copied @@ -3229,12 +3232,6 @@ dict_stats_update( dict_table_stats_lock(table, RW_X_LATCH); - /* Initialize all stats to dummy values before - copying because dict_stats_table_clone_create() does - skip corrupted indexes so our dummy object 't' may - have less indexes than the real object 'table'. 
*/ - dict_stats_empty_table(table); - dict_stats_copy(table, t); dict_stats_assert_initialized(table); diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc index 6f01c379776..975c8a50803 100644 --- a/storage/innobase/dict/dict0stats_bg.cc +++ b/storage/innobase/dict/dict0stats_bg.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -334,6 +334,7 @@ DECLARE_THREAD(dict_stats_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_a(!srv_read_only_mode); srv_dict_stats_thread_active = TRUE; @@ -359,6 +360,7 @@ DECLARE_THREAD(dict_stats_thread)( srv_dict_stats_thread_active = FALSE; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit instead of return(). */ os_thread_exit(NULL); diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 5e1a9d6c05e..20c6cc6201e 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -146,7 +146,7 @@ struct fil_node_t { belongs */ char* name; /*!< path to the file */ ibool open; /*!< TRUE if file open */ - os_file_t handle; /*!< OS handle to the file, if file open */ + pfs_os_file_t handle; /*!< OS handle to the file, if file open */ os_event_t sync_event;/*!< Condition event to group and serialize calls to fsync */ ibool is_raw_disk;/*!< TRUE if the 'file' is actually a raw @@ -316,7 +316,8 @@ initialized. */ static fil_system_t* fil_system = NULL; /** Determine if (i) is a user tablespace id or not. */ -# define fil_is_user_tablespace_id(i) ((i) > srv_undo_tablespaces_open) +# define fil_is_user_tablespace_id(i) (i != 0 \ + && !srv_is_undo_tablespace(i)) /** Determine if user has explicitly disabled fsync(). 
*/ #ifndef __WIN__ @@ -2025,7 +2026,7 @@ UNIV_INTERN const char* fil_read_first_page( /*================*/ - os_file_t data_file, /*!< in: open data file */ + pfs_os_file_t data_file, /*!< in: open data file */ ibool one_read_already, /*!< in: TRUE if min and max parameters below already contain sensible data */ @@ -3360,7 +3361,7 @@ fil_open_linked_file( /*===============*/ const char* tablename, /*!< in: database/tablename */ char** remote_filepath,/*!< out: remote filepath */ - os_file_t* remote_file) /*!< out: remote file handle */ + pfs_os_file_t* remote_file) /*!< out: remote file handle */ { ibool success; @@ -3420,7 +3421,8 @@ fil_create_new_single_table_tablespace( tablespace file in pages, must be >= FIL_IBD_FILE_INITIAL_SIZE */ { - os_file_t file; + pfs_os_file_t file; + ibool ret; dberr_t err; byte* buf2; @@ -5862,7 +5864,7 @@ fil_flush( { fil_space_t* space; fil_node_t* node; - os_file_t file; + pfs_os_file_t file; mutex_enter(&fil_system->mutex); @@ -6224,7 +6226,7 @@ fil_buf_block_init( } struct fil_iterator_t { - os_file_t file; /*!< File handle */ + pfs_os_file_t file; /*!< File handle */ const char* filepath; /*!< File path name */ os_offset_t start; /*!< From where to start */ os_offset_t end; /*!< Where to stop */ @@ -6359,7 +6361,7 @@ fil_tablespace_iterate( PageCallback& callback) { dberr_t err; - os_file_t file; + pfs_os_file_t file; char* filepath; ut_a(n_io_buffers > 0); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 41c767a4bfc..7843c328bc8 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. @@ -5052,6 +5052,8 @@ table_opened: prebuilt->default_rec = table->s->default_values; ut_ad(prebuilt->default_rec); + prebuilt->mysql_handler = this; + /* Looks like MySQL-3.23 sometimes has primary key number != 0 */ primary_key = table->s->primary_key; key_used_on_scan = primary_key; @@ -10168,7 +10170,8 @@ ha_innobase::delete_table( extension, in contrast to ::create */ normalize_table_name(norm_name, name); - if (srv_read_only_mode) { + if (srv_read_only_mode + || srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) { DBUG_RETURN(HA_ERR_TABLE_READONLY); } else if (row_is_magic_monitor_table(norm_name) && check_global_access(thd, PROCESS_ACL)) { diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h index a62a6400d97..18e124b7437 100644 --- a/storage/innobase/include/buf0dblwr.h +++ b/storage/innobase/include/buf0dblwr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -55,7 +55,7 @@ recovery, this function loads the pages from double write buffer into memory. 
*/ void buf_dblwr_init_or_load_pages( /*=========================*/ - os_file_t file, + pfs_os_file_t file, char* path, bool load_corrupt_pages); diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 4d2913846b5..5806863f926 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -181,7 +181,7 @@ struct fsp_open_info { ibool success; /*!< Has the tablespace been opened? */ const char* check_msg; /*!< fil_check_first_page() message */ ibool valid; /*!< Is the tablespace valid? */ - os_file_t file; /*!< File handle */ + pfs_os_file_t file; /*!< File handle */ char* filepath; /*!< File path to open */ lsn_t lsn; /*!< Flushed LSN from header page */ ulint id; /*!< Space ID */ @@ -384,7 +384,7 @@ UNIV_INTERN const char* fil_read_first_page( /*================*/ - os_file_t data_file, /*!< in: open data file */ + pfs_os_file_t data_file, /*!< in: open data file */ ibool one_read_already, /*!< in: TRUE if min and max parameters below already contain sensible data */ @@ -902,12 +902,12 @@ struct PageCallback { Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. For compressed tables the page descriptor memory will be at offset: - block->frame + UNIV_PAGE_SIZE; + block->frame + UNIV_PAGE_SIZE; @param offset - physical offset within the file @param block - block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ virtual dberr_t operator()( - os_offset_t offset, + os_offset_t offset, buf_block_t* block) UNIV_NOTHROW = 0; /** @@ -915,7 +915,7 @@ struct PageCallback { to open it for the file that is being iterated over. @param filename - then physical name of the tablespace file. @param file - OS file handle */ - void set_file(const char* filename, os_file_t file) UNIV_NOTHROW + void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW { m_file = file; m_filepath = filename; @@ -951,7 +951,7 @@ struct PageCallback { ulint m_page_size; /** File handle to the tablespace */ - os_file_t m_file; + pfs_os_file_t m_file; /** Physical file path. */ const char* m_filepath; diff --git a/storage/innobase/include/ha0ha.h b/storage/innobase/include/ha0ha.h index 7351b407e8c..58eb581e76a 100644 --- a/storage/innobase/include/ha0ha.h +++ b/storage/innobase/include/ha0ha.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -107,7 +107,7 @@ chosen to be a slightly bigger prime number. 
@param level in: level of the mutexes in the latching order @param n_m in: number of mutexes to protect the hash table; must be a power of 2, or 0 */ -# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type) +# define ib_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type) #else /* UNIV_SYNC_DEBUG */ /** Creates a hash table. @return own: created table @@ -116,7 +116,7 @@ chosen to be a slightly bigger prime number. @param level in: level of the mutexes in the latching order @param n_m in: number of mutexes to protect the hash table; must be a power of 2, or 0 */ -# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type) +# define ib_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type) #endif /* UNIV_SYNC_DEBUG */ /*************************************************************//** diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index f047302c60c..76a389f9051 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Portions of this file contain modifications contributed and copyrighted @@ -89,6 +89,16 @@ typedef int os_file_t; # define OS_FILE_FROM_FD(fd) fd #endif +/*Common file descriptor for file IO instrumentation with PFS +on windows and other platforms */ +struct pfs_os_file_t +{ + os_file_t m_file; +#ifdef UNIV_PFS_IO + struct PSI_file *m_psi; +#endif +}; + /** Umask for creating files */ extern ulint os_innodb_umask; @@ -216,6 +226,8 @@ extern mysql_pfs_key_t innodb_file_temp_key; various file I/O operations with performance schema. 1) register_pfs_file_open_begin() and register_pfs_file_open_end() are used to register file creation, opening, closing and renaming. 
+2) register_pfs_file_rename_begin() and register_pfs_file_rename_end() +are used to register file renaming 2) register_pfs_file_io_begin() and register_pfs_file_io_end() are used to register actual file read, write and flush 3) register_pfs_file_close_begin() and register_pfs_file_close_end() @@ -225,17 +237,30 @@ are used to register file deletion operations*/ do { \ locker = PSI_FILE_CALL(get_thread_file_name_locker)( \ state, key, op, name, &locker); \ - if (UNIV_LIKELY(locker != NULL)) { \ + if (locker != NULL) { \ PSI_FILE_CALL(start_file_open_wait)( \ locker, src_file, src_line); \ } \ } while (0) -# define register_pfs_file_open_end(locker, file) \ +# define register_pfs_file_open_end(locker, file, result) \ do { \ - if (UNIV_LIKELY(locker != NULL)) { \ - PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(\ - locker, file); \ + if (locker != NULL) { \ + file.m_psi = PSI_FILE_CALL( \ + end_file_open_wait)( \ + locker, result); \ + } \ +} while (0) + +# define register_pfs_file_rename_begin(state, locker, key, op, name, \ + src_file, src_line) \ + register_pfs_file_open_begin(state, locker, key, op, name, \ + src_file, src_line) \ + +# define register_pfs_file_rename_end(locker, result) \ +do { \ + if (locker != NULL) { \ + PSI_FILE_CALL(end_file_open_wait)(locker, result); \ } \ } while (0) @@ -261,9 +286,9 @@ do { \ # define register_pfs_file_io_begin(state, locker, file, count, op, \ src_file, src_line) \ do { \ - locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( \ - state, file, op); \ - if (UNIV_LIKELY(locker != NULL)) { \ + locker = PSI_FILE_CALL(get_thread_file_stream_locker)( \ + state, file.m_psi, op); \ + if (locker != NULL) { \ PSI_FILE_CALL(start_file_wait)( \ locker, count, src_file, src_line); \ } \ @@ -271,7 +296,7 @@ do { \ # define register_pfs_file_io_end(locker, count) \ do { \ - if (UNIV_LIKELY(locker != NULL)) { \ + if (locker != NULL) { \ PSI_FILE_CALL(end_file_wait)(locker, count); \ } \ } while (0) @@ -289,7 +314,9 @@ os_file_rename os_aio os_file_read os_file_read_no_error_handling +os_file_read_no_error_handling_int_fd os_file_write +os_file_write_int_fd The wrapper functions have the prefix of "innodb_". */ @@ -307,7 +334,7 @@ The wrapper functions have the prefix of "innodb_". */ pfs_os_file_create_simple_no_error_handling_func( \ key, name, create_mode, access, success, __FILE__, __LINE__) -# define os_file_close(file) \ +# define os_file_close_pfs(file) \ pfs_os_file_close_func(file, __FILE__, __LINE__) # define os_aio(type, mode, name, file, buf, offset, \ @@ -315,18 +342,27 @@ The wrapper functions have the prefix of "innodb_". 
*/ pfs_os_aio_func(type, mode, name, file, buf, offset, \ n, message1, message2, __FILE__, __LINE__) -# define os_file_read(file, buf, offset, n) \ +# define os_file_read_pfs(file, buf, offset, n) \ pfs_os_file_read_func(file, buf, offset, n, __FILE__, __LINE__) # define os_file_read_no_error_handling(file, buf, offset, n) \ pfs_os_file_read_no_error_handling_func(file, buf, offset, n, \ __FILE__, __LINE__) -# define os_file_write(name, file, buf, offset, n) \ +# define os_file_read_no_error_handling_int_fd( \ + file, buf, offset, n) \ + pfs_os_file_read_no_error_handling_int_fd_func( \ + file, buf, offset, n, __FILE__, __LINE__) + +# define os_file_write_pfs(name, file, buf, offset, n) \ pfs_os_file_write_func(name, file, buf, offset, \ n, __FILE__, __LINE__) -# define os_file_flush(file) \ +# define os_file_write_int_fd(name, file, buf, offset, n) \ + pfs_os_file_write_int_fd_func(name, file, buf, offset, \ + n, __FILE__, __LINE__) + +# define os_file_flush_pfs(file) \ pfs_os_file_flush_func(file, __FILE__, __LINE__) # define os_file_rename(key, oldpath, newpath) \ @@ -352,22 +388,29 @@ to original un-instrumented file I/O APIs */ os_file_create_simple_no_error_handling_func( \ name, create_mode, access, success) -# define os_file_close(file) os_file_close_func(file) +# define os_file_close_pfs(file) \ + os_file_close_func(file) # define os_aio(type, mode, name, file, buf, offset, n, message1, message2) \ os_aio_func(type, mode, name, file, buf, offset, n, \ message1, message2) -# define os_file_read(file, buf, offset, n) \ +# define os_file_read_pfs(file, buf, offset, n) \ os_file_read_func(file, buf, offset, n) # define os_file_read_no_error_handling(file, buf, offset, n) \ os_file_read_no_error_handling_func(file, buf, offset, n) +# define os_file_read_no_error_handling_int_fd( \ + file, buf, offset, n) \ + os_file_read_no_error_handling_func(file, buf, offset, n) -# define os_file_write(name, file, buf, offset, n) \ +# define os_file_write_int_fd(name, file, buf, offset, n) \ + os_file_write_func(name, file, buf, offset, n) +# define os_file_write_pfs(name, file, buf, offset, n) \ os_file_write_func(name, file, buf, offset, n) -# define os_file_flush(file) os_file_flush_func(file) + +# define os_file_flush_pfs(file) os_file_flush_func(file) # define os_file_rename(key, oldpath, newpath) \ os_file_rename_func(oldpath, newpath) @@ -379,6 +422,33 @@ to original un-instrumented file I/O APIs */ #endif /* UNIV_PFS_IO */ +#ifdef UNIV_PFS_IO + #define os_file_close(file) os_file_close_pfs(file) +#else + #define os_file_close(file) os_file_close_pfs((file).m_file) +#endif + +#ifdef UNIV_PFS_IO + #define os_file_read(file, buf, offset, n) \ + os_file_read_pfs(file, buf, offset, n) +#else + #define os_file_read(file, buf, offset, n) \ + os_file_read_pfs(file.m_file, buf, offset, n) +#endif + +#ifdef UNIV_PFS_IO + #define os_file_flush(file) os_file_flush_pfs(file) +#else + #define os_file_flush(file) os_file_flush_pfs(file.m_file) +#endif + +#ifdef UNIV_PFS_IO + #define os_file_write(name, file, buf, offset, n) \ + os_file_write_pfs(name, file, buf, offset, n) +#else + #define os_file_write(name, file, buf, offset, n) \ + os_file_write_pfs(name, file.m_file, buf, offset, n) +#endif /* File types for directory entry data type */ enum os_file_type_t { @@ -518,7 +588,7 @@ A simple function to open or create a file. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( /*=========================================*/ const char* name, /*!< in: name of the file or path as a @@ -548,7 +618,7 @@ Opens an existing file or creates a new. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_func( /*================*/ const char* name, /*!< in: name of the file or path as a @@ -617,7 +687,7 @@ os_file_create_simple() which opens or creates a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_func( /*===========================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -640,7 +710,7 @@ monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_no_error_handling_func( /*=============================================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -664,7 +734,7 @@ Add instrumentation to monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_func( /*====================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -693,7 +763,7 @@ UNIV_INLINE ibool pfs_os_file_close_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line);/*!< in: line where the func invoked */ /*******************************************************************//** @@ -706,7 +776,7 @@ UNIV_INLINE ibool pfs_os_file_read_func( /*==================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -724,7 +794,7 @@ UNIV_INLINE ibool pfs_os_file_read_no_error_handling_func( /*====================================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -745,7 +815,7 @@ pfs_os_aio_func( ulint mode, /*!< in: OS_AIO_NORMAL etc. 
I/O mode */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -772,7 +842,7 @@ pfs_os_file_write_func( /*===================*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ const void* buf, /*!< in: buffer from which to write */ os_offset_t offset, /*!< in: file offset where to write */ ulint n, /*!< in: number of bytes to write */ @@ -789,7 +859,7 @@ UNIV_INLINE ibool pfs_os_file_flush_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line);/*!< in: line where the func invoked */ @@ -860,7 +930,7 @@ UNIV_INTERN os_offset_t os_file_get_size( /*=============*/ - os_file_t file) /*!< in: handle to a file */ + pfs_os_file_t file) /*!< in: handle to a file */ MY_ATTRIBUTE((warn_unused_result)); /***********************************************************************//** Write the specified number of zeros to a newly created file. @@ -871,7 +941,7 @@ os_file_set_size( /*=============*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ os_offset_t size) /*!< in: file size */ MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** @@ -1109,7 +1179,7 @@ os_aio_func( caution! */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ diff --git a/storage/innobase/include/os0file.ic b/storage/innobase/include/os0file.ic index defd8204ba3..31d5cc1d6a2 100644 --- a/storage/innobase/include/os0file.ic +++ b/storage/innobase/include/os0file.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2010, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2010, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -34,7 +34,7 @@ os_file_create_simple() which opens or creates a file. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_func( /*===========================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -47,7 +47,7 @@ pfs_os_file_create_simple_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -58,11 +58,13 @@ pfs_os_file_create_simple_func( : PSI_FILE_OPEN), name, src_file, src_line); - file = os_file_create_simple_func(name, create_mode, + file.m_file = os_file_create_simple_func(name, create_mode, access_type, success); + file.m_psi = NULL; - /* Regsiter the returning "file" value with the system */ - register_pfs_file_open_end(locker, file); + /* Regsiter psi value for the file */ + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -76,7 +78,7 @@ monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_no_error_handling_func( /*=============================================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -91,7 +93,7 @@ pfs_os_file_create_simple_no_error_handling_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -104,8 +106,10 @@ pfs_os_file_create_simple_no_error_handling_func( file = os_file_create_simple_no_error_handling_func( name, create_mode, access_type, success); + file.m_psi = NULL; - register_pfs_file_open_end(locker, file); + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -118,7 +122,7 @@ Add instrumentation to monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_func( /*====================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -137,7 +141,7 @@ pfs_os_file_create_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -149,8 +153,10 @@ pfs_os_file_create_func( name, src_file, src_line); file = os_file_create_func(name, create_mode, purpose, type, success); + file.m_psi = NULL; - register_pfs_file_open_end(locker, file); + register_pfs_file_open_end(locker, file, + (*success == TRUE ? 
success : 0)); return(file); } @@ -164,7 +170,7 @@ UNIV_INLINE ibool pfs_os_file_close_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { @@ -176,7 +182,7 @@ pfs_os_file_close_func( register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CLOSE, src_file, src_line); - result = os_file_close_func(file); + result = os_file_close_func(file.m_file); register_pfs_file_io_end(locker, 0); @@ -197,7 +203,7 @@ pfs_os_aio_func( ulint mode, /*!< in: OS_AIO_NORMAL etc. I/O mode */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -242,7 +248,7 @@ UNIV_INLINE ibool pfs_os_file_read_func( /*==================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -256,7 +262,7 @@ pfs_os_file_read_func( register_pfs_file_io_begin(&state, locker, file, n, PSI_FILE_READ, src_file, src_line); - result = os_file_read_func(file, buf, offset, n); + result = os_file_read_func(file.m_file, buf, offset, n); register_pfs_file_io_end(locker, n); @@ -275,7 +281,7 @@ UNIV_INLINE ibool pfs_os_file_read_no_error_handling_func( /*====================================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -289,13 +295,50 @@ pfs_os_file_read_no_error_handling_func( register_pfs_file_io_begin(&state, locker, file, n, PSI_FILE_READ, src_file, src_line); - result = os_file_read_no_error_handling_func(file, buf, offset, n); + result = os_file_read_no_error_handling_func(file.m_file, buf, offset, n); register_pfs_file_io_end(locker, n); return(result); } +/** NOTE! Please use the corresponding macro +os_file_read_no_error_handling_int_fd(), not directly this function! +This is the performance schema instrumented wrapper function for +os_file_read_no_error_handling_int_fd_func() which requests a +synchronous read operation. 
+@return TRUE if request was successful, FALSE if fail */ +UNIV_INLINE +ibool +pfs_os_file_read_no_error_handling_int_fd_func( + int file, /*!< in: handle to a file */ + void* buf, /*!< in: buffer where to read */ + os_offset_t offset, /*!< in: file offset where to read */ + ulint n, /*!< in: number of bytes to read */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + + PSI_file_locker_state state; + struct PSI_file_locker* locker = NULL; + + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, file, PSI_FILE_READ); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, n, + __FILE__, __LINE__); + } + ibool result = os_file_read_no_error_handling_func( + OS_FILE_FROM_FD(file), buf, offset, n); + + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, n); + } + + return(result); +} + /*******************************************************************//** NOTE! Please use the corresponding macro os_file_write(), not directly this function! @@ -308,7 +351,7 @@ pfs_os_file_write_func( /*===================*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ const void* buf, /*!< in: buffer from which to write */ os_offset_t offset, /*!< in: file offset where to write */ ulint n, /*!< in: number of bytes to write */ @@ -322,13 +365,50 @@ pfs_os_file_write_func( register_pfs_file_io_begin(&state, locker, file, n, PSI_FILE_WRITE, src_file, src_line); - result = os_file_write_func(name, file, buf, offset, n); + result = os_file_write_func(name, file.m_file, buf, offset, n); register_pfs_file_io_end(locker, n); return(result); } +/** NOTE! Please use the corresponding macro os_file_write(), not +directly this function! +This is the performance schema instrumented wrapper function for +os_file_write() which requests a synchronous write operation. +@return TRUE if request was successful, FALSE if fail */ +UNIV_INLINE +ibool +pfs_os_file_write_int_fd_func( + const char* name, /*!< in: name of the file or path as a + null-terminated string */ + int file, /*!< in: handle to a file */ + const void* buf, /*!< in: buffer from which to write */ + os_offset_t offset, /*!< in: file offset where to write */ + ulint n, /*!< in: number of bytes to write */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + PSI_file_locker_state state; + struct PSI_file_locker* locker = NULL; + + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, file, PSI_FILE_WRITE); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, n, + __FILE__, __LINE__); + } + ibool result = os_file_write_func( + name, OS_FILE_FROM_FD(file), buf, offset, n); + + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, n); + } + + return(result); +} + /***********************************************************************//** NOTE! Please use the corresponding macro os_file_flush(), not directly this function! 
@@ -339,7 +419,7 @@ UNIV_INLINE ibool pfs_os_file_flush_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { @@ -349,7 +429,7 @@ pfs_os_file_flush_func( register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_SYNC, src_file, src_line); - result = os_file_flush_func(file); + result = os_file_flush_func(file.m_file); register_pfs_file_io_end(locker, 0); @@ -377,12 +457,12 @@ pfs_os_file_rename_func( struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_open_begin(&state, locker, key, PSI_FILE_RENAME, newpath, + register_pfs_file_rename_begin(&state, locker, key, PSI_FILE_RENAME, newpath, src_file, src_line); result = os_file_rename_func(oldpath, newpath); - register_pfs_file_open_end(locker, 0); + register_pfs_file_rename_end(locker, 0); return(result); } diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h index fc1846b76f3..d0c1bdaa585 100644 --- a/storage/innobase/include/row0mysql.h +++ b/storage/innobase/include/row0mysql.h @@ -652,6 +652,8 @@ struct mysql_row_templ_t { #define ROW_PREBUILT_ALLOCATED 78540783 #define ROW_PREBUILT_FREED 26423527 +class ha_innobase; + /** A struct for (sometimes lazily) prebuilt structures in an Innobase table handle used within MySQL; these are used to save CPU time. */ @@ -879,6 +881,12 @@ struct row_prebuilt_t { to InnoDB format.*/ uint srch_key_val_len; /*!< Size of search key */ + /** MySQL handler object. */ + ha_innobase* mysql_handler; + + /** True if exceeded the end_range while filling the prefetch cache. */ + bool end_range; + }; /** Callback for row_mysql_sys_index_iterate() */ diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 042ced75d10..7d701536341 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. @@ -785,6 +785,13 @@ void srv_purge_wakeup(void); /*==================*/ +/** Check whether given space id is undo tablespace id +@param[in] space_id space id to check +@return true if it is undo tablespace else false. */ +bool +srv_is_undo_tablespace( + ulint space_id); + /** Status variables to be passed to MySQL */ struct export_var_t{ ulint innodb_data_pending_reads; /*!< Pending reads */ diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h index 963b767f0fb..a60776a4665 100644 --- a/storage/innobase/include/srv0start.h +++ b/storage/innobase/include/srv0start.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -139,6 +139,8 @@ extern ibool srv_startup_is_before_trx_rollback_phase; /** TRUE if a raw partition is in use */ extern ibool srv_start_raw_disk_in_use; +/** Undo tablespaces starts with space_id. */ +extern ulint srv_undo_space_id_start; /** Shutdown state */ enum srv_shutdown_state { diff --git a/storage/innobase/include/trx0xa.h b/storage/innobase/include/trx0xa.h index 7caddfb7ba4..255431293f5 100644 --- a/storage/innobase/include/trx0xa.h +++ b/storage/innobase/include/trx0xa.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -24,6 +24,8 @@ this program; if not, write to the Free Software Foundation, Inc., #ifndef XA_H #define XA_H +#include "xa.h" + /* * Transaction branch identification: XID and NULLXID: */ @@ -35,17 +37,6 @@ this program; if not, write to the Free Software Foundation, Inc., #define MAXGTRIDSIZE 64 /*!< maximum size in bytes of gtrid */ #define MAXBQUALSIZE 64 /*!< maximum size in bytes of bqual */ -/** X/Open XA distributed transaction identifier */ -struct xid_t { - long formatID; /*!< format identifier; -1 - means that the XID is null */ - long gtrid_length; /*!< value from 1 through 64 */ - long bqual_length; /*!< value from 1 through 64 */ - char data[XIDDATASIZE]; /*!< distributed transaction - identifier */ -}; -/** X/Open XA distributed transaction identifier */ -typedef struct xid_t XID; #endif /** X/Open XA distributed transaction status codes */ /* @{ */ diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index dfb59673f0b..5d3dcafb322 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -133,14 +133,8 @@ HAVE_PSI_INTERFACE is defined. */ #if defined HAVE_PSI_INTERFACE && !defined UNIV_HOTBACKUP # define UNIV_PFS_MUTEX # define UNIV_PFS_RWLOCK -/* For I/O instrumentation, performance schema rely -on a native descriptor to identify the file, this -descriptor could conflict with our OS level descriptor. -Disable IO instrumentation on Windows until this is -resolved */ -# ifndef __WIN__ -# define UNIV_PFS_IO -# endif + +# define UNIV_PFS_IO # define UNIV_PFS_THREAD /* There are mutexes/rwlocks that we want to exclude from diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 8ff8e39d352..51b057ec09a 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -3406,7 +3406,12 @@ loop: lsn = log_sys->lsn; - if (lsn != log_sys->last_checkpoint_lsn + const bool is_last = ((srv_force_recovery == SRV_FORCE_NO_LOG_REDO + && lsn == log_sys->last_checkpoint_lsn + + LOG_BLOCK_HDR_SIZE) + || lsn == log_sys->last_checkpoint_lsn); + + if (!is_last #ifdef UNIV_LOG_ARCHIVE || (srv_log_archive_on && lsn != log_sys->archived_lsn + LOG_BLOCK_HDR_SIZE) diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 85f4f6ea671..69d74a99ab5 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. This program is free software; you can redistribute it and/or modify it under @@ -332,6 +332,7 @@ DECLARE_THREAD(recv_writer_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_ad(!srv_read_only_mode); #ifdef UNIV_PFS_THREAD @@ -364,6 +365,7 @@ DECLARE_THREAD(recv_writer_thread)( recv_writer_thread_active = false; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index 46a7b801521..dbd30c3d658 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Portions of this file contain modifications contributed and copyrighted @@ -163,7 +163,7 @@ struct os_aio_slot_t{ byte* buf; /*!< buffer used in i/o */ ulint type; /*!< OS_FILE_READ or OS_FILE_WRITE */ os_offset_t offset; /*!< file offset in bytes */ - os_file_t file; /*!< file where to read or write */ + pfs_os_file_t file; /*!< file where to read or write */ const char* name; /*!< file name or path */ ibool io_already_done;/*!< used only in simulated aio: TRUE if the physical i/o already @@ -1306,7 +1306,7 @@ A simple function to open or create a file. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( /*=========================================*/ const char* name, /*!< in: name of the file or path as a @@ -1318,7 +1318,7 @@ os_file_create_simple_no_error_handling_func( used by a backup program reading the file */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ { - os_file_t file; + pfs_os_file_t file; *success = FALSE; #ifdef __WIN__ @@ -1326,7 +1326,6 @@ os_file_create_simple_no_error_handling_func( DWORD create_flag; DWORD attributes = 0; DWORD share_mode = FILE_SHARE_READ; - ut_a(name); ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT)); @@ -1343,8 +1342,8 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file create mode (%lu) for file '%s'", create_mode, name); - - return((os_file_t) -1); + file.m_file = (os_file_t)-1; + return(file); } if (access_type == OS_FILE_READ_ONLY) { @@ -1367,11 +1366,11 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file access type (%lu) for file '%s'", access_type, name); - - return((os_file_t) -1); + file.m_file = (os_file_t)-1; + return(file); } - file = CreateFile((LPCTSTR) name, + file.m_file = CreateFile((LPCTSTR) name, access, share_mode, NULL, // Security attributes @@ -1379,11 +1378,10 @@ os_file_create_simple_no_error_handling_func( attributes, NULL); // No template file - *success = (file != INVALID_HANDLE_VALUE); + *success = (file.m_file != INVALID_HANDLE_VALUE); #else /* __WIN__ */ int create_flag; const char* mode_str = NULL; - ut_a(name); ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT)); @@ -1425,13 +1423,13 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file create mode (%lu) for file '%s'", create_mode, name); - - return((os_file_t) -1); + file.m_file = -1; + return(file); } - file = ::open(name, create_flag, os_innodb_umask); + file.m_file = ::open(name, create_flag, os_innodb_umask); - *success = file == -1 ? FALSE : TRUE; + *success = file.m_file == -1 ? FALSE : TRUE; /* This function is always called for data files, we should disable OS caching (O_DIRECT) here as we do in os_file_create_func(), so @@ -1441,18 +1439,18 @@ os_file_create_simple_no_error_handling_func( && (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT || srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC)) { - os_file_set_nocache(file, name, mode_str); + os_file_set_nocache(file.m_file, name, mode_str); } #ifdef USE_FILE_LOCK if (!srv_read_only_mode && *success && access_type == OS_FILE_READ_WRITE - && os_file_lock(file, name)) { + && os_file_lock(file.m_file, name)) { *success = FALSE; - close(file); - file = -1; + close(file.m_file); + file.m_file = -1; } #endif /* USE_FILE_LOCK */ @@ -1527,7 +1525,7 @@ Opens an existing file or creates a new. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_func( /*================*/ const char* name, /*!< in: name of the file or path as a @@ -1543,24 +1541,25 @@ os_file_create_func( ulint type, /*!< in: OS_DATA_FILE or OS_LOG_FILE */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ { - os_file_t file; + pfs_os_file_t file; ibool retry; ibool on_error_no_exit; ibool on_error_silent; - #ifdef __WIN__ DBUG_EXECUTE_IF( "ib_create_table_fail_disk_full", *success = FALSE; SetLastError(ERROR_DISK_FULL); - return((os_file_t) -1); + file.m_file = (os_file_t)-1; + return(file); ); #else /* __WIN__ */ DBUG_EXECUTE_IF( "ib_create_table_fail_disk_full", *success = FALSE; errno = ENOSPC; - return((os_file_t) -1); + file.m_file = -1; + return(file); ); #endif /* __WIN__ */ @@ -1611,7 +1610,8 @@ os_file_create_func( "Unknown file create mode (%lu) for file '%s'", create_mode, name); - return((os_file_t) -1); + file.m_file = (os_file_t)-1; + return(file); } DWORD attributes = 0; @@ -1636,8 +1636,8 @@ os_file_create_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown purpose flag (%lu) while opening file '%s'", purpose, name); - - return((os_file_t)(-1)); + file.m_file = (os_file_t)-1; + return(file); } #ifdef UNIV_NON_BUFFERED_IO @@ -1664,11 +1664,11 @@ os_file_create_func( do { /* Use default security attributes and no template file. */ - file = CreateFile( + file.m_file = CreateFile( (LPCTSTR) name, access, share_mode, NULL, create_flag, attributes, NULL); - if (file == INVALID_HANDLE_VALUE) { + if (file.m_file == INVALID_HANDLE_VALUE) { const char* operation; operation = (create_mode == OS_FILE_CREATE @@ -1693,7 +1693,6 @@ os_file_create_func( #else /* __WIN__ */ int create_flag; const char* mode_str = NULL; - on_error_no_exit = create_mode & OS_FILE_ON_ERROR_NO_EXIT ? 
TRUE : FALSE; on_error_silent = create_mode & OS_FILE_ON_ERROR_SILENT @@ -1731,7 +1730,8 @@ os_file_create_func( "Unknown file create mode (%lu) for file '%s'", create_mode, name); - return((os_file_t) -1); + file.m_file = -1; + return(file); } ut_a(type == OS_LOG_FILE || type == OS_DATA_FILE); @@ -1751,9 +1751,9 @@ os_file_create_func( #endif /* O_SYNC */ do { - file = ::open(name, create_flag, os_innodb_umask); + file.m_file = ::open(name, create_flag, os_innodb_umask); - if (file == -1) { + if (file.m_file == -1) { const char* operation; operation = (create_mode == OS_FILE_CREATE @@ -1783,14 +1783,14 @@ os_file_create_func( && (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT || srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC)) { - os_file_set_nocache(file, name, mode_str); + os_file_set_nocache(file.m_file, name, mode_str); } #ifdef USE_FILE_LOCK if (!srv_read_only_mode && *success && create_mode != OS_FILE_OPEN_RAW - && os_file_lock(file, name)) { + && os_file_lock(file.m_file, name)) { if (create_mode == OS_FILE_OPEN_RETRY) { @@ -1802,7 +1802,7 @@ os_file_create_func( for (int i = 0; i < 100; i++) { os_thread_sleep(1000000); - if (!os_file_lock(file, name)) { + if (!os_file_lock(file.m_file, name)) { *success = TRUE; return(file); } @@ -1813,8 +1813,8 @@ os_file_create_func( } *success = FALSE; - close(file); - file = -1; + close(file.m_file); + file.m_file = -1; } #endif /* USE_FILE_LOCK */ @@ -2086,14 +2086,14 @@ UNIV_INTERN os_offset_t os_file_get_size( /*=============*/ - os_file_t file) /*!< in: handle to a file */ + pfs_os_file_t file) /*!< in: handle to a file */ { #ifdef __WIN__ os_offset_t offset; DWORD high; DWORD low; - low = GetFileSize(file, &high); + low = GetFileSize(file.m_file, &high); if ((low == 0xFFFFFFFF) && (GetLastError() != NO_ERROR)) { return((os_offset_t) -1); @@ -2103,7 +2103,8 @@ os_file_get_size( return(offset); #else - return((os_offset_t) lseek(file, 0, SEEK_END)); + return((os_offset_t) lseek(file.m_file, 0, SEEK_END)); + #endif /* __WIN__ */ } @@ -2116,7 +2117,7 @@ os_file_set_size( /*=============*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ os_offset_t size) /*!< in: file size */ { os_offset_t current_size; @@ -4187,7 +4188,7 @@ os_aio_array_reserve_slot( the aio operation */ void* message2,/*!< in: message to be passed along with the aio operation */ - os_file_t file, /*!< in: file handle */ + pfs_os_file_t file, /*!< in: file handle */ const char* name, /*!< in: name of the file or path as a null-terminated string */ void* buf, /*!< in: buffer where to read or from which @@ -4307,10 +4308,10 @@ found: iocb = &slot->control; if (type == OS_FILE_READ) { - io_prep_pread(iocb, file, buf, len, aio_offset); + io_prep_pread(iocb, file.m_file, buf, len, aio_offset); } else { ut_a(type == OS_FILE_WRITE); - io_prep_pwrite(iocb, file, buf, len, aio_offset); + io_prep_pwrite(iocb, file.m_file, buf, len, aio_offset); } iocb->data = (void*) slot; @@ -4548,7 +4549,7 @@ os_aio_func( caution! 
*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -4573,8 +4574,7 @@ os_aio_func( ulint dummy_type; #endif /* WIN_ASYNC_IO */ ulint wake_later; - - ut_ad(file); + ut_ad(file.m_file); ut_ad(buf); ut_ad(n > 0); ut_ad(n % OS_FILE_LOG_BLOCK_SIZE == 0); @@ -4606,13 +4606,11 @@ os_aio_func( and os_file_write_func() */ if (type == OS_FILE_READ) { - return(os_file_read_func(file, buf, offset, n)); + return(os_file_read_func(file.m_file, buf, offset, n)); } - ut_ad(!srv_read_only_mode); ut_a(type == OS_FILE_WRITE); - - return(os_file_write_func(name, file, buf, offset, n)); + return(os_file_write_func(name, file.m_file, buf, offset, n)); } try_again: @@ -4664,9 +4662,8 @@ try_again: os_n_file_reads++; os_bytes_read_since_printout += n; #ifdef WIN_ASYNC_IO - ret = ReadFile(file, buf, (DWORD) n, &len, + ret = ReadFile(file.m_file, buf, (DWORD) n, &len, &(slot->control)); - #elif defined(LINUX_NATIVE_AIO) if (!os_aio_linux_dispatch(array, slot)) { goto err_exit; @@ -4684,9 +4681,8 @@ try_again: if (srv_use_native_aio) { os_n_file_writes++; #ifdef WIN_ASYNC_IO - ret = WriteFile(file, buf, (DWORD) n, &len, + ret = WriteFile(file.m_file, buf, (DWORD) n, &len, &(slot->control)); - #elif defined(LINUX_NATIVE_AIO) if (!os_aio_linux_dispatch(array, slot)) { goto err_exit; @@ -4840,8 +4836,7 @@ os_aio_windows_handle( srv_set_io_thread_op_info( orig_seg, "get windows aio return value"); } - - ret = GetOverlappedResult(slot->file, &(slot->control), &len, TRUE); + ret = GetOverlappedResult(slot->file.m_file, &(slot->control), &len, TRUE); *message1 = slot->message1; *message2 = slot->message2; @@ -4870,7 +4865,8 @@ os_aio_windows_handle( and os_file_write APIs, need to register with performance schema explicitly here. */ struct PSI_file_locker* locker = NULL; - register_pfs_file_io_begin(locker, slot->file, slot->len, + PSI_file_locker_state state; + register_pfs_file_io_begin(&state, locker, slot->file, slot->len, (slot->type == OS_FILE_WRITE) ? 
PSI_FILE_WRITE : PSI_FILE_READ, @@ -4881,16 +4877,14 @@ os_aio_windows_handle( switch (slot->type) { case OS_FILE_WRITE: - ret = WriteFile(slot->file, slot->buf, + ret = WriteFile(slot->file.m_file, slot->buf, (DWORD) slot->len, &len, &(slot->control)); - break; case OS_FILE_READ: - ret = ReadFile(slot->file, slot->buf, + ret = ReadFile(slot->file.m_file, slot->buf, (DWORD) slot->len, &len, &(slot->control)); - break; default: ut_error; @@ -4906,8 +4900,7 @@ os_aio_windows_handle( file where we also use async i/o: in Windows we must use the same wait mechanism as for async i/o */ - - ret = GetOverlappedResult(slot->file, + ret = GetOverlappedResult(slot->file.m_file, &(slot->control), &len, TRUE); } @@ -5354,12 +5347,11 @@ consecutive_loop: os_aio_slot_t* slot; slot = os_aio_array_get_nth_slot(array, i + segment * n); - if (slot->reserved && slot != aio_slot && slot->offset == aio_slot->offset + aio_slot->len && slot->type == aio_slot->type - && slot->file == aio_slot->file) { + && slot->file.m_file == aio_slot->file.m_file) { /* Found a consecutive i/o request */ diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index 54183759e8d..4596e2fb951 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -363,9 +363,9 @@ row_log_online_op( goto err_exit; } - ret = os_file_write( + ret = os_file_write_int_fd( "(modification log)", - OS_FILE_FROM_FD(log->fd), + log->fd, log->tail.block, byte_offset, srv_sort_buf_size); log->tail.blocks++; if (!ret) { @@ -479,9 +479,9 @@ row_log_table_close_func( goto err_exit; } - ret = os_file_write( + ret = os_file_write_int_fd( "(modification log)", - OS_FILE_FROM_FD(log->fd), + log->fd, log->tail.block, byte_offset, srv_sort_buf_size); log->tail.blocks++; if (!ret) { @@ -2609,11 +2609,10 @@ all_done: goto func_exit; } - success = os_file_read_no_error_handling( - OS_FILE_FROM_FD(index->online_log->fd), + success = os_file_read_no_error_handling_int_fd( + index->online_log->fd, index->online_log->head.block, ofs, srv_sort_buf_size); - if (!success) { fprintf(stderr, "InnoDB: unable to read temporary file" " for table %s\n", index->table_name); @@ -3436,8 +3435,8 @@ all_done: goto func_exit; } - success = os_file_read_no_error_handling( - OS_FILE_FROM_FD(index->online_log->fd), + success = os_file_read_no_error_handling_int_fd( + index->online_log->fd, index->online_log->head.block, ofs, srv_sort_buf_size); diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index f8bea67906c..2d63352feaf 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -869,8 +869,9 @@ row_merge_read( } #endif /* UNIV_DEBUG */ - success = os_file_read_no_error_handling(OS_FILE_FROM_FD(fd), buf, + success = os_file_read_no_error_handling_int_fd(fd, buf, ofs, srv_sort_buf_size); + #ifdef POSIX_FADV_DONTNEED /* Each block is read exactly once. Free up the file cache. 
*/ posix_fadvise(fd, ofs, srv_sort_buf_size, POSIX_FADV_DONTNEED); @@ -904,7 +905,7 @@ row_merge_write( DBUG_EXECUTE_IF("row_merge_write_failure", return(FALSE);); - ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), buf, ofs, buf_len); + ret = os_file_write_int_fd("(merge)", fd, buf, ofs, buf_len); #ifdef UNIV_DEBUG if (row_merge_print_block_write) { @@ -3121,14 +3122,21 @@ row_merge_file_create_low( performance schema */ struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_open_begin(&state, locker, innodb_file_temp_key, - PSI_FILE_OPEN, - "Innodb Merge Temp File", - __FILE__, __LINE__); + locker = PSI_FILE_CALL(get_thread_file_name_locker)( + &state, innodb_file_temp_key, PSI_FILE_OPEN, + "Innodb Merge Temp File", &locker); + if (locker != NULL) { + PSI_FILE_CALL(start_file_open_wait)(locker, + __FILE__, + __LINE__); + } #endif fd = innobase_mysql_tmpfile(path); #ifdef UNIV_PFS_IO - register_pfs_file_open_end(locker, fd); + if (locker != NULL) { + PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)( + locker, fd); + } #endif if (fd < 0) { @@ -3175,15 +3183,20 @@ row_merge_file_destroy_low( #ifdef UNIV_PFS_IO struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_io_begin(&state, locker, - fd, 0, PSI_FILE_CLOSE, - __FILE__, __LINE__); + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, fd, PSI_FILE_CLOSE); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, 0, __FILE__, __LINE__); + } #endif if (fd >= 0) { close(fd); } #ifdef UNIV_PFS_IO - register_pfs_file_io_end(locker, 0); + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, 0); + } #endif } /*********************************************************************//** diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 42b1ab57630..c53c1154f60 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -61,6 +61,9 @@ Created 12/19/1997 Heikki Tuuri #include "my_sys.h" /* DEBUG_SYNC_C */ #include "my_compare.h" /* enum icp_result */ +#include "thr_lock.h" +#include "handler.h" +#include "ha_innodb.h" /* Maximum number of rows to prefetch; MySQL interface has another parameter */ #define SEL_MAX_N_PREFETCH 16 @@ -2743,34 +2746,43 @@ row_sel_field_store_in_mysql_format_func( #ifdef UNIV_DEBUG /** Convert a field from Innobase format to MySQL format. */ -# define row_sel_store_mysql_field(m,p,r,i,o,f,t) \ - row_sel_store_mysql_field_func(m,p,r,i,o,f,t) +# define row_sel_store_mysql_field(m,p,r,i,o,f,t,c) \ + row_sel_store_mysql_field_func(m,p,r,i,o,f,t,c) #else /* UNIV_DEBUG */ /** Convert a field from Innobase format to MySQL format. */ -# define row_sel_store_mysql_field(m,p,r,i,o,f,t) \ - row_sel_store_mysql_field_func(m,p,r,o,f,t) +# define row_sel_store_mysql_field(m,p,r,i,o,f,t,c) \ + row_sel_store_mysql_field_func(m,p,r,o,f,t,c) #endif /* UNIV_DEBUG */ -/**************************************************************//** -Convert a field in the Innobase format to a field in the MySQL format. */ +/** Convert a field in the Innobase format to a field in the MySQL format. 
+@param[out] mysql_rec record in the MySQL format +@param[in,out] prebuilt prebuilt struct +@param[in] rec InnoDB record; must be protected + by a page latch +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets() +@param[in] field_no templ->rec_field_no or + templ->clust_rec_field_no + or templ->icp_rec_field_no + or sec field no if clust_templ_for_sec + is TRUE +@param[in] templ row template +@param[in] clust_templ_for_sec TRUE if rec belongs to secondary index + but prebuilt template is in clustered + index format and used only for end + range comparison. */ static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_field_func( -/*===========================*/ - byte* mysql_rec, /*!< out: record in the - MySQL format */ - row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct */ - const rec_t* rec, /*!< in: InnoDB record; - must be protected by - a page latch */ + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, #ifdef UNIV_DEBUG - const dict_index_t* index, /*!< in: index of rec */ + const dict_index_t* index, #endif - const ulint* offsets, /*!< in: array returned by - rec_get_offsets() */ - ulint field_no, /*!< in: templ->rec_field_no or - templ->clust_rec_field_no or - templ->icp_rec_field_no */ - const mysql_row_templ_t*templ) /*!< in: row template */ + const ulint* offsets, + ulint field_no, + const mysql_row_templ_t*templ, + bool clust_templ_for_sec) { const byte* data; ulint len; @@ -2779,10 +2791,12 @@ row_sel_store_mysql_field_func( ut_ad(templ); ut_ad(templ >= prebuilt->mysql_template); ut_ad(templ < &prebuilt->mysql_template[prebuilt->n_template]); - ut_ad(field_no == templ->clust_rec_field_no + ut_ad(clust_templ_for_sec + || field_no == templ->clust_rec_field_no || field_no == templ->rec_field_no || field_no == templ->icp_rec_field_no); - ut_ad(rec_offs_validate(rec, index, offsets)); + ut_ad(rec_offs_validate(rec, + clust_templ_for_sec == true ? prebuilt->index : index, offsets)); if (UNIV_UNLIKELY(rec_offs_nth_extern(offsets, field_no))) { @@ -2896,30 +2910,37 @@ row_sel_store_mysql_field_func( return(TRUE); } -/**************************************************************//** -Convert a row in the Innobase format to a row in the MySQL format. +/** Convert a row in the Innobase format to a row in the MySQL format. Note that the template in prebuilt may advise us to copy only a few columns to mysql_rec, other columns are left blank. All columns may not be needed in the query. 
+@param[out] mysql_rec row in the MySQL format +@param[in] prebuilt prebuilt structure +@param[in] rec Innobase record in the index + which was described in prebuilt's + template, or in the clustered index; + must be protected by a page latch +@param[in] rec_clust TRUE if the rec in the clustered index +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets(rec) +@param[in] clust_templ_for_sec TRUE if rec belongs to secondary index + but the prebuilt->template is in + clustered index format and it is + used only for end range comparison @return TRUE on success, FALSE if not all columns could be retrieved */ static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_rec( -/*====================*/ - byte* mysql_rec, /*!< out: row in the MySQL format */ - row_prebuilt_t* prebuilt, /*!< in: prebuilt struct */ - const rec_t* rec, /*!< in: Innobase record in the index - which was described in prebuilt's - template, or in the clustered index; - must be protected by a page latch */ - ibool rec_clust, /*!< in: TRUE if rec is in the - clustered index instead of - prebuilt->index */ - const dict_index_t* index, /*!< in: index of rec */ - const ulint* offsets) /*!< in: array returned by - rec_get_offsets(rec) */ + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + ibool rec_clust, + const dict_index_t* index, + const ulint* offsets, + bool clust_templ_for_sec) { - ulint i; + ulint i; + std::vector template_col; ut_ad(rec_clust || index == prebuilt->index); ut_ad(!rec_clust || dict_index_is_clust(index)); @@ -2929,9 +2950,20 @@ row_sel_store_mysql_rec( prebuilt->blob_heap = NULL; } + if (clust_templ_for_sec) { + /* Store all clustered index field of + secondary index record. */ + for (i = 0; i < dict_index_get_n_fields( + prebuilt->index); i++) { + ulint sec_field = dict_index_get_nth_field_pos( + index, prebuilt->index, i); + template_col.push_back(sec_field); + } + } + for (i = 0; i < prebuilt->n_template; i++) { const mysql_row_templ_t*templ = &prebuilt->mysql_template[i]; - const ulint field_no + ulint field_no = rec_clust ? templ->clust_rec_field_no : templ->rec_field_no; @@ -2940,9 +2972,24 @@ row_sel_store_mysql_rec( ut_ad(dict_index_get_nth_field(index, field_no)->prefix_len == 0); + if (clust_templ_for_sec) { + std::vector::iterator it; + it = std::find(template_col.begin(), + template_col.end(), field_no); + + if (it == template_col.end()) { + continue; + } + + ut_ad(templ->rec_field_no == templ->clust_rec_field_no); + + field_no = it - template_col.begin(); + } + if (!row_sel_store_mysql_field(mysql_rec, prebuilt, rec, index, offsets, - field_no, templ)) { + field_no, templ, + clust_templ_for_sec)) { return(FALSE); } } @@ -3582,7 +3629,7 @@ row_search_idx_cond_check( if (!row_sel_store_mysql_field(mysql_rec, prebuilt, rec, prebuilt->index, offsets, templ->icp_rec_field_no, - templ)) { + templ, false)) { return(ICP_NO_MATCH); } } @@ -3603,7 +3650,7 @@ row_search_idx_cond_check( || dict_index_is_clust(prebuilt->index)) { if (!row_sel_store_mysql_rec( mysql_rec, prebuilt, rec, FALSE, - prebuilt->index, offsets)) { + prebuilt->index, offsets, false)) { ut_ad(dict_index_is_clust(prebuilt->index)); return(ICP_NO_MATCH); } @@ -3622,6 +3669,27 @@ row_search_idx_cond_check( return(result); } +/** Check the pushed down end range condition to avoid extra traversal +if records are not within view and also to avoid prefetching in the +cache buffer. 
+@param[in] mysql_rec record in MySQL format +@param[in,out] handler the MySQL handler performing the scan +@retval true if the row in mysql_rec is out of range +@retval false if the row in mysql_rec is in range */ +static +bool +row_search_end_range_check( + const byte* mysql_rec, + ha_innobase* handler) +{ + if (handler->end_range && + handler->compare_key_in_buffer(mysql_rec) > 0) { + return(true); + } + + return(false); +} + /********************************************************************//** Searches for rows in the database. This is used in the interface to MySQL. This function opens a cursor, and also implements fetch next @@ -3659,7 +3727,9 @@ row_search_for_mysql( trx_t* trx = prebuilt->trx; dict_index_t* clust_index; que_thr_t* thr; - const rec_t* rec; + const rec_t* prev_rec = NULL; + const rec_t* rec = NULL; + byte* end_range_cache = NULL; const rec_t* result_rec = NULL; const rec_t* clust_rec; dberr_t err = DB_SUCCESS; @@ -3684,6 +3754,7 @@ row_search_for_mysql( ulint* offsets = offsets_; ibool table_lock_waited = FALSE; byte* next_buf = 0; + ulint end_loop = 0; rec_offs_init(offsets_); @@ -3823,6 +3894,10 @@ row_search_for_mysql( err = DB_SUCCESS; goto func_exit; + } else if (prebuilt->end_range == true) { + prebuilt->end_range = false; + err = DB_RECORD_NOT_FOUND; + goto func_exit; } if (prebuilt->fetch_cache_first > 0 @@ -3956,7 +4031,8 @@ row_search_for_mysql( if (!row_sel_store_mysql_rec( buf, prebuilt, - rec, FALSE, index, offsets)) { + rec, FALSE, index, + offsets, false)) { /* Only fresh inserts may contain incomplete externally stored columns. Pretend that such @@ -4205,11 +4281,62 @@ rec_loop: and neither can a record lock be placed on it: we skip such a record. */ + prev_rec = NULL; goto next_rec; } if (page_rec_is_supremum(rec)) { + /** Compare the last record of the page with end range + passed to InnoDB when there is no ICP and number of loops + in row_search_for_mysql for rows found but not + reporting due to search views etc. */ + if (prev_rec != NULL + && prebuilt->mysql_handler->end_range != NULL + && prebuilt->idx_cond == NULL + && end_loop >= 100) { + + dict_index_t* key_index = prebuilt->index; + bool clust_templ_for_sec = false; + + if (end_range_cache == NULL) { + end_range_cache = static_cast( + ut_malloc(prebuilt->mysql_row_len)); + } + + if (index != clust_index + && prebuilt->need_to_access_clustered) { + /** Secondary index record but the template + based on PK. */ + key_index = clust_index; + clust_templ_for_sec = true; + } + + /** Create offsets based on prebuilt index. */ + offsets = rec_get_offsets(prev_rec, prebuilt->index, + offsets, ULINT_UNDEFINED, &heap); + + if (row_sel_store_mysql_rec( + end_range_cache, prebuilt, prev_rec, + clust_templ_for_sec, key_index, offsets, + clust_templ_for_sec)) { + + if (row_search_end_range_check( + end_range_cache, + prebuilt->mysql_handler)) { + + /** In case of prebuilt->fetch, + set the error in prebuilt->end_range. */ + if (prebuilt->n_fetch_cached > 0) { + prebuilt->end_range = true; + } + + err = DB_RECORD_NOT_FOUND; + goto normal_return; + } + } + } + if (set_also_gap_locks && !(srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) @@ -4241,6 +4368,7 @@ rec_loop: /* A page supremum record cannot be in the result set: skip it now that we have placed a possible lock on it */ + prev_rec = NULL; goto next_rec; } @@ -4308,6 +4436,7 @@ wrong_offs: btr_pcur_move_to_last_on_page(pcur, &mtr); + prev_rec = NULL; goto next_rec; } } @@ -4334,10 +4463,13 @@ wrong_offs: fputs(". 
We try to skip the record.\n", stderr); + prev_rec = NULL; goto next_rec; } } + prev_rec = rec; + /* Note that we cannot trust the up_match value in the cursor at this place because we can arrive here after moving the cursor! Thus we have to recompare rec and search_tuple to determine if they @@ -4562,6 +4694,7 @@ no_gap_lock: did_semi_consistent_read = TRUE; rec = old_vers; + prev_rec = rec; break; default: @@ -4608,6 +4741,7 @@ no_gap_lock: } rec = old_vers; + prev_rec = rec; } } else { /* We are looking into a non-clustered index, @@ -4785,7 +4919,7 @@ requires_clust_rec: appropriate version of the clustered index record. */ if (!row_sel_store_mysql_rec( buf, prebuilt, result_rec, - TRUE, clust_index, offsets)) { + TRUE, clust_index, offsets, false)) { goto next_rec; } } @@ -4853,7 +4987,7 @@ requires_clust_rec: next_buf, prebuilt, result_rec, result_rec != rec, result_rec != rec ? clust_index : index, - offsets)) { + offsets, false)) { if (next_buf == buf) { ut_a(prebuilt->n_fetch_cached == 0); @@ -4908,7 +5042,7 @@ requires_clust_rec: buf, prebuilt, result_rec, result_rec != rec, result_rec != rec ? clust_index : index, - offsets)) { + offsets, false)) { /* Only fresh inserts may contain incomplete externally stored columns. Pretend that such records do @@ -4960,6 +5094,8 @@ idx_cond_failed: goto normal_return; next_rec: + end_loop++; + /* Reset the old and new "did semi-consistent read" flags. */ if (UNIV_UNLIKELY(prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT)) { @@ -5146,6 +5282,11 @@ normal_return: func_exit: trx->op_info = ""; + + if (end_range_cache != NULL) { + ut_free(end_range_cache); + } + if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index a67f3a776c5..173577740f8 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. @@ -2315,6 +2315,8 @@ DECLARE_THREAD(srv_master_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ulint old_activity_count = srv_get_activity_count(); ib_time_t last_print_time; @@ -2376,6 +2378,7 @@ suspend_thread: os_event_wait(slot->event); if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) { + my_thread_end(); os_thread_exit(NULL); } @@ -2458,6 +2461,8 @@ DECLARE_THREAD(srv_worker_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ut_ad(!srv_read_only_mode); @@ -2516,6 +2521,7 @@ DECLARE_THREAD(srv_worker_thread)( os_thread_pf(os_thread_get_curr_id())); #endif /* UNIV_DEBUG_THREAD_CREATION */ + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. 
*/ os_thread_exit(NULL); @@ -2716,6 +2722,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ulint n_total_purged = ULINT_UNDEFINED; @@ -2821,6 +2829,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( srv_release_threads(SRV_WORKER, srv_n_purge_threads - 1); } + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); @@ -2889,3 +2898,18 @@ srv_purge_wakeup(void) } } +/** Check whether given space id is undo tablespace id +@param[in] space_id space id to check +@return true if it is undo tablespace else false. */ +bool +srv_is_undo_tablespace( + ulint space_id) +{ + if (srv_undo_space_id_start == 0) { + return (false); + } + + return(space_id >= srv_undo_space_id_start + && space_id < (srv_undo_space_id_start + + srv_undo_tablespaces_open)); +} diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 697107a1e0d..f6b68a73b74 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2009, Percona Inc. @@ -109,6 +109,9 @@ UNIV_INTERN ibool srv_have_fullfsync = FALSE; /** TRUE if a raw partition is in use */ UNIV_INTERN ibool srv_start_raw_disk_in_use = FALSE; +/** UNDO tablespaces starts with space id. */ +ulint srv_undo_space_id_start; + /** TRUE if the server is being started, before rolling back any incomplete transactions */ UNIV_INTERN ibool srv_startup_is_before_trx_rollback_phase = FALSE; @@ -124,7 +127,7 @@ SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ UNIV_INTERN enum srv_shutdown_state srv_shutdown_state = SRV_SHUTDOWN_NONE; /** Files comprising the system tablespace */ -static os_file_t files[1000]; +static pfs_os_file_t files[1000]; /** io_handler_thread parameters for thread identification */ static ulint n[SRV_MAX_N_IO_THREADS + 6]; @@ -530,7 +533,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t create_log_file( /*============*/ - os_file_t* file, /*!< out: file handle */ + pfs_os_file_t* file, /*!< out: file handle */ const char* name) /*!< in: log file name */ { ibool ret; @@ -737,7 +740,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t open_log_file( /*==========*/ - os_file_t* file, /*!< out: file handle */ + pfs_os_file_t* file, /*!< out: file handle */ const char* name, /*!< in: log file name */ os_offset_t* size) /*!< out: file size */ { @@ -853,7 +856,7 @@ open_or_create_data_files( && os_file_get_last_error(false) != OS_FILE_ALREADY_EXISTS #ifdef UNIV_AIX - /* AIX 5.1 after security patch ML7 may have + /* AIX 5.1 after security patch ML7 may have errno set to 0 here, which causes our function to return 100; work around that AIX problem */ @@ -1156,7 +1159,7 @@ srv_undo_tablespace_create( const char* name, /*!< in: tablespace name */ ulint size) /*!< in: tablespace size in pages */ { - os_file_t fh; + pfs_os_file_t fh; ibool ret; dberr_t err = DB_SUCCESS; @@ -1233,7 +1236,7 @@ srv_undo_tablespace_open( const char* name, /*!< in: tablespace name */ ulint space) /*!< in: tablespace id */ { - os_file_t fh; + 
pfs_os_file_t fh; dberr_t err = DB_ERROR; ibool ret; ulint flags; @@ -1332,13 +1335,23 @@ srv_undo_tablespaces_init( for (i = 0; create_new_db && i < n_conf_tablespaces; ++i) { char name[OS_FILE_MAX_PATH]; + ulint space_id = i + 1; + + DBUG_EXECUTE_IF("innodb_undo_upgrade", + space_id = i + 3;); ut_snprintf( name, sizeof(name), "%s%cundo%03lu", - srv_undo_dir, SRV_PATH_SEPARATOR, i + 1); + srv_undo_dir, SRV_PATH_SEPARATOR, space_id); + + if (i == 0) { + srv_undo_space_id_start = space_id; + prev_space_id = srv_undo_space_id_start - 1; + } + + undo_tablespace_ids[i] = space_id; - /* Undo space ids start from 1. */ err = srv_undo_tablespace_create( name, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES); @@ -1360,14 +1373,16 @@ srv_undo_tablespaces_init( if (!create_new_db) { n_undo_tablespaces = trx_rseg_get_n_undo_tablespaces( undo_tablespace_ids); - } else { - n_undo_tablespaces = n_conf_tablespaces; - for (i = 1; i <= n_undo_tablespaces; ++i) { - undo_tablespace_ids[i - 1] = i; + if (n_undo_tablespaces != 0) { + srv_undo_space_id_start = undo_tablespace_ids[0]; + prev_space_id = srv_undo_space_id_start - 1; } - undo_tablespace_ids[i] = ULINT_UNDEFINED; + } else { + n_undo_tablespaces = n_conf_tablespaces; + + undo_tablespace_ids[n_conf_tablespaces] = ULINT_UNDEFINED; } /* Open all the undo tablespaces that are currently in use. If we @@ -1391,8 +1406,6 @@ srv_undo_tablespaces_init( ut_a(undo_tablespace_ids[i] != 0); ut_a(undo_tablespace_ids[i] != ULINT_UNDEFINED); - /* Undo space ids start from 1. */ - err = srv_undo_tablespace_open(name, undo_tablespace_ids[i]); if (err != DB_SUCCESS) { @@ -1427,11 +1440,23 @@ srv_undo_tablespaces_init( break; } + /** Note the first undo tablespace id in case of + no active undo tablespace. */ + if (n_undo_tablespaces == 0) { + srv_undo_space_id_start = i; + } + ++n_undo_tablespaces; ++*n_opened; } + /** Explictly specify the srv_undo_space_id_start + as zero when there are no undo tablespaces. */ + if (n_undo_tablespaces == 0) { + srv_undo_space_id_start = 0; + } + /* If the user says that there are fewer than what we find we tolerate that discrepancy but not the inverse. Because there could be unused undo tablespaces for future use. */ @@ -1476,10 +1501,11 @@ srv_undo_tablespaces_init( mtr_start(&mtr); /* The undo log tablespace */ - for (i = 1; i <= n_undo_tablespaces; ++i) { + for (i = 0; i < n_undo_tablespaces; ++i) { fsp_header_init( - i, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); + undo_tablespace_ids[i], + SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); } mtr_commit(&mtr); @@ -1554,6 +1580,10 @@ innobase_start_or_create_for_mysql(void) char* logfile0 = NULL; size_t dirnamelen; + if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO) { + srv_read_only_mode = 1; + } + high_level_read_only = srv_read_only_mode || srv_force_recovery > SRV_FORCE_NO_TRX_UNDO; diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index efc600d16b1..55af54991c6 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -692,7 +692,8 @@ trx_purge_get_rseg_with_min_trx_id( /* We assume in purge of externally stored fields that space id is in the range of UNDO tablespace space ids */ - ut_a(purge_sys->rseg->space <= srv_undo_tablespaces_open); + ut_a(purge_sys->rseg->space == 0 + || srv_is_undo_tablespace(purge_sys->rseg->space)); zip_size = purge_sys->rseg->zip_size; diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc index e1e253cbb76..09e8e018c4f 100644 --- a/storage/innobase/trx/trx0roll.cc +++ b/storage/innobase/trx/trx0roll.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -800,6 +800,7 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_ad(!srv_read_only_mode); #ifdef UNIV_PFS_THREAD @@ -810,6 +811,7 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)( trx_rollback_or_clean_is_active = false; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc index e5f03f4b96a..00d79e7ff51 100644 --- a/storage/innobase/trx/trx0sys.cc +++ b/storage/innobase/trx/trx0sys.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -909,18 +909,12 @@ trx_sys_create_rsegs( ulint new_rsegs = n_rsegs - n_used; for (i = 0; i < new_rsegs; ++i) { - ulint space; + ulint space_id; + space_id = (n_spaces == 0) ? 0 + : (srv_undo_space_id_start + i % n_spaces); - /* Tablespace 0 is the system tablespace. All UNDO - log tablespaces start from 1. */ - - if (n_spaces > 0) { - space = (i % n_spaces) + 1; - } else { - space = 0; /* System tablespace */ - } - - if (trx_rseg_create(space) != NULL) { + /* Tablespace 0 is the system tablespace. */ + if (trx_rseg_create(space_id) != NULL) { ++n_used; } else { break; -- cgit v1.2.1 From 217b8115c8c8392fa1781dfd88bd1a77e728d5d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 15 May 2017 12:02:19 +0300 Subject: MDEV-12188 information schema - errors populating fail to free memory, unlock mutexes Given the OK macro used in innodb does a DBUG_RETURN(1) on expression failure the innodb implementation has a number of errors in i_s.cc. We introduce a new macro BREAK_IF that replaces some use of the OK macro. Also, do some other cleanup detailed below. When invoking Field::store() on integers, always pass the parameter is_unsigned=true to avoid an unnecessary conversion to double. i_s_fts_deleted_generic_fill(), i_s_fts_config_fill(): Use the BREAK_IF macro instead of OK. 
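To make the control-flow difference concrete, here is a minimal standalone
sketch. Only the shape of the two macros follows the definitions added to
i_s.cc; do_fill(), fake_lock(), fake_unlock() and store_row() are invented
for illustration and stand in for the real fill functions that hold
dict_operation_lock while storing rows:

    #include <cstdio>

    /* Simplified: the real OK() uses DBUG_RETURN(1) rather than return 1. */
    #define OK(expr)        if ((expr) != 0) { return 1; }
    #define BREAK_IF(expr)  if ((expr)) break

    static int  store_row(int i) { return i == 3; }    /* pretend row 3 fails */
    static void fake_lock(void)   { std::puts("lock"); }
    static void fake_unlock(void) { std::puts("unlock"); }

    static int do_fill(void)
    {
            int ret = 0;
            fake_lock();
            for (int i = 0; i < 5; i++) {
                    /* OK(store_row(i)) would return here on failure and
                    skip fake_unlock(); BREAK_IF only leaves the loop, so
                    the unlock below always runs and ret carries the error. */
                    BREAK_IF(ret = store_row(i));
            }
            fake_unlock();
            return ret;
    }

    int main(void) { return do_fill(); }

With OK() in that position, the early DBUG_RETURN(1) is what leaked the
conversion buffer and left dict_operation_lock and dict_sys->mutex held,
which is the failure mode this patch addresses.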
i_s_fts_index_cache_fill_one_index(), i_s_fts_index_table_fill_one_index(): Add a parameter for conv_string, and let the caller allocate that buffer. i_s_fts_index_cache_fill(): Check the return status of i_s_fts_index_cache_fill_one_index(). i_s_fts_index_table_fill(): Check the return status of i_s_fts_index_table_fill_one_index(). i_s_fts_index_table_fill_one_fetch(): Always let the caller invoke i_s_fts_index_table_free_one_fetch(). i_s_innodb_buffer_page_fill(), i_s_innodb_buf_page_lru_fill(): Do release dict_sys->mutex if filling the buffers fails. i_s_innodb_buf_page_lru_fill(): Also display the value INFORMATION_SCHEMA.INNODB_BUFFER_PAGE.PAGE_IO_FIX='IO_PIN' when a block is in that state. Remove the unnecessary variable 'heap'. --- storage/innobase/handler/i_s.cc | 285 ++++++++++++++++++++-------------------- storage/xtradb/handler/i_s.cc | 285 ++++++++++++++++++++-------------------- 2 files changed, 284 insertions(+), 286 deletions(-) (limited to 'storage') diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 50340e05860..2f5fe45656e 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -143,6 +144,7 @@ struct buf_page_info_t{ if ((expr) != 0) { \ DBUG_RETURN(1); \ } +#define BREAK_IF(expr) if ((expr)) break #define RETURN_IF_INNODB_NOT_STARTED(plugin_name) \ do { \ @@ -2994,14 +2996,16 @@ i_s_fts_deleted_generic_fill( fields = table->field; + int ret = 0; + for (ulint j = 0; j < ib_vector_size(deleted->doc_ids); ++j) { doc_id_t doc_id; doc_id = *(doc_id_t*) ib_vector_get_const(deleted->doc_ids, j); - OK(fields[I_S_FTS_DOC_ID]->store((longlong) doc_id, true)); + BREAK_IF(ret = fields[I_S_FTS_DOC_ID]->store(doc_id, true)); - OK(schema_table_store_record(thd, table)); + BREAK_IF(ret = schema_table_store_record(thd, table)); } trx_free_for_background(trx); @@ -3012,7 +3016,7 @@ i_s_fts_deleted_generic_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3252,13 +3256,13 @@ i_s_fts_index_cache_fill_one_index( /*===============================*/ fts_index_cache_t* index_cache, /*!< in: FTS index cache */ THD* thd, /*!< in: thread */ + fts_string_t* conv_str, /*!< in/out: buffer */ TABLE_LIST* tables) /*!< in/out: tables to fill */ { TABLE* table = (TABLE*) tables->table; Field** fields; CHARSET_INFO* index_charset; const ib_rbt_node_t* rbt_node; - fts_string_t conv_str; uint dummy_errors; char* word_str; @@ -3267,10 +3271,9 @@ i_s_fts_index_cache_fill_one_index( fields = table->field; index_charset = index_cache->charset; - conv_str.f_len = system_charset_info->mbmaxlen - * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); - conv_str.f_n_char = 0; + conv_str->f_n_char = 0; + + int ret = 0; /* Go through each word in the index cache */ for (rbt_node = rbt_first(index_cache->words); @@ -3282,16 +3285,16 @@ i_s_fts_index_cache_fill_one_index( /* Convert word from index charset to system_charset_info */ if (index_charset->cset != system_charset_info->cset) { - conv_str.f_n_char = my_convert( - 
reinterpret_cast(conv_str.f_str), - static_cast(conv_str.f_len), + conv_str->f_n_char = my_convert( + reinterpret_cast(conv_str->f_str), + static_cast(conv_str->f_len), system_charset_info, reinterpret_cast(word->text.f_str), static_cast(word->text.f_len), index_charset, &dummy_errors); - ut_ad(conv_str.f_n_char <= conv_str.f_len); - conv_str.f_str[conv_str.f_n_char] = 0; - word_str = reinterpret_cast(conv_str.f_str); + ut_ad(conv_str->f_n_char <= conv_str->f_len); + conv_str->f_str[conv_str->f_n_char] = 0; + word_str = reinterpret_cast(conv_str->f_str); } else { word_str = reinterpret_cast(word->text.f_str); } @@ -3349,9 +3352,7 @@ i_s_fts_index_cache_fill_one_index( } } - ut_free(conv_str.f_str); - - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHED @@ -3395,18 +3396,27 @@ i_s_fts_index_cache_fill( ut_a(cache); + int ret = 0; + fts_string_t conv_str; + conv_str.f_len = system_charset_info->mbmaxlen + * FTS_MAX_WORD_LEN_IN_CHAR; + conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); + for (ulint i = 0; i < ib_vector_size(cache->indexes); i++) { fts_index_cache_t* index_cache; index_cache = static_cast ( ib_vector_get(cache->indexes, i)); - i_s_fts_index_cache_fill_one_index(index_cache, thd, tables); + BREAK_IF(ret = i_s_fts_index_cache_fill_one_index( + index_cache, thd, &conv_str, tables)); } + ut_free(conv_str.f_str); + dict_table_close(user_table, FALSE, FALSE); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3710,8 +3720,6 @@ i_s_fts_index_table_fill_one_fetch( } } - i_s_fts_index_table_free_one_fetch(words); - DBUG_RETURN(ret); } @@ -3725,13 +3733,13 @@ i_s_fts_index_table_fill_one_index( /*===============================*/ dict_index_t* index, /*!< in: FTS index */ THD* thd, /*!< in: thread */ + fts_string_t* conv_str, /*!< in/out: buffer */ TABLE_LIST* tables) /*!< in/out: tables to fill */ { ib_vector_t* words; mem_heap_t* heap; fts_string_t word; CHARSET_INFO* index_charset; - fts_string_t conv_str; dberr_t error; int ret = 0; @@ -3748,10 +3756,6 @@ i_s_fts_index_table_fill_one_index( word.f_n_char = 0; index_charset = fts_index_get_charset(index); - conv_str.f_len = system_charset_info->mbmaxlen - * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); - conv_str.f_n_char = 0; /* Iterate through each auxiliary table as described in fts_index_selector */ @@ -3785,17 +3789,17 @@ i_s_fts_index_table_fill_one_index( /* Fill into tables */ ret = i_s_fts_index_table_fill_one_fetch( - index_charset, thd, tables, words, &conv_str, has_more); + index_charset, thd, tables, words, conv_str, + has_more); + i_s_fts_index_table_free_one_fetch(words); if (ret != 0) { - i_s_fts_index_table_free_one_fetch(words); goto func_exit; } } while (has_more); } func_exit: - ut_free(conv_str.f_str); mem_heap_free(heap); DBUG_RETURN(ret); @@ -3837,10 +3841,17 @@ i_s_fts_index_table_fill( DBUG_RETURN(0); } + int ret = 0; + fts_string_t conv_str; + conv_str.f_len = system_charset_info->mbmaxlen + * FTS_MAX_WORD_LEN_IN_CHAR; + conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); + for (index = dict_table_get_first_index(user_table); index; index = dict_table_get_next_index(index)) { if (index->type & DICT_FTS) { - i_s_fts_index_table_fill_one_index(index, thd, tables); + BREAK_IF(ret = i_s_fts_index_table_fill_one_index( + index, thd, &conv_str, tables)); } } @@ -3848,7 +3859,9 
@@ i_s_fts_index_table_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + ut_free(conv_str.f_str); + + DBUG_RETURN(ret); } /*******************************************************************//** @@ -4014,6 +4027,8 @@ i_s_fts_config_fill( DBUG_ASSERT(!dict_index_is_online_ddl(index)); } + int ret = 0; + while (fts_config_key[i]) { fts_string_t value; char* key_name; @@ -4038,13 +4053,14 @@ i_s_fts_config_fill( ut_free(key_name); } - OK(field_store_string( - fields[FTS_CONFIG_KEY], fts_config_key[i])); + BREAK_IF(ret = field_store_string( + fields[FTS_CONFIG_KEY], fts_config_key[i])); - OK(field_store_string( - fields[FTS_CONFIG_VALUE], (const char*) value.f_str)); + BREAK_IF(ret = field_store_string( + fields[FTS_CONFIG_VALUE], + reinterpret_cast(value.f_str))); - OK(schema_table_store_record(thd, table)); + BREAK_IF(ret = schema_table_store_record(thd, table)); i++; } @@ -4057,7 +4073,7 @@ i_s_fts_config_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -4895,34 +4911,29 @@ i_s_innodb_buffer_page_fill( state_str = NULL; OK(fields[IDX_BUFFER_POOL_ID]->store( - static_cast(page_info->pool_id))); + page_info->pool_id, true)); OK(fields[IDX_BUFFER_BLOCK_ID]->store( - static_cast(page_info->block_id))); + page_info->block_id, true)); OK(fields[IDX_BUFFER_PAGE_SPACE]->store( - static_cast(page_info->space_id))); + page_info->space_id, true)); OK(fields[IDX_BUFFER_PAGE_NUM]->store( - static_cast(page_info->page_num))); + page_info->page_num, true)); OK(field_store_string( fields[IDX_BUFFER_PAGE_TYPE], i_s_page_type[page_info->page_type].type_str)); OK(fields[IDX_BUFFER_PAGE_FLUSH_TYPE]->store( - page_info->flush_type)); + page_info->flush_type, true)); OK(fields[IDX_BUFFER_PAGE_FIX_COUNT]->store( - page_info->fix_count)); + page_info->fix_count, true)); - if (page_info->hashed) { - OK(field_store_string( - fields[IDX_BUFFER_PAGE_HASHED], "YES")); - } else { - OK(field_store_string( - fields[IDX_BUFFER_PAGE_HASHED], "NO")); - } + OK(field_store_string(fields[IDX_BUFFER_PAGE_HASHED], + page_info->hashed ? 
"YES" : "NO")); OK(fields[IDX_BUFFER_PAGE_NEWEST_MOD]->store( (longlong) page_info->newest_mod, true)); @@ -4931,7 +4942,7 @@ i_s_innodb_buffer_page_fill( (longlong) page_info->oldest_mod, true)); OK(fields[IDX_BUFFER_PAGE_ACCESS_TIME]->store( - page_info->access_time)); + page_info->access_time, true)); fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_null(); @@ -4940,44 +4951,48 @@ i_s_innodb_buffer_page_fill( /* If this is an index page, fetch the index name and table name */ if (page_info->page_type == I_S_PAGE_TYPE_INDEX) { - const dict_index_t* index; + bool ret = false; mutex_enter(&dict_sys->mutex); - index = dict_index_get_if_in_cache_low( - page_info->index_id); - - if (index) { + if (const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id)) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), thd, TRUE); - OK(fields[IDX_BUFFER_PAGE_TABLE_NAME]->store( - table_name, - static_cast(table_name_end - table_name), - system_charset_info)); - fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); - - OK(field_store_index_name( - fields[IDX_BUFFER_PAGE_INDEX_NAME], - index->name)); + ret = fields[IDX_BUFFER_PAGE_TABLE_NAME] + ->store(table_name, + static_cast( + table_name_end + - table_name), + system_charset_info) + || field_store_index_name( + fields + [IDX_BUFFER_PAGE_INDEX_NAME], + index->name); } mutex_exit(&dict_sys->mutex); + + OK(ret); + + fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); } OK(fields[IDX_BUFFER_PAGE_NUM_RECS]->store( - page_info->num_recs)); + page_info->num_recs, true)); OK(fields[IDX_BUFFER_PAGE_DATA_SIZE]->store( - page_info->data_size)); + page_info->data_size, true)); OK(fields[IDX_BUFFER_PAGE_ZIP_SIZE]->store( - page_info->zip_ssize - ? (UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize - : 0)); + page_info->zip_ssize + ? (UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize + : 0, true)); #if BUF_PAGE_STATE_BITS > 3 # error "BUF_PAGE_STATE_BITS > 3, please ensure that all 1<io_fix) { case BUF_IO_NONE: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_NONE")); + state_str = "IO_NONE"; break; case BUF_IO_READ: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_READ")); + state_str = "IO_READ"; break; case BUF_IO_WRITE: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_WRITE")); + state_str = "IO_WRITE"; break; case BUF_IO_PIN: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_PIN")); + state_str = "IO_PIN"; break; } + OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], + state_str)); + OK(field_store_string(fields[IDX_BUFFER_PAGE_IS_OLD], (page_info->is_old) ? 
"YES" : "NO")); OK(fields[IDX_BUFFER_PAGE_FREE_CLOCK]->store( page_info->freed_page_clock)); - if (schema_table_store_record(thd, table)) { - DBUG_RETURN(1); - } + OK(schema_table_store_record(thd, table)); } DBUG_RETURN(0); @@ -5584,17 +5596,10 @@ i_s_innodb_buf_page_lru_fill( ulint num_page) /*!< in: number of page info cached */ { - TABLE* table; - Field** fields; - mem_heap_t* heap; - DBUG_ENTER("i_s_innodb_buf_page_lru_fill"); - table = tables->table; - - fields = table->field; - - heap = mem_heap_create(1000); + TABLE* table = tables->table; + Field** fields = table->field; /* Iterate through the cached array and fill the I_S table rows */ for (ulint i = 0; i < num_page; i++) { @@ -5609,43 +5614,37 @@ i_s_innodb_buf_page_lru_fill( page_info = info_array + i; OK(fields[IDX_BUF_LRU_POOL_ID]->store( - static_cast(page_info->pool_id))); - + page_info->pool_id, true)); OK(fields[IDX_BUF_LRU_POS]->store( - static_cast(page_info->block_id))); + page_info->block_id, true)); OK(fields[IDX_BUF_LRU_PAGE_SPACE]->store( - static_cast(page_info->space_id))); + page_info->space_id, true)); OK(fields[IDX_BUF_LRU_PAGE_NUM]->store( - static_cast(page_info->page_num))); + page_info->page_num, true)); OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_TYPE], - i_s_page_type[page_info->page_type].type_str)); + fields[IDX_BUF_LRU_PAGE_TYPE], + i_s_page_type[page_info->page_type].type_str)); OK(fields[IDX_BUF_LRU_PAGE_FLUSH_TYPE]->store( - static_cast(page_info->flush_type))); + page_info->flush_type, true)); OK(fields[IDX_BUF_LRU_PAGE_FIX_COUNT]->store( - static_cast(page_info->fix_count))); + page_info->fix_count, true)); - if (page_info->hashed) { - OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_HASHED], "YES")); - } else { - OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_HASHED], "NO")); - } + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_HASHED], + page_info->hashed ? 
"YES" : "NO")); OK(fields[IDX_BUF_LRU_PAGE_NEWEST_MOD]->store( - page_info->newest_mod, true)); + page_info->newest_mod, true)); OK(fields[IDX_BUF_LRU_PAGE_OLDEST_MOD]->store( - page_info->oldest_mod, true)); + page_info->oldest_mod, true)); OK(fields[IDX_BUF_LRU_PAGE_ACCESS_TIME]->store( - page_info->access_time)); + page_info->access_time, true)); fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_null(); @@ -5654,43 +5653,47 @@ i_s_innodb_buf_page_lru_fill( /* If this is an index page, fetch the index name and table name */ if (page_info->page_type == I_S_PAGE_TYPE_INDEX) { - const dict_index_t* index; + bool ret = false; mutex_enter(&dict_sys->mutex); - index = dict_index_get_if_in_cache_low( - page_info->index_id); - - if (index) { + if (const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id)) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), thd, TRUE); - OK(fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->store( - table_name, - static_cast(table_name_end - table_name), - system_charset_info)); - fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); - - OK(field_store_index_name( - fields[IDX_BUF_LRU_PAGE_INDEX_NAME], - index->name)); + ret = fields[IDX_BUF_LRU_PAGE_TABLE_NAME] + ->store(table_name, + static_cast( + table_name_end + - table_name), + system_charset_info) + || field_store_index_name( + fields + [IDX_BUF_LRU_PAGE_INDEX_NAME], + index->name); } mutex_exit(&dict_sys->mutex); + + OK(ret); + + fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); } OK(fields[IDX_BUF_LRU_PAGE_NUM_RECS]->store( - page_info->num_recs)); + page_info->num_recs, true)); OK(fields[IDX_BUF_LRU_PAGE_DATA_SIZE]->store( - page_info->data_size)); + page_info->data_size, true)); OK(fields[IDX_BUF_LRU_PAGE_ZIP_SIZE]->store( - page_info->zip_ssize ? - 512 << page_info->zip_ssize : 0)); + page_info->zip_ssize + ? 512 << page_info->zip_ssize : 0, true)); state = static_cast(page_info->page_state); @@ -5719,35 +5722,31 @@ i_s_innodb_buf_page_lru_fill( switch (page_info->io_fix) { case BUF_IO_NONE: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_NONE")); + state_str = "IO_NONE"; break; case BUF_IO_READ: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_READ")); + state_str = "IO_READ"; break; case BUF_IO_WRITE: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_WRITE")); + state_str = "IO_WRITE"; + break; + case BUF_IO_PIN: + state_str = "IO_PIN"; break; } + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], + state_str)); + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IS_OLD], - (page_info->is_old) ? "YES" : "NO")); + page_info->is_old ? "YES" : "NO")); OK(fields[IDX_BUF_LRU_PAGE_FREE_CLOCK]->store( - page_info->freed_page_clock)); + page_info->freed_page_clock, true)); - if (schema_table_store_record(thd, table)) { - mem_heap_free(heap); - DBUG_RETURN(1); - } - - mem_heap_empty(heap); + OK(schema_table_store_record(thd, table)); } - mem_heap_free(heap); - DBUG_RETURN(0); } diff --git a/storage/xtradb/handler/i_s.cc b/storage/xtradb/handler/i_s.cc index f2686f1049b..aac1074e152 100644 --- a/storage/xtradb/handler/i_s.cc +++ b/storage/xtradb/handler/i_s.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -155,6 +156,7 @@ struct buf_page_info_t{ if ((expr) != 0) { \ DBUG_RETURN(1); \ } +#define BREAK_IF(expr) if ((expr)) break #define RETURN_IF_INNODB_NOT_STARTED(plugin_name) \ do { \ @@ -2989,14 +2991,16 @@ i_s_fts_deleted_generic_fill( fields = table->field; + int ret = 0; + for (ulint j = 0; j < ib_vector_size(deleted->doc_ids); ++j) { doc_id_t doc_id; doc_id = *(doc_id_t*) ib_vector_get_const(deleted->doc_ids, j); - OK(fields[I_S_FTS_DOC_ID]->store((longlong) doc_id, true)); + BREAK_IF(ret = fields[I_S_FTS_DOC_ID]->store(doc_id, true)); - OK(schema_table_store_record(thd, table)); + BREAK_IF(ret = schema_table_store_record(thd, table)); } trx_free_for_background(trx); @@ -3007,7 +3011,7 @@ i_s_fts_deleted_generic_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3245,13 +3249,13 @@ i_s_fts_index_cache_fill_one_index( /*===============================*/ fts_index_cache_t* index_cache, /*!< in: FTS index cache */ THD* thd, /*!< in: thread */ + fts_string_t* conv_str, /*!< in/out: buffer */ TABLE_LIST* tables) /*!< in/out: tables to fill */ { TABLE* table = (TABLE*) tables->table; Field** fields; CHARSET_INFO* index_charset; const ib_rbt_node_t* rbt_node; - fts_string_t conv_str; uint dummy_errors; char* word_str; @@ -3260,10 +3264,9 @@ i_s_fts_index_cache_fill_one_index( fields = table->field; index_charset = index_cache->charset; - conv_str.f_len = system_charset_info->mbmaxlen - * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); - conv_str.f_n_char = 0; + conv_str->f_n_char = 0; + + int ret = 0; /* Go through each word in the index cache */ for (rbt_node = rbt_first(index_cache->words); @@ -3275,16 +3278,16 @@ i_s_fts_index_cache_fill_one_index( /* Convert word from index charset to system_charset_info */ if (index_charset->cset != system_charset_info->cset) { - conv_str.f_n_char = my_convert( - reinterpret_cast(conv_str.f_str), - static_cast(conv_str.f_len), + conv_str->f_n_char = my_convert( + reinterpret_cast(conv_str->f_str), + static_cast(conv_str->f_len), system_charset_info, reinterpret_cast(word->text.f_str), static_cast(word->text.f_len), index_charset, &dummy_errors); - ut_ad(conv_str.f_n_char <= conv_str.f_len); - conv_str.f_str[conv_str.f_n_char] = 0; - word_str = reinterpret_cast(conv_str.f_str); + ut_ad(conv_str->f_n_char <= conv_str->f_len); + conv_str->f_str[conv_str->f_n_char] = 0; + word_str = reinterpret_cast(conv_str->f_str); } else { word_str = reinterpret_cast(word->text.f_str); } @@ -3342,9 +3345,7 @@ i_s_fts_index_cache_fill_one_index( } } - ut_free(conv_str.f_str); - - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** Fill the dynamic table INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHED @@ -3388,18 +3389,27 @@ i_s_fts_index_cache_fill( ut_a(cache); + int ret = 0; + fts_string_t conv_str; + conv_str.f_len = system_charset_info->mbmaxlen + * FTS_MAX_WORD_LEN_IN_CHAR; + conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); + for (ulint i = 0; i < ib_vector_size(cache->indexes); i++) { fts_index_cache_t* index_cache; index_cache = static_cast ( ib_vector_get(cache->indexes, i)); - i_s_fts_index_cache_fill_one_index(index_cache, thd, tables); + BREAK_IF(ret = i_s_fts_index_cache_fill_one_index( + 
index_cache, thd, &conv_str, tables)); } + ut_free(conv_str.f_str); + dict_table_close(user_table, FALSE, FALSE); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -3702,8 +3712,6 @@ i_s_fts_index_table_fill_one_fetch( } } - i_s_fts_index_table_free_one_fetch(words); - DBUG_RETURN(ret); } @@ -3717,13 +3725,13 @@ i_s_fts_index_table_fill_one_index( /*===============================*/ dict_index_t* index, /*!< in: FTS index */ THD* thd, /*!< in: thread */ + fts_string_t* conv_str, /*!< in/out: buffer */ TABLE_LIST* tables) /*!< in/out: tables to fill */ { ib_vector_t* words; mem_heap_t* heap; fts_string_t word; CHARSET_INFO* index_charset; - fts_string_t conv_str; dberr_t error; int ret = 0; @@ -3740,10 +3748,6 @@ i_s_fts_index_table_fill_one_index( word.f_n_char = 0; index_charset = fts_index_get_charset(index); - conv_str.f_len = system_charset_info->mbmaxlen - * FTS_MAX_WORD_LEN_IN_CHAR; - conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); - conv_str.f_n_char = 0; /* Iterate through each auxiliary table as described in fts_index_selector */ @@ -3777,17 +3781,17 @@ i_s_fts_index_table_fill_one_index( /* Fill into tables */ ret = i_s_fts_index_table_fill_one_fetch( - index_charset, thd, tables, words, &conv_str, has_more); + index_charset, thd, tables, words, conv_str, + has_more); + i_s_fts_index_table_free_one_fetch(words); if (ret != 0) { - i_s_fts_index_table_free_one_fetch(words); goto func_exit; } } while (has_more); } func_exit: - ut_free(conv_str.f_str); mem_heap_free(heap); DBUG_RETURN(ret); @@ -3829,10 +3833,17 @@ i_s_fts_index_table_fill( DBUG_RETURN(0); } + int ret = 0; + fts_string_t conv_str; + conv_str.f_len = system_charset_info->mbmaxlen + * FTS_MAX_WORD_LEN_IN_CHAR; + conv_str.f_str = static_cast(ut_malloc(conv_str.f_len)); + for (index = dict_table_get_first_index(user_table); index; index = dict_table_get_next_index(index)) { if (index->type & DICT_FTS) { - i_s_fts_index_table_fill_one_index(index, thd, tables); + BREAK_IF(ret = i_s_fts_index_table_fill_one_index( + index, thd, &conv_str, tables)); } } @@ -3840,7 +3851,9 @@ i_s_fts_index_table_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + ut_free(conv_str.f_str); + + DBUG_RETURN(ret); } /*******************************************************************//** @@ -4005,6 +4018,8 @@ i_s_fts_config_fill( DBUG_ASSERT(!dict_index_is_online_ddl(index)); } + int ret = 0; + while (fts_config_key[i]) { fts_string_t value; char* key_name; @@ -4029,13 +4044,14 @@ i_s_fts_config_fill( ut_free(key_name); } - OK(field_store_string( - fields[FTS_CONFIG_KEY], fts_config_key[i])); + BREAK_IF(ret = field_store_string( + fields[FTS_CONFIG_KEY], fts_config_key[i])); - OK(field_store_string( - fields[FTS_CONFIG_VALUE], (const char*) value.f_str)); + BREAK_IF(ret = field_store_string( + fields[FTS_CONFIG_VALUE], + reinterpret_cast(value.f_str))); - OK(schema_table_store_record(thd, table)); + BREAK_IF(ret = schema_table_store_record(thd, table)); i++; } @@ -4048,7 +4064,7 @@ i_s_fts_config_fill( rw_lock_s_unlock(&dict_operation_lock); - DBUG_RETURN(0); + DBUG_RETURN(ret); } /*******************************************************************//** @@ -4883,34 +4899,29 @@ i_s_innodb_buffer_page_fill( state_str = NULL; OK(fields[IDX_BUFFER_POOL_ID]->store( - static_cast(page_info->pool_id))); + page_info->pool_id, true)); OK(fields[IDX_BUFFER_BLOCK_ID]->store( - static_cast(page_info->block_id))); + page_info->block_id, true)); 
OK(fields[IDX_BUFFER_PAGE_SPACE]->store( - static_cast(page_info->space_id))); + page_info->space_id, true)); OK(fields[IDX_BUFFER_PAGE_NUM]->store( - static_cast(page_info->page_num))); + page_info->page_num, true)); OK(field_store_string( fields[IDX_BUFFER_PAGE_TYPE], i_s_page_type[page_info->page_type].type_str)); OK(fields[IDX_BUFFER_PAGE_FLUSH_TYPE]->store( - page_info->flush_type)); + page_info->flush_type, true)); OK(fields[IDX_BUFFER_PAGE_FIX_COUNT]->store( - page_info->fix_count)); + page_info->fix_count, true)); - if (page_info->hashed) { - OK(field_store_string( - fields[IDX_BUFFER_PAGE_HASHED], "YES")); - } else { - OK(field_store_string( - fields[IDX_BUFFER_PAGE_HASHED], "NO")); - } + OK(field_store_string(fields[IDX_BUFFER_PAGE_HASHED], + page_info->hashed ? "YES" : "NO")); OK(fields[IDX_BUFFER_PAGE_NEWEST_MOD]->store( (longlong) page_info->newest_mod, true)); @@ -4919,7 +4930,7 @@ i_s_innodb_buffer_page_fill( (longlong) page_info->oldest_mod, true)); OK(fields[IDX_BUFFER_PAGE_ACCESS_TIME]->store( - page_info->access_time)); + page_info->access_time, true)); fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_null(); @@ -4928,44 +4939,48 @@ i_s_innodb_buffer_page_fill( /* If this is an index page, fetch the index name and table name */ if (page_info->page_type == I_S_PAGE_TYPE_INDEX) { - const dict_index_t* index; + bool ret = false; mutex_enter(&dict_sys->mutex); - index = dict_index_get_if_in_cache_low( - page_info->index_id); - - if (index) { + if (const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id)) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), thd, TRUE); - OK(fields[IDX_BUFFER_PAGE_TABLE_NAME]->store( - table_name, - static_cast(table_name_end - table_name), - system_charset_info)); - fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); - - OK(field_store_index_name( - fields[IDX_BUFFER_PAGE_INDEX_NAME], - index->name)); + ret = fields[IDX_BUFFER_PAGE_TABLE_NAME] + ->store(table_name, + static_cast( + table_name_end + - table_name), + system_charset_info) + || field_store_index_name( + fields + [IDX_BUFFER_PAGE_INDEX_NAME], + index->name); } mutex_exit(&dict_sys->mutex); + + OK(ret); + + fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull(); } OK(fields[IDX_BUFFER_PAGE_NUM_RECS]->store( - page_info->num_recs)); + page_info->num_recs, true)); OK(fields[IDX_BUFFER_PAGE_DATA_SIZE]->store( - page_info->data_size)); + page_info->data_size, true)); OK(fields[IDX_BUFFER_PAGE_ZIP_SIZE]->store( - page_info->zip_ssize - ? (UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize - : 0)); + page_info->zip_ssize + ? (UNIV_ZIP_SIZE_MIN >> 1) << page_info->zip_ssize + : 0, true)); #if BUF_PAGE_STATE_BITS > 3 # error "BUF_PAGE_STATE_BITS > 3, please ensure that all 1<io_fix) { case BUF_IO_NONE: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_NONE")); + state_str = "IO_NONE"; break; case BUF_IO_READ: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_READ")); + state_str = "IO_READ"; break; case BUF_IO_WRITE: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_WRITE")); + state_str = "IO_WRITE"; break; case BUF_IO_PIN: - OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], - "IO_PIN")); + state_str = "IO_PIN"; break; } + OK(field_store_string(fields[IDX_BUFFER_PAGE_IO_FIX], + state_str)); + OK(field_store_string(fields[IDX_BUFFER_PAGE_IS_OLD], (page_info->is_old) ? 
"YES" : "NO")); OK(fields[IDX_BUFFER_PAGE_FREE_CLOCK]->store( page_info->freed_page_clock)); - if (schema_table_store_record(thd, table)) { - DBUG_RETURN(1); - } + OK(schema_table_store_record(thd, table)); } DBUG_RETURN(0); @@ -5569,17 +5581,10 @@ i_s_innodb_buf_page_lru_fill( ulint num_page) /*!< in: number of page info cached */ { - TABLE* table; - Field** fields; - mem_heap_t* heap; - DBUG_ENTER("i_s_innodb_buf_page_lru_fill"); - table = tables->table; - - fields = table->field; - - heap = mem_heap_create(1000); + TABLE* table = tables->table; + Field** fields = table->field; /* Iterate through the cached array and fill the I_S table rows */ for (ulint i = 0; i < num_page; i++) { @@ -5594,43 +5599,37 @@ i_s_innodb_buf_page_lru_fill( page_info = info_array + i; OK(fields[IDX_BUF_LRU_POOL_ID]->store( - static_cast(page_info->pool_id))); - + page_info->pool_id, true)); OK(fields[IDX_BUF_LRU_POS]->store( - static_cast(page_info->block_id))); + page_info->block_id, true)); OK(fields[IDX_BUF_LRU_PAGE_SPACE]->store( - static_cast(page_info->space_id))); + page_info->space_id, true)); OK(fields[IDX_BUF_LRU_PAGE_NUM]->store( - static_cast(page_info->page_num))); + page_info->page_num, true)); OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_TYPE], - i_s_page_type[page_info->page_type].type_str)); + fields[IDX_BUF_LRU_PAGE_TYPE], + i_s_page_type[page_info->page_type].type_str)); OK(fields[IDX_BUF_LRU_PAGE_FLUSH_TYPE]->store( - static_cast(page_info->flush_type))); + page_info->flush_type, true)); OK(fields[IDX_BUF_LRU_PAGE_FIX_COUNT]->store( - static_cast(page_info->fix_count))); + page_info->fix_count, true)); - if (page_info->hashed) { - OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_HASHED], "YES")); - } else { - OK(field_store_string( - fields[IDX_BUF_LRU_PAGE_HASHED], "NO")); - } + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_HASHED], + page_info->hashed ? 
"YES" : "NO")); OK(fields[IDX_BUF_LRU_PAGE_NEWEST_MOD]->store( - page_info->newest_mod, true)); + page_info->newest_mod, true)); OK(fields[IDX_BUF_LRU_PAGE_OLDEST_MOD]->store( - page_info->oldest_mod, true)); + page_info->oldest_mod, true)); OK(fields[IDX_BUF_LRU_PAGE_ACCESS_TIME]->store( - page_info->access_time)); + page_info->access_time, true)); fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_null(); @@ -5639,43 +5638,47 @@ i_s_innodb_buf_page_lru_fill( /* If this is an index page, fetch the index name and table name */ if (page_info->page_type == I_S_PAGE_TYPE_INDEX) { - const dict_index_t* index; + bool ret = false; mutex_enter(&dict_sys->mutex); - index = dict_index_get_if_in_cache_low( - page_info->index_id); - - if (index) { + if (const dict_index_t* index = + dict_index_get_if_in_cache_low( + page_info->index_id)) { table_name_end = innobase_convert_name( table_name, sizeof(table_name), index->table_name, strlen(index->table_name), thd, TRUE); - OK(fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->store( - table_name, - static_cast(table_name_end - table_name), - system_charset_info)); - fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); - - OK(field_store_index_name( - fields[IDX_BUF_LRU_PAGE_INDEX_NAME], - index->name)); + ret = fields[IDX_BUF_LRU_PAGE_TABLE_NAME] + ->store(table_name, + static_cast( + table_name_end + - table_name), + system_charset_info) + || field_store_index_name( + fields + [IDX_BUF_LRU_PAGE_INDEX_NAME], + index->name); } mutex_exit(&dict_sys->mutex); + + OK(ret); + + fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull(); } OK(fields[IDX_BUF_LRU_PAGE_NUM_RECS]->store( - page_info->num_recs)); + page_info->num_recs, true)); OK(fields[IDX_BUF_LRU_PAGE_DATA_SIZE]->store( - page_info->data_size)); + page_info->data_size, true)); OK(fields[IDX_BUF_LRU_PAGE_ZIP_SIZE]->store( - page_info->zip_ssize ? - 512 << page_info->zip_ssize : 0)); + page_info->zip_ssize + ? 512 << page_info->zip_ssize : 0, true)); state = static_cast(page_info->page_state); @@ -5704,35 +5707,31 @@ i_s_innodb_buf_page_lru_fill( switch (page_info->io_fix) { case BUF_IO_NONE: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_NONE")); + state_str = "IO_NONE"; break; case BUF_IO_READ: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_READ")); + state_str = "IO_READ"; break; case BUF_IO_WRITE: - OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], - "IO_WRITE")); + state_str = "IO_WRITE"; + break; + case BUF_IO_PIN: + state_str = "IO_PIN"; break; } + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IO_FIX], + state_str)); + OK(field_store_string(fields[IDX_BUF_LRU_PAGE_IS_OLD], - (page_info->is_old) ? "YES" : "NO")); + page_info->is_old ? "YES" : "NO")); OK(fields[IDX_BUF_LRU_PAGE_FREE_CLOCK]->store( - page_info->freed_page_clock)); - - if (schema_table_store_record(thd, table)) { - mem_heap_free(heap); - DBUG_RETURN(1); - } + page_info->freed_page_clock, true)); - mem_heap_empty(heap); + OK(schema_table_store_record(thd, table)); } - mem_heap_free(heap); - DBUG_RETURN(0); } -- cgit v1.2.1 From f9264280d68dcbca2f9312c7d8620b2bcc9d0694 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 12 May 2017 14:27:49 +0200 Subject: MDEV-12761 Error return from external_lock make the server crash bunch of bugs when external_lock() fails on unlock: * mi_lock_database() used mi_mark_crashed() under share->intern_lock, but mi_mark_crashed() itself locks this mutex. 
* handler::close() required table to be unlocked, but failed external_lock didn't count as unlock * mysql_unlock_tables() ignored all unlock errors, but they still set the error status in stmt_da. --- storage/myisam/mi_locking.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'storage') diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c index 531b800c63e..1921926463e 100644 --- a/storage/myisam/mi_locking.c +++ b/storage/myisam/mi_locking.c @@ -29,7 +29,7 @@ static void mi_update_status_with_lock(MI_INFO *info); int mi_lock_database(MI_INFO *info, int lock_type) { - int error; + int error, mark_crashed= 0; uint count; MYISAM_SHARE *share=info->s; DBUG_ENTER("mi_lock_database"); @@ -52,6 +52,7 @@ int mi_lock_database(MI_INFO *info, int lock_type) } error= 0; + DBUG_EXECUTE_IF ("mi_lock_database_failure", error= EINVAL;); mysql_mutex_lock(&share->intern_lock); if (share->kfile >= 0) /* May only be false on windows */ { @@ -75,17 +76,15 @@ int mi_lock_database(MI_INFO *info, int lock_type) &share->dirty_part_map, FLUSH_KEEP)) { - error=my_errno; + mark_crashed= error=my_errno; mi_print_error(info->s, HA_ERR_CRASHED); - mi_mark_crashed(info); /* Mark that table must be checked */ } if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { if (end_io_cache(&info->rec_cache)) { - error=my_errno; + mark_crashed= error=my_errno; mi_print_error(info->s, HA_ERR_CRASHED); - mi_mark_crashed(info); } } if (!count) @@ -110,22 +109,19 @@ int mi_lock_database(MI_INFO *info, int lock_type) share->state.unique= info->last_unique= info->this_unique; share->state.update_count= info->last_loop= ++info->this_loop; if (mi_state_info_write(share->kfile, &share->state, 1)) - error=my_errno; + mark_crashed= error=my_errno; share->changed=0; if (myisam_flush) { if (mysql_file_sync(share->kfile, MYF(0))) - error= my_errno; + mark_crashed= error= my_errno; if (mysql_file_sync(info->dfile, MYF(0))) - error= my_errno; + mark_crashed= error= my_errno; } else share->not_flushed=1; if (error) - { mi_print_error(info->s, HA_ERR_CRASHED); - mi_mark_crashed(info); - } } if (info->lock_type != F_EXTRA_LCK) { @@ -260,6 +256,8 @@ int mi_lock_database(MI_INFO *info, int lock_type) } #endif mysql_mutex_unlock(&share->intern_lock); + if (mark_crashed) + mi_mark_crashed(info); DBUG_RETURN(error); } /* mi_lock_database */ -- cgit v1.2.1 From 71b45032421fe47fceabe419c63e79c44794428d Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 12 May 2017 15:10:17 +0200 Subject: MDEV-9998 Fix issues caught by Clang's -Wpointer-bool-conversion warning remove useless checks and a couple of others --- storage/connect/filamzip.cpp | 2 +- storage/connect/ha_connect.cc | 2 +- storage/connect/tabext.h | 2 +- storage/innobase/dict/dict0dict.cc | 5 ++--- storage/innobase/handler/ha_innodb.cc | 11 +++++------ storage/sphinx/ha_sphinx.cc | 2 +- storage/spider/spd_table.cc | 1 - storage/xtradb/handler/ha_innodb.cc | 11 +++++------ 8 files changed, 16 insertions(+), 20 deletions(-) (limited to 'storage') diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 3d157da5e87..fa857837427 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -341,7 +341,7 @@ bool ZIPUTIL::OpenTable(PGLOBAL g, MODE mode, char *fn, bool append) bool ZIPUTIL::addEntry(PGLOBAL g, char *entry) { //?? 
we dont need the stinking time - zip_fileinfo zi = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + zip_fileinfo zi = { {0, 0, 0, 0, 0, 0}, 0, 0, 0 }; getTime(zi.tmz_date); target = entry; diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index d1ab18f52d5..1f2bd0e5992 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1192,7 +1192,7 @@ char *ha_connect::GetRealString(const char *s) { char *sv; - if (IsPartitioned() && s && partname && *partname) { + if (IsPartitioned() && s && *partname) { sv= (char*)PlugSubAlloc(xp->g, NULL, 0); sprintf(sv, s, partname); PlugSubAlloc(xp->g, NULL, strlen(sv) + 1); diff --git a/storage/connect/tabext.h b/storage/connect/tabext.h index 2ef20c89f2c..5133bd9dd5f 100644 --- a/storage/connect/tabext.h +++ b/storage/connect/tabext.h @@ -7,7 +7,7 @@ /***********************************************************************/ #ifndef __TABEXT_H -#define __TABEXTF_H +#define __TABEXT_H #include "reldef.h" diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index 6fd8afd2aee..b6ca02fa164 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -6251,6 +6251,7 @@ dict_set_corrupted_index_cache_only( dict_table_t* table) /*!< in/out: table */ { ut_ad(index != NULL); + ut_ad(table != NULL); ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); @@ -6260,9 +6261,7 @@ dict_set_corrupted_index_cache_only( if (dict_index_is_clust(index)) { dict_table_t* corrupt_table; - corrupt_table = (table != NULL) ? table : index->table; - ut_ad((index->table != NULL) || (table != NULL) - || index->table == table); + corrupt_table = table; if (corrupt_table) { corrupt_table->corrupted = TRUE; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 91a507ef1b9..1538471db4f 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -2000,12 +2000,11 @@ innobase_get_stmt( { const char* query = NULL; LEX_STRING *stmt = NULL; - if (thd) { - stmt = thd_query_string(thd); - if (stmt) { - *length = stmt->length; - query = stmt->str; - } + ut_ad(thd != NULL); + stmt = thd_query_string(thd); + if (stmt) { + *length = stmt->length; + query = stmt->str; } return (query); } diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index ff3b63465eb..3f035c7bc7d 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -911,7 +911,7 @@ bool sphinx_show_status ( THD * thd ) } // show last error or warning (either in addition to stats, or on their own) - if ( pTls && pTls->m_pHeadTable && pTls->m_pHeadTable->m_tStats.m_sLastMessage && pTls->m_pHeadTable->m_tStats.m_sLastMessage[0] ) + if ( pTls && pTls->m_pHeadTable && pTls->m_pHeadTable->m_tStats.m_sLastMessage[0] ) { const char * sMessageType = pTls->m_pHeadTable->m_tStats.m_bLastError ? 
"error" : "warning"; diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index ead335c07ea..66f558ecd91 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -517,7 +517,6 @@ int spider_free_share_alloc( ) { int roop_count; DBUG_ENTER("spider_free_share_alloc"); - if (share->dbton_bitmap) { for (roop_count = SPIDER_DBTON_SIZE - 1; roop_count >= 0; roop_count--) { diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 9433ee4c7c5..12ae71508c7 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -2326,12 +2326,11 @@ innobase_get_stmt( { const char* query = NULL; LEX_STRING *stmt = NULL; - if (thd) { - stmt = thd_query_string(thd); - if (stmt) { - *length = stmt->length; - query = stmt->str; - } + ut_ad(thd != NULL); + stmt = thd_query_string(thd); + if (stmt) { + *length = stmt->length; + query = stmt->str; } return (query); } -- cgit v1.2.1 From a65623b3eb2695069791aa21d278b8bc751a560e Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 12 May 2017 16:52:09 +0200 Subject: MDEV-11883 MariaDB crashes with out-of-memory when query information_schema CSV engine didn't expect CSM files to be read-only --- storage/csv/ha_tina.cc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'storage') diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 92ac20a8f82..35596a59c86 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -289,7 +289,7 @@ static int read_meta_file(File meta_file, ha_rows *rows) mysql_file_seek(meta_file, 0, MY_SEEK_SET, MYF(0)); if (mysql_file_read(meta_file, (uchar*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(my_errno= HA_ERR_CRASHED_ON_USAGE); /* Parse out the meta data, we ignore version at the moment @@ -418,10 +418,13 @@ static int free_share(TINA_SHARE *share) int result_code= 0; if (!--share->use_count){ /* Write the meta file. Mark it as crashed if needed. */ - (void)write_meta_file(share->meta_file, share->rows_recorded, - share->crashed ? TRUE :FALSE); - if (mysql_file_close(share->meta_file, MYF(0))) - result_code= 1; + if (share->meta_file != -1) + { + (void)write_meta_file(share->meta_file, share->rows_recorded, + share->crashed ? TRUE :FALSE); + if (mysql_file_close(share->meta_file, MYF(0))) + result_code= 1; + } if (share->tina_write_opened) { if (mysql_file_close(share->tina_write_filedes, MYF(0))) @@ -930,7 +933,7 @@ int ha_tina::open(const char *name, int mode, uint open_options) if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR)) { free_share(share); - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(my_errno ? my_errno : HA_ERR_CRASHED_ON_USAGE); } local_data_file_version= share->data_file_version; -- cgit v1.2.1 From a3cf69e2e5ba8e3954b24bf3f8bcf8705c102f64 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 13 May 2017 13:52:58 +0200 Subject: MDEV-10936 CONNECT engine JDBC type can't find JdbcInterface Don't use instal_jar() from Use_Java.cmake - at least in CentOS 7 (cmake 2.8.12.2) it doesn't yet support the COMPONENT option. 
--- storage/connect/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index a602084b5bd..840ccdfb0a8 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -270,8 +270,8 @@ IF(CONNECT_WITH_JDBC) # Find required libraries and include directories SET (JAVA_SOURCES JdbcInterface.java) add_jar(JdbcInterface ${JAVA_SOURCES}) - install_jar(JdbcInterface DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine) INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/JavaWrappers.jar + ${CMAKE_CURRENT_BINARY_DIR}/JdbcInterface.jar DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine) add_definitions(-DJDBC_SUPPORT) ELSE() -- cgit v1.2.1 From 314f32c8c23a2be752691dfb77d929ac498a0478 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 16 May 2017 12:42:53 +0200 Subject: MDEV-5477 sphinxSE GROUP BY on multiple attributes Translate "multi:" to SPH_GROUPBY_MULTIPLE. SPH_GROUPBY_ATTRPAIR is still not supported. --- storage/sphinx/ha_sphinx.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index 3f035c7bc7d..c196295f69e 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -216,7 +216,9 @@ enum ESphGroupBy SPH_GROUPBY_WEEK = 1, ///< group by week SPH_GROUPBY_MONTH = 2, ///< group by month SPH_GROUPBY_YEAR = 3, ///< group by year - SPH_GROUPBY_ATTR = 4 ///< group by attribute value + SPH_GROUPBY_ATTR = 4, ///< group by attribute value + SPH_GROUPBY_ATTRPAIR = 5, ///< group by sequential attrs pair (rendered redundant by 64bit attrs support; removed) + SPH_GROUPBY_MULTIPLE = 6 ///< group by on multiple attribute values }; /// known attribute types @@ -1555,6 +1557,7 @@ bool CSphSEQuery::ParseField ( char * sField ) { "month:", SPH_GROUPBY_MONTH }, { "year:", SPH_GROUPBY_YEAR }, { "attr:", SPH_GROUPBY_ATTR }, + { "multi:", SPH_GROUPBY_MULTIPLE } }; int i; -- cgit v1.2.1 From 24ff1793114fa9f15d493c26d879000307e6f00d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 16 May 2017 13:53:15 +0300 Subject: 5.6.36 --- storage/perfschema/pfs.cc | 3 +- storage/perfschema/pfs_digest.cc | 5 ++- storage/perfschema/unittest/pfs-t.cc | 46 ++++++++++++++++------ storage/perfschema/unittest/pfs_account-oom-t.cc | 4 +- storage/perfschema/unittest/pfs_connect_attr-t.cc | 4 +- storage/perfschema/unittest/pfs_host-oom-t.cc | 4 +- storage/perfschema/unittest/pfs_instr-oom-t.cc | 29 ++++++++++++-- storage/perfschema/unittest/pfs_instr-t.cc | 9 ++--- .../perfschema/unittest/pfs_instr_class-oom-t.cc | 4 +- storage/perfschema/unittest/pfs_instr_class-t.cc | 4 +- storage/perfschema/unittest/pfs_misc-t.cc | 4 +- storage/perfschema/unittest/pfs_timer-t.cc | 4 +- storage/perfschema/unittest/pfs_user-oom-t.cc | 4 +- 13 files changed, 85 insertions(+), 39 deletions(-) (limited to 'storage') diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc index 3bbc0a4602e..18c58294b5c 100644 --- a/storage/perfschema/pfs.cc +++ b/storage/perfschema/pfs.cc @@ -3951,9 +3951,11 @@ static PSI_file* end_file_open_wait_v1(PSI_file_locker *locker, switch (state->m_operation) { case PSI_FILE_STAT: + case PSI_FILE_RENAME: break; case PSI_FILE_STREAM_OPEN: case PSI_FILE_CREATE: + case PSI_FILE_OPEN: if (result != NULL) { PFS_file_class *klass= reinterpret_cast (state->m_class); @@ -3964,7 +3966,6 @@ static PSI_file* 
end_file_open_wait_v1(PSI_file_locker *locker, state->m_file= reinterpret_cast (pfs_file); } break; - case PSI_FILE_OPEN: default: DBUG_ASSERT(false); break; diff --git a/storage/perfschema/pfs_digest.cc b/storage/perfschema/pfs_digest.cc index 5886c379b2f..3b5f1d35862 100644 --- a/storage/perfschema/pfs_digest.cc +++ b/storage/perfschema/pfs_digest.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -255,10 +255,11 @@ search: if (safe_index == 0) { /* Record [0] is reserved. */ - safe_index= 1; + continue; } /* Add a new record in digest stat array. */ + DBUG_ASSERT(safe_index < digest_max); pfs= &statements_digest_stat_array[safe_index]; if (pfs->m_lock.is_free()) diff --git a/storage/perfschema/unittest/pfs-t.cc b/storage/perfschema/unittest/pfs-t.cc index 8b24cee9eaf..7861c74a401 100644 --- a/storage/perfschema/unittest/pfs-t.cc +++ b/storage/perfschema/unittest/pfs-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -27,6 +27,8 @@ #include "stub_print_error.h" #include "stub_pfs_defaults.h" +void unload_performance_schema(); + /* test helpers, to simulate the setup */ void setup_thread(PSI_thread *t, bool enabled) @@ -126,7 +128,7 @@ void test_bootstrap() psi_2= boot->get_interface(PSI_VERSION_2); ok(psi_2 == NULL, "version 2"); - shutdown_performance_schema(); + unload_performance_schema(); } /* @@ -183,6 +185,28 @@ PSI * load_perfschema() return (PSI*) psi; } +void unload_performance_schema() +{ + cleanup_table_share(); + cleanup_instruments(); + cleanup_sync_class(); + cleanup_thread_class(); + cleanup_table_share(); + cleanup_file_class(); + cleanup_stage_class(); + cleanup_statement_class(); + cleanup_socket_class(); + cleanup_events_waits_history_long(); + cleanup_events_stages_history_long(); + cleanup_events_statements_history_long(); + cleanup_table_share_hash(); + cleanup_file_hash(); + cleanup_digest(); + PFS_atomic::cleanup(); + + shutdown_performance_schema(); +} + void test_bad_registration() { PSI *psi; @@ -581,8 +605,7 @@ void test_bad_registration() psi->register_socket("X", bad_socket_3, 1); ok(dummy_socket_key == 2, "assigned key"); - - shutdown_performance_schema(); + unload_performance_schema(); } void test_init_disabled() @@ -1016,7 +1039,7 @@ void test_init_disabled() socket_A1= psi->init_socket(99, NULL, NULL, 0); ok(socket_A1 == NULL, "broken socket key not instrumented"); - shutdown_performance_schema(); + unload_performance_schema(); } void test_locker_disabled() @@ -1321,8 +1344,9 @@ void test_locker_disabled() ok(socket_A1 != NULL, "instrumented"); /* Socket thread owner has not been set */ socket_locker= psi->start_socket_wait(&socket_state, socket_A1, PSI_SOCKET_SEND, 12, "foo.cc", 12); - ok(socket_locker == NULL, "no locker (no thread owner)"); - + ok(socket_locker != NULL, "locker (owner not used)"); + psi->end_socket_wait(socket_locker, 10); + /* Pretend the running thread is not instrumented */ /* ---------------------------------------------- */ @@ -1350,7 +1374,7 @@ void test_locker_disabled() socket_locker= 
psi->start_socket_wait(&socket_state, socket_A1, PSI_SOCKET_SEND, 12, "foo.cc", 12); ok(socket_locker == NULL, "no locker"); - shutdown_performance_schema(); + unload_performance_schema(); } void test_file_instrumentation_leak() @@ -1437,7 +1461,7 @@ void test_file_instrumentation_leak() file_locker= psi->get_thread_file_descriptor_locker(&file_state, (File) 12, PSI_FILE_WRITE); ok(file_locker == NULL, "no locker, no leak"); - shutdown_performance_schema(); + unload_performance_schema(); } void test_enabled() @@ -1473,7 +1497,7 @@ void test_enabled() { & cond_key_B, "C-B", 0} }; - shutdown_performance_schema(); + unload_performance_schema(); #endif } @@ -1642,6 +1666,6 @@ int main(int, char **) plan(216); MY_INIT("pfs-t"); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_account-oom-t.cc b/storage/perfschema/unittest/pfs_account-oom-t.cc index ef8a7c195e0..fccb9ef2e43 100644 --- a/storage/perfschema/unittest/pfs_account-oom-t.cc +++ b/storage/perfschema/unittest/pfs_account-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -111,6 +111,6 @@ int main(int, char **) plan(6); MY_INIT("pfs_account-oom-t"); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_connect_attr-t.cc b/storage/perfschema/unittest/pfs_connect_attr-t.cc index 7bee1d063a1..ecf790eeede 100644 --- a/storage/perfschema/unittest/pfs_connect_attr-t.cc +++ b/storage/perfschema/unittest/pfs_connect_attr-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -341,5 +341,5 @@ int main(int, char **) diag("skipping the cp1251 tests : missing character set"); plan(59 + (cs_cp1251 ? 10 : 0)); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_host-oom-t.cc b/storage/perfschema/unittest/pfs_host-oom-t.cc index 6627dd48df2..95a375c4d10 100644 --- a/storage/perfschema/unittest/pfs_host-oom-t.cc +++ b/storage/perfschema/unittest/pfs_host-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -111,6 +111,6 @@ int main(int, char **) plan(6); MY_INIT("pfs_host-oom-t"); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_instr-oom-t.cc b/storage/perfschema/unittest/pfs_instr-oom-t.cc index 161060aa6bf..f58d9026c7d 100644 --- a/storage/perfschema/unittest/pfs_instr-oom-t.cc +++ b/storage/perfschema/unittest/pfs_instr-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -355,6 +355,11 @@ void test_oom() rc= init_instruments(& param); ok(rc == 1, "oom (per thread wait)"); + cleanup_sync_class(); + cleanup_thread_class(); + cleanup_file_class(); + cleanup_instruments(); + param.m_enabled= true; param.m_mutex_class_sizing= 0; param.m_rwlock_class_sizing= 0; @@ -432,6 +437,8 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (thread stages history sizing)"); + + cleanup_thread_class(); cleanup_instruments(); param.m_enabled= true; @@ -467,6 +474,9 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (per thread stages)"); + + cleanup_stage_class(); + cleanup_thread_class(); cleanup_instruments(); param.m_enabled= true; @@ -502,6 +512,8 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (thread statements history sizing)"); + + cleanup_thread_class(); cleanup_instruments(); param.m_enabled= true; @@ -537,6 +549,9 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (per thread statements)"); + + cleanup_statement_class(); + cleanup_thread_class(); cleanup_instruments(); param.m_enabled= true; @@ -572,6 +587,8 @@ void test_oom() init_event_name_sizing(& param); rc= init_instruments(& param); ok(rc == 1, "oom (global waits)"); + + cleanup_sync_class(); cleanup_instruments(); param.m_enabled= true; @@ -609,8 +626,10 @@ void test_oom() ok(rc == 0, "init stage class"); rc= init_instruments(& param); ok(rc == 1, "oom (global stages)"); - cleanup_instruments(); + + cleanup_sync_class(); cleanup_stage_class(); + cleanup_instruments(); param.m_enabled= true; param.m_mutex_class_sizing= 10; @@ -647,8 +666,10 @@ void test_oom() ok(rc == 0, "init statement class"); rc= init_instruments(& param); ok(rc == 1, "oom (global statements)"); - cleanup_instruments(); + + cleanup_sync_class(); cleanup_statement_class(); + cleanup_instruments(); } void do_all_tests() @@ -665,6 +686,6 @@ int main(int, char **) plan(20); MY_INIT("pfs_instr-oom-t"); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_instr-t.cc b/storage/perfschema/unittest/pfs_instr-t.cc index 3cc4c48388d..f457bf1f811 100644 --- a/storage/perfschema/unittest/pfs_instr-t.cc +++ b/storage/perfschema/unittest/pfs_instr-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -23,10 +23,11 @@ #include +PFS_global_param param; + void test_no_instruments() { int rc; - PFS_global_param param; memset(& param, 0xFF, sizeof(param)); param.m_enabled= true; @@ -86,7 +87,6 @@ void test_no_instances() PFS_file *file; PFS_socket *socket; PFS_table *table; - PFS_global_param param; memset(& param, 0xFF, sizeof(param)); param.m_enabled= true; @@ -227,7 +227,6 @@ void test_with_instances() PFS_socket *socket_2; PFS_table *table_1; PFS_table *table_2; - PFS_global_param param; memset(& param, 0xFF, sizeof(param)); param.m_enabled= true; @@ -417,6 +416,6 @@ int main(int, char **) plan(103); MY_INIT("pfs_instr-t"); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_instr_class-oom-t.cc b/storage/perfschema/unittest/pfs_instr_class-oom-t.cc index e98250d8567..9b01044e31f 100644 --- a/storage/perfschema/unittest/pfs_instr_class-oom-t.cc +++ b/storage/perfschema/unittest/pfs_instr_class-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -67,6 +67,6 @@ int main(int, char **) plan(9); MY_INIT("pfs_instr_info-oom-t"); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_instr_class-t.cc b/storage/perfschema/unittest/pfs_instr_class-t.cc index ee483312e3c..b58ae2e371d 100644 --- a/storage/perfschema/unittest/pfs_instr_class-t.cc +++ b/storage/perfschema/unittest/pfs_instr_class-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -673,6 +673,6 @@ int main(int, char **) plan(181); MY_INIT("pfs_instr_info-t"); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_misc-t.cc b/storage/perfschema/unittest/pfs_misc-t.cc index a0fff2f593c..d960b5fe0a0 100644 --- a/storage/perfschema/unittest/pfs_misc-t.cc +++ b/storage/perfschema/unittest/pfs_misc-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -66,6 +66,6 @@ int main(int, char **) plan(2); MY_INIT("pfs_misc-t"); do_all_tests(); - return exit_status(); + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_timer-t.cc b/storage/perfschema/unittest/pfs_timer-t.cc index 9c9ae0f75f1..7ea1326365e 100644 --- a/storage/perfschema/unittest/pfs_timer-t.cc +++ b/storage/perfschema/unittest/pfs_timer-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -117,6 +117,6 @@ int main(int, char **) plan(5); MY_INIT("pfs_timer-t"); do_all_tests(); - return 0; + return (exit_status()); } diff --git a/storage/perfschema/unittest/pfs_user-oom-t.cc b/storage/perfschema/unittest/pfs_user-oom-t.cc index 0e4c5eb3411..87558e9e813 100644 --- a/storage/perfschema/unittest/pfs_user-oom-t.cc +++ b/storage/perfschema/unittest/pfs_user-oom-t.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -110,6 +110,6 @@ int main(int, char **) plan(6); MY_INIT("pfs_user-oom-t"); do_all_tests(); - return 0; + return (exit_status()); } -- cgit v1.2.1 From 360a4a037271d65ab6471f7ab3f9b6a893d90a31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 16 May 2017 14:16:11 +0300 Subject: 5.6.36-82.0 --- storage/xtradb/btr/btr0sea.cc | 2 +- storage/xtradb/buf/buf0buf.cc | 2 +- storage/xtradb/buf/buf0dblwr.cc | 4 +- storage/xtradb/buf/buf0dump.cc | 4 +- storage/xtradb/buf/buf0flu.cc | 38 +++- storage/xtradb/dict/dict0boot.cc | 7 +- storage/xtradb/dict/dict0dict.cc | 21 +- storage/xtradb/dict/dict0stats.cc | 15 +- storage/xtradb/dict/dict0stats_bg.cc | 4 +- storage/xtradb/fil/fil0fil.cc | 24 ++- storage/xtradb/fts/fts0que.cc | 20 ++ storage/xtradb/handler/ha_innodb.cc | 179 +++++++++------- storage/xtradb/handler/ha_innodb.h | 6 +- storage/xtradb/handler/handler0alter.cc | 6 +- storage/xtradb/include/buf0dblwr.h | 4 +- storage/xtradb/include/dict0dict.h | 11 +- storage/xtradb/include/dict0dict.ic | 9 +- storage/xtradb/include/fil0fil.h | 14 +- storage/xtradb/include/ha0ha.h | 6 +- storage/xtradb/include/log0online.h | 2 +- storage/xtradb/include/os0file.h | 335 ++++++++++++++++++++++++----- storage/xtradb/include/os0file.ic | 215 ++++++++++++++++--- storage/xtradb/include/row0mysql.h | 18 +- storage/xtradb/include/srv0srv.h | 17 +- storage/xtradb/include/srv0start.h | 4 +- storage/xtradb/include/trx0xa.h | 15 +- storage/xtradb/include/univ.i | 12 +- storage/xtradb/log/log0log.cc | 19 +- storage/xtradb/log/log0online.cc | 62 +++--- storage/xtradb/log/log0recv.cc | 4 +- storage/xtradb/os/os0file.cc | 240 +++++++++++++-------- storage/xtradb/pars/pars0opt.cc | 6 +- storage/xtradb/pars/pars0pars.cc | 3 +- storage/xtradb/row/row0log.cc | 17 +- storage/xtradb/row/row0merge.cc | 35 +++- storage/xtradb/row/row0sel.cc | 361 +++++++++++++++++++++++++++----- storage/xtradb/srv/srv0srv.cc | 41 +++- storage/xtradb/srv/srv0start.cc | 66 ++++-- storage/xtradb/trx/trx0purge.cc | 5 +- storage/xtradb/trx/trx0rec.cc | 3 +- storage/xtradb/trx/trx0roll.cc | 4 +- storage/xtradb/trx/trx0sys.cc | 18 +- 42 files changed, 1419 insertions(+), 459 deletions(-) (limited to 'storage') diff --git a/storage/xtradb/btr/btr0sea.cc b/storage/xtradb/btr/btr0sea.cc index 78cb9dfd6e0..2ed383b0dcb 100644 --- a/storage/xtradb/btr/btr0sea.cc +++ b/storage/xtradb/btr/btr0sea.cc @@ -199,7 +199,7 @@ btr_search_sys_create( &btr_search_latch_arr[i], SYNC_SEARCH_SYS); btr_search_sys->hash_tables[i] - = ha_create(hash_size, 0, MEM_HEAP_FOR_BTR_SEARCH, 0); + = ib_create(hash_size, 0, MEM_HEAP_FOR_BTR_SEARCH, 0); #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG btr_search_sys->hash_tables[i]->adaptive 
= TRUE; diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 978d94f07ec..b946b1dd4e4 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -1413,7 +1413,7 @@ buf_pool_init_instance( ut_a(srv_n_page_hash_locks != 0); ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS); - buf_pool->page_hash = ha_create(2 * buf_pool->curr_size, + buf_pool->page_hash = ib_create(2 * buf_pool->curr_size, srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH, SYNC_BUF_PAGE_HASH); diff --git a/storage/xtradb/buf/buf0dblwr.cc b/storage/xtradb/buf/buf0dblwr.cc index 3c12d6da73f..46296814bb6 100644 --- a/storage/xtradb/buf/buf0dblwr.cc +++ b/storage/xtradb/buf/buf0dblwr.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -355,7 +355,7 @@ recovery, this function loads the pages from double write buffer into memory. */ void buf_dblwr_init_or_load_pages( /*=========================*/ - os_file_t file, + pfs_os_file_t file, char* path, bool load_corrupt_pages) { diff --git a/storage/xtradb/buf/buf0dump.cc b/storage/xtradb/buf/buf0dump.cc index 88ff4399115..8ecdedc613b 100644 --- a/storage/xtradb/buf/buf0dump.cc +++ b/storage/xtradb/buf/buf0dump.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2011, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -597,6 +597,7 @@ DECLARE_THREAD(buf_dump_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_ad(!srv_read_only_mode); srv_buf_dump_thread_active = TRUE; @@ -632,6 +633,7 @@ DECLARE_THREAD(buf_dump_thread)( srv_buf_dump_thread_active = FALSE; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc index 5dd2efcf0c3..36e24fb3a1b 100644 --- a/storage/xtradb/buf/buf0flu.cc +++ b/storage/xtradb/buf/buf0flu.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -356,6 +356,7 @@ buf_flush_insert_into_flush_list( buf_block_t* block, /*!< in/out: block which is modified */ lsn_t lsn) /*!< in: oldest modification */ { + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE); ut_ad(log_flush_order_mutex_own()); ut_ad(mutex_own(&block->mutex)); @@ -414,6 +415,7 @@ buf_flush_insert_sorted_into_flush_list( buf_page_t* prev_b; buf_page_t* b; + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE); ut_ad(log_flush_order_mutex_own()); ut_ad(mutex_own(&block->mutex)); ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE); @@ -724,6 +726,7 @@ buf_flush_write_complete( buf_page_set_io_fix(bpage, BUF_IO_NONE); buf_pool->n_flush[flush_type]--; + ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX); /* fprintf(stderr, "n pending flush %lu\n", buf_pool->n_flush[flush_type]); */ @@ -1061,6 +1064,7 @@ buf_flush_page( } ++buf_pool->n_flush[flush_type]; + ut_ad(buf_pool->n_flush[flush_type] != 0); mutex_exit(&buf_pool->flush_state_mutex); @@ -2206,13 +2210,14 @@ Clears up tail of the LRU lists: * Flush dirty pages at the tail of LRU to the disk The depth to which we scan each buffer pool is controlled by dynamic config parameter innodb_LRU_scan_depth. -@return number of pages flushed */ +@return number of flushed and evicted pages */ UNIV_INTERN ulint buf_flush_LRU_tail(void) /*====================*/ { ulint total_flushed = 0; + ulint total_evicted = 0; ulint start_time = ut_time_ms(); ulint scan_depth[MAX_BUFFER_POOLS]; ulint requested_pages[MAX_BUFFER_POOLS]; @@ -2278,6 +2283,7 @@ buf_flush_LRU_tail(void) limited_scan[i] = (previous_evicted[i] > n.evicted); previous_evicted[i] = n.evicted; + total_evicted += n.evicted; requested_pages[i] += lru_chunk_size; @@ -2310,7 +2316,7 @@ buf_flush_LRU_tail(void) MONITOR_LRU_BATCH_PAGES, total_flushed); } - return(total_flushed); + return(total_flushed + total_evicted); } /*********************************************************************//** @@ -2604,6 +2610,23 @@ buf_get_total_free_list_length(void) return result; } +/** Returns the aggregate LRU list length over all buffer pool instances. +@return total LRU list length. */ +MY_ATTRIBUTE((warn_unused_result)) +static +ulint +buf_get_total_LRU_list_length(void) +{ + ulint result = 0; + + for (ulint i = 0; i < srv_buf_pool_instances; i++) { + + result += UT_LIST_GET_LEN(buf_pool_from_array(i)->LRU); + } + + return result; +} + /*********************************************************************//** Adjust the desired page cleaner thread sleep time for LRU flushes. 
*/ MY_ATTRIBUTE((nonnull)) @@ -2616,8 +2639,9 @@ page_cleaner_adapt_lru_sleep_time( ulint lru_n_flushed) /*!< in: number of flushed in previous batch */ { - ulint free_len = buf_get_total_free_list_length(); - ulint max_free_len = srv_LRU_scan_depth * srv_buf_pool_instances; + ulint free_len = buf_get_total_free_list_length(); + ulint max_free_len = ut_min(buf_get_total_LRU_list_length(), + srv_LRU_scan_depth * srv_buf_pool_instances); if (free_len < max_free_len / 100 && lru_n_flushed) { @@ -2629,7 +2653,7 @@ page_cleaner_adapt_lru_sleep_time( /* Free lists filled more than 20% or no pages flushed in previous batch, sleep a bit more */ - *lru_sleep_time += 50; + *lru_sleep_time += 1; if (*lru_sleep_time > srv_cleaner_max_lru_time) *lru_sleep_time = srv_cleaner_max_lru_time; } else if (free_len < max_free_len / 20 && *lru_sleep_time >= 50) { @@ -2676,6 +2700,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ulint next_loop_time = ut_time_ms() + 1000; ulint n_flushed = 0; ulint last_activity = srv_get_activity_count(); @@ -2812,6 +2837,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( thread_exit: buf_page_cleaner_is_active = FALSE; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); diff --git a/storage/xtradb/dict/dict0boot.cc b/storage/xtradb/dict/dict0boot.cc index c0bb0298bea..4c8420252c2 100644 --- a/storage/xtradb/dict/dict0boot.cc +++ b/storage/xtradb/dict/dict0boot.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -464,7 +464,10 @@ dict_boot(void) dberr_t err = DB_SUCCESS; - if (srv_read_only_mode && !ibuf_is_empty()) { + /** If innodb_force_recovery is set to 6 then allow + the server to start even though ibuf is not empty. 
*/ + if (srv_force_recovery != SRV_FORCE_NO_LOG_REDO + && srv_read_only_mode && !ibuf_is_empty()) { ib_logf(IB_LOG_LEVEL_ERROR, "Change buffer must be empty when --innodb-read-only " diff --git a/storage/xtradb/dict/dict0dict.cc b/storage/xtradb/dict/dict0dict.cc index bd62744a49c..eec681c4005 100644 --- a/storage/xtradb/dict/dict0dict.cc +++ b/storage/xtradb/dict/dict0dict.cc @@ -835,16 +835,24 @@ dict_index_get_nth_col_or_prefix_pos( /*=================================*/ const dict_index_t* index, /*!< in: index */ ulint n, /*!< in: column number */ - ibool inc_prefix) /*!< in: TRUE=consider + ibool inc_prefix, /*!< in: TRUE=consider column prefixes too */ + ulint* prefix_col_pos) /*!< out: col num if prefix */ { const dict_field_t* field; const dict_col_t* col; ulint pos; ulint n_fields; + ulint prefixed_pos_dummy; ut_ad(index); ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); + ut_ad((inc_prefix && !prefix_col_pos) || (!inc_prefix)); + + if (!prefix_col_pos) { + prefix_col_pos = &prefixed_pos_dummy; + } + *prefix_col_pos = ULINT_UNDEFINED; col = dict_table_get_nth_col(index->table, n); @@ -858,10 +866,11 @@ dict_index_get_nth_col_or_prefix_pos( for (pos = 0; pos < n_fields; pos++) { field = dict_index_get_nth_field(index, pos); - if (col == field->col - && (inc_prefix || field->prefix_len == 0)) { - - return(pos); + if (col == field->col) { + *prefix_col_pos = pos; + if (inc_prefix || field->prefix_len == 0) { + return(pos); + } } } @@ -1005,7 +1014,7 @@ dict_table_get_nth_col_pos( ulint n) /*!< in: column number */ { return(dict_index_get_nth_col_pos(dict_table_get_first_index(table), - n)); + n, NULL)); } /********************************************************************//** diff --git a/storage/xtradb/dict/dict0stats.cc b/storage/xtradb/dict/dict0stats.cc index b0ba98308be..55a26268579 100644 --- a/storage/xtradb/dict/dict0stats.cc +++ b/storage/xtradb/dict/dict0stats.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2009, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2009, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -690,6 +690,9 @@ dict_stats_copy( && (src_idx = dict_table_get_next_index(src_idx)))) { if (dict_stats_should_ignore_index(dst_idx)) { + if (!(dst_idx->type & DICT_FTS)) { + dict_stats_empty_index(dst_idx); + } continue; } @@ -1096,10 +1099,10 @@ dict_stats_analyze_index_level( leaf-level delete marks because delete marks on non-leaf level do not make sense. */ - if (level == 0 && srv_stats_include_delete_marked? 0: + if (level == 0 && (srv_stats_include_delete_marked ? 0: rec_get_deleted_flag( rec, - page_is_comp(btr_pcur_get_page(&pcur)))) { + page_is_comp(btr_pcur_get_page(&pcur))))) { if (rec_is_last_on_page && !prev_rec_is_copied @@ -3229,12 +3232,6 @@ dict_stats_update( dict_table_stats_lock(table, RW_X_LATCH); - /* Initialize all stats to dummy values before - copying because dict_stats_table_clone_create() does - skip corrupted indexes so our dummy object 't' may - have less indexes than the real object 'table'. 
*/ - dict_stats_empty_table(table); - dict_stats_copy(table, t); dict_stats_assert_initialized(table); diff --git a/storage/xtradb/dict/dict0stats_bg.cc b/storage/xtradb/dict/dict0stats_bg.cc index 6f01c379776..975c8a50803 100644 --- a/storage/xtradb/dict/dict0stats_bg.cc +++ b/storage/xtradb/dict/dict0stats_bg.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -334,6 +334,7 @@ DECLARE_THREAD(dict_stats_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_a(!srv_read_only_mode); srv_dict_stats_thread_active = TRUE; @@ -359,6 +360,7 @@ DECLARE_THREAD(dict_stats_thread)( srv_dict_stats_thread_active = FALSE; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit instead of return(). */ os_thread_exit(NULL); diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 57e415ae939..d701bddebfc 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -146,7 +146,7 @@ struct fil_node_t { belongs */ char* name; /*!< path to the file */ ibool open; /*!< TRUE if file open */ - os_file_t handle; /*!< OS handle to the file, if file open */ + pfs_os_file_t handle; /*!< OS handle to the file, if file open */ os_event_t sync_event;/*!< Condition event to group and serialize calls to fsync */ ibool is_raw_disk;/*!< TRUE if the 'file' is actually a raw @@ -317,7 +317,8 @@ initialized. */ static fil_system_t* fil_system = NULL; /** Determine if (i) is a user tablespace id or not. */ -# define fil_is_user_tablespace_id(i) ((i) > srv_undo_tablespaces_open) +# define fil_is_user_tablespace_id(i) (i != 0 \ + && !srv_is_undo_tablespace(i)) /** Determine if user has explicitly disabled fsync(). 
*/ #ifndef __WIN__ @@ -2084,7 +2085,7 @@ UNIV_INTERN const char* fil_read_first_page( /*================*/ - os_file_t data_file, /*!< in: open data file */ + pfs_os_file_t data_file, /*!< in: open data file */ ibool one_read_already, /*!< in: TRUE if min and max parameters below already contain sensible data */ @@ -3407,7 +3408,7 @@ fil_open_linked_file( /*===============*/ const char* tablename, /*!< in: database/tablename */ char** remote_filepath,/*!< out: remote filepath */ - os_file_t* remote_file) /*!< out: remote file handle */ + pfs_os_file_t* remote_file) /*!< out: remote file handle */ { ibool success; @@ -3467,7 +3468,8 @@ fil_create_new_single_table_tablespace( tablespace file in pages, must be >= FIL_IBD_FILE_INITIAL_SIZE */ { - os_file_t file; + pfs_os_file_t file; + ibool ret; dberr_t err; byte* buf2; @@ -5206,8 +5208,8 @@ retry: os_offset_t end_offset = (size_after_extend - file_start_page_no) * page_size; - success = (posix_fallocate(node->handle, start_offset, - end_offset) == 0); + success = (os_file_allocate(node->handle, start_offset, + end_offset) == 0); if (!success) { ib_logf(IB_LOG_LEVEL_ERROR, @@ -5960,7 +5962,7 @@ fil_flush( { fil_space_t* space; fil_node_t* node; - os_file_t file; + pfs_os_file_t file; mutex_enter(&fil_system->mutex); @@ -6319,7 +6321,7 @@ fil_buf_block_init( } struct fil_iterator_t { - os_file_t file; /*!< File handle */ + pfs_os_file_t file; /*!< File handle */ const char* filepath; /*!< File path name */ os_offset_t start; /*!< From where to start */ os_offset_t end; /*!< Where to stop */ @@ -6454,7 +6456,7 @@ fil_tablespace_iterate( PageCallback& callback) { dberr_t err; - os_file_t file; + pfs_os_file_t file; char* filepath; ut_a(n_io_buffers > 0); diff --git a/storage/xtradb/fts/fts0que.cc b/storage/xtradb/fts/fts0que.cc index 2c44a21a8f2..d926d9b0675 100644 --- a/storage/xtradb/fts/fts0que.cc +++ b/storage/xtradb/fts/fts0que.cc @@ -953,6 +953,18 @@ fts_query_free_doc_ids( query->total_size -= SIZEOF_RBT_CREATE; } +/** +Free the query intersection +@param[in] query query instance */ +static +void +fts_query_free_intersection( + fts_query_t* query) +{ + fts_query_free_doc_ids(query, query->intersection); + query->intersection = NULL; +} + /*******************************************************************//** Add the word to the documents "list" of matching words from the query. We make a copy of the word from the query heap. */ @@ -1311,6 +1323,7 @@ fts_query_intersect( /* error is passed by 'query->error' */ if (query->error != DB_SUCCESS) { ut_ad(query->error == DB_FTS_EXCEED_RESULT_CACHE_LIMIT); + fts_query_free_intersection(query); return(query->error); } @@ -1339,6 +1352,8 @@ fts_query_intersect( ut_a(!query->multi_exist || (query->multi_exist && rbt_size(query->doc_ids) <= n_doc_ids)); + } else if (query->intersection != NULL) { + fts_query_free_intersection(query); } } @@ -1557,6 +1572,11 @@ fts_merge_doc_ids( query, ranking->doc_id, ranking->rank); if (query->error != DB_SUCCESS) { + if (query->intersection != NULL) + { + ut_a(query->oper == FTS_EXIST); + fts_query_free_intersection(query); + } DBUG_RETURN(query->error); } diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 05a77436a12..d1f6c3b499a 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 2000, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. @@ -701,6 +701,32 @@ innobase_is_fake_change( THD* thd); /*!< in: MySQL thread handle of the user for whom the transaction is being committed */ +/** Empty free list algorithm. +Checks if buffer pool is big enough to enable backoff algorithm. +InnoDB empty free list algorithm backoff requires free pages +from LRU for the best performance. +buf_LRU_buf_pool_running_out cancels query if 1/4 of +buffer pool belongs to LRU or freelist. +At the same time buf_flush_LRU_list_batch +keeps up to BUF_LRU_MIN_LEN in LRU. +In order to avoid deadlock baclkoff requires buffer pool +to be at least 4*BUF_LRU_MIN_LEN, +but flush peformance is bad because of trashing +and additional BUF_LRU_MIN_LEN pages are requested. +@param[in] algorithm desired algorithm from srv_empty_free_list_t +@return true if it's possible to enable backoff. */ +static inline +bool +innodb_empty_free_list_algorithm_allowed( + srv_empty_free_list_t algorithm) +{ + long long buf_pool_pages = srv_buf_pool_size / srv_page_size + / srv_buf_pool_instances; + + return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1) + || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF); +} + /** Get the list of foreign keys referencing a specified table table. @param thd The thread handle @@ -970,6 +996,10 @@ static SHOW_VAR innodb_status_variables[]= { (char*) &export_vars.innodb_x_lock_spin_rounds, SHOW_LONGLONG}, {"x_lock_spin_waits", (char*) &export_vars.innodb_x_lock_spin_waits, SHOW_LONGLONG}, + {"secondary_index_triggered_cluster_reads", + (char*) &export_vars.innodb_sec_rec_cluster_reads, SHOW_LONG}, + {"secondary_index_triggered_cluster_reads_avoided", + (char*) &export_vars.innodb_sec_rec_cluster_reads_avoided, SHOW_LONG}, {NullS, NullS, SHOW_LONG} }; @@ -1384,28 +1414,6 @@ innobase_drop_zip_dict( ulint* name_len); /*!< in/out: zip dictionary name length */ -/*************************************************************//** -Checks if buffer pool is big enough to enable backoff algorithm. -InnoDB empty free list algorithm backoff requires free pages -from LRU for the best performance. -buf_LRU_buf_pool_running_out cancels query if 1/4 of -buffer pool belongs to LRU or freelist. -At the same time buf_flush_LRU_list_batch -keeps up to BUF_LRU_MIN_LEN in LRU. -In order to avoid deadlock baclkoff requires buffer pool -to be at least 4*BUF_LRU_MIN_LEN, -but flush peformance is bad because of trashing -and additional BUF_LRU_MIN_LEN pages are requested. -@return true if it's possible to enable backoff. */ -static -bool -innodb_empty_free_list_algorithm_backoff_allowed( - srv_empty_free_list_t - algorithm, /*!< in: desired algorithm - from srv_empty_free_list_t */ - long long buf_pool_pages); /*!< in: total number - of pages inside buffer pool */ - /*************************************************************//** Removes old archived transaction log files. 
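/* Worked example of the size check in innodb_empty_free_list_algorithm_allowed()
   above.  Assuming BUF_LRU_MIN_LEN is 256 pages and the default 16KiB page
   size (both values are assumptions here, not taken from this patch), backoff
   needs at least (4 + 1) * 256 = 1280 pages per buffer pool instance, i.e.
   1280 * 16KiB = 20MiB per instance -- which is what the "requires at least
   20MB buffer pool instances" warning later in this patch refers to. */
#include <cstdio>

int main()
{
    const long long buf_lru_min_len = 256;        /* assumed value        */
    const long long page_size       = 16 * 1024;  /* assumed 16KiB pages  */

    const long long min_pages = buf_lru_min_len * (4 + 1);   /* 1280      */
    const long long min_bytes = min_pages * page_size;       /* 20MiB     */

    /* Per-instance pages for a 128MiB pool split into 8 instances: below
       the threshold, so backoff would be refused for this configuration. */
    const long long pool_bytes = 128LL * 1024 * 1024;
    const long long instances  = 8;
    const long long pages_per_instance = pool_bytes / page_size / instances;

    std::printf("need >= %lld pages (%lld bytes) per instance, have %lld\n",
                min_pages, min_bytes, pages_per_instance);
    return 0;
}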
@return true on error */ @@ -3446,7 +3454,8 @@ innobase_init( innobase_hton->flush_logs = innobase_flush_logs; innobase_hton->show_status = innobase_show_status; innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS | - HTON_SUPPORTS_ONLINE_BACKUPS | HTON_SUPPORTS_FOREIGN_KEYS; + HTON_SUPPORTS_ONLINE_BACKUPS | HTON_SUPPORTS_FOREIGN_KEYS | + HTON_SUPPORTS_COMPRESSED_COLUMNS; innobase_hton->release_temporary_latches = innobase_release_temporary_latches; @@ -3915,10 +3924,9 @@ innobase_change_buffering_inited_ok: innobase_commit_concurrency_init_default(); /* Do not enable backoff algorithm for small buffer pool. */ - if (!innodb_empty_free_list_algorithm_backoff_allowed( + if (!innodb_empty_free_list_algorithm_allowed( static_cast( - srv_empty_free_list_algorithm), - innobase_buffer_pool_size / srv_page_size)) { + srv_empty_free_list_algorithm))) { sql_print_information( "InnoDB: innodb_empty_free_list_algorithm " "has been changed to legacy " @@ -4160,6 +4168,10 @@ innobase_create_zip_dict( if (UNIV_UNLIKELY(high_level_read_only)) { DBUG_RETURN(HA_CREATE_ZIP_DICT_READ_ONLY); } + + if (UNIV_UNLIKELY(THDVAR(NULL, fake_changes))) { + DBUG_RETURN(HA_CREATE_ZIP_DICT_FAKE_CHANGES); + } if (UNIV_UNLIKELY(*name_len > ZIP_DICT_MAX_NAME_LENGTH)) { *name_len = ZIP_DICT_MAX_NAME_LENGTH; @@ -4178,6 +4190,15 @@ innobase_create_zip_dict( case DB_DUPLICATE_KEY: result = HA_CREATE_ZIP_DICT_ALREADY_EXISTS; break; + case DB_OUT_OF_MEMORY: + result = HA_CREATE_ZIP_DICT_OUT_OF_MEMORY; + break; + case DB_OUT_OF_FILE_SPACE: + result = HA_CREATE_ZIP_DICT_OUT_OF_FILE_SPACE; + break; + case DB_TOO_MANY_CONCURRENT_TRXS: + result = HA_CREATE_ZIP_DICT_TOO_MANY_CONCURRENT_TRXS; + break; default: ut_ad(0); result = HA_CREATE_ZIP_DICT_UNKNOWN_ERROR; @@ -4204,6 +4225,10 @@ innobase_drop_zip_dict( DBUG_RETURN(HA_DROP_ZIP_DICT_READ_ONLY); } + if (UNIV_UNLIKELY(THDVAR(NULL, fake_changes))) { + DBUG_RETURN(HA_DROP_ZIP_DICT_FAKE_CHANGES); + } + switch (dict_drop_zip_dict(name, *name_len)) { case DB_SUCCESS: result = HA_DROP_ZIP_DICT_OK; @@ -4214,6 +4239,15 @@ innobase_drop_zip_dict( case DB_ROW_IS_REFERENCED: result = HA_DROP_ZIP_DICT_IS_REFERENCED; break; + case DB_OUT_OF_MEMORY: + result = HA_DROP_ZIP_DICT_OUT_OF_MEMORY; + break; + case DB_OUT_OF_FILE_SPACE: + result = HA_DROP_ZIP_DICT_OUT_OF_FILE_SPACE; + break; + case DB_TOO_MANY_CONCURRENT_TRXS: + result = HA_DROP_ZIP_DICT_TOO_MANY_CONCURRENT_TRXS; + break; default: ut_ad(0); result = HA_DROP_ZIP_DICT_UNKNOWN_ERROR; @@ -6032,6 +6066,8 @@ table_opened: prebuilt->default_rec = table->s->default_values; ut_ad(prebuilt->default_rec); + prebuilt->mysql_handler = this; + /* Looks like MySQL-3.23 sometimes has primary key number != 0 */ primary_key = table->s->primary_key; key_used_on_scan = primary_key; @@ -7181,9 +7217,31 @@ build_template_field( ut_a(templ->clust_rec_field_no != ULINT_UNDEFINED); if (dict_index_is_clust(index)) { + templ->rec_field_is_prefix = false; templ->rec_field_no = templ->clust_rec_field_no; + templ->rec_prefix_field_no = ULINT_UNDEFINED; } else { - templ->rec_field_no = dict_index_get_nth_col_pos(index, i); + /* If we're in a secondary index, keep track of the original + index position even if this is just a prefix index; we will use + this later to avoid a cluster index lookup in some cases.*/ + + templ->rec_field_no = dict_index_get_nth_col_pos(index, i, + &templ->rec_prefix_field_no); + templ->rec_field_is_prefix + = (templ->rec_field_no == ULINT_UNDEFINED) + && (templ->rec_prefix_field_no != ULINT_UNDEFINED); +#ifdef UNIV_DEBUG + if 
(templ->rec_prefix_field_no != ULINT_UNDEFINED) + { + const dict_field_t* field = dict_index_get_nth_field( + index, + templ->rec_prefix_field_no); + ut_ad(templ->rec_field_is_prefix + == (field->prefix_len != 0)); + } else { + ut_ad(!templ->rec_field_is_prefix); + } +#endif } if (field->real_maybe_null()) { @@ -7373,7 +7431,8 @@ ha_innobase::build_template( } else { templ->icp_rec_field_no = dict_index_get_nth_col_pos( - prebuilt->index, i); + prebuilt->index, i, + NULL); } if (dict_index_is_clust(prebuilt->index)) { @@ -7403,7 +7462,7 @@ ha_innobase::build_template( templ->icp_rec_field_no = dict_index_get_nth_col_or_prefix_pos( - prebuilt->index, i, TRUE); + prebuilt->index, i, TRUE, NULL); ut_ad(templ->icp_rec_field_no != ULINT_UNDEFINED); @@ -11288,7 +11347,8 @@ ha_innobase::delete_table( extension, in contrast to ::create */ normalize_table_name(norm_name, name); - if (srv_read_only_mode) { + if (srv_read_only_mode + || srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) { DBUG_RETURN(HA_ERR_TABLE_READONLY); } else if (row_is_magic_monitor_table(norm_name) && check_global_access(thd, PROCESS_ACL)) { @@ -15301,16 +15361,21 @@ and SYS_ZIP_DICT_COLS for all columns marked with COLUMN_FORMAT_TYPE_COMPRESSED flag and updates zip_dict_name / zip_dict_data for those which have associated compression dictionaries. + +@param part_name Full table name (including partition part). + Must be non-NULL only if called from ha_partition. */ UNIV_INTERN void -ha_innobase::update_field_defs_with_zip_dict_info() +ha_innobase::update_field_defs_with_zip_dict_info(const char *part_name) { DBUG_ENTER("update_field_defs_with_zip_dict_info"); ut_ad(!mutex_own(&dict_sys->mutex)); char norm_name[FN_REFLEN]; - normalize_table_name(norm_name, table_share->normalized_path.str); + normalize_table_name(norm_name, + part_name != 0 ? part_name : + table_share->normalized_path.str); dict_table_t* ib_table = dict_table_open_on_name( norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE); @@ -16687,15 +16752,17 @@ innodb_buffer_pool_evict_uncompressed(void) ut_ad(block->page.in_LRU_list); mutex_enter(&block->mutex); - if (!buf_LRU_free_page(&block->page, false)) { - mutex_exit(&block->mutex); - all_evicted = false; - } else { - mutex_exit(&block->mutex); + all_evicted = buf_LRU_free_page(&block->page, false); + mutex_exit(&block->mutex); + + if (all_evicted) { + mutex_enter(&buf_pool->LRU_list_mutex); - } + block = UT_LIST_GET_LAST(buf_pool->unzip_LRU); + } else { - block = prev_block; + block = prev_block; + } } mutex_exit(&buf_pool->LRU_list_mutex); @@ -17497,32 +17564,6 @@ innodb_status_output_update( os_event_set(lock_sys->timeout_event); } -/*************************************************************//** -Empty free list algorithm. -Checks if buffer pool is big enough to enable backoff algorithm. -InnoDB empty free list algorithm backoff requires free pages -from LRU for the best performance. -buf_LRU_buf_pool_running_out cancels query if 1/4 of -buffer pool belongs to LRU or freelist. -At the same time buf_flush_LRU_list_batch -keeps up to BUF_LRU_MIN_LEN in LRU. -In order to avoid deadlock baclkoff requires buffer pool -to be at least 4*BUF_LRU_MIN_LEN, -but flush peformance is bad because of trashing -and additional BUF_LRU_MIN_LEN pages are requested. -@return true if it's possible to enable backoff. 
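/* Self-contained restatement (not from the patched tree) of the secondary-index
   template classification added in build_template_field() above.  Types and
   names are hypothetical stand-ins; only the relationship between
   rec_field_no, rec_prefix_field_no and rec_field_is_prefix is taken from the
   hunk: a field is "prefix only" when no full-column position was found but a
   prefix position was. */
#include <cstddef>

static const std::size_t UNDEF = static_cast<std::size_t>(-1);

struct TemplFieldSketch {
    std::size_t rec_field_no;        /* position of the full column, or UNDEF    */
    std::size_t rec_prefix_field_no; /* position even if only a prefix, or UNDEF */
    bool        rec_field_is_prefix; /* true when only a prefix of the column
                                        is present in the secondary index        */
};

/* full_pos / prefix_pos play the role of the two outputs of
   dict_index_get_nth_col_pos() after this patch. */
TemplFieldSketch
classify_secondary_field_sketch(std::size_t full_pos, std::size_t prefix_pos)
{
    TemplFieldSketch t;
    t.rec_field_no        = full_pos;
    t.rec_prefix_field_no = prefix_pos;
    t.rec_field_is_prefix = (full_pos == UNDEF) && (prefix_pos != UNDEF);
    return t;
}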
*/ -static -bool -innodb_empty_free_list_algorithm_backoff_allowed( - srv_empty_free_list_t algorithm, /*!< in: desired algorithm - from srv_empty_free_list_t */ - long long buf_pool_pages) /*!< in: total number - of pages inside buffer pool */ -{ - return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1) - || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF); -} - /*************************************************************//** Empty free list algorithm. This function is registered as a callback with MySQL. @@ -17564,13 +17605,11 @@ innodb_srv_empty_free_list_algorithm_validate( return(1); algorithm = static_cast(algo); - if (!innodb_empty_free_list_algorithm_backoff_allowed( - algorithm, - innobase_buffer_pool_size / srv_page_size)) { + if (!innodb_empty_free_list_algorithm_allowed(algorithm)) { sql_print_warning( "InnoDB: innodb_empty_free_list_algorithm " "= 'backoff' requires at least" - " 20MB buffer pool.\n"); + " 20MB buffer pool instances.\n"); return(1); } diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h index 39de6b179ab..99953a22121 100644 --- a/storage/xtradb/handler/ha_innodb.h +++ b/storage/xtradb/handler/ha_innodb.h @@ -295,8 +295,12 @@ class ha_innobase: public handler COLUMN_FORMAT_TYPE_COMPRESSED flag and updates zip_dict_name / zip_dict_data for those which have associated compression dictionaries. + + @param part_name Full table name (including partition part). + Must be non-NULL only if called from ha_partition. */ - virtual void update_field_defs_with_zip_dict_info(); + virtual void update_field_defs_with_zip_dict_info( + const char* part_name); private: /** Builds a 'template' to the prebuilt struct. diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index 098d93d4529..d6576fe27d5 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -1148,7 +1148,8 @@ innobase_rec_to_mysql( field->reset(); - ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE); + ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE, + NULL); if (ipos == ULINT_UNDEFINED || rec_offs_nth_extern(offsets, ipos)) { @@ -1196,7 +1197,8 @@ innobase_fields_to_mysql( field->reset(); - ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE); + ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE, + NULL); if (ipos == ULINT_UNDEFINED || dfield_is_ext(&fields[ipos]) diff --git a/storage/xtradb/include/buf0dblwr.h b/storage/xtradb/include/buf0dblwr.h index a62a6400d97..18e124b7437 100644 --- a/storage/xtradb/include/buf0dblwr.h +++ b/storage/xtradb/include/buf0dblwr.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -55,7 +55,7 @@ recovery, this function loads the pages from double write buffer into memory. 
*/ void buf_dblwr_init_or_load_pages( /*=========================*/ - os_file_t file, + pfs_os_file_t file, char* path, bool load_corrupt_pages); diff --git a/storage/xtradb/include/dict0dict.h b/storage/xtradb/include/dict0dict.h index 99d745eb49f..7ca2f45cb80 100644 --- a/storage/xtradb/include/dict0dict.h +++ b/storage/xtradb/include/dict0dict.h @@ -1142,8 +1142,9 @@ ulint dict_index_get_nth_col_pos( /*=======================*/ const dict_index_t* index, /*!< in: index */ - ulint n) /*!< in: column number */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + ulint n, /*!< in: column number */ + ulint* prefix_col_pos) /*!< out: col num if prefix */ + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /********************************************************************//** Looks for column n in an index. @return position in internal representation of the index; @@ -1154,9 +1155,11 @@ dict_index_get_nth_col_or_prefix_pos( /*=================================*/ const dict_index_t* index, /*!< in: index */ ulint n, /*!< in: column number */ - ibool inc_prefix) /*!< in: TRUE=consider + ibool inc_prefix, /*!< in: TRUE=consider column prefixes too */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + ulint* prefix_col_pos) /*!< out: col num if prefix */ + + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /********************************************************************//** Returns TRUE if the index contains a column or a prefix of that column. @return TRUE if contains the column or its prefix */ diff --git a/storage/xtradb/include/dict0dict.ic b/storage/xtradb/include/dict0dict.ic index 58a9ef4d65d..32bc4f376d9 100644 --- a/storage/xtradb/include/dict0dict.ic +++ b/storage/xtradb/include/dict0dict.ic @@ -1049,7 +1049,8 @@ dict_index_get_sys_col_pos( } return(dict_index_get_nth_col_pos( - index, dict_table_get_sys_col_no(index->table, type))); + index, dict_table_get_sys_col_no(index->table, type), + NULL)); } /*********************************************************************//** @@ -1101,9 +1102,11 @@ ulint dict_index_get_nth_col_pos( /*=======================*/ const dict_index_t* index, /*!< in: index */ - ulint n) /*!< in: column number */ + ulint n, /*!< in: column number */ + ulint* prefix_col_pos) /*!< out: col num if prefix */ { - return(dict_index_get_nth_col_or_prefix_pos(index, n, FALSE)); + return(dict_index_get_nth_col_or_prefix_pos(index, n, FALSE, + prefix_col_pos)); } #ifndef UNIV_HOTBACKUP diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index 14dfa296435..78222b3122a 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -182,7 +182,7 @@ struct fsp_open_info { ibool success; /*!< Has the tablespace been opened? */ const char* check_msg; /*!< fil_check_first_page() message */ ibool valid; /*!< Is the tablespace valid? 
*/ - os_file_t file; /*!< File handle */ + pfs_os_file_t file; /*!< File handle */ char* filepath; /*!< File path to open */ lsn_t lsn; /*!< Flushed LSN from header page */ ulint id; /*!< Space ID */ @@ -390,7 +390,7 @@ UNIV_INTERN const char* fil_read_first_page( /*================*/ - os_file_t data_file, /*!< in: open data file */ + pfs_os_file_t data_file, /*!< in: open data file */ ibool one_read_already, /*!< in: TRUE if min and max parameters below already contain sensible data */ @@ -906,12 +906,12 @@ struct PageCallback { Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. For compressed tables the page descriptor memory will be at offset: - block->frame + UNIV_PAGE_SIZE; + block->frame + UNIV_PAGE_SIZE; @param offset - physical offset within the file @param block - block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ virtual dberr_t operator()( - os_offset_t offset, + os_offset_t offset, buf_block_t* block) UNIV_NOTHROW = 0; /** @@ -919,7 +919,7 @@ struct PageCallback { to open it for the file that is being iterated over. @param filename - then physical name of the tablespace file. @param file - OS file handle */ - void set_file(const char* filename, os_file_t file) UNIV_NOTHROW + void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW { m_file = file; m_filepath = filename; @@ -955,7 +955,7 @@ struct PageCallback { ulint m_page_size; /** File handle to the tablespace */ - os_file_t m_file; + pfs_os_file_t m_file; /** Physical file path. */ const char* m_filepath; diff --git a/storage/xtradb/include/ha0ha.h b/storage/xtradb/include/ha0ha.h index 7351b407e8c..58eb581e76a 100644 --- a/storage/xtradb/include/ha0ha.h +++ b/storage/xtradb/include/ha0ha.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -107,7 +107,7 @@ chosen to be a slightly bigger prime number. @param level in: level of the mutexes in the latching order @param n_m in: number of mutexes to protect the hash table; must be a power of 2, or 0 */ -# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type) +# define ib_create(n_c,n_m,type,level) ha_create_func(n_c,level,n_m,type) #else /* UNIV_SYNC_DEBUG */ /** Creates a hash table. @return own: created table @@ -116,7 +116,7 @@ chosen to be a slightly bigger prime number. 
@param level in: level of the mutexes in the latching order @param n_m in: number of mutexes to protect the hash table; must be a power of 2, or 0 */ -# define ha_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type) +# define ib_create(n_c,n_m,type,level) ha_create_func(n_c,n_m,type) #endif /* UNIV_SYNC_DEBUG */ /*************************************************************//** diff --git a/storage/xtradb/include/log0online.h b/storage/xtradb/include/log0online.h index 722336dd6b4..5c3e7d07fd9 100644 --- a/storage/xtradb/include/log0online.h +++ b/storage/xtradb/include/log0online.h @@ -130,7 +130,7 @@ log_online_bitmap_iterator_next( /** Struct for single bitmap file information */ struct log_online_bitmap_file_struct { char name[FN_REFLEN]; /*!< Name with full path */ - os_file_t file; /*!< Handle to opened file */ + pfs_os_file_t file; /*!< Handle to opened file */ ib_uint64_t size; /*!< Size of the file */ os_offset_t offset; /*!< Offset of the next read, or count of already-read bytes diff --git a/storage/xtradb/include/os0file.h b/storage/xtradb/include/os0file.h index cf8935ff7d2..4dab704ad50 100644 --- a/storage/xtradb/include/os0file.h +++ b/storage/xtradb/include/os0file.h @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Portions of this file contain modifications contributed and copyrighted @@ -78,7 +78,6 @@ typedef ib_uint64_t os_offset_t; #define SRV_PATH_SEPARATOR '\\' /** File handle */ # define os_file_t HANDLE -# define os_file_invalid INVALID_HANDLE_VALUE /** Convert a C file descriptor to a native file handle @param fd file descriptor @return native file handle */ @@ -87,13 +86,22 @@ typedef ib_uint64_t os_offset_t; #define SRV_PATH_SEPARATOR '/' /** File handle */ typedef int os_file_t; -# define os_file_invalid (-1) /** Convert a C file descriptor to a native file handle @param fd file descriptor @return native file handle */ # define OS_FILE_FROM_FD(fd) fd #endif +/*Common file descriptor for file IO instrumentation with PFS +on windows and other platforms */ +struct pfs_os_file_t +{ + os_file_t m_file; +#ifdef UNIV_PFS_IO + struct PSI_file *m_psi; +#endif +}; + /** Umask for creating files */ extern ulint os_innodb_umask; @@ -129,6 +137,21 @@ enum os_file_create_t { ON_ERROR_NO_EXIT is set */ }; +/** Options for os_file_advise_func @{ */ +enum os_file_advise_t { + OS_FILE_ADVISE_NORMAL = 1, /*!< no advice on access pattern + (default) */ + OS_FILE_ADVISE_RANDOM = 2, /*!< access in random order */ + OS_FILE_ADVISE_SEQUENTIAL = 4, /*!< access the specified data + sequentially (with lower offsets read + before higher ones) */ + OS_FILE_ADVISE_WILLNEED = 8, /*!< specified data will be accessed + in the near future */ + OS_FILE_ADVISE_DONTNEED = 16, /*!< specified data will not be + accessed in the near future */ + OS_FILE_ADVISE_NOREUSE = 32 /*!< access only once */ +}; + #define OS_FILE_READ_ONLY 333 #define OS_FILE_READ_WRITE 444 #define OS_FILE_READ_ALLOW_DELETE 555 /* for mysqlbackup */ @@ -230,6 +253,8 @@ extern mysql_pfs_key_t innodb_file_bmp_key; various file I/O operations with performance schema. 1) register_pfs_file_open_begin() and register_pfs_file_open_end() are used to register file creation, opening, closing and renaming. 
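/* Illustrative sketch of how the os_file_advise_t flags introduced above could
   be translated to posix_fadvise(2) on POSIX systems.  The body of
   os_file_advise() is not part of the hunks shown here, so the one-to-one
   mapping below is an assumption for illustration only; names are hypothetical. */
#include <sys/types.h>
#include <fcntl.h>

enum AdviseFlagSketch {
    ADVISE_NORMAL     = 1,
    ADVISE_RANDOM     = 2,
    ADVISE_SEQUENTIAL = 4,
    ADVISE_WILLNEED   = 8,
    ADVISE_DONTNEED   = 16,
    ADVISE_NOREUSE    = 32
};

static bool
file_advise_sketch(int fd, off_t offset, off_t len, unsigned advice)
{
    int rc = 0;   /* posix_fadvise() returns 0 on success */

    if (advice & ADVISE_NORMAL)     rc |= posix_fadvise(fd, offset, len, POSIX_FADV_NORMAL);
    if (advice & ADVISE_RANDOM)     rc |= posix_fadvise(fd, offset, len, POSIX_FADV_RANDOM);
    if (advice & ADVISE_SEQUENTIAL) rc |= posix_fadvise(fd, offset, len, POSIX_FADV_SEQUENTIAL);
    if (advice & ADVISE_WILLNEED)   rc |= posix_fadvise(fd, offset, len, POSIX_FADV_WILLNEED);
    if (advice & ADVISE_DONTNEED)   rc |= posix_fadvise(fd, offset, len, POSIX_FADV_DONTNEED);
    if (advice & ADVISE_NOREUSE)    rc |= posix_fadvise(fd, offset, len, POSIX_FADV_NOREUSE);

    return rc == 0;
}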
+2) register_pfs_file_rename_begin() and register_pfs_file_rename_end() +are used to register file renaming 2) register_pfs_file_io_begin() and register_pfs_file_io_end() are used to register actual file read, write and flush 3) register_pfs_file_close_begin() and register_pfs_file_close_end() @@ -239,17 +264,30 @@ are used to register file deletion operations*/ do { \ locker = PSI_FILE_CALL(get_thread_file_name_locker)( \ state, key, op, name, &locker); \ - if (UNIV_LIKELY(locker != NULL)) { \ + if (locker != NULL) { \ PSI_FILE_CALL(start_file_open_wait)( \ locker, src_file, src_line); \ } \ } while (0) -# define register_pfs_file_open_end(locker, file) \ +# define register_pfs_file_open_end(locker, file, result) \ do { \ - if (UNIV_LIKELY(locker != NULL)) { \ - PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(\ - locker, file); \ + if (locker != NULL) { \ + file.m_psi = PSI_FILE_CALL( \ + end_file_open_wait)( \ + locker, result); \ + } \ +} while (0) + +# define register_pfs_file_rename_begin(state, locker, key, op, name, \ + src_file, src_line) \ + register_pfs_file_open_begin(state, locker, key, op, name, \ + src_file, src_line) \ + +# define register_pfs_file_rename_end(locker, result) \ +do { \ + if (locker != NULL) { \ + PSI_FILE_CALL(end_file_open_wait)(locker, result); \ } \ } while (0) @@ -275,9 +313,9 @@ do { \ # define register_pfs_file_io_begin(state, locker, file, count, op, \ src_file, src_line) \ do { \ - locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( \ - state, file, op); \ - if (UNIV_LIKELY(locker != NULL)) { \ + locker = PSI_FILE_CALL(get_thread_file_stream_locker)( \ + state, file.m_psi, op); \ + if (locker != NULL) { \ PSI_FILE_CALL(start_file_wait)( \ locker, count, src_file, src_line); \ } \ @@ -285,7 +323,7 @@ do { \ # define register_pfs_file_io_end(locker, count) \ do { \ - if (UNIV_LIKELY(locker != NULL)) { \ + if (locker != NULL) { \ PSI_FILE_CALL(end_file_wait)(locker, count); \ } \ } while (0) @@ -299,11 +337,16 @@ os_file_create os_file_create_simple os_file_create_simple_no_error_handling os_file_close +os_file_close_no_error_handling os_file_rename os_aio os_file_read os_file_read_no_error_handling +os_file_read_no_error_handling_int_fd os_file_write +os_file_write_int_fd +os_file_set_eof_at +os_file_allocate The wrapper functions have the prefix of "innodb_". */ @@ -321,20 +364,23 @@ The wrapper functions have the prefix of "innodb_". */ pfs_os_file_create_simple_no_error_handling_func( \ key, name, create_mode, access, success, __FILE__, __LINE__) -# define os_file_close(file) \ +# define os_file_close_pfs(file) \ pfs_os_file_close_func(file, __FILE__, __LINE__) +# define os_file_close_no_error_handling_pfs(file) \ + pfs_os_file_close_no_error_handling_func(file, __FILE__, __LINE__) + # define os_aio(type, mode, name, file, buf, offset, \ n, message1, message2, space_id, trx) \ pfs_os_aio_func(type, mode, name, file, buf, offset, \ n, message1, message2, space_id, trx, \ __FILE__, __LINE__) -# define os_file_read(file, buf, offset, n) \ +# define os_file_read_pfs(file, buf, offset, n) \ pfs_os_file_read_func(file, buf, offset, n, NULL, \ __FILE__, __LINE__) -# define os_file_read_trx(file, buf, offset, n, trx) \ +# define os_file_read_trx_pfs(file, buf, offset, n, trx) \ pfs_os_file_read_func(file, buf, offset, n, trx, \ __FILE__, __LINE__) @@ -342,11 +388,20 @@ The wrapper functions have the prefix of "innodb_". 
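/* Schematic, self-contained illustration of the begin/do/end pattern that the
   register_pfs_file_* macros above wrap around every instrumented file
   operation.  The psi_* names and types below are hypothetical stand-ins for
   the Performance Schema interface, kept only to show the control flow: a
   locker is requested first, the real I/O call happens unconditionally, and
   the timed wait is closed only when a locker was actually granted. */
#include <cstddef>
#include <cstdio>

struct PsiLockerSketch { int dummy; };

static PsiLockerSketch g_locker_sketch;
static bool g_instrumentation_on_sketch = true;    /* pretend PFS is enabled */

static PsiLockerSketch*
psi_start_io_wait_sketch(std::size_t bytes)
{
    if (!g_instrumentation_on_sketch) {
        return NULL;                 /* PFS may hand out no locker at all */
    }
    std::printf("start wait: %zu bytes\n", bytes);
    return &g_locker_sketch;
}

static void
psi_end_io_wait_sketch(PsiLockerSketch*, std::size_t bytes)
{
    std::printf("end wait: %zu bytes\n", bytes);
}

static bool
real_write_sketch(const void*, std::size_t)
{
    return true;                     /* stand-in for the raw file write */
}

static bool
instrumented_write_sketch(const void* buf, std::size_t n)
{
    /* "register_pfs_file_io_begin": may yield no locker when PFS is off. */
    PsiLockerSketch* locker = psi_start_io_wait_sketch(n);

    bool ok = real_write_sketch(buf, n);   /* the actual file operation */

    /* "register_pfs_file_io_end": only close a wait that was opened. */
    if (locker != NULL) {
        psi_end_io_wait_sketch(locker, n);
    }
    return ok;
}

int main()
{
    char byte = 0;
    return instrumented_write_sketch(&byte, 1) ? 0 : 1;
}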
*/ pfs_os_file_read_no_error_handling_func(file, buf, offset, n, \ __FILE__, __LINE__) -# define os_file_write(name, file, buf, offset, n) \ +# define os_file_read_no_error_handling_int_fd( \ + file, buf, offset, n) \ + pfs_os_file_read_no_error_handling_int_fd_func( \ + file, buf, offset, n, __FILE__, __LINE__) + +# define os_file_write_pfs(name, file, buf, offset, n) \ pfs_os_file_write_func(name, file, buf, offset, \ n, __FILE__, __LINE__) -# define os_file_flush(file) \ +# define os_file_write_int_fd(name, file, buf, offset, n) \ + pfs_os_file_write_int_fd_func(name, file, buf, offset, \ + n, __FILE__, __LINE__) + +# define os_file_flush_pfs(file) \ pfs_os_file_flush_func(file, __FILE__, __LINE__) # define os_file_rename(key, oldpath, newpath) \ @@ -357,6 +412,15 @@ The wrapper functions have the prefix of "innodb_". */ # define os_file_delete_if_exists(key, name) \ pfs_os_file_delete_if_exists_func(key, name, __FILE__, __LINE__) + +# define os_file_set_eof_at_pfs(file, new_len) \ + pfs_os_file_set_eof_at_func(file, new_len, __FILE__, __LINE__) + +# ifdef HAVE_POSIX_FALLOCATE +# define os_file_allocate_pfs(file, offset, len) \ + pfs_os_file_allocate_func(file, offset, len, __FILE__, __LINE__) +# endif + #else /* UNIV_PFS_IO */ /* If UNIV_PFS_IO is not defined, these I/O APIs point @@ -372,26 +436,36 @@ to original un-instrumented file I/O APIs */ os_file_create_simple_no_error_handling_func( \ name, create_mode, access, success) -# define os_file_close(file) os_file_close_func(file) +# define os_file_close_pfs(file) \ + os_file_close_func(file) + +# define os_file_close_no_error_handling_pfs(file) \ + os_file_close_no_error_handling_func(file) # define os_aio(type, mode, name, file, buf, offset, n, message1, \ message2, space_id, trx) \ os_aio_func(type, mode, name, file, buf, offset, n, \ message1, message2, space_id, trx) -# define os_file_read(file, buf, offset, n) \ +# define os_file_read_pfs(file, buf, offset, n) \ os_file_read_func(file, buf, offset, n, NULL) -# define os_file_read_trx(file, buf, offset, n, trx) \ +# define os_file_read_trx_pfs(file, buf, offset, n, trx) \ os_file_read_func(file, buf, offset, n, trx) # define os_file_read_no_error_handling(file, buf, offset, n) \ os_file_read_no_error_handling_func(file, buf, offset, n) +# define os_file_read_no_error_handling_int_fd( \ + file, buf, offset, n) \ + os_file_read_no_error_handling_func(file, buf, offset, n) -# define os_file_write(name, file, buf, offset, n) \ +# define os_file_write_int_fd(name, file, buf, offset, n) \ os_file_write_func(name, file, buf, offset, n) +# define os_file_write_pfs(name, file, buf, offset, n) \ + os_file_write_func(name, file, buf, offset, n) + -# define os_file_flush(file) os_file_flush_func(file) +# define os_file_flush_pfs(file) os_file_flush_func(file) # define os_file_rename(key, oldpath, newpath) \ os_file_rename_func(oldpath, newpath) @@ -401,8 +475,78 @@ to original un-instrumented file I/O APIs */ # define os_file_delete_if_exists(key, name) \ os_file_delete_if_exists_func(name) +# define os_file_set_eof_at_pfs(file, new_len) \ + os_file_set_eof_at_func(file, new_len) + +# ifdef HAVE_POSIX_FALLOCATE +# define os_file_allocate_pfs(file, offset, len) \ + os_file_allocate_func(file, offset, len) +# endif + #endif /* UNIV_PFS_IO */ +#ifdef UNIV_PFS_IO + #define os_file_close(file) os_file_close_pfs(file) +#else + #define os_file_close(file) os_file_close_pfs((file).m_file) +#endif + +#ifdef UNIV_PFS_IO + #define os_file_close_no_error_handling(file) \ + 
os_file_close_no_error_handling_pfs(file) +#else + #define os_file_close_no_error_handling(file) \ + os_file_close_no_error_handling_pfs((file).m_file) +#endif + +#ifdef UNIV_PFS_IO + #define os_file_read(file, buf, offset, n) \ + os_file_read_pfs(file, buf, offset, n) +#else + #define os_file_read(file, buf, offset, n) \ + os_file_read_pfs(file.m_file, buf, offset, n) +#endif + +#ifdef UNIV_PFS_IO + # define os_file_read_trx(file, buf, offset, n, trx) \ + os_file_read_trx_pfs(file, buf, offset, n, trx) +#else + # define os_file_read_trx(file, buf, offset, n, trx) \ + os_file_read_trx_pfs(file.m_file, buf, offset, n, trx) +#endif + +#ifdef UNIV_PFS_IO + #define os_file_flush(file) os_file_flush_pfs(file) +#else + #define os_file_flush(file) os_file_flush_pfs(file.m_file) +#endif + +#ifdef UNIV_PFS_IO + #define os_file_write(name, file, buf, offset, n) \ + os_file_write_pfs(name, file, buf, offset, n) +#else + #define os_file_write(name, file, buf, offset, n) \ + os_file_write_pfs(name, file.m_file, buf, offset, n) +#endif + +#ifdef UNIV_PFS_IO + #define os_file_set_eof_at(file, new_len) \ + os_file_set_eof_at_pfs(file, new_len) +#else + #define os_file_set_eof_at(file, new_len) \ + os_file_set_eof_at_pfs(file.m_file, new_len) +#endif + +#ifdef HAVE_POSIX_FALLOCATE +#ifdef UNIV_PFS_IO + #define os_file_allocate(file, offset, len) \ + os_file_allocate_pfs(file, offset, len) +#else + #define os_file_allocate(file, offset, len) \ + os_file_allocate_pfs(file.m_file, offset, len) +#endif +#endif + /* File types for directory entry data type */ enum os_file_type_t { @@ -536,7 +680,7 @@ A simple function to open or create a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( /*=========================================*/ const char* name, /*!< in: name of the file or path as a @@ -569,7 +713,7 @@ Opens an existing file or creates a new. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_func( /*================*/ const char* name, /*!< in: name of the file or path as a @@ -626,9 +770,45 @@ os_file_get_last_error. UNIV_INTERN ibool os_file_close_func( +/*===============*/ + os_file_t file); /*!< in, own: handle to a file */ +/***********************************************************************//** +NOTE! Use the corresponding macro os_file_close(), not directly this +function! +Closes a file handle. In case of error, error number can be retrieved with +os_file_get_last_error. +@return TRUE if success */ +UNIV_INTERN +bool +os_file_close_no_error_handling_func( /*===============*/ os_file_t file); /*!< in, own: handle to a file */ +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_set_eof_at(), not +directly this function! +Truncates a file at the specified position. +@return TRUE if success */ +UNIV_INTERN +bool +os_file_set_eof_at_func( + os_file_t file, /*!< in: handle to a file */ + ib_uint64_t new_len);/*!< in: new file length */ + +#ifdef HAVE_POSIX_FALLOCATE +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_allocate(), not +directly this function! +Ensures that disk space is allocated for the file. 
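/* Minimal stand-alone model of the pfs_os_file_t indirection used by the
   macro dispatch above: the handle type always carries the native descriptor
   and carries a Performance Schema pointer only in instrumented builds, so the
   same os_file_*() call sites work either way.  Everything below is a
   hypothetical stand-in (FILE* plays the role of os_file_t), not the real
   InnoDB definitions. */
#include <cstdio>

/* #define SKETCH_PFS_IO 1 */        /* flip to model an instrumented build */

struct pfs_file_sketch {
    FILE* m_file;                    /* native handle (os_file_t in the patch) */
#ifdef SKETCH_PFS_IO
    void* m_psi;                     /* PSI_file pointer, instrumented builds only */
#endif
};

#ifdef SKETCH_PFS_IO
/* Instrumented build: the wrapper receives the whole struct so it can time
   the call, mirroring pfs_os_file_close_func(). */
static bool file_close_sketch(pfs_file_sketch file)
{
    return std::fclose(file.m_file) == 0;
}
#else
/* Plain build: call sites still pass the struct, but the macro layer strips
   it down to the raw handle ((file).m_file), as in the #else branches above. */
static bool file_close_raw_sketch(FILE* file)
{
    return std::fclose(file) == 0;
}
#define file_close_sketch(file) file_close_raw_sketch((file).m_file)
#endif

int main()
{
    pfs_file_sketch f;
    f.m_file = std::fopen("sketch.tmp", "w");
    if (f.m_file != NULL) {
        file_close_sketch(f);        /* identical call site in both builds */
    }
    return 0;
}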
+@return TRUE if success */ +UNIV_INTERN +bool +os_file_allocate_func( + os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len); /*!< in: file region length */ +#endif + #ifdef UNIV_PFS_IO /****************************************************************//** NOTE! Please use the corresponding macro os_file_create_simple(), @@ -638,7 +818,7 @@ os_file_create_simple() which opens or creates a file. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_func( /*===========================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -661,7 +841,7 @@ monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_no_error_handling_func( /*=============================================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -685,7 +865,7 @@ Add instrumentation to monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_func( /*====================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -714,7 +894,20 @@ UNIV_INLINE ibool pfs_os_file_close_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line);/*!< in: line where the func invoked */ +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_close_no_error_handling(), +not directly this function! +A performance schema instrumented wrapper function for +os_file_close_no_error_handling(). +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_close_no_error_handling_func( +/*===================*/ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line);/*!< in: line where the func invoked */ /*******************************************************************//** @@ -727,7 +920,7 @@ UNIV_INLINE ibool pfs_os_file_read_func( /*==================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -746,7 +939,7 @@ UNIV_INLINE ibool pfs_os_file_read_no_error_handling_func( /*====================================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -767,7 +960,7 @@ pfs_os_aio_func( ulint mode, /*!< in: OS_AIO_NORMAL etc. 
I/O mode */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -796,7 +989,7 @@ pfs_os_file_write_func( /*===================*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ const void* buf, /*!< in: buffer from which to write */ os_offset_t offset, /*!< in: file offset where to write */ ulint n, /*!< in: number of bytes to write */ @@ -813,7 +1006,7 @@ UNIV_INLINE ibool pfs_os_file_flush_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line);/*!< in: line where the func invoked */ @@ -865,16 +1058,66 @@ pfs_os_file_delete_if_exists_func( string */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line);/*!< in: line where the func invoked */ + +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_set_eof_at(), not +directly this function! +This is the performance schema instrumented wrapper function for +os_file_set_eof_at() +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_set_eof_at_func( + pfs_os_file_t file, /*!< in: handle to a file */ + ib_uint64_t new_len,/*!< in: new file length */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line);/*!< in: line where the func invoked */ + +#ifdef HAVE_POSIX_FALLOCATE +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_allocate(), not +directly this function! +Ensures that disk space is allocated for the file. +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_allocate_func( + pfs_os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len, /*!< in: file region length */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line);/*!< in: line where the func invoked */ +#endif + #endif /* UNIV_PFS_IO */ /***********************************************************************//** -Closes a file handle. -@return TRUE if success */ +Checks if the file is marked as invalid. +@return TRUE if invalid */ UNIV_INTERN -ibool -os_file_close_no_error_handling( -/*============================*/ - os_file_t file); /*!< in, own: handle to a file */ +bool +os_file_is_invalid( + pfs_os_file_t file); /*!< in, own: handle to a file */ + +/***********************************************************************//** +Marks the file as invalid. */ +UNIV_INTERN +void +os_file_mark_invalid( + pfs_os_file_t* file); /*!< out: pointer to a handle to a file */ + +/***********************************************************************//** +Announces an intention to access file data in a specific pattern in the +future. 
+@return TRUE if success */ +UNIV_INTERN +bool +os_file_advise( + pfs_os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len, /*!< in: file region length */ + ulint advice);/*!< in: advice for access pattern */ + /***********************************************************************//** Gets a file size. @return file size, or (os_offset_t) -1 on failure */ @@ -882,7 +1125,7 @@ UNIV_INTERN os_offset_t os_file_get_size( /*=============*/ - os_file_t file) /*!< in: handle to a file */ + pfs_os_file_t file) /*!< in: handle to a file */ MY_ATTRIBUTE((warn_unused_result)); /***********************************************************************//** Write the specified number of zeros to a newly created file. @@ -893,7 +1136,7 @@ os_file_set_size( /*=============*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ os_offset_t size) /*!< in: file size */ MY_ATTRIBUTE((nonnull, warn_unused_result)); /***********************************************************************//** @@ -905,14 +1148,6 @@ os_file_set_eof( /*============*/ FILE* file); /*!< in: file to be truncated */ /***********************************************************************//** -Truncates a file at the specified position. -@return TRUE if success */ -UNIV_INTERN -ibool -os_file_set_eof_at( - os_file_t file, /*!< in: handle to a file */ - ib_uint64_t new_len);/*!< in: new file length */ -/***********************************************************************//** NOTE! Use the corresponding macro os_file_flush(), not directly this function! Flushes the write buffers of a given file to the disk. @return TRUE if success */ @@ -1140,7 +1375,7 @@ os_aio_func( caution! */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ diff --git a/storage/xtradb/include/os0file.ic b/storage/xtradb/include/os0file.ic index 25a1397147e..c82a2559fed 100644 --- a/storage/xtradb/include/os0file.ic +++ b/storage/xtradb/include/os0file.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2010, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2010, 2017, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -34,7 +34,7 @@ os_file_create_simple() which opens or creates a file. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_func( /*===========================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -47,7 +47,7 @@ pfs_os_file_create_simple_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -58,11 +58,13 @@ pfs_os_file_create_simple_func( : PSI_FILE_OPEN), name, src_file, src_line); - file = os_file_create_simple_func(name, create_mode, + file.m_file = os_file_create_simple_func(name, create_mode, access_type, success); + file.m_psi = NULL; - /* Regsiter the returning "file" value with the system */ - register_pfs_file_open_end(locker, file); + /* Regsiter psi value for the file */ + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -76,7 +78,7 @@ monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_simple_no_error_handling_func( /*=============================================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -91,7 +93,7 @@ pfs_os_file_create_simple_no_error_handling_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -104,8 +106,10 @@ pfs_os_file_create_simple_no_error_handling_func( file = os_file_create_simple_no_error_handling_func( name, create_mode, access_type, success); + file.m_psi = NULL; - register_pfs_file_open_end(locker, file); + register_pfs_file_open_end(locker, file, + (*success == TRUE ? success : 0)); return(file); } @@ -118,7 +122,7 @@ Add instrumentation to monitor file creation/open. @return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INLINE -os_file_t +pfs_os_file_t pfs_os_file_create_func( /*====================*/ mysql_pfs_key_t key, /*!< in: Performance Schema Key */ @@ -137,7 +141,7 @@ pfs_os_file_create_func( const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { - os_file_t file; + pfs_os_file_t file; struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; @@ -149,8 +153,10 @@ pfs_os_file_create_func( name, src_file, src_line); file = os_file_create_func(name, create_mode, purpose, type, success); + file.m_psi = NULL; - register_pfs_file_open_end(locker, file); + register_pfs_file_open_end(locker, file, + (*success == TRUE ? 
success : 0)); return(file); } @@ -164,7 +170,7 @@ UNIV_INLINE ibool pfs_os_file_close_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { @@ -176,7 +182,35 @@ pfs_os_file_close_func( register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CLOSE, src_file, src_line); - result = os_file_close_func(file); + result = os_file_close_func(file.m_file); + + register_pfs_file_io_end(locker, 0); + + return(result); +} +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_close_no_error_handling(), +not directly this function! +A performance schema instrumented wrapper function for +os_file_close_no_error_handling(). +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_close_no_error_handling_func( +/*===================*/ + pfs_os_file_t file, /*!< in, own: handle to a file */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + bool result; + struct PSI_file_locker* locker = NULL; + PSI_file_locker_state state; + + /* register the file close */ + register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CLOSE, + src_file, src_line); + + result = os_file_close_no_error_handling_func(file.m_file); register_pfs_file_io_end(locker, 0); @@ -197,7 +231,7 @@ pfs_os_aio_func( ulint mode, /*!< in: OS_AIO_NORMAL etc. I/O mode */ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -244,7 +278,7 @@ UNIV_INLINE ibool pfs_os_file_read_func( /*==================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -259,7 +293,7 @@ pfs_os_file_read_func( register_pfs_file_io_begin(&state, locker, file, n, PSI_FILE_READ, src_file, src_line); - result = os_file_read_func(file, buf, offset, n, trx); + result = os_file_read_func(file.m_file, buf, offset, n, trx); register_pfs_file_io_end(locker, n); @@ -278,7 +312,7 @@ UNIV_INLINE ibool pfs_os_file_read_no_error_handling_func( /*====================================*/ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read */ os_offset_t offset, /*!< in: file offset where to read */ ulint n, /*!< in: number of bytes to read */ @@ -292,13 +326,50 @@ pfs_os_file_read_no_error_handling_func( register_pfs_file_io_begin(&state, locker, file, n, PSI_FILE_READ, src_file, src_line); - result = os_file_read_no_error_handling_func(file, buf, offset, n); + result = os_file_read_no_error_handling_func(file.m_file, buf, offset, n); register_pfs_file_io_end(locker, n); return(result); } +/** NOTE! Please use the corresponding macro +os_file_read_no_error_handling_int_fd(), not directly this function! +This is the performance schema instrumented wrapper function for +os_file_read_no_error_handling_int_fd_func() which requests a +synchronous read operation. 
+@return TRUE if request was successful, FALSE if fail */ +UNIV_INLINE +ibool +pfs_os_file_read_no_error_handling_int_fd_func( + int file, /*!< in: handle to a file */ + void* buf, /*!< in: buffer where to read */ + os_offset_t offset, /*!< in: file offset where to read */ + ulint n, /*!< in: number of bytes to read */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + + PSI_file_locker_state state; + struct PSI_file_locker* locker = NULL; + + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, file, PSI_FILE_READ); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, n, + __FILE__, __LINE__); + } + ibool result = os_file_read_no_error_handling_func( + OS_FILE_FROM_FD(file), buf, offset, n); + + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, n); + } + + return(result); +} + /*******************************************************************//** NOTE! Please use the corresponding macro os_file_write(), not directly this function! @@ -311,7 +382,7 @@ pfs_os_file_write_func( /*===================*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ const void* buf, /*!< in: buffer from which to write */ os_offset_t offset, /*!< in: file offset where to write */ ulint n, /*!< in: number of bytes to write */ @@ -325,13 +396,50 @@ pfs_os_file_write_func( register_pfs_file_io_begin(&state, locker, file, n, PSI_FILE_WRITE, src_file, src_line); - result = os_file_write_func(name, file, buf, offset, n); + result = os_file_write_func(name, file.m_file, buf, offset, n); register_pfs_file_io_end(locker, n); return(result); } +/** NOTE! Please use the corresponding macro os_file_write(), not +directly this function! +This is the performance schema instrumented wrapper function for +os_file_write() which requests a synchronous write operation. +@return TRUE if request was successful, FALSE if fail */ +UNIV_INLINE +ibool +pfs_os_file_write_int_fd_func( + const char* name, /*!< in: name of the file or path as a + null-terminated string */ + int file, /*!< in: handle to a file */ + const void* buf, /*!< in: buffer from which to write */ + os_offset_t offset, /*!< in: file offset where to write */ + ulint n, /*!< in: number of bytes to write */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + PSI_file_locker_state state; + struct PSI_file_locker* locker = NULL; + + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, file, PSI_FILE_WRITE); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, n, + __FILE__, __LINE__); + } + ibool result = os_file_write_func( + name, OS_FILE_FROM_FD(file), buf, offset, n); + + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, n); + } + + return(result); +} + /***********************************************************************//** NOTE! Please use the corresponding macro os_file_flush(), not directly this function! 
@@ -342,7 +450,7 @@ UNIV_INLINE ibool pfs_os_file_flush_func( /*===================*/ - os_file_t file, /*!< in, own: handle to a file */ + pfs_os_file_t file, /*!< in, own: handle to a file */ const char* src_file,/*!< in: file name where func invoked */ ulint src_line)/*!< in: line where the func invoked */ { @@ -352,7 +460,7 @@ pfs_os_file_flush_func( register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_SYNC, src_file, src_line); - result = os_file_flush_func(file); + result = os_file_flush_func(file.m_file); register_pfs_file_io_end(locker, 0); @@ -380,12 +488,12 @@ pfs_os_file_rename_func( struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_open_begin(&state, locker, key, PSI_FILE_RENAME, newpath, + register_pfs_file_rename_begin(&state, locker, key, PSI_FILE_RENAME, newpath, src_file, src_line); result = os_file_rename_func(oldpath, newpath); - register_pfs_file_open_end(locker, 0); + register_pfs_file_rename_end(locker, 0); return(result); } @@ -449,4 +557,61 @@ pfs_os_file_delete_if_exists_func( return(result); } + +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_set_eof_at(), not +directly this function! +This is the performance schema instrumented wrapper function for +os_file_set_eof_at() +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_set_eof_at_func( + pfs_os_file_t file, /*!< in: handle to a file */ + ib_uint64_t new_len,/*!< in: new file length */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + bool result; + struct PSI_file_locker* locker = NULL; + PSI_file_locker_state state; + + register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CHSIZE, + src_file, src_line); + result = os_file_set_eof_at_func(file.m_file, new_len); + + register_pfs_file_io_end(locker, 0); + + return(result); +} + +#ifdef HAVE_POSIX_FALLOCATE +/***********************************************************************//** +NOTE! Please use the corresponding macro os_file_allocate(), not +directly this function! +Ensures that disk space is allocated for the file. +@return TRUE if success */ +UNIV_INLINE +bool +pfs_os_file_allocate_func( + pfs_os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len, /*!< in: file region length */ + const char* src_file,/*!< in: file name where func invoked */ + ulint src_line)/*!< in: line where the func invoked */ +{ + bool result; + struct PSI_file_locker* locker = NULL; + PSI_file_locker_state state; + + register_pfs_file_io_begin(&state, locker, file, 0, PSI_FILE_CHSIZE, + src_file, src_line); + result = os_file_allocate_func(file.m_file, offset, len); + + register_pfs_file_io_end(locker, 0); + + return(result); +} +#endif + #endif /* UNIV_PFS_IO */ diff --git a/storage/xtradb/include/row0mysql.h b/storage/xtradb/include/row0mysql.h index 27d3adfc7f0..eb604d05557 100644 --- a/storage/xtradb/include/row0mysql.h +++ b/storage/xtradb/include/row0mysql.h @@ -681,6 +681,12 @@ struct mysql_row_templ_t { Innobase record in the current index; not defined if template_type is ROW_MYSQL_WHOLE_ROW */ + bool rec_field_is_prefix; /* is this field in a prefix index? 
*/ + ulint rec_prefix_field_no; /* record field, even if just a + prefix; same as rec_field_no when not a + prefix, otherwise rec_field_no is + ULINT_UNDEFINED but this is the true + field number*/ ulint clust_rec_field_no; /*!< field number of the column in an Innobase record in the clustered index; not defined if template_type is @@ -729,6 +735,8 @@ struct mysql_row_templ_t { #define ROW_PREBUILT_ALLOCATED 78540783 #define ROW_PREBUILT_FREED 26423527 +class ha_innobase; + /** A struct for (sometimes lazily) prebuilt structures in an Innobase table handle used within MySQL; these are used to save CPU time. */ @@ -784,7 +792,9 @@ struct row_prebuilt_t { columns through a secondary index and at least one column is not in the secondary index, then this is - set to TRUE */ + set to TRUE; note that sometimes this + is set but we later optimize out the + clustered index lookup */ unsigned templ_contains_blob:1;/*!< TRUE if the template contains a column with DATA_BLOB == get_innobase_type_from_mysql_type(); @@ -958,6 +968,12 @@ struct row_prebuilt_t { to InnoDB format.*/ uint srch_key_val_len; /*!< Size of search key */ + /** MySQL handler object. */ + ha_innobase* mysql_handler; + + /** True if exceeded the end_range while filling the prefetch cache. */ + bool end_range; + }; /** Callback for row_mysql_sys_index_iterate() */ diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h index 882ceae2417..0fb563f539c 100644 --- a/storage/xtradb/include/srv0srv.h +++ b/storage/xtradb/include/srv0srv.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. @@ -588,6 +588,11 @@ extern my_bool srv_print_all_deadlocks; extern my_bool srv_cmp_per_index_enabled; +/** Number of times secondary index lookup triggered cluster lookup */ +extern ulint srv_sec_rec_cluster_reads; +/** Number of times prefix optimization avoided triggering cluster lookup */ +extern ulint srv_sec_rec_cluster_reads_avoided; + /** Status variables to be passed to MySQL */ extern struct export_var_t export_vars; @@ -973,6 +978,13 @@ void srv_purge_wakeup(void); /*==================*/ +/** Check whether given space id is undo tablespace id +@param[in] space_id space id to check +@return true if it is undo tablespace else false. */ +bool +srv_is_undo_tablespace( + ulint space_id); + /** Status variables to be passed to MySQL */ struct export_var_t{ ulint innodb_adaptive_hash_hash_searches; @@ -1085,6 +1097,9 @@ struct export_var_t{ #endif /* UNIV_DEBUG */ ulint innodb_column_compressed; /*!< srv_column_compressed */ ulint innodb_column_decompressed; /*!< srv_column_decompressed */ + + ulint innodb_sec_rec_cluster_reads; /*!< srv_sec_rec_cluster_reads */ + ulint innodb_sec_rec_cluster_reads_avoided; /*!< srv_sec_rec_cluster_reads_avoided */ }; /** Thread slot in the thread table. */ diff --git a/storage/xtradb/include/srv0start.h b/storage/xtradb/include/srv0start.h index 963b767f0fb..a60776a4665 100644 --- a/storage/xtradb/include/srv0start.h +++ b/storage/xtradb/include/srv0start.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -139,6 +139,8 @@ extern ibool srv_startup_is_before_trx_rollback_phase; /** TRUE if a raw partition is in use */ extern ibool srv_start_raw_disk_in_use; +/** Undo tablespaces starts with space_id. */ +extern ulint srv_undo_space_id_start; /** Shutdown state */ enum srv_shutdown_state { diff --git a/storage/xtradb/include/trx0xa.h b/storage/xtradb/include/trx0xa.h index 7caddfb7ba4..255431293f5 100644 --- a/storage/xtradb/include/trx0xa.h +++ b/storage/xtradb/include/trx0xa.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -24,6 +24,8 @@ this program; if not, write to the Free Software Foundation, Inc., #ifndef XA_H #define XA_H +#include "xa.h" + /* * Transaction branch identification: XID and NULLXID: */ @@ -35,17 +37,6 @@ this program; if not, write to the Free Software Foundation, Inc., #define MAXGTRIDSIZE 64 /*!< maximum size in bytes of gtrid */ #define MAXBQUALSIZE 64 /*!< maximum size in bytes of bqual */ -/** X/Open XA distributed transaction identifier */ -struct xid_t { - long formatID; /*!< format identifier; -1 - means that the XID is null */ - long gtrid_length; /*!< value from 1 through 64 */ - long bqual_length; /*!< value from 1 through 64 */ - char data[XIDDATASIZE]; /*!< distributed transaction - identifier */ -}; -/** X/Open XA distributed transaction identifier */ -typedef struct xid_t XID; #endif /** X/Open XA distributed transaction status codes */ /* @{ */ diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index 7da293542f5..b4ad030a565 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -47,7 +47,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_BUGFIX MYSQL_VERSION_PATCH #ifndef PERCONA_INNODB_VERSION -#define PERCONA_INNODB_VERSION 80.0 +#define PERCONA_INNODB_VERSION 82.0 #endif /* Enable UNIV_LOG_ARCHIVE in XtraDB */ @@ -145,14 +145,8 @@ HAVE_PSI_INTERFACE is defined. */ #if defined HAVE_PSI_INTERFACE && !defined UNIV_HOTBACKUP # define UNIV_PFS_MUTEX # define UNIV_PFS_RWLOCK -/* For I/O instrumentation, performance schema rely -on a native descriptor to identify the file, this -descriptor could conflict with our OS level descriptor. -Disable IO instrumentation on Windows until this is -resolved */ -# ifndef __WIN__ -# define UNIV_PFS_IO -# endif + +# define UNIV_PFS_IO # define UNIV_PFS_THREAD /* There are mutexes/rwlocks that we want to exclude from diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index 7784e8538b7..f73d5b458d1 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -2627,7 +2627,7 @@ log_group_archive( /*==============*/ log_group_t* group) /*!< in: log group */ { - os_file_t file_handle; + pfs_os_file_t file_handle; lsn_t start_lsn; lsn_t end_lsn; char name[OS_FILE_MAX_PATH]; @@ -3607,9 +3607,15 @@ loop: lsn = log_sys->lsn; - if (lsn != log_sys->last_checkpoint_lsn - || (srv_track_changed_pages - && (tracked_lsn != log_sys->last_checkpoint_lsn)) + const bool is_last = + ((srv_force_recovery == SRV_FORCE_NO_LOG_REDO + && lsn == log_sys->last_checkpoint_lsn + + LOG_BLOCK_HDR_SIZE) + || lsn == log_sys->last_checkpoint_lsn) + && (!srv_track_changed_pages + || tracked_lsn == log_sys->last_checkpoint_lsn); + + if (!is_last #ifdef UNIV_LOG_ARCHIVE || (srv_log_archive_on && lsn != log_sys->archived_lsn + LOG_BLOCK_HDR_SIZE) @@ -3675,7 +3681,8 @@ loop: ut_a(freed); ut_a(lsn == log_sys->lsn); - ut_ad(lsn == log_sys->last_checkpoint_lsn); + ut_ad(srv_force_recovery >= SRV_FORCE_NO_LOG_REDO + || lsn == log_sys->last_checkpoint_lsn); if (lsn < srv_start_lsn) { ib_logf(IB_LOG_LEVEL_ERROR, diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc index 8d1952fdd04..99762c00a08 100644 --- a/storage/xtradb/log/log0online.cc +++ b/storage/xtradb/log/log0online.cc @@ -336,7 +336,7 @@ log_online_read_last_tracked_lsn(void) lsn_t result; os_offset_t read_offset = log_bmp_sys->out.offset; - while (!checksum_ok && read_offset > 0 && !is_last_page) + while ((!checksum_ok || !is_last_page) && read_offset > 0) { read_offset -= MODIFIED_PAGE_BLOCK_SIZE; log_bmp_sys->out.offset = read_offset; @@ -564,9 +564,9 @@ log_online_rotate_bitmap_file( lsn_t next_file_start_lsn) /*!out.file != os_file_invalid) { + if (!os_file_is_invalid(log_bmp_sys->out.file)) { os_file_close(log_bmp_sys->out.file); - log_bmp_sys->out.file = os_file_invalid; + os_file_mark_invalid(&log_bmp_sys->out.file); } log_bmp_sys->out_seq_num++; log_online_make_bitmap_name(next_file_start_lsn); @@ -736,7 +736,11 @@ log_online_read_init(void) } last_tracked_lsn = log_online_read_last_tracked_lsn(); + /* Do not rotate if we truncated the file to zero length - we + can just start writing there */ + const bool need_rotate = (last_tracked_lsn != 0); if (!last_tracked_lsn) { + last_tracked_lsn = last_file_start_lsn; } @@ -748,7 +752,10 @@ log_online_read_init(void) } else { file_start_lsn = tracking_start_lsn; } - if (!log_online_rotate_bitmap_file(file_start_lsn)) { + + if (need_rotate + && !log_online_rotate_bitmap_file(file_start_lsn)) { + exit(1); } @@ -788,9 +795,9 @@ log_online_read_shutdown(void) ib_rbt_node_t *free_list_node = log_bmp_sys->page_free_list; - if (log_bmp_sys->out.file != os_file_invalid) { + if (!os_file_is_invalid(log_bmp_sys->out.file)) { os_file_close(log_bmp_sys->out.file); - log_bmp_sys->out.file = os_file_invalid; + os_file_mark_invalid(&log_bmp_sys->out.file); } rbt_free(log_bmp_sys->modified_pages); @@ -1129,6 +1136,18 @@ log_online_write_bitmap_page( } }); + /* A crash injection site that ensures last checkpoint LSN > last + tracked LSN, so that LSN tracking for this interval is tested. 
*/ + DBUG_EXECUTE_IF("crash_before_bitmap_write", + { + ulint space_id + = mach_read_from_4(block + + MODIFIED_PAGE_SPACE_ID); + if (space_id > 0) + DBUG_SUICIDE(); + }); + + ibool success = os_file_write(log_bmp_sys->out.name, log_bmp_sys->out.file, block, log_bmp_sys->out.offset, @@ -1152,10 +1171,8 @@ log_online_write_bitmap_page( return FALSE; } -#ifdef UNIV_LINUX - posix_fadvise(log_bmp_sys->out.file, log_bmp_sys->out.offset, - MODIFIED_PAGE_BLOCK_SIZE, POSIX_FADV_DONTNEED); -#endif + os_file_advise(log_bmp_sys->out.file, log_bmp_sys->out.offset, + MODIFIED_PAGE_BLOCK_SIZE, OS_FILE_ADVISE_DONTNEED); log_bmp_sys->out.offset += MODIFIED_PAGE_BLOCK_SIZE; return TRUE; @@ -1277,10 +1294,6 @@ log_online_follow_redo_log(void) group = UT_LIST_GET_NEXT(log_groups, group); } - /* A crash injection site that ensures last checkpoint LSN > last - tracked LSN, so that LSN tracking for this interval is tested. */ - DBUG_EXECUTE_IF("crash_before_bitmap_write", DBUG_SUICIDE();); - result = log_online_write_bitmap(); log_bmp_sys->start_lsn = log_bmp_sys->end_lsn; log_set_tracked_lsn(log_bmp_sys->start_lsn); @@ -1550,10 +1563,8 @@ log_online_open_bitmap_file_read_only( bitmap_file->size = os_file_get_size(bitmap_file->file); bitmap_file->offset = 0; -#ifdef UNIV_LINUX - posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_SEQUENTIAL); - posix_fadvise(bitmap_file->file, 0, 0, POSIX_FADV_NOREUSE); -#endif + os_file_advise(bitmap_file->file, 0, 0, OS_FILE_ADVISE_SEQUENTIAL); + os_file_advise(bitmap_file->file, 0, 0, OS_FILE_ADVISE_NOREUSE); return TRUE; } @@ -1639,7 +1650,7 @@ log_online_bitmap_iterator_init( /* Empty range */ i->in_files.count = 0; i->in_files.files = NULL; - i->in.file = os_file_invalid; + os_file_mark_invalid(&i->in.file); i->page = NULL; i->failed = FALSE; return TRUE; @@ -1657,7 +1668,7 @@ log_online_bitmap_iterator_init( if (i->in_files.count == 0) { /* Empty range */ - i->in.file = os_file_invalid; + os_file_mark_invalid(&i->in.file); i->page = NULL; i->failed = FALSE; return TRUE; @@ -1696,10 +1707,10 @@ log_online_bitmap_iterator_release( { ut_a(i); - if (i->in.file != os_file_invalid) { + if (!os_file_is_invalid(i->in.file)) { os_file_close(i->in.file); - i->in.file = os_file_invalid; + os_file_mark_invalid(&i->in.file); } if (i->in_files.files) { @@ -1753,8 +1764,9 @@ log_online_bitmap_iterator_next( /* Advance file */ i->in_i++; - success = os_file_close_no_error_handling(i->in.file); - i->in.file = os_file_invalid; + success = os_file_close_no_error_handling( + i->in.file); + os_file_mark_invalid(&i->in.file); if (UNIV_UNLIKELY(!success)) { os_file_get_last_error(TRUE); @@ -1863,7 +1875,7 @@ log_online_purge_changed_page_bitmaps( /* If we have to delete the current output file, close it first. */ os_file_close(log_bmp_sys->out.file); - log_bmp_sys->out.file = os_file_invalid; + os_file_mark_invalid(&log_bmp_sys->out.file); } for (i = 0; i < bitmap_files.count; i++) { diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index b80f1f8597e..b2e55faffca 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. 
This program is free software; you can redistribute it and/or modify it under @@ -324,6 +324,7 @@ DECLARE_THREAD(recv_writer_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); ut_ad(!srv_read_only_mode); #ifdef UNIV_PFS_THREAD @@ -356,6 +357,7 @@ DECLARE_THREAD(recv_writer_thread)( recv_writer_thread_active = false; + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index dac67055114..9b2a2746976 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -1,6 +1,6 @@ /*********************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. Portions of this file contain modifications contributed and copyrighted @@ -82,9 +82,11 @@ my_umask */ #ifndef __WIN__ /** Umask for creating files */ UNIV_INTERN ulint os_innodb_umask = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; +# define os_file_invalid (-1) #else /** Umask for creating files */ UNIV_INTERN ulint os_innodb_umask = 0; +# define os_file_invalid INVALID_HANDLE_VALUE #endif /* __WIN__ */ #ifndef UNIV_HOTBACKUP @@ -173,7 +175,7 @@ struct os_aio_slot_t{ byte* buf; /*!< buffer used in i/o */ ulint type; /*!< OS_FILE_READ or OS_FILE_WRITE */ os_offset_t offset; /*!< file offset in bytes */ - os_file_t file; /*!< file where to read or write */ + pfs_os_file_t file; /*!< file where to read or write */ const char* name; /*!< file name or path */ ibool io_already_done;/*!< used only in simulated aio: TRUE if the physical i/o already @@ -1347,7 +1349,7 @@ A simple function to open or create a file. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_simple_no_error_handling_func( /*=========================================*/ const char* name, /*!< in: name of the file or path as a @@ -1361,7 +1363,7 @@ os_file_create_simple_no_error_handling_func( if it would be enabled otherwise) */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ { - os_file_t file; + pfs_os_file_t file; *success = FALSE; #ifdef __WIN__ @@ -1369,7 +1371,6 @@ os_file_create_simple_no_error_handling_func( DWORD create_flag; DWORD attributes = 0; DWORD share_mode = FILE_SHARE_READ; - ut_a(name); ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT)); @@ -1386,8 +1387,8 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file create mode (%lu) for file '%s'", create_mode, name); - - return((os_file_t) -1); + file.m_file = (os_file_t)-1; + return(file); } if (access_type == OS_FILE_READ_ONLY) { @@ -1411,11 +1412,11 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file access type (%lu) for file '%s'", access_type, name); - - return((os_file_t) -1); + file.m_file = (os_file_t)-1; + return(file); } - file = CreateFile((LPCTSTR) name, + file.m_file = CreateFile((LPCTSTR) name, access, share_mode, NULL, // Security attributes @@ -1423,11 +1424,10 @@ os_file_create_simple_no_error_handling_func( attributes, NULL); // No template file - *success = (file != INVALID_HANDLE_VALUE); + *success = (file.m_file != INVALID_HANDLE_VALUE); #else /* __WIN__ */ int create_flag; const char* mode_str = NULL; - ut_a(name); ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT)); @@ -1470,19 +1470,19 @@ os_file_create_simple_no_error_handling_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown file create mode (%lu) for file '%s'", create_mode, name); - - return((os_file_t) -1); + file.m_file = -1; + return(file); } - file = ::open(name, create_flag, os_innodb_umask); + file.m_file = ::open(name, create_flag, os_innodb_umask); - *success = file == -1 ? FALSE : TRUE; + *success = file.m_file == -1 ? FALSE : TRUE; /* This function is always called for data files, we should disable OS caching (O_DIRECT) here as we do in os_file_create_func(), so we open the same file in the same mode, see man page of open(2). */ if (*success) { - os_file_set_nocache_if_needed(file, name, mode_str, + os_file_set_nocache_if_needed(file.m_file, name, mode_str, OS_DATA_FILE, access_type); } @@ -1491,11 +1491,11 @@ os_file_create_simple_no_error_handling_func( && *success && (access_type == OS_FILE_READ_WRITE || access_type == OS_FILE_READ_WRITE_CACHED) - && os_file_lock(file, name)) { + && os_file_lock(file.m_file, name)) { *success = FALSE; - close(file); - file = -1; + close(file.m_file); + file.m_file = -1; } #endif /* USE_FILE_LOCK */ @@ -1606,7 +1606,7 @@ Opens an existing file or creates a new. 
@return own: handle to the file, not defined if error, error number can be retrieved with os_file_get_last_error */ UNIV_INTERN -os_file_t +pfs_os_file_t os_file_create_func( /*================*/ const char* name, /*!< in: name of the file or path as a @@ -1622,24 +1622,25 @@ os_file_create_func( ulint type, /*!< in: OS_DATA_FILE or OS_LOG_FILE */ ibool* success)/*!< out: TRUE if succeed, FALSE if error */ { - os_file_t file; + pfs_os_file_t file; ibool retry; ibool on_error_no_exit; ibool on_error_silent; - #ifdef __WIN__ DBUG_EXECUTE_IF( "ib_create_table_fail_disk_full", *success = FALSE; SetLastError(ERROR_DISK_FULL); - return((os_file_t) -1); + file.m_file = (os_file_t)-1; + return(file); ); #else /* __WIN__ */ DBUG_EXECUTE_IF( "ib_create_table_fail_disk_full", *success = FALSE; errno = ENOSPC; - return((os_file_t) -1); + file.m_file = -1; + return(file); ); #endif /* __WIN__ */ @@ -1690,7 +1691,8 @@ os_file_create_func( "Unknown file create mode (%lu) for file '%s'", create_mode, name); - return((os_file_t) -1); + file.m_file = (os_file_t)-1; + return(file); } DWORD attributes = 0; @@ -1715,8 +1717,8 @@ os_file_create_func( ib_logf(IB_LOG_LEVEL_ERROR, "Unknown purpose flag (%lu) while opening file '%s'", purpose, name); - - return((os_file_t)(-1)); + file.m_file = (os_file_t)-1; + return(file); } #ifdef UNIV_NON_BUFFERED_IO @@ -1743,11 +1745,11 @@ os_file_create_func( do { /* Use default security attributes and no template file. */ - file = CreateFile( + file.m_file = CreateFile( (LPCTSTR) name, access, share_mode, NULL, create_flag, attributes, NULL); - if (file == INVALID_HANDLE_VALUE) { + if (file.m_file == INVALID_HANDLE_VALUE) { const char* operation; operation = (create_mode == OS_FILE_CREATE @@ -1772,7 +1774,6 @@ os_file_create_func( #else /* __WIN__ */ int create_flag; const char* mode_str = NULL; - on_error_no_exit = create_mode & OS_FILE_ON_ERROR_NO_EXIT ? 
TRUE : FALSE; on_error_silent = create_mode & OS_FILE_ON_ERROR_SILENT @@ -1810,7 +1811,8 @@ os_file_create_func( "Unknown file create mode (%lu) for file '%s'", create_mode, name); - return((os_file_t) -1); + file.m_file = -1; + return(file); } ut_a(type == OS_LOG_FILE || type == OS_DATA_FILE); @@ -1830,9 +1832,9 @@ os_file_create_func( #endif /* O_SYNC */ do { - file = ::open(name, create_flag, os_innodb_umask); + file.m_file = ::open(name, create_flag, os_innodb_umask); - if (file == -1) { + if (file.m_file == -1) { const char* operation; operation = (create_mode == OS_FILE_CREATE @@ -1856,14 +1858,15 @@ os_file_create_func( if (*success) { - os_file_set_nocache_if_needed(file, name, mode_str, type, 0); + os_file_set_nocache_if_needed(file.m_file, name, mode_str, + type, 0); } #ifdef USE_FILE_LOCK if (!srv_read_only_mode && *success && create_mode != OS_FILE_OPEN_RAW - && os_file_lock(file, name)) { + && os_file_lock(file.m_file, name)) { if (create_mode == OS_FILE_OPEN_RETRY) { @@ -1875,7 +1878,7 @@ os_file_create_func( for (int i = 0; i < 100; i++) { os_thread_sleep(1000000); - if (!os_file_lock(file, name)) { + if (!os_file_lock(file.m_file, name)) { *success = TRUE; return(file); } @@ -1886,17 +1889,18 @@ os_file_create_func( } *success = FALSE; - close(file); - file = -1; + close(file.m_file); + file.m_file = -1; } #endif /* USE_FILE_LOCK */ if (srv_use_atomic_writes && type == OS_DATA_FILE - && file != -1 && !os_file_set_atomic_writes(name, file)) { + && file.m_file != -1 + && !os_file_set_atomic_writes(name, file.m_file)) { *success = FALSE; - close(file); - file = -1; + close(file.m_file); + file.m_file = -1; } #endif /* __WIN__ */ @@ -2127,8 +2131,8 @@ os_file_close_func( Closes a file handle. @return TRUE if success */ UNIV_INTERN -ibool -os_file_close_no_error_handling( +bool +os_file_close_no_error_handling_func( /*============================*/ os_file_t file) /*!< in, own: handle to a file */ { @@ -2140,10 +2144,10 @@ os_file_close_no_error_handling( ret = CloseHandle(file); if (ret) { - return(TRUE); + return(true); } - return(FALSE); + return(false); #else int ret; @@ -2151,10 +2155,83 @@ os_file_close_no_error_handling( if (ret == -1) { - return(FALSE); + return(false); } - return(TRUE); + return(true); +#endif /* __WIN__ */ +} + +#ifdef HAVE_POSIX_FALLOCATE +/***********************************************************************//** +Ensures that disk space is allocated for the file. +@return TRUE if success */ +UNIV_INTERN +bool +os_file_allocate_func( + os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len) /*!< in: file region length */ +{ + return(posix_fallocate(file, offset, len) == 0); +} +#endif + +/***********************************************************************//** +Checks if the file is marked as invalid. +@return TRUE if invalid */ +UNIV_INTERN +bool +os_file_is_invalid( + pfs_os_file_t file) /*!< in, own: handle to a file */ +{ + return(file.m_file == os_file_invalid); +} + +/***********************************************************************//** +Marks the file as invalid. */ +UNIV_INTERN +void +os_file_mark_invalid( + pfs_os_file_t* file) /*!< out: pointer to a handle to a file */ +{ + file->m_file = os_file_invalid; +} + +/***********************************************************************//** +Announces an intention to access file data in a specific pattern in the +future. 
+@return TRUE if success */ +UNIV_INTERN +bool +os_file_advise( + pfs_os_file_t file, /*!< in, own: handle to a file */ + os_offset_t offset, /*!< in: file region offset */ + os_offset_t len, /*!< in: file region length */ + ulint advice)/*!< in: advice for access pattern */ +{ +#ifdef __WIN__ + return(true); +#else +#ifdef UNIV_LINUX + int native_advice = 0; + if ((advice & OS_FILE_ADVISE_NORMAL) != 0) + native_advice |= POSIX_FADV_NORMAL; + if ((advice & OS_FILE_ADVISE_RANDOM) != 0) + native_advice |= POSIX_FADV_RANDOM; + if ((advice & OS_FILE_ADVISE_SEQUENTIAL) != 0) + native_advice |= POSIX_FADV_SEQUENTIAL; + if ((advice & OS_FILE_ADVISE_WILLNEED) != 0) + native_advice |= POSIX_FADV_WILLNEED; + if ((advice & OS_FILE_ADVISE_DONTNEED) != 0) + native_advice |= POSIX_FADV_DONTNEED; + if ((advice & OS_FILE_ADVISE_NOREUSE) != 0) + native_advice |= POSIX_FADV_NOREUSE; + + return(posix_fadvise(file.m_file, offset, len, native_advice) == 0); +#else + return(true); +#endif #endif /* __WIN__ */ } @@ -2165,14 +2242,14 @@ UNIV_INTERN os_offset_t os_file_get_size( /*=============*/ - os_file_t file) /*!< in: handle to a file */ + pfs_os_file_t file) /*!< in: handle to a file */ { #ifdef __WIN__ os_offset_t offset; DWORD high; DWORD low; - low = GetFileSize(file, &high); + low = GetFileSize(file.m_file, &high); if ((low == 0xFFFFFFFF) && (GetLastError() != NO_ERROR)) { return((os_offset_t) -1); @@ -2182,7 +2259,8 @@ os_file_get_size( return(offset); #else - return((os_offset_t) lseek(file, 0, SEEK_END)); + return((os_offset_t) lseek(file.m_file, 0, SEEK_END)); + #endif /* __WIN__ */ } @@ -2195,7 +2273,7 @@ os_file_set_size( /*=============*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ os_offset_t size) /*!< in: file size */ { os_offset_t current_size; @@ -2209,7 +2287,7 @@ os_file_set_size( #ifdef HAVE_POSIX_FALLOCATE if (srv_use_posix_fallocate) { - if (posix_fallocate(file, current_size, size) == -1) { + if (posix_fallocate(file.m_file, current_size, size) == -1) { ib_logf(IB_LOG_LEVEL_ERROR, "preallocating file " "space for file \'%s\' failed. Current size " @@ -2304,8 +2382,8 @@ os_file_set_eof( Truncates a file at the specified position. @return TRUE if success */ UNIV_INTERN -ibool -os_file_set_eof_at( +bool +os_file_set_eof_at_func( os_file_t file, /*!< in: handle to a file */ ib_uint64_t new_len)/*!< in: new file length */ { @@ -4400,7 +4478,7 @@ os_aio_array_reserve_slot( the aio operation */ void* message2,/*!< in: message to be passed along with the aio operation */ - os_file_t file, /*!< in: file handle */ + pfs_os_file_t file, /*!< in: file handle */ const char* name, /*!< in: name of the file or path as a null-terminated string */ void* buf, /*!< in: buffer where to read or from which @@ -4522,10 +4600,10 @@ found: iocb = &slot->control; if (type == OS_FILE_READ) { - io_prep_pread(iocb, file, buf, len, aio_offset); + io_prep_pread(iocb, file.m_file, buf, len, aio_offset); } else { ut_a(type == OS_FILE_WRITE); - io_prep_pwrite(iocb, file, buf, len, aio_offset); + io_prep_pwrite(iocb, file.m_file, buf, len, aio_offset); } iocb->data = (void*) slot; @@ -4763,7 +4841,7 @@ os_aio_func( caution! 
*/ const char* name, /*!< in: name of the file or path as a null-terminated string */ - os_file_t file, /*!< in: handle to a file */ + pfs_os_file_t file, /*!< in: handle to a file */ void* buf, /*!< in: buffer where to read or from which to write */ os_offset_t offset, /*!< in: file offset where to read or write */ @@ -4790,8 +4868,7 @@ os_aio_func( ulint dummy_type; #endif /* WIN_ASYNC_IO */ ulint wake_later; - - ut_ad(file); + ut_ad(file.m_file); ut_ad(buf); ut_ad(n > 0); ut_ad(n % OS_MIN_LOG_BLOCK_SIZE == 0); @@ -4823,13 +4900,12 @@ os_aio_func( and os_file_write_func() */ if (type == OS_FILE_READ) { - return(os_file_read_func(file, buf, offset, n, trx)); + return(os_file_read_func(file.m_file, buf, offset, n, + trx)); } - ut_ad(!srv_read_only_mode); ut_a(type == OS_FILE_WRITE); - - return(os_file_write_func(name, file, buf, offset, n)); + return(os_file_write_func(name, file.m_file, buf, offset, n)); } try_again: @@ -4886,9 +4962,8 @@ try_again: os_n_file_reads++; os_bytes_read_since_printout += n; #ifdef WIN_ASYNC_IO - ret = ReadFile(file, buf, (DWORD) n, &len, + ret = ReadFile(file.m_file, buf, (DWORD) n, &len, &(slot->control)); - #elif defined(LINUX_NATIVE_AIO) if (!os_aio_linux_dispatch(array, slot)) { goto err_exit; @@ -4906,9 +4981,8 @@ try_again: if (srv_use_native_aio) { os_n_file_writes++; #ifdef WIN_ASYNC_IO - ret = WriteFile(file, buf, (DWORD) n, &len, + ret = WriteFile(file.m_file, buf, (DWORD) n, &len, &(slot->control)); - #elif defined(LINUX_NATIVE_AIO) if (!os_aio_linux_dispatch(array, slot)) { goto err_exit; @@ -5063,8 +5137,7 @@ os_aio_windows_handle( srv_set_io_thread_op_info( orig_seg, "get windows aio return value"); } - - ret = GetOverlappedResult(slot->file, &(slot->control), &len, TRUE); + ret = GetOverlappedResult(slot->file.m_file, &(slot->control), &len, TRUE); *message1 = slot->message1; *message2 = slot->message2; @@ -5094,7 +5167,8 @@ os_aio_windows_handle( and os_file_write APIs, need to register with performance schema explicitly here. */ struct PSI_file_locker* locker = NULL; - register_pfs_file_io_begin(locker, slot->file, slot->len, + PSI_file_locker_state state; + register_pfs_file_io_begin(&state, locker, slot->file, slot->len, (slot->type == OS_FILE_WRITE) ? 
PSI_FILE_WRITE : PSI_FILE_READ, @@ -5105,16 +5179,14 @@ os_aio_windows_handle( switch (slot->type) { case OS_FILE_WRITE: - ret = WriteFile(slot->file, slot->buf, + ret = WriteFile(slot->file.m_file, slot->buf, (DWORD) slot->len, &len, &(slot->control)); - break; case OS_FILE_READ: - ret = ReadFile(slot->file, slot->buf, + ret = ReadFile(slot->file.m_file, slot->buf, (DWORD) slot->len, &len, &(slot->control)); - break; default: ut_error; @@ -5130,8 +5202,7 @@ os_aio_windows_handle( file where we also use async i/o: in Windows we must use the same wait mechanism as for async i/o */ - - ret = GetOverlappedResult(slot->file, + ret = GetOverlappedResult(slot->file.m_file, &(slot->control), &len, TRUE); } @@ -5385,12 +5456,14 @@ found: iocb = &(slot->control); if (slot->type == OS_FILE_READ) { - io_prep_pread(&slot->control, slot->file, slot->buf, - slot->len, (off_t) slot->offset); + io_prep_pread(&slot->control, slot->file.m_file, + slot->buf, slot->len, + (off_t) slot->offset); } else { ut_a(slot->type == OS_FILE_WRITE); - io_prep_pwrite(&slot->control, slot->file, slot->buf, - slot->len, (off_t) slot->offset); + io_prep_pwrite(&slot->control, slot->file.m_file, + slot->buf, slot->len, + (off_t) slot->offset); } /* Resubmit an I/O request */ submit_ret = io_submit(array->aio_ctx[segment], 1, &iocb); @@ -5617,12 +5690,11 @@ consecutive_loop: os_aio_slot_t* slot; slot = os_aio_array_get_nth_slot(array, i + segment * n); - if (slot->reserved && slot != aio_slot && slot->offset == aio_slot->offset + aio_slot->len && slot->type == aio_slot->type - && slot->file == aio_slot->file) { + && slot->file.m_file == aio_slot->file.m_file) { /* Found a consecutive i/o request */ diff --git a/storage/xtradb/pars/pars0opt.cc b/storage/xtradb/pars/pars0opt.cc index cbed2b39eeb..5a7e1861d74 100644 --- a/storage/xtradb/pars/pars0opt.cc +++ b/storage/xtradb/pars/pars0opt.cc @@ -948,12 +948,14 @@ opt_find_all_cols( /* Fill in the field_no fields in sym_node */ sym_node->field_nos[SYM_CLUST_FIELD_NO] = dict_index_get_nth_col_pos( - dict_table_get_first_index(index->table), sym_node->col_no); + dict_table_get_first_index(index->table), sym_node->col_no, + NULL); if (!dict_index_is_clust(index)) { ut_a(plan); - col_pos = dict_index_get_nth_col_pos(index, sym_node->col_no); + col_pos = dict_index_get_nth_col_pos(index, sym_node->col_no, + NULL); if (col_pos == ULINT_UNDEFINED) { diff --git a/storage/xtradb/pars/pars0pars.cc b/storage/xtradb/pars/pars0pars.cc index b116357c5b9..ea65d16e9dc 100644 --- a/storage/xtradb/pars/pars0pars.cc +++ b/storage/xtradb/pars/pars0pars.cc @@ -1232,7 +1232,8 @@ pars_process_assign_list( col_sym = assign_node->col; upd_field_set_field_no(upd_field, dict_index_get_nth_col_pos( - clust_index, col_sym->col_no), + clust_index, col_sym->col_no, + NULL), clust_index, NULL); upd_field->exp = assign_node->val; diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc index 54183759e8d..4596e2fb951 100644 --- a/storage/xtradb/row/row0log.cc +++ b/storage/xtradb/row/row0log.cc @@ -363,9 +363,9 @@ row_log_online_op( goto err_exit; } - ret = os_file_write( + ret = os_file_write_int_fd( "(modification log)", - OS_FILE_FROM_FD(log->fd), + log->fd, log->tail.block, byte_offset, srv_sort_buf_size); log->tail.blocks++; if (!ret) { @@ -479,9 +479,9 @@ row_log_table_close_func( goto err_exit; } - ret = os_file_write( + ret = os_file_write_int_fd( "(modification log)", - OS_FILE_FROM_FD(log->fd), + log->fd, log->tail.block, byte_offset, srv_sort_buf_size); log->tail.blocks++; if (!ret) { 
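Before the hunks that switch the online-log reads over to the new entry
point, a minimal usage sketch (illustrative only, not part of the patch):
a caller that holds nothing but a plain int descriptor, such as the
online-ALTER modification log above, now calls the instrumented _int_fd
wrapper instead of wrapping the descriptor with OS_FILE_FROM_FD at every
call site. The function name below is hypothetical.

/* Illustrative sketch: read one sort-buffer sized block from an online
log kept in a temporary file created by innobase_mysql_tmpfile(). */
static ibool
online_log_read_block_sketch(
	int		fd,	/* in: plain descriptor of the temp file */
	byte*		block,	/* out: buffer of srv_sort_buf_size bytes */
	os_offset_t	offset)	/* in: byte offset of the block */
{
	/* Under UNIV_PFS_IO this is expected to expand to
	pfs_os_file_read_no_error_handling_int_fd_func(), which reports
	the wait to performance schema around the raw
	os_file_read_no_error_handling_func() call. */
	return(os_file_read_no_error_handling_int_fd(
			fd, block, offset, srv_sort_buf_size));
}

The write path in the hunk above is the symmetric case and goes through
os_file_write_int_fd().
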
@@ -2609,11 +2609,10 @@ all_done: goto func_exit; } - success = os_file_read_no_error_handling( - OS_FILE_FROM_FD(index->online_log->fd), + success = os_file_read_no_error_handling_int_fd( + index->online_log->fd, index->online_log->head.block, ofs, srv_sort_buf_size); - if (!success) { fprintf(stderr, "InnoDB: unable to read temporary file" " for table %s\n", index->table_name); @@ -3436,8 +3435,8 @@ all_done: goto func_exit; } - success = os_file_read_no_error_handling( - OS_FILE_FROM_FD(index->online_log->fd), + success = os_file_read_no_error_handling_int_fd( + index->online_log->fd, index->online_log->head.block, ofs, srv_sort_buf_size); diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index 5139adf4746..691b6b1e8de 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -876,8 +876,9 @@ row_merge_read( } #endif /* UNIV_DEBUG */ - success = os_file_read_no_error_handling(OS_FILE_FROM_FD(fd), buf, + success = os_file_read_no_error_handling_int_fd(fd, buf, ofs, srv_sort_buf_size); + #ifdef POSIX_FADV_DONTNEED /* Each block is read exactly once. Free up the file cache. */ posix_fadvise(fd, ofs, srv_sort_buf_size, POSIX_FADV_DONTNEED); @@ -911,7 +912,7 @@ row_merge_write( DBUG_EXECUTE_IF("row_merge_write_failure", return(FALSE);); - ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), buf, ofs, buf_len); + ret = os_file_write_int_fd("(merge)", fd, buf, ofs, buf_len); #ifdef UNIV_DEBUG if (row_merge_print_block_write) { @@ -3134,14 +3135,21 @@ row_merge_file_create_low( performance schema */ struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_open_begin(&state, locker, innodb_file_temp_key, - PSI_FILE_OPEN, - "Innodb Merge Temp File", - __FILE__, __LINE__); + locker = PSI_FILE_CALL(get_thread_file_name_locker)( + &state, innodb_file_temp_key, PSI_FILE_OPEN, + "Innodb Merge Temp File", &locker); + if (locker != NULL) { + PSI_FILE_CALL(start_file_open_wait)(locker, + __FILE__, + __LINE__); + } #endif fd = innobase_mysql_tmpfile(path); #ifdef UNIV_PFS_IO - register_pfs_file_open_end(locker, fd); + if (locker != NULL) { + PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)( + locker, fd); + } #endif if (fd < 0) { @@ -3188,15 +3196,20 @@ row_merge_file_destroy_low( #ifdef UNIV_PFS_IO struct PSI_file_locker* locker = NULL; PSI_file_locker_state state; - register_pfs_file_io_begin(&state, locker, - fd, 0, PSI_FILE_CLOSE, - __FILE__, __LINE__); + locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( + &state, fd, PSI_FILE_CLOSE); + if (locker != NULL) { + PSI_FILE_CALL(start_file_wait)( + locker, 0, __FILE__, __LINE__); + } #endif if (fd >= 0) { close(fd); } #ifdef UNIV_PFS_IO - register_pfs_file_io_end(locker, 0); + if (locker != NULL) { + PSI_FILE_CALL(end_file_wait)(locker, 0); + } #endif } /*********************************************************************//** diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index d2821abdc2e..9f66d12283a 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -62,6 +62,9 @@ Created 12/19/1997 Heikki Tuuri #include "my_sys.h" /* DEBUG_SYNC_C */ #include "my_compare.h" /* enum icp_result */ +#include "thr_lock.h" +#include "handler.h" +#include "ha_innodb.h" /* Maximum number of rows to prefetch; MySQL interface has another parameter */ #define SEL_MAX_N_PREFETCH 16 @@ -2733,7 +2736,8 @@ row_sel_field_store_in_mysql_format_func( || !(templ->mysql_col_len % templ->mbmaxlen)); ut_ad(len * templ->mbmaxlen 
>= templ->mysql_col_len || (field_no == templ->icp_rec_field_no - && field->prefix_len > 0)); + && field->prefix_len > 0) + || templ->rec_field_is_prefix); ut_ad(!(field->prefix_len % templ->mbmaxlen)); if (templ->mbminlen == 1 && templ->mbmaxlen != 1) { @@ -2768,34 +2772,43 @@ row_sel_field_store_in_mysql_format_func( #ifdef UNIV_DEBUG /** Convert a field from Innobase format to MySQL format. */ -# define row_sel_store_mysql_field(m,p,r,i,o,f,t) \ - row_sel_store_mysql_field_func(m,p,r,i,o,f,t) +# define row_sel_store_mysql_field(m,p,r,i,o,f,t,c) \ + row_sel_store_mysql_field_func(m,p,r,i,o,f,t,c) #else /* UNIV_DEBUG */ /** Convert a field from Innobase format to MySQL format. */ -# define row_sel_store_mysql_field(m,p,r,i,o,f,t) \ - row_sel_store_mysql_field_func(m,p,r,o,f,t) +# define row_sel_store_mysql_field(m,p,r,i,o,f,t,c) \ + row_sel_store_mysql_field_func(m,p,r,o,f,t,c) #endif /* UNIV_DEBUG */ -/**************************************************************//** -Convert a field in the Innobase format to a field in the MySQL format. */ +/** Convert a field in the Innobase format to a field in the MySQL format. +@param[out] mysql_rec record in the MySQL format +@param[in,out] prebuilt prebuilt struct +@param[in] rec InnoDB record; must be protected + by a page latch +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets() +@param[in] field_no templ->rec_field_no or + templ->clust_rec_field_no + or templ->icp_rec_field_no + or sec field no if clust_templ_for_sec + is TRUE +@param[in] templ row template +@param[in] clust_templ_for_sec TRUE if rec belongs to secondary index + but prebuilt template is in clustered + index format and used only for end + range comparison. */ static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_field_func( -/*===========================*/ - byte* mysql_rec, /*!< out: record in the - MySQL format */ - row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct */ - const rec_t* rec, /*!< in: InnoDB record; - must be protected by - a page latch */ + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, #ifdef UNIV_DEBUG - const dict_index_t* index, /*!< in: index of rec */ + const dict_index_t* index, #endif - const ulint* offsets, /*!< in: array returned by - rec_get_offsets() */ - ulint field_no, /*!< in: templ->rec_field_no or - templ->clust_rec_field_no or - templ->icp_rec_field_no */ - const mysql_row_templ_t*templ) /*!< in: row template */ + const ulint* offsets, + ulint field_no, + const mysql_row_templ_t*templ, + bool clust_templ_for_sec) { const byte* data; ulint len; @@ -2804,10 +2817,12 @@ row_sel_store_mysql_field_func( ut_ad(templ); ut_ad(templ >= prebuilt->mysql_template); ut_ad(templ < &prebuilt->mysql_template[prebuilt->n_template]); - ut_ad(field_no == templ->clust_rec_field_no + ut_ad(clust_templ_for_sec + || field_no == templ->clust_rec_field_no || field_no == templ->rec_field_no || field_no == templ->icp_rec_field_no); - ut_ad(rec_offs_validate(rec, index, offsets)); + ut_ad(rec_offs_validate(rec, + clust_templ_for_sec == true ? prebuilt->index : index, offsets)); if (UNIV_UNLIKELY(rec_offs_nth_extern(offsets, field_no))) { @@ -2924,30 +2939,37 @@ row_sel_store_mysql_field_func( return(TRUE); } -/**************************************************************//** -Convert a row in the Innobase format to a row in the MySQL format. +/** Convert a row in the Innobase format to a row in the MySQL format. 
Note that the template in prebuilt may advise us to copy only a few columns to mysql_rec, other columns are left blank. All columns may not be needed in the query. +@param[out] mysql_rec row in the MySQL format +@param[in] prebuilt prebuilt structure +@param[in] rec Innobase record in the index + which was described in prebuilt's + template, or in the clustered index; + must be protected by a page latch +@param[in] rec_clust TRUE if the rec in the clustered index +@param[in] index index of rec +@param[in] offsets array returned by rec_get_offsets(rec) +@param[in] clust_templ_for_sec TRUE if rec belongs to secondary index + but the prebuilt->template is in + clustered index format and it is + used only for end range comparison @return TRUE on success, FALSE if not all columns could be retrieved */ static MY_ATTRIBUTE((warn_unused_result)) ibool row_sel_store_mysql_rec( -/*====================*/ - byte* mysql_rec, /*!< out: row in the MySQL format */ - row_prebuilt_t* prebuilt, /*!< in: prebuilt struct */ - const rec_t* rec, /*!< in: Innobase record in the index - which was described in prebuilt's - template, or in the clustered index; - must be protected by a page latch */ - ibool rec_clust, /*!< in: TRUE if rec is in the - clustered index instead of - prebuilt->index */ - const dict_index_t* index, /*!< in: index of rec */ - const ulint* offsets) /*!< in: array returned by - rec_get_offsets(rec) */ + byte* mysql_rec, + row_prebuilt_t* prebuilt, + const rec_t* rec, + ibool rec_clust, + const dict_index_t* index, + const ulint* offsets, + bool clust_templ_for_sec) { - ulint i; + ulint i; + std::vector template_col; ut_ad(rec_clust || index == prebuilt->index); ut_ad(!rec_clust || dict_index_is_clust(index)); @@ -2960,20 +2982,48 @@ row_sel_store_mysql_rec( if (UNIV_LIKELY_NULL(prebuilt->compress_heap)) mem_heap_empty(prebuilt->compress_heap); + if (clust_templ_for_sec) { + /* Store all clustered index field of + secondary index record. */ + for (i = 0; i < dict_index_get_n_fields( + prebuilt->index); i++) { + ulint sec_field = dict_index_get_nth_field_pos( + index, prebuilt->index, i); + template_col.push_back(sec_field); + } + } + for (i = 0; i < prebuilt->n_template; i++) { const mysql_row_templ_t*templ = &prebuilt->mysql_template[i]; - const ulint field_no + ulint field_no = rec_clust ? templ->clust_rec_field_no : templ->rec_field_no; /* We should never deliver column prefixes to MySQL, - except for evaluating innobase_index_cond(). */ + except for evaluating innobase_index_cond() and if the prefix + index is longer than the actual row data. 
*/ + ut_ad(dict_index_get_nth_field(index, field_no)->prefix_len - == 0); + == 0 || templ->rec_field_is_prefix); + + if (clust_templ_for_sec) { + std::vector::iterator it; + it = std::find(template_col.begin(), + template_col.end(), field_no); + + if (it == template_col.end()) { + continue; + } + + ut_ad(templ->rec_field_no == templ->clust_rec_field_no); + + field_no = it - template_col.begin(); + } if (!row_sel_store_mysql_field(mysql_rec, prebuilt, rec, index, offsets, - field_no, templ)) { + field_no, templ, + clust_templ_for_sec)) { return(FALSE); } } @@ -3063,6 +3113,8 @@ row_sel_get_clust_rec_for_mysql( dberr_t err; trx_t* trx; + os_atomic_increment_ulint(&srv_sec_rec_cluster_reads, 1); + *out_rec = NULL; trx = thr_get_trx(thr); @@ -3613,7 +3665,7 @@ row_search_idx_cond_check( if (!row_sel_store_mysql_field(mysql_rec, prebuilt, rec, prebuilt->index, offsets, templ->icp_rec_field_no, - templ)) { + templ, false)) { return(ICP_NO_MATCH); } } @@ -3634,7 +3686,7 @@ row_search_idx_cond_check( || dict_index_is_clust(prebuilt->index)) { if (!row_sel_store_mysql_rec( mysql_rec, prebuilt, rec, FALSE, - prebuilt->index, offsets)) { + prebuilt->index, offsets, false)) { ut_ad(dict_index_is_clust(prebuilt->index)); return(ICP_NO_MATCH); } @@ -3653,6 +3705,50 @@ row_search_idx_cond_check( return(result); } +/** Check the pushed down end range condition to avoid extra traversal +if records are not within view and also to avoid prefetching in the +cache buffer. +@param[in] mysql_rec record in MySQL format +@param[in,out] handler the MySQL handler performing the scan +@retval true if the row in mysql_rec is out of range +@retval false if the row in mysql_rec is in range */ +static +bool +row_search_end_range_check( + const byte* mysql_rec, + ha_innobase* handler) +{ + if (handler->end_range && + handler->compare_key_in_buffer(mysql_rec) > 0) { + return(true); + } + + return(false); +} + +/** Return the record field length in characters. +@param[in] col table column of the field +@param[in] field_no field number +@param[in] rec physical record +@param[in] offsets field offsets in the physical record + +@return field length in characters */ +static +size_t +rec_field_len_in_chars(const dict_col_t &col, + const ulint field_no, + const rec_t *rec, + const ulint *offsets) +{ + const ulint cset = dtype_get_charset_coll(col.prtype); + const CHARSET_INFO* cs = all_charsets[cset]; + ulint rec_field_len; + const char* rec_field = reinterpret_cast( + rec_get_nth_field( + rec, offsets, field_no, &rec_field_len)); + return(cs->cset->numchars(cs, rec_field, rec_field + rec_field_len)); +} + /********************************************************************//** Searches for rows in the database. This is used in the interface to MySQL. 
This function opens a cursor, and also implements fetch next @@ -3690,7 +3786,9 @@ row_search_for_mysql( trx_t* trx = prebuilt->trx; dict_index_t* clust_index; que_thr_t* thr; - const rec_t* rec; + const rec_t* prev_rec = NULL; + const rec_t* rec = NULL; + byte* end_range_cache = NULL; const rec_t* result_rec = NULL; const rec_t* clust_rec; dberr_t err = DB_SUCCESS; @@ -3715,6 +3813,8 @@ row_search_for_mysql( ulint* offsets = offsets_; ibool table_lock_waited = FALSE; byte* next_buf = 0; + ulint end_loop = 0; + bool use_clustered_index = false; rec_offs_init(offsets_); @@ -3838,6 +3938,10 @@ row_search_for_mysql( err = DB_SUCCESS; goto func_exit; + } else if (prebuilt->end_range == true) { + prebuilt->end_range = false; + err = DB_RECORD_NOT_FOUND; + goto func_exit; } if (prebuilt->fetch_cache_first > 0 @@ -3970,7 +4074,8 @@ row_search_for_mysql( if (!row_sel_store_mysql_rec( buf, prebuilt, - rec, FALSE, index, offsets)) { + rec, FALSE, index, + offsets, false)) { /* Only fresh inserts may contain incomplete externally stored columns. Pretend that such @@ -4224,11 +4329,62 @@ rec_loop: and neither can a record lock be placed on it: we skip such a record. */ + prev_rec = NULL; goto next_rec; } if (page_rec_is_supremum(rec)) { + /** Compare the last record of the page with end range + passed to InnoDB when there is no ICP and number of loops + in row_search_for_mysql for rows found but not + reporting due to search views etc. */ + if (prev_rec != NULL + && prebuilt->mysql_handler->end_range != NULL + && prebuilt->idx_cond == NULL + && end_loop >= 100) { + + dict_index_t* key_index = prebuilt->index; + bool clust_templ_for_sec = false; + + if (end_range_cache == NULL) { + end_range_cache = static_cast( + ut_malloc(prebuilt->mysql_row_len)); + } + + if (index != clust_index + && prebuilt->need_to_access_clustered) { + /** Secondary index record but the template + based on PK. */ + key_index = clust_index; + clust_templ_for_sec = true; + } + + /** Create offsets based on prebuilt index. */ + offsets = rec_get_offsets(prev_rec, prebuilt->index, + offsets, ULINT_UNDEFINED, &heap); + + if (row_sel_store_mysql_rec( + end_range_cache, prebuilt, prev_rec, + clust_templ_for_sec, key_index, offsets, + clust_templ_for_sec)) { + + if (row_search_end_range_check( + end_range_cache, + prebuilt->mysql_handler)) { + + /** In case of prebuilt->fetch, + set the error in prebuilt->end_range. */ + if (prebuilt->n_fetch_cached > 0) { + prebuilt->end_range = true; + } + + err = DB_RECORD_NOT_FOUND; + goto normal_return; + } + } + } + if (set_also_gap_locks && !(srv_locks_unsafe_for_binlog || trx->isolation_level <= TRX_ISO_READ_COMMITTED) @@ -4260,6 +4416,7 @@ rec_loop: /* A page supremum record cannot be in the result set: skip it now that we have placed a possible lock on it */ + prev_rec = NULL; goto next_rec; } @@ -4334,6 +4491,7 @@ wrong_offs: btr_pcur_move_to_last_on_page(pcur, &mtr); + prev_rec = NULL; goto next_rec; } } @@ -4362,10 +4520,13 @@ wrong_offs: fputs(". We try to skip the record.\n", stderr); + prev_rec = NULL; goto next_rec; } } + prev_rec = rec; + /* Note that we cannot trust the up_match value in the cursor at this place because we can arrive here after moving the cursor! 
Thus we have to recompare rec and search_tuple to determine if they @@ -4590,6 +4751,7 @@ no_gap_lock: did_semi_consistent_read = TRUE; rec = old_vers; + prev_rec = rec; break; default: @@ -4636,6 +4798,7 @@ no_gap_lock: } rec = old_vers; + prev_rec = rec; } } else { /* We are looking into a non-clustered index, @@ -4729,10 +4892,97 @@ locks_ok: } /* Get the clustered index record if needed, if we did not do the - search using the clustered index. */ + search using the clustered index... */ + + use_clustered_index = + (index != clust_index && prebuilt->need_to_access_clustered); + + if (use_clustered_index && prebuilt->n_template <= index->n_fields) { + /* ...but, perhaps avoid the clustered index lookup if + all of the following are true: + 1) all columns are in the secondary index + 2) all values for columns that are prefix-only + indexes are shorter than the prefix size + This optimization can avoid many IOs for certain schemas. + */ + bool row_contains_all_values = true; + unsigned int i; + for (i = 0; i < prebuilt->n_template; i++) { + /* Condition (1) from above: is the field in the + index (prefix or not)? */ + const mysql_row_templ_t* templ = + prebuilt->mysql_template + i; + ulint secondary_index_field_no = + templ->rec_prefix_field_no; + if (secondary_index_field_no == ULINT_UNDEFINED) { + row_contains_all_values = false; + break; + } + /* Condition (2) from above: if this is a + prefix, is this row's value size shorter + than the prefix? */ + if (templ->rec_field_is_prefix) { + ulint record_size = rec_offs_nth_size( + offsets, + secondary_index_field_no); + const dict_field_t *field = + dict_index_get_nth_field( + index, + secondary_index_field_no); + ut_a(field->prefix_len > 0); + if (record_size + < field->prefix_len / templ->mbmaxlen) { + + /* Record in bytes shorter than the + index prefix length in characters */ + continue; + + } else if (record_size * templ->mbminlen + >= field->prefix_len) { + + /* The shortest represantable string by + the byte length of the record is longer + than the maximum possible index + prefix. */ + row_contains_all_values = false; + break; + } else { - if (index != clust_index && prebuilt->need_to_access_clustered) { + /* The record could or could not fit + into the index prefix, calculate length + to find out */ + + if (rec_field_len_in_chars( + *field->col, + secondary_index_field_no, + rec, offsets) + >= (field->prefix_len + / templ->mbmaxlen)) { + + row_contains_all_values = false; + break; + } + } + } + } + /* If (1) and (2) were true for all columns above, use + rec_prefix_field_no instead of rec_field_no, and skip + the clustered lookup below. */ + if (row_contains_all_values) { + for (i = 0; i < prebuilt->n_template; i++) { + mysql_row_templ_t* templ = + prebuilt->mysql_template + i; + templ->rec_field_no = + templ->rec_prefix_field_no; + ut_a(templ->rec_field_no != ULINT_UNDEFINED); + } + use_clustered_index = false; + os_atomic_increment_ulint( + &srv_sec_rec_cluster_reads_avoided, 1); + } + } + if (use_clustered_index) { requires_clust_rec: ut_ad(index != clust_index); /* We use a 'goto' to the preceding label if a consistent @@ -4813,7 +5063,7 @@ requires_clust_rec: appropriate version of the clustered index record. */ if (!row_sel_store_mysql_rec( buf, prebuilt, result_rec, - TRUE, clust_index, offsets)) { + TRUE, clust_index, offsets, false)) { goto next_rec; } } @@ -4881,7 +5131,7 @@ requires_clust_rec: next_buf, prebuilt, result_rec, result_rec != rec, result_rec != rec ? 
clust_index : index, - offsets)) { + offsets, false)) { if (next_buf == buf) { ut_a(prebuilt->n_fetch_cached == 0); @@ -4936,7 +5186,7 @@ requires_clust_rec: buf, prebuilt, result_rec, result_rec != rec, result_rec != rec ? clust_index : index, - offsets)) { + offsets, false)) { /* Only fresh inserts may contain incomplete externally stored columns. Pretend that such records do @@ -4988,6 +5238,8 @@ idx_cond_failed: goto normal_return; next_rec: + end_loop++; + /* Reset the old and new "did semi-consistent read" flags. */ if (UNIV_UNLIKELY(prebuilt->row_read_type == ROW_READ_DID_SEMI_CONSISTENT)) { @@ -5174,6 +5426,11 @@ normal_return: func_exit: trx->op_info = ""; + + if (end_range_cache != NULL) { + ut_free(end_range_cache); + } + if (UNIV_LIKELY_NULL(heap)) { mem_heap_free(heap); } diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 24c5a9bd5d9..2153440886c 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. @@ -155,7 +155,8 @@ UNIV_INTERN unsigned long long srv_online_max_size; OS (provided we compiled Innobase with it in), otherwise we will use simulated aio we build below with threads. Currently we support native aio on windows and linux */ -UNIV_INTERN my_bool srv_use_native_aio = TRUE; +/* make srv_use_native_aio to be visible for other plugins */ +my_bool srv_use_native_aio = TRUE; UNIV_INTERN my_bool srv_numa_interleave = FALSE; #ifdef __WIN__ @@ -574,6 +575,12 @@ static ulint srv_main_shutdown_loops = 0; /** Log writes involving flush. */ static ulint srv_log_writes_and_flush = 0; +/** Number of times secondary index lookup triggered cluster lookup */ +ulint srv_sec_rec_cluster_reads = 0; + +/** Number of times prefix optimization avoided triggering cluster lookup */ +ulint srv_sec_rec_cluster_reads_avoided = 0; + /* This is only ever touched by the master thread. It records the time when the last flush of log file has happened. The master thread ensures that we flush the log files at least once per @@ -1895,6 +1902,12 @@ srv_export_innodb_status(void) } #endif /* UNIV_DEBUG */ + os_rmb; + export_vars.innodb_sec_rec_cluster_reads = + srv_sec_rec_cluster_reads; + export_vars.innodb_sec_rec_cluster_reads_avoided = + srv_sec_rec_cluster_reads_avoided; + mutex_exit(&srv_innodb_monitor_mutex); } @@ -2956,6 +2969,8 @@ DECLARE_THREAD(srv_master_thread)( /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ulint old_activity_count = srv_get_activity_count(); ulint old_ibuf_merge_activity_count @@ -3029,6 +3044,7 @@ suspend_thread: os_event_wait(slot->event); if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) { + my_thread_end(); os_thread_exit(NULL); } @@ -3115,6 +3131,8 @@ DECLARE_THREAD(srv_worker_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ulint tid_i = os_atomic_increment_ulint(&purge_tid_i, 1); @@ -3181,6 +3199,7 @@ DECLARE_THREAD(srv_worker_thread)( os_thread_pf(os_thread_get_curr_id())); #endif /* UNIV_DEBUG_THREAD_CREATION */ + my_thread_end(); /* We count the number of threads in os_thread_exit(). 
A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); @@ -3383,6 +3402,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( void* arg MY_ATTRIBUTE((unused))) /*!< in: a dummy parameter required by os_thread_create */ { + my_thread_init(); + srv_slot_t* slot; ulint n_total_purged = ULINT_UNDEFINED; @@ -3495,6 +3516,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( srv_release_threads(SRV_WORKER, srv_n_purge_threads - 1); } + my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit and not use return() to exit. */ os_thread_exit(NULL); @@ -3563,3 +3585,18 @@ srv_purge_wakeup(void) } } +/** Check whether given space id is undo tablespace id +@param[in] space_id space id to check +@return true if it is undo tablespace else false. */ +bool +srv_is_undo_tablespace( + ulint space_id) +{ + if (srv_undo_space_id_start == 0) { + return (false); + } + + return(space_id >= srv_undo_space_id_start + && space_id < (srv_undo_space_id_start + + srv_undo_tablespaces_open)); +} diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index c53faba88ba..ac634df06dd 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved. +Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2009, Percona Inc. @@ -110,6 +110,9 @@ UNIV_INTERN ibool srv_have_fullfsync = FALSE; /** TRUE if a raw partition is in use */ UNIV_INTERN ibool srv_start_raw_disk_in_use = FALSE; +/** UNDO tablespaces starts with space id. 
+ulint	srv_undo_space_id_start;
+
 /** TRUE if the server is being started, before
 rolling back any incomplete transactions */
 UNIV_INTERN ibool	srv_startup_is_before_trx_rollback_phase = FALSE;
@@ -125,7 +128,7 @@ SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */
 UNIV_INTERN enum srv_shutdown_state	srv_shutdown_state = SRV_SHUTDOWN_NONE;
 
 /** Files comprising the system tablespace */
-static os_file_t	files[1000];
+static pfs_os_file_t	files[1000];
 
 /** io_handler_thread parameters for thread identification */
 static ulint		n[SRV_MAX_N_IO_THREADS];
@@ -553,7 +556,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
 dberr_t
 create_log_file(
 /*============*/
-	os_file_t*	file,	/*!< out: file handle */
+	pfs_os_file_t*	file,	/*!< out: file handle */
 	const char*	name)	/*!< in: log file name */
 {
 	ibool		ret;
@@ -770,7 +773,7 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
 dberr_t
 open_log_file(
 /*==========*/
-	os_file_t*	file,	/*!< out: file handle */
+	pfs_os_file_t*	file,	/*!< out: file handle */
 	const char*	name,	/*!< in: log file name */
 	os_offset_t*	size)	/*!< out: file size */
 {
@@ -886,7 +889,7 @@ open_or_create_data_files(
 		    && os_file_get_last_error(false)
 			!= OS_FILE_ALREADY_EXISTS
 #ifdef UNIV_AIX
-			/* AIX 5.1 after security patch ML7 may have
+			/* AIX 5.1 after security patch ML7 may have
 			errno set to 0 here, which causes our function
 			to return 100; work around that AIX problem */
@@ -1183,7 +1186,7 @@ srv_undo_tablespace_create(
 	const char*	name,	/*!< in: tablespace name */
 	ulint		size)	/*!< in: tablespace size in pages */
 {
-	os_file_t	fh;
+	pfs_os_file_t	fh;
 	ibool		ret;
 	dberr_t		err = DB_SUCCESS;
@@ -1260,7 +1263,7 @@ srv_undo_tablespace_open(
 	const char*	name,	/*!< in: tablespace name */
 	ulint		space)	/*!< in: tablespace id */
 {
-	os_file_t	fh;
+	pfs_os_file_t	fh;
 	dberr_t		err = DB_ERROR;
 	ibool		ret;
 	ulint		flags;
@@ -1359,13 +1362,23 @@ srv_undo_tablespaces_init(
 	for (i = 0; create_new_db && i < n_conf_tablespaces; ++i) {
 		char	name[OS_FILE_MAX_PATH];
+		ulint	space_id = i + 1;
+
+		DBUG_EXECUTE_IF("innodb_undo_upgrade",
+				space_id = i + 3;);
 
 		ut_snprintf(
 			name, sizeof(name),
 			"%s%cundo%03lu",
-			srv_undo_dir, SRV_PATH_SEPARATOR, i + 1);
+			srv_undo_dir, SRV_PATH_SEPARATOR, space_id);
+
+		if (i == 0) {
+			srv_undo_space_id_start = space_id;
+			prev_space_id = srv_undo_space_id_start - 1;
+		}
+
+		undo_tablespace_ids[i] = space_id;
 
-		/* Undo space ids start from 1. */
 		err = srv_undo_tablespace_create(
 			name, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES);
@@ -1387,14 +1400,16 @@ srv_undo_tablespaces_init(
 	if (!create_new_db) {
 		n_undo_tablespaces = trx_rseg_get_n_undo_tablespaces(
 			undo_tablespace_ids);
-	} else {
-		n_undo_tablespaces = n_conf_tablespaces;
 
-		for (i = 1; i <= n_undo_tablespaces; ++i) {
-			undo_tablespace_ids[i - 1] = i;
+		if (n_undo_tablespaces != 0) {
+			srv_undo_space_id_start = undo_tablespace_ids[0];
+			prev_space_id = srv_undo_space_id_start - 1;
 		}
 
-		undo_tablespace_ids[i] = ULINT_UNDEFINED;
+	} else {
+		n_undo_tablespaces = n_conf_tablespaces;
+
+		undo_tablespace_ids[n_conf_tablespaces] = ULINT_UNDEFINED;
 	}
 
 	/* Open all the undo tablespaces that are currently in use. If we
@@ -1418,8 +1433,6 @@ srv_undo_tablespaces_init(
 		ut_a(undo_tablespace_ids[i] != 0);
 		ut_a(undo_tablespace_ids[i] != ULINT_UNDEFINED);
 
-		/* Undo space ids start from 1. */
-
 		err = srv_undo_tablespace_open(name, undo_tablespace_ids[i]);
 
 		if (err != DB_SUCCESS) {
@@ -1454,11 +1467,23 @@ srv_undo_tablespaces_init(
 			break;
 		}
 
+		/** Note the first undo tablespace id in case of
+		no active undo tablespace. */
+		if (n_undo_tablespaces == 0) {
+			srv_undo_space_id_start = i;
+		}
+
 		++n_undo_tablespaces;
 
 		++*n_opened;
 	}
 
+	/** Explicitly specify the srv_undo_space_id_start
+	as zero when there are no undo tablespaces. */
+	if (n_undo_tablespaces == 0) {
+		srv_undo_space_id_start = 0;
+	}
+
 	/* If the user says that there are fewer than what we find we
 	tolerate that discrepancy but not the inverse. Because there could
 	be unused undo tablespaces for future use. */
@@ -1503,10 +1528,11 @@ srv_undo_tablespaces_init(
 	mtr_start(&mtr);
 
 	/* The undo log tablespace */
-	for (i = 1; i <= n_undo_tablespaces; ++i) {
+	for (i = 0; i < n_undo_tablespaces; ++i) {
 
 		fsp_header_init(
-			i, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr);
+			undo_tablespace_ids[i],
+			SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr);
 	}
 
 	mtr_commit(&mtr);
@@ -1604,6 +1630,10 @@ innobase_start_or_create_for_mysql(void)
 	char*		logfile0 = NULL;
 	size_t		dirnamelen;
 
+	if (srv_force_recovery == SRV_FORCE_NO_LOG_REDO) {
+		srv_read_only_mode = 1;
+	}
+
 	high_level_read_only = srv_read_only_mode
 		|| srv_force_recovery > SRV_FORCE_NO_TRX_UNDO;
diff --git a/storage/xtradb/trx/trx0purge.cc b/storage/xtradb/trx/trx0purge.cc
index d9e40c5d6f5..d0d7ef2432e 100644
--- a/storage/xtradb/trx/trx0purge.cc
+++ b/storage/xtradb/trx/trx0purge.cc
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -696,7 +696,8 @@ trx_purge_get_rseg_with_min_trx_id(
 
 	/* We assume in purge of externally stored fields that space id is
 	in the range of UNDO tablespace space ids */
-	ut_a(purge_sys->rseg->space <= srv_undo_tablespaces_open);
+	ut_a(purge_sys->rseg->space == 0
+	     || srv_is_undo_tablespace(purge_sys->rseg->space));
 
 	zip_size = purge_sys->rseg->zip_size;
diff --git a/storage/xtradb/trx/trx0rec.cc b/storage/xtradb/trx/trx0rec.cc
index 868a8a6c0b6..200ce015a7d 100644
--- a/storage/xtradb/trx/trx0rec.cc
+++ b/storage/xtradb/trx/trx0rec.cc
@@ -781,7 +781,8 @@ trx_undo_page_report_modify(
 			}
 
 			pos = dict_index_get_nth_col_pos(index,
-							 col_no);
+							 col_no,
+							 NULL);
 			ptr += mach_write_compressed(ptr, pos);
 
 			/* Save the old value of field */
diff --git a/storage/xtradb/trx/trx0roll.cc b/storage/xtradb/trx/trx0roll.cc
index e1e253cbb76..09e8e018c4f 100644
--- a/storage/xtradb/trx/trx0roll.cc
+++ b/storage/xtradb/trx/trx0roll.cc
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -800,6 +800,7 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
 			/*!< in: a dummy parameter required by
 			os_thread_create */
 {
+	my_thread_init();
 	ut_ad(!srv_read_only_mode);
 
 #ifdef UNIV_PFS_THREAD
@@ -810,6 +811,7 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
 
 	trx_rollback_or_clean_is_active = false;
 
+	my_thread_end();
 	/* We count the number of threads in os_thread_exit(). A created
 	thread should always use that to exit and not use return() to exit. */
diff --git a/storage/xtradb/trx/trx0sys.cc b/storage/xtradb/trx/trx0sys.cc
index ccec5a7c113..4e8c50784bb 100644
--- a/storage/xtradb/trx/trx0sys.cc
+++ b/storage/xtradb/trx/trx0sys.cc
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -918,18 +918,12 @@ trx_sys_create_rsegs(
 		ulint	new_rsegs = n_rsegs - n_used;
 
 		for (i = 0; i < new_rsegs; ++i) {
-			ulint	space;
+			ulint	space_id;
+			space_id = (n_spaces == 0) ? 0
+				: (srv_undo_space_id_start + i % n_spaces);
 
-			/* Tablespace 0 is the system tablespace. All UNDO
-			log tablespaces start from 1. */
-
-			if (n_spaces > 0) {
-				space = (i % n_spaces) + 1;
-			} else {
-				space = 0; /* System tablespace */
-			}
-
-			if (trx_rseg_create(space) != NULL) {
+			/* Tablespace 0 is the system tablespace. */
+			if (trx_rseg_create(space_id) != NULL) {
 				++n_used;
 			} else {
 				break;
--
cgit v1.2.1

From 97c53cdfcc01a4f47d2757f5ca0b040f0e5a30e5 Mon Sep 17 00:00:00 2001
From: Vicențiu Ciorbaru
Date: Tue, 16 May 2017 14:26:11 +0300
Subject: 5.6.36-82.0

---
 storage/tokudb/CMakeLists.txt | 5 +-
 storage/tokudb/PerconaFT/buildheader/make_tdb.cc | 3 +
 storage/tokudb/PerconaFT/ft/ft-ops.cc | 393 ++++++++++++++-------
 storage/tokudb/PerconaFT/src/ydb.cc | 162 ++++++++-
 storage/tokudb/ha_tokudb.cc | 28 +-
 .../tokudb/mysql-test/tokudb/r/bug-1657908.result | 70 ++++
 storage/tokudb/mysql-test/tokudb/r/dir_cmd.result | 58 +++
 .../tokudb/mysql-test/tokudb/t/bug-1657908.test | 72 ++++
 storage/tokudb/mysql-test/tokudb/t/dir_cmd.test | 51 +++
 .../tokudb_backup/r/backup_master_info.result | 26 ++
 .../tokudb_backup/r/backup_master_state.result | 36 ++
 .../tokudb_backup/r/empty_slave_info_file.result | 1 +
 .../r/innodb_use_native_aio_enabled.result | 5 +
 .../tokudb_backup/r/rpl_safe_slave.result | 77 ++++
 .../tokudb_backup/r/rpl_tokudb_commit_sync.result | 59 ++++
 .../tokudb_backup/t/backup_master_info.test | 94 +++++
 .../tokudb_backup/t/backup_master_state.test | 87 +++++
 .../tokudb_backup/t/empty_slave_info_file.test | 23 ++
 .../t/innodb_use_native_aio_enabled-master.opt | 1 +
 .../t/innodb_use_native_aio_enabled.test | 19 +
 .../tokudb_backup/t/rpl_safe_slave-master.opt | 1 +
 .../tokudb_backup/t/rpl_safe_slave-slave.opt | 1 +
 .../mysql-test/tokudb_backup/t/rpl_safe_slave.cnf | 14 +
 .../mysql-test/tokudb_backup/t/rpl_safe_slave.inc | 112 ++++++
 .../mysql-test/tokudb_backup/t/rpl_safe_slave.test | 49 +++
 .../t/rpl_tokudb_commit_sync-slave.opt | 1 +
 .../tokudb_backup/t/rpl_tokudb_commit_sync.test | 72 ++++
 .../tokudb/mysql-test/tokudb_backup/t/suite.opt | 2 +-
 .../tokudb/mysql-test/tokudb_bugs/r/db233.result | 10 -
 .../mysql-test/tokudb_bugs/r/simple_icp.result | 4 +-
 storage/tokudb/mysql-test/tokudb_bugs/t/db233.test | 18 -
 .../tokudb/mysql-test/tokudb_bugs/t/leak172.test | 4 +-
 storage/tokudb/tokudb_dir_cmd.cc | 331 +++++++++++++++++
 storage/tokudb/tokudb_dir_cmd.h | 46 +++
 storage/tokudb/tokudb_sysvars.cc | 72 +++-
 35 files changed, 1817 insertions(+), 190 deletions(-)
 create mode 100644 storage/tokudb/mysql-test/tokudb/r/bug-1657908.result
 create mode 100644 storage/tokudb/mysql-test/tokudb/r/dir_cmd.result
 create mode 100644 storage/tokudb/mysql-test/tokudb/t/bug-1657908.test
 create mode 100644
storage/tokudb/mysql-test/tokudb/t/dir_cmd.test create mode 100644 storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result create mode 100644 storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result create mode 100644 storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result create mode 100644 storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result create mode 100644 storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result create mode 100644 storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt create mode 100644 storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test create mode 100644 storage/tokudb/tokudb_dir_cmd.cc create mode 100644 storage/tokudb/tokudb_dir_cmd.h (limited to 'storage') diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt index 311df22ca5f..2438b7ac3fe 100644 --- a/storage/tokudb/CMakeLists.txt +++ b/storage/tokudb/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(TOKUDB_VERSION 5.6.35-80.0) +SET(TOKUDB_VERSION 5.6.36-82.0) # PerconaFT only supports x86-64 and cmake-2.8.9+ IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT CMAKE_VERSION VERSION_LESS "2.8.9") @@ -111,7 +111,8 @@ SET(TOKUDB_SOURCES tokudb_background.cc tokudb_information_schema.cc tokudb_sysvars.cc - tokudb_thread.cc) + tokudb_thread.cc + tokudb_dir_cmd.cc) MYSQL_ADD_PLUGIN(tokudb ${TOKUDB_SOURCES} STORAGE_ENGINE MODULE_ONLY LINK_LIBRARIES tokufractaltree_static tokuportability_static ${ZLIB_LIBRARY} stdc++) SET(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} -flto -fuse-linker-plugin") diff --git a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc index 7ede78b3c0d..d0404b420f4 100644 --- a/storage/tokudb/PerconaFT/buildheader/make_tdb.cc +++ b/storage/tokudb/PerconaFT/buildheader/make_tdb.cc @@ -425,6 +425,9 @@ static void print_db_env_struct (void) { "bool (*set_dir_per_db)(DB_ENV *, bool new_val)", "bool (*get_dir_per_db)(DB_ENV *)", "const char *(*get_data_dir)(DB_ENV *env)", + "int (*dirtool_attach)(DB_ENV *, DB_TXN *, const char *, const char *)", + "int (*dirtool_detach)(DB_ENV *, DB_TXN *, const char *)", + "int (*dirtool_move)(DB_ENV *, DB_TXN *, const char *, const char *)", NULL}; sort_and_dump_fields("db_env", true, extra); diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc index ad9ecb1d074..63c6335dafd 100644 --- a/storage/tokudb/PerconaFT/ft/ft-ops.cc +++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc @@ -3900,25 +3900,34 @@ 
struct keyrange_compare_s { }; // TODO: Remove me, I'm boring -static int keyrange_compare(DBT const &kdbt, const struct keyrange_compare_s &s) { +static int keyrange_compare(DBT const &kdbt, + const struct keyrange_compare_s &s) { return s.ft->cmp(&kdbt, s.key); } -static void -keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node, - DBT* key_left, DBT* key_right, - int left_child_number, int right_child_number, uint64_t estimated_num_rows, - uint64_t *less, uint64_t* equal_left, uint64_t* middle, - uint64_t* equal_right, uint64_t* greater, bool* single_basement_node) +static void keysrange_in_leaf_partition(FT_HANDLE ft_handle, + FTNODE node, + DBT *key_left, + DBT *key_right, + int left_child_number, + int right_child_number, + uint64_t estimated_num_rows, + uint64_t *less, + uint64_t *equal_left, + uint64_t *middle, + uint64_t *equal_right, + uint64_t *greater, + bool *single_basement_node) // If the partition is in main memory then estimate the number // Treat key_left == NULL as negative infinity // Treat key_right == NULL as positive infinity { - paranoid_invariant(node->height == 0); // we are in a leaf + paranoid_invariant(node->height == 0); // we are in a leaf paranoid_invariant(!(key_left == NULL && key_right != NULL)); paranoid_invariant(left_child_number <= right_child_number); bool single_basement = left_child_number == right_child_number; - paranoid_invariant(!single_basement || (BP_STATE(node, left_child_number) == PT_AVAIL)); + paranoid_invariant(!single_basement || + (BP_STATE(node, left_child_number) == PT_AVAIL)); if (BP_STATE(node, left_child_number) == PT_AVAIL) { int r; // The partition is in main memory then get an exact count. @@ -3926,29 +3935,35 @@ keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node, BASEMENTNODE bn = BLB(node, left_child_number); uint32_t idx_left = 0; // if key_left is NULL then set r==-1 and idx==0. - r = key_left ? bn->data_buffer.find_zero(s_left, nullptr, nullptr, nullptr, &idx_left) : -1; + r = key_left + ? bn->data_buffer.find_zero( + s_left, nullptr, nullptr, nullptr, &idx_left) + : -1; *less = idx_left; - *equal_left = (r==0) ? 1 : 0; + *equal_left = (r == 0) ? 1 : 0; uint32_t size = bn->data_buffer.num_klpairs(); uint32_t idx_right = size; r = -1; if (single_basement && key_right) { struct keyrange_compare_s s_right = {ft_handle->ft, key_right}; - r = bn->data_buffer.find_zero(s_right, nullptr, nullptr, nullptr, &idx_right); + r = bn->data_buffer.find_zero( + s_right, nullptr, nullptr, nullptr, &idx_right); } *middle = idx_right - idx_left - *equal_left; - *equal_right = (r==0) ? 1 : 0; + *equal_right = (r == 0) ? 1 : 0; *greater = size - idx_right - *equal_right; } else { paranoid_invariant(!single_basement); uint32_t idx_left = estimated_num_rows / 2; if (!key_left) { - //Both nullptr, assume key_left belongs before leftmost entry, key_right belongs after rightmost entry + // Both nullptr, assume key_left belongs before leftmost entry, + // key_right belongs after rightmost entry idx_left = 0; paranoid_invariant(!key_right); } - // Assume idx_left and idx_right point to where key_left and key_right belong, (but are not there). + // Assume idx_left and idx_right point to where key_left and key_right + // belong, (but are not there). 
*less = idx_left; *equal_left = 0; *middle = estimated_num_rows - idx_left; @@ -3958,44 +3973,76 @@ keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node, *single_basement_node = single_basement; } -static int -toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node, - DBT* key_left, DBT* key_right, bool may_find_right, - uint64_t* less, uint64_t* equal_left, uint64_t* middle, - uint64_t* equal_right, uint64_t* greater, bool* single_basement_node, - uint64_t estimated_num_rows, - ftnode_fetch_extra *min_bfe, // set up to read a minimal read. - ftnode_fetch_extra *match_bfe, // set up to read a basement node iff both keys in it - struct unlockers *unlockers, ANCESTORS ancestors, const pivot_bounds &bounds) -// Implementation note: Assign values to less, equal, and greater, and then on the way out (returning up the stack) we add more values in. +static int toku_ft_keysrange_internal( + FT_HANDLE ft_handle, + FTNODE node, + DBT *key_left, + DBT *key_right, + bool may_find_right, + uint64_t *less, + uint64_t *equal_left, + uint64_t *middle, + uint64_t *equal_right, + uint64_t *greater, + bool *single_basement_node, + uint64_t estimated_num_rows, + ftnode_fetch_extra *min_bfe, // set up to read a minimal read. + ftnode_fetch_extra + *match_bfe, // set up to read a basement node iff both keys in it + struct unlockers *unlockers, + ANCESTORS ancestors, + const pivot_bounds &bounds) +// Implementation note: Assign values to less, equal, and greater, and then on +// the way out (returning up the stack) we add more values in. { int r = 0; // if KEY is NULL then use the leftmost key. - int left_child_number = key_left ? toku_ftnode_which_child (node, key_left, ft_handle->ft->cmp) : 0; - int right_child_number = node->n_children; // Sentinel that does not equal left_child_number. + int left_child_number = + key_left ? toku_ftnode_which_child(node, key_left, ft_handle->ft->cmp) + : 0; + int right_child_number = + node->n_children; // Sentinel that does not equal left_child_number. if (may_find_right) { - right_child_number = key_right ? toku_ftnode_which_child (node, key_right, ft_handle->ft->cmp) : node->n_children - 1; + right_child_number = + key_right + ? toku_ftnode_which_child(node, key_right, ft_handle->ft->cmp) + : node->n_children - 1; } uint64_t rows_per_child = estimated_num_rows / node->n_children; if (node->height == 0) { - keysrange_in_leaf_partition(ft_handle, node, key_left, key_right, left_child_number, right_child_number, - rows_per_child, less, equal_left, middle, equal_right, greater, single_basement_node); - - *less += rows_per_child * left_child_number; + keysrange_in_leaf_partition(ft_handle, + node, + key_left, + key_right, + left_child_number, + right_child_number, + rows_per_child, + less, + equal_left, + middle, + equal_right, + greater, + single_basement_node); + + *less += rows_per_child * left_child_number; if (*single_basement_node) { - *greater += rows_per_child * (node->n_children - left_child_number - 1); + *greater += + rows_per_child * (node->n_children - left_child_number - 1); } else { - *middle += rows_per_child * (node->n_children - left_child_number - 1); + *middle += + rows_per_child * (node->n_children - left_child_number - 1); } } else { // do the child. 
struct ancestors next_ancestors = {node, left_child_number, ancestors}; BLOCKNUM childblocknum = BP_BLOCKNUM(node, left_child_number); - uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, left_child_number); + uint32_t fullhash = + compute_child_fullhash(ft_handle->ft->cf, node, left_child_number); FTNODE childnode; bool msgs_applied = false; - bool child_may_find_right = may_find_right && left_child_number == right_child_number; + bool child_may_find_right = + may_find_right && left_child_number == right_child_number; r = toku_pin_ftnode_for_query( ft_handle, childblocknum, @@ -4006,27 +4053,45 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node, child_may_find_right ? match_bfe : min_bfe, false, &childnode, - &msgs_applied - ); + &msgs_applied); paranoid_invariant(!msgs_applied); if (r != TOKUDB_TRY_AGAIN) { assert_zero(r); - struct unlock_ftnode_extra unlock_extra = {ft_handle,childnode,false}; - struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers}; - const pivot_bounds next_bounds = bounds.next_bounds(node, left_child_number); - - r = toku_ft_keysrange_internal(ft_handle, childnode, key_left, key_right, child_may_find_right, - less, equal_left, middle, equal_right, greater, single_basement_node, - rows_per_child, min_bfe, match_bfe, &next_unlockers, &next_ancestors, next_bounds); + struct unlock_ftnode_extra unlock_extra = { + ft_handle, childnode, false}; + struct unlockers next_unlockers = { + true, unlock_ftnode_fun, (void *)&unlock_extra, unlockers}; + const pivot_bounds next_bounds = + bounds.next_bounds(node, left_child_number); + + r = toku_ft_keysrange_internal(ft_handle, + childnode, + key_left, + key_right, + child_may_find_right, + less, + equal_left, + middle, + equal_right, + greater, + single_basement_node, + rows_per_child, + min_bfe, + match_bfe, + &next_unlockers, + &next_ancestors, + next_bounds); if (r != TOKUDB_TRY_AGAIN) { assert_zero(r); - *less += rows_per_child * left_child_number; + *less += rows_per_child * left_child_number; if (*single_basement_node) { - *greater += rows_per_child * (node->n_children - left_child_number - 1); + *greater += rows_per_child * + (node->n_children - left_child_number - 1); } else { - *middle += rows_per_child * (node->n_children - left_child_number - 1); + *middle += rows_per_child * + (node->n_children - left_child_number - 1); } assert(unlockers->locked); @@ -4037,10 +4102,21 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node, return r; } -void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p) -// Effect: Return an estimate of the number of keys to the left, the number equal (to left key), number between keys, number equal to right key, and the number to the right of both keys. +void toku_ft_keysrange(FT_HANDLE ft_handle, + DBT *key_left, + DBT *key_right, + uint64_t *less_p, + uint64_t *equal_left_p, + uint64_t *middle_p, + uint64_t *equal_right_p, + uint64_t *greater_p, + bool *middle_3_exact_p) +// Effect: Return an estimate of the number of keys to the left, the number +// equal (to left key), number between keys, number equal to right key, and the +// number to the right of both keys. // The values are an estimate. -// If you perform a keyrange on two keys that are in the same basement, equal_less, middle, and equal_right will be exact. 
+// If you perform a keyrange on two keys that are in the same basement, +// equal_less, middle, and equal_right will be exact. // 4184: What to do with a NULL key? // key_left==NULL is treated as -infinity // key_right==NULL is treated as +infinity @@ -4048,10 +4124,21 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6 // key_right can be non-null only if key_left is non-null; { if (!key_left && key_right) { - // Simplify internals by only supporting key_right != null when key_left != null - // If key_right != null and key_left == null, then swap them and fix up numbers. - uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; - toku_ft_keysrange(ft_handle, key_right, nullptr, &less, &equal_left, &middle, &equal_right, &greater, middle_3_exact_p); + // Simplify internals by only supporting key_right != null when key_left + // != null + // If key_right != null and key_left == null, then swap them and fix up + // numbers. + uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, + greater = 0; + toku_ft_keysrange(ft_handle, + key_right, + nullptr, + &less, + &equal_left, + &middle, + &equal_right, + &greater, + middle_3_exact_p); *less_p = 0; *equal_left_p = 0; *middle_p = less; @@ -4064,98 +4151,132 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6 paranoid_invariant(!(!key_left && key_right)); ftnode_fetch_extra min_bfe; ftnode_fetch_extra match_bfe; - min_bfe.create_for_min_read(ft_handle->ft); // read pivot keys but not message buffers - match_bfe.create_for_keymatch(ft_handle->ft, key_left, key_right, false, false); // read basement node only if both keys in it. -try_again: + min_bfe.create_for_min_read( + ft_handle->ft); // read pivot keys but not message buffers + match_bfe.create_for_keymatch( + ft_handle->ft, + key_left, + key_right, + false, + false); // read basement node only if both keys in it. 
+try_again : { + uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; + bool single_basement_node = false; + FTNODE node = NULL; { - uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0; - bool single_basement_node = false; - FTNODE node = NULL; - { - uint32_t fullhash; - CACHEKEY root_key; - toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash); - toku_pin_ftnode( - ft_handle->ft, - root_key, - fullhash, - &match_bfe, - PL_READ, // may_modify_node, cannot change root during keyrange - &node, - true - ); - } + uint32_t fullhash; + CACHEKEY root_key; + toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash); + toku_pin_ftnode( + ft_handle->ft, + root_key, + fullhash, + &match_bfe, + PL_READ, // may_modify_node, cannot change root during keyrange + &node, + true); + } - struct unlock_ftnode_extra unlock_extra = {ft_handle,node,false}; - struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL}; + struct unlock_ftnode_extra unlock_extra = {ft_handle, node, false}; + struct unlockers unlockers = { + true, unlock_ftnode_fun, (void *)&unlock_extra, (UNLOCKERS)NULL}; - { - int r; - int64_t numrows = ft_handle->ft->in_memory_stats.numrows; - if (numrows < 0) - numrows = 0; // prevent appearance of a negative number - r = toku_ft_keysrange_internal (ft_handle, node, key_left, key_right, true, - &less, &equal_left, &middle, &equal_right, &greater, - &single_basement_node, numrows, - &min_bfe, &match_bfe, &unlockers, (ANCESTORS)NULL, pivot_bounds::infinite_bounds()); + { + int r; + int64_t numrows = ft_handle->ft->in_memory_logical_rows; + if (numrows < 0) + numrows = 0; // prevent appearance of a negative number + r = toku_ft_keysrange_internal(ft_handle, + node, + key_left, + key_right, + true, + &less, + &equal_left, + &middle, + &equal_right, + &greater, + &single_basement_node, + numrows, + &min_bfe, + &match_bfe, + &unlockers, + (ANCESTORS)NULL, + pivot_bounds::infinite_bounds()); + assert(r == 0 || r == TOKUDB_TRY_AGAIN); + if (r == TOKUDB_TRY_AGAIN) { + assert(!unlockers.locked); + goto try_again; + } + // May need to do a second query. + if (!single_basement_node && key_right != nullptr) { + // "greater" is stored in "middle" + invariant_zero(equal_right); + invariant_zero(greater); + uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0, + greater2 = 0; + bool ignore; + r = toku_ft_keysrange_internal(ft_handle, + node, + key_right, + nullptr, + false, + &less2, + &equal_left2, + &middle2, + &equal_right2, + &greater2, + &ignore, + numrows, + &min_bfe, + &match_bfe, + &unlockers, + (ANCESTORS) nullptr, + pivot_bounds::infinite_bounds()); assert(r == 0 || r == TOKUDB_TRY_AGAIN); if (r == TOKUDB_TRY_AGAIN) { assert(!unlockers.locked); goto try_again; } - // May need to do a second query. 
- if (!single_basement_node && key_right != nullptr) { - // "greater" is stored in "middle" - invariant_zero(equal_right); - invariant_zero(greater); - uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0, greater2 = 0; - bool ignore; - r = toku_ft_keysrange_internal (ft_handle, node, key_right, nullptr, false, - &less2, &equal_left2, &middle2, &equal_right2, &greater2, - &ignore, numrows, - &min_bfe, &match_bfe, &unlockers, (ANCESTORS)nullptr, pivot_bounds::infinite_bounds()); - assert(r == 0 || r == TOKUDB_TRY_AGAIN); - if (r == TOKUDB_TRY_AGAIN) { - assert(!unlockers.locked); - goto try_again; - } - invariant_zero(equal_right2); - invariant_zero(greater2); - // Update numbers. - // less is already correct. - // equal_left is already correct. - - // "middle" currently holds everything greater than left_key in first query - // 'middle2' currently holds everything greater than right_key in second query - // 'equal_left2' is how many match right_key - - // Prevent underflow. - if (middle >= equal_left2 + middle2) { - middle -= equal_left2 + middle2; - } else { - middle = 0; - } - equal_right = equal_left2; - greater = middle2; + invariant_zero(equal_right2); + invariant_zero(greater2); + // Update numbers. + // less is already correct. + // equal_left is already correct. + + // "middle" currently holds everything greater than left_key in + // first query + // 'middle2' currently holds everything greater than right_key in + // second query + // 'equal_left2' is how many match right_key + + // Prevent underflow. + if (middle >= equal_left2 + middle2) { + middle -= equal_left2 + middle2; + } else { + middle = 0; } + equal_right = equal_left2; + greater = middle2; } - assert(unlockers.locked); - toku_unpin_ftnode_read_only(ft_handle->ft, node); - if (!key_right) { - paranoid_invariant_zero(equal_right); - paranoid_invariant_zero(greater); - } - if (!key_left) { - paranoid_invariant_zero(less); - paranoid_invariant_zero(equal_left); - } - *less_p = less; - *equal_left_p = equal_left; - *middle_p = middle; - *equal_right_p = equal_right; - *greater_p = greater; - *middle_3_exact_p = single_basement_node; } + assert(unlockers.locked); + toku_unpin_ftnode_read_only(ft_handle->ft, node); + if (!key_right) { + paranoid_invariant_zero(equal_right); + paranoid_invariant_zero(greater); + } + if (!key_left) { + paranoid_invariant_zero(less); + paranoid_invariant_zero(equal_left); + } + *less_p = less; + *equal_left_p = equal_left; + *middle_p = middle; + *equal_right_p = equal_right; + *greater_p = greater; + *middle_3_exact_p = single_basement_node; +} } struct get_key_after_bytes_iterate_extra { diff --git a/storage/tokudb/PerconaFT/src/ydb.cc b/storage/tokudb/PerconaFT/src/ydb.cc index 3341f6d76c6..6dd8564e6a1 100644 --- a/storage/tokudb/PerconaFT/src/ydb.cc +++ b/storage/tokudb/PerconaFT/src/ydb.cc @@ -70,6 +70,8 @@ const char *toku_copyright_string = "Copyright (c) 2006, 2015, Percona and/or it #include "util/status.h" #include "util/context.h" +#include + // Include ydb_lib.cc here so that its constructor/destructor gets put into // ydb.o, to make sure they don't get erased at link time (when linking to // a static libtokufractaltree.a that was compiled with gcc). See #5094. 
@@ -1314,6 +1316,159 @@ static const char *env_get_data_dir(DB_ENV *env) { return env->i->real_data_dir; } +static int env_dirtool_attach(DB_ENV *env, + DB_TXN *txn, + const char *dname, + const char *iname) { + int r; + DBT dname_dbt; + DBT iname_dbt; + + HANDLE_PANICKED_ENV(env); + if (!env_opened(env)) { + return EINVAL; + } + HANDLE_READ_ONLY_TXN(txn); + toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1); + toku_fill_dbt(&iname_dbt, iname, strlen(iname) + 1); + + r = toku_db_put(env->i->directory, + txn, + &dname_dbt, + &iname_dbt, + 0, + true); + return r; +} + +static int env_dirtool_detach(DB_ENV *env, + DB_TXN *txn, + const char *dname) { + int r; + DBT dname_dbt; + DBT old_iname_dbt; + + HANDLE_PANICKED_ENV(env); + if (!env_opened(env)) { + return EINVAL; + } + HANDLE_READ_ONLY_TXN(txn); + + toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1); + toku_init_dbt_flags(&old_iname_dbt, DB_DBT_REALLOC); + + r = toku_db_get(env->i->directory, + txn, + &dname_dbt, + &old_iname_dbt, + DB_SERIALIZABLE); // allocates memory for iname + if (r == DB_NOTFOUND) + return EEXIST; + toku_free(old_iname_dbt.data); + + r = toku_db_del(env->i->directory, txn, &dname_dbt, DB_DELETE_ANY, true); + + return r; +} + +static int env_dirtool_move(DB_ENV *env, + DB_TXN *txn, + const char *old_dname, + const char *new_dname) { + int r; + DBT old_dname_dbt; + DBT new_dname_dbt; + DBT iname_dbt; + + HANDLE_PANICKED_ENV(env); + if (!env_opened(env)) { + return EINVAL; + } + HANDLE_READ_ONLY_TXN(txn); + + toku_fill_dbt(&old_dname_dbt, old_dname, strlen(old_dname) + 1); + toku_fill_dbt(&new_dname_dbt, new_dname, strlen(new_dname) + 1); + toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC); + + r = toku_db_get(env->i->directory, + txn, + &old_dname_dbt, + &iname_dbt, + DB_SERIALIZABLE); // allocates memory for iname + if (r == DB_NOTFOUND) + return EEXIST; + + r = toku_db_del( + env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true); + if (r != 0) + goto exit; + + r = toku_db_put( + env->i->directory, txn, &new_dname_dbt, &iname_dbt, 0, true); + +exit: + toku_free(iname_dbt.data); + return r; +} + +static int locked_env_op(DB_ENV *env, + DB_TXN *txn, + std::function f) { + int ret, r; + HANDLE_READ_ONLY_TXN(txn); + HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn); + + DB_TXN *child_txn = NULL; + int using_txns = env->i->open_flags & DB_INIT_TXN; + if (using_txns) { + ret = toku_txn_begin(env, txn, &child_txn, 0); + lazy_assert_zero(ret); + } + + // cannot begin a checkpoint + toku_multi_operation_client_lock(); + r = f(child_txn); + toku_multi_operation_client_unlock(); + + if (using_txns) { + if (r == 0) { + ret = locked_txn_commit(child_txn, 0); + lazy_assert_zero(ret); + } else { + ret = locked_txn_abort(child_txn); + lazy_assert_zero(ret); + } + } + return r; + +} + +static int locked_env_dirtool_attach(DB_ENV *env, + DB_TXN *txn, + const char *dname, + const char *iname) { + auto f = std::bind( + env_dirtool_attach, env, std::placeholders::_1, dname, iname); + return locked_env_op(env, txn, f); +} + +static int locked_env_dirtool_detach(DB_ENV *env, + DB_TXN *txn, + const char *dname) { + auto f = std::bind( + env_dirtool_detach, env, std::placeholders::_1, dname); + return locked_env_op(env, txn, f); +} + +static int locked_env_dirtool_move(DB_ENV *env, + DB_TXN *txn, + const char *old_dname, + const char *new_dname) { + auto f = std::bind( + env_dirtool_move, env, std::placeholders::_1, old_dname, new_dname); + return locked_env_op(env, txn, f); +} + static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char 
*fname, const char *dbname, uint32_t flags); static int @@ -2646,6 +2801,9 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) { #define SENV(name) result->name = locked_env_ ## name SENV(dbremove); SENV(dbrename); + SENV(dirtool_attach); + SENV(dirtool_detach); + SENV(dirtool_move); //SENV(set_noticecall); #undef SENV #define USENV(name) result->name = env_ ## name @@ -2975,8 +3133,10 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u if (txn && r) { if (r == EMFILE || r == ENFILE) r = toku_ydb_do_error(env, r, "toku dbremove failed because open file limit reached\n"); - else + else if (r != ENOENT) r = toku_ydb_do_error(env, r, "toku dbremove failed\n"); + else + r = 0; goto exit; } if (txn) { diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index 14d278a47d6..15276f2fb85 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -5254,17 +5254,17 @@ int ha_tokudb::fill_range_query_buf( DEBUG_SYNC(ha_thd(), "tokudb_icp_asc_scan_out_of_range"); goto cleanup; } else if (result == ICP_NO_MATCH) { - // if we are performing a DESC ICP scan and have no end_range - // to compare to stop using ICP filtering as there isn't much more - // that we can do without going through contortions with remembering - // and comparing key parts. + // Optimizer change for MyRocks also benefits us here in TokuDB as + // opt_range.cc QUICK_SELECT::get_next now sets end_range during + // descending scan. We should not ever hit this condition, but + // leaving this code in to prevent any possibility of a descending + // scan to the beginning of an index and catch any possibility + // in debug builds with an assertion + assert_debug(!(!end_range && direction < 0)); if (!end_range && direction < 0) { - cancel_pushed_idx_cond(); - DEBUG_SYNC(ha_thd(), "tokudb_icp_desc_scan_invalidate"); } - error = TOKUDB_CURSOR_CONTINUE; goto cleanup; } @@ -6122,7 +6122,6 @@ int ha_tokudb::info(uint flag) { stats.records = share->row_count() + share->rows_from_locked_table; stats.deleted = 0; if (!(flag & HA_STATUS_NO_LOCK)) { - uint64_t num_rows = 0; error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd()); if (error) { @@ -6132,20 +6131,13 @@ int ha_tokudb::info(uint flag) { // we should always have a primary key assert_always(share->file != NULL); - error = estimate_num_rows(share->file, &num_rows, txn); - if (error == 0) { - share->set_row_count(num_rows, false); - stats.records = num_rows; - } else { - goto cleanup; - } - DB_BTREE_STAT64 dict_stats; error = share->file->stat64(share->file, txn, &dict_stats); if (error) { goto cleanup; } - + share->set_row_count(dict_stats.bt_ndata, false); + stats.records = dict_stats.bt_ndata; stats.create_time = dict_stats.bt_create_time_sec; stats.update_time = dict_stats.bt_modify_time_sec; stats.check_time = dict_stats.bt_verify_time_sec; @@ -7841,7 +7833,7 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range* // As a result, equal may be 0 and greater may actually be equal+greater // So, we call key_range64 on the key, and the key that is after it. 
if (!start_key && !end_key) { - error = estimate_num_rows(kfile, &rows, transaction); + error = estimate_num_rows(share->file, &rows, transaction); if (error) { ret_val = HA_TOKUDB_RANGE_COUNT; goto cleanup; diff --git a/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result b/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result new file mode 100644 index 00000000000..1d86478d833 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/bug-1657908.result @@ -0,0 +1,70 @@ +SET GLOBAL tokudb_dir_per_db=ON; +CREATE PROCEDURE create_table() +BEGIN +CREATE TABLE test.t1 ( +a INT +) ENGINE = TokuDB +PARTITION BY RANGE (a) +(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB, +PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB, +PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB, +PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB +); +END| +### Create partitioned table +CALL create_table(); +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t1_P_p100_main_id.tokudb +t1_P_p100_status_id.tokudb +t1_P_p300_main_id.tokudb +t1_P_p300_status_id.tokudb +t1_P_p400_main_id.tokudb +t1_P_p400_status_id.tokudb +t1_P_p_to_del_main_id.tokudb +t1_P_p_to_del_status_id.tokudb +### Stop server +### Remove 'main' file of one of the partitions +### Start server +### Make sure 'main' partition file is deleted +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t1_P_p100_main_id.tokudb +t1_P_p100_status_id.tokudb +t1_P_p300_main_id.tokudb +t1_P_p300_status_id.tokudb +t1_P_p400_main_id.tokudb +t1_P_p400_status_id.tokudb +t1_P_p_to_del_status_id.tokudb +### Make sure the table still exists +SHOW TABLES; +Tables_in_test +t1 +### Drop table +DROP TABLE t1; +### Make sure the table is dropped +SHOW TABLES; +Tables_in_test +### Check what files still exist after DROP TABLE +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +### Remove the rest of the files +### Make sure there are no tokudb files +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +### Create the same table once more +CALL create_table(); +## Looking for *.tokudb files in data_dir +## Looking for *.tokudb files in data_dir/test +t1_P_p100_main_id.tokudb +t1_P_p100_status_id.tokudb +t1_P_p300_main_id.tokudb +t1_P_p300_status_id.tokudb +t1_P_p400_main_id.tokudb +t1_P_p400_status_id.tokudb +t1_P_p_to_del_main_id.tokudb +t1_P_p_to_del_status_id.tokudb +### Restore state +DROP TABLE t1; +DROP PROCEDURE create_table; +SET GLOBAL tokudb_dir_per_db=default; diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result b/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result new file mode 100644 index 00000000000..dd3b693db49 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/r/dir_cmd.result @@ -0,0 +1,58 @@ +SET GLOBAL tokudb_dir_per_db = ON; +SET tokudb_dir_cmd = "attach test_dname_1 test_iname_1"; +SET tokudb_dir_cmd = "attach test_dname_2 test_iname_2"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +test_dname_1 test_iname_1 +test_dname_2 test_iname_2 +SET tokudb_dir_cmd = "detach test_dname_1"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +test_dname_2 test_iname_2 +SET tokudb_dir_cmd = "move test_dname_2 test_dname_3"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name 
+test_dname_3 test_iname_2 +SET tokudb_dir_cmd = "detach test_dname_3"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +CREATE TABLE t1(a int) ENGINE=tokudb; +INSERT INTO t1 (a) VALUES (10); +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +./test/t1-main ./test/t1_main_id.tokudb +./test/t1-status ./test/t1_status_id.tokudb +SET tokudb_dir_cmd = "attach ./test/t1-main test/t1-main-renamed.tokudb"; +SELECT dictionary_name, internal_file_name +FROM information_schema.TokuDB_file_map; +dictionary_name internal_file_name +./test/t1-main test/t1-main-renamed.tokudb +./test/t1-status ./test/t1_status_id.tokudb +### rename t1_main_id.tokudb to t1-main-renamed.tokudb +SELECT * FROM t1; +a +10 +### Test for errors notification +SET tokudb_dir_cmd = "detach foo"; +ERROR 42000: Variable 'tokudb_dir_cmd' can't be set to the value of 'detach foo' +SELECT @@tokudb_dir_cmd_last_error; +@@tokudb_dir_cmd_last_error +17 +SELECT @@tokudb_dir_cmd_last_error_string; +@@tokudb_dir_cmd_last_error_string +detach command error +SET @@tokudb_dir_cmd_last_error_string = "blablabla"; +SELECT @@tokudb_dir_cmd_last_error_string; +@@tokudb_dir_cmd_last_error_string +blablabla +SET STATEMENT tokudb_dir_cmd_last_error_string = "statement_blablabla" FOR +SELECT @@tokudb_dir_cmd_last_error_string; +@@tokudb_dir_cmd_last_error_string +statement_blablabla +DROP TABLE t1; +SET GLOBAL tokudb_dir_per_db = default; diff --git a/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test b/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test new file mode 100644 index 00000000000..3c7b602b96d --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test @@ -0,0 +1,72 @@ +# See https://bugs.launchpad.net/percona-server/+bug/1657908 + +source include/have_tokudb.inc; + +SET GLOBAL tokudb_dir_per_db=ON; + +--let $DB= test +--let $DATADIR= `SELECT @@datadir` + +--delimiter | +CREATE PROCEDURE create_table() +BEGIN +CREATE TABLE test.t1 ( + a INT +) ENGINE = TokuDB +PARTITION BY RANGE (a) +(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB, + PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB, + PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB, + PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB +); +END| +--delimiter ; + +--echo ### Create partitioned table +CALL create_table(); +--source dir_per_db_show_table_files.inc + +--echo ### Stop server +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--shutdown_server +--source include/wait_until_disconnected.inc + +--echo ### Remove 'main' file of one of the partitions +--remove_files_wildcard $DATADIR/$DB t1_P_p_to_del_main_*.tokudb + +--echo ### Start server +--enable_reconnect +--exec echo "restart: --loose-tokudb-dir-per-db=ON" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +--source include/wait_until_connected_again.inc + +--echo ### Make sure 'main' partition file is deleted +--source dir_per_db_show_table_files.inc + +--echo ### Make sure the table still exists +SHOW TABLES; + +--echo ### Drop table +# error 1051 was here before the fix +DROP TABLE t1; + +--echo ### Make sure the table is dropped +SHOW TABLES; + +--echo ### Check what files still exist after DROP TABLE +--source dir_per_db_show_table_files.inc + +--echo ### Remove the rest of the files +--remove_files_wildcard $DATADIR/$DB *.tokudb + +--echo ### Make sure there are no tokudb files +--source dir_per_db_show_table_files.inc + +--echo ### Create 
the same table once more +# engine error 17 (EEXIST) was here before the fix +CALL create_table(); +--source dir_per_db_show_table_files.inc + +--echo ### Restore state +DROP TABLE t1; +DROP PROCEDURE create_table; +SET GLOBAL tokudb_dir_per_db=default; diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test b/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test new file mode 100644 index 00000000000..102e8217e2b --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test @@ -0,0 +1,51 @@ +source include/have_tokudb.inc; + +--let $MYSQL_DATADIR=`select @@datadir` + +SET GLOBAL tokudb_dir_per_db = ON; + +SET tokudb_dir_cmd = "attach test_dname_1 test_iname_1"; +SET tokudb_dir_cmd = "attach test_dname_2 test_iname_2"; +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +SET tokudb_dir_cmd = "detach test_dname_1"; +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +SET tokudb_dir_cmd = "move test_dname_2 test_dname_3"; +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +SET tokudb_dir_cmd = "detach test_dname_3"; +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +CREATE TABLE t1(a int) ENGINE=tokudb; +INSERT INTO t1 (a) VALUES (10); +--source include/table_files_replace_pattern.inc +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +SET tokudb_dir_cmd = "attach ./test/t1-main test/t1-main-renamed.tokudb"; +--source include/table_files_replace_pattern.inc +SELECT dictionary_name, internal_file_name + FROM information_schema.TokuDB_file_map; + +--echo ### rename t1_main_id.tokudb to t1-main-renamed.tokudb +--exec mv $MYSQL_DATADIR/test/t1_main_*.tokudb $MYSQL_DATADIR/test/t1-main-renamed.tokudb + +SELECT * FROM t1; + +--echo ### Test for errors notification +--error 1231 +SET tokudb_dir_cmd = "detach foo"; +SELECT @@tokudb_dir_cmd_last_error; +SELECT @@tokudb_dir_cmd_last_error_string; +SET @@tokudb_dir_cmd_last_error_string = "blablabla"; +SELECT @@tokudb_dir_cmd_last_error_string; +SET STATEMENT tokudb_dir_cmd_last_error_string = "statement_blablabla" FOR + SELECT @@tokudb_dir_cmd_last_error_string; + +DROP TABLE t1; +SET GLOBAL tokudb_dir_per_db = default; diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result new file mode 100644 index 00000000000..992a828e287 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_info.result @@ -0,0 +1,26 @@ +### +# Test for binlog position +##### +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. 
+[connection master] +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; +Backup +include/filter_file.inc +### tokubackup_slave_info content: +host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: , channel name: +### +# Test for gtid set +##### +include/rpl_set_gtid_mode.inc +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; +Backup +include/filter_file.inc +### tokubackup_slave_info content: +host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: ####, channel name: +include/rpl_set_gtid_mode.inc +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result new file mode 100644 index 00000000000..072dfff448b --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/backup_master_state.result @@ -0,0 +1,36 @@ +### Create backup dir +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +### Check for settings +SELECT @@gtid_mode; +@@gtid_mode +OFF +### Generate some binlog events +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; +### Master backup +include/filter_file.inc +### tokubackup_binlog_info content: +filename: ####, position: ####, gtid_mode: OFF, GTID of last change: +### Delete backup dir +### Create backup dir +### GTID-mode on +include/rpl_set_gtid_mode.inc +### Check for settings +SELECT @@gtid_mode; +@@gtid_mode +ON +### Generate some binlog events +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; +### Master backup +include/filter_file.inc +### tokubackup_binlog_info content: +filename: ####, position: ####, gtid_mode: ON, GTID of last change: ##### +### Delete backup dir +### GTID-mode off +include/rpl_set_gtid_mode.inc +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result b/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result new file mode 100644 index 00000000000..a0af40d80cc --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/empty_slave_info_file.result @@ -0,0 +1 @@ +Backup diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result b/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result new file mode 100644 index 00000000000..94e113fc87d --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/innodb_use_native_aio_enabled.result @@ -0,0 +1,5 @@ +SELECT @@innodb_use_native_aio; +@@innodb_use_native_aio +1 +SET SESSION tokudb_backup_dir='MYSQL_TMP_DIR/tokudb_backup'; +ERROR 42000: Variable 'tokudb_backup_dir' can't be set to the value of 'MYSQL_TMP_DIR/tokudb_backup' diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result new file mode 100644 index 00000000000..13b5915354f --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_safe_slave.result @@ -0,0 +1,77 @@ +### +# Master-slave test +#### +include/rpl_init.inc [topology=1->2] +Warnings: +Note #### Sending passwords in plain text 
without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +### Create temp table on master +CREATE TEMPORARY TABLE t1 (a INT); +include/sync_slave_sql_with_master.inc +### Setup debug_sync points and prepare for slave backup +SET SESSION debug="+d,debug_sync_abort_on_timeout"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +SET DEBUG_SYNC= 'tokudb_backup_wait_for_safe_slave_entered SIGNAL sse WAIT_FOR sse_continue'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_begin SIGNAL ttlb WAIT_FOR ttlb_continue'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_slave_started SIGNAL ttlss WAIT_FOR ttlss_continue EXECUTE 2'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_end SIGNAL ttle WAIT_FOR ttle_continue'; +### Turn-on safe-slave option +SET GLOBAL tokudb_backup_safe_slave=ON; +SET GLOBAL tokudb_backup_safe_slave_timeout=30; +### Start slave backup +SET SESSION debug="+d,debug_sync_abort_on_timeout"; +### Wait for safe slave function to start +SET DEBUG_SYNC = "now WAIT_FOR sse"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +### Wait for safe slave loop start +SET DEBUG_SYNC = "now SIGNAL sse_continue WAIT_FOR ttlb"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +### Wait for safe thread loop point just after slave sql thread start 1 +SET DEBUG_SYNC = "now SIGNAL ttlb_continue WAIT_FOR ttlss"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +### Wait for safe thread loop end +SET DEBUG_SYNC = "now SIGNAL ttlss_continue WAIT_FOR ttle"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 1 +### Wait for safe thread loop point just after slave sql thread start 2 +SET DEBUG_SYNC = "now SIGNAL ttle_continue WAIT_FOR ttlss"; +### Drop temp table on master +DROP TABLE t1; +### and syncronize slave +include/sync_slave_sql_with_master.inc +SHOW STATUS LIKE 'Slave_open_temp_tables'; +Variable_name Value +Slave_open_temp_tables 0 +### Continue backup +SET DEBUG_SYNC = "now SIGNAL ttlss_continue"; +## Reset debug_sync points +SET DEBUG_SYNC = "RESET"; +### Wait for backup finish +include/filter_file.inc +### Slave tokubackup_slave_info content: +host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: , channel name: +### Delete slave backup dir +### Turn-off safe-slave option for slave +SET GLOBAL tokudb_backup_safe_slave=default; +SET GLOBAL tokudb_backup_safe_slave_timeout=default; +### Turn-on safe-slave option for master +SET GLOBAL tokudb_backup_safe_slave=ON; +SET GLOBAL tokudb_backup_safe_slave_timeout=30; +### Backup master +### Turn-off safe-slave option for master +SET GLOBAL tokudb_backup_safe_slave=default; +SET GLOBAL tokudb_backup_safe_slave_timeout=default; +include/filter_file.inc +### Master tokubackup_binlog_info content: +filename: ####, position: ####, gtid_mode: OFF, GTID of last change: +### Delete master backup dir +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result new file mode 
100644 index 00000000000..50508f073ab --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/r/rpl_tokudb_commit_sync.result @@ -0,0 +1,59 @@ +include/master-slave.inc +Warnings: +Note #### Sending passwords in plain text without SSL/TLS is extremely insecure. +Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information. +[connection master] +### Create some data on master +CREATE TABLE t1(a INT, b INT, PRIMARY KEY (a)) ENGINE=TokuDB; +INSERT INTO t1 SET a=100, b=100; +INSERT INTO t1 SET a=200, b=100; +INSERT INTO t1 SET a=300, b=100; +INSERT INTO t1 SET a=400, b=100; +INSERT INTO t1 SET a=500, b=100; +UPDATE t1 SET b = 200 WHERE a = 200; +DELETE FROM t1 WHERE a = 100; +SELECT * FROM t1; +a b +200 200 +300 100 +400 100 +500 100 +### Check for slave options +SELECT @@tokudb_commit_sync; +@@tokudb_commit_sync +0 +SELECT @@tokudb_fsync_log_period; +@@tokudb_fsync_log_period +1000000 +### Check data on slave after sync +SELECT * FROM t1; +a b +200 200 +300 100 +400 100 +500 100 +### Do backup on slave +### Check for errors +SELECT @@session.tokudb_backup_last_error; +@@session.tokudb_backup_last_error +0 +SELECT @@session.tokudb_backup_last_error_string; +@@session.tokudb_backup_last_error_string +NULL +### Stop slave server +include/rpl_stop_server.inc [server_number=2] +### Restore backup +### Start slave server and slave threads +include/rpl_start_server.inc [server_number=2] +include/start_slave.inc +### Sync slave with master +### Check data on slave +SELECT * FROM t1; +a b +200 200 +300 100 +400 100 +500 100 +### Cleanup +DROP TABLE t1; +include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test new file mode 100644 index 00000000000..8e9f6df4b1d --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_info.test @@ -0,0 +1,94 @@ +--source include/have_tokudb_backup.inc +--source include/not_gtid_enabled.inc + + +--let $SLAVE_INFO_FILE=tokubackup_slave_info +--let $BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave +--let $SLAVE_INFO_FILE_PATH=$BACKUP_DIR_SLAVE/$SLAVE_INFO_FILE +--let DDIR=$BACKUP_DIR_SLAVE + +# Settings for include/filter_file.inc + +--delimiter | +let $script= + s{host: [^,]+,}{host: #.#.#.#,}; + s{user: [^,]+,}{user: ####,}; + s{port: [^,]+,}{port: ####,}; + s{master log file: [^,]+,}{master log file: ####,}; + s{relay log file: [^,]+,}{relay log file: ####,}; + s{exec master log pos: [^,]+,}{exec master log pos: ####,}; + s{executed gtid set: [^,]+, }{executed gtid set: ####, }; + s{executed gtid set: [^,]+,[^,]+, }{executed gtid set: ####,####, }; +| +--delimiter ; +--let $input_file = $SLAVE_INFO_FILE_PATH +--let $skip_column_names= 1 + +--echo ### +--echo # Test for binlog position +--echo ##### + +--mkdir $BACKUP_DIR_SLAVE + +--source include/master-slave.inc + +--connection master +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; + +--sync_slave_with_master + +--connection slave +--echo Backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--source include/filter_file.inc +--echo ### $SLAVE_INFO_FILE content: +--cat_file $SLAVE_INFO_FILE_PATH + +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + +--echo ### +--echo # 
Test for gtid set +--echo ##### + +--mkdir $BACKUP_DIR_SLAVE + +--let $rpl_server_numbers= 1,2 +--let $rpl_set_enforce_gtid_consistency= 1 +--source include/rpl_set_gtid_mode.inc + +--connection master +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; + +--sync_slave_with_master + +--connection slave +--echo Backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--source include/filter_file.inc +--echo ### $SLAVE_INFO_FILE content: +--cat_file $SLAVE_INFO_FILE_PATH + +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + +--let $rpl_gtid_mode= OFF +--let $rpl_set_enforce_gtid_consistency= 0 +--let $rpl_server_numbers= 1,2 +--let $rpl_skip_sync= 1 +--source include/rpl_set_gtid_mode.inc +--source include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test new file mode 100644 index 00000000000..c301d55f8fa --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/backup_master_state.test @@ -0,0 +1,87 @@ +--source include/have_tokudb_backup.inc +--source include/not_gtid_enabled.inc + +--let $MASTER_STATE_FILE=tokubackup_binlog_info +--let $BACKUP_DIR_MASTER=$MYSQL_TMP_DIR/tokudb_backup_master +--let $MASTER_STATE_FILE_PATH=$BACKUP_DIR_MASTER/$MASTER_STATE_FILE +--let DDIR=$BACKUP_DIR_MASTER + +# Settings for include/filter_file.inc +--delimiter | +let $script= + s{filename: [^,]+,}{filename: ####,}; + s{position: [^,]+,}{position: ####,}; + s{GTID of last change: [^ ]+}{GTID of last change: #####}; +| +--delimiter ; +--let $input_file = $MASTER_STATE_FILE_PATH +--let $skip_column_names= 1 + +--echo ### Create backup dir +--mkdir $BACKUP_DIR_MASTER + +--source include/master-slave.inc + +--connection master + +--echo ### Check for settings +SELECT @@gtid_mode; + +--echo ### Generate some binlog events +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; + +--echo ### Master backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER' +--enable_query_log + +--source include/filter_file.inc +--echo ### $MASTER_STATE_FILE content: +--cat_file $MASTER_STATE_FILE_PATH + +--echo ### Delete backup dir +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + +--echo ### Create backup dir +--mkdir $BACKUP_DIR_MASTER + +--echo ### GTID-mode on +--let $rpl_server_numbers= 1,2 +--let $rpl_set_enforce_gtid_consistency= 1 +--source include/rpl_set_gtid_mode.inc + +--echo ### Check for settings +SELECT @@gtid_mode; + +--echo ### Generate some binlog events +CREATE TABLE t1(a INT) ENGINE=TokuDB; +DROP TABLE t1; + +--echo ### Master backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER' +--enable_query_log + +--source include/filter_file.inc +--echo ### $MASTER_STATE_FILE content: +--cat_file $MASTER_STATE_FILE_PATH + +--echo ### Delete backup dir +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + +--echo ### GTID-mode off +--let $rpl_gtid_mode= OFF +--let $rpl_set_enforce_gtid_consistency= 0 +--let $rpl_server_numbers= 1,2 +--source include/rpl_set_gtid_mode.inc + +--source include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test b/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test new file mode 100644 index 00000000000..53592903a27 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/empty_slave_info_file.test @@ -0,0 
+1,23 @@ +--source include/have_tokudb_backup.inc +--source include/not_gtid_enabled.inc + + +--let $SLAVE_INFO_FILE=tokubackup_slave_info +--let $BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave +--let DDIR=$BACKUP_DIR_SLAVE + +--mkdir $BACKUP_DIR_SLAVE + +--echo Backup +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--list_files $BACKUP_DIR_SLAVE $SLAVE_INFO_FILE + +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"DDIR"}; +rmtree([ "$DDIR" ]); +EOF + diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt new file mode 100644 index 00000000000..5f5dbb9c7c6 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled-master.opt @@ -0,0 +1 @@ +--innodb_use_native_aio=on diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test new file mode 100644 index 00000000000..3e09b465c02 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/innodb_use_native_aio_enabled.test @@ -0,0 +1,19 @@ +# Check if tokudb hot backup is prevented if innodb_use_native_aio enabled +--source include/have_tokudb_backup.inc +--source include/have_innodb.inc + +SELECT @@innodb_use_native_aio; + +--let BACKUP_DIR= $MYSQL_TMP_DIR/tokudb_backup + +--mkdir $BACKUP_DIR + +--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR +--error ER_WRONG_VALUE_FOR_VAR +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR' + +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"BACKUP_DIR"}; +rmtree([ "$DDIR" ]); +EOF diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt new file mode 100644 index 00000000000..af3a211967b --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-master.opt @@ -0,0 +1 @@ +--binlog-format=statement diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt new file mode 100644 index 00000000000..49405b1aec3 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave-slave.opt @@ -0,0 +1 @@ +--master-info-repository=TABLE --relay-log-info-repository=TABLE diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf new file mode 100644 index 00000000000..321be4ab2fc --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.cnf @@ -0,0 +1,14 @@ +!include ../../rpl/my.cnf + +[mysqld.1] + +[mysqld.2] + +[mysqld.3] +master-info-repository=TABLE +relay-log-info-repository=TABLE + +[ENV] +SERVER_MYPORT_3= @mysqld.3.port +SERVER_MYSOCK_3= @mysqld.3.socket + diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc new file mode 100644 index 00000000000..e0732ee63fc --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.inc @@ -0,0 +1,112 @@ +--connection server_1 +--echo ### Create temp table on master +CREATE TEMPORARY TABLE t1 (a INT); + +--let $sync_slave_connection= server_2 +--source include/sync_slave_sql_with_master.inc + +--echo ### Setup debug_sync points and prepare for slave backup +--connection slave_2 +SET SESSION debug="+d,debug_sync_abort_on_timeout"; + +SHOW STATUS LIKE 
'Slave_open_temp_tables'; + +SET DEBUG_SYNC= 'tokudb_backup_wait_for_safe_slave_entered SIGNAL sse WAIT_FOR sse_continue'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_begin SIGNAL ttlb WAIT_FOR ttlb_continue'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_slave_started SIGNAL ttlss WAIT_FOR ttlss_continue EXECUTE 2'; +SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_end SIGNAL ttle WAIT_FOR ttle_continue'; + +--mkdir $BACKUP_DIR_SLAVE + +--echo ### Turn-on safe-slave option +SET GLOBAL tokudb_backup_safe_slave=ON; +SET GLOBAL tokudb_backup_safe_slave_timeout=30; + +--echo ### Start slave backup +--disable_query_log +--send_eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--connection server_2 +SET SESSION debug="+d,debug_sync_abort_on_timeout"; + +--echo ### Wait for safe slave function to start +SET DEBUG_SYNC = "now WAIT_FOR sse"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +--echo ### Wait for safe slave loop start +SET DEBUG_SYNC = "now SIGNAL sse_continue WAIT_FOR ttlb"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +--echo ### Wait for safe thread loop point just after slave sql thread start 1 +SET DEBUG_SYNC = "now SIGNAL ttlb_continue WAIT_FOR ttlss"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; +--echo ### Wait for safe thread loop end +SET DEBUG_SYNC = "now SIGNAL ttlss_continue WAIT_FOR ttle"; +SHOW STATUS LIKE 'Slave_open_temp_tables'; + +--echo ### Wait for safe thread loop point just after slave sql thread start 2 +SET DEBUG_SYNC = "now SIGNAL ttle_continue WAIT_FOR ttlss"; + +--connection server_1 +--echo ### Drop temp table on master +DROP TABLE t1; + +--echo ### and syncronize slave +--let $sync_slave_connection= server_2 +--source include/sync_slave_sql_with_master.inc + +SHOW STATUS LIKE 'Slave_open_temp_tables'; + +--echo ### Continue backup +SET DEBUG_SYNC = "now SIGNAL ttlss_continue"; + +--echo ## Reset debug_sync points +SET DEBUG_SYNC = "RESET"; + +--connection slave_2 +--echo ### Wait for backup finish +--reap + +--let $input_file = $S_SLAVE_INFO_FILE_PATH +--source include/filter_file.inc +--echo ### Slave $SLAVE_INFO_FILE content: +--cat_file $S_SLAVE_INFO_FILE_PATH + +--echo ### Delete slave backup dir +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"BACKUP_DIR_SLAVE"}; +rmtree([ "$DDIR" ]); +EOF + +--echo ### Turn-off safe-slave option for slave +SET GLOBAL tokudb_backup_safe_slave=default; +SET GLOBAL tokudb_backup_safe_slave_timeout=default; + +--connection server_1 + +--echo ### Turn-on safe-slave option for master +SET GLOBAL tokudb_backup_safe_slave=ON; +SET GLOBAL tokudb_backup_safe_slave_timeout=30; + +--echo ### Backup master +--mkdir $BACKUP_DIR_MASTER +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER' +--enable_query_log + +--echo ### Turn-off safe-slave option for master +SET GLOBAL tokudb_backup_safe_slave=default; +SET GLOBAL tokudb_backup_safe_slave_timeout=default; + +--let $input_file = $M_MASTER_INFO_FILE_PATH +--source include/filter_file.inc +--echo ### Master $MASTER_INFO_FILE content: +--cat_file $M_MASTER_INFO_FILE_PATH + +--echo ### Delete master backup dir +--perl +use File::Path 'rmtree'; +$DDIR=$ENV{"BACKUP_DIR_MASTER"}; +rmtree([ "$DDIR" ]); +EOF + diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test new file mode 100644 index 00000000000..15ba1d8bb66 --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_safe_slave.test @@ -0,0 +1,49 @@ +--source 
include/have_tokudb_backup.inc +--source include/have_binlog_format_statement.inc +--source include/have_debug_sync.inc + +--let $SLAVE_INFO_FILE=tokubackup_slave_info +--let $MASTER_INFO_FILE=tokubackup_binlog_info + +--let BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave +--let $S_SLAVE_INFO_FILE_PATH=$BACKUP_DIR_SLAVE/$SLAVE_INFO_FILE + +--let BACKUP_DIR_MASTER=$MYSQL_TMP_DIR/tokudb_backup_master +--let $M_MASTER_INFO_FILE_PATH=$BACKUP_DIR_MASTER/$MASTER_INFO_FILE + +# Settings for include/filter_file.inc +--delimiter | +let $script= + s{filename: [^,]+,}{filename: ####,}; + s{position: [^,]+,}{position: ####,}; + s{GTID of last change: [^ ]+}{GTID of last change: #####}; + s{host: [^,]+,}{host: #.#.#.#,}; + s{user: [^,]+,}{user: ####,}; + s{port: [^,]+,}{port: ####,}; + s{master log file: [^,]+,}{master log file: ####,}; + s{relay log file: [^,]+,}{relay log file: ####,}; + s{exec master log pos: [^,]+,}{exec master log pos: ####,}; + s{executed gtid set: [^,]+, }{executed gtid set: ####, }; + s{executed gtid set: [^,]+,[^,]+, }{executed gtid set: ####,####, }; +| +--delimiter ; +--let $skip_column_names= 1 + +--disable_query_log +CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); +CALL mtr.add_suppression("Sending passwords in plain text without SSL/TLS is extremely insecure"); +--enable_query_log + +--echo ### +--echo # Master-slave test +--echo #### + +--let $rpl_server_count=3 +--let $rpl_topology=1->2 +--source include/rpl_init.inc + +--connect (slave_2,localhost,root,,test,$SLAVE_MYPORT,$SLAVE_MYSOCK) + +--source rpl_safe_slave.inc + +--source include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt new file mode 100644 index 00000000000..263e1aef0ab --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync-slave.opt @@ -0,0 +1 @@ +--loose-tokudb-commit-sync=OFF --loose-tokudb-fsync-log-period=1000000 diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test new file mode 100644 index 00000000000..5bd53cacdab --- /dev/null +++ b/storage/tokudb/mysql-test/tokudb_backup/t/rpl_tokudb_commit_sync.test @@ -0,0 +1,72 @@ +# if --tokudb-commit-sync is off on slave tokudb log must be flushed on backup +# to provide the ability to restore replication after backup restoring + +--source include/have_tokudb_backup.inc + +--let $BACKUP_DIR_SLAVE= $MYSQL_TMP_DIR/tokudb_backup_slave +--let $BACKUP_MYSQL_DATA_DIR= $BACKUP_DIR_SLAVE/mysql_data_dir + +--mkdir $BACKUP_DIR_SLAVE + +--source include/master-slave.inc + +--echo ### Create some data on master +--connection master +CREATE TABLE t1(a INT, b INT, PRIMARY KEY (a)) ENGINE=TokuDB; +INSERT INTO t1 SET a=100, b=100; +INSERT INTO t1 SET a=200, b=100; +INSERT INTO t1 SET a=300, b=100; +INSERT INTO t1 SET a=400, b=100; +INSERT INTO t1 SET a=500, b=100; +UPDATE t1 SET b = 200 WHERE a = 200; +DELETE FROM t1 WHERE a = 100; + +SELECT * FROM t1; + +--sync_slave_with_master +--let $SLAVE_DATA_DIR=`SELECT @@DATADIR` + +--echo ### Check for slave options +SELECT @@tokudb_commit_sync; +SELECT @@tokudb_fsync_log_period; + +--echo ### Check data on slave after sync +SELECT * FROM t1; + + +--echo ### Do backup on slave +--disable_query_log +--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE' +--enable_query_log + +--echo ### Check 
for errors +SELECT @@session.tokudb_backup_last_error; +SELECT @@session.tokudb_backup_last_error_string; + +--echo ### Stop slave server +--connection slave +--let $rpl_server_number= 2 +--let $rpl_force_stop= 1 +--source include/rpl_stop_server.inc + +--echo ### Restore backup +--exec rm -rf $SLAVE_DATA_DIR; +--exec mv $BACKUP_MYSQL_DATA_DIR $SLAVE_DATA_DIR; + +--echo ### Start slave server and slave threads +--connection slave +--source include/rpl_start_server.inc +--source include/start_slave.inc + +--echo ### Sync slave with master +--connection master +--sync_slave_with_master + +--echo ### Check data on slave +SELECT * FROM t1; + +--echo ### Cleanup +--connection master +DROP TABLE t1; + +--source include/rpl_end.inc diff --git a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt index 5d4cb245e27..a624d6895cc 100644 --- a/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt +++ b/storage/tokudb/mysql-test/tokudb_backup/t/suite.opt @@ -1 +1 @@ -$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M +$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-innodb_use_native_aio=off --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result index e5808f52e69..a7cdbcae1e2 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db233.result @@ -14,16 +14,6 @@ INSERT INTO t1 VALUES(1, 1, '1', '1'), (2, 2, '2', '2'), (3, 3, '3', '3'), (4, 4 ANALYZE TABLE t1; Table Op Msg_type Msg_text test.t1 analyze status OK -set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1'; -SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC; -set DEBUG_SYNC = 'now WAIT_FOR hit1'; -set DEBUG_SYNC = 'now SIGNAL done1'; -c -8 -7 -6 -6 -5 set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2'; SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id ASC; set DEBUG_SYNC = 'now WAIT_FOR hit2'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result b/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result index 6cc499389bb..2975d7d3116 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result +++ b/storage/tokudb/mysql-test/tokudb_bugs/r/simple_icp.result @@ -110,7 +110,7 @@ a b c d e 5 1 10 NULL NULL show status like '%Handler_read_prev%'; Variable_name Value -Handler_read_prev 799 +Handler_read_prev 41 flush status; show status like '%Handler_read_prev%'; Variable_name Value @@ -142,7 +142,7 @@ a b c d e 20 1 10 NULL NULL show status like '%Handler_read_prev%'; Variable_name Value -Handler_read_prev 399 +Handler_read_prev 21 flush status; show status like '%Handler_read_next%'; Variable_name Value diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test index 8e4c3b73c09..fec11bf0553 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db233.test @@ -29,24 +29,6 @@ ANALYZE TABLE t1; # lets flip to another connection connect(conn1, localhost, root); -# set up the DEBUG_SYNC point -set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1'; - -# send the query -send SELECT c FROM t1 WHERE id 
BETWEEN 5 AND 8 ORDER BY id DESC; - -# back to default connection -connection default; - -# wait for the ICP reverse scan to invalidate -set DEBUG_SYNC = 'now WAIT_FOR hit1'; - -# lets release and clean up -set DEBUG_SYNC = 'now SIGNAL done1'; - -connection conn1; -reap; - # set up the DEBUG_SYNC point again, but for the out of range set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test b/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test index fd4c5fad534..957fdadb71f 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/leak172.test @@ -132,6 +132,7 @@ CREATE TABLE `t2` ( ); LOAD DATA INFILE 'leak172_t1.data' INTO TABLE `t1` fields terminated by ','; +remove_file $MYSQLD_DATADIR/test/leak172_t1.data; connect(conn1,localhost,root,,); set session debug="+d,tokudb_end_bulk_insert_sleep"; @@ -145,8 +146,9 @@ UPDATE t1, t2 SET t1.`c5` = 4 WHERE t1.`c6` <= 'o'; connection conn1; reap; +remove_file $MYSQLD_DATADIR/test/leak172_t2.data; connection default; disconnect conn1; -drop table t1,t2; \ No newline at end of file +drop table t1,t2; diff --git a/storage/tokudb/tokudb_dir_cmd.cc b/storage/tokudb/tokudb_dir_cmd.cc new file mode 100644 index 00000000000..f9995302d49 --- /dev/null +++ b/storage/tokudb/tokudb_dir_cmd.cc @@ -0,0 +1,331 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +/* -*- mode: C; c-basic-offset: 4 -*- */ +#ident "$Id$" +/*====== +This file is part of TokuDB + + +Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. + + TokuDBis is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License, version 2, + as published by the Free Software Foundation. + + TokuDB is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with TokuDB. If not, see . + +======= */ + +#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." 
+ +#include "hatoku_hton.h" +#include "tokudb_dir_cmd.h" +#include "my_dbug.h" +#include "sql_base.h" + +#include +#include + +namespace tokudb { + +const char tokens_delimiter = ' '; +const char tokens_escape_delimiter_char = '\\'; + +static int MDL_and_TDC(THD *thd, + const char *db, + const char *table, + const dir_cmd_callbacks &cb) { + int error; + LEX_STRING db_arg; + LEX_STRING table_arg; + + db_arg.str = const_cast(db); + db_arg.length = strlen(db);; + table_arg.str = const_cast(table); + table_arg.length = strlen(table); + Table_ident table_ident(thd, db_arg, table_arg, true);; + thd->lex->select_lex.add_table_to_list( + thd, &table_ident, NULL, 1, TL_UNLOCK, MDL_EXCLUSIVE, 0, 0, 0); + /* The lock will be released at the end of mysq_execute_command() */ + error = lock_table_names(thd, + thd->lex->select_lex.table_list.first, + NULL, + thd->variables.lock_wait_timeout, + 0); + if (error) { + if (cb.set_error) + cb.set_error(thd, + error, + "Can't lock table '%s.%s'", + db, + table); + return error; + } + tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db, table, false); + return error; +} + +static bool parse_db_and_table(const char *dname, + std::string /*out*/ &db_name, + std::string /*out*/ &table_name) { + const char *begin; + const char *end; + const char *db_name_begin; + const char *db_name_end; + + begin = strchr(dname, '/'); + if (!begin) + return false; + ++begin; + end = strchr(begin, '/'); + if (!end) + return false; + + db_name_begin = begin; + db_name_end = end; + + begin = end + 1; + + end = strchr(begin, '-'); + if (!end) + return false; + + if (strncmp(end, "-main", strlen("-main")) && + strncmp(end, "-status", strlen("-status")) && + strncmp(end, "-key", strlen("-key"))) + return false; + + db_name.assign(db_name_begin, db_name_end); + table_name.assign(begin, end); + + return true; +} + +static int attach(THD *thd, + const std::string &dname, + const std::string &iname, + const dir_cmd_callbacks &cb) { + int error; + DB_TXN* txn = NULL; + DB_TXN *parent_txn = NULL; + tokudb_trx_data *trx = NULL; + + std::string db_name; + std::string table_name; + + if (parse_db_and_table(dname.c_str(), db_name, table_name)) { + error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb); + if (error) + goto cleanup; + } + + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); + if (trx && trx->sub_sp_level) + parent_txn = trx->sub_sp_level; + error = txn_begin(db_env, parent_txn, &txn, 0, thd); + if (error) + goto cleanup; + + error = db_env->dirtool_attach(db_env, + txn, + dname.c_str(), + iname.c_str()); +cleanup: + if (txn) { + if (error) { + abort_txn(txn); + } + else { + commit_txn(txn, 0); + } + } + return error; +} + +static int detach(THD *thd, + const std::string &dname, + const dir_cmd_callbacks &cb) { + int error; + DB_TXN* txn = NULL; + DB_TXN *parent_txn = NULL; + tokudb_trx_data *trx = NULL; + + std::string db_name; + std::string table_name; + + if (parse_db_and_table(dname.c_str(), db_name, table_name)) { + error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb); + if (error) + goto cleanup; + } + + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); + if (trx && trx->sub_sp_level) + parent_txn = trx->sub_sp_level; + error = txn_begin(db_env, parent_txn, &txn, 0, thd); + if (error) + goto cleanup; + + error = db_env->dirtool_detach(db_env, + txn, + dname.c_str()); +cleanup: + if (txn) { + if (error) { + abort_txn(txn); + } + else { + commit_txn(txn, 0); + } + } + return error; +} + +static int move(THD *thd, + const std::string 
&old_dname, + const std::string &new_dname, + const dir_cmd_callbacks &cb) { + int error; + DB_TXN* txn = NULL; + DB_TXN *parent_txn = NULL; + tokudb_trx_data *trx = NULL; + + std::string db_name; + std::string table_name; + + if (parse_db_and_table(old_dname.c_str(), db_name, table_name)) { + error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb); + if (error) + goto cleanup; + } + + trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton); + if (trx && trx->sub_sp_level) + parent_txn = trx->sub_sp_level; + error = txn_begin(db_env, parent_txn, &txn, 0, thd); + if (error) + goto cleanup; + + error = db_env->dirtool_move(db_env, + txn, + old_dname.c_str(), + new_dname.c_str()); +cleanup: + if (txn) { + if (error) { + abort_txn(txn); + } + else { + commit_txn(txn, 0); + } + } + return error; +} + +static void tokenize(const char *cmd_str, + std::vector /*out*/ &tokens) { + DBUG_ASSERT(cmd_str); + + bool was_escape = false; + const char *token_begin = cmd_str; + const char *token_end = token_begin; + + while (*token_end) { + if (*token_end == tokens_escape_delimiter_char) { + was_escape = true; + } + else if (*token_end == tokens_delimiter) { + if (was_escape) + was_escape = false; + else { + if (token_begin == token_end) + ++token_begin; + else { + tokens.push_back(std::string(token_begin, token_end)); + token_begin = token_end + 1; + } + } + } + else { + was_escape = false; + } + ++token_end; + } + + if (token_begin != token_end) + tokens.push_back(std::string(token_begin, token_end)); +} + +void process_dir_cmd(THD *thd, + const char *cmd_str, + const dir_cmd_callbacks &cb) { + + DBUG_ASSERT(thd); + DBUG_ASSERT(cmd_str); + + std::vector tokens; + tokenize(cmd_str, tokens); + + if (tokens.empty()) + return; + + const std::string &cmd = tokens[0]; + + if (!cmd.compare("attach")) { + if (tokens.size() != 3) { + if (cb.set_error) + cb.set_error(thd, + EINVAL, + "attach command requires two arguments"); + } + else { + int r = attach(thd, tokens[1], tokens[2], cb); + if (r && cb.set_error) + cb.set_error(thd, r, "Attach command error"); + } + } + else if (!cmd.compare("detach")) { + if (tokens.size() != 2) { + if (cb.set_error) + cb.set_error(thd, + EINVAL, + "detach command requires one argument"); + } + else { + int r = detach(thd, tokens[1], cb); + if (r && cb.set_error) + cb.set_error(thd, r, "detach command error"); + } + } + else if (!cmd.compare("move")) { + if (tokens.size() != 3) { + if (cb.set_error) + cb.set_error(thd, + EINVAL, + "move command requires two arguments"); + } + else { + int r = move(thd, tokens[1], tokens[2], cb); + if (r && cb.set_error) + cb.set_error(thd, r, "move command error"); + } + } + else { + if (cb.set_error) + cb.set_error(thd, + ENOENT, + "Unknown command '%s'", + cmd.c_str()); + } + + return; +}; + + +} // namespace tokudb diff --git a/storage/tokudb/tokudb_dir_cmd.h b/storage/tokudb/tokudb_dir_cmd.h new file mode 100644 index 00000000000..b39caadc7c3 --- /dev/null +++ b/storage/tokudb/tokudb_dir_cmd.h @@ -0,0 +1,46 @@ +/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */ +// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4: +#ident "$Id$" +/*====== +This file is part of TokuDB + + +Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. + + TokuDBis is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License, version 2, + as published by the Free Software Foundation. 
+ + TokuDB is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with TokuDB. If not, see . + +======= */ + +#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." + +#ifndef _TOKUDB_DIR_CMD_H +#define _TOKUDB_DIR_CMD_H + +#include + +namespace tokudb { + +struct dir_cmd_callbacks { + void (*set_error)(THD *thd, + int error, + const char *error_fmt, + ...); +}; + +void process_dir_cmd(THD *thd, + const char *cmd_str, + const dir_cmd_callbacks &cb); + +}; + +#endif // _TOKUDB_DIR_CMD_H diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc index e5185615279..31c44c45500 100644 --- a/storage/tokudb/tokudb_sysvars.cc +++ b/storage/tokudb/tokudb_sysvars.cc @@ -25,6 +25,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved. #ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved." #include "hatoku_hton.h" +#include "sql_acl.h" +#include "tokudb_dir_cmd.h" +#include "sql_parse.h" namespace tokudb { namespace sysvars { @@ -40,6 +43,8 @@ namespace sysvars { #define TOKUDB_VERSION_STR NULL #endif +const size_t error_buffer_max_size = 1024; + ulonglong cache_size = 0; uint cachetable_pool_threads = 0; int cardinality_scale_percent = 0; @@ -918,7 +923,70 @@ static MYSQL_THDVAR_BOOL( true); #endif +static int dir_cmd_check(THD* thd, struct st_mysql_sys_var* var, + void* save, struct st_mysql_value* value) ; + +static MYSQL_THDVAR_INT(dir_cmd_last_error, + PLUGIN_VAR_THDLOCAL, + "error from the last dir command. 0 is success", + NULL, NULL, 0, 0, 0, 1); + +static MYSQL_THDVAR_STR(dir_cmd_last_error_string, + PLUGIN_VAR_THDLOCAL + PLUGIN_VAR_MEMALLOC, + "error string from the last dir command", + NULL, NULL, NULL); + +static MYSQL_THDVAR_STR(dir_cmd, + PLUGIN_VAR_THDLOCAL + PLUGIN_VAR_MEMALLOC, + "name of the directory where the backup is stored", + dir_cmd_check, NULL, NULL); + +static void dir_cmd_set_error(THD *thd, + int error, + const char *error_fmt, + ...) 
{ + char buff[error_buffer_max_size]; + va_list varargs; + + assert(thd); + assert(error_fmt); + + va_start(varargs, error_fmt); + vsnprintf(buff, sizeof(buff), error_fmt, varargs); + va_end(varargs); + + THDVAR_SET(thd, dir_cmd_last_error, &error); + THDVAR_SET(thd, dir_cmd_last_error_string, buff); +} +static int dir_cmd_check(THD* thd, struct st_mysql_sys_var* var, + void* save, struct st_mysql_value* value) { + int error = 0; + dir_cmd_set_error(thd, error, ""); + + if (check_global_access(thd, SUPER_ACL)) { + return 1; + } + + char buff[STRING_BUFFER_USUAL_SIZE]; + int length = sizeof(buff); + const char *str = value->val_str(value, buff, &length); + if (str) { + str = thd->strmake(str, length); + *(const char**)save = str; + } + + if (str) { + dir_cmd_callbacks callbacks { .set_error = dir_cmd_set_error }; + process_dir_cmd(thd, str, callbacks); + + error = THDVAR(thd, dir_cmd_last_error); + } else { + error = EINVAL; + } + + return error; +} //****************************************************************************** // all system variables @@ -949,7 +1017,6 @@ st_mysql_sys_var* system_variables[] = { MYSQL_SYSVAR(version), MYSQL_SYSVAR(write_status_frequency), MYSQL_SYSVAR(dir_per_db), - #if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL MYSQL_SYSVAR(gdb_path), MYSQL_SYSVAR(gdb_on_fatal), @@ -1008,6 +1075,9 @@ st_mysql_sys_var* system_variables[] = { #if TOKUDB_DEBUG MYSQL_SYSVAR(debug_pause_background_job_manager), #endif // TOKUDB_DEBUG + MYSQL_SYSVAR(dir_cmd_last_error), + MYSQL_SYSVAR(dir_cmd_last_error_string), + MYSQL_SYSVAR(dir_cmd), NULL }; -- cgit v1.2.1 From c1b3aaa24eceb59b20cd317256d6522f693a5869 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 16 May 2017 14:28:19 +0300 Subject: Update perfschema version to 5.6.36 post-merge --- storage/perfschema/ha_perfschema.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc index 2445bd80927..837a2463549 100644 --- a/storage/perfschema/ha_perfschema.cc +++ b/storage/perfschema/ha_perfschema.cc @@ -205,7 +205,7 @@ maria_declare_plugin(perfschema) 0x0001, pfs_status_vars, NULL, - "5.6.33", + "5.6.36", MariaDB_PLUGIN_MATURITY_STABLE } maria_declare_plugin_end; -- cgit v1.2.1 From dfeff407067e2eaa27a51a04306863f912ec4cc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Tue, 16 May 2017 17:22:44 +0300 Subject: Remove tokudb_dir_cmd variable The feature is too dangerous for a stable version. --- storage/tokudb/tokudb_sysvars.cc | 71 ---------------------------------------- 1 file changed, 71 deletions(-) (limited to 'storage') diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc index 2e356d0fd52..bbc39dc550a 100644 --- a/storage/tokudb/tokudb_sysvars.cc +++ b/storage/tokudb/tokudb_sysvars.cc @@ -43,7 +43,6 @@ namespace sysvars { #define TOKUDB_VERSION_STR NULL #endif -const size_t error_buffer_max_size = 1024; ulonglong cache_size = 0; uint cachetable_pool_threads = 0; @@ -923,73 +922,6 @@ static MYSQL_THDVAR_BOOL( true); #endif -static int dir_cmd_check(THD* thd, struct st_mysql_sys_var* var, - void* save, struct st_mysql_value* value) ; - -static MYSQL_THDVAR_INT(dir_cmd_last_error, - PLUGIN_VAR_THDLOCAL, - "error from the last dir command. 
0 is success", - NULL, NULL, 0, 0, 0, 1); - -static MYSQL_THDVAR_STR(dir_cmd_last_error_string, - PLUGIN_VAR_THDLOCAL + PLUGIN_VAR_MEMALLOC, - "error string from the last dir command", - NULL, NULL, NULL); - -static MYSQL_THDVAR_STR(dir_cmd, - PLUGIN_VAR_THDLOCAL + PLUGIN_VAR_MEMALLOC, - "name of the directory where the backup is stored", - dir_cmd_check, NULL, NULL); - -static void dir_cmd_set_error(THD *thd, - int error, - const char *error_fmt, - ...) { - char buff[error_buffer_max_size]; - va_list varargs; - - assert(thd); - assert(error_fmt); - - va_start(varargs, error_fmt); - vsnprintf(buff, sizeof(buff), error_fmt, varargs); - va_end(varargs); - -#if 0 // Disable macros unavailable in MariaDB. - THDVAR_SET(thd, dir_cmd_last_error, &error); - THDVAR_SET(thd, dir_cmd_last_error_string, buff); -#endif -} - -static int dir_cmd_check(THD* thd, struct st_mysql_sys_var* var, - void* save, struct st_mysql_value* value) { - int error = 0; - dir_cmd_set_error(thd, error, ""); - - if (check_global_access(thd, SUPER_ACL)) { - return 1; - } - - char buff[STRING_BUFFER_USUAL_SIZE]; - int length = sizeof(buff); - const char *str = value->val_str(value, buff, &length); - if (str) { - str = thd->strmake(str, length); - *(const char**)save = str; - } - - if (str) { - dir_cmd_callbacks callbacks { .set_error = dir_cmd_set_error }; - process_dir_cmd(thd, str, callbacks); - - error = THDVAR(thd, dir_cmd_last_error); - } else { - error = EINVAL; - } - - return error; -} - //****************************************************************************** // all system variables //****************************************************************************** @@ -1077,9 +1009,6 @@ st_mysql_sys_var* system_variables[] = { #if TOKUDB_DEBUG MYSQL_SYSVAR(debug_pause_background_job_manager), #endif // TOKUDB_DEBUG - MYSQL_SYSVAR(dir_cmd_last_error), - MYSQL_SYSVAR(dir_cmd_last_error_string), - MYSQL_SYSVAR(dir_cmd), NULL }; -- cgit v1.2.1 From f7dab76aa286560d167c666e058a494e93c04da5 Mon Sep 17 00:00:00 2001 From: Monty Date: Wed, 17 May 2017 00:00:27 +0300 Subject: MDEV-12756 rpl.rpl_killed_ddl fails in buildbot with 'Can't find record' The issue was that my_errno was not set properly when a repair was killed, which confused the rpl_killed_ddl script. I also added an extra test line in varchar.inc to ensure we don't give duplicate error rows. 
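For context, the shape of the fix is the pattern below — a minimal standalone sketch, not the actual server code: thd_killed() and HA_ERR_ABORTED_BY_USER mirror the real interfaces, while the THD stub, the error-code value and main() are placeholders added only so the example compiles on its own.

// Sketch of the kill-check pattern: the periodic "was I killed?" callback
// polled by REPAIR must also record an error code, otherwise the caller
// ends up reporting an unrelated error such as "Can't find record".
#include <cstdio>

static int my_errno = 0;                        // stand-in for the real thread-local
static const int HA_ERR_ABORTED_BY_USER = 194;  // illustrative value only

struct THD { bool killed; };                    // simplified stand-in
static int thd_killed(const THD *thd) { return thd->killed; }

static int killed_ptr(const THD *thd)           // called from long repair loops
{
  if (!thd_killed(thd))
    return 0;
  my_errno = HA_ERR_ABORTED_BY_USER;            // the piece that was missing
  return 1;                                     // non-zero aborts the repair
}

int main()
{
  THD thd = { true };
  if (killed_ptr(&thd))
    std::printf("repair aborted, my_errno=%d\n", my_errno);
  return 0;
}
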
--- storage/maria/ha_maria.cc | 8 +++++++- storage/maria/ma_check.c | 3 +++ storage/myisam/ha_myisam.cc | 13 ++++++++++++- storage/myisam/mi_check.c | 3 +++ 4 files changed, 25 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index b3fbc15c1a3..119a002a11a 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -834,7 +834,10 @@ extern "C" { int _ma_killed_ptr(HA_CHECK *param) { - return thd_killed((THD*)param->thd); + if (likely(thd_killed((THD*)param->thd)) == 0) + return 0; + my_errno= HA_ERR_ABORTED_BY_USER; + return 1; } @@ -1668,8 +1671,11 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize) } if (error && file->create_unique_index_by_sort && share->state.dupp_key != MAX_KEY) + { + my_errno= HA_ERR_FOUND_DUPP_KEY; print_keydup_error(table, &table->key_info[share->state.dupp_key], MYF(0)); + } } else { diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index f92774a0321..9787877420d 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -1364,6 +1364,7 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend, pos=block_info.filepos+block_info.block_len; if (block_info.rec_len > (uint) share->base.max_pack_length) { + my_errno= HA_ERR_WRONG_IN_RECORD; _ma_check_print_error(param,"Found too long record (%lu) at %s", (ulong) block_info.rec_len, llstr(start_recpos,llbuff)); @@ -4995,6 +4996,7 @@ static int sort_get_next_record(MARIA_SORT_PARAM *sort_param) param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_RETURN(1); /* Something wrong with data */ } b_type= _ma_get_block_info(info, &block_info,-1,pos); @@ -5268,6 +5270,7 @@ static int sort_get_next_record(MARIA_SORT_PARAM *sort_param) param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_RETURN(1); /* Something wrong with data */ } sort_param->start_recpos=sort_param->pos; diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 2b5536a9ce5..0a8fe2c78e6 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -577,7 +577,10 @@ extern "C" { int killed_ptr(HA_CHECK *param) { - return thd_killed((THD*)param->thd); + if (likely(thd_killed((THD*)param->thd)) == 0) + return 0; + my_errno= HA_ERR_ABORTED_BY_USER; + return 1; } void mi_check_print_error(HA_CHECK *param, const char *fmt,...) 
@@ -1213,6 +1216,11 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) if (remap) mi_munmap_file(file); #endif + /* + The following is to catch errors when my_errno is no set properly + during repairt + */ + my_errno= 0; if (mi_test_if_sort_rep(file,file->state->records,tmp_key_map,0) && (local_testflag & T_REP_BY_SORT)) { @@ -1235,8 +1243,11 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) } if (error && file->create_unique_index_by_sort && share->state.dupp_key != MAX_KEY) + { + my_errno= HA_ERR_FOUND_DUPP_KEY; print_keydup_error(table, &table->key_info[share->state.dupp_key], MYF(0)); + } } else { diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index e0016eca43f..b65bb8b78bd 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -3126,6 +3126,7 @@ static int sort_key_read(MI_SORT_PARAM *sort_param, void *key) } if (info->state->records == sort_info->max_records) { + my_errno= HA_ERR_WRONG_IN_RECORD; mi_check_print_error(sort_info->param, "Key %d - Found too many records; Can't continue", sort_param->key+1); @@ -3332,6 +3333,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_RETURN(1); /* Something wrong with data */ } b_type=_mi_get_block_info(&block_info,-1,pos); @@ -3590,6 +3592,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) param->error_printed=1; param->retry_repair=1; param->testflag|=T_RETRY_WITHOUT_QUICK; + my_errno= HA_ERR_WRONG_IN_RECORD; DBUG_RETURN(1); /* Something wrong with data */ } sort_param->start_recpos=sort_param->pos; -- cgit v1.2.1 From e63e2fe206a62058c1134e06d25c0a40745f38c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 16 May 2017 17:40:52 +0300 Subject: Fix warnings in innochecksum compilation --- storage/innobase/page/page0zip.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'storage') diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index e8a147aab47..878a0b8728f 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -2,6 +2,7 @@ Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -68,8 +69,8 @@ using namespace std; # define buf_LRU_stat_inc_unzip() ((void) 0) #endif /* !UNIV_HOTBACKUP */ -#ifndef UNIV_HOTBACKUP #ifndef UNIV_INNOCHECKSUM +#ifndef UNIV_HOTBACKUP /** Statistics on compression, indexed by page_zip_des_t::ssize - 1 */ UNIV_INTERN page_zip_stat_t page_zip_stat[PAGE_ZIP_SSIZE_MAX]; /** Statistics on compression, indexed by index->id */ @@ -79,7 +80,6 @@ UNIV_INTERN ib_mutex_t page_zip_stat_per_index_mutex; #ifdef HAVE_PSI_INTERFACE UNIV_INTERN mysql_pfs_key_t page_zip_stat_per_index_mutex_key; #endif /* HAVE_PSI_INTERFACE */ -#endif /* !UNIV_INNOCHECKSUM */ #endif /* !UNIV_HOTBACKUP */ /* Compression level to be used by zlib. Settable by user. */ @@ -128,7 +128,6 @@ Compare at most sizeof(field_ref_zero) bytes. /* Enable some extra debugging output. This code can be enabled independently of any UNIV_ debugging conditions. 
*/ -#ifndef UNIV_INNOCHECKSUM #if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG # include MY_ATTRIBUTE((format (printf, 1, 2))) -- cgit v1.2.1 From b8187fd005b765b81261908ba9caa667d506e3d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 16 May 2017 17:47:04 +0300 Subject: MDEV-12071 storage/xtradb/handler/ha_innodb.cc: Cannot assign const_iterator to iterator --- storage/xtradb/handler/ha_innodb.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 12ae71508c7..6c390328258 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -13339,7 +13339,8 @@ fill_foreign_key_list(THD* thd, { ut_ad(mutex_own(&dict_sys->mutex)); - for (dict_foreign_set::iterator it = table->referenced_set.begin(); + for (dict_foreign_set::const_iterator it + = table->referenced_set.begin(); it != table->referenced_set.end(); ++it) { dict_foreign_t* foreign = *it; -- cgit v1.2.1 From 492c1e414564b4edc079b2eef7266e1cd551e91f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 16 May 2017 18:33:51 +0300 Subject: Fix an incorrect debug assertion --- storage/innobase/include/page0zip.ic | 4 +++- storage/xtradb/include/page0zip.ic | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/page0zip.ic b/storage/innobase/include/page0zip.ic index 6c7d8cd32c7..9a583086925 100644 --- a/storage/innobase/include/page0zip.ic +++ b/storage/innobase/include/page0zip.ic @@ -2,6 +2,7 @@ Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -172,7 +173,8 @@ page_zip_rec_needs_ext( ignored if zip_size == 0 */ ulint zip_size) /*!< in: compressed page size in bytes, or 0 */ { - ut_ad(rec_size > comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES); + ut_ad(rec_size + > (comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES)); ut_ad(ut_is_2pow(zip_size)); ut_ad(comp || !zip_size); diff --git a/storage/xtradb/include/page0zip.ic b/storage/xtradb/include/page0zip.ic index 6c7d8cd32c7..9a583086925 100644 --- a/storage/xtradb/include/page0zip.ic +++ b/storage/xtradb/include/page0zip.ic @@ -2,6 +2,7 @@ Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -172,7 +173,8 @@ page_zip_rec_needs_ext( ignored if zip_size == 0 */ ulint zip_size) /*!< in: compressed page size in bytes, or 0 */ { - ut_ad(rec_size > comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES); + ut_ad(rec_size + > (comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES)); ut_ad(ut_is_2pow(zip_size)); ut_ad(comp || !zip_size); -- cgit v1.2.1 From 7972da8aa1c52121f60fb8fdae534a46892b4e07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 16 May 2017 20:08:47 +0300 Subject: Silence bogus GCC 7 warnings -Wimplicit-fallthrough Do not silence uncertain cases, or fix any bugs. 
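For reference, the annotation GCC 7 recognizes is a plain comment placed immediately before the case label that is fallen into, as in the minimal standalone sketch below (illustrative only, not taken from this patch; it compiles warning-free with g++ -Wextra -Wimplicit-fallthrough):

// Demonstrates the /* fall through */ idiom used throughout this commit:
// the comment must directly precede the next case label.
#include <cstdio>

static int size_multiplier(char suffix)
{
  int n = 1;
  switch (suffix) {
  case 'M':
    n *= 1024;
    /* fall through */
  case 'K':
    n *= 1024;
    break;
  default:
    break;
  }
  return n;
}

int main()
{
  std::printf("K=%d, M=%d\n", size_multiplier('K'), size_multiplier('M'));
  return 0;
}

C++17 offers [[fallthrough]]; as a language-level alternative; the comment form used here has the advantage of being accepted by older compilers as well.
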
The only functional change should be that ha_federated::extra() is not calling DBUG_PRINT to report an unhandled case for HA_EXTRA_PREPARE_FOR_DROP. --- storage/connect/array.cpp | 2 ++ storage/connect/filamdbf.cpp | 4 +++- storage/connect/filamfix.cpp | 3 ++- storage/connect/filamtxt.cpp | 1 + storage/connect/filamvct.cpp | 3 +++ storage/connect/filter.cpp | 3 ++- storage/connect/ha_connect.cc | 8 +++++--- storage/connect/json.cpp | 4 ++-- storage/connect/plgdbutl.cpp | 2 +- storage/connect/reldef.cpp | 1 + storage/connect/tabdos.cpp | 1 + storage/connect/tabjson.cpp | 2 +- storage/connect/value.cpp | 2 +- storage/connect/xindex.cpp | 2 +- storage/connect/xobject.cpp | 2 +- storage/federated/ha_federated.cc | 2 ++ storage/heap/hp_create.c | 4 ++-- storage/innobase/handler/ha_innodb.cc | 11 +++++++---- storage/innobase/include/data0type.ic | 4 +++- storage/innobase/include/mach0data.ic | 15 ++++++++------- storage/innobase/include/ut0rnd.ic | 7 +++++++ storage/innobase/row/row0import.cc | 1 + storage/innobase/row/row0log.cc | 2 ++ storage/innobase/row/row0mysql.cc | 3 ++- storage/innobase/row/row0purge.cc | 4 +++- storage/innobase/sync/sync0sync.cc | 1 + storage/maria/ma_extra.c | 3 ++- storage/maria/ma_recovery.c | 2 +- storage/myisam/mi_extra.c | 3 ++- storage/spider/spd_db_conn.cc | 2 +- storage/xtradb/handler/ha_innodb.cc | 11 +++++++---- storage/xtradb/include/data0type.ic | 3 ++- storage/xtradb/include/mach0data.ic | 15 ++++++++------- storage/xtradb/include/ut0rnd.ic | 7 +++++++ storage/xtradb/row/row0import.cc | 1 + storage/xtradb/row/row0log.cc | 2 ++ storage/xtradb/row/row0mysql.cc | 3 ++- storage/xtradb/row/row0purge.cc | 4 +++- storage/xtradb/sync/sync0sync.cc | 5 +++-- 39 files changed, 106 insertions(+), 49 deletions(-) (limited to 'storage') diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index 1998ab890e9..88ab85d20eb 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -155,6 +155,7 @@ ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec) switch (type) { case TYPE_STRING: Len = length; + /* fall through */ case TYPE_SHORT: case TYPE_INT: case TYPE_DOUBLE: @@ -592,6 +593,7 @@ int ARRAY::Convert(PGLOBAL g, int k, PVAL vp) switch (Type) { case TYPE_DOUBLE: prec = 2; + /* fall through */ case TYPE_SHORT: case TYPE_INT: case TYPE_DATE: diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 9feb61d7d61..fec98a72544 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -503,7 +503,8 @@ bool DBFFAM::OpenTableFile(PGLOBAL g) break; } // endif - // Selective delete, pass thru + // Selective delete + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb" : "r+b"); @@ -623,6 +624,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g) case 'L': // Large (big) integer case 'T': // Tiny integer c = 'N'; // Numeric + /* fall through */ case 'N': // Numeric (integer) case 'F': // Float (double) descp->Decimals = (uchar)cdp->F.Prec; diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp index cd25429318a..dcb677dc848 100644 --- a/storage/connect/filamfix.cpp +++ b/storage/connect/filamfix.cpp @@ -919,7 +919,8 @@ bool BGXFAM::OpenTableFile(PGLOBAL g) break; } // endif - // Selective delete, pass thru + // Selective delete + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); oflag |= (UseTemp) ? 
O_RDONLY : O_RDWR; diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index e53cdcd9ba9..12c0a66ecad 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -561,6 +561,7 @@ bool DOSFAM::OpenTableFile(PGLOBAL g) // Selective delete, pass thru Bin = true; + /* fall through */ case MODE_UPDATE: if ((UseTemp = Tdbp->IsUsingTemp(g))) { strcpy(opmode, "r"); diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index fdc5433f4a4..71a8dd98c61 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -440,6 +440,7 @@ bool VCTFAM::OpenTableFile(PGLOBAL g) } // endif // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb" : "r+b"); @@ -1918,6 +1919,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g) } // endif filter // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb": "r+b"); @@ -3584,6 +3586,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g) } // endif // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); oflag = (UseTemp) ? O_RDONLY : O_RDWR; diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index 262d6b58a70..35840e9b5e3 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -1193,7 +1193,7 @@ bool FILTER::Convert(PGLOBAL g, bool having) Arg(0) = pXVOID; } // endif void - // pass thru + // fall through case OP_IN: // For IN operator do optimize if operand is an array if (GetArgType(1) != TYPE_ARRAY) @@ -1260,6 +1260,7 @@ bool FILTER::Eval(PGLOBAL g) } // endif Opm // For modified operators, pass thru + /* fall through */ case OP_IN: case OP_EXIST: // For IN operations, special processing is done here diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 1f2bd0e5992..e646069c219 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1427,7 +1427,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VAR_STRING: pcf->Flags |= U_VAR; - /* no break */ + /* fall through */ default: pcf->Type= MYSQLtoPLG(fp->type(), &v); break; @@ -2801,6 +2801,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) case Item_func::LIKE_FUNC: vop= OP_LIKE; break; case Item_func::ISNOTNULL_FUNC: neg = true; + /* fall through */ case Item_func::ISNULL_FUNC: vop= OP_NULL; break; case Item_func::IN_FUNC: vop= OP_IN; case Item_func::BETWEEN: @@ -4220,7 +4221,8 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) } else return false; - /* Fall through to check FILE_ACL */ + /* check FILE_ACL */ + /* fall through */ case TAB_ODBC: case TAB_JDBC: case TAB_MYSQL: @@ -5454,7 +5456,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #endif // JDBC_SUPPORT case TAB_DBF: dbf= true; - // Passthru + // fall through case TAB_CSV: if (!fn && fnc != FNC_NO) sprintf(g->Message, "Missing %s file name", topt->type); diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index b473871e9f7..6904295066f 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -533,7 +533,7 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src) if (!has_e) goto err; - // passthru + // fall through case '-': if (found_digit) goto err; @@ -826,7 +826,7 @@ bool JOUTSTR::Escape(const char *s) case '\r': case '\b': case '\f': WriteChr('\\'); - // 
passthru + // fall through default: WriteChr(s[i]); break; diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index 1910cdcdec8..52c6058393f 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -917,7 +917,7 @@ int PlugCloseFile(PGLOBAL g __attribute__((unused)), PFBLOCK fp, bool all) fp->Memory = NULL; fp->Mode = MODE_ANY; - // Passthru + // fall through case TYPE_FB_HANDLE: if (fp->Handle && fp->Handle != INVALID_HANDLE_VALUE) if (CloseFileHandle(fp->Handle)) diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 5bb7848ab1c..0bb7adcce83 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -128,6 +128,7 @@ int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef) switch (toupper(c)) { case 'M': n *= 1024; + // fall through case 'K': n *= 1024; } // endswitch c diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index d2bb3d7a4af..2da92741e4f 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -1307,6 +1307,7 @@ PBF TDBDOS::InitBlockFilter(PGLOBAL g, PFIL filp) } // endif !opm // if opm, pass thru + /* fall through */ case OP_IN: if (filp->GetArgType(0) == TYPE_COLBLK && filp->GetArgType(1) == TYPE_ARRAY) { diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 1e11d454cfc..2addcc139fb 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1530,7 +1530,7 @@ void JSONCOL::WriteColumn(PGLOBAL g) break; } // endif Op - // Passthru + // fall through case TYPE_DATE: case TYPE_INT: case TYPE_SHORT: diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index ced690e77c0..7660ee0a630 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -118,7 +118,7 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval, maxval++; if (minus) *minus = true; } // endif Unsigned - + /* fall through */ case '+': p++; break; diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 15fb71ab88a..ae210cb900c 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -464,7 +464,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) if (ApplyFilter(g, filp)) break; - // passthru + // fall through case RC_NF: continue; case RC_EF: diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index a0b7849543d..746de097918 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -405,7 +405,7 @@ bool STRING::Append_quoted(PSZ s) case '\r': case '\b': case '\f': b |= Append('\\'); - // passthru + // fall through default: b |= Append(*p); break; diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 97baa27aba6..927921d6df4 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2004, 2015, Oracle and/or its affiliates. + Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2952,6 +2953,7 @@ int ha_federated::extra(ha_extra_function operation) break; case HA_EXTRA_PREPARE_FOR_DROP: table_will_be_deleted = TRUE; + break; default: /* do nothing */ DBUG_PRINT("info",("unhandled operation: %d", (uint) operation)); diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index a68a35e63fb..d03c7c46f15 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2011, Oracle and/or its affiliates. 
- Copyright (c) 2010, 2014, SkySQL Ab. + Copyright (c) 2010, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -94,7 +94,7 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, case HA_KEYTYPE_VARBINARY1: /* Case-insensitiveness is handled in coll->hash_sort */ keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT1; - /* fall_through */ + /* fall through */ case HA_KEYTYPE_VARTEXT1: keyinfo->flag|= HA_VAR_LENGTH_KEY; length+= 2; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 1538471db4f..dac70203ee4 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -7063,8 +7063,8 @@ ha_innobase::innobase_lock_autoinc(void) break; } } - /* Fall through to old style locking. */ - + /* Use old style locking. */ + /* fall through */ case AUTOINC_OLD_STYLE_LOCKING: DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used", ut_ad(0);); @@ -9725,7 +9725,8 @@ create_options_are_invalid( case ROW_TYPE_DYNAMIC: CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; - /* fall through since dynamic also shuns KBS */ + /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */ + /* fall through */ case ROW_TYPE_COMPACT: case ROW_TYPE_REDUNDANT: if (kbs_specified) { @@ -10111,7 +10112,8 @@ index_bad: break; } zip_allowed = FALSE; - /* fall through to set row_format = COMPACT */ + /* Set ROW_FORMAT = COMPACT */ + /* fall through */ case ROW_TYPE_NOT_USED: case ROW_TYPE_FIXED: case ROW_TYPE_PAGE: @@ -10119,6 +10121,7 @@ index_bad: thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: assuming ROW_FORMAT=COMPACT."); + /* fall through */ case ROW_TYPE_DEFAULT: /* If we fell through, set row format to Compact. */ row_format = ROW_TYPE_COMPACT; diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic index d489bef89a8..b4dbf4b1213 100644 --- a/storage/innobase/include/data0type.ic +++ b/storage/innobase/include/data0type.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -574,7 +575,8 @@ dtype_get_fixed_size_low( #else /* !UNIV_HOTBACKUP */ return(len); #endif /* !UNIV_HOTBACKUP */ - /* fall through for variable-length charsets */ + /* Treat as variable-length. */ + /* Fall through */ case DATA_VARCHAR: case DATA_BINARY: case DATA_DECIMAL: diff --git a/storage/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic index 6b303979b18..9a8441280a6 100644 --- a/storage/innobase/include/mach0data.ic +++ b/storage/innobase/include/mach0data.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -787,13 +788,13 @@ mach_swap_byte_order( dest += len; switch (len & 0x7) { - case 0: *--dest = *from++; - case 7: *--dest = *from++; - case 6: *--dest = *from++; - case 5: *--dest = *from++; - case 4: *--dest = *from++; - case 3: *--dest = *from++; - case 2: *--dest = *from++; + case 0: *--dest = *from++; /* fall through */ + case 7: *--dest = *from++; /* fall through */ + case 6: *--dest = *from++; /* fall through */ + case 5: *--dest = *from++; /* fall through */ + case 4: *--dest = *from++; /* fall through */ + case 3: *--dest = *from++; /* fall through */ + case 2: *--dest = *from++; /* fall through */ case 1: *--dest = *from; } } diff --git a/storage/innobase/include/ut0rnd.ic b/storage/innobase/include/ut0rnd.ic index 024c59e553b..987dfac03c1 100644 --- a/storage/innobase/include/ut0rnd.ic +++ b/storage/innobase/include/ut0rnd.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -237,16 +238,22 @@ ut_fold_binary( switch (len & 0x7) { case 7: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 6: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 5: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 4: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 3: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 2: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 1: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); } diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index da8e1aab2c9..1337496b897 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -2018,6 +2018,7 @@ PageConverter::update_page( case FIL_PAGE_TYPE_XDES: err = set_current_xdes( buf_block_get_page_no(block), get_frame(block)); + /* fall through */ case FIL_PAGE_INODE: case FIL_PAGE_TYPE_TRX_SYS: case FIL_PAGE_IBUF_FREE_LIST: diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index 77cca37ddd1..3a49f033c9c 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1872,6 +1873,7 @@ row_log_table_apply_update( When applying the subsequent ROW_T_DELETE, no matching record will be found. 
*/ + /* fall through */ case DB_SUCCESS: ut_ad(row != NULL); break; diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index a3f4355ec6c..69d77060bd8 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -5407,7 +5407,8 @@ loop: fputs(" InnoDB: Warning: CHECK TABLE on ", stderr); dict_index_name_print(stderr, prebuilt->trx, index); fprintf(stderr, " returned %lu\n", ret); - /* fall through (this error is ignored by CHECK TABLE) */ + /* (this error is ignored by CHECK TABLE) */ + /* fall through */ case DB_END_OF_INDEX: func_exit: mem_free(buf); diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index bc2e0b0e1cb..bb883310820 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -488,8 +489,9 @@ row_purge_remove_sec_if_poss_leaf( success = false; } } - /* fall through (the index entry is still needed, + /* (The index entry is still needed, or the deletion succeeded) */ + /* fall through */ case ROW_NOT_DELETED_REF: /* The index entry is still needed. */ case ROW_BUFFERED: diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc index a2165a917b8..855fd5dec11 100644 --- a/storage/innobase/sync/sync0sync.cc +++ b/storage/innobase/sync/sync0sync.cc @@ -1123,6 +1123,7 @@ sync_thread_add_level( upgrading in innobase_start_or_create_for_mysql(). 
*/ break; } + /* fall through */ case SYNC_MEM_POOL: case SYNC_MEM_HASH: case SYNC_RECV: diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index fd21d2863f8..c6f53eaa24b 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -157,6 +157,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ + /* fall through */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { @@ -313,7 +314,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, share->state.open_count= 1; share->changed= 1; _ma_mark_file_changed_now(share); - /* Fall trough */ + /* Fall through */ case HA_EXTRA_PREPARE_FOR_RENAME: { my_bool do_flush= MY_TEST(function != HA_EXTRA_PREPARE_FOR_DROP); diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c index 59e0630be8c..a09662544a2 100644 --- a/storage/maria/ma_recovery.c +++ b/storage/maria/ma_recovery.c @@ -3061,7 +3061,7 @@ static MARIA_HA *get_MARIA_HA_from_REDO_record(const case LOGREC_REDO_INDEX: case LOGREC_REDO_INDEX_FREE_PAGE: index_page_redo_entry= 1; - /* Fall trough*/ + /* Fall through */ case LOGREC_REDO_INSERT_ROW_HEAD: case LOGREC_REDO_INSERT_ROW_TAIL: case LOGREC_REDO_PURGE_ROW_HEAD: diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c index a47c1987e38..04e32378fd3 100644 --- a/storage/myisam/mi_extra.c +++ b/storage/myisam/mi_extra.c @@ -150,6 +150,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ + /* fall through */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { @@ -262,7 +263,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) //share->deleting= TRUE; share->global_changed= FALSE; /* force writing changed flag */ _mi_mark_file_changed(info); - /* Fall trough */ + /* Fall through */ case HA_EXTRA_PREPARE_FOR_RENAME: mysql_mutex_lock(&THR_LOCK_myisam); share->last_version= 0L; /* Impossible version */ diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index c29172a474a..495420dad32 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -1844,8 +1844,8 @@ int spider_db_append_key_where_internal( #if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100000 case HA_READ_PREFIX_LAST: result_list->desc_flg = TRUE; - /* fall through */ #endif + /* fall through */ case HA_READ_KEY_EXACT: if (sql_kind == SPIDER_SQL_KIND_SQL) { diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 6c390328258..2c6f844d026 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -7749,8 +7749,8 @@ ha_innobase::innobase_lock_autoinc(void) break; } } - /* Fall through to old style locking. */ - + /* Use old style locking. 
*/ + /* fall through */ case AUTOINC_OLD_STYLE_LOCKING: DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used", ut_ad(0);); @@ -10473,7 +10473,8 @@ create_options_are_invalid( case ROW_TYPE_DYNAMIC: CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; - /* fall through since dynamic also shuns KBS */ + /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */ + /* fall through */ case ROW_TYPE_COMPACT: case ROW_TYPE_REDUNDANT: if (kbs_specified) { @@ -10855,7 +10856,8 @@ index_bad: break; } zip_allowed = FALSE; - /* fall through to set row_format = COMPACT */ + /* Set ROW_FORMAT = COMPACT */ + /* fall through */ case ROW_TYPE_NOT_USED: case ROW_TYPE_FIXED: case ROW_TYPE_PAGE: @@ -10864,6 +10866,7 @@ index_bad: thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: assuming ROW_FORMAT=COMPACT."); + /* fall through */ case ROW_TYPE_DEFAULT: /* If we fell through, set row format to Compact. */ row_format = ROW_TYPE_COMPACT; diff --git a/storage/xtradb/include/data0type.ic b/storage/xtradb/include/data0type.ic index 555852474aa..8f5cee0fd5f 100644 --- a/storage/xtradb/include/data0type.ic +++ b/storage/xtradb/include/data0type.ic @@ -576,7 +576,8 @@ dtype_get_fixed_size_low( #else /* !UNIV_HOTBACKUP */ return(len); #endif /* !UNIV_HOTBACKUP */ - /* fall through for variable-length charsets */ + /* Treat as variable-length. */ + /* Fall through */ case DATA_VARCHAR: case DATA_BINARY: case DATA_DECIMAL: diff --git a/storage/xtradb/include/mach0data.ic b/storage/xtradb/include/mach0data.ic index 4b9275cc12c..26d6c879c9b 100644 --- a/storage/xtradb/include/mach0data.ic +++ b/storage/xtradb/include/mach0data.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -779,13 +780,13 @@ mach_swap_byte_order( dest += len; switch (len & 0x7) { - case 0: *--dest = *from++; - case 7: *--dest = *from++; - case 6: *--dest = *from++; - case 5: *--dest = *from++; - case 4: *--dest = *from++; - case 3: *--dest = *from++; - case 2: *--dest = *from++; + case 0: *--dest = *from++; /* fall through */ + case 7: *--dest = *from++; /* fall through */ + case 6: *--dest = *from++; /* fall through */ + case 5: *--dest = *from++; /* fall through */ + case 4: *--dest = *from++; /* fall through */ + case 3: *--dest = *from++; /* fall through */ + case 2: *--dest = *from++; /* fall through */ case 1: *--dest = *from; } } diff --git a/storage/xtradb/include/ut0rnd.ic b/storage/xtradb/include/ut0rnd.ic index 024c59e553b..987dfac03c1 100644 --- a/storage/xtradb/include/ut0rnd.ic +++ b/storage/xtradb/include/ut0rnd.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -237,16 +238,22 @@ ut_fold_binary( switch (len & 0x7) { case 7: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 6: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 5: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 4: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 3: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 2: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 1: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); } diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 23b9ba60269..bc7b5206a64 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -2018,6 +2018,7 @@ PageConverter::update_page( case FIL_PAGE_TYPE_XDES: err = set_current_xdes( buf_block_get_page_no(block), get_frame(block)); + /* fall through */ case FIL_PAGE_INODE: case FIL_PAGE_TYPE_TRX_SYS: case FIL_PAGE_IBUF_FREE_LIST: diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc index 3c5d5773aee..bc40a8f7c05 100644 --- a/storage/xtradb/row/row0log.cc +++ b/storage/xtradb/row/row0log.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1872,6 +1873,7 @@ row_log_table_apply_update( When applying the subsequent ROW_T_DELETE, no matching record will be found. */ + /* fall through */ case DB_SUCCESS: ut_ad(row != NULL); break; diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index c0c2b6fbabe..eca26ce9763 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -5421,7 +5421,8 @@ loop: fputs(" InnoDB: Warning: CHECK TABLE on ", stderr); dict_index_name_print(stderr, prebuilt->trx, index); fprintf(stderr, " returned %lu\n", ret); - /* fall through (this error is ignored by CHECK TABLE) */ + /* (this error is ignored by CHECK TABLE) */ + /* fall through */ case DB_END_OF_INDEX: func_exit: mem_free(buf); diff --git a/storage/xtradb/row/row0purge.cc b/storage/xtradb/row/row0purge.cc index 35b3520749b..d1a7ac2036e 100644 --- a/storage/xtradb/row/row0purge.cc +++ b/storage/xtradb/row/row0purge.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -488,8 +489,9 @@ row_purge_remove_sec_if_poss_leaf( success = false; } } - /* fall through (the index entry is still needed, + /* (The index entry is still needed, or the deletion succeeded) */ + /* fall through */ case ROW_NOT_DELETED_REF: /* The index entry is still needed. 
*/ case ROW_BUFFERED: diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc index c270b73e5a0..48cfbf4d6c3 100644 --- a/storage/xtradb/sync/sync0sync.cc +++ b/storage/xtradb/sync/sync0sync.cc @@ -1220,6 +1220,7 @@ sync_thread_add_level( upgrading in innobase_start_or_create_for_mysql(). */ break; } + /* fall through */ case SYNC_MEM_POOL: case SYNC_MEM_HASH: case SYNC_RECV: @@ -1282,9 +1283,9 @@ sync_thread_add_level( } } ut_ad(found_current); - - /* fallthrough */ } + + /* fall through */ case SYNC_BUF_FLUSH_LIST: case SYNC_BUF_LRU_LIST: case SYNC_BUF_FREE_LIST: -- cgit v1.2.1 From 4754f88cff3aa40706740c5656f97ff56d1f5c28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 16 May 2017 17:45:22 +0300 Subject: Never pass NULL to innobase_get_stmt() --- storage/innobase/handler/ha_innodb.cc | 10 +++------- storage/innobase/lock/lock0lock.cc | 12 +++++------- storage/innobase/trx/trx0i_s.cc | 4 +++- storage/xtradb/handler/ha_innodb.cc | 10 +++------- storage/xtradb/lock/lock0lock.cc | 12 +++++------- storage/xtradb/trx/trx0i_s.cc | 4 +++- 6 files changed, 22 insertions(+), 30 deletions(-) (limited to 'storage') diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index dac70203ee4..26c9d0321d4 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1998,15 +1998,11 @@ innobase_get_stmt( THD* thd, /*!< in: MySQL thread handle */ size_t* length) /*!< out: length of the SQL statement */ { - const char* query = NULL; - LEX_STRING *stmt = NULL; - ut_ad(thd != NULL); - stmt = thd_query_string(thd); - if (stmt) { + if (const LEX_STRING *stmt = thd_query_string(thd)) { *length = stmt->length; - query = stmt->str; + return stmt->str; } - return (query); + return NULL; } /**********************************************************************//** diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 0a75389094d..869de0be182 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -4742,13 +4742,11 @@ lock_rec_unlock( trx_mutex_exit(trx); stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: unlock row could not" - " find a %lu mode lock on the record\n", - (ulong) lock_mode); - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: current statement: %.*s\n", + + ib_logf(IB_LOG_LEVEL_ERROR, + "unlock row could not find a %u mode lock on the record;" + " statement=%.*s", + lock_mode, (int) stmt_len, stmt); return; diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index 993006efc6d..b05161a9932 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -503,7 +503,9 @@ fill_trx_row( row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd); - stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); + stmt = trx->mysql_thd + ? 
innobase_get_stmt(trx->mysql_thd, &stmt_len) + : NULL; if (stmt != NULL) { char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1]; diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 2c6f844d026..c1b9f59fea4 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -2324,15 +2324,11 @@ innobase_get_stmt( THD* thd, /*!< in: MySQL thread handle */ size_t* length) /*!< out: length of the SQL statement */ { - const char* query = NULL; - LEX_STRING *stmt = NULL; - ut_ad(thd != NULL); - stmt = thd_query_string(thd); - if (stmt) { + if (const LEX_STRING *stmt = thd_query_string(thd)) { *length = stmt->length; - query = stmt->str; + return stmt->str; } - return (query); + return NULL; } /**********************************************************************//** diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index ffbb1601ca5..e570f7ed494 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -4788,13 +4788,11 @@ lock_rec_unlock( trx_mutex_exit(trx); stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: unlock row could not" - " find a %lu mode lock on the record\n", - (ulong) lock_mode); - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: current statement: %.*s\n", + + ib_logf(IB_LOG_LEVEL_ERROR, + "unlock row could not find a %u mode lock on the record;" + " statement=%.*s", + lock_mode, (int) stmt_len, stmt); return; diff --git a/storage/xtradb/trx/trx0i_s.cc b/storage/xtradb/trx/trx0i_s.cc index eacd9212d2f..0c9618d98eb 100644 --- a/storage/xtradb/trx/trx0i_s.cc +++ b/storage/xtradb/trx/trx0i_s.cc @@ -507,7 +507,9 @@ fill_trx_row( row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd); - stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); + stmt = trx->mysql_thd + ? innobase_get_stmt(trx->mysql_thd, &stmt_len) + : NULL; if (stmt != NULL) { char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1]; -- cgit v1.2.1 From 71cd205956818d29edb6bf09002ecf2fe8303f64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 16 May 2017 20:08:47 +0300 Subject: Silence bogus GCC 7 warnings -Wimplicit-fallthrough Do not silence uncertain cases, or fix any bugs. The only functional change should be that ha_federated::extra() is not calling DBUG_PRINT to report an unhandled case for HA_EXTRA_PREPARE_FOR_DROP. 
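For readers unfamiliar with the warning, a minimal standalone sketch of the pattern this patch applies follows; it is an illustration only, not code from this tree. In GCC 7, -Wextra implies -Wimplicit-fallthrough, which flags a case label reached by falling off the end of the previous case unless the fall-through is annotated; a plain /* fall through */ comment placed immediately before the next case label is among the comment forms the default warning level recognizes, which is why this patch adds or normalizes such comments before the affected labels.

    #include <stdio.h>

    /* Hypothetical example modelled on the RELDEF::GetSizeCatInfo() hunk;
       the fall-through comment keeps -Wimplicit-fallthrough quiet. */
    static int scale(char unit, int n)
    {
        switch (unit) {
        case 'M':
            n *= 1024;
            /* fall through */
        case 'K':
            n *= 1024;
            break;
        default:
            break;
        }
        return n;
    }

    int main(void)
    {
        printf("%d\n", scale('M', 2));  /* prints 2097152, i.e. 2M in bytes */
        return 0;
    }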
--- storage/connect/array.cpp | 2 ++ storage/connect/filamdbf.cpp | 4 +++- storage/connect/filamfix.cpp | 3 ++- storage/connect/filamtxt.cpp | 1 + storage/connect/filamvct.cpp | 3 +++ storage/connect/filter.cpp | 3 ++- storage/connect/ha_connect.cc | 8 +++++--- storage/connect/json.cpp | 4 ++-- storage/connect/plgdbutl.cpp | 2 +- storage/connect/reldef.cpp | 1 + storage/connect/tabdos.cpp | 1 + storage/connect/tabjson.cpp | 2 +- storage/connect/value.cpp | 2 +- storage/connect/xindex.cpp | 2 +- storage/connect/xobject.cpp | 2 +- storage/federated/ha_federated.cc | 2 ++ storage/heap/hp_create.c | 4 ++-- storage/innobase/handler/ha_innodb.cc | 11 +++++++---- storage/innobase/include/data0type.ic | 4 +++- storage/innobase/include/mach0data.ic | 15 ++++++++------- storage/innobase/include/ut0rnd.ic | 7 +++++++ storage/innobase/row/row0import.cc | 1 + storage/innobase/row/row0log.cc | 2 ++ storage/innobase/row/row0mysql.cc | 3 ++- storage/innobase/row/row0purge.cc | 4 +++- storage/innobase/sync/sync0sync.cc | 1 + storage/maria/ma_extra.c | 3 ++- storage/maria/ma_recovery.c | 2 +- storage/myisam/mi_extra.c | 3 ++- storage/spider/spd_db_conn.cc | 2 +- storage/xtradb/handler/ha_innodb.cc | 11 +++++++---- storage/xtradb/include/data0type.ic | 3 ++- storage/xtradb/include/mach0data.ic | 15 ++++++++------- storage/xtradb/include/ut0rnd.ic | 7 +++++++ storage/xtradb/row/row0import.cc | 1 + storage/xtradb/row/row0log.cc | 2 ++ storage/xtradb/row/row0mysql.cc | 3 ++- storage/xtradb/row/row0purge.cc | 4 +++- storage/xtradb/sync/sync0sync.cc | 5 +++-- 39 files changed, 106 insertions(+), 49 deletions(-) (limited to 'storage') diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index 1998ab890e9..88ab85d20eb 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -155,6 +155,7 @@ ARRAY::ARRAY(PGLOBAL g, int type, int size, int length, int prec) switch (type) { case TYPE_STRING: Len = length; + /* fall through */ case TYPE_SHORT: case TYPE_INT: case TYPE_DOUBLE: @@ -592,6 +593,7 @@ int ARRAY::Convert(PGLOBAL g, int k, PVAL vp) switch (Type) { case TYPE_DOUBLE: prec = 2; + /* fall through */ case TYPE_SHORT: case TYPE_INT: case TYPE_DATE: diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 9feb61d7d61..fec98a72544 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -503,7 +503,8 @@ bool DBFFAM::OpenTableFile(PGLOBAL g) break; } // endif - // Selective delete, pass thru + // Selective delete + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb" : "r+b"); @@ -623,6 +624,7 @@ bool DBFFAM::AllocateBuffer(PGLOBAL g) case 'L': // Large (big) integer case 'T': // Tiny integer c = 'N'; // Numeric + /* fall through */ case 'N': // Numeric (integer) case 'F': // Float (double) descp->Decimals = (uchar)cdp->F.Prec; diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp index cd25429318a..dcb677dc848 100644 --- a/storage/connect/filamfix.cpp +++ b/storage/connect/filamfix.cpp @@ -919,7 +919,8 @@ bool BGXFAM::OpenTableFile(PGLOBAL g) break; } // endif - // Selective delete, pass thru + // Selective delete + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); oflag |= (UseTemp) ? 
O_RDONLY : O_RDWR; diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index e53cdcd9ba9..12c0a66ecad 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -561,6 +561,7 @@ bool DOSFAM::OpenTableFile(PGLOBAL g) // Selective delete, pass thru Bin = true; + /* fall through */ case MODE_UPDATE: if ((UseTemp = Tdbp->IsUsingTemp(g))) { strcpy(opmode, "r"); diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index fdc5433f4a4..71a8dd98c61 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -440,6 +440,7 @@ bool VCTFAM::OpenTableFile(PGLOBAL g) } // endif // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb" : "r+b"); @@ -1918,6 +1919,7 @@ bool VECFAM::OpenTableFile(PGLOBAL g) } // endif filter // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); strcpy(opmode, (UseTemp) ? "rb": "r+b"); @@ -3584,6 +3586,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g) } // endif // Selective delete, pass thru + /* fall through */ case MODE_UPDATE: UseTemp = Tdbp->IsUsingTemp(g); oflag = (UseTemp) ? O_RDONLY : O_RDWR; diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index 262d6b58a70..35840e9b5e3 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -1193,7 +1193,7 @@ bool FILTER::Convert(PGLOBAL g, bool having) Arg(0) = pXVOID; } // endif void - // pass thru + // fall through case OP_IN: // For IN operator do optimize if operand is an array if (GetArgType(1) != TYPE_ARRAY) @@ -1260,6 +1260,7 @@ bool FILTER::Eval(PGLOBAL g) } // endif Opm // For modified operators, pass thru + /* fall through */ case OP_IN: case OP_EXIST: // For IN operations, special processing is done here diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index b946424b9ab..8f537e546f7 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1427,7 +1427,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VAR_STRING: pcf->Flags |= U_VAR; - /* no break */ + /* fall through */ default: pcf->Type= MYSQLtoPLG(fp->type(), &v); break; @@ -2801,6 +2801,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) case Item_func::LIKE_FUNC: vop= OP_LIKE; break; case Item_func::ISNOTNULL_FUNC: neg = true; + /* fall through */ case Item_func::ISNULL_FUNC: vop= OP_NULL; break; case Item_func::IN_FUNC: vop= OP_IN; case Item_func::BETWEEN: @@ -4220,7 +4221,8 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) } else return false; - /* Fall through to check FILE_ACL */ + /* check FILE_ACL */ + /* fall through */ case TAB_ODBC: case TAB_JDBC: case TAB_MYSQL: @@ -5454,7 +5456,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #endif // JDBC_SUPPORT case TAB_DBF: dbf= true; - // Passthru + // fall through case TAB_CSV: if (!fn && fnc != FNC_NO) sprintf(g->Message, "Missing %s file name", topt->type); diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index b473871e9f7..6904295066f 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -533,7 +533,7 @@ PVAL ParseNumeric(PGLOBAL g, int& i, STRG& src) if (!has_e) goto err; - // passthru + // fall through case '-': if (found_digit) goto err; @@ -826,7 +826,7 @@ bool JOUTSTR::Escape(const char *s) case '\r': case '\b': case '\f': WriteChr('\\'); - // 
passthru + // fall through default: WriteChr(s[i]); break; diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index 1910cdcdec8..52c6058393f 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -917,7 +917,7 @@ int PlugCloseFile(PGLOBAL g __attribute__((unused)), PFBLOCK fp, bool all) fp->Memory = NULL; fp->Mode = MODE_ANY; - // Passthru + // fall through case TYPE_FB_HANDLE: if (fp->Handle && fp->Handle != INVALID_HANDLE_VALUE) if (CloseFileHandle(fp->Handle)) diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 5bb7848ab1c..0bb7adcce83 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -128,6 +128,7 @@ int RELDEF::GetSizeCatInfo(PSZ what, PSZ sdef) switch (toupper(c)) { case 'M': n *= 1024; + // fall through case 'K': n *= 1024; } // endswitch c diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index d2bb3d7a4af..2da92741e4f 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -1307,6 +1307,7 @@ PBF TDBDOS::InitBlockFilter(PGLOBAL g, PFIL filp) } // endif !opm // if opm, pass thru + /* fall through */ case OP_IN: if (filp->GetArgType(0) == TYPE_COLBLK && filp->GetArgType(1) == TYPE_ARRAY) { diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 1e11d454cfc..2addcc139fb 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1530,7 +1530,7 @@ void JSONCOL::WriteColumn(PGLOBAL g) break; } // endif Op - // Passthru + // fall through case TYPE_DATE: case TYPE_INT: case TYPE_SHORT: diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index ced690e77c0..7660ee0a630 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -118,7 +118,7 @@ ulonglong CharToNumber(char *p, int n, ulonglong maxval, maxval++; if (minus) *minus = true; } // endif Unsigned - + /* fall through */ case '+': p++; break; diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 15fb71ab88a..ae210cb900c 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -464,7 +464,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) if (ApplyFilter(g, filp)) break; - // passthru + // fall through case RC_NF: continue; case RC_EF: diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index a0b7849543d..746de097918 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -405,7 +405,7 @@ bool STRING::Append_quoted(PSZ s) case '\r': case '\b': case '\f': b |= Append('\\'); - // passthru + // fall through default: b |= Append(*p); break; diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 478a8f1cfaa..ddf7ec57ad8 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -1,4 +1,5 @@ /* Copyright (c) 2004, 2015, Oracle and/or its affiliates. + Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2945,6 +2946,7 @@ int ha_federated::extra(ha_extra_function operation) break; case HA_EXTRA_PREPARE_FOR_DROP: table_will_be_deleted = TRUE; + break; default: /* do nothing */ DBUG_PRINT("info",("unhandled operation: %d", (uint) operation)); diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index bb5537c9363..431e992e75b 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2011, Oracle and/or its affiliates. 
- Copyright (c) 2010, 2014, SkySQL Ab. + Copyright (c) 2010, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -94,7 +94,7 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, case HA_KEYTYPE_VARBINARY1: /* Case-insensitiveness is handled in coll->hash_sort */ keyinfo->seg[j].type= HA_KEYTYPE_VARTEXT1; - /* fall_through */ + /* fall through */ case HA_KEYTYPE_VARTEXT1: keyinfo->flag|= HA_VAR_LENGTH_KEY; length+= 2; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 8c795e4bdc5..21dece8ed94 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -7988,8 +7988,8 @@ ha_innobase::innobase_lock_autoinc(void) break; } } - /* Fall through to old style locking. */ - + /* Use old style locking. */ + /* fall through */ case AUTOINC_OLD_STYLE_LOCKING: DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used", ut_ad(0);); @@ -11304,7 +11304,8 @@ create_options_are_invalid( case ROW_TYPE_DYNAMIC: CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; - /* fall through since dynamic also shuns KBS */ + /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */ + /* fall through */ case ROW_TYPE_COMPACT: case ROW_TYPE_REDUNDANT: if (kbs_specified) { @@ -11717,7 +11718,8 @@ index_bad: break; /* Correct row_format */ } zip_allowed = FALSE; - /* fall through to set row_format = COMPACT */ + /* Set ROW_FORMAT = COMPACT */ + /* fall through */ case ROW_TYPE_NOT_USED: case ROW_TYPE_FIXED: case ROW_TYPE_PAGE: @@ -11725,6 +11727,7 @@ index_bad: thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: assuming ROW_FORMAT=COMPACT."); + /* fall through */ case ROW_TYPE_DEFAULT: /* If we fell through, set row format to Compact. */ row_format = ROW_TYPE_COMPACT; diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic index d489bef89a8..b4dbf4b1213 100644 --- a/storage/innobase/include/data0type.ic +++ b/storage/innobase/include/data0type.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -574,7 +575,8 @@ dtype_get_fixed_size_low( #else /* !UNIV_HOTBACKUP */ return(len); #endif /* !UNIV_HOTBACKUP */ - /* fall through for variable-length charsets */ + /* Treat as variable-length. */ + /* Fall through */ case DATA_VARCHAR: case DATA_BINARY: case DATA_DECIMAL: diff --git a/storage/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic index 215bb12cbe7..33b6405c8b5 100644 --- a/storage/innobase/include/mach0data.ic +++ b/storage/innobase/include/mach0data.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -786,13 +787,13 @@ mach_swap_byte_order( dest += len; switch (len & 0x7) { - case 0: *--dest = *from++; - case 7: *--dest = *from++; - case 6: *--dest = *from++; - case 5: *--dest = *from++; - case 4: *--dest = *from++; - case 3: *--dest = *from++; - case 2: *--dest = *from++; + case 0: *--dest = *from++; /* fall through */ + case 7: *--dest = *from++; /* fall through */ + case 6: *--dest = *from++; /* fall through */ + case 5: *--dest = *from++; /* fall through */ + case 4: *--dest = *from++; /* fall through */ + case 3: *--dest = *from++; /* fall through */ + case 2: *--dest = *from++; /* fall through */ case 1: *--dest = *from; } } diff --git a/storage/innobase/include/ut0rnd.ic b/storage/innobase/include/ut0rnd.ic index 024c59e553b..987dfac03c1 100644 --- a/storage/innobase/include/ut0rnd.ic +++ b/storage/innobase/include/ut0rnd.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -237,16 +238,22 @@ ut_fold_binary( switch (len & 0x7) { case 7: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 6: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 5: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 4: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 3: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 2: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 1: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); } diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index acbf88fcea3..f0db79be17e 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -1994,6 +1994,7 @@ PageConverter::update_page( case FIL_PAGE_TYPE_XDES: err = set_current_xdes( buf_block_get_page_no(block), get_frame(block)); + /* fall through */ case FIL_PAGE_INODE: case FIL_PAGE_TYPE_TRX_SYS: case FIL_PAGE_IBUF_FREE_LIST: diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc index 12d4a59da6b..65690d7038c 100644 --- a/storage/innobase/row/row0log.cc +++ b/storage/innobase/row/row0log.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1880,6 +1881,7 @@ row_log_table_apply_update( When applying the subsequent ROW_T_DELETE, no matching record will be found. 
*/ + /* fall through */ case DB_SUCCESS: ut_ad(row != NULL); break; diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 717fd6bde0c..82938995e93 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -5511,7 +5511,8 @@ loop: fputs(" InnoDB: Warning: CHECK TABLE on ", stderr); dict_index_name_print(stderr, prebuilt->trx, index); fprintf(stderr, " returned %lu\n", ret); - /* fall through (this error is ignored by CHECK TABLE) */ + /* (this error is ignored by CHECK TABLE) */ + /* fall through */ case DB_END_OF_INDEX: func_exit: mem_free(buf); diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index e0f4599be06..1ed8b7377d9 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -488,8 +489,9 @@ row_purge_remove_sec_if_poss_leaf( success = false; } } - /* fall through (the index entry is still needed, + /* (The index entry is still needed, or the deletion succeeded) */ + /* fall through */ case ROW_NOT_DELETED_REF: /* The index entry is still needed. */ case ROW_BUFFERED: diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc index 1109d86c146..9abad30a9bd 100644 --- a/storage/innobase/sync/sync0sync.cc +++ b/storage/innobase/sync/sync0sync.cc @@ -1144,6 +1144,7 @@ sync_thread_add_level( upgrading in innobase_start_or_create_for_mysql(). 
*/ break; } + /* fall through */ case SYNC_MEM_POOL: case SYNC_MEM_HASH: case SYNC_RECV: diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index 3c7e829f8ad..99c9df23030 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -157,6 +157,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ + /* fall through */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { @@ -313,7 +314,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, share->state.open_count= 1; share->changed= 1; _ma_mark_file_changed_now(share); - /* Fall trough */ + /* Fall through */ case HA_EXTRA_PREPARE_FOR_RENAME: { my_bool do_flush= MY_TEST(function != HA_EXTRA_PREPARE_FOR_DROP); diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c index 08d306a89be..9f142cb61c9 100644 --- a/storage/maria/ma_recovery.c +++ b/storage/maria/ma_recovery.c @@ -3061,7 +3061,7 @@ static MARIA_HA *get_MARIA_HA_from_REDO_record(const case LOGREC_REDO_INDEX: case LOGREC_REDO_INDEX_FREE_PAGE: index_page_redo_entry= 1; - /* Fall trough*/ + /* Fall through */ case LOGREC_REDO_INSERT_ROW_HEAD: case LOGREC_REDO_INSERT_ROW_TAIL: case LOGREC_REDO_PURGE_ROW_HEAD: diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c index a47c1987e38..04e32378fd3 100644 --- a/storage/myisam/mi_extra.c +++ b/storage/myisam/mi_extra.c @@ -150,6 +150,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) if (info->s->data_file_type != DYNAMIC_RECORD) break; /* Remove read/write cache if dynamic rows */ + /* fall through */ case HA_EXTRA_NO_CACHE: if (info->opt_flag & (READ_CACHE_USED | WRITE_CACHE_USED)) { @@ -262,7 +263,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) //share->deleting= TRUE; share->global_changed= FALSE; /* force writing changed flag */ _mi_mark_file_changed(info); - /* Fall trough */ + /* Fall through */ case HA_EXTRA_PREPARE_FOR_RENAME: mysql_mutex_lock(&THR_LOCK_myisam); share->last_version= 0L; /* Impossible version */ diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 21bbac49f38..294fe92323c 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -1844,8 +1844,8 @@ int spider_db_append_key_where_internal( #if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100000 case HA_READ_PREFIX_LAST: result_list->desc_flg = TRUE; - /* fall through */ #endif + /* fall through */ case HA_READ_KEY_EXACT: if (sql_kind == SPIDER_SQL_KIND_SQL) { diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 8c6d2ef5cd6..71477b455ad 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -8567,8 +8567,8 @@ ha_innobase::innobase_lock_autoinc(void) break; } } - /* Fall through to old style locking. */ - + /* Use old style locking. 
*/ + /* fall through */ case AUTOINC_OLD_STYLE_LOCKING: DBUG_EXECUTE_IF("die_if_autoinc_old_lock_style_used", ut_ad(0);); @@ -11870,7 +11870,8 @@ create_options_are_invalid( case ROW_TYPE_DYNAMIC: CHECK_ERROR_ROW_TYPE_NEEDS_FILE_PER_TABLE(use_tablespace); CHECK_ERROR_ROW_TYPE_NEEDS_GT_ANTELOPE; - /* fall through since dynamic also shuns KBS */ + /* ROW_FORMAT=DYNAMIC also shuns KEY_BLOCK_SIZE */ + /* fall through */ case ROW_TYPE_COMPACT: case ROW_TYPE_REDUNDANT: if (kbs_specified) { @@ -12280,7 +12281,8 @@ index_bad: break; /* Correct row_format */ } zip_allowed = FALSE; - /* fall through to set row_format = COMPACT */ + /* Set ROW_FORMAT = COMPACT */ + /* fall through */ case ROW_TYPE_NOT_USED: case ROW_TYPE_FIXED: case ROW_TYPE_PAGE: @@ -12289,6 +12291,7 @@ index_bad: thd, Sql_condition::WARN_LEVEL_WARN, ER_ILLEGAL_HA_CREATE_OPTION, "InnoDB: assuming ROW_FORMAT=COMPACT."); + /* fall through */ case ROW_TYPE_DEFAULT: /* If we fell through, set row format to Compact. */ row_format = ROW_TYPE_COMPACT; diff --git a/storage/xtradb/include/data0type.ic b/storage/xtradb/include/data0type.ic index 555852474aa..8f5cee0fd5f 100644 --- a/storage/xtradb/include/data0type.ic +++ b/storage/xtradb/include/data0type.ic @@ -576,7 +576,8 @@ dtype_get_fixed_size_low( #else /* !UNIV_HOTBACKUP */ return(len); #endif /* !UNIV_HOTBACKUP */ - /* fall through for variable-length charsets */ + /* Treat as variable-length. */ + /* Fall through */ case DATA_VARCHAR: case DATA_BINARY: case DATA_DECIMAL: diff --git a/storage/xtradb/include/mach0data.ic b/storage/xtradb/include/mach0data.ic index 3904d96c09f..3b1cf9c0378 100644 --- a/storage/xtradb/include/mach0data.ic +++ b/storage/xtradb/include/mach0data.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -779,13 +780,13 @@ mach_swap_byte_order( dest += len; switch (len & 0x7) { - case 0: *--dest = *from++; - case 7: *--dest = *from++; - case 6: *--dest = *from++; - case 5: *--dest = *from++; - case 4: *--dest = *from++; - case 3: *--dest = *from++; - case 2: *--dest = *from++; + case 0: *--dest = *from++; /* fall through */ + case 7: *--dest = *from++; /* fall through */ + case 6: *--dest = *from++; /* fall through */ + case 5: *--dest = *from++; /* fall through */ + case 4: *--dest = *from++; /* fall through */ + case 3: *--dest = *from++; /* fall through */ + case 2: *--dest = *from++; /* fall through */ case 1: *--dest = *from; } } diff --git a/storage/xtradb/include/ut0rnd.ic b/storage/xtradb/include/ut0rnd.ic index 024c59e553b..987dfac03c1 100644 --- a/storage/xtradb/include/ut0rnd.ic +++ b/storage/xtradb/include/ut0rnd.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -237,16 +238,22 @@ ut_fold_binary( switch (len & 0x7) { case 7: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 6: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 5: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 4: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 3: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 2: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); + /* fall through */ case 1: fold = ut_fold_ulint_pair(fold, (ulint)(*str++)); } diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 81d6fda9e53..2f7aece665a 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -1995,6 +1995,7 @@ PageConverter::update_page( case FIL_PAGE_TYPE_XDES: err = set_current_xdes( buf_block_get_page_no(block), get_frame(block)); + /* fall through */ case FIL_PAGE_INODE: case FIL_PAGE_TYPE_TRX_SYS: case FIL_PAGE_IBUF_FREE_LIST: diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc index 666b59b42db..3cb909632dc 100644 --- a/storage/xtradb/row/row0log.cc +++ b/storage/xtradb/row/row0log.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1880,6 +1881,7 @@ row_log_table_apply_update( When applying the subsequent ROW_T_DELETE, no matching record will be found. */ + /* fall through */ case DB_SUCCESS: ut_ad(row != NULL); break; diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index 463981f51dd..59568f5c91b 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -5542,7 +5542,8 @@ loop: fputs(" InnoDB: Warning: CHECK TABLE on ", stderr); dict_index_name_print(stderr, prebuilt->trx, index); fprintf(stderr, " returned %lu\n", ret); - /* fall through (this error is ignored by CHECK TABLE) */ + /* (this error is ignored by CHECK TABLE) */ + /* fall through */ case DB_END_OF_INDEX: func_exit: mem_free(buf); diff --git a/storage/xtradb/row/row0purge.cc b/storage/xtradb/row/row0purge.cc index 8a1dbd6f69f..333677edf21 100644 --- a/storage/xtradb/row/row0purge.cc +++ b/storage/xtradb/row/row0purge.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -488,8 +489,9 @@ row_purge_remove_sec_if_poss_leaf( success = false; } } - /* fall through (the index entry is still needed, + /* (The index entry is still needed, or the deletion succeeded) */ + /* fall through */ case ROW_NOT_DELETED_REF: /* The index entry is still needed. 
*/ case ROW_BUFFERED: diff --git a/storage/xtradb/sync/sync0sync.cc b/storage/xtradb/sync/sync0sync.cc index 6692eef9fb0..37ac3c56fff 100644 --- a/storage/xtradb/sync/sync0sync.cc +++ b/storage/xtradb/sync/sync0sync.cc @@ -1236,6 +1236,7 @@ sync_thread_add_level( upgrading in innobase_start_or_create_for_mysql(). */ break; } + /* fall through */ case SYNC_MEM_POOL: case SYNC_MEM_HASH: case SYNC_RECV: @@ -1299,9 +1300,9 @@ sync_thread_add_level( } } ut_ad(found_current); - - /* fallthrough */ } + + /* fall through */ case SYNC_BUF_FLUSH_LIST: case SYNC_BUF_LRU_LIST: case SYNC_BUF_FREE_LIST: -- cgit v1.2.1 From 408ef65f840fac2150c385efb0c377ed9a2801d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 16 May 2017 17:45:22 +0300 Subject: Never pass NULL to innobase_get_stmt() --- storage/innobase/handler/ha_innodb.cc | 13 ++++--------- storage/innobase/lock/lock0lock.cc | 22 +++++++++++++--------- storage/innobase/trx/trx0i_s.cc | 4 +++- storage/xtradb/handler/ha_innodb.cc | 13 ++++--------- storage/xtradb/lock/lock0lock.cc | 22 +++++++++++++--------- storage/xtradb/trx/trx0i_s.cc | 4 +++- 6 files changed, 40 insertions(+), 38 deletions(-) (limited to 'storage') diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 21dece8ed94..fe5b4077203 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -2265,16 +2265,11 @@ innobase_get_stmt( THD* thd, /*!< in: MySQL thread handle */ size_t* length) /*!< out: length of the SQL statement */ { - const char* query = NULL; - LEX_STRING *stmt = NULL; - if (thd) { - stmt = thd_query_string(thd); - if (stmt) { - *length = stmt->length; - query = stmt->str; - } + if (const LEX_STRING *stmt = thd_query_string(thd)) { + *length = stmt->length; + return stmt->str; } - return (query); + return NULL; } /**********************************************************************//** diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index a951eff4203..6f198b7aad9 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -909,12 +909,18 @@ lock_reset_lock_and_trx_wait( const char* stmt2=NULL; size_t stmt_len; trx_id_t trx_id = 0; - stmt = innobase_get_stmt(lock->trx->mysql_thd, &stmt_len); + stmt = lock->trx->mysql_thd + ? innobase_get_stmt(lock->trx->mysql_thd, &stmt_len) + : NULL; if (lock->trx->lock.wait_lock && lock->trx->lock.wait_lock->trx) { trx_id = lock->trx->lock.wait_lock->trx->id; - stmt2 = innobase_get_stmt(lock->trx->lock.wait_lock->trx->mysql_thd, &stmt_len); + stmt2 = lock->trx->lock.wait_lock->trx->mysql_thd + ? 
innobase_get_stmt( + lock->trx->lock.wait_lock + ->trx->mysql_thd, &stmt_len) + : NULL; } ib_logf(IB_LOG_LEVEL_INFO, @@ -5586,13 +5592,11 @@ lock_rec_unlock( trx_mutex_exit(trx); stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: unlock row could not" - " find a %lu mode lock on the record\n", - (ulong) lock_mode); - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: current statement: %.*s\n", + + ib_logf(IB_LOG_LEVEL_ERROR, + "unlock row could not find a %u mode lock on the record;" + " statement=%.*s", + lock_mode, (int) stmt_len, stmt); return; diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index 993006efc6d..b05161a9932 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -503,7 +503,9 @@ fill_trx_row( row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd); - stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); + stmt = trx->mysql_thd + ? innobase_get_stmt(trx->mysql_thd, &stmt_len) + : NULL; if (stmt != NULL) { char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1]; diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 71477b455ad..205b69dd279 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -2591,16 +2591,11 @@ innobase_get_stmt( THD* thd, /*!< in: MySQL thread handle */ size_t* length) /*!< out: length of the SQL statement */ { - const char* query = NULL; - LEX_STRING *stmt = NULL; - if (thd) { - stmt = thd_query_string(thd); - if (stmt) { - *length = stmt->length; - query = stmt->str; - } + if (const LEX_STRING *stmt = thd_query_string(thd)) { + *length = stmt->length; + return stmt->str; } - return (query); + return NULL; } /**********************************************************************//** diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index 717fbf02536..71612f66fcd 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -921,12 +921,18 @@ lock_reset_lock_and_trx_wait( const char* stmt2=NULL; size_t stmt_len; trx_id_t trx_id = 0; - stmt = innobase_get_stmt(lock->trx->mysql_thd, &stmt_len); + stmt = lock->trx->mysql_thd + ? innobase_get_stmt(lock->trx->mysql_thd, &stmt_len) + : NULL; if (lock->trx->lock.wait_lock && lock->trx->lock.wait_lock->trx) { trx_id = lock->trx->lock.wait_lock->trx->id; - stmt2 = innobase_get_stmt(lock->trx->lock.wait_lock->trx->mysql_thd, &stmt_len); + stmt2 = lock->trx->lock.wait_lock->trx->mysql_thd + ? 
innobase_get_stmt( + lock->trx->lock.wait_lock + ->trx->mysql_thd, &stmt_len) + : NULL; } ib_logf(IB_LOG_LEVEL_INFO, @@ -5636,13 +5642,11 @@ lock_rec_unlock( trx_mutex_exit(trx); stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Error: unlock row could not" - " find a %lu mode lock on the record\n", - (ulong) lock_mode); - ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB: current statement: %.*s\n", + + ib_logf(IB_LOG_LEVEL_ERROR, + "unlock row could not find a %u mode lock on the record;" + " statement=%.*s", + lock_mode, (int) stmt_len, stmt); return; diff --git a/storage/xtradb/trx/trx0i_s.cc b/storage/xtradb/trx/trx0i_s.cc index eacd9212d2f..0c9618d98eb 100644 --- a/storage/xtradb/trx/trx0i_s.cc +++ b/storage/xtradb/trx/trx0i_s.cc @@ -507,7 +507,9 @@ fill_trx_row( row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd); - stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len); + stmt = trx->mysql_thd + ? innobase_get_stmt(trx->mysql_thd, &stmt_len) + : NULL; if (stmt != NULL) { char query[TRX_I_S_TRX_QUERY_MAX_LEN + 1]; -- cgit v1.2.1 From febe88198ebadceef2549c04f3e6afe75b68bac3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 17 May 2017 08:54:16 +0300 Subject: Make some variables const in fil_iterate() This is a non-functional change to make it slightly easier to read the code. We seem to have some bugs in this IMPORT TABLESPACE code; see MDEV-12396. --- storage/innobase/fil/fil0fil.cc | 26 ++++++++++---------------- storage/xtradb/fil/fil0fil.cc | 26 ++++++++++---------------- 2 files changed, 20 insertions(+), 32 deletions(-) (limited to 'storage') diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 7bc784121e5..b0fec9d921e 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -6583,15 +6583,15 @@ fil_iterate( /* TODO: For compressed tables we do a lot of useless copying for non-index pages. Unfortunately, it is required by buf_zip_decompress() */ + const bool row_compressed = callback.get_zip_size() > 0; for (offset = iter.start; offset < iter.end; offset += n_bytes) { byte* io_buffer = iter.io_buffer; - bool row_compressed = false; block->frame = io_buffer; - if (callback.get_zip_size() > 0) { + if (row_compressed) { page_zip_des_init(&block->page.zip); page_zip_set_size(&block->page.zip, iter.page_size); block->page.zip.data = block->frame + UNIV_PAGE_SIZE; @@ -6600,9 +6600,6 @@ fil_iterate( /* Zip IO is done in the compressed page buffer. */ io_buffer = block->page.zip.data; - row_compressed = true; - } else { - io_buffer = iter.io_buffer; } /* We have to read the exact number of bytes. Otherwise the @@ -6615,16 +6612,12 @@ fil_iterate( ut_ad(n_bytes > 0); ut_ad(!(n_bytes % iter.page_size)); - byte* readptr = io_buffer; - byte* writeptr = io_buffer; - bool encrypted = false; - + const bool encrypted = iter.crypt_data != NULL + && iter.crypt_data->should_encrypt(); /* Use additional crypt io buffer if tablespace is encrypted */ - if (iter.crypt_data != NULL && iter.crypt_data->should_encrypt()) { - encrypted = true; - readptr = iter.crypt_io_buffer; - writeptr = iter.crypt_io_buffer; - } + byte* const readptr = encrypted + ? 
iter.crypt_io_buffer : io_buffer; + byte* const writeptr = readptr; if (!os_file_read(iter.file, readptr, offset, (ulint) n_bytes)) { @@ -6647,8 +6640,9 @@ fil_iterate( ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - bool page_compressed = (page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED || - page_type == FIL_PAGE_PAGE_COMPRESSED); + const bool page_compressed + = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + || page_type == FIL_PAGE_PAGE_COMPRESSED; /* If tablespace is encrypted, we need to decrypt the page. Note that tablespaces are not in diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index e39be46840c..1838b6f8e04 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -6929,15 +6929,15 @@ fil_iterate( /* TODO: For compressed tables we do a lot of useless copying for non-index pages. Unfortunately, it is required by buf_zip_decompress() */ + const bool row_compressed = callback.get_zip_size() > 0; for (offset = iter.start; offset < iter.end; offset += n_bytes) { byte* io_buffer = iter.io_buffer; - bool row_compressed = false; block->frame = io_buffer; - if (callback.get_zip_size() > 0) { + if (row_compressed) { page_zip_des_init(&block->page.zip); page_zip_set_size(&block->page.zip, iter.page_size); block->page.zip.data = block->frame + UNIV_PAGE_SIZE; @@ -6946,9 +6946,6 @@ fil_iterate( /* Zip IO is done in the compressed page buffer. */ io_buffer = block->page.zip.data; - row_compressed = true; - } else { - io_buffer = iter.io_buffer; } /* We have to read the exact number of bytes. Otherwise the @@ -6961,16 +6958,12 @@ fil_iterate( ut_ad(n_bytes > 0); ut_ad(!(n_bytes % iter.page_size)); - byte* readptr = io_buffer; - byte* writeptr = io_buffer; - bool encrypted = false; - + const bool encrypted = iter.crypt_data != NULL + && iter.crypt_data->should_encrypt(); /* Use additional crypt io buffer if tablespace is encrypted */ - if (iter.crypt_data != NULL && iter.crypt_data->should_encrypt()) { - encrypted = true; - readptr = iter.crypt_io_buffer; - writeptr = iter.crypt_io_buffer; - } + byte* const readptr = encrypted + ? iter.crypt_io_buffer : io_buffer; + byte* const writeptr = readptr; if (!os_file_read(iter.file, readptr, offset, (ulint) n_bytes)) { @@ -6993,8 +6986,9 @@ fil_iterate( ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - bool page_compressed = (page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED || - page_type == FIL_PAGE_PAGE_COMPRESSED); + const bool page_compressed + = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + || page_type == FIL_PAGE_PAGE_COMPRESSED; /* If tablespace is encrypted, we need to decrypt the page. Note that tablespaces are not in -- cgit v1.2.1 From 956d2540c456059db85af860ce5cf47e5a5dd796 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 17 May 2017 10:27:01 +0300 Subject: Remove redundant UT_LIST_INIT() calls The macro UT_LIST_INIT() zero-initializes the UT_LIST_NODE. There is no need to call this macro on a buffer that has already been zero-initialized by mem_zalloc() or mem_heap_zalloc() or similar. For some reason, the statement UT_LIST_INIT(srv_sys->tasks) in srv_init() caused a SIGSEGV on server startup when compiling with GCC 7.1.0 for AMD64 using -O3. The zero-initialization was attempted by the instruction movaps %xmm0,0x50(%rax), while the proper offset of srv_sys->tasks would seem to have been 0x48. 
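The redundancy can be illustrated with a simplified, self-contained stand-in (hypothetical types and macro, not the actual InnoDB definitions): when the enclosing structure comes from a zero-filling allocator such as calloc(), the moral equivalent of mem_zalloc(), a macro that merely resets the embedded list base to zeroes changes nothing and can be dropped.

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the list base and srv_sys_t, for illustration. */
    struct list_base { size_t count; void *start; void *end; };
    struct srv_sys_like { int activity_count; struct list_base tasks; };

    /* Stand-in for an init macro that only zero-fills the base node. */
    #define LIST_INIT(b) do { (b).count = 0; (b).start = NULL; (b).end = NULL; } while (0)

    int main(void)
    {
        /* calloc() already returns zero-filled memory, so the explicit
           LIST_INIT() below is redundant and can be removed. */
        struct srv_sys_like *sys = calloc(1, sizeof *sys);
        if (sys == NULL)
            return 1;

        LIST_INIT(sys->tasks);  /* redundant after calloc() */
        printf("%zu %p %p\n", sys->tasks.count, sys->tasks.start, sys->tasks.end);

        free(sys);
        return 0;
    }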
--- storage/innobase/buf/buf0buf.cc | 3 +-- storage/innobase/fil/fil0fil.cc | 4 +--- storage/innobase/row/row0ftsort.cc | 4 +--- storage/innobase/srv/srv0srv.cc | 2 -- storage/innobase/usr/usr0sess.cc | 3 +-- storage/xtradb/buf/buf0buf.cc | 3 +-- storage/xtradb/fil/fil0fil.cc | 4 +--- storage/xtradb/row/row0ftsort.cc | 4 +--- storage/xtradb/srv/srv0srv.cc | 2 -- storage/xtradb/usr/usr0sess.cc | 3 +-- 10 files changed, 8 insertions(+), 24 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 0b305507271..83102ab1616 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -2,6 +2,7 @@ Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. +Copyright (c) 2017, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -1343,8 +1344,6 @@ buf_pool_init_instance( buf_pool->chunks = chunk = (buf_chunk_t*) mem_zalloc(sizeof *chunk); - UT_LIST_INIT(buf_pool->free); - if (!buf_chunk_init(buf_pool, chunk, buf_pool_size)) { mem_free(chunk); mem_free(buf_pool); diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 72ee8d74a82..36179960054 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1546,8 +1546,6 @@ fil_init( fil_system->spaces = hash_create(hash_size); fil_system->name_hash = hash_create(hash_size); - UT_LIST_INIT(fil_system->LRU); - fil_system->max_n_open = max_n_open; } diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc index 5b8ed009a2a..db570467414 100644 --- a/storage/innobase/row/row0ftsort.cc +++ b/storage/innobase/row/row0ftsort.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -225,9 +226,6 @@ row_fts_psort_info_init( each parallel sort thread. 
Each "sort bucket" holds records for a particular "FTS index partition" */ for (j = 0; j < fts_sort_pll_degree; j++) { - - UT_LIST_INIT(psort_info[j].fts_doc_list); - for (i = 0; i < FTS_NUM_AUX_INDEX; i++) { psort_info[j].merge_file[i] = diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 067e32a83aa..7beacc76615 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1011,8 +1011,6 @@ srv_init(void) srv_monitor_event = os_event_create(); srv_buf_dump_event = os_event_create(); - - UT_LIST_INIT(srv_sys->tasks); } /* page_zip_stat_per_index_mutex is acquired from: diff --git a/storage/innobase/usr/usr0sess.cc b/storage/innobase/usr/usr0sess.cc index ab7ba6bea09..e1bd71ff1a0 100644 --- a/storage/innobase/usr/usr0sess.cc +++ b/storage/innobase/usr/usr0sess.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -48,8 +49,6 @@ sess_open(void) sess->trx = trx_allocate_for_background(); sess->trx->sess = sess; - UT_LIST_INIT(sess->graphs); - return(sess); } diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 07b84c60c76..2785a2a33df 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -2,6 +2,7 @@ Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. +Copyright (c) 2017, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -1432,8 +1433,6 @@ buf_pool_init_instance( buf_pool->chunks = chunk = (buf_chunk_t*) mem_zalloc(sizeof *chunk); - UT_LIST_INIT(buf_pool->free); - if (!buf_chunk_init(buf_pool, chunk, buf_pool_size)) { mem_free(chunk); mem_free(buf_pool); diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 1a866a693ca..d528164c6f4 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1598,8 +1598,6 @@ fil_init( fil_system->spaces = hash_create(hash_size); fil_system->name_hash = hash_create(hash_size); - UT_LIST_INIT(fil_system->LRU); - fil_system->max_n_open = max_n_open; } diff --git a/storage/xtradb/row/row0ftsort.cc b/storage/xtradb/row/row0ftsort.cc index 13877b8fe08..734b4f8b2f7 100644 --- a/storage/xtradb/row/row0ftsort.cc +++ b/storage/xtradb/row/row0ftsort.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2010, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -228,9 +229,6 @@ row_fts_psort_info_init( each parallel sort thread. Each "sort bucket" holds records for a particular "FTS index partition" */ for (j = 0; j < fts_sort_pll_degree; j++) { - - UT_LIST_INIT(psort_info[j].fts_doc_list); - for (i = 0; i < FTS_NUM_AUX_INDEX; i++) { psort_info[j].merge_file[i] = diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 5b3f86d8641..22a96b7446a 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -1164,8 +1164,6 @@ srv_init(void) if (srv_track_changed_pages) { os_event_set(srv_redo_log_tracked_event); } - - UT_LIST_INIT(srv_sys->tasks); } /* page_zip_stat_per_index_mutex is acquired from: diff --git a/storage/xtradb/usr/usr0sess.cc b/storage/xtradb/usr/usr0sess.cc index ab7ba6bea09..e1bd71ff1a0 100644 --- a/storage/xtradb/usr/usr0sess.cc +++ b/storage/xtradb/usr/usr0sess.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -48,8 +49,6 @@ sess_open(void) sess->trx = trx_allocate_for_background(); sess->trx->sess = sess; - UT_LIST_INIT(sess->graphs); - return(sess); } -- cgit v1.2.1 From 5064623ce4f721d7b3286296f7cd08e6eb8091b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Wed, 17 May 2017 12:13:09 +0300 Subject: Revert "Fix unit test after merge from mysql 5.5.35 perfschema" This reverts commit 4799af092574e7957d7143c7751acef74a95a495. Upstream provided adequate fix. This commit is no longer necessary. --- storage/perfschema/unittest/pfs-t.cc | 1 - 1 file changed, 1 deletion(-) (limited to 'storage') diff --git a/storage/perfschema/unittest/pfs-t.cc b/storage/perfschema/unittest/pfs-t.cc index 7883e8070fe..209c4d1ca88 100644 --- a/storage/perfschema/unittest/pfs-t.cc +++ b/storage/perfschema/unittest/pfs-t.cc @@ -1339,7 +1339,6 @@ void test_locker_disabled() /* Pretend the socket does not have a thread owner */ /* ---------------------------------------------- */ - psi->delete_current_thread(); socket_class_A->m_enabled= true; socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0); ok(socket_A1 != NULL, "instrumented"); -- cgit v1.2.1 From e22d86a3eb843bbf1664fc4ff8d112416d0eacd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 17 May 2017 13:49:51 +0300 Subject: fil_create_new_single_table_tablespace(): Correct a bogus nonnull attribute The parameter path can be passed as NULL. This error was reported by GCC 7.1.0 when compiling CMAKE_BUILD_TYPE=Debug with -O3. 
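To make the attribute semantics concrete, here is a small hypothetical example; the function names and the two-parameter signature are invented for illustration and are not the MariaDB declaration. A bare nonnull lets GCC assume that every pointer argument is non-NULL, warn about explicit NULL arguments, and drop NULL checks inside the callee, whereas an indexed form such as nonnull(1) limits the promise to the listed parameter, which is what the fix below achieves with nonnull(2) (the name parameter) so that path may legally be NULL.

/* Hypothetical illustration, not MariaDB code. */
#include <cstdio>

/* Blanket form: the compiler may assume no pointer argument is ever NULL. */
int create_tablespace_bad(const char* name, const char* path)
        __attribute__((nonnull));

/* Indexed form: only parameter 1 (name) is promised to be non-NULL;
   path may still be passed as NULL. */
int create_tablespace_ok(const char* name, const char* path)
        __attribute__((nonnull(1)));

int create_tablespace_ok(const char* name, const char* path)
{
        /* Meaningful only because path is not covered by the attribute. */
        return std::printf("%s -> %s\n", name,
                           path ? path : "<default data directory>");
}

int main()
{
        create_tablespace_ok("test/t1", nullptr);   /* fine: path may be NULL */
        /* create_tablespace_bad("test/t1", nullptr);  would trigger -Wnonnull
           and allow the optimizer to remove any path == NULL check inside. */
        return 0;
}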
--- storage/innobase/fil/fil0fil.cc | 4 +--- storage/innobase/include/fil0fil.h | 4 ++-- storage/xtradb/fil/fil0fil.cc | 4 +--- storage/xtradb/include/fil0fil.h | 4 ++-- 4 files changed, 6 insertions(+), 10 deletions(-) (limited to 'storage') diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 36179960054..a573c6eb4ef 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2271,14 +2271,12 @@ fil_op_log_parse_or_replay( } else if (log_flags & MLOG_FILE_FLAG_TEMP) { /* Temporary table, do nothing */ } else { - const char* path = NULL; - /* Create the database directory for name, if it does not exist yet */ fil_create_directory_for_tablename(name); if (fil_create_new_single_table_tablespace( - space_id, name, path, flags, + space_id, name, NULL, flags, DICT_TF2_USE_TABLESPACE, FIL_IBD_FILE_INITIAL_SIZE) != DB_SUCCESS) { ut_error; diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index f29d1bf3408..d45e14a83de 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -782,7 +782,7 @@ fil_create_new_single_table_tablespace( ulint size) /*!< in: the initial size of the tablespace file in pages, must be >= FIL_IBD_FILE_INITIAL_SIZE */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull(2), warn_unused_result)); #ifndef UNIV_HOTBACKUP /********************************************************************//** Tries to open a single-table tablespace and optionally checks the space id is diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index d528164c6f4..ea984543150 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -2323,14 +2323,12 @@ fil_op_log_parse_or_replay( } else if (log_flags & MLOG_FILE_FLAG_TEMP) { /* Temporary table, do nothing */ } else { - const char* path = NULL; - /* Create the database directory for name, if it does not exist yet */ fil_create_directory_for_tablename(name); if (fil_create_new_single_table_tablespace( - space_id, name, path, flags, + space_id, name, NULL, flags, DICT_TF2_USE_TABLESPACE, FIL_IBD_FILE_INITIAL_SIZE) != DB_SUCCESS) { ut_error; diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index 3023b29b793..58bc5322cd9 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -778,7 +778,7 @@ fil_create_new_single_table_tablespace( ulint size) /*!< in: the initial size of the tablespace file in pages, must be >= FIL_IBD_FILE_INITIAL_SIZE */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull(2), warn_unused_result)); #ifndef UNIV_HOTBACKUP /********************************************************************//** Tries to open a single-table tablespace and optionally checks the space id is -- cgit v1.2.1 From fff61e31eca83a0a9af7c30e2bcda8309e9c695a Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 17 May 2017 14:44:16 +0300 Subject: Fix a compiler warning --- storage/rocksdb/rdb_datadic.h | 1 + 1 file changed, 1 insertion(+) (limited to 'storage') diff --git a/storage/rocksdb/rdb_datadic.h b/storage/rocksdb/rdb_datadic.h index 2ffd3f6e8db..9974baaeed0 100644 --- a/storage/rocksdb/rdb_datadic.h +++ b/storage/rocksdb/rdb_datadic.h @@ -751,6 +751,7 @@ public: interface Rdb_tables_scanner { virtual int add_table(Rdb_tbl_def * tdef) = 0; + virtual ~Rdb_tables_scanner() {} /* Keep the compiler happy */ }; /* -- cgit v1.2.1 From 9f89b94ba6d69f896b2af3e84a9b420131c530d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 17 May 2017 14:08:08 +0300 Subject: MDEV-12358 Work around what looks like a bug in GCC 7.1.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The parameter thr of the function btr_cur_optimistic_insert() is not declared as nonnull, but GCC 7.1.0 with -O3 is wrongly optimizing away the first part of the condition UNIV_UNLIKELY(thr && thr_get_trx(thr)->fake_changes) when the function is being called by row_merge_insert_index_tuples() with thr==NULL. The fake_changes is an XtraDB addition. This GCC bug only appears to have an impact on XtraDB, not InnoDB. We work around the problem by not attempting to dereference thr when both BTR_NO_LOCKING_FLAG and BTR_NO_UNDO_LOG_FLAG are set in the flags. Probably BTR_NO_LOCKING_FLAG alone should suffice. btr_cur_optimistic_insert(), btr_cur_pessimistic_insert(), btr_cur_pessimistic_update(): Correct comments that disagree with usage and with nonnull attributes. No other parameter than thr can actually be NULL. row_ins_duplicate_error_in_clust(): Remove an unused parameter. innobase_is_fake_change(): Unused function; remove. ibuf_insert_low(), row_log_table_apply(), row_log_apply(), row_undo_mod_clust_low(): Because we will be passing BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG in the flags, the trx->fake_changes flag will be treated as false, which is the right thing to do at these low-level operations (change buffer merge, ALTER TABLE…LOCK=NONE, or ROLLBACK). This might be fixing actual XtraDB bugs. Other callers that pass these two flags are also passing thr=NULL, implying fake_changes=false. (Some callers in ROLLBACK are passing BTR_NO_LOCKING_FLAG and a nonnull thr. In these callers, fake_changes better be false, to avoid corruption.) 
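The shape of the workaround can be shown in a standalone sketch; the struct layouts and flag values below are hypothetical (the real trx_t and que_thr_t are far larger), but the expression mirrors the patched code: fake_changes is derived from the flags first, so thr is never dereferenced on the code paths whose callers pass thr == NULL.

// Hypothetical, simplified types and flag values -- not the real InnoDB code.
#include <cassert>

static const unsigned BTR_NO_UNDO_LOG_FLAG = 1;   // illustrative values only
static const unsigned BTR_NO_LOCKING_FLAG  = 2;

struct trx_t     { bool    fake_changes; };
struct que_thr_t { trx_t*  trx; };

static trx_t* thr_get_trx(que_thr_t* thr) { return thr->trx; }

static bool resolve_fake_changes(unsigned flags, que_thr_t* thr)
{
        // When both flags are set the caller may pass thr == NULL;
        // the short-circuit guarantees thr is not dereferenced then.
        return (~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))
                && thr_get_trx(thr)->fake_changes;
}

int main()
{
        trx_t           trx = { true };
        que_thr_t       thr = { &trx };

        assert(resolve_fake_changes(0, &thr));              // ordinary caller
        assert(!resolve_fake_changes(BTR_NO_LOCKING_FLAG    // e.g. change buffer merge
                                     | BTR_NO_UNDO_LOG_FLAG, nullptr));
        return 0;
}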
--- storage/innobase/btr/btr0cur.cc | 27 +++++++++++++--------- storage/innobase/ibuf/ibuf0ibuf.cc | 3 ++- storage/innobase/include/btr0cur.h | 25 ++++++++++++--------- storage/innobase/row/row0ins.cc | 9 +++----- storage/xtradb/btr/btr0cur.cc | 45 ++++++++++++++++++++++++------------- storage/xtradb/handler/ha_innodb.cc | 27 ---------------------- storage/xtradb/ibuf/ibuf0ibuf.cc | 3 ++- storage/xtradb/include/btr0cur.h | 25 ++++++++++++--------- storage/xtradb/row/row0ins.cc | 9 +++----- 9 files changed, 86 insertions(+), 87 deletions(-) (limited to 'storage') diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 4f24ad75e26..7846debc115 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -3,6 +3,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2017, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -1223,15 +1224,17 @@ btr_cur_optimistic_insert( btr_cur_t* cursor, /*!< in: cursor on page after which to insert; cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ - mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction; if this function returns DB_SUCCESS on a leaf page of a secondary index in a @@ -1252,6 +1255,7 @@ btr_cur_optimistic_insert( ulint rec_size; dberr_t err; + ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))); *big_rec = NULL; block = btr_cur_get_block(cursor); @@ -1510,15 +1514,17 @@ btr_cur_pessimistic_insert( cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction */ { dict_index_t* index = cursor->index; @@ -1530,6 +1536,7 @@ btr_cur_pessimistic_insert( ulint n_reserved = 0; ut_ad(dtuple_check_typed(entry)); + ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))); *big_rec = NULL; @@ -2426,12 +2433,12 @@ btr_cur_pessimistic_update( ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ mem_heap_t** offsets_heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ mem_heap_t* entry_heap, /*!< 
in/out: memory heap for allocating big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or NULL */ + be stored externally by the caller */ const upd_t* update, /*!< in: update vector; this is allowed also contain trx id and roll ptr fields, but the values in update vector have no effect */ diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 373c68503d7..55429e3cf34 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3662,7 +3663,7 @@ fail_exit: if (mode == BTR_MODIFY_PREV) { err = btr_cur_optimistic_insert( - BTR_NO_LOCKING_FLAG, + BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG, cursor, &offsets, &offsets_heap, ibuf_entry, &ins_rec, &dummy_big_rec, 0, thr, &mtr); diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index 28e01d4f02e..77de98d5812 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -220,15 +221,17 @@ btr_cur_optimistic_insert( btr_cur_t* cursor, /*!< in: cursor on page after which to insert; cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ - mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction; if this function returns DB_SUCCESS on a leaf page of a secondary index in a @@ -256,15 +259,17 @@ btr_cur_pessimistic_insert( cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction */ MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), 
warn_unused_result)); /*************************************************************//** @@ -390,12 +395,12 @@ btr_cur_pessimistic_update( ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ mem_heap_t** offsets_heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ mem_heap_t* entry_heap, /*!< in/out: memory heap for allocating big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or NULL */ + be stored externally by the caller */ const upd_t* update, /*!< in: update vector; this is allowed also contain trx id and roll ptr fields, but the values in update vector have no effect */ diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 9c1b5cb2d09..5cfa21c1fa2 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2121,14 +2122,10 @@ for a clustered index! @retval DB_SUCCESS if no error @retval DB_DUPLICATE_KEY if error, @retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate -record -@retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found -in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */ +record */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_duplicate_error_in_clust( -/*=============================*/ - ulint flags, /*!< in: undo logging and locking flags */ btr_cur_t* cursor, /*!< in: B-tree cursor */ const dtuple_t* entry, /*!< in: entry to insert */ que_thr_t* thr, /*!< in: query thread */ @@ -2386,7 +2383,7 @@ row_ins_clust_index_entry_low( DB_LOCK_WAIT */ err = row_ins_duplicate_error_in_clust( - flags, &cursor, entry, thr, &mtr); + &cursor, entry, thr, &mtr); } if (err != DB_SUCCESS) { diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc index dcb197a761f..0b554b271c4 100644 --- a/storage/xtradb/btr/btr0cur.cc +++ b/storage/xtradb/btr/btr0cur.cc @@ -3,6 +3,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. +Copyright (c) 2017, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -1328,15 +1329,17 @@ btr_cur_optimistic_insert( btr_cur_t* cursor, /*!< in: cursor on page after which to insert; cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ - mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction; if this function returns DB_SUCCESS on a leaf page of a secondary index in a @@ -1357,6 +1360,7 @@ btr_cur_optimistic_insert( ulint rec_size; dberr_t err; + ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))); *big_rec = NULL; block = btr_cur_get_block(cursor); @@ -1366,7 +1370,10 @@ btr_cur_optimistic_insert( page = buf_block_get_frame(block); index = cursor->index; - ut_ad((thr && thr_get_trx(thr)->fake_changes) + const bool fake_changes = (~flags & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) + && thr_get_trx(thr)->fake_changes; + ut_ad(fake_changes || mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX)); ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) @@ -1507,7 +1514,7 @@ fail_err: goto fail_err; } - if (UNIV_UNLIKELY(thr && thr_get_trx(thr)->fake_changes)) { + if (UNIV_UNLIKELY(fake_changes)) { /* skip CHANGE, LOG */ *big_rec = big_rec_vec; return(err); /* == DB_SUCCESS */ @@ -1625,15 +1632,17 @@ btr_cur_pessimistic_insert( cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction */ { dict_index_t* index = cursor->index; @@ -1645,13 +1654,17 @@ btr_cur_pessimistic_insert( ulint n_reserved = 0; ut_ad(dtuple_check_typed(entry)); + ut_ad(thr || !(~flags & (BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG))); *big_rec = NULL; - ut_ad((thr && thr_get_trx(thr)->fake_changes) || mtr_memo_contains(mtr, + const bool fake_changes = (~flags & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) + && thr_get_trx(thr)->fake_changes; + ut_ad(fake_changes || mtr_memo_contains(mtr, dict_index_get_lock(btr_cur_get_index(cursor)), MTR_MEMO_X_LOCK)); - ut_ad((thr && thr_get_trx(thr)->fake_changes) || mtr_memo_contains(mtr, btr_cur_get_block(cursor), + ut_ad(fake_changes || mtr_memo_contains(mtr, btr_cur_get_block(cursor), MTR_MEMO_PAGE_X_FIX)); ut_ad(!dict_index_is_online_ddl(index) || dict_index_is_clust(index) @@ -1712,7 +1725,7 @@ btr_cur_pessimistic_insert( } } - if (UNIV_UNLIKELY(thr && 
thr_get_trx(thr)->fake_changes)) { + if (UNIV_UNLIKELY(fake_changes)) { /* skip CHANGE, LOG */ if (n_reserved > 0) { fil_space_release_free_extents(index->space, @@ -1803,7 +1816,7 @@ btr_cur_upd_lock_and_undo( ut_ad((thr != NULL) || (flags & BTR_NO_LOCKING_FLAG)); - if (UNIV_UNLIKELY(thr && thr_get_trx(thr)->fake_changes)) { + if (!(flags & BTR_NO_LOCKING_FLAG) && thr_get_trx(thr)->fake_changes) { /* skip LOCK, UNDO */ return(DB_SUCCESS); } @@ -2582,12 +2595,12 @@ btr_cur_pessimistic_update( ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ mem_heap_t** offsets_heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ mem_heap_t* entry_heap, /*!< in/out: memory heap for allocating big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or NULL */ + be stored externally by the caller */ const upd_t* update, /*!< in: update vector; this is allowed also contain trx id and roll ptr fields, but the values in update vector have no effect */ diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index c1b9f59fea4..9af3c7272fe 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -769,17 +769,6 @@ innobase_purge_changed_page_bitmaps( ulonglong lsn) __attribute__((unused)); /*!< in: LSN to purge files up to */ -/*****************************************************************//** -Check whether this is a fake change transaction. -@return TRUE if a fake change transaction */ -static -my_bool -innobase_is_fake_change( -/*====================*/ - handlerton *hton, /*!< in: InnoDB handlerton */ - THD* thd) __attribute__((unused)); /*!< in: MySQL thread handle of the user for - whom the transaction is being committed */ - /** Get the list of foreign keys referencing a specified table table. @param thd The thread handle @@ -4242,22 +4231,6 @@ innobase_purge_changed_page_bitmaps( return (my_bool)log_online_purge_changed_page_bitmaps(lsn); } -/*****************************************************************//** -Check whether this is a fake change transaction. -@return TRUE if a fake change transaction */ -static -my_bool -innobase_is_fake_change( -/*====================*/ - handlerton *hton MY_ATTRIBUTE((unused)), - /*!< in: InnoDB handlerton */ - THD* thd) /*!< in: MySQL thread handle of the user for - whom the transaction is being committed */ -{ - trx_t* trx = check_trx_exists(thd); - return UNIV_UNLIKELY(trx->fake_changes); -} - /*****************************************************************//** Commits a transaction in an InnoDB database. */ static diff --git a/storage/xtradb/ibuf/ibuf0ibuf.cc b/storage/xtradb/ibuf/ibuf0ibuf.cc index a9f039d3f0f..e00adefe68f 100644 --- a/storage/xtradb/ibuf/ibuf0ibuf.cc +++ b/storage/xtradb/ibuf/ibuf0ibuf.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -3704,7 +3705,7 @@ fail_exit: if (mode == BTR_MODIFY_PREV) { err = btr_cur_optimistic_insert( - BTR_NO_LOCKING_FLAG, + BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG, cursor, &offsets, &offsets_heap, ibuf_entry, &ins_rec, &dummy_big_rec, 0, thr, &mtr); diff --git a/storage/xtradb/include/btr0cur.h b/storage/xtradb/include/btr0cur.h index 19ab7d8bc3a..d973eb80c59 100644 --- a/storage/xtradb/include/btr0cur.h +++ b/storage/xtradb/include/btr0cur.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -220,15 +221,17 @@ btr_cur_optimistic_insert( btr_cur_t* cursor, /*!< in: cursor on page after which to insert; cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ - mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */ + mem_heap_t** heap, /*!< in/out: pointer to memory heap */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction; if this function returns DB_SUCCESS on a leaf page of a secondary index in a @@ -256,15 +259,17 @@ btr_cur_pessimistic_insert( cursor stays valid */ ulint** offsets,/*!< out: offsets on *rec */ mem_heap_t** heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ dtuple_t* entry, /*!< in/out: entry to insert */ rec_t** rec, /*!< out: pointer to inserted record if succeed */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or - NULL */ + be stored externally by the caller */ ulint n_ext, /*!< in: number of externally stored columns */ - que_thr_t* thr, /*!< in: query thread or NULL */ + que_thr_t* thr, /*!< in/out: query thread; can be NULL if + !(~flags + & (BTR_NO_LOCKING_FLAG + | BTR_NO_UNDO_LOG_FLAG)) */ mtr_t* mtr) /*!< in/out: mini-transaction */ MY_ATTRIBUTE((nonnull(2,3,4,5,6,7,10), warn_unused_result)); /*************************************************************//** @@ -392,12 +397,12 @@ btr_cur_pessimistic_update( ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */ mem_heap_t** offsets_heap, /*!< in/out: pointer to memory heap - that can be emptied, or NULL */ + that can be emptied */ mem_heap_t* entry_heap, /*!< in/out: memory heap for allocating big_rec and the index tuple */ big_rec_t** big_rec,/*!< out: big rec vector whose fields have to - be stored externally by the caller, or NULL */ + be stored externally by the caller */ const upd_t* update, /*!< in: update vector; this is allowed also contain trx id and roll ptr fields, but the values in update vector have no effect */ diff --git a/storage/xtradb/row/row0ins.cc b/storage/xtradb/row/row0ins.cc index 
49744959dc6..ec9f74cd4ad 100644 --- a/storage/xtradb/row/row0ins.cc +++ b/storage/xtradb/row/row0ins.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2139,14 +2140,10 @@ for a clustered index! @retval DB_SUCCESS if no error @retval DB_DUPLICATE_KEY if error, @retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate -record -@retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found -in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */ +record */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t row_ins_duplicate_error_in_clust( -/*=============================*/ - ulint flags, /*!< in: undo logging and locking flags */ btr_cur_t* cursor, /*!< in: B-tree cursor */ const dtuple_t* entry, /*!< in: entry to insert */ que_thr_t* thr, /*!< in: query thread */ @@ -2421,7 +2418,7 @@ row_ins_clust_index_entry_low( DB_LOCK_WAIT */ err = row_ins_duplicate_error_in_clust( - flags, &cursor, entry, thr, &mtr); + &cursor, entry, thr, &mtr); } if (err != DB_SUCCESS) { -- cgit v1.2.1 From 8b34aabf86b3f2bab392df8d87b28069e8adfc73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 17 May 2017 16:09:29 +0300 Subject: Follow-up to MDEV-12534: Align srv_sys Allocate srv_sys statically so that the desired alignment can be guaranteed. This silences -fsanitize=undefined warnings. There probably is no performance impact of this, because the reason for the alignment to ensure the absence of false sharing between counters. Even with the misalignment, each counter would have been been aligned at 64 bits, and the counters would reside in separate cache lines. --- storage/innobase/srv/srv0srv.cc | 121 ++++++++++++++++-------------------- storage/xtradb/srv/srv0srv.cc | 134 ++++++++++++++++++---------------------- 2 files changed, 112 insertions(+), 143 deletions(-) (limited to 'storage') diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 7beacc76615..cadd3fc1d1a 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -293,7 +293,7 @@ UNIV_INTERN double srv_adaptive_flushing_lwm = 10.0; UNIV_INTERN ulong srv_flushing_avg_loops = 30; /* The number of purge threads to use.*/ -UNIV_INTERN ulong srv_n_purge_threads = 1; +UNIV_INTERN ulong srv_n_purge_threads; /* the number of pages to purge in one batch */ UNIV_INTERN ulong srv_purge_batch_size = 20; @@ -490,16 +490,16 @@ UNIV_INTERN uint srv_simulate_comp_failures = 0; /** Acquire the system_mutex. */ #define srv_sys_mutex_enter() do { \ - mutex_enter(&srv_sys->mutex); \ + mutex_enter(&srv_sys.mutex); \ } while (0) /** Test if the system mutex is owned. */ -#define srv_sys_mutex_own() (mutex_own(&srv_sys->mutex) \ +#define srv_sys_mutex_own() (mutex_own(&srv_sys.mutex) \ && !srv_read_only_mode) /** Release the system mutex. 
*/ #define srv_sys_mutex_exit() do { \ - mutex_exit(&srv_sys->mutex); \ + mutex_exit(&srv_sys.mutex); \ } while (0) #define fetch_lock_wait_timeout(trx) \ @@ -591,7 +591,7 @@ struct srv_sys_t{ ulint n_sys_threads; /*!< size of the sys_threads array */ - srv_slot_t* sys_threads; /*!< server thread table; + srv_slot_t sys_threads[32 + 1]; /*!< server thread table; os_event_set() and os_event_reset() on sys_threads[]->event are @@ -611,7 +611,7 @@ struct srv_sys_t{ UNIV_INTERN ib_mutex_t server_mutex; #endif /* !HAVE_ATOMIC_BUILTINS */ -static srv_sys_t* srv_sys = NULL; +static srv_sys_t srv_sys; /** Event to signal srv_monitor_thread. Not protected by a mutex. Set after setting srv_print_innodb_monitor. */ @@ -633,10 +633,10 @@ and/or load it during startup. */ UNIV_INTERN char srv_buffer_pool_dump_at_shutdown = FALSE; UNIV_INTERN char srv_buffer_pool_load_at_startup = FALSE; -/** Slot index in the srv_sys->sys_threads array for the purge thread. */ +/** Slot index in the srv_sys.sys_threads array for the purge thread. */ static const ulint SRV_PURGE_SLOT = 1; -/** Slot index in the srv_sys->sys_threads array for the master thread. */ +/** Slot index in the srv_sys.sys_threads array for the master thread. */ static const ulint SRV_MASTER_SLOT = 0; /*********************************************************************//** @@ -737,21 +737,21 @@ srv_reserve_slot( switch (type) { case SRV_MASTER: - slot = &srv_sys->sys_threads[SRV_MASTER_SLOT]; + slot = &srv_sys.sys_threads[SRV_MASTER_SLOT]; break; case SRV_PURGE: - slot = &srv_sys->sys_threads[SRV_PURGE_SLOT]; + slot = &srv_sys.sys_threads[SRV_PURGE_SLOT]; break; case SRV_WORKER: /* Find an empty slot, skip the master and purge slots. */ - for (slot = &srv_sys->sys_threads[2]; + for (slot = &srv_sys.sys_threads[2]; slot->in_use; ++slot) { - ut_a(slot < &srv_sys->sys_threads[ - srv_sys->n_sys_threads]); + ut_a(slot < &srv_sys.sys_threads[ + srv_sys.n_sys_threads]); } break; @@ -767,7 +767,7 @@ srv_reserve_slot( ut_ad(srv_slot_get_type(slot) == type); - ++srv_sys->n_threads_active[type]; + ++srv_sys.n_threads_active[type]; srv_sys_mutex_exit(); @@ -797,27 +797,27 @@ srv_suspend_thread_low( case SRV_MASTER: /* We have only one master thread and it should be the first entry always. */ - ut_a(srv_sys->n_threads_active[type] == 1); + ut_a(srv_sys.n_threads_active[type] == 1); break; case SRV_PURGE: /* We have only one purge coordinator thread and it should be the second entry always. 
*/ - ut_a(srv_sys->n_threads_active[type] == 1); + ut_a(srv_sys.n_threads_active[type] == 1); break; case SRV_WORKER: ut_a(srv_n_purge_threads > 1); - ut_a(srv_sys->n_threads_active[type] > 0); + ut_a(srv_sys.n_threads_active[type] > 0); break; } ut_a(!slot->suspended); slot->suspended = TRUE; - ut_a(srv_sys->n_threads_active[type] > 0); + ut_a(srv_sys.n_threads_active[type] > 0); - srv_sys->n_threads_active[type]--; + srv_sys.n_threads_active[type]--; return(os_event_reset(slot->event)); } @@ -872,7 +872,7 @@ srv_resume_thread(srv_slot_t* slot, ib_int64_t sig_count = 0, bool wait = true, ut_ad(slot->suspended); slot->suspended = FALSE; - ++srv_sys->n_threads_active[slot->type]; + ++srv_sys.n_threads_active[slot->type]; srv_sys_mutex_exit(); return(timeout); } @@ -894,8 +894,8 @@ srv_release_threads(enum srv_thread_type type, ulint n) srv_sys_mutex_enter(); - for (ulint i = 0; i < srv_sys->n_sys_threads; i++) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + for (ulint i = 0; i < srv_sys.n_sys_threads; i++) { + srv_slot_t* slot = &srv_sys.sys_threads[i]; if (!slot->in_use || srv_slot_get_type(slot) != type) { continue; @@ -915,7 +915,7 @@ srv_release_threads(enum srv_thread_type type, ulint n) should be the first entry always. */ ut_a(n == 1); ut_a(i == SRV_MASTER_SLOT); - ut_a(srv_sys->n_threads_active[type] == 0); + ut_a(srv_sys.n_threads_active[type] == 0); break; case SRV_PURGE: @@ -924,12 +924,12 @@ srv_release_threads(enum srv_thread_type type, ulint n) ut_a(n == 1); ut_a(i == SRV_PURGE_SLOT); ut_a(srv_n_purge_threads > 0); - ut_a(srv_sys->n_threads_active[type] == 0); + ut_a(srv_sys.n_threads_active[type] == 0); break; case SRV_WORKER: ut_a(srv_n_purge_threads > 1); - ut_a(srv_sys->n_threads_active[type] + ut_a(srv_sys.n_threads_active[type] < srv_n_purge_threads - 1); break; } @@ -967,9 +967,6 @@ void srv_init(void) /*==========*/ { - ulint n_sys_threads = 0; - ulint srv_sys_sz = sizeof(*srv_sys); - #ifndef HAVE_ATOMIC_BUILTINS mutex_create(server_mutex_key, &server_mutex, SYNC_ANY_LATCH); #endif /* !HAVE_ATOMIC_BUILTINS */ @@ -977,29 +974,19 @@ srv_init(void) mutex_create(srv_innodb_monitor_mutex_key, &srv_innodb_monitor_mutex, SYNC_NO_ORDER_CHECK); - if (!srv_read_only_mode) { - - /* Number of purge threads + master thread */ - n_sys_threads = srv_n_purge_threads + 1; - - srv_sys_sz += n_sys_threads * sizeof(*srv_sys->sys_threads); - } - - srv_sys = static_cast(mem_zalloc(srv_sys_sz)); - - srv_sys->n_sys_threads = n_sys_threads; + srv_sys.n_sys_threads = srv_read_only_mode + ? 0 + : srv_n_purge_threads + 1/* purge coordinator */; if (!srv_read_only_mode) { - mutex_create(srv_sys_mutex_key, &srv_sys->mutex, SYNC_THREADS); + mutex_create(srv_sys_mutex_key, &srv_sys.mutex, SYNC_THREADS); mutex_create(srv_sys_tasks_mutex_key, - &srv_sys->tasks_mutex, SYNC_ANY_LATCH); - - srv_sys->sys_threads = (srv_slot_t*) &srv_sys[1]; + &srv_sys.tasks_mutex, SYNC_ANY_LATCH); - for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + for (ulint i = 0; i < srv_sys.n_sys_threads; ++i) { + srv_slot_t* slot = &srv_sys.sys_threads[i]; slot->event = os_event_create(); @@ -1048,10 +1035,8 @@ srv_free(void) { srv_conc_free(); - /* The mutexes srv_sys->mutex and srv_sys->tasks_mutex should have + /* The mutexes srv_sys.mutex and srv_sys.tasks_mutex should have been freed by sync_close() already. 
*/ - mem_free(srv_sys); - srv_sys = NULL; trx_i_s_cache_free(trx_i_s_cache); @@ -1882,7 +1867,7 @@ void srv_inc_activity_count(void) /*========================*/ { - srv_sys->activity_count.inc(); + srv_sys.activity_count.inc(); } /**********************************************************************//** @@ -1904,7 +1889,7 @@ srv_get_active_thread_type(void) srv_sys_mutex_enter(); for (ulint i = SRV_WORKER; i <= SRV_MASTER; ++i) { - if (srv_sys->n_threads_active[i] != 0) { + if (srv_sys.n_threads_active[i] != 0) { ret = static_cast(i); break; } @@ -1977,12 +1962,12 @@ srv_active_wake_master_thread(void) srv_inc_activity_count(); - if (srv_sys->n_threads_active[SRV_MASTER] == 0) { + if (srv_sys.n_threads_active[SRV_MASTER] == 0) { srv_slot_t* slot; srv_sys_mutex_enter(); - slot = &srv_sys->sys_threads[SRV_MASTER_SLOT]; + slot = &srv_sys.sys_threads[SRV_MASTER_SLOT]; /* Only if the master thread has been started. */ @@ -2009,7 +1994,7 @@ srv_wake_purge_thread_if_not_active(void) ut_ad(!srv_sys_mutex_own()); if (purge_sys->state == PURGE_STATE_RUN - && srv_sys->n_threads_active[SRV_PURGE] == 0) { + && srv_sys.n_threads_active[SRV_PURGE] == 0) { srv_release_threads(SRV_PURGE, 1); } @@ -2038,7 +2023,7 @@ ulint srv_get_activity_count(void) /*========================*/ { - return(srv_sys->activity_count); + return(srv_sys.activity_count); } /*******************************************************************//** @@ -2050,7 +2035,7 @@ srv_check_activity( /*===============*/ ulint old_activity_count) /*!< in: old activity count */ { - return(srv_sys->activity_count != old_activity_count); + return(srv_sys.activity_count != old_activity_count); } /********************************************************************//** @@ -2415,7 +2400,7 @@ DECLARE_THREAD(srv_master_thread)( srv_main_thread_id = os_thread_pf(os_thread_get_curr_id()); slot = srv_reserve_slot(SRV_MASTER); - ut_a(slot == srv_sys->sys_threads); + ut_a(slot == srv_sys.sys_threads); last_print_time = ut_time(); loop: @@ -2505,18 +2490,18 @@ srv_task_execute(void) ut_ad(!srv_read_only_mode); ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - if (UT_LIST_GET_LEN(srv_sys->tasks) > 0) { + if (UT_LIST_GET_LEN(srv_sys.tasks) > 0) { - thr = UT_LIST_GET_FIRST(srv_sys->tasks); + thr = UT_LIST_GET_FIRST(srv_sys.tasks); ut_a(que_node_get_type(thr->child) == QUE_NODE_PURGE); - UT_LIST_REMOVE(queue, srv_sys->tasks, thr); + UT_LIST_REMOVE(queue, srv_sys.tasks, thr); } - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); if (thr != NULL) { @@ -2556,7 +2541,7 @@ DECLARE_THREAD(srv_worker_thread)( srv_sys_mutex_enter(); - ut_a(srv_sys->n_threads_active[SRV_WORKER] < srv_n_purge_threads); + ut_a(srv_sys.n_threads_active[SRV_WORKER] < srv_n_purge_threads); srv_sys_mutex_exit(); @@ -2885,11 +2870,11 @@ srv_que_task_enqueue_low( que_thr_t* thr) /*!< in: query thread */ { ut_ad(!srv_read_only_mode); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - UT_LIST_ADD_LAST(queue, srv_sys->tasks, thr); + UT_LIST_ADD_LAST(queue, srv_sys.tasks, thr); - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); srv_release_threads(SRV_WORKER, 1); } @@ -2906,11 +2891,11 @@ srv_get_task_queue_length(void) ut_ad(!srv_read_only_mode); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - n_tasks = UT_LIST_GET_LEN(srv_sys->tasks); + n_tasks = UT_LIST_GET_LEN(srv_sys.tasks); - mutex_exit(&srv_sys->tasks_mutex); + 
mutex_exit(&srv_sys.tasks_mutex); return(n_tasks); } diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 22a96b7446a..80241d9947f 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -400,7 +400,7 @@ UNIV_INTERN my_bool srv_cleaner_thread_priority = FALSE; UNIV_INTERN my_bool srv_master_thread_priority = FALSE; /* The number of purge threads to use.*/ -UNIV_INTERN ulong srv_n_purge_threads = 1; +UNIV_INTERN ulong srv_n_purge_threads; /* the number of pages to purge in one batch */ UNIV_INTERN ulong srv_purge_batch_size = 20; @@ -619,16 +619,16 @@ current_time % 5 != 0. */ /** Acquire the system_mutex. */ #define srv_sys_mutex_enter() do { \ - mutex_enter(&srv_sys->mutex); \ + mutex_enter(&srv_sys.mutex); \ } while (0) /** Test if the system mutex is owned. */ -#define srv_sys_mutex_own() (mutex_own(&srv_sys->mutex) \ +#define srv_sys_mutex_own() (mutex_own(&srv_sys.mutex) \ && !srv_read_only_mode) /** Release the system mutex. */ #define srv_sys_mutex_exit() do { \ - mutex_exit(&srv_sys->mutex); \ + mutex_exit(&srv_sys.mutex); \ } while (0) #define fetch_lock_wait_timeout(trx) \ @@ -723,7 +723,7 @@ struct srv_sys_t{ ulint n_sys_threads; /*!< size of the sys_threads array */ - srv_slot_t* sys_threads; /*!< server thread table; + srv_slot_t sys_threads[32 + 1]; /*!< server thread table; os_event_set() and os_event_reset() on sys_threads[]->event are @@ -747,7 +747,7 @@ struct srv_sys_t{ UNIV_INTERN ib_mutex_t server_mutex; #endif /* !HAVE_ATOMIC_BUILTINS */ -static srv_sys_t* srv_sys = NULL; +static srv_sys_t srv_sys; /** Event to signal srv_monitor_thread. Not protected by a mutex. Set after setting srv_print_innodb_monitor. */ @@ -769,10 +769,10 @@ and/or load it during startup. */ UNIV_INTERN char srv_buffer_pool_dump_at_shutdown = FALSE; UNIV_INTERN char srv_buffer_pool_load_at_startup = FALSE; -/** Slot index in the srv_sys->sys_threads array for the purge thread. */ +/** Slot index in the srv_sys.sys_threads array for the purge thread. */ static const ulint SRV_PURGE_SLOT = 1; -/** Slot index in the srv_sys->sys_threads array for the master thread. */ +/** Slot index in the srv_sys.sys_threads array for the master thread. */ static const ulint SRV_MASTER_SLOT = 0; UNIV_INTERN os_event_t srv_checkpoint_completed_event; @@ -882,21 +882,21 @@ srv_reserve_slot( switch (type) { case SRV_MASTER: - slot = &srv_sys->sys_threads[SRV_MASTER_SLOT]; + slot = &srv_sys.sys_threads[SRV_MASTER_SLOT]; break; case SRV_PURGE: - slot = &srv_sys->sys_threads[SRV_PURGE_SLOT]; + slot = &srv_sys.sys_threads[SRV_PURGE_SLOT]; break; case SRV_WORKER: /* Find an empty slot, skip the master and purge slots. */ - for (slot = &srv_sys->sys_threads[2]; + for (slot = &srv_sys.sys_threads[2]; slot->in_use; ++slot) { - ut_a(slot < &srv_sys->sys_threads[ - srv_sys->n_sys_threads]); + ut_a(slot < &srv_sys.sys_threads[ + srv_sys.n_sys_threads]); } break; @@ -912,7 +912,7 @@ srv_reserve_slot( ut_ad(srv_slot_get_type(slot) == type); - ++srv_sys->n_threads_active[type]; + ++srv_sys.n_threads_active[type]; srv_sys_mutex_exit(); @@ -942,27 +942,27 @@ srv_suspend_thread_low( case SRV_MASTER: /* We have only one master thread and it should be the first entry always. */ - ut_a(srv_sys->n_threads_active[type] == 1); + ut_a(srv_sys.n_threads_active[type] == 1); break; case SRV_PURGE: /* We have only one purge coordinator thread and it should be the second entry always. 
*/ - ut_a(srv_sys->n_threads_active[type] == 1); + ut_a(srv_sys.n_threads_active[type] == 1); break; case SRV_WORKER: ut_a(srv_n_purge_threads > 1); - ut_a(srv_sys->n_threads_active[type] > 0); + ut_a(srv_sys.n_threads_active[type] > 0); break; } ut_a(!slot->suspended); slot->suspended = TRUE; - ut_a(srv_sys->n_threads_active[type] > 0); + ut_a(srv_sys.n_threads_active[type] > 0); - srv_sys->n_threads_active[type]--; + srv_sys.n_threads_active[type]--; return(os_event_reset(slot->event)); } @@ -1017,7 +1017,7 @@ srv_resume_thread(srv_slot_t* slot, ib_int64_t sig_count = 0, bool wait = true, ut_ad(slot->suspended); slot->suspended = FALSE; - ++srv_sys->n_threads_active[slot->type]; + ++srv_sys.n_threads_active[slot->type]; srv_sys_mutex_exit(); return(timeout); } @@ -1039,8 +1039,8 @@ srv_release_threads(enum srv_thread_type type, ulint n) srv_sys_mutex_enter(); - for (ulint i = 0; i < srv_sys->n_sys_threads; i++) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + for (ulint i = 0; i < srv_sys.n_sys_threads; i++) { + srv_slot_t* slot = &srv_sys.sys_threads[i]; if (!slot->in_use || srv_slot_get_type(slot) != type) { continue; @@ -1060,7 +1060,7 @@ srv_release_threads(enum srv_thread_type type, ulint n) should be the first entry always. */ ut_a(n == 1); ut_a(i == SRV_MASTER_SLOT); - ut_a(srv_sys->n_threads_active[type] == 0); + ut_a(srv_sys.n_threads_active[type] == 0); break; case SRV_PURGE: @@ -1069,12 +1069,12 @@ srv_release_threads(enum srv_thread_type type, ulint n) ut_a(n == 1); ut_a(i == SRV_PURGE_SLOT); ut_a(srv_n_purge_threads > 0); - ut_a(srv_sys->n_threads_active[type] == 0); + ut_a(srv_sys.n_threads_active[type] == 0); break; case SRV_WORKER: ut_a(srv_n_purge_threads > 1); - ut_a(srv_sys->n_threads_active[type] + ut_a(srv_sys.n_threads_active[type] < srv_n_purge_threads - 1); break; } @@ -1112,9 +1112,6 @@ void srv_init(void) /*==========*/ { - ulint n_sys_threads = 0; - ulint srv_sys_sz = sizeof(*srv_sys); - #ifndef HAVE_ATOMIC_BUILTINS mutex_create(server_mutex_key, &server_mutex, SYNC_ANY_LATCH); #endif /* !HAVE_ATOMIC_BUILTINS */ @@ -1122,29 +1119,19 @@ srv_init(void) mutex_create(srv_innodb_monitor_mutex_key, &srv_innodb_monitor_mutex, SYNC_NO_ORDER_CHECK); - if (!srv_read_only_mode) { - - /* Number of purge threads + master thread */ - n_sys_threads = srv_n_purge_threads + 1; - - srv_sys_sz += n_sys_threads * sizeof(*srv_sys->sys_threads); - } - - srv_sys = static_cast(mem_zalloc(srv_sys_sz)); - - srv_sys->n_sys_threads = n_sys_threads; + srv_sys.n_sys_threads = srv_read_only_mode + ? 
0 + : srv_n_purge_threads + 1/* purge coordinator */; if (!srv_read_only_mode) { - mutex_create(srv_sys_mutex_key, &srv_sys->mutex, SYNC_THREADS); + mutex_create(srv_sys_mutex_key, &srv_sys.mutex, SYNC_THREADS); mutex_create(srv_sys_tasks_mutex_key, - &srv_sys->tasks_mutex, SYNC_ANY_LATCH); + &srv_sys.tasks_mutex, SYNC_ANY_LATCH); - srv_sys->sys_threads = (srv_slot_t*) &srv_sys[1]; - - for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) { - srv_slot_t* slot = &srv_sys->sys_threads[i]; + for (ulint i = 0; i < srv_sys.n_sys_threads; ++i) { + srv_slot_t* slot = &srv_sys.sys_threads[i]; slot->event = os_event_create(); @@ -1203,8 +1190,8 @@ srv_free(void) if (!srv_read_only_mode) { - for (ulint i = 0; i < srv_sys->n_sys_threads; i++) - os_event_free(srv_sys->sys_threads[i].event); + for (ulint i = 0; i < srv_sys.n_sys_threads; i++) + os_event_free(srv_sys.sys_threads[i].event); os_event_free(srv_error_event); srv_error_event = NULL; @@ -1216,8 +1203,8 @@ srv_free(void) srv_checkpoint_completed_event = NULL; os_event_free(srv_redo_log_tracked_event); srv_redo_log_tracked_event = NULL; - mutex_free(&srv_sys->mutex); - mutex_free(&srv_sys->tasks_mutex); + mutex_free(&srv_sys.mutex); + mutex_free(&srv_sys.tasks_mutex); } #ifndef HAVE_ATOMIC_BUILTINS @@ -1226,9 +1213,6 @@ srv_free(void) mutex_free(&srv_innodb_monitor_mutex); mutex_free(&page_zip_stat_per_index_mutex); - mem_free(srv_sys); - srv_sys = NULL; - trx_i_s_cache_free(trx_i_s_cache); } @@ -2305,9 +2289,9 @@ srv_inc_activity_count( is caused by the background change buffer merge */ { - srv_sys->activity_count.inc(); + srv_sys.activity_count.inc(); if (ibuf_merge_activity) - srv_sys->ibuf_merge_activity_count.inc(); + srv_sys.ibuf_merge_activity_count.inc(); } /**********************************************************************//** @@ -2329,7 +2313,7 @@ srv_get_active_thread_type(void) srv_sys_mutex_enter(); for (ulint i = SRV_WORKER; i <= SRV_MASTER; ++i) { - if (srv_sys->n_threads_active[i] != 0) { + if (srv_sys.n_threads_active[i] != 0) { ret = static_cast(i); break; } @@ -2582,12 +2566,12 @@ srv_active_wake_master_thread(void) srv_inc_activity_count(); - if (srv_sys->n_threads_active[SRV_MASTER] == 0) { + if (srv_sys.n_threads_active[SRV_MASTER] == 0) { srv_slot_t* slot; srv_sys_mutex_enter(); - slot = &srv_sys->sys_threads[SRV_MASTER_SLOT]; + slot = &srv_sys.sys_threads[SRV_MASTER_SLOT]; /* Only if the master thread has been started. */ @@ -2614,7 +2598,7 @@ srv_wake_purge_thread_if_not_active(void) ut_ad(!srv_sys_mutex_own()); if (purge_sys->state == PURGE_STATE_RUN - && srv_sys->n_threads_active[SRV_PURGE] == 0) { + && srv_sys.n_threads_active[SRV_PURGE] == 0) { srv_release_threads(SRV_PURGE, 1); } @@ -2643,7 +2627,7 @@ ulint srv_get_activity_count(void) /*========================*/ { - return(srv_sys->activity_count); + return(srv_sys.activity_count); } /** Get current server ibuf merge activity count. 
@@ -2652,7 +2636,7 @@ static ulint srv_get_ibuf_merge_activity_count(void) { - return(srv_sys->ibuf_merge_activity_count); + return(srv_sys.ibuf_merge_activity_count); } /*******************************************************************//** @@ -2671,14 +2655,14 @@ srv_check_activity( ULINT_UNDEFINED */ ulint old_ibuf_merge_activity_count) { - ulint new_activity_count = srv_sys->activity_count; + ulint new_activity_count = srv_sys.activity_count; if (old_ibuf_merge_activity_count == ULINT_UNDEFINED) return(new_activity_count != old_activity_count); /* If we care about ibuf merge activity, then the server is considered idle if all activity, if any, was due to ibuf merge. */ ulint new_ibuf_merge_activity_count - = srv_sys->ibuf_merge_activity_count; + = srv_sys.ibuf_merge_activity_count; ut_ad(new_ibuf_merge_activity_count <= new_activity_count); ut_ad(new_ibuf_merge_activity_count >= old_ibuf_merge_activity_count); @@ -3069,7 +3053,7 @@ DECLARE_THREAD(srv_master_thread)( srv_main_thread_id = os_thread_pf(os_thread_get_curr_id()); slot = srv_reserve_slot(SRV_MASTER); - ut_a(slot == srv_sys->sys_threads); + ut_a(slot == srv_sys.sys_threads); last_print_time = ut_time(); loop: @@ -3165,18 +3149,18 @@ srv_task_execute(void) ut_ad(!srv_read_only_mode); ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - if (UT_LIST_GET_LEN(srv_sys->tasks) > 0) { + if (UT_LIST_GET_LEN(srv_sys.tasks) > 0) { - thr = UT_LIST_GET_FIRST(srv_sys->tasks); + thr = UT_LIST_GET_FIRST(srv_sys.tasks); ut_a(que_node_get_type(thr->child) == QUE_NODE_PURGE); - UT_LIST_REMOVE(queue, srv_sys->tasks, thr); + UT_LIST_REMOVE(queue, srv_sys.tasks, thr); } - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); if (thr != NULL) { @@ -3226,7 +3210,7 @@ DECLARE_THREAD(srv_worker_thread)( srv_sys_mutex_enter(); - ut_a(srv_sys->n_threads_active[SRV_WORKER] < srv_n_purge_threads); + ut_a(srv_sys.n_threads_active[SRV_WORKER] < srv_n_purge_threads); srv_sys_mutex_exit(); @@ -3566,11 +3550,11 @@ srv_que_task_enqueue_low( que_thr_t* thr) /*!< in: query thread */ { ut_ad(!srv_read_only_mode); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - UT_LIST_ADD_LAST(queue, srv_sys->tasks, thr); + UT_LIST_ADD_LAST(queue, srv_sys.tasks, thr); - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); srv_release_threads(SRV_WORKER, 1); } @@ -3587,11 +3571,11 @@ srv_get_task_queue_length(void) ut_ad(!srv_read_only_mode); - mutex_enter(&srv_sys->tasks_mutex); + mutex_enter(&srv_sys.tasks_mutex); - n_tasks = UT_LIST_GET_LEN(srv_sys->tasks); + n_tasks = UT_LIST_GET_LEN(srv_sys.tasks); - mutex_exit(&srv_sys->tasks_mutex); + mutex_exit(&srv_sys.tasks_mutex); return(n_tasks); } -- cgit v1.2.1 From 3670d167a6261a867b8a5e64028e7b580ca5991a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Wed, 17 May 2017 16:19:01 +0300 Subject: Fix tokudb test failures post merge --- storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt | 1 - storage/tokudb/mysql-test/tokudb/t/background_job_manager.test | 1 + storage/tokudb/mysql-test/tokudb/t/bug-1657908.test | 1 + storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test | 1 + storage/tokudb/mysql-test/tokudb/t/dir_cmd.test | 1 + storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test | 1 + storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test | 1 + storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test | 1 + 
storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test | 1 + storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test | 1 + storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test | 1 + storage/tokudb/mysql-test/tokudb_bugs/t/db938.test | 1 + storage/tokudb/mysql-test/tokudb_bugs/t/db945.test | 1 + storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test | 1 + 14 files changed, 13 insertions(+), 1 deletion(-) delete mode 100644 storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt (limited to 'storage') diff --git a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt deleted file mode 100644 index 3cc9ea3009e..00000000000 --- a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.opt +++ /dev/null @@ -1 +0,0 @@ ---tokudb-background-job-status diff --git a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test index 933814442e0..709fc463696 100644 --- a/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test +++ b/storage/tokudb/mysql-test/tokudb/t/background_job_manager.test @@ -1,3 +1,4 @@ +skip Background Job Manager not supported in MariaDB; # This is a comprehensive test for the background job manager and # the information_schema.tokudb_background_job_status table # diff --git a/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test b/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test index 3c7b602b96d..adcf4ef55f6 100644 --- a/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test +++ b/storage/tokudb/mysql-test/tokudb/t/bug-1657908.test @@ -1,3 +1,4 @@ +--source include/have_partition.inc # See https://bugs.launchpad.net/percona-server/+bug/1657908 source include/have_tokudb.inc; diff --git a/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test b/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test index ec74a4a28bc..9675449372b 100644 --- a/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test +++ b/storage/tokudb/mysql-test/tokudb/t/card_auto_analyze_lots.test @@ -1,3 +1,4 @@ +skip Background Job Manager not supported in MariaDB; # Test the auto analyze on lots of tables -- source include/have_tokudb.inc diff --git a/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test b/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test index 102e8217e2b..b9d8c80de65 100644 --- a/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test +++ b/storage/tokudb/mysql-test/tokudb/t/dir_cmd.test @@ -1,3 +1,4 @@ +skip TokuDB dir CMD disabled in MariaDB; source include/have_tokudb.inc; --let $MYSQL_DATADIR=`select @@datadir` diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test index 42fb548814f..8fe5e66a9b3 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_lock_waits_timeout.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # verify that tokudb_locks and tokudb_lock_waits contents for 2 conflicting transactions with a lock timeout source include/have_tokudb.inc; diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test index 8f205ad7f45..59d04ead386 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_locks.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in 
MariaDB; # verify that information_schema.tokudb_locks gets populated with locks for 2 clients source include/have_tokudb.inc; diff --git a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test index 517280391c4..b4ab64be962 100644 --- a/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test +++ b/storage/tokudb/mysql-test/tokudb/t/i_s_tokudb_trx.test @@ -1,3 +1,4 @@ +skip Tokudb trx not in I_S in MariaDB; # verify that information_schema.tokudb_trx gets populated with transactions source include/have_tokudb.inc; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test index 79043664607..7343768a7d7 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_delete_trigger.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # check for any locking weirdness on DELETE triggers source include/have_tokudb.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test index ffe2face9f2..313b1f96b52 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_insert_trigger.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # check for any locking weirdness on INSERT triggers source include/have_tokudb.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test index 063a88cb4ab..ce1eb6bddd1 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db397_update_trigger.test @@ -1,3 +1,4 @@ +skip Tokudb Lock Waits not in I_S in MariaDB; # check for any locking weirdness on UPDATE triggers source include/have_tokudb.inc; set default_storage_engine='tokudb'; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test index 50434a79a00..d65bf3d95de 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db938.test @@ -1,3 +1,4 @@ +skip Background Job Manager not supported in MariaDB; # This test for DB-938 tests a race condition where a scheduled background job # (analyze) ends up operating on a set of DB* key_file[] in TOKUDB_SHARE that # were set to NULL during a TRUNCATE TABLE operation. 
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test index 27b0d284484..7996f9f5792 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db945.test @@ -1,3 +1,4 @@ +skip Background Job Manager not supported in MariaDB; source include/have_tokudb.inc; set default_storage_engine='tokudb'; disable_warnings; diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test index 735a88afed8..e7ef3211401 100644 --- a/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test +++ b/storage/tokudb/mysql-test/tokudb_bugs/t/tokudb718.test @@ -1,3 +1,4 @@ +skip Tokudb Fractal Tree info not in I_S in MariaDB; # test DB-718, a crash caused by broken error handling in tokudb's fractal_tree_info information schema source include/have_tokudb.inc; set default_storage_engine='tokudb'; -- cgit v1.2.1 From 8b0db08f36e779694510f28d10be0c9d6c2f3501 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Wed, 17 May 2017 18:39:25 +0300 Subject: Fix windows compilation in xtradb post-merge --- storage/xtradb/os/os0file.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'storage') diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index 9e5da12bb41..5e67d2473f0 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -1859,17 +1859,17 @@ os_file_create_func( *success = TRUE; retry = FALSE; if (srv_use_native_aio && ((attributes & FILE_FLAG_OVERLAPPED) != 0)) { - ut_a(CreateIoCompletionPort(file, completion_port, 0, 0)); + ut_a(CreateIoCompletionPort(file.m_file, completion_port, 0, 0)); } } } while (retry); if (srv_use_atomic_writes && type == OS_DATA_FILE && - !os_file_set_atomic_writes(name, file)) { - CloseHandle(file); + !os_file_set_atomic_writes(name, file.m_file)) { + CloseHandle(file.m_file); *success = FALSE; - file = INVALID_HANDLE_VALUE; + file.m_file = INVALID_HANDLE_VALUE; } #else /* __WIN__ */ @@ -5040,7 +5040,7 @@ os_aio_windows_handle( switch (slot->type) { case OS_FILE_WRITE: - ret_val = os_file_write(slot->name, slot->file.m_file, slot->buf, + ret_val = os_file_write(slot->name, slot->file, slot->buf, li.QuadPart, slot->len); break; case OS_FILE_READ: -- cgit v1.2.1 From 9f57e595b49744e3c1920f8c46a2ec8e7fc5fd66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 17 May 2017 16:14:41 +0300 Subject: Refactor trx_undo_report_row_operation() Fix a -fsanitizer=undefined warning that trx_undo_report_row_operation() was being passed thr=NULL when the BTR_NO_UNDO_LOG_FLAG flag was set. trx_undo_report_row_operation(): Remove the first two parameters. The parameter clust_entry!=NULL distinguishes inserts from updates. This should be a non-functional change (no observable change in behaviour; slightly smaller code). 
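For illustration only, here is a minimal standalone sketch of the pattern described above, in which a nullable pointer argument doubles as the insert/update discriminator. The type and function names are hypothetical stand-ins, not the actual InnoDB signatures:

#include <cstdio>

struct entry_t {};                        /* stands in for dtuple_t */
struct update_t {};                       /* stands in for upd_t    */

static void report_insert(const entry_t*)  { std::puts("insert undo"); }
static void report_modify(const update_t*) { std::puts("modify undo"); }

/* before: report(op_type, entry, update);   after: */
static void report(const entry_t* clust_entry, const update_t* update)
{
        if (clust_entry != NULL) {        /* an insert iff an entry is passed */
                report_insert(clust_entry);
        } else {                          /* otherwise an update or delete-mark */
                report_modify(update);
        }
}

int main()
{
        entry_t  e;
        update_t u;
        report(&e, NULL);                 /* undo log entry for an insert */
        report(NULL, &u);                 /* undo log entry for an update */
        return 0;
}

In the actual patch the BTR_NO_UNDO_LOG_FLAG check additionally moves to the callers, as the btr0cur.cc hunks below show.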
--- storage/innobase/btr/btr0cur.cc | 26 ++++++++++++---------- storage/innobase/include/trx0rec.h | 11 ++-------- storage/innobase/trx/trx0rec.cc | 45 +++++++++----------------------------- storage/xtradb/btr/btr0cur.cc | 26 ++++++++++++---------- storage/xtradb/include/trx0rec.h | 11 ++-------- storage/xtradb/trx/trx0rec.cc | 42 ++++++++--------------------------- 6 files changed, 53 insertions(+), 108 deletions(-) (limited to 'storage') diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 7846debc115..7ca38c5b67d 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -1164,18 +1164,21 @@ btr_cur_ins_lock_and_undo( index, thr, mtr, inherit); if (err != DB_SUCCESS + || !(~flags | (BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) || !dict_index_is_clust(index) || dict_index_is_ibuf(index)) { return(err); } - err = trx_undo_report_row_operation(flags, TRX_UNDO_INSERT_OP, - thr, index, entry, - NULL, 0, NULL, NULL, - &roll_ptr); - if (err != DB_SUCCESS) { - - return(err); + if (flags & BTR_NO_UNDO_LOG_FLAG) { + roll_ptr = 0; + } else { + err = trx_undo_report_row_operation(thr, index, entry, + NULL, 0, NULL, NULL, + &roll_ptr); + if (err != DB_SUCCESS) { + return(err); + } } /* Now we can fill in the roll ptr field in entry */ @@ -1712,9 +1715,10 @@ btr_cur_upd_lock_and_undo( /* Append the info about the update in the undo log */ - return(trx_undo_report_row_operation( - flags, TRX_UNDO_MODIFY_OP, thr, - index, NULL, update, + return((flags & BTR_NO_UNDO_LOG_FLAG) + ? DB_SUCCESS + : trx_undo_report_row_operation( + thr, index, NULL, update, cmpl_info, rec, offsets, roll_ptr)); } @@ -2985,7 +2989,7 @@ btr_cur_del_mark_set_clust_rec( return(err); } - err = trx_undo_report_row_operation(0, TRX_UNDO_MODIFY_OP, thr, + err = trx_undo_report_row_operation(thr, index, NULL, NULL, 0, rec, offsets, &roll_ptr); if (err != DB_SUCCESS) { diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h index 359937e3583..a6e202d04e4 100644 --- a/storage/innobase/include/trx0rec.h +++ b/storage/innobase/include/trx0rec.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -212,10 +213,6 @@ UNIV_INTERN dberr_t trx_undo_report_row_operation( /*==========================*/ - ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is - set, does nothing */ - ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or - TRX_UNDO_MODIFY_OP */ que_thr_t* thr, /*!< in: query thread */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: in the case of an insert, @@ -233,7 +230,7 @@ trx_undo_report_row_operation( inserted undo log record, 0 if BTR_NO_UNDO_LOG flag was specified */ - MY_ATTRIBUTE((nonnull(3,4,10), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,8), warn_unused_result)); /******************************************************************//** Copies an undo record to heap. This function can be called if we know that the undo log record exists. 
@@ -313,10 +310,6 @@ record */ storage fields: used by purge to free the external storage */ -/* Operation type flags used in trx_undo_report_row_operation */ -#define TRX_UNDO_INSERT_OP 1 -#define TRX_UNDO_MODIFY_OP 2 - #ifndef UNIV_NONINL #include "trx0rec.ic" #endif diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc index 5fa74846243..f694cdd3d5e 100644 --- a/storage/innobase/trx/trx0rec.cc +++ b/storage/innobase/trx/trx0rec.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1185,10 +1186,6 @@ UNIV_INTERN dberr_t trx_undo_report_row_operation( /*==========================*/ - ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is - set, does nothing */ - ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or - TRX_UNDO_MODIFY_OP */ que_thr_t* thr, /*!< in: query thread */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: in the case of an insert, @@ -1221,17 +1218,8 @@ trx_undo_report_row_operation( ut_ad(!srv_read_only_mode); ut_a(dict_index_is_clust(index)); ut_ad(!rec || rec_offs_validate(rec, index, offsets)); - - if (flags & BTR_NO_UNDO_LOG_FLAG) { - - *roll_ptr = 0; - - return(DB_SUCCESS); - } - ut_ad(thr); - ut_ad((op_type != TRX_UNDO_INSERT_OP) - || (clust_entry && !update && !rec)); + ut_ad(!clust_entry || (!update && !rec)); trx = thr_get_trx(thr); @@ -1252,8 +1240,7 @@ trx_undo_report_row_operation( /* If the undo log is not assigned yet, assign one */ - switch (op_type) { - case TRX_UNDO_INSERT_OP: + if (clust_entry) { undo = trx->insert_undo; if (undo == NULL) { @@ -1269,10 +1256,7 @@ trx_undo_report_row_operation( ut_ad(err == DB_SUCCESS); } - break; - default: - ut_ad(op_type == TRX_UNDO_MODIFY_OP); - + } else { undo = trx->update_undo; if (undo == NULL) { @@ -1296,23 +1280,14 @@ trx_undo_report_row_operation( buf_block_dbg_add_level(undo_block, SYNC_TRX_UNDO_PAGE); do { - page_t* undo_page; - ulint offset; - - undo_page = buf_block_get_frame(undo_block); ut_ad(page_no == buf_block_get_page_no(undo_block)); - - switch (op_type) { - case TRX_UNDO_INSERT_OP: - offset = trx_undo_page_report_insert( - undo_page, trx, index, clust_entry, &mtr); - break; - default: - ut_ad(op_type == TRX_UNDO_MODIFY_OP); - offset = trx_undo_page_report_modify( + page_t* undo_page = buf_block_get_frame(undo_block); + ulint offset = clust_entry + ? trx_undo_page_report_insert( + undo_page, trx, index, clust_entry, &mtr) + : trx_undo_page_report_modify( undo_page, trx, index, rec, offsets, update, cmpl_info, &mtr); - } if (UNIV_UNLIKELY(offset == 0)) { /* The record did not fit on the page. 
We erase the @@ -1363,7 +1338,7 @@ trx_undo_report_row_operation( mutex_exit(&trx->undo_mutex); *roll_ptr = trx_undo_build_roll_ptr( - op_type == TRX_UNDO_INSERT_OP, + clust_entry != NULL, rseg->id, page_no, offset); return(DB_SUCCESS); } diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc index 0b554b271c4..c495e178b21 100644 --- a/storage/xtradb/btr/btr0cur.cc +++ b/storage/xtradb/btr/btr0cur.cc @@ -1269,18 +1269,21 @@ btr_cur_ins_lock_and_undo( index, thr, mtr, inherit); if (err != DB_SUCCESS + || !(~flags | (BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) || !dict_index_is_clust(index) || dict_index_is_ibuf(index)) { return(err); } - err = trx_undo_report_row_operation(flags, TRX_UNDO_INSERT_OP, - thr, index, entry, - NULL, 0, NULL, NULL, - &roll_ptr); - if (err != DB_SUCCESS) { - - return(err); + if (flags & BTR_NO_UNDO_LOG_FLAG) { + roll_ptr = 0; + } else { + err = trx_undo_report_row_operation(thr, index, entry, + NULL, 0, NULL, NULL, + &roll_ptr); + if (err != DB_SUCCESS) { + return(err); + } } /* Now we can fill in the roll ptr field in entry */ @@ -1851,9 +1854,10 @@ btr_cur_upd_lock_and_undo( /* Append the info about the update in the undo log */ - return(trx_undo_report_row_operation( - flags, TRX_UNDO_MODIFY_OP, thr, - index, NULL, update, + return((flags & BTR_NO_UNDO_LOG_FLAG) + ? DB_SUCCESS + : trx_undo_report_row_operation( + thr, index, NULL, update, cmpl_info, rec, offsets, roll_ptr)); } @@ -3175,7 +3179,7 @@ btr_cur_del_mark_set_clust_rec( return(err); } - err = trx_undo_report_row_operation(0, TRX_UNDO_MODIFY_OP, thr, + err = trx_undo_report_row_operation(thr, index, NULL, NULL, 0, rec, offsets, &roll_ptr); if (err != DB_SUCCESS) { diff --git a/storage/xtradb/include/trx0rec.h b/storage/xtradb/include/trx0rec.h index 359937e3583..a6e202d04e4 100644 --- a/storage/xtradb/include/trx0rec.h +++ b/storage/xtradb/include/trx0rec.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -212,10 +213,6 @@ UNIV_INTERN dberr_t trx_undo_report_row_operation( /*==========================*/ - ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is - set, does nothing */ - ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or - TRX_UNDO_MODIFY_OP */ que_thr_t* thr, /*!< in: query thread */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: in the case of an insert, @@ -233,7 +230,7 @@ trx_undo_report_row_operation( inserted undo log record, 0 if BTR_NO_UNDO_LOG flag was specified */ - MY_ATTRIBUTE((nonnull(3,4,10), warn_unused_result)); + MY_ATTRIBUTE((nonnull(1,2,8), warn_unused_result)); /******************************************************************//** Copies an undo record to heap. This function can be called if we know that the undo log record exists. 
@@ -313,10 +310,6 @@ record */ storage fields: used by purge to free the external storage */ -/* Operation type flags used in trx_undo_report_row_operation */ -#define TRX_UNDO_INSERT_OP 1 -#define TRX_UNDO_MODIFY_OP 2 - #ifndef UNIV_NONINL #include "trx0rec.ic" #endif diff --git a/storage/xtradb/trx/trx0rec.cc b/storage/xtradb/trx/trx0rec.cc index 5fa74846243..257e0380db3 100644 --- a/storage/xtradb/trx/trx0rec.cc +++ b/storage/xtradb/trx/trx0rec.cc @@ -1185,10 +1185,6 @@ UNIV_INTERN dberr_t trx_undo_report_row_operation( /*==========================*/ - ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is - set, does nothing */ - ulint op_type, /*!< in: TRX_UNDO_INSERT_OP or - TRX_UNDO_MODIFY_OP */ que_thr_t* thr, /*!< in: query thread */ dict_index_t* index, /*!< in: clustered index */ const dtuple_t* clust_entry, /*!< in: in the case of an insert, @@ -1222,16 +1218,8 @@ trx_undo_report_row_operation( ut_a(dict_index_is_clust(index)); ut_ad(!rec || rec_offs_validate(rec, index, offsets)); - if (flags & BTR_NO_UNDO_LOG_FLAG) { - - *roll_ptr = 0; - - return(DB_SUCCESS); - } - ut_ad(thr); - ut_ad((op_type != TRX_UNDO_INSERT_OP) - || (clust_entry && !update && !rec)); + ut_ad(!clust_entry || (!update && !rec)); trx = thr_get_trx(thr); @@ -1252,8 +1240,7 @@ trx_undo_report_row_operation( /* If the undo log is not assigned yet, assign one */ - switch (op_type) { - case TRX_UNDO_INSERT_OP: + if (clust_entry) { undo = trx->insert_undo; if (undo == NULL) { @@ -1269,10 +1256,7 @@ trx_undo_report_row_operation( ut_ad(err == DB_SUCCESS); } - break; - default: - ut_ad(op_type == TRX_UNDO_MODIFY_OP); - + } else { undo = trx->update_undo; if (undo == NULL) { @@ -1296,23 +1280,15 @@ trx_undo_report_row_operation( buf_block_dbg_add_level(undo_block, SYNC_TRX_UNDO_PAGE); do { - page_t* undo_page; - ulint offset; - - undo_page = buf_block_get_frame(undo_block); ut_ad(page_no == buf_block_get_page_no(undo_block)); - switch (op_type) { - case TRX_UNDO_INSERT_OP: - offset = trx_undo_page_report_insert( - undo_page, trx, index, clust_entry, &mtr); - break; - default: - ut_ad(op_type == TRX_UNDO_MODIFY_OP); - offset = trx_undo_page_report_modify( + page_t* undo_page = buf_block_get_frame(undo_block); + ulint offset = clust_entry + ? trx_undo_page_report_insert( + undo_page, trx, index, clust_entry, &mtr) + : trx_undo_page_report_modify( undo_page, trx, index, rec, offsets, update, cmpl_info, &mtr); - } if (UNIV_UNLIKELY(offset == 0)) { /* The record did not fit on the page. We erase the @@ -1363,7 +1339,7 @@ trx_undo_report_row_operation( mutex_exit(&trx->undo_mutex); *roll_ptr = trx_undo_build_roll_ptr( - op_type == TRX_UNDO_INSERT_OP, + clust_entry != NULL, rseg->id, page_no, offset); return(DB_SUCCESS); } -- cgit v1.2.1 From a436e349dfd0035b2b861ee772b47089043f5a2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 17 May 2017 16:37:33 +0300 Subject: ibuf_get_volume_buffered_hash(): Use a proper type cast On 64-bit systems, the constant 1 would be 32-bit (int or unsigned) by default. Cast the constant to ulint before shifting to avoid a -fsanitize=undefined warning or any potential overflow. 
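A small self-contained illustration of the issue (not part of the patch; plain unsigned long stands in for ulint and the shift count of 40 is just an example):

#include <cstdio>

int main()
{
        typedef unsigned long ulint;      /* 64-bit on LP64 platforms */
        unsigned fold = 40;               /* any count >= 32 shows the problem */

        /* ulint bad = 1 << fold;            undefined: the 32-bit int literal
                                             is shifted past its width before
                                             any widening takes place */
        ulint good = static_cast<ulint>(1) << (fold % (8 * sizeof(ulint)));

        std::printf("%lu\n", good);       /* prints 1099511627776, i.e. 2^40 */
        return 0;
}

Because the modulus in the real code is the bit width of ulint (64 on 64-bit systems), shift counts of 32 and above do occur, and shifting a 32-bit int that far is undefined behaviour, which is what -fsanitize=undefined reports; widening the left operand first makes the full 64-bit range usable.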
--- storage/innobase/ibuf/ibuf0ibuf.cc | 3 +-- storage/xtradb/ibuf/ibuf0ibuf.cc | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'storage') diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 55429e3cf34..49c1d1a4227 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -2895,8 +2895,7 @@ ibuf_get_volume_buffered_hash( fold = ut_fold_binary(data, len); hash += (fold / (CHAR_BIT * sizeof *hash)) % size; - bitmask = static_cast( - 1 << (fold % (CHAR_BIT * sizeof(*hash)))); + bitmask = static_cast(1) << (fold % (CHAR_BIT * sizeof(*hash))); if (*hash & bitmask) { diff --git a/storage/xtradb/ibuf/ibuf0ibuf.cc b/storage/xtradb/ibuf/ibuf0ibuf.cc index e00adefe68f..414d7fc39be 100644 --- a/storage/xtradb/ibuf/ibuf0ibuf.cc +++ b/storage/xtradb/ibuf/ibuf0ibuf.cc @@ -2935,8 +2935,7 @@ ibuf_get_volume_buffered_hash( fold = ut_fold_binary(data, len); hash += (fold / (CHAR_BIT * sizeof *hash)) % size; - bitmask = static_cast( - 1 << (fold % (CHAR_BIT * sizeof(*hash)))); + bitmask = static_cast(1) << (fold % (CHAR_BIT * sizeof(*hash))); if (*hash & bitmask) { -- cgit v1.2.1 From 54bb04f7efc1d0842b3d8befb670f5d9f05cb211 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 17 May 2017 16:39:57 +0300 Subject: Fix some __attribute__((nonnull)) misuse This fixes warnings that were emitted when running InnoDB test suites on a debug server that was compiled with GCC 7.1.0 using the flags -O3 -fsanitize=undefined. thd_requested_durability(): XtraDB can call this with trx->mysql_thd=NULL. Remove the function in InnoDB, because it is not used there. calc_row_difference(): Do not call memcmp(o_ptr, NULL, 0). innobase_index_name_is_reserved(): This can be called with key_info=NULL, num_of_keys=0. innobase_dropping_foreign(), innobase_check_foreigns_low(), innobase_check_foreigns(): This can be called with drop_fk=NULL, n_drop_fk=0. rec_convert_dtuple_to_rec_comp(): Do not invoke memcpy(end, NULL, 0). --- storage/innobase/handler/ha_innodb.cc | 17 ++--------------- storage/innobase/handler/ha_innodb.h | 4 ++-- storage/innobase/handler/handler0alter.cc | 13 +++++++------ storage/innobase/include/ha_prototypes.h | 12 ------------ storage/innobase/rem/rem0rec.cc | 6 ++++-- storage/xtradb/handler/ha_innodb.cc | 4 ++-- storage/xtradb/handler/ha_innodb.h | 2 +- storage/xtradb/handler/handler0alter.cc | 12 ++++++------ storage/xtradb/include/ha_prototypes.h | 3 ++- storage/xtradb/rem/rem0rec.cc | 6 ++++-- 10 files changed, 30 insertions(+), 49 deletions(-) (limited to 'storage') diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 26c9d0321d4..c0b12ab42df 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1321,19 +1321,6 @@ thd_is_replication_slave_thread( return((ibool) thd_slave_thread(thd)); } -/******************************************************************//** -Gets information on the durability property requested by thread. -Used when writing either a prepare or commit record to the log -buffer. @return the durability property. */ -UNIV_INTERN -enum durability_properties -thd_requested_durability( -/*=====================*/ - const THD* thd) /*!< in: thread handle */ -{ - return(thd_get_durability_property(thd)); -} - /******************************************************************//** Returns true if transaction should be flagged as read-only. 
@return true if the thd is marked as read-only */ @@ -7519,8 +7506,8 @@ calc_row_difference( } } - if (o_len != n_len || (o_len != UNIV_SQL_NULL && - 0 != memcmp(o_ptr, n_ptr, o_len))) { + if (o_len != n_len || (o_len != 0 && o_len != UNIV_SQL_NULL + && 0 != memcmp(o_ptr, n_ptr, o_len))) { /* The field has changed */ ufield = uvect->fields + n_changed; diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 9ddb1600378..46d689ce1e8 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2000, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2016, MariaDB Corporation. +Copyright (c) 2013, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -476,7 +476,7 @@ innobase_index_name_is_reserved( const KEY* key_info, /*!< in: Indexes to be created */ ulint num_of_keys) /*!< in: Number of indexes to be created. */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /*****************************************************************//** Determines InnoDB table flags. diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index dc084dc1b95..8d6f3e79f61 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2277,10 +2278,10 @@ online_retry_drop_indexes_with_trx( @param drop_fk constraints being dropped @param n_drop_fk number of constraints that are being dropped @return whether the constraint is being dropped */ -inline MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1), warn_unused_result)) +inline bool innobase_dropping_foreign( -/*======================*/ const dict_foreign_t* foreign, dict_foreign_t** drop_fk, ulint n_drop_fk) @@ -2304,10 +2305,10 @@ column that is being dropped or modified to NOT NULL. @retval true Not allowed (will call my_error()) @retval false Allowed */ -static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,4), warn_unused_result)) +static bool innobase_check_foreigns_low( -/*========================*/ const dict_table_t* user_table, dict_foreign_t** drop_fk, ulint n_drop_fk, @@ -2404,10 +2405,10 @@ column that is being dropped or modified to NOT NULL. 
@retval true Not allowed (will call my_error()) @retval false Allowed */ -static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,2,3,4), warn_unused_result)) +static bool innobase_check_foreigns( -/*====================*/ Alter_inplace_info* ha_alter_info, const TABLE* altered_table, const TABLE* old_table, diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index 75b81e26f2f..488ed0257a7 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -126,18 +126,6 @@ thd_is_replication_slave_thread( /*============================*/ THD* thd); /*!< in: thread handle */ -/******************************************************************//** -Gets information on the durability property requested by thread. -Used when writing either a prepare or commit record to the log -buffer. -@return the durability property. */ -UNIV_INTERN -enum durability_properties -thd_requested_durability( -/*=====================*/ - const THD* thd) /*!< in: thread handle */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); - /******************************************************************//** Returns true if the transaction this thread is processing has edited non-transactional tables. Used by the deadlock detector when deciding diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index a95e9c23613..a42f8f95083 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -1285,8 +1285,10 @@ rec_convert_dtuple_to_rec_comp( } } - memcpy(end, dfield_get_data(field), len); - end += len; + if (len) { + memcpy(end, dfield_get_data(field), len); + end += len; + } } } diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 9af3c7272fe..0f57878ace3 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -8202,8 +8202,8 @@ calc_row_difference( } } - if (o_len != n_len || (o_len != UNIV_SQL_NULL && - 0 != memcmp(o_ptr, n_ptr, o_len))) { + if (o_len != n_len || (o_len != 0 && o_len != UNIV_SQL_NULL + && 0 != memcmp(o_ptr, n_ptr, o_len))) { /* The field has changed */ ufield = uvect->fields + n_changed; diff --git a/storage/xtradb/handler/ha_innodb.h b/storage/xtradb/handler/ha_innodb.h index 38e45647a7c..faf1e5475b5 100644 --- a/storage/xtradb/handler/ha_innodb.h +++ b/storage/xtradb/handler/ha_innodb.h @@ -484,7 +484,7 @@ innobase_index_name_is_reserved( const KEY* key_info, /*!< in: Indexes to be created */ ulint num_of_keys) /*!< in: Number of indexes to be created. */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((nonnull(1), warn_unused_result)); /*****************************************************************//** Determines InnoDB table flags. 
diff --git a/storage/xtradb/handler/handler0alter.cc b/storage/xtradb/handler/handler0alter.cc index 3d20c9abccf..f60d9938c39 100644 --- a/storage/xtradb/handler/handler0alter.cc +++ b/storage/xtradb/handler/handler0alter.cc @@ -2278,10 +2278,10 @@ online_retry_drop_indexes_with_trx( @param drop_fk constraints being dropped @param n_drop_fk number of constraints that are being dropped @return whether the constraint is being dropped */ -inline MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1), warn_unused_result)) +inline bool innobase_dropping_foreign( -/*======================*/ const dict_foreign_t* foreign, dict_foreign_t** drop_fk, ulint n_drop_fk) @@ -2305,10 +2305,10 @@ column that is being dropped or modified to NOT NULL. @retval true Not allowed (will call my_error()) @retval false Allowed */ -static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,4), warn_unused_result)) +static bool innobase_check_foreigns_low( -/*========================*/ const dict_table_t* user_table, dict_foreign_t** drop_fk, ulint n_drop_fk, @@ -2405,10 +2405,10 @@ column that is being dropped or modified to NOT NULL. @retval true Not allowed (will call my_error()) @retval false Allowed */ -static MY_ATTRIBUTE((pure, nonnull, warn_unused_result)) +MY_ATTRIBUTE((pure, nonnull(1,2,3,4), warn_unused_result)) +static bool innobase_check_foreigns( -/*====================*/ Alter_inplace_info* ha_alter_info, const TABLE* altered_table, const TABLE* old_table, diff --git a/storage/xtradb/include/ha_prototypes.h b/storage/xtradb/include/ha_prototypes.h index b1975287f23..1dfbfe7c8fb 100644 --- a/storage/xtradb/include/ha_prototypes.h +++ b/storage/xtradb/include/ha_prototypes.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -143,7 +144,7 @@ enum durability_properties thd_requested_durability( /*=====================*/ const THD* thd) /*!< in: thread handle */ - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /******************************************************************//** Returns true if the transaction this thread is processing has edited diff --git a/storage/xtradb/rem/rem0rec.cc b/storage/xtradb/rem/rem0rec.cc index c6d9ba935e1..1316c4f35c6 100644 --- a/storage/xtradb/rem/rem0rec.cc +++ b/storage/xtradb/rem/rem0rec.cc @@ -1290,8 +1290,10 @@ rec_convert_dtuple_to_rec_comp( } } - memcpy(end, dfield_get_data(field), len); - end += len; + if (len) { + memcpy(end, dfield_get_data(field), len); + end += len; + } } } -- cgit v1.2.1 From f302a3cf9d9de48cae660641ec9695412f74b80e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Fri, 28 Apr 2017 10:07:03 +0300 Subject: MDEV-12593: InnoDB page compression should use lz4_compress_default if available lz4.cmake: Check if shared or static lz4 library has LZ4_compress_default function and if it has define HAVE_LZ4_COMPRESS_DEFAULT. fil_compress_page: If HAVE_LZ4_COMPRESS_DEFAULT is defined use LZ4_compress_default function for compression if not use LZ4_compress_limitedOutput function. Introduced a innodb-page-compression.inc file for page compression tests that will also search .ibd file to verify that pages are compressed (i.e. 
used search string is not found). Modified page compression tests to use this file. Note that snappy method is not included because of MDEV-12615 InnoDB page compression method snappy mostly does not compress pages that will be fixed on different commit. --- storage/innobase/fil/fil0pagecompress.cc | 5 +++++ storage/xtradb/fil/fil0pagecompress.cc | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'storage') diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc index 8b2449983df..c377c19dd0d 100644 --- a/storage/innobase/fil/fil0pagecompress.cc +++ b/storage/innobase/fil/fil0pagecompress.cc @@ -163,8 +163,13 @@ fil_compress_page( switch(comp_method) { #ifdef HAVE_LZ4 case PAGE_LZ4_ALGORITHM: +#ifdef HAVE_LZ4_COMPRESS_DEFAULT + err = LZ4_compress_default((const char *)buf, + (char *)out_buf+header_len, len, write_size); +#else err = LZ4_compress_limitedOutput((const char *)buf, (char *)out_buf+header_len, len, write_size); +#endif /* HAVE_LZ4_COMPRESS_DEFAULT */ write_size = err; if (err == 0) { diff --git a/storage/xtradb/fil/fil0pagecompress.cc b/storage/xtradb/fil/fil0pagecompress.cc index 8b2449983df..c377c19dd0d 100644 --- a/storage/xtradb/fil/fil0pagecompress.cc +++ b/storage/xtradb/fil/fil0pagecompress.cc @@ -163,8 +163,13 @@ fil_compress_page( switch(comp_method) { #ifdef HAVE_LZ4 case PAGE_LZ4_ALGORITHM: +#ifdef HAVE_LZ4_COMPRESS_DEFAULT + err = LZ4_compress_default((const char *)buf, + (char *)out_buf+header_len, len, write_size); +#else err = LZ4_compress_limitedOutput((const char *)buf, (char *)out_buf+header_len, len, write_size); +#endif /* HAVE_LZ4_COMPRESS_DEFAULT */ write_size = err; if (err == 0) { -- cgit v1.2.1 From 3c7af6c4904d11b324f911a7a7149f17d3f86ca7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Thu, 18 May 2017 15:46:31 +0300 Subject: Fix xtradb handler compilation post merge --- storage/xtradb/handler/ha_innodb.cc | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'storage') diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index da486507cfc..d0eef98f064 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -768,6 +768,31 @@ innobase_purge_changed_page_bitmaps( /*================================*/ ulonglong lsn) __attribute__((unused)); /*!< in: LSN to purge files up to */ +/** Empty free list algorithm. +Checks if buffer pool is big enough to enable backoff algorithm. +InnoDB empty free list algorithm backoff requires free pages +from LRU for the best performance. +buf_LRU_buf_pool_running_out cancels query if 1/4 of +buffer pool belongs to LRU or freelist. +At the same time buf_flush_LRU_list_batch +keeps up to BUF_LRU_MIN_LEN in LRU. +In order to avoid deadlock baclkoff requires buffer pool +to be at least 4*BUF_LRU_MIN_LEN, +but flush peformance is bad because of trashing +and additional BUF_LRU_MIN_LEN pages are requested. +@param[in] algorithm desired algorithm from srv_empty_free_list_t +@return true if it's possible to enable backoff. */ +static inline +bool +innodb_empty_free_list_algorithm_allowed( + srv_empty_free_list_t algorithm) +{ + long long buf_pool_pages = srv_buf_pool_size / srv_page_size + / srv_buf_pool_instances; + + return(buf_pool_pages >= BUF_LRU_MIN_LEN * (4 + 1) + || algorithm != SRV_EMPTY_FREE_LIST_BACKOFF); +} /** Get the list of foreign keys referencing a specified table table. 
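The restored helper above encodes a simple sizing rule: the backoff variant of the empty free list algorithm needs at least five times BUF_LRU_MIN_LEN pages per buffer pool instance. A rough standalone version of that check, with assumed values (BUF_LRU_MIN_LEN taken as 256, a 16 KiB page size, and an example configuration; all three are assumptions here, not values stated in the patch):

#include <cstdio>

int main()
{
        const long long BUF_LRU_MIN_LEN        = 256;             /* assumed */
        const long long srv_page_size          = 16 * 1024;       /* assumed */
        const long long srv_buf_pool_instances = 8;               /* example */
        const long long srv_buf_pool_size      = 128LL * 1024 * 1024;

        long long pages = srv_buf_pool_size / srv_page_size
                / srv_buf_pool_instances;
        bool allowed = pages >= BUF_LRU_MIN_LEN * (4 + 1);

        std::printf("pages per instance: %lld, backoff allowed: %s\n",
                    pages, allowed ? "yes" : "no");
        return 0;
}

With these numbers each instance gets 1024 pages, below the 1280-page threshold, so backoff would be refused for such a configuration.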
-- cgit v1.2.1 From 546a89ca58887ea0d89794f108bc864e05c2f74f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Thu, 18 May 2017 16:16:18 +0300 Subject: Update xtradb and innodb version to 5.6.36 --- storage/innobase/include/univ.i | 2 +- storage/xtradb/include/univ.i | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index e310799b7c6..15147f87115 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -45,7 +45,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 35 +#define INNODB_VERSION_BUGFIX 36 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; diff --git a/storage/xtradb/include/univ.i b/storage/xtradb/include/univ.i index a82faa76eef..f554c50ff83 100644 --- a/storage/xtradb/include/univ.i +++ b/storage/xtradb/include/univ.i @@ -45,7 +45,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 6 -#define INNODB_VERSION_BUGFIX 35 +#define INNODB_VERSION_BUGFIX 36 #ifndef PERCONA_INNODB_VERSION #define PERCONA_INNODB_VERSION 82.0 -- cgit v1.2.1 From 4a846e018d2e31a7c78e45fb40b865246b5abb11 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 18 May 2017 19:31:44 +0200 Subject: Make IF clear. --- storage/maria/ma_loghandler.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) (limited to 'storage') diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c index bf2205f5a4c..16cd0a09af5 100644 --- a/storage/maria/ma_loghandler.c +++ b/storage/maria/ma_loghandler.c @@ -7814,8 +7814,24 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, translog_force_current_buffer_to_finish(); translog_buffer_unlock(buffer); } - else if (log_descriptor.bc.buffer->prev_last_lsn != LSN_IMPOSSIBLE) + else { + if (log_descriptor.bc.buffer->last_lsn == LSN_IMPOSSIBLE) + { + /* + In this case both last_lsn & prev_last_lsn are LSN_IMPOSSIBLE + otherwise it will go in the first IF because LSN_IMPOSSIBLE less + then any real LSN and cmp_translog_addr(*lsn, + log_descriptor.bc.buffer->prev_last_lsn) will be TRUE + */ + DBUG_ASSERT(log_descriptor.bc.buffer->prev_last_lsn == + LSN_IMPOSSIBLE); + DBUG_PRINT("info", ("There is no LSNs yet generated => do nothing")); + translog_unlock(); + DBUG_VOID_RETURN; + } + + DBUG_ASSERT(log_descriptor.bc.buffer->prev_last_lsn != LSN_IMPOSSIBLE); /* fix lsn if it was horizon */ *lsn= log_descriptor.bc.buffer->prev_last_lsn; DBUG_PRINT("info", ("LSN to flush fixed to prev last lsn: (%lu,0x%lx)", @@ -7824,13 +7840,6 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn, TRANSLOG_BUFFERS_NO); translog_unlock(); } - else if (log_descriptor.bc.buffer->last_lsn == LSN_IMPOSSIBLE) - { - DBUG_PRINT("info", ("There is no LSNs yet generated => do nothing")); - translog_unlock(); - DBUG_VOID_RETURN; - } - /* flush buffers */ *sent_to_disk= translog_get_sent_to_disk(); if (cmp_translog_addr(*lsn, *sent_to_disk) > 0) -- cgit v1.2.1 From 335c4ab790254462753ba4ed8b074c5847b2708f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 19 May 2017 09:51:44 +0300 Subject: Remove dead code added in merge commit d8b45b0c004edc0b91029b232d7cc9aad02cc822 In a merge conflict resolution, callers of the added function rec_field_len_in_chars() were removed, but the function itself was not. 
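One recurring defect class in the hunks that follow, visible for example in the ha_myisam.cc change, is a pointer kept to a stack buffer that has already gone out of scope. A distilled, hypothetical example of the broken and fixed shapes (not engine code, only loosely modelled on the ha_myisam.cc hunk):

#include <cstdio>

static void check_result(int error)
{
        const char* errmsg = NULL;
        char buf[64];                     /* fixed: same lifetime as errmsg */

        if (error) {
                /* broken shape flagged by Coverity:
                 *     { char buf[64]; ...; errmsg = buf; }
                 * buf dies at the closing brace and errmsg dangles */
                std::snprintf(buf, sizeof buf,
                              "Failed to flush to index file (errno: %d)",
                              error);
                errmsg = buf;
        }

        std::puts(errmsg ? errmsg : "OK");
}

int main()
{
        check_result(13);
        check_result(0);
        return 0;
}

Hoisting the buffer to the enclosing scope, as the patch does, keeps errmsg valid for as long as it is read.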
--- storage/xtradb/row/row0sel.cc | 24 ------------------------ 1 file changed, 24 deletions(-) (limited to 'storage') diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 17920c91743..b0c6565ab2e 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -3646,30 +3646,6 @@ row_search_idx_cond_check( return(result); } - -/** Return the record field length in characters. -@param[in] col table column of the field -@param[in] field_no field number -@param[in] rec physical record -@param[in] offsets field offsets in the physical record - -@return field length in characters */ -static -size_t -rec_field_len_in_chars(const dict_col_t &col, - const ulint field_no, - const rec_t *rec, - const ulint *offsets) -{ - const ulint cset = dtype_get_charset_coll(col.prtype); - const CHARSET_INFO* cs = all_charsets[cset]; - ulint rec_field_len; - const char* rec_field = reinterpret_cast( - rec_get_nth_field( - rec, offsets, field_no, &rec_field_len)); - return(cs->cset->numchars(cs, rec_field, rec_field + rec_field_len)); -} - /********************************************************************//** Searches for rows in the database. This is used in the interface to MySQL. This function opens a cursor, and also implements fetch next -- cgit v1.2.1 From 7c03edf2fe66855a8ce8f2575c3aaf66af975377 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 17 May 2017 15:16:24 +0200 Subject: MDEV-6262 analyze the coverity report on mariadb uploaded 10.0, analyzed everything with the Impact=High (and a couple of Medium) --- storage/csv/ha_tina.cc | 15 ++++++++------- storage/maria/ma_check.c | 1 + storage/maria/ma_loghandler.c | 28 +++++++++++++++++----------- storage/maria/ma_packrec.c | 2 +- storage/maria/ma_recovery.c | 3 +-- storage/myisam/ha_myisam.cc | 2 +- storage/myisam/mi_open.c | 2 +- storage/xtradb/buf/buf0dump.cc | 1 + storage/xtradb/dict/dict0mem.cc | 4 ++-- storage/xtradb/handler/ha_innodb.cc | 2 +- storage/xtradb/log/log0online.cc | 1 + storage/xtradb/srv/srv0srv.cc | 3 ++- 12 files changed, 37 insertions(+), 27 deletions(-) (limited to 'storage') diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 35596a59c86..a8ae617a588 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -1484,13 +1484,13 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) /* Don't assert in field::val() functions */ table->use_all_columns(); - if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* position buffer to the start of the file */ if (init_data_file()) DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR); + if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* Local_saved_data_file_length is initialized during the lock phase. Sometimes this is not getting executed before ::repair (e.g. for @@ -1574,9 +1574,9 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) DBUG_RETURN(my_errno ? 
my_errno : -1); share->tina_write_opened= FALSE; } - if (mysql_file_close(data_file, MYF(0)) || - mysql_file_close(repair_file, MYF(0)) || - mysql_file_rename(csv_key_file_data, + mysql_file_close(data_file, MYF(0)); + mysql_file_close(repair_file, MYF(0)); + if (mysql_file_rename(csv_key_file_data, repaired_fname, share->data_file_name, MYF(0))) DBUG_RETURN(-1); @@ -1698,13 +1698,14 @@ int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt) DBUG_ENTER("ha_tina::check"); old_proc_info= thd_proc_info(thd, "Checking table"); - if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* position buffer to the start of the file */ if (init_data_file()) DBUG_RETURN(HA_ERR_CRASHED); + if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + /* Local_saved_data_file_length is initialized during the lock phase. Check does not use store_lock in certain cases. So, we set it diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index bd5c67c0409..851f21888d8 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -4215,6 +4215,7 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info, printf("Data records: %s\n", llstr(start_records, llbuff)); } + bzero(&new_data_cache, sizeof(new_data_cache)); if (initialize_variables_for_repair(param, &sort_info, &tmp_sort_param, info, rep_quick, &backup_share)) goto err; diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c index 86a8970d7a7..096d14f2d57 100644 --- a/storage/maria/ma_loghandler.c +++ b/storage/maria/ma_loghandler.c @@ -953,6 +953,7 @@ static File create_logfile_by_number_no_cache(uint32 file_no) { DBUG_PRINT("error", ("Error %d during syncing directory '%s'", errno, log_descriptor.directory)); + mysql_file_close(file, MYF(0)); translog_stop_writing(); DBUG_RETURN(-1); } @@ -1454,17 +1455,16 @@ LSN translog_get_file_max_lsn_stored(uint32 file) if (translog_read_file_header(&info, fd)) { DBUG_PRINT("error", ("Can't read file header")); - DBUG_RETURN(LSN_ERROR); + info.max_lsn= LSN_ERROR; } if (mysql_file_close(fd, MYF(MY_WME))) { DBUG_PRINT("error", ("Can't close file")); - DBUG_RETURN(LSN_ERROR); + info.max_lsn= LSN_ERROR; } - DBUG_PRINT("info", ("Max lsn: (%lu,0x%lx)", - LSN_IN_PARTS(info.max_lsn))); + DBUG_PRINT("info", ("Max lsn: (%lu,0x%lx)", LSN_IN_PARTS(info.max_lsn))); DBUG_RETURN(info.max_lsn); } } @@ -1638,13 +1638,15 @@ static my_bool translog_create_new_file() if (allocate_dynamic(&log_descriptor.open_files, log_descriptor.max_file - log_descriptor.min_file + 2)) goto error_lock; - if ((file->handler.file= - create_logfile_by_number_no_cache(file_no)) == -1) + + /* this call just expand the array */ + if (insert_dynamic(&log_descriptor.open_files, (uchar*)&file)) + goto error_lock; + + if ((file->handler.file= create_logfile_by_number_no_cache(file_no)) == -1) goto error_lock; translog_file_init(file, file_no, 0); - /* this call just expand the array */ - insert_dynamic(&log_descriptor.open_files, (uchar*)&file); log_descriptor.max_file++; { char *start= (char*) dynamic_element(&log_descriptor.open_files, 0, @@ -1678,6 +1680,7 @@ error_lock: mysql_rwlock_unlock(&log_descriptor.open_files_lock); error: translog_stop_writing(); + my_free(file); DBUG_RETURN(1); } @@ -3985,11 +3988,14 @@ my_bool translog_init_with_table(const char *directory, /* Start new log system from scratch */ log_descriptor.horizon= MAKE_LSN(start_file_num, TRANSLOG_PAGE_SIZE); /* header page */ - if ((file->handler.file= - 
create_logfile_by_number_no_cache(start_file_num)) == -1) - goto err; translog_file_init(file, start_file_num, 0); if (insert_dynamic(&log_descriptor.open_files, (uchar*)&file)) + { + my_free(file); + goto err; + } + if ((file->handler.file= + create_logfile_by_number_no_cache(start_file_num)) == -1) goto err; log_descriptor.min_file= log_descriptor.max_file= start_file_num; if (translog_write_file_header()) diff --git a/storage/maria/ma_packrec.c b/storage/maria/ma_packrec.c index 6a4e7ea99cf..4127c4f5fcf 100644 --- a/storage/maria/ma_packrec.c +++ b/storage/maria/ma_packrec.c @@ -1445,7 +1445,7 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff, maria->blob_length=info->blob_len; } info->filepos=filepos+head_length; - if (file > 0) + if (file >= 0) { info->offset=MY_MIN(info->rec_len, ref_length - head_length); memcpy(*rec_buff_p, header + head_length, info->offset); diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c index a09662544a2..5a36c9db8ca 100644 --- a/storage/maria/ma_recovery.c +++ b/storage/maria/ma_recovery.c @@ -1988,7 +1988,7 @@ prototype_redo_exec_hook(UNDO_KEY_INSERT) const HA_KEYSEG *keyseg= info->s->keyinfo[keynr].seg; ulonglong value; char llbuf[22]; - uchar *to; + uchar reversed[MARIA_MAX_KEY_BUFF], *to; tprint(tracef, " state older than record\n"); /* we read the record to find the auto_increment value */ enlarge_buffer(rec); @@ -2005,7 +2005,6 @@ prototype_redo_exec_hook(UNDO_KEY_INSERT) if (keyseg->flag & HA_SWAP_KEY) { /* We put key from log record to "data record" packing format... */ - uchar reversed[MARIA_MAX_KEY_BUFF]; uchar *key_ptr= to; uchar *key_end= key_ptr + keyseg->length; to= reversed + keyseg->length; diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 2cfaa5ebdcc..8ec3c3681e9 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -1248,6 +1248,7 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) { KEY_CACHE *new_key_cache= check_opt->key_cache; const char *errmsg= 0; + char buf[STRING_BUFFER_USUAL_SIZE]; int error= HA_ADMIN_OK; ulonglong map; TABLE_LIST *table_list= table->pos_in_table_list; @@ -1264,7 +1265,6 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) if ((error= mi_assign_to_key_cache(file, map, new_key_cache))) { - char buf[STRING_BUFFER_USUAL_SIZE]; my_snprintf(buf, sizeof(buf), "Failed to flush to index file (errno: %d)", error); errmsg= buf; diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index bdb2fdf8447..7e846fc262c 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -151,7 +151,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) } share->mode=open_mode; errpos=1; - if (mysql_file_read(kfile, share->state.header.file_version, head_length, + if (mysql_file_read(kfile, (uchar*)&share->state.header, head_length, MYF(MY_NABP))) { my_errno= HA_ERR_NOT_A_TABLE; diff --git a/storage/xtradb/buf/buf0dump.cc b/storage/xtradb/buf/buf0dump.cc index 5f83d401f98..51c41cc1b78 100644 --- a/storage/xtradb/buf/buf0dump.cc +++ b/storage/xtradb/buf/buf0dump.cc @@ -604,6 +604,7 @@ buf_load() if (dump_n == 0) { ut_free(dump); + ut_free(dump_tmp); ut_sprintf_timestamp(now); buf_load_status(STATUS_NOTICE, "Buffer pool(s) load completed at %s " diff --git a/storage/xtradb/dict/dict0mem.cc b/storage/xtradb/dict/dict0mem.cc index ee6de30cd40..0f48c7c69e3 100644 --- a/storage/xtradb/dict/dict0mem.cc +++ b/storage/xtradb/dict/dict0mem.cc @@ -321,8 +321,8 @@ 
dict_mem_table_col_rename_low( ut_ad(from_len <= NAME_LEN); ut_ad(to_len <= NAME_LEN); - char from[NAME_LEN]; - strncpy(from, s, NAME_LEN); + char from[NAME_LEN + 1]; + strncpy(from, s, NAME_LEN + 1); if (from_len == to_len) { /* The easy case: simply replace the column name in diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index d0eef98f064..7d1ed3da5fd 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -3219,13 +3219,13 @@ innobase_convert_identifier( ibool file_id)/*!< in: TRUE=id is a table or database name; FALSE=id is an UTF-8 string */ { + char nz2[MAX_TABLE_NAME_LEN + 1]; const char* s = id; int q; if (file_id) { char nz[MAX_TABLE_NAME_LEN + 1]; - char nz2[MAX_TABLE_NAME_LEN + 1]; /* Decode the table name. The MySQL function expects a NUL-terminated string. The input and output strings diff --git a/storage/xtradb/log/log0online.cc b/storage/xtradb/log/log0online.cc index 3cd1412098d..ee5136376fa 100644 --- a/storage/xtradb/log/log0online.cc +++ b/storage/xtradb/log/log0online.cc @@ -1453,6 +1453,7 @@ log_online_setup_bitmap_file_range( if (UNIV_UNLIKELY(array_pos >= bitmap_files->count)) { log_online_diagnose_inconsistent_dir(bitmap_files); + os_file_closedir(bitmap_dir); return FALSE; } diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 22d7312643b..bd1bc2c7131 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -2483,7 +2483,8 @@ purge_archived_logs( if (dirnamelen + strlen(fileinfo.name) + 2 > OS_FILE_MAX_PATH) continue; - snprintf(archived_log_filename + dirnamelen, OS_FILE_MAX_PATH, + snprintf(archived_log_filename + dirnamelen, + OS_FILE_MAX_PATH - dirnamelen - 1, "%s", fileinfo.name); if (before_no) { -- cgit v1.2.1 From 0bfa3dff8b95ed8b8bd133e434b47189857121ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 16 May 2017 12:07:26 +0300 Subject: MDEV-12698 innodb.innodb_stats_del_mark test failure In my merge of the MySQL fix for Oracle Bug#23333990 / WL#9513 I overlooked some subsequent revisions to the test, and I also failed to notice that the test is actually always failing. Oracle introduced the parameter innodb_stats_include_delete_marked but failed to consistently take it into account in FOREIGN KEY constraints that involve CASCADE or SET NULL. When innodb_stats_include_delete_marked=ON, obviously the purge of delete-marked records should update the statistics as well. One more omission was that statistics were never updated on ROLLBACK. We are fixing that as well, properly taking into account the parameter innodb_stats_include_delete_marked. dict_stats_analyze_index_level(): Simplify an expression. (Using the ternary operator with a constant operand is unnecessary obfuscation.) page_scan_method_t: Revert the change done by Oracle. Instead, examine srv_stats_include_delete_marked directly where it is needed. dict_stats_update_if_needed(): Renamed from row_update_statistics_if_needed(). row_update_for_mysql_using_upd_graph(): Assert that the table statistics are initialized, as guaranteed by ha_innobase::open(). Update the statistics in a consistent way, both for FOREIGN KEY triggers and for the main table. If FOREIGN KEY constraints exist, do not dereference a freed pointer, but cache the proper value of node->is_delete so that it matches prebuilt->table. row_purge_record_func(): Update statistics if innodb_stats_include_delete_marked=ON. 
row_undo_ins(): Update statistics (on ROLLBACK of a fresh INSERT). This is independent of the parameter; the record is not delete-marked. row_undo_mod(): Update statistics on the ROLLBACK of updating key columns, or (if innodb_stats_include_delete_marked=OFF) updating delete-marks. innodb.innodb_stats_persistent: Renamed and extended from innodb.innodb_stats_del_mark. Reduced the unnecessarily large dataset from 262,144 to 32 rows. Test both values of the configuration parameter innodb_stats_include_delete_marked. Test that purge is updating the statistics. innodb_fts.innodb_fts_multiple_index: Adjust the result. The test is performing a ROLLBACK of an INSERT, which now affects the statistics. include/wait_all_purged.inc: Moved from innodb.innodb_truncate_debug to its own file. --- storage/innobase/dict/dict0stats.cc | 37 +++++------- storage/innobase/dict/dict0stats_bg.cc | 39 +++++++++++++ storage/innobase/include/dict0stats.h | 7 +++ storage/innobase/include/dict0stats_bg.h | 11 ---- storage/innobase/include/row0mysql.h | 1 + storage/innobase/row/row0mysql.cc | 96 ++++++++++---------------------- storage/innobase/row/row0purge.cc | 10 +++- storage/innobase/row/row0uins.cc | 18 ++++++ storage/innobase/row/row0umod.cc | 34 ++++++++++- 9 files changed, 148 insertions(+), 105 deletions(-) (limited to 'storage') diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 54988b910d8..9350b5d400d 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -1159,10 +1159,11 @@ dict_stats_analyze_index_level( leaf-level delete marks because delete marks on non-leaf level do not make sense. */ - if (level == 0 && (srv_stats_include_delete_marked ? 0: - rec_get_deleted_flag( + if (level == 0 + && !srv_stats_include_delete_marked + && rec_get_deleted_flag( rec, - page_is_comp(btr_pcur_get_page(&pcur))))) { + page_is_comp(btr_pcur_get_page(&pcur)))) { if (rec_is_last_on_page && !prev_rec_is_copied @@ -1336,16 +1337,11 @@ dict_stats_analyze_index_level( /* aux enum for controlling the behavior of dict_stats_scan_page() @{ */ enum page_scan_method_t { - COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED,/* scan all records on - the given page and count the number of - distinct ones, also ignore delete marked - records */ - QUIT_ON_FIRST_NON_BORING,/* quit when the first record that differs - from its right neighbor is found */ - COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED/* scan all records on - the given page and count the number of - distinct ones, include delete marked - records */ + /** scan the records on the given page, counting the number + of distinct ones; @see srv_stats_include_delete_marked */ + COUNT_ALL_NON_BORING, + /** quit on the first record that differs from its right neighbor */ + QUIT_ON_FIRST_NON_BORING }; /* @} */ @@ -1392,13 +1388,10 @@ dict_stats_scan_page( Because offsets1,offsets2 should be big enough, this memory heap should never be used. */ mem_heap_t* heap = NULL; - const rec_t* (*get_next)(const rec_t*); - - if (scan_method == COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED) { - get_next = page_rec_get_next_non_del_marked; - } else { - get_next = page_rec_get_next_const; - } + const rec_t* (*get_next)(const rec_t*) + = srv_stats_include_delete_marked + ? 
page_rec_get_next_const + : page_rec_get_next_non_del_marked; const bool should_count_external_pages = n_external_pages != NULL; @@ -1618,9 +1611,7 @@ dict_stats_analyze_index_below_cur( offsets_rec = dict_stats_scan_page( &rec, offsets1, offsets2, index, page, n_prefix, - srv_stats_include_delete_marked ? - COUNT_ALL_NON_BORING_INCLUDE_DEL_MARKED: - COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, n_diff, + COUNT_ALL_NON_BORING, n_diff, n_external_pages); #if 0 diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc index 876d1bcb342..f3482126d2d 100644 --- a/storage/innobase/dict/dict0stats_bg.cc +++ b/storage/innobase/dict/dict0stats_bg.cc @@ -120,6 +120,7 @@ background stats gathering thread. Only the table id is added to the list, so the table can be closed after being enqueued and it will be opened when needed. If the table does not exist later (has been DROPped), then it will be removed from the pool and skipped. */ +static void dict_stats_recalc_pool_add( /*=======================*/ @@ -147,6 +148,44 @@ dict_stats_recalc_pool_add( os_event_set(dict_stats_event); } +/** Update the table modification counter and if necessary, +schedule new estimates for table and index statistics to be calculated. +@param[in,out] table persistent or temporary table */ +void +dict_stats_update_if_needed(dict_table_t* table) +{ + ut_ad(table->stat_initialized); + ut_ad(!mutex_own(&dict_sys->mutex)); + + ulonglong counter = table->stat_modified_counter++; + ulonglong n_rows = dict_table_get_n_rows(table); + + if (dict_stats_is_persistent_enabled(table)) { + if (counter > n_rows / 10 /* 10% */ + && dict_stats_auto_recalc_is_enabled(table)) { + + dict_stats_recalc_pool_add(table); + table->stat_modified_counter = 0; + } + return; + } + + /* Calculate new statistics if 1 / 16 of table has been modified + since the last time a statistics batch was run. + We calculate statistics at most every 16th round, since we may have + a counter table which is very small and updated very often. */ + ulonglong threshold = 16 + n_rows / 16; /* 6.25% */ + + if (srv_stats_modified_counter) { + threshold = std::min(srv_stats_modified_counter, threshold); + } + + if (counter > threshold) { + /* this will reset table->stat_modified_counter to 0 */ + dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT); + } +} + /*****************************************************************//** Get a table from the auto recalc pool. The returned table id is removed from the pool. diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h index 752c197f8c3..8846aeda7fd 100644 --- a/storage/innobase/include/dict0stats.h +++ b/storage/innobase/include/dict0stats.h @@ -110,6 +110,13 @@ dict_stats_deinit( dict_table_t* table) /*!< in/out: table */ MY_ATTRIBUTE((nonnull)); +/** Update the table modification counter and if necessary, +schedule new estimates for table and index statistics to be calculated. +@param[in,out] table persistent or temporary table */ +void +dict_stats_update_if_needed(dict_table_t* table) + MY_ATTRIBUTE((nonnull)); + /*********************************************************************//** Calculates new estimates for table and index statistics. The statistics are used in query optimization. 
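The recalculation thresholds behind dict_stats_update_if_needed() can be summarized by the following minimal sketch (an illustration only, not the patched function: the free-function form and parameter list are assumed for readability, and the real code also enqueues the table for the background statistics thread and resets the modification counter):

    #include <algorithm>

    typedef unsigned long long ulonglong;

    /* Sketch: should a statistics recalculation be triggered after
       "counter" modifications on a table of "n_rows" rows? */
    static bool stats_recalc_due(ulonglong counter, ulonglong n_rows,
                                 bool persistent_stats, bool auto_recalc,
                                 ulonglong srv_stats_modified_counter)
    {
        if (persistent_stats) {
            /* Persistent statistics: schedule a background recalculation
               once more than 10% of the rows have been modified. */
            return auto_recalc && counter > n_rows / 10;
        }

        /* Transient statistics: recalculate after roughly 1/16 of the
           table has changed, but at least after 16 modifications, with an
           optional cap from srv_stats_modified_counter. */
        ulonglong threshold = 16 + n_rows / 16;

        if (srv_stats_modified_counter) {
            threshold = std::min(srv_stats_modified_counter, threshold);
        }

        return counter > threshold;
    }
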
diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h index b7bf1b0c170..b0570d21ecb 100644 --- a/storage/innobase/include/dict0stats_bg.h +++ b/storage/innobase/include/dict0stats_bg.h @@ -46,17 +46,6 @@ extern mysql_pfs_key_t dict_stats_recalc_pool_mutex_key; extern my_bool innodb_dict_stats_disabled_debug; #endif /* UNIV_DEBUG */ -/*****************************************************************//** -Add a table to the recalc pool, which is processed by the -background stats gathering thread. Only the table id is added to the -list, so the table can be closed after being enqueued and it will be -opened when needed. If the table does not exist later (has been DROPped), -then it will be removed from the pool and skipped. */ -void -dict_stats_recalc_pool_add( -/*=======================*/ - const dict_table_t* table); /*!< in: table to add */ - /*****************************************************************//** Delete a given table from the auto recalc pool. dict_stats_recalc_pool_del() */ diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h index 7507c96ea5f..6164366628e 100644 --- a/storage/innobase/include/row0mysql.h +++ b/storage/innobase/include/row0mysql.h @@ -204,6 +204,7 @@ row_update_prebuilt_trx( row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct in MySQL handle */ trx_t* trx); /*!< in: transaction handle */ + /*********************************************************************//** Sets an AUTO_INC type lock on the table mentioned in prebuilt. The AUTO_INC lock gives exclusive access to the auto-inc counter of the diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 4062103b1e8..4db99024c9c 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1196,58 +1196,6 @@ row_get_prebuilt_insert_row( return(prebuilt->ins_node->row); } -/*********************************************************************//** -Updates the table modification counter and calculates new estimates -for table and index statistics if necessary. */ -UNIV_INLINE -void -row_update_statistics_if_needed( -/*============================*/ - dict_table_t* table) /*!< in: table */ -{ - ib_uint64_t counter; - ib_uint64_t n_rows; - - if (!table->stat_initialized) { - DBUG_EXECUTE_IF( - "test_upd_stats_if_needed_not_inited", - fprintf(stderr, "test_upd_stats_if_needed_not_inited" - " was executed\n"); - ); - return; - } - - counter = table->stat_modified_counter++; - n_rows = dict_table_get_n_rows(table); - - if (dict_stats_is_persistent_enabled(table)) { - if (counter > n_rows / 10 /* 10% */ - && dict_stats_auto_recalc_is_enabled(table)) { - - dict_stats_recalc_pool_add(table); - table->stat_modified_counter = 0; - } - return; - } - - /* Calculate new statistics if 1 / 16 of table has been modified - since the last time a statistics batch was run. - We calculate statistics at most every 16th round, since we may have - a counter table which is very small and updated very often. 
*/ - ib_uint64_t threshold= 16 + n_rows / 16; /* 6.25% */ - - if (srv_stats_modified_counter) { - threshold= ut_min((ib_uint64_t)srv_stats_modified_counter, threshold); - } - - if (counter > threshold) { - - ut_ad(!mutex_own(&dict_sys->mutex)); - /* this will reset table->stat_modified_counter to 0 */ - dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT); - } -} - /*********************************************************************//** Sets an AUTO_INC type lock on the table mentioned in prebuilt. The AUTO_INC lock gives exclusive access to the auto-inc counter of the @@ -1649,7 +1597,7 @@ error_exit: ut_memcpy(prebuilt->row_id, node->row_id_buf, DATA_ROW_ID_LEN); } - row_update_statistics_if_needed(table); + dict_stats_update_if_needed(table); trx->op_info = ""; if (blob_heap != NULL) { @@ -1895,6 +1843,7 @@ row_update_for_mysql_using_upd_graph( ut_ad(trx); ut_a(prebuilt->magic_n == ROW_PREBUILT_ALLOCATED); ut_a(prebuilt->magic_n2 == ROW_PREBUILT_ALLOCATED); + ut_ad(table->stat_initialized); UT_NOT_USED(mysql_rec); if (!table->is_readable()) { @@ -1929,6 +1878,8 @@ row_update_for_mysql_using_upd_graph( } node = prebuilt->upd_node; + const bool is_delete = node->is_delete; + ut_ad(node->table == table); if (node->cascade_heap) { mem_heap_empty(node->cascade_heap); @@ -2099,8 +2050,11 @@ run_again: thr->fk_cascade_depth = 0; - /* Update the statistics only after completing all cascaded - operations */ + /* Update the statistics of each involved table + only after completing all operations, including + FOREIGN KEY...ON...CASCADE|SET NULL. */ + bool update_statistics; + for (upd_cascade_t::iterator i = processed_cascades->begin(); i != processed_cascades->end(); ++i) { @@ -2114,16 +2068,25 @@ run_again: than protecting the following code with a latch. */ dict_table_n_rows_dec(node->table); + update_statistics = !srv_stats_include_delete_marked; srv_stats.n_rows_deleted.inc(size_t(trx->id)); } else { + update_statistics + = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE); srv_stats.n_rows_updated.inc(size_t(trx->id)); } - row_update_statistics_if_needed(node->table); + if (update_statistics) { + dict_stats_update_if_needed(node->table); + } else { + /* Always update the table modification counter. */ + node->table->stat_modified_counter++; + } + que_graph_free_recursive(node); } - if (node->is_delete) { + if (is_delete) { /* Not protected by dict_table_stats_lock() for performance reasons, we would rather get garbage in stat_n_rows (which is just an estimate anyway) than protecting the following code @@ -2135,25 +2098,24 @@ run_again: } else { srv_stats.n_rows_deleted.inc(size_t(trx->id)); } + + update_statistics = !srv_stats_include_delete_marked; } else { if (table->is_system_db) { srv_stats.n_system_rows_updated.inc(size_t(trx->id)); } else { srv_stats.n_rows_updated.inc(size_t(trx->id)); } + + update_statistics + = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE); } - /* We update table statistics only if it is a DELETE or UPDATE - that changes indexed columns, UPDATEs that change only non-indexed - columns would not affect statistics. */ - if (node->is_delete || !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) { - row_update_statistics_if_needed(prebuilt->table); + if (update_statistics) { + dict_stats_update_if_needed(prebuilt->table); } else { - /* Update the table modification counter even when - non-indexed columns change if statistics is initialized. */ - if (prebuilt->table->stat_initialized) { - prebuilt->table->stat_modified_counter++; - } + /* Always update the table modification counter. 
*/ + prebuilt->table->stat_modified_counter++; } trx->op_info = ""; diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index e49fd7f0f8c..905a5b297a3 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -27,6 +27,7 @@ Created 3/14/1997 Heikki Tuuri #include "row0purge.h" #include "fsp0fsp.h" #include "mach0data.h" +#include "dict0stats.h" #include "trx0rseg.h" #include "trx0trx.h" #include "trx0roll.h" @@ -952,10 +953,13 @@ row_purge_record_func( switch (node->rec_type) { case TRX_UNDO_DEL_MARK_REC: purged = row_purge_del_mark(node); - if (!purged) { - break; + if (purged) { + if (node->table->stat_initialized + && srv_stats_include_delete_marked) { + dict_stats_update_if_needed(node->table); + } + MONITOR_INC(MONITOR_N_DEL_ROW_PURGE); } - MONITOR_INC(MONITOR_N_DEL_ROW_PURGE); break; default: if (!updated_extern) { diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc index 9288adb21a4..934b5ad5a7a 100644 --- a/storage/innobase/row/row0uins.cc +++ b/storage/innobase/row/row0uins.cc @@ -25,6 +25,7 @@ Created 2/25/1997 Heikki Tuuri #include "row0uins.h" #include "dict0dict.h" +#include "dict0stats.h" #include "dict0boot.h" #include "dict0crea.h" #include "trx0undo.h" @@ -508,6 +509,23 @@ row_undo_ins( mutex_exit(&dict_sys->mutex); } + + if (err == DB_SUCCESS && node->table->stat_initialized) { + /* Not protected by dict_table_stats_lock() for + performance reasons, we would rather get garbage + in stat_n_rows (which is just an estimate anyway) + than protecting the following code with a latch. */ + dict_table_n_rows_dec(node->table); + + /* Do not attempt to update statistics when + executing ROLLBACK in the InnoDB SQL + interpreter, because in that case we would + already be holding dict_sys->mutex, which + would be acquired when updating statistics. */ + if (!dict_locked) { + dict_stats_update_if_needed(node->table); + } + } } dict_table_close(node->table, dict_locked, FALSE); diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc index eefe9fb2bd8..8fa5c2ccbff 100644 --- a/storage/innobase/row/row0umod.cc +++ b/storage/innobase/row/row0umod.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -27,6 +28,7 @@ Created 2/27/1997 Heikki Tuuri #include "row0umod.h" #include "dict0dict.h" +#include "dict0stats.h" #include "dict0boot.h" #include "trx0undo.h" #include "trx0roll.h" @@ -1251,8 +1253,38 @@ row_undo_mod( } if (err == DB_SUCCESS) { - err = row_undo_mod_clust(node, thr); + + bool update_statistics + = !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE); + + if (err == DB_SUCCESS && node->table->stat_initialized) { + switch (node->rec_type) { + case TRX_UNDO_UPD_EXIST_REC: + break; + case TRX_UNDO_DEL_MARK_REC: + dict_table_n_rows_inc(node->table); + update_statistics = update_statistics + || !srv_stats_include_delete_marked; + break; + case TRX_UNDO_UPD_DEL_REC: + dict_table_n_rows_dec(node->table); + update_statistics = update_statistics + || !srv_stats_include_delete_marked; + break; + } + + /* Do not attempt to update statistics when + executing ROLLBACK in the InnoDB SQL + interpreter, because in that case we would + already be holding dict_sys->mutex, which + would be acquired when updating statistics. */ + if (update_statistics && !dict_locked) { + dict_stats_update_if_needed(node->table); + } else { + node->table->stat_modified_counter++; + } + } } dict_table_close(node->table, dict_locked, FALSE); -- cgit v1.2.1 From 907cbadb6f70798652e7b910698a55fc1adf82c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 18 May 2017 14:11:17 +0300 Subject: Fix some -Wimplicit-fallthrough warnings in InnoDB buf_read_ahead_linear(): Do not display a message if the tablespace is being deleted. dtype_print(): Add a missing break statement. --- storage/innobase/buf/buf0rea.cc | 7 +------ storage/innobase/data/data0type.cc | 1 + storage/innobase/fts/fts0plugin.cc | 1 + storage/innobase/include/buf0buf.ic | 3 +-- storage/innobase/srv/srv0srv.cc | 3 ++- 5 files changed, 6 insertions(+), 9 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 12775c74daf..789918c6a35 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -752,14 +752,9 @@ buf_read_ahead_linear( switch (err) { case DB_SUCCESS: case DB_TABLESPACE_TRUNCATED: + case DB_TABLESPACE_DELETED: case DB_ERROR: break; - case DB_TABLESPACE_DELETED: - ib::info() << "linear readahead trying to" - " access page " - << page_id_t(page_id.space(), i) - << " in nonexisting or being-dropped" - " tablespace"; case DB_DECRYPTION_FAILED: ib::error() << "linear readahead failed to" " decrypt page " diff --git a/storage/innobase/data/data0type.cc b/storage/innobase/data/data0type.cc index 315b12e135c..a40724b303d 100644 --- a/storage/innobase/data/data0type.cc +++ b/storage/innobase/data/data0type.cc @@ -199,6 +199,7 @@ dtype_print(const dtype_t* type) case DATA_VAR_POINT: fputs("DATA_VAR_POINT", stderr); + break; case DATA_GEOMETRY: fputs("DATA_GEOMETRY", stderr); diff --git a/storage/innobase/fts/fts0plugin.cc b/storage/innobase/fts/fts0plugin.cc index e78dcdacfb9..b7a05deeb34 100644 --- a/storage/innobase/fts/fts0plugin.cc +++ b/storage/innobase/fts/fts0plugin.cc @@ -130,6 +130,7 @@ fts_query_add_word_for_parser( if (cur_node->type != FTS_AST_PARSER_PHRASE_LIST) { break; } + /* fall through */ case FT_TOKEN_WORD: term_node = fts_ast_create_node_term_for_parser( diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic index 
f22dcc48a01..38c52d5e608 100644 --- a/storage/innobase/include/buf0buf.ic +++ b/storage/innobase/include/buf0buf.ic @@ -1286,9 +1286,8 @@ buf_page_release_zip( rw_lock_s_unlock(&block->debug_latch); } } - /* Fall through */ #endif /* UNIV_DEBUG */ - + /* Fall through */ case BUF_BLOCK_ZIP_PAGE: case BUF_BLOCK_ZIP_DIRTY: buf_block_unfix(reinterpret_cast(bpage)); diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 6e010fadc6d..e6fa0aa502c 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -2622,7 +2622,8 @@ srv_purge_should_exit( /* Normal operation. */ break; } - /* close_connections() was called; fall through */ + /* close_connections() was called */ + /* fall through */ case SRV_SHUTDOWN_CLEANUP: case SRV_SHUTDOWN_EXIT_THREADS: /* Exit unless slow shutdown requested or all done. */ -- cgit v1.2.1 From 6f54d04eb90f9dba3da58da81cc03d6642cd30d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 18 May 2017 14:13:07 +0300 Subject: Remove unnecessary conversions from integer to double --- storage/innobase/handler/i_s.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'storage') diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index ced45b96ea2..b423e6eccbc 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -6082,10 +6082,10 @@ i_s_dict_fill_sys_tables( OK(field_store_string(fields[SYS_TABLES_ROW_FORMAT], row_format)); - OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store(static_cast( + OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store( page_size.is_compressed() ? page_size.physical() - : 0))); + : 0, true)); OK(field_store_string(fields[SYS_TABLES_SPACE_TYPE], space_type)); @@ -6385,7 +6385,7 @@ i_s_dict_fill_sys_tablestats( OK(fields[SYS_TABLESTATS_AUTONINC]->store(table->autoinc, true)); - OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store(static_cast(ref_count))); + OK(fields[SYS_TABLESTATS_TABLE_REF_COUNT]->store(ref_count, true)); OK(schema_table_store_record(thd, table_to_fill)); @@ -7321,11 +7321,11 @@ i_s_dict_fill_sys_fields( fields = table_to_fill->field; - OK(fields[SYS_FIELD_INDEX_ID]->store((longlong) index_id, TRUE)); + OK(fields[SYS_FIELD_INDEX_ID]->store(index_id, true)); OK(field_store_string(fields[SYS_FIELD_NAME], field->name)); - OK(fields[SYS_FIELD_POS]->store(static_cast(pos))); + OK(fields[SYS_FIELD_POS]->store(pos, true)); OK(schema_table_store_record(thd, table_to_fill)); -- cgit v1.2.1 From 9756440fb5a48c4b450eb32a2ba06399ede4c408 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 18 May 2017 14:29:39 +0300 Subject: Fix a compilation warning introduced by MDEV-11782 log_crypt(): Do not cast byte* to uint32_t*, because it may break strict aliasing rules. Instead, cast in the opposite direction. 
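The aliasing direction mentioned above can be shown with a small standalone sketch (an illustration of the language rule only, not code from the patch; the helper write_bytes() is made up to stand in for a byte-oriented routine such as an encryption function):

    #include <cstdint>
    #include <cstring>

    typedef unsigned char byte;

    /* Stand-in for a byte-oriented API that fills a caller-provided
       output buffer. */
    static void write_bytes(byte* dst, std::size_t len)
    {
        std::memset(dst, 0xAA, len);
    }

    static uint32_t example()
    {
        /* Declare the storage with the type it will later be read as... */
        uint32_t dst[4];

        /* ...and cast towards byte* where a byte-oriented API needs it.
           Accessing an object through an unsigned char pointer is always
           allowed, so this direction does not violate strict aliasing. */
        write_bytes(reinterpret_cast<byte*>(dst), sizeof dst);

        /* Reading dst[0] is well defined: dst really is uint32_t storage.
           The problematic pattern is the opposite direction, i.e. declaring
           a byte buffer and then dereferencing it through a uint32_t*. */
        return dst[0];
    }
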
--- storage/innobase/log/log0crypt.cc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'storage') diff --git a/storage/innobase/log/log0crypt.cc b/storage/innobase/log/log0crypt.cc index 79301254a0a..9ddd20db163 100644 --- a/storage/innobase/log/log0crypt.cc +++ b/storage/innobase/log/log0crypt.cc @@ -120,7 +120,8 @@ log_crypt(byte* buf, ulint size, bool decrypt) for (const byte* const end = buf + size; buf != end; buf += OS_FILE_LOG_BLOCK_SIZE) { - byte dst[OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE]; + uint32_t dst[(OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE) + / sizeof(uint32_t)]; const ulint log_block_no = log_block_get_hdr_no(buf); /* The log block number is not encrypted. */ @@ -130,8 +131,7 @@ log_crypt(byte* buf, ulint size, bool decrypt) #else ~(LOG_BLOCK_FLUSH_BIT_MASK >> 24) #endif - & (*reinterpret_cast(dst) - = *reinterpret_cast( + & (*dst = *reinterpret_cast( buf + LOG_BLOCK_HDR_NO)); #if LOG_BLOCK_HDR_NO + 4 != LOG_CRYPT_HDR_SIZE # error "LOG_BLOCK_HDR_NO has been moved; redo log format affected!" @@ -143,7 +143,8 @@ log_crypt(byte* buf, ulint size, bool decrypt) log_block_no)); int rc = encryption_crypt( - buf + LOG_CRYPT_HDR_SIZE, sizeof dst, dst, &dst_len, + buf + LOG_CRYPT_HDR_SIZE, sizeof dst, + reinterpret_cast(dst), &dst_len, const_cast(info.crypt_key.bytes), sizeof info.crypt_key, reinterpret_cast(aes_ctr_iv), sizeof aes_ctr_iv, -- cgit v1.2.1 From 7edadde72eb23e9110db8cea810c023104e9d15e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 18 May 2017 15:20:57 +0300 Subject: Remove bogus __attribute__((nonnull)) These were reported when running tests after compiling the debug server with -O3 -fsanitize=undefined in GCC 7.1.0. --- storage/innobase/fts/fts0que.cc | 3 ++- storage/innobase/include/rem0cmp.h | 3 ++- storage/innobase/include/row0merge.h | 6 +++--- storage/innobase/row/row0merge.cc | 7 ++++--- 4 files changed, 11 insertions(+), 8 deletions(-) (limited to 'storage') diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc index bc1d173cc29..fc80c843412 100644 --- a/storage/innobase/fts/fts0que.cc +++ b/storage/innobase/fts/fts0que.cc @@ -2430,7 +2430,8 @@ fts_query_terms_in_document( /*****************************************************************//** Retrieve the document and match the phrase tokens. @return DB_SUCCESS or error code */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) +MY_ATTRIBUTE((nonnull(1,2,3,6), warn_unused_result)) +static dberr_t fts_query_match_document( /*=====================*/ diff --git a/storage/innobase/include/rem0cmp.h b/storage/innobase/include/rem0cmp.h index 245fefae944..216e3a7655b 100644 --- a/storage/innobase/include/rem0cmp.h +++ b/storage/innobase/include/rem0cmp.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -62,7 +63,7 @@ cmp_data_data( ulint len1, const byte* data2, ulint len2) - MY_ATTRIBUTE((nonnull, warn_unused_result)); + MY_ATTRIBUTE((warn_unused_result)); /** Compare two data fields. 
@param[in] dfield1 data field; must have type field set diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h index 1b61c475c6f..762b0213724 100644 --- a/storage/innobase/include/row0merge.h +++ b/storage/innobase/include/row0merge.h @@ -195,7 +195,7 @@ row_merge_drop_temp_indexes(void); /** Create temporary merge files in the given paramater path, and if UNIV_PFS_IO defined, register the file descriptor with Performance Schema. -@param[in] path location for creating temporary merge files. +@param[in] path location for creating temporary merge files, or NULL @return File descriptor */ int row_merge_file_create_low( @@ -398,13 +398,13 @@ row_merge_buf_empty( /** Create a merge file in the given location. @param[out] merge_file merge file structure -@param[in] path location for creating temporary file +@param[in] path location for creating temporary file, or NULL @return file descriptor, or -1 on failure */ int row_merge_file_create( merge_file_t* merge_file, const char* path) - MY_ATTRIBUTE((warn_unused_result, nonnull)); + MY_ATTRIBUTE((warn_unused_result, nonnull(1))); /** Merge disk files. @param[in] trx transaction diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 62cab870e9e..6038542edf8 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -4030,7 +4030,7 @@ row_merge_drop_temp_indexes(void) /** Create temporary merge files in the given paramater path, and if UNIV_PFS_IO defined, register the file descriptor with Performance Schema. -@param[in] path location for creating temporary merge files. +@param[in] path location for creating temporary merge files, or NULL @return File descriptor */ int row_merge_file_create_low( @@ -4063,7 +4063,7 @@ row_merge_file_create_low( /** Create a merge file in the given location. 
@param[out] merge_file merge file structure -@param[in] path location for creating temporary file +@param[in] path location for creating temporary file, or NULL @return file descriptor, or -1 on failure */ int row_merge_file_create( @@ -4373,7 +4373,8 @@ row_merge_rename_tables_dict( @param[in,out] index index @param[in] add_v new virtual columns added along with add index call @return DB_SUCCESS or error code */ -static MY_ATTRIBUTE((nonnull, warn_unused_result)) +MY_ATTRIBUTE((nonnull(1,2,3), warn_unused_result)) +static dberr_t row_merge_create_index_graph( trx_t* trx, -- cgit v1.2.1 From eb30230359309de8972d771cbf282097b1175a09 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 19 May 2017 22:27:26 +0200 Subject: compilation warnings in Connect --- storage/connect/array.cpp | 10 +++++----- storage/connect/array.h | 4 ++-- storage/connect/blkfil.cpp | 8 ++++---- storage/connect/blkfil.h | 8 ++++---- storage/connect/block.h | 4 ++-- storage/connect/colblk.cpp | 4 ++-- storage/connect/colblk.h | 4 ++-- storage/connect/csort.h | 4 ++-- storage/connect/filamvct.cpp | 1 - storage/connect/filter.cpp | 10 +++++----- storage/connect/filter.h | 4 ++-- storage/connect/ha_connect.cc | 2 +- storage/connect/plgdbutl.cpp | 2 +- storage/connect/tabcol.cpp | 8 ++++---- storage/connect/tabcol.h | 8 ++++---- storage/connect/tabdos.cpp | 4 ++-- storage/connect/tabdos.h | 4 ++-- storage/connect/tabjdbc.h | 2 +- storage/connect/table.cpp | 6 +++--- storage/connect/tabodbc.h | 2 +- storage/connect/tabsys.h | 4 ++-- storage/connect/tabxml.h | 2 +- storage/connect/value.cpp | 8 ++++---- storage/connect/value.h | 6 +++--- storage/connect/xindex.cpp | 4 ++-- storage/connect/xindex.h | 4 ++-- storage/connect/xobject.cpp | 8 ++++---- storage/connect/xobject.h | 4 ++-- storage/connect/xtable.h | 4 ++-- 29 files changed, 71 insertions(+), 72 deletions(-) (limited to 'storage') diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index 5cf15c5ac18..6e0da312ca3 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -986,7 +986,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) for (i = 0; i < Nval; i++) { Value->SetValue_pvblk(Vblp, i); - Value->Print(g, tp, z); + Value->Prints(g, tp, z); len += strlen(tp); } // enfor i @@ -998,7 +998,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) for (i = 0; i < Nval;) { Value->SetValue_pvblk(Vblp, i); - Value->Print(g, tp, z); + Value->Prints(g, tp, z); strcat(p, tp); strcat(p, (++i == Nval) ? ")" : ","); } // enfor i @@ -1012,7 +1012,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) /***********************************************************************/ /* Make file output of ARRAY contents. */ /***********************************************************************/ -void ARRAY::Print(PGLOBAL g, FILE *f, uint n) +void ARRAY::Printf(PGLOBAL g, FILE *f, uint n) { char m[64]; int lim = MY_MIN(Nval,10); @@ -1029,7 +1029,7 @@ void ARRAY::Print(PGLOBAL g, FILE *f, uint n) if (Vblp) for (int i = 0; i < lim; i++) { Value->SetValue_pvblk(Vblp, i); - Value->Print(g, f, n+4); + Value->Printf(g, f, n+4); } // endfor i } else @@ -1040,7 +1040,7 @@ void ARRAY::Print(PGLOBAL g, FILE *f, uint n) /***********************************************************************/ /* Make string output of ARRAY contents. 
*/ /***********************************************************************/ -void ARRAY::Print(PGLOBAL, char *ps, uint z) +void ARRAY::Prints(PGLOBAL, char *ps, uint z) { if (z < 16) return; diff --git a/storage/connect/array.h b/storage/connect/array.h index dfc3638de8a..bd38344de06 100644 --- a/storage/connect/array.h +++ b/storage/connect/array.h @@ -56,8 +56,8 @@ class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock virtual bool Compare(PXOB) {assert(false); return false;} virtual bool SetFormat(PGLOBAL, FORMAT&) {assert(false); return false;} //virtual int CheckSpcCol(PTDB, int) {return 0;} - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); // void Empty(void); void SetPrecision(PGLOBAL g, int p); bool AddValue(PGLOBAL g, PSZ sp); diff --git a/storage/connect/blkfil.cpp b/storage/connect/blkfil.cpp index e438e185e5d..77a46260934 100644 --- a/storage/connect/blkfil.cpp +++ b/storage/connect/blkfil.cpp @@ -56,7 +56,7 @@ BLOCKFILTER::BLOCKFILTER(PTDBDOS tdbp, int op) /***********************************************************************/ /* Make file output of BLOCKFILTER contents. */ /***********************************************************************/ -void BLOCKFILTER::Print(PGLOBAL, FILE *f, uint n) +void BLOCKFILTER::Printf(PGLOBAL, FILE *f, uint n) { char m[64]; @@ -70,7 +70,7 @@ void BLOCKFILTER::Print(PGLOBAL, FILE *f, uint n) /***********************************************************************/ /* Make string output of BLOCKFILTER contents. */ /***********************************************************************/ -void BLOCKFILTER::Print(PGLOBAL, char *ps, uint z) +void BLOCKFILTER::Prints(PGLOBAL, char *ps, uint z) { strncat(ps, "BlockFilter(s)", z); } // end of Print @@ -995,7 +995,7 @@ int BLOCKINDEX::BlockEval(PGLOBAL g) /***********************************************************************/ /* Make file output of BLOCKINDEX contents. */ /***********************************************************************/ -void BLOCKINDEX::Print(PGLOBAL g, FILE *f, UINT n) +void BLOCKINDEX::Printf(PGLOBAL g, FILE *f, UINT n) { char m[64]; @@ -1013,7 +1013,7 @@ void BLOCKINDEX::Print(PGLOBAL g, FILE *f, UINT n) /***********************************************************************/ /* Make string output of BLOCKINDEX contents. 
*/ /***********************************************************************/ -void BLOCKINDEX::Print(PGLOBAL g, char *ps, UINT z) +void BLOCKINDEX::Prints(PGLOBAL g, char *ps, UINT z) { strncat(ps, "BlockIndex(es)", z); } // end of Print diff --git a/storage/connect/blkfil.h b/storage/connect/blkfil.h index 00b00139042..61b02c53c14 100644 --- a/storage/connect/blkfil.h +++ b/storage/connect/blkfil.h @@ -27,8 +27,8 @@ class DllExport BLOCKFILTER : public BLOCK { /* Block Filter */ // Methods virtual void Reset(PGLOBAL) = 0; virtual int BlockEval(PGLOBAL) = 0; - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); protected: BLOCKFILTER(void) {} // Standard constructor not to be used @@ -234,8 +234,8 @@ class DllExport BLOCKINDEX : public BLOCK { /* Indexing Test Block */ // Methods void Reset(void); virtual int BlockEval(PGLOBAL); - virtual void Print(PGLOBAL g, FILE *f, UINT n); - virtual void Print(PGLOBAL g, char *ps, UINT z); + virtual void Printf(PGLOBAL g, FILE *f, UINT n); + virtual void Prints(PGLOBAL g, char *ps, UINT z); protected: BLOCKINDEX(void) {} // Standard constructor not to be used diff --git a/storage/connect/block.h b/storage/connect/block.h index aa4edde5ec9..8ac7be80988 100644 --- a/storage/connect/block.h +++ b/storage/connect/block.h @@ -44,8 +44,8 @@ class DllExport BLOCK { return (PlugSubAlloc(g, p, size)); } // end of new - virtual void Print(PGLOBAL, FILE *, uint) {} // Produce file desc - virtual void Print(PGLOBAL, char *, uint) {} // Produce string desc + virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc + virtual void Prints(PGLOBAL, char *, uint) {} // Produce string desc #if !defined(__BORLANDC__) // Avoid warning C4291 by defining a matching dummy delete operator diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index fa205b493a2..324d59ab40e 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -214,7 +214,7 @@ void COLBLK::WriteColumn(PGLOBAL g) /***********************************************************************/ /* Make file output of a column descriptor block. */ /***********************************************************************/ -void COLBLK::Print(PGLOBAL, FILE *f, uint n) +void COLBLK::Printf(PGLOBAL, FILE *f, uint n) { char m[64]; int i; @@ -237,7 +237,7 @@ void COLBLK::Print(PGLOBAL, FILE *f, uint n) /***********************************************************************/ /* Make string output of a column descriptor block. 
*/ /***********************************************************************/ -void COLBLK::Print(PGLOBAL, char *ps, uint) +void COLBLK::Prints(PGLOBAL, char *ps, uint) { sprintf(ps, "R%d.%s", To_Tdb->GetTdb_No(), Name); } // end of Print diff --git a/storage/connect/colblk.h b/storage/connect/colblk.h index 02c4f2361b7..608aa040787 100644 --- a/storage/connect/colblk.h +++ b/storage/connect/colblk.h @@ -72,8 +72,8 @@ class DllExport COLBLK : public XOBJECT { virtual void SetTo_Val(PVAL) {} virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *, uint); + virtual void Printf(PGLOBAL g, FILE *, uint); + virtual void Prints(PGLOBAL g, char *, uint); virtual bool VarSize(void) {return false;} bool InitValue(PGLOBAL g); diff --git a/storage/connect/csort.h b/storage/connect/csort.h index 55ff6268a4b..6e700059881 100644 --- a/storage/connect/csort.h +++ b/storage/connect/csort.h @@ -49,8 +49,8 @@ class DllExport CSORT { public: // Methods int Qsort(PGLOBAL g, int n); /* Sort calling routine */ -//virtual void Print(PGLOBAL g, FILE *f, uint n); -//virtual void Print(PGLOBAL g, char *ps, uint z); +//virtual void Printf(PGLOBAL g, FILE *f, uint n); +//virtual void Prints(PGLOBAL g, char *ps, uint z); #ifdef DEBTRACE int GetNcmp(void) {return num_comp;} #endif diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index f8a7f2600f3..537f77d01ac 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -569,7 +569,6 @@ bool VCTFAM::InitInsert(PGLOBAL g) CurNum = 0; AddBlock = !MaxBlk; } else { - int rc; PVCTCOL cp = (PVCTCOL)Tdbp->GetColumns(); // The starting point must be at the end of file as for append. diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index 863c9ca6571..da44b129ccb 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -1409,7 +1409,7 @@ PFIL FILTER::Copy(PTABS t) /*********************************************************************/ /* Make file output of FILTER contents. */ /*********************************************************************/ -void FILTER::Print(PGLOBAL g, FILE *f, uint n) +void FILTER::Printf(PGLOBAL g, FILE *f, uint n) { char m[64]; @@ -1431,7 +1431,7 @@ void FILTER::Print(PGLOBAL g, FILE *f, uint n) if (lin && fp->GetArgType(i) == TYPE_FILTER) fprintf(f, "%s Filter at %p\n", m, fp->Arg(i)); else - fp->Arg(i)->Print(g, f, n + 2); + fp->Arg(i)->Printf(g, f, n + 2); } // endfor i @@ -1442,7 +1442,7 @@ void FILTER::Print(PGLOBAL g, FILE *f, uint n) /***********************************************************************/ /* Make string output of TABLE contents (z should be checked). 
*/ /***********************************************************************/ -void FILTER::Print(PGLOBAL g, char *ps, uint z) +void FILTER::Prints(PGLOBAL g, char *ps, uint z) { #define FLEN 100 @@ -1470,7 +1470,7 @@ void FILTER::Print(PGLOBAL g, char *ps, uint z) bcp = bxp; p = bcp->Cold; n = FLEN; - fp->Arg(0)->Print(g, p, n); + fp->Arg(0)->Prints(g, p, n); n = FLEN - strlen(p); switch (fp->Opc) { @@ -1516,7 +1516,7 @@ void FILTER::Print(PGLOBAL g, char *ps, uint z) n = FLEN - strlen(p); p += strlen(p); - fp->Arg(1)->Print(g, p, n); + fp->Arg(1)->Prints(g, p, n); } else if (!bcp) { strncat(ps, "???", z); diff --git a/storage/connect/filter.h b/storage/connect/filter.h index f4835d23a7c..22d1e4ed4be 100644 --- a/storage/connect/filter.h +++ b/storage/connect/filter.h @@ -61,8 +61,8 @@ class DllExport FILTER : public XOBJECT { /* Filter description block */ //virtual PXOB CheckSubQuery(PGLOBAL, PSQL); //virtual bool CheckLocal(PTDB); //virtual int CheckSpcCol(PTDB tdbp, int n); - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); // PFIL Linearize(bool nosep); // PFIL Link(PGLOBAL g, PFIL fil2); // PFIL RemoveLastSep(void); diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 68b01e3c7f2..c30ab9adf4e 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1034,7 +1034,7 @@ PCSZ GetListOption(PGLOBAL g, PCSZ opname, PCSZ oplist, PCSZ def) pv= strchr(pk, '='); if (pv && (!pn || pv < pn)) { - n= MY_MIN(pv - pk, sizeof(key) - 1); + n= MY_MIN(pv - pk, (int)sizeof(key) - 1); memcpy(key, pk, n); key[n]= 0; pv++; diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index d990f082e5b..b6f59bac8cf 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -1492,7 +1492,7 @@ void PlugPutOut(PGLOBAL g, FILE *f, short t, void *v, uint n) case TYPE_TABLE: case TYPE_TDB: case TYPE_XOBJECT: - ((PBLOCK)v)->Print(g, f, n-2); + ((PBLOCK)v)->Printf(g, f, n-2); break; default: diff --git a/storage/connect/tabcol.cpp b/storage/connect/tabcol.cpp index fde1baa6317..2740864a69b 100644 --- a/storage/connect/tabcol.cpp +++ b/storage/connect/tabcol.cpp @@ -73,7 +73,7 @@ PTABLE XTAB::Link(PTABLE tab2) /***********************************************************************/ /* Make file output of XTAB contents. */ /***********************************************************************/ -void XTAB::Print(PGLOBAL g, FILE *f, uint n) +void XTAB::Printf(PGLOBAL g, FILE *f, uint n) { char m[64]; @@ -91,7 +91,7 @@ void XTAB::Print(PGLOBAL g, FILE *f, uint n) /***********************************************************************/ /* Make string output of XTAB contents. */ /***********************************************************************/ -void XTAB::Print(PGLOBAL, char *ps, uint z) +void XTAB::Prints(PGLOBAL, char *ps, uint z) { char buf[128]; int i, n = (int)z - 1; @@ -134,7 +134,7 @@ bool COLUMN::SetFormat(PGLOBAL g, FORMAT&) /***********************************************************************/ /* Make file output of COLUMN contents. 
*/ /***********************************************************************/ -void COLUMN::Print(PGLOBAL g, FILE *f, uint n) +void COLUMN::Printf(PGLOBAL g, FILE *f, uint n) { char m[64]; @@ -154,7 +154,7 @@ void COLUMN::Print(PGLOBAL g, FILE *f, uint n) /***********************************************************************/ /* Make string output of COLUMN contents. */ /***********************************************************************/ -void COLUMN::Print(PGLOBAL, char *ps, uint z) +void COLUMN::Prints(PGLOBAL, char *ps, uint z) { char buf[80]; diff --git a/storage/connect/tabcol.h b/storage/connect/tabcol.h index 3bfc37e69c1..e4657e2f261 100644 --- a/storage/connect/tabcol.h +++ b/storage/connect/tabcol.h @@ -38,8 +38,8 @@ class DllExport XTAB: public BLOCK { // Table Name-Schema-Srcdef block. // Methods PTABLE Link(PTABLE); - void Print(PGLOBAL g, FILE *f, uint n); - void Print(PGLOBAL g, char *ps, uint z); + void Printf(PGLOBAL g, FILE *f, uint n); + void Prints(PGLOBAL g, char *ps, uint z); protected: // Members @@ -78,8 +78,8 @@ class DllExport COLUMN: public XOBJECT { // Column Name/Qualifier block. void SetTo_Col(PCOL colp) {To_Col = colp;} // Methods - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); // All methods below should never be used for COLUMN's virtual void Reset(void) {assert(false);} virtual bool Compare(PXOB) {assert(false); return false;} diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index 21b64a41684..468966e79d9 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -2871,9 +2871,9 @@ bool DOSCOL::AddDistinctValue(PGLOBAL g) /***********************************************************************/ /* Make file output of a Dos column descriptor block. 
*/ /***********************************************************************/ -void DOSCOL::Print(PGLOBAL g, FILE *f, uint n) +void DOSCOL::Printf(PGLOBAL g, FILE *f, uint n) { - COLBLK::Print(g, f, n); + COLBLK::Printf(g, f, n); } // end of Print /* ------------------------------------------------------------------- */ diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h index d175cc2da4d..9722cd3777d 100644 --- a/storage/connect/tabdos.h +++ b/storage/connect/tabdos.h @@ -232,12 +232,12 @@ class DllExport DOSCOL : public COLBLK { virtual PVBLK GetDval(void) {return Dval;} // Methods - using COLBLK::Print; + //using COLBLK::Print; virtual bool VarSize(void); virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); - virtual void Print(PGLOBAL g, FILE *, uint); + virtual void Printf(PGLOBAL g, FILE *, uint); protected: virtual bool SetMinMax(PGLOBAL g); diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h index 7c14783285f..d8ec65d02d8 100644 --- a/storage/connect/tabjdbc.h +++ b/storage/connect/tabjdbc.h @@ -173,7 +173,7 @@ public: // Methods virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); - // void Print(PGLOBAL g, FILE *, uint); + // void Printf(PGLOBAL g, FILE *, uint); protected: // Members diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp index 22fb09dbb86..d39837a7b5a 100644 --- a/storage/connect/table.cpp +++ b/storage/connect/table.cpp @@ -299,7 +299,7 @@ bool TDB::SetRecpos(PGLOBAL g, int) return true; } // end of SetRecpos -void TDB::Print(PGLOBAL g, FILE *f, uint n) +void TDB::Printf(PGLOBAL g, FILE *f, uint n) { PCOL cp; char m[64]; @@ -315,13 +315,13 @@ void TDB::Print(PGLOBAL g, FILE *f, uint n) fprintf(f, "%s Columns (deg=%d):\n", m, tp->Degree); for (cp = tp->Columns; cp; cp = cp->GetNext()) - cp->Print(g, f, n); + cp->Printf(g, f, n); } /* endfor tp */ } // end of Print -void TDB::Print(PGLOBAL, char *ps, uint) +void TDB::Prints(PGLOBAL, char *ps, uint) { sprintf(ps, "R%d.%s", Tdb_No, Name); } // end of Print diff --git a/storage/connect/tabodbc.h b/storage/connect/tabodbc.h index 487a5073559..0ca88b60858 100644 --- a/storage/connect/tabodbc.h +++ b/storage/connect/tabodbc.h @@ -187,7 +187,7 @@ class XSRCCOL : public ODBCCOL { // Methods virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); -// void Print(PGLOBAL g, FILE *, uint); +// void Printf(PGLOBAL g, FILE *, uint); protected: // Members diff --git a/storage/connect/tabsys.h b/storage/connect/tabsys.h index 44b5a137a70..0c6017af177 100644 --- a/storage/connect/tabsys.h +++ b/storage/connect/tabsys.h @@ -62,7 +62,7 @@ class TDBINI : public TDBASE { virtual int GetProgCur(void) {return N;} //virtual int GetAffectedRows(void) {return 0;} virtual PCSZ GetFile(PGLOBAL g) {return Ifile;} - virtual void SetFile(PGLOBAL g, PSZ fn) {Ifile = fn;} + virtual void SetFile(PGLOBAL g, PCSZ fn) {Ifile = fn;} virtual void ResetDB(void) {Seclist = Section = NULL; N = 0;} virtual void ResetSize(void) {MaxSize = -1; Seclist = NULL;} virtual int RowNumber(PGLOBAL g, bool b = false) {return N;} @@ -80,7 +80,7 @@ class TDBINI : public TDBASE { protected: // Members - char *Ifile; // The INI file + PCSZ Ifile; // The INI file char *Seclist; // The section list char *Section; // The current section int Seclen; // Length of seclist buffer diff --git a/storage/connect/tabxml.h b/storage/connect/tabxml.h index 6b18eb645f0..813f62dde52 100644 --- a/storage/connect/tabxml.h +++ 
b/storage/connect/tabxml.h @@ -75,7 +75,7 @@ class DllExport TDBXML : public TDBASE { virtual int GetRecpos(void); virtual int GetProgCur(void) {return N;} virtual PCSZ GetFile(PGLOBAL g) {return Xfile;} - virtual void SetFile(PGLOBAL g, PSZ fn) {Xfile = fn;} + virtual void SetFile(PGLOBAL g, PCSZ fn) {Xfile = fn;} virtual void ResetDB(void) {N = 0;} virtual void ResetSize(void) {MaxSize = -1;} virtual int RowNumber(PGLOBAL g, bool b = false); diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index 429593f07d9..b6c63bdadd3 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -561,7 +561,7 @@ bool VALUE::Compute(PGLOBAL g, PVAL *, int, OPVAL) /***********************************************************************/ /* Make file output of an object value. */ /***********************************************************************/ -void VALUE::Print(PGLOBAL g, FILE *f, uint n) +void VALUE::Printf(PGLOBAL g, FILE *f, uint n) { char m[64], buf[64]; @@ -571,14 +571,14 @@ void VALUE::Print(PGLOBAL g, FILE *f, uint n) if (Null) fprintf(f, "%s\n", m); else - fprintf(f, strcat(strcat(GetCharString(buf), "\n"), m)); + fprintf(f, "%s%s%s", GetCharString(buf), "\n", m); } /* end of Print */ /***********************************************************************/ /* Make string output of an object value. */ /***********************************************************************/ -void VALUE::Print(PGLOBAL g, char *ps, uint z) +void VALUE::Prints(PGLOBAL g, char *ps, uint z) { char *p, buf[64]; @@ -1712,7 +1712,7 @@ bool TYPVAL::SetConstFormat(PGLOBAL, FORMAT& fmt) /***********************************************************************/ /* Make string output of an object value. */ /***********************************************************************/ -void TYPVAL::Print(PGLOBAL g, char *ps, uint z) +void TYPVAL::Prints(PGLOBAL g, char *ps, uint z) { if (Null) strncpy(ps, "null", z); diff --git a/storage/connect/value.h b/storage/connect/value.h index cf6682f56f2..2754c761815 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -122,8 +122,8 @@ class DllExport VALUE : public BLOCK { virtual bool IsEqual(PVAL vp, bool chktype) = 0; virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op); virtual bool FormatValue(PVAL vp, PCSZ fmt) = 0; - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *, uint); + virtual void Prints(PGLOBAL g, char *ps, uint z); /** Set value from a non-aligned in-memory value in the machine byte order. @@ -308,7 +308,7 @@ class DllExport TYPVAL: public VALUE { virtual bool Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op); virtual bool FormatValue(PVAL vp, PCSZ fmt); virtual bool SetConstFormat(PGLOBAL, FORMAT&); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Prints(PGLOBAL g, char *ps, uint z); protected: // Members diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index a34d2e1c847..3e4db8080ae 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -181,7 +181,7 @@ XXBASE::XXBASE(PTDBDOS tbxp, bool b) : CSORT(b), /***********************************************************************/ /* Make file output of XINDEX contents. 
*/ /***********************************************************************/ -void XXBASE::Print(PGLOBAL, FILE *f, uint n) +void XXBASE::Printf(PGLOBAL, FILE *f, uint n) { char m[64]; @@ -193,7 +193,7 @@ void XXBASE::Print(PGLOBAL, FILE *f, uint n) /***********************************************************************/ /* Make string output of XINDEX contents. */ /***********************************************************************/ -void XXBASE::Print(PGLOBAL, char *ps, uint z) +void XXBASE::Prints(PGLOBAL, char *ps, uint z) { *ps = '\0'; strncat(ps, "Xindex", z); diff --git a/storage/connect/xindex.h b/storage/connect/xindex.h index 2d10d72722e..339d7e68b75 100644 --- a/storage/connect/xindex.h +++ b/storage/connect/xindex.h @@ -200,8 +200,8 @@ class DllExport XXBASE : public CSORT, public BLOCK { void FreeIndex(void) {PlgDBfree(Index);} // Methods - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); virtual bool Init(PGLOBAL g) = 0; virtual bool Make(PGLOBAL g, PIXDEF sxp) = 0; #if defined(XMAP) diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index 8f0b76d2ddc..205edc12d0c 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -173,17 +173,17 @@ bool CONSTANT::Rephrase(PGLOBAL g, PSZ work) /***********************************************************************/ /* Make file output of a constant object. */ /***********************************************************************/ -void CONSTANT::Print(PGLOBAL g, FILE *f, uint n) +void CONSTANT::Printf(PGLOBAL g, FILE *f, uint n) { - Value->Print(g, f, n); + Value->Printf(g, f, n); } /* end of Print */ /***********************************************************************/ /* Make string output of a constant object. */ /***********************************************************************/ -void CONSTANT::Print(PGLOBAL g, char *ps, uint z) +void CONSTANT::Prints(PGLOBAL g, char *ps, uint z) { - Value->Print(g, ps, z); + Value->Prints(g, ps, z); } /* end of Print */ /* -------------------------- Class STRING --------------------------- */ diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h index 204144182c8..bc5912d3054 100644 --- a/storage/connect/xobject.h +++ b/storage/connect/xobject.h @@ -112,8 +112,8 @@ class DllExport CONSTANT : public XOBJECT { {return Value->SetConstFormat(g, fmt);} void Convert(PGLOBAL g, int newtype); void SetValue(PVAL vp) {Value = vp;} - virtual void Print(PGLOBAL g, FILE *, uint); - virtual void Print(PGLOBAL g, char *, uint); + virtual void Printf(PGLOBAL g, FILE *, uint); + virtual void Prints(PGLOBAL g, char *, uint); }; // end of class CONSTANT /***********************************************************************/ diff --git a/storage/connect/xtable.h b/storage/connect/xtable.h index 3da0c17bdfe..ebef7a2549a 100644 --- a/storage/connect/xtable.h +++ b/storage/connect/xtable.h @@ -109,8 +109,8 @@ class DllExport TDB: public BLOCK { // Table Descriptor Block. 
virtual PTDB Copy(PTABS t); virtual void PrintAM(FILE *f, char *m) {fprintf(f, "%s AM(%d)\n", m, GetAmType());} - virtual void Print(PGLOBAL g, FILE *f, uint n); - virtual void Print(PGLOBAL g, char *ps, uint z); + virtual void Printf(PGLOBAL g, FILE *f, uint n); + virtual void Prints(PGLOBAL g, char *ps, uint z); virtual PCSZ GetServer(void) = 0; virtual int GetBadLines(void) {return 0;} virtual CHARSET_INFO *data_charset(void); -- cgit v1.2.1 From a4d4a5fe82bc87b222c50d192adf5e379fe6365d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 19 May 2017 14:28:57 +0300 Subject: After-merge fix for MDEV-11638 In commit 360a4a037271d65ab6471f7ab3f9b6a893d90a31 some debug assertions were introduced to the page flushing code in XtraDB. Add these assertions to InnoDB as well, and adjust the InnoDB shutdown so that these assertions will not fail. logs_empty_and_mark_files_at_shutdown(): Advance srv_shutdown_state from the first phase SRV_SHUTDOWN_CLEANUP only after no page-dirtying activity is possible (well, except by srv_master_do_shutdown_tasks(), which will be fixed separately in MDEV-12052). rotate_thread_t::should_shutdown(): Already exit the key rotation threads at the first phase of shutdown (SRV_SHUTDOWN_CLEANUP). page_cleaner_sleep_if_needed(): Do not sleep during shutdown. This change is originally from XtraDB. --- storage/innobase/buf/buf0flu.cc | 9 +++++++++ storage/innobase/fil/fil0crypt.cc | 4 ++-- storage/innobase/log/log0log.cc | 2 +- storage/xtradb/fil/fil0crypt.cc | 4 ++-- storage/xtradb/log/log0log.cc | 2 +- 5 files changed, 15 insertions(+), 6 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index b5ce171c92e..b8429052e93 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -150,6 +150,7 @@ buf_flush_insert_in_flush_rbt( buf_page_t* prev = NULL; buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE); ut_ad(buf_flush_list_mutex_own(buf_pool)); /* Insert this buffer into the rbt. 
*/ @@ -359,6 +360,7 @@ buf_flush_insert_sorted_into_flush_list( buf_page_t* prev_b; buf_page_t* b; + ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE); ut_ad(!buf_pool_mutex_own(buf_pool)); ut_ad(log_flush_order_mutex_own()); ut_ad(mutex_own(&block->mutex)); @@ -678,6 +680,7 @@ buf_flush_write_complete( flush_type = buf_page_get_flush_type(bpage); buf_pool->n_flush[flush_type]--; + ut_ad(buf_pool->n_flush[flush_type] != ULINT_MAX); ut_ad(buf_pool_mutex_own(buf_pool)); @@ -1051,6 +1054,7 @@ buf_flush_page( } ++buf_pool->n_flush[flush_type]; + ut_ad(buf_pool->n_flush[flush_type] != 0); mutex_exit(block_mutex); buf_pool_mutex_exit(buf_pool); @@ -2273,6 +2277,11 @@ page_cleaner_sleep_if_needed( ulint next_loop_time) /*!< in: time when next loop iteration should start */ { + /* No sleep if we are cleaning the buffer pool during the shutdown + with everything else finished */ + if (srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE) + return; + ulint cur_time = ut_time_ms(); if (next_loop_time > cur_time) { diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index b7841a4c411..a6d85fb89bf 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -1294,10 +1294,10 @@ struct rotate_thread_t { bool should_shutdown() const { switch (srv_shutdown_state) { case SRV_SHUTDOWN_NONE: - case SRV_SHUTDOWN_CLEANUP: return thread_no >= srv_n_fil_crypt_threads; - case SRV_SHUTDOWN_FLUSH_PHASE: + case SRV_SHUTDOWN_CLEANUP: return true; + case SRV_SHUTDOWN_FLUSH_PHASE: case SRV_SHUTDOWN_LAST_PHASE: case SRV_SHUTDOWN_EXIT_THREADS: break; diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 934350f9efb..8b97ccf890e 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -3321,8 +3321,8 @@ wait_suspend_loop: switch (srv_get_active_thread_type()) { case SRV_NONE: - srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; if (!srv_n_fil_crypt_threads_started) { + srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; break; } os_event_set(fil_crypt_threads_event); diff --git a/storage/xtradb/fil/fil0crypt.cc b/storage/xtradb/fil/fil0crypt.cc index bbafad6d04b..da78d4dd85d 100644 --- a/storage/xtradb/fil/fil0crypt.cc +++ b/storage/xtradb/fil/fil0crypt.cc @@ -1294,10 +1294,10 @@ struct rotate_thread_t { bool should_shutdown() const { switch (srv_shutdown_state) { case SRV_SHUTDOWN_NONE: - case SRV_SHUTDOWN_CLEANUP: return thread_no >= srv_n_fil_crypt_threads; - case SRV_SHUTDOWN_FLUSH_PHASE: + case SRV_SHUTDOWN_CLEANUP: return true; + case SRV_SHUTDOWN_FLUSH_PHASE: case SRV_SHUTDOWN_LAST_PHASE: case SRV_SHUTDOWN_EXIT_THREADS: break; diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index 7abab278416..d39bcb87117 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -3639,8 +3639,8 @@ wait_suspend_loop: switch (srv_get_active_thread_type()) { case SRV_NONE: - srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; if (!srv_n_fil_crypt_threads_started) { + srv_shutdown_state = SRV_SHUTDOWN_FLUSH_PHASE; break; } os_event_set(fil_crypt_threads_event); -- cgit v1.2.1 From 90c52e5291b3ad0935df7da56ec0fcbf530733b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Sat, 20 May 2017 21:49:35 +0300 Subject: MDEV-12615: InnoDB page compression method snappy mostly does not compress pages Snappy compression method require that output buffer used for compression is bigger than input buffer. Similarly lzo require additional work memory buffer. 
Increase the allocated buffer accordingly. buf_tmp_buffer_t: removed unnecessary lzo_mem, crypt_buf_free and comp_buf_free. buf_pool_reserve_tmp_slot: use alligned_alloc and if snappy available allocate size based on snappy_max_compressed_length and if lzo is available increase buffer by LZO1X_1_15_MEM_COMPRESS. fil_compress_page: Remove unneeded lzo mem (we use same buffer) and if output buffer is not yet allocated allocate based similarly as above. Decompression does not require additional work area. Modify test to use same test as other compression method tests. --- storage/innobase/buf/buf0buf.cc | 76 +++++++++++++++++--------- storage/innobase/fil/fil0fil.cc | 3 +- storage/innobase/fil/fil0pagecompress.cc | 79 ++++++++++++++------------- storage/innobase/include/buf0buf.h | 7 --- storage/innobase/include/fil0pagecompress.h | 3 +- storage/xtradb/buf/buf0buf.cc | 84 +++++++++++++++++++---------- storage/xtradb/fil/fil0fil.cc | 3 +- storage/xtradb/fil/fil0pagecompress.cc | 79 ++++++++++++++------------- storage/xtradb/include/buf0buf.h | 7 --- storage/xtradb/include/fil0pagecompress.h | 3 +- 10 files changed, 194 insertions(+), 150 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 6c54cee06b2..5cc09287350 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -64,10 +64,38 @@ Created 11/5/1995 Heikki Tuuri #include "ut0byte.h" #include +#ifdef UNIV_LINUX +#include +#endif + #ifdef HAVE_LZO #include "lzo/lzo1x.h" #endif +#ifdef HAVE_SNAPPY +#include "snappy-c.h" +#endif + +inline void* aligned_malloc(size_t size, size_t align) { + void *result; +#ifdef _MSC_VER + result = _aligned_malloc(size, align); +#else + if(posix_memalign(&result, align, size)) { + result = 0; + } +#endif + return result; +} + +inline void aligned_free(void *ptr) { +#ifdef _MSC_VER + _aligned_free(ptr); +#else + free(ptr); +#endif +} + /* IMPLEMENTATION OF THE BUFFER POOL ================================= @@ -1555,20 +1583,14 @@ buf_pool_free_instance( if (buf_pool->tmp_arr) { for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) { buf_tmp_buffer_t* slot = &(buf_pool->tmp_arr->slots[i]); -#ifdef HAVE_LZO - if (slot && slot->lzo_mem) { - ut_free(slot->lzo_mem); - slot->lzo_mem = NULL; - } -#endif - if (slot && slot->crypt_buf_free) { - ut_free(slot->crypt_buf_free); - slot->crypt_buf_free = NULL; + if (slot && slot->crypt_buf) { + aligned_free(slot->crypt_buf); + slot->crypt_buf = NULL; } - if (slot && slot->comp_buf_free) { - ut_free(slot->comp_buf_free); - slot->comp_buf_free = NULL; + if (slot && slot->comp_buf) { + aligned_free(slot->comp_buf); + slot->comp_buf = NULL; } } } @@ -6024,22 +6046,27 @@ buf_pool_reserve_tmp_slot( buf_pool_mutex_exit(buf_pool); /* Allocate temporary memory for encryption/decryption */ - if (free_slot->crypt_buf_free == NULL) { - free_slot->crypt_buf_free = static_cast(ut_malloc(UNIV_PAGE_SIZE*2)); - free_slot->crypt_buf = static_cast(ut_align(free_slot->crypt_buf_free, UNIV_PAGE_SIZE)); - memset(free_slot->crypt_buf_free, 0, UNIV_PAGE_SIZE *2); + if (free_slot->crypt_buf == NULL) { + free_slot->crypt_buf = static_cast(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE)); + memset(free_slot->crypt_buf, 0, UNIV_PAGE_SIZE); } /* For page compressed tables allocate temporary memory for compression/decompression */ - if (compressed && free_slot->comp_buf_free == NULL) { - free_slot->comp_buf_free = static_cast(ut_malloc(UNIV_PAGE_SIZE*2)); - free_slot->comp_buf = 
static_cast(ut_align(free_slot->comp_buf_free, UNIV_PAGE_SIZE)); - memset(free_slot->comp_buf_free, 0, UNIV_PAGE_SIZE *2); -#ifdef HAVE_LZO - free_slot->lzo_mem = static_cast(ut_malloc(LZO1X_1_15_MEM_COMPRESS)); - memset(free_slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); + if (compressed && free_slot->comp_buf == NULL) { + ulint size = UNIV_PAGE_SIZE; + + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. */ +#if HAVE_SNAPPY + size = snappy_max_compressed_length(size); +#endif +#if HAVE_LZO + size += LZO1X_1_15_MEM_COMPRESS; #endif + free_slot->comp_buf = static_cast(aligned_malloc(size, UNIV_PAGE_SIZE)); + memset(free_slot->comp_buf, 0, size); } return (free_slot); @@ -6127,8 +6154,7 @@ buf_page_encrypt_before_write( fsp_flags_get_page_compression_level(space->flags), fil_space_get_block_size(space, bpage->offset), encrypted, - &out_len, - IF_LZO(slot->lzo_mem, NULL)); + &out_len); bpage->real_size = out_len; diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 9a0b6010d0c..fa606e89828 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -6744,8 +6744,7 @@ fil_iterate( 0,/* FIXME: compression level */ 512,/* FIXME: use proper block size */ encrypted, - &len, - NULL); + &len); updated = true; } diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc index c377c19dd0d..2b6ae95640f 100644 --- a/storage/innobase/fil/fil0pagecompress.cc +++ b/storage/innobase/fil/fil0pagecompress.cc @@ -99,17 +99,16 @@ fil_compress_page( ulint level, /* in: compression level */ ulint block_size, /*!< in: block size */ bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len, /*!< out: actual length of compressed + ulint* out_len) /*!< out: actual length of compressed page */ - byte* lzo_mem) /*!< in: temporal memory used by LZO */ { int err = Z_OK; int comp_level = level; ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; - ulint write_size=0; + ulint write_size = 0; /* Cache to avoid change during function execution */ ulint comp_method = innodb_compression_algorithm; - bool allocated=false; + bool allocated = false; /* page_compression does not apply to tables or tablespaces that use ROW_FORMAT=COMPRESSED */ @@ -121,13 +120,23 @@ fil_compress_page( if (!out_buf) { allocated = true; - out_buf = static_cast(ut_malloc(UNIV_PAGE_SIZE)); -#ifdef HAVE_LZO + ulint size = UNIV_PAGE_SIZE; + + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. 
*/ +#if HAVE_SNAPPY + if (comp_method == PAGE_SNAPPY_ALGORITHM) { + size = snappy_max_compressed_length(size); + } +#endif +#if HAVE_LZO if (comp_method == PAGE_LZO_ALGORITHM) { - lzo_mem = static_cast(ut_malloc(LZO1X_1_15_MEM_COMPRESS)); - memset(lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); + size += LZO1X_1_15_MEM_COMPRESS; } #endif + + out_buf = static_cast(ut_malloc(size)); } ut_ad(buf); @@ -163,6 +172,7 @@ fil_compress_page( switch(comp_method) { #ifdef HAVE_LZ4 case PAGE_LZ4_ALGORITHM: + #ifdef HAVE_LZ4_COMPRESS_DEFAULT err = LZ4_compress_default((const char *)buf, (char *)out_buf+header_len, len, write_size); @@ -197,7 +207,7 @@ fil_compress_page( #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: err = lzo1x_1_15_compress( - buf, len, out_buf+header_len, &write_size, lzo_mem); + buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE); if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) { if (space && !space->printed_compression_failure) { @@ -288,6 +298,7 @@ fil_compress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; + write_size = snappy_max_compressed_length(UNIV_PAGE_SIZE); cstatus = snappy_compress( (const char *)buf, @@ -443,11 +454,6 @@ fil_compress_page( err_exit: if (allocated) { ut_free(out_buf); -#ifdef HAVE_LZO - if (comp_method == PAGE_LZO_ALGORITHM) { - ut_free(lzo_mem); - } -#endif } return (buf); @@ -509,7 +515,7 @@ fil_decompress_page( ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: We try to uncompress corrupted page" - " CRC %lu type %lu len %lu.", + " CRC " ULINTPF " type " ULINTPF " len " ULINTPF ".", mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM), mach_read_from_2(buf+FIL_PAGE_TYPE), len); @@ -533,7 +539,7 @@ fil_decompress_page( if (actual_size == 0 || actual_size > UNIV_PAGE_SIZE) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: We try to uncompress corrupted page" - " actual size %lu compression %s.", + " actual size " ULINTPF " compression %s.", actual_size, fil_get_compression_alg_name(compression_alg)); fflush(stderr); if (return_error) { @@ -548,12 +554,9 @@ fil_decompress_page( *write_size = actual_size; } -#ifdef UNIV_PAGECOMPRESS_DEBUG - ib_logf(IB_LOG_LEVEL_INFO, - "Preparing for decompress for len %lu\n", - actual_size); -#endif /* UNIV_PAGECOMPRESS_DEBUG */ - + DBUG_PRINT("compress", + ("Preparing for decompress for len " ULINTPF ".", + actual_size)); switch(compression_alg) { case PAGE_ZLIB_ALGORITHM: @@ -565,7 +568,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but uncompress failed with error %d " - " size %lu len %lu.", + " size " ULINTPF " len " ULINTPF ".", err, actual_size, len); fflush(stderr); @@ -584,9 +587,10 @@ fil_decompress_page( if (err != (int)actual_size) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %d bytes " - " size %lu len %lu.", + " but uncompress failed with error %d " + " size " ULINTPF " len " ULINTPF ".", err, actual_size, len); + fflush(stderr); if (return_error) { @@ -598,16 +602,17 @@ fil_decompress_page( #endif /* HAVE_LZ4 */ #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: { - ulint olen=0; + ulint olen = 0; err = lzo1x_decompress((const unsigned char *)buf+header_len, actual_size,(unsigned char *)in_buf, &olen, NULL); if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %ld bytes" - " size %lu len %lu.", - olen, actual_size, len); + " but uncompress 
failed with error %d " + " size " ULINTPF " len " ULINTPF ".", + err, actual_size, len); + fflush(stderr); if (return_error) { @@ -642,7 +647,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but decompression read only %ld bytes" - " size %lu len %lu.", + " size " ULINTPF "len " ULINTPF ".", dst_pos, actual_size, len); fflush(stderr); @@ -671,7 +676,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but decompression read only %du bytes" - " size %lu len %lu err %d.", + " size " ULINTPF " len " ULINTPF " err %d.", dst_pos, actual_size, len, err); fflush(stderr); @@ -687,7 +692,7 @@ fil_decompress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; - ulint olen = 0; + ulint olen = UNIV_PAGE_SIZE; cstatus = snappy_uncompress( (const char *)(buf+header_len), @@ -695,11 +700,11 @@ fil_decompress_page( (char *)in_buf, (size_t*)&olen); - if (cstatus != SNAPPY_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { + if (cstatus != SNAPPY_OK || olen != UNIV_PAGE_SIZE) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %lu bytes" - " size %lu len %lu err %d.", + " but decompression read only " ULINTPF " bytes" + " size " ULINTPF " len " ULINTPF " err %d.", olen, actual_size, len, (int)cstatus); fflush(stderr); @@ -708,6 +713,7 @@ fil_decompress_page( } ut_error; } + break; } #endif /* HAVE_SNAPPY */ @@ -733,8 +739,7 @@ fil_decompress_page( memcpy(buf, in_buf, len); error_return: - // Need to free temporal buffer if no buffer was given - if (page_buf == NULL) { + if (page_buf != in_buf) { ut_free(in_buf); } } diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 1301e112078..cabc6f64150 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -1545,20 +1545,13 @@ directory (buf) to see it. Do not use from outside! */ typedef struct { bool reserved; /*!< true if this slot is reserved */ -#ifdef HAVE_LZO - byte* lzo_mem; /*!< Temporal memory used by LZO */ -#endif byte* crypt_buf; /*!< for encryption the data needs to be copied to a separate buffer before it's encrypted&written. this as a page can be read while it's being flushed */ - byte* crypt_buf_free; /*!< for encryption, allocated buffer - that is then alligned */ byte* comp_buf; /*!< for compression we need temporal buffer because page can be read while it's being flushed */ - byte* comp_buf_free; /*!< for compression, allocated - buffer that is then alligned */ byte* out_buf; /*!< resulting buffer after encryption/compression. This is a pointer and not allocated. 
*/ diff --git a/storage/innobase/include/fil0pagecompress.h b/storage/innobase/include/fil0pagecompress.h index 73667c5420e..03e16699ce3 100644 --- a/storage/innobase/include/fil0pagecompress.h +++ b/storage/innobase/include/fil0pagecompress.h @@ -65,9 +65,8 @@ fil_compress_page( ulint level, /* in: compression level */ ulint block_size, /*!< in: block size */ bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len, /*!< out: actual length of compressed + ulint* out_len); /*!< out: actual length of compressed page */ - byte* lzo_mem); /*!< in: temporal memory used by LZO */ /****************************************************************//** For page compressed pages decompress the page after actual read diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 0226b6738d4..8ee85d0ecd8 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -65,6 +65,18 @@ Created 11/5/1995 Heikki Tuuri #include "fil0pagecompress.h" #include "ha_prototypes.h" +#ifdef UNIV_LINUX +#include +#endif + +#ifdef HAVE_LZO +#include "lzo/lzo1x.h" +#endif + +#ifdef HAVE_SNAPPY +#include "snappy-c.h" +#endif + /** Decrypt a page. @param[in,out] bpage Page control block @param[in,out] space tablespace @@ -77,6 +89,26 @@ buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) /* prototypes for new functions added to ha_innodb.cc */ trx_t* innobase_get_trx(); +inline void* aligned_malloc(size_t size, size_t align) { + void *result; +#ifdef _MSC_VER + result = _aligned_malloc(size, align); +#else + if(posix_memalign(&result, align, size)) { + result = 0; + } +#endif + return result; +} + +inline void aligned_free(void *ptr) { +#ifdef _MSC_VER + _aligned_free(ptr); +#else + free(ptr); +#endif +} + static inline void _increment_page_get_statistics(buf_block_t* block, trx_t* trx) @@ -108,10 +140,6 @@ _increment_page_get_statistics(buf_block_t* block, trx_t* trx) return; } -#ifdef HAVE_LZO -#include "lzo/lzo1x.h" -#endif - /* IMPLEMENTATION OF THE BUFFER POOL ================================= @@ -1639,20 +1667,14 @@ buf_pool_free_instance( if (buf_pool->tmp_arr) { for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) { buf_tmp_buffer_t* slot = &(buf_pool->tmp_arr->slots[i]); -#ifdef HAVE_LZO - if (slot && slot->lzo_mem) { - ut_free(slot->lzo_mem); - slot->lzo_mem = NULL; - } -#endif - if (slot && slot->crypt_buf_free) { - ut_free(slot->crypt_buf_free); - slot->crypt_buf_free = NULL; + if (slot && slot->crypt_buf) { + aligned_free(slot->crypt_buf); + slot->crypt_buf = NULL; } - if (slot && slot->comp_buf_free) { - ut_free(slot->comp_buf_free); - slot->comp_buf_free = NULL; + if (slot && slot->comp_buf) { + aligned_free(slot->comp_buf); + slot->comp_buf = NULL; } } } @@ -6171,22 +6193,27 @@ buf_pool_reserve_tmp_slot( buf_pool_mutex_exit(buf_pool); /* Allocate temporary memory for encryption/decryption */ - if (free_slot->crypt_buf_free == NULL) { - free_slot->crypt_buf_free = static_cast(ut_malloc(UNIV_PAGE_SIZE*2)); - free_slot->crypt_buf = static_cast(ut_align(free_slot->crypt_buf_free, UNIV_PAGE_SIZE)); - memset(free_slot->crypt_buf_free, 0, UNIV_PAGE_SIZE *2); + if (free_slot->crypt_buf == NULL) { + free_slot->crypt_buf = static_cast(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE)); + memset(free_slot->crypt_buf, 0, UNIV_PAGE_SIZE); } /* For page compressed tables allocate temporary memory for compression/decompression */ - if (compressed && free_slot->comp_buf_free == NULL) { - free_slot->comp_buf_free = static_cast(ut_malloc(UNIV_PAGE_SIZE*2)); - 
free_slot->comp_buf = static_cast(ut_align(free_slot->comp_buf_free, UNIV_PAGE_SIZE)); - memset(free_slot->comp_buf_free, 0, UNIV_PAGE_SIZE *2); -#ifdef HAVE_LZO - free_slot->lzo_mem = static_cast(ut_malloc(LZO1X_1_15_MEM_COMPRESS)); - memset(free_slot->lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); + if (compressed && free_slot->comp_buf == NULL) { + ulint size = UNIV_PAGE_SIZE; + + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. */ +#if HAVE_SNAPPY + size = snappy_max_compressed_length(size); +#endif +#if HAVE_LZO + size += LZO1X_1_15_MEM_COMPRESS; #endif + free_slot->comp_buf = static_cast(aligned_malloc(size, UNIV_PAGE_SIZE)); + memset(free_slot->comp_buf, 0, size); } return (free_slot); @@ -6274,8 +6301,7 @@ buf_page_encrypt_before_write( fsp_flags_get_page_compression_level(space->flags), fil_space_get_block_size(space, bpage->offset), encrypted, - &out_len, - IF_LZO(slot->lzo_mem, NULL)); + &out_len); bpage->real_size = out_len; diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index fc46d2b71e9..09ba89459ad 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -7090,8 +7090,7 @@ fil_iterate( 0,/* FIXME: compression level */ 512,/* FIXME: use proper block size */ encrypted, - &len, - NULL); + &len); updated = true; } diff --git a/storage/xtradb/fil/fil0pagecompress.cc b/storage/xtradb/fil/fil0pagecompress.cc index c377c19dd0d..2b6ae95640f 100644 --- a/storage/xtradb/fil/fil0pagecompress.cc +++ b/storage/xtradb/fil/fil0pagecompress.cc @@ -99,17 +99,16 @@ fil_compress_page( ulint level, /* in: compression level */ ulint block_size, /*!< in: block size */ bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len, /*!< out: actual length of compressed + ulint* out_len) /*!< out: actual length of compressed page */ - byte* lzo_mem) /*!< in: temporal memory used by LZO */ { int err = Z_OK; int comp_level = level; ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; - ulint write_size=0; + ulint write_size = 0; /* Cache to avoid change during function execution */ ulint comp_method = innodb_compression_algorithm; - bool allocated=false; + bool allocated = false; /* page_compression does not apply to tables or tablespaces that use ROW_FORMAT=COMPRESSED */ @@ -121,13 +120,23 @@ fil_compress_page( if (!out_buf) { allocated = true; - out_buf = static_cast(ut_malloc(UNIV_PAGE_SIZE)); -#ifdef HAVE_LZO + ulint size = UNIV_PAGE_SIZE; + + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. 
*/ +#if HAVE_SNAPPY + if (comp_method == PAGE_SNAPPY_ALGORITHM) { + size = snappy_max_compressed_length(size); + } +#endif +#if HAVE_LZO if (comp_method == PAGE_LZO_ALGORITHM) { - lzo_mem = static_cast(ut_malloc(LZO1X_1_15_MEM_COMPRESS)); - memset(lzo_mem, 0, LZO1X_1_15_MEM_COMPRESS); + size += LZO1X_1_15_MEM_COMPRESS; } #endif + + out_buf = static_cast(ut_malloc(size)); } ut_ad(buf); @@ -163,6 +172,7 @@ fil_compress_page( switch(comp_method) { #ifdef HAVE_LZ4 case PAGE_LZ4_ALGORITHM: + #ifdef HAVE_LZ4_COMPRESS_DEFAULT err = LZ4_compress_default((const char *)buf, (char *)out_buf+header_len, len, write_size); @@ -197,7 +207,7 @@ fil_compress_page( #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: err = lzo1x_1_15_compress( - buf, len, out_buf+header_len, &write_size, lzo_mem); + buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE); if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) { if (space && !space->printed_compression_failure) { @@ -288,6 +298,7 @@ fil_compress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; + write_size = snappy_max_compressed_length(UNIV_PAGE_SIZE); cstatus = snappy_compress( (const char *)buf, @@ -443,11 +454,6 @@ fil_compress_page( err_exit: if (allocated) { ut_free(out_buf); -#ifdef HAVE_LZO - if (comp_method == PAGE_LZO_ALGORITHM) { - ut_free(lzo_mem); - } -#endif } return (buf); @@ -509,7 +515,7 @@ fil_decompress_page( ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: We try to uncompress corrupted page" - " CRC %lu type %lu len %lu.", + " CRC " ULINTPF " type " ULINTPF " len " ULINTPF ".", mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM), mach_read_from_2(buf+FIL_PAGE_TYPE), len); @@ -533,7 +539,7 @@ fil_decompress_page( if (actual_size == 0 || actual_size > UNIV_PAGE_SIZE) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: We try to uncompress corrupted page" - " actual size %lu compression %s.", + " actual size " ULINTPF " compression %s.", actual_size, fil_get_compression_alg_name(compression_alg)); fflush(stderr); if (return_error) { @@ -548,12 +554,9 @@ fil_decompress_page( *write_size = actual_size; } -#ifdef UNIV_PAGECOMPRESS_DEBUG - ib_logf(IB_LOG_LEVEL_INFO, - "Preparing for decompress for len %lu\n", - actual_size); -#endif /* UNIV_PAGECOMPRESS_DEBUG */ - + DBUG_PRINT("compress", + ("Preparing for decompress for len " ULINTPF ".", + actual_size)); switch(compression_alg) { case PAGE_ZLIB_ALGORITHM: @@ -565,7 +568,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but uncompress failed with error %d " - " size %lu len %lu.", + " size " ULINTPF " len " ULINTPF ".", err, actual_size, len); fflush(stderr); @@ -584,9 +587,10 @@ fil_decompress_page( if (err != (int)actual_size) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %d bytes " - " size %lu len %lu.", + " but uncompress failed with error %d " + " size " ULINTPF " len " ULINTPF ".", err, actual_size, len); + fflush(stderr); if (return_error) { @@ -598,16 +602,17 @@ fil_decompress_page( #endif /* HAVE_LZ4 */ #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: { - ulint olen=0; + ulint olen = 0; err = lzo1x_decompress((const unsigned char *)buf+header_len, actual_size,(unsigned char *)in_buf, &olen, NULL); if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %ld bytes" - " size %lu len %lu.", - olen, actual_size, len); + " but uncompress 
failed with error %d " + " size " ULINTPF " len " ULINTPF ".", + err, actual_size, len); + fflush(stderr); if (return_error) { @@ -642,7 +647,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but decompression read only %ld bytes" - " size %lu len %lu.", + " size " ULINTPF "len " ULINTPF ".", dst_pos, actual_size, len); fflush(stderr); @@ -671,7 +676,7 @@ fil_decompress_page( ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" " but decompression read only %du bytes" - " size %lu len %lu err %d.", + " size " ULINTPF " len " ULINTPF " err %d.", dst_pos, actual_size, len, err); fflush(stderr); @@ -687,7 +692,7 @@ fil_decompress_page( case PAGE_SNAPPY_ALGORITHM: { snappy_status cstatus; - ulint olen = 0; + ulint olen = UNIV_PAGE_SIZE; cstatus = snappy_uncompress( (const char *)(buf+header_len), @@ -695,11 +700,11 @@ fil_decompress_page( (char *)in_buf, (size_t*)&olen); - if (cstatus != SNAPPY_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { + if (cstatus != SNAPPY_OK || olen != UNIV_PAGE_SIZE) { ib_logf(IB_LOG_LEVEL_ERROR, "Corruption: Page is marked as compressed" - " but decompression read only %lu bytes" - " size %lu len %lu err %d.", + " but decompression read only " ULINTPF " bytes" + " size " ULINTPF " len " ULINTPF " err %d.", olen, actual_size, len, (int)cstatus); fflush(stderr); @@ -708,6 +713,7 @@ fil_decompress_page( } ut_error; } + break; } #endif /* HAVE_SNAPPY */ @@ -733,8 +739,7 @@ fil_decompress_page( memcpy(buf, in_buf, len); error_return: - // Need to free temporal buffer if no buffer was given - if (page_buf == NULL) { + if (page_buf != in_buf) { ut_free(in_buf); } } diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h index 9b4276efaa8..1899165ace0 100644 --- a/storage/xtradb/include/buf0buf.h +++ b/storage/xtradb/include/buf0buf.h @@ -1577,20 +1577,13 @@ directory (buf) to see it. Do not use from outside! */ typedef struct { bool reserved; /*!< true if this slot is reserved */ -#ifdef HAVE_LZO - byte* lzo_mem; /*!< Temporal memory used by LZO */ -#endif byte* crypt_buf; /*!< for encryption the data needs to be copied to a separate buffer before it's encrypted&written. this as a page can be read while it's being flushed */ - byte* crypt_buf_free; /*!< for encryption, allocated buffer - that is then alligned */ byte* comp_buf; /*!< for compression we need temporal buffer because page can be read while it's being flushed */ - byte* comp_buf_free; /*!< for compression, allocated - buffer that is then alligned */ byte* out_buf; /*!< resulting buffer after encryption/compression. This is a pointer and not allocated. 
*/ diff --git a/storage/xtradb/include/fil0pagecompress.h b/storage/xtradb/include/fil0pagecompress.h index 73667c5420e..03e16699ce3 100644 --- a/storage/xtradb/include/fil0pagecompress.h +++ b/storage/xtradb/include/fil0pagecompress.h @@ -65,9 +65,8 @@ fil_compress_page( ulint level, /* in: compression level */ ulint block_size, /*!< in: block size */ bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len, /*!< out: actual length of compressed + ulint* out_len); /*!< out: actual length of compressed page */ - byte* lzo_mem); /*!< in: temporal memory used by LZO */ /****************************************************************//** For page compressed pages decompress the page after actual read -- cgit v1.2.1 From a1b6128dedb4419db9fadaf94c356d3477d4e06f Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Mon, 22 May 2017 18:27:44 +0000 Subject: MDEV-11264 Fix DeviceIoControl() usage in innodb. As documented in MSDN, DeviceIoControl() needs valid (not NULL) OVERLAPPED parameter, for files opened with for OVERLAPPED access. --- storage/xtradb/os/os0file.cc | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'storage') diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index 34336a4bb7d..20b202506f5 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -6423,22 +6423,27 @@ os_file_trim( flt.Ranges[0].Offset = off; flt.Ranges[0].Length = trim_len; + OVERLAPPED overlapped = { 0 }; + overlapped.hEvent = win_get_syncio_event(); BOOL ret = DeviceIoControl(slot->file, FSCTL_FILE_LEVEL_TRIM, - &flt, sizeof(flt), NULL, NULL, NULL, NULL); - + &flt, sizeof(flt), NULL, NULL, NULL, &overlapped); + DWORD tmp; + if (ret) { + ret = GetOverlappedResult(slot->file, &overlapped, &tmp, FALSE); + } + else if (GetLastError() == ERROR_IO_PENDING) { + ret = GetOverlappedResult(slot->file, &overlapped, &tmp, TRUE); + } if (!ret) { + DWORD last_error = GetLastError(); /* After first failure do not try to trim again */ os_fallocate_failed = true; srv_use_trim = FALSE; ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: Warning: fallocate call failed with error.\n" - " InnoDB: start: %lu len: %lu payload: %lu\n" - " InnoDB: Disabling fallocate for now.\n", off, trim_len, len); - os_file_handle_error_no_exit(slot->name, - " DeviceIOControl(FSCTL_FILE_LEVEL_TRIM) ", - FALSE, __FILE__, __LINE__); + fprintf(stderr, + " InnoDB: Warning: DeviceIoControl(FSCTL_FILE_LEVEL_TRIM) call failed with error %u%s. Disabling trimming.\n", + last_error, last_error == ERROR_NOT_SUPPORTED ? "(ERROR_NOT_SUPPORTED)" : ""); if (slot->write_size) { *slot->write_size = 0; -- cgit v1.2.1 From 7e0f40b33342135ed3e203314696dd75c1558be0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 23 May 2017 18:37:55 +0300 Subject: =?UTF-8?q?MDEV-12625=20total=5Findex=5Fblocks=20is=20uninitialize?= =?UTF-8?q?d=20in=20ALTER=20TABLE=E2=80=A6ALGORITHM=3DINPLACE=20of=20small?= =?UTF-8?q?=20tables?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before MDEV-6812, it did not matter that merge_files[].offset was uninitialized when no files were created. This problem was introduced in MDEV-6812. There could be a user-visible impact that the progress reports spit into the error log are bogus. row_merge_build_indexes(): Initialize merge_files[i].offset. 
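
A minimal standalone sketch of the bug pattern (toy types only; this is not
the actual InnoDB code, and merge_file_t is reduced here to the two members
that matter): summing an uninitialized offset member yields a bogus block
count, and the one-line initialization in the setup loop is the fix.

    // Simplified model: without the "offset = 0" line the member would hold
    // indeterminate garbage and the derived progress figure would be bogus.
    #include <cstdio>

    struct merge_file_t {
        int           fd;      // file descriptor, -1 if unused
        unsigned long offset;  // number of blocks written so far
    };

    int main()
    {
        const unsigned n_indexes = 3;
        merge_file_t   merge_files[n_indexes];

        for (unsigned i = 0; i < n_indexes; i++) {
            merge_files[i].fd     = -1;
            merge_files[i].offset = 0;   // the added initialization
        }

        unsigned long total_index_blocks = 0;
        for (unsigned i = 0; i < n_indexes; i++) {
            total_index_blocks += merge_files[i].offset;
        }

        // Small tables (no merge files created) now report 0 pending
        // blocks instead of whatever happened to be on the stack.
        printf("blocks to merge: %lu\n", total_index_blocks);
        return 0;
    }
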
--- storage/innobase/row/row0merge.cc | 1 + storage/xtradb/row/row0merge.cc | 1 + 2 files changed, 2 insertions(+) (limited to 'storage') diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 9cd79bdd523..85e053de961 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -4035,6 +4035,7 @@ row_merge_build_indexes( for (i = 0; i < n_indexes; i++) { merge_files[i].fd = -1; + merge_files[i].offset = 0; } total_static_cost = COST_BUILD_INDEX_STATIC * n_indexes + COST_READ_CLUSTERED_INDEX; diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index 42033983cfa..16348f3c8dd 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -4037,6 +4037,7 @@ row_merge_build_indexes( for (i = 0; i < n_indexes; i++) { merge_files[i].fd = -1; + merge_files[i].offset = 0; } total_static_cost = COST_BUILD_INDEX_STATIC * n_indexes + COST_READ_CLUSTERED_INDEX; -- cgit v1.2.1 From fdc1fd6f49b43d1226785e71b3a2c851ac3bfb47 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 20 May 2017 14:04:03 +0200 Subject: MDEV-11311 Create federated table does not work as expected. FederatedX wasn't discovering prefix keys correctly. Of course, as it had the HA_NO_PREFIX_CHAR_KEYS table_flag set... --- storage/federatedx/ha_federatedx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h index 2c2c6eef26b..f8531014ee4 100644 --- a/storage/federatedx/ha_federatedx.h +++ b/storage/federatedx/ha_federatedx.h @@ -330,7 +330,7 @@ public: return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS | HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_CAN_REPAIR | - HA_NO_PREFIX_CHAR_KEYS | HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | + HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY); } /* -- cgit v1.2.1 From fc89f5fd40d7be54f1d4f7017a8f46f3be9fa902 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 20 May 2017 19:14:06 +0200 Subject: MDEV-11335 Changing delay_key_write option for MyISAM table should not copy rows Don't rebuild the table for ALTER TABLE delay_key_write changes. After that, delay_key_write value in .frm may differ from the value in .MYI. We'll do what .frm says. --- storage/myisam/ha_myisam.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'storage') diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 4427db79da1..b335d360379 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -825,6 +825,10 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) /* Count statistics of usage for newly open normal files */ if (file->s->reopen == 1 && ! 
(test_if_locked & HA_OPEN_TMP_TABLE)) { + /* use delay_key_write from .frm, not .MYI */ + file->s->delay_key_write= delay_key_write_options == DELAY_KEY_WRITE_ALL || + (delay_key_write_options == DELAY_KEY_WRITE_ON && + table->s->db_create_options & HA_OPTION_DELAY_KEY_WRITE); if (file->s->delay_key_write) feature_files_opened_with_delayed_keys++; } @@ -2269,10 +2273,8 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *create_info, table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet return COMPATIBLE_DATA_NO; - if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM | - HA_OPTION_DELAY_KEY_WRITE)) != - (create_info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM | - HA_OPTION_DELAY_KEY_WRITE))) + if ((options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM)) != + (create_info->table_options & (HA_OPTION_PACK_RECORD | HA_OPTION_CHECKSUM))) return COMPATIBLE_DATA_NO; return COMPATIBLE_DATA_YES; } -- cgit v1.2.1 From 449a88e1c629b742e8be152c896c65a8bda5c817 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 23 May 2017 12:17:43 +0300 Subject: MDEV-12052 Shutdown crash presumably due to master thread activity InnoDB shutdown assumes that once the server has entered SRV_SHUTDOWN_FLUSH_PHASE, no change to persistent data is allowed. It was possible for the master thread to wake up while shutdown is executing in SRV_SHUTDOWN_FLUSH_PHASE or even in SRV_SHUTDOWN_LAST_PHASE. We do not yet know if further crashes at shutdown are possible. Also, we do not know if all the observed crashes could be explained by the race conditions that we are now fixing. srv_shutdown_print_master_pending(): Remove a redundant ut_time() call. srv_shutdown(): Renamed from srv_master_do_shutdown_tasks(). srv_master_thread(): Do not resume after shutdown has been initiated. 
--- storage/innobase/srv/srv0srv.cc | 147 ++++++++++++++++------------------------ storage/xtradb/srv/srv0srv.cc | 147 ++++++++++++++++------------------------ 2 files changed, 118 insertions(+), 176 deletions(-) (limited to 'storage') diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index fd85b8500b8..93ed302a236 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1715,7 +1715,7 @@ loop: } } - if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { goto exit_func; } @@ -1845,7 +1845,7 @@ loop: os_event_wait_time_low(srv_error_event, 1000000, sig_count); - if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) { + if (srv_shutdown_state == SRV_SHUTDOWN_NONE) { goto loop; } @@ -2105,7 +2105,7 @@ srv_shutdown_print_master_pending( time_elapsed = ut_difftime(current_time, *last_print_time); if (time_elapsed > 60) { - *last_print_time = ut_time(); + *last_print_time = current_time; if (n_tables_to_drop) { ut_print_timestamp(stderr); @@ -2157,7 +2157,7 @@ srv_master_do_active_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_BACKGROUND_DROP_TABLE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2191,11 +2191,7 @@ srv_master_do_active_tasks(void) MONITOR_SRV_MEM_VALIDATE_MICROSECOND, counter_time); } #endif - if (srv_shutdown_state > 0) { - return; - } - - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2206,7 +2202,7 @@ srv_master_do_active_tasks(void) MONITOR_SRV_DICT_LRU_MICROSECOND, counter_time); } - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2249,7 +2245,7 @@ srv_master_do_idle_tasks(void) MONITOR_SRV_BACKGROUND_DROP_TABLE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2265,7 +2261,7 @@ srv_master_do_idle_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2279,7 +2275,7 @@ srv_master_do_idle_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_LOG_FLUSH_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2290,70 +2286,42 @@ srv_master_do_idle_tasks(void) counter_time); } -/*********************************************************************//** -Perform the tasks during shutdown. The tasks that we do at shutdown -depend on srv_fast_shutdown: -2 => very fast shutdown => do no book keeping -1 => normal shutdown => clear drop table queue and make checkpoint -0 => slow shutdown => in addition to above do complete purge and ibuf -merge -@return TRUE if some work was done. FALSE otherwise */ +/** Perform shutdown tasks. 
+@param[in] ibuf_merge whether to complete the change buffer merge */ static -ibool -srv_master_do_shutdown_tasks( -/*=========================*/ - ib_time_t* last_print_time)/*!< last time the function - print the message */ +void +srv_shutdown(bool ibuf_merge) { - ulint n_bytes_merged = 0; - ulint n_tables_to_drop = 0; - - ut_ad(!srv_read_only_mode); - - ++srv_main_shutdown_loops; - - ut_a(srv_shutdown_state > 0); - - /* In very fast shutdown none of the following is necessary */ - if (srv_fast_shutdown == 2) { - return(FALSE); - } - - /* ALTER TABLE in MySQL requires on Unix that the table handler - can drop tables lazily after there no longer are SELECT - queries to them. */ - srv_main_thread_op_info = "doing background drop tables"; - n_tables_to_drop = row_drop_tables_for_mysql_in_background(); - - /* make sure that there is enough reusable space in the redo - log files */ - srv_main_thread_op_info = "checking free log space"; - log_free_check(); + ulint n_bytes_merged = 0; + ulint n_tables_to_drop; + ib_time_t now = ut_time(); - /* In case of normal shutdown we don't do ibuf merge or purge */ - if (srv_fast_shutdown == 1) { - goto func_exit; - } - - /* Do an ibuf merge */ - srv_main_thread_op_info = "doing insert buffer merge"; - n_bytes_merged = ibuf_merge_in_background(true); - - /* Flush logs if needed */ - srv_sync_log_buffer_in_background(); - -func_exit: - /* Make a new checkpoint about once in 10 seconds */ - srv_main_thread_op_info = "making checkpoint"; - log_checkpoint(TRUE, FALSE); - - /* Print progress message every 60 seconds during shutdown */ - if (srv_shutdown_state > 0 && srv_print_verbose_log) { - srv_shutdown_print_master_pending( - last_print_time, n_tables_to_drop, n_bytes_merged); - } + do { + ut_ad(!srv_read_only_mode); + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_CLEANUP); + ++srv_main_shutdown_loops; + + /* FIXME: Remove the background DROP TABLE queue; it is not + crash-safe and breaks ACID. 
*/ + srv_main_thread_op_info = "doing background drop tables"; + n_tables_to_drop = row_drop_tables_for_mysql_in_background(); + + if (ibuf_merge) { + srv_main_thread_op_info = "checking free log space"; + log_free_check(); + srv_main_thread_op_info = "doing insert buffer merge"; + n_bytes_merged = ibuf_merge_in_background(true); + + /* Flush logs if needed */ + srv_sync_log_buffer_in_background(); + } - return(n_bytes_merged || n_tables_to_drop); + /* Print progress message every 60 seconds during shutdown */ + if (srv_print_verbose_log) { + srv_shutdown_print_master_pending( + &now, n_tables_to_drop, n_bytes_merged); + } + } while (n_bytes_merged || n_tables_to_drop); } /*********************************************************************//** @@ -2385,7 +2353,6 @@ DECLARE_THREAD(srv_master_thread)( srv_slot_t* slot; ulint old_activity_count = srv_get_activity_count(); - ib_time_t last_print_time; ut_ad(!srv_read_only_mode); @@ -2404,7 +2371,6 @@ DECLARE_THREAD(srv_master_thread)( slot = srv_reserve_slot(SRV_MASTER); ut_a(slot == srv_sys.sys_threads); - last_print_time = ut_time(); loop: if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) { goto suspend_thread; @@ -2424,13 +2390,26 @@ loop: } } - while (srv_master_do_shutdown_tasks(&last_print_time)) { - - /* Shouldn't loop here in case of very fast shutdown */ - ut_ad(srv_fast_shutdown < 2); +suspend_thread: + switch (srv_shutdown_state) { + case SRV_SHUTDOWN_NONE: + break; + case SRV_SHUTDOWN_FLUSH_PHASE: + case SRV_SHUTDOWN_LAST_PHASE: + ut_ad(0); + /* fall through */ + case SRV_SHUTDOWN_EXIT_THREADS: + /* srv_init_abort() must have been invoked */ + case SRV_SHUTDOWN_CLEANUP: + if (srv_shutdown_state == SRV_SHUTDOWN_CLEANUP + && srv_fast_shutdown < 2) { + srv_shutdown(srv_fast_shutdown == 1); + } + srv_suspend_thread(slot); + my_thread_end(); + os_thread_exit(NULL); } -suspend_thread: srv_main_thread_op_info = "suspending"; srv_suspend_thread(slot); @@ -2442,15 +2421,7 @@ suspend_thread: srv_main_thread_op_info = "waiting for server activity"; srv_resume_thread(slot); - - if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) { - my_thread_end(); - os_thread_exit(NULL); - } - goto loop; - - OS_THREAD_DUMMY_RETURN; /* Not reached, avoid compiler warning */ } /*********************************************************************//** diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index bd1bc2c7131..d18abbe0574 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -2147,7 +2147,7 @@ loop: } } - if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { goto exit_func; } @@ -2277,7 +2277,7 @@ loop: os_event_wait_time_low(srv_error_event, 1000000, sig_count); - if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) { + if (srv_shutdown_state == SRV_SHUTDOWN_NONE) { goto loop; } @@ -2756,7 +2756,7 @@ srv_shutdown_print_master_pending( time_elapsed = ut_difftime(current_time, *last_print_time); if (time_elapsed > 60) { - *last_print_time = ut_time(); + *last_print_time = current_time; if (n_tables_to_drop) { ut_print_timestamp(stderr); @@ -2808,7 +2808,7 @@ srv_master_do_active_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_BACKGROUND_DROP_TABLE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2842,11 +2842,7 @@ srv_master_do_active_tasks(void) MONITOR_SRV_MEM_VALIDATE_MICROSECOND, counter_time); } #endif - if (srv_shutdown_state > 0) { - return; - } - - if (srv_shutdown_state 
> 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2857,7 +2853,7 @@ srv_master_do_active_tasks(void) MONITOR_SRV_DICT_LRU_MICROSECOND, counter_time); } - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2900,7 +2896,7 @@ srv_master_do_idle_tasks(void) MONITOR_SRV_BACKGROUND_DROP_TABLE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2916,7 +2912,7 @@ srv_master_do_idle_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2930,7 +2926,7 @@ srv_master_do_idle_tasks(void) MONITOR_INC_TIME_IN_MICRO_SECS( MONITOR_SRV_LOG_FLUSH_MICROSECOND, counter_time); - if (srv_shutdown_state > 0) { + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { return; } @@ -2951,70 +2947,42 @@ srv_master_do_idle_tasks(void) } } -/*********************************************************************//** -Perform the tasks during shutdown. The tasks that we do at shutdown -depend on srv_fast_shutdown: -2 => very fast shutdown => do no book keeping -1 => normal shutdown => clear drop table queue and make checkpoint -0 => slow shutdown => in addition to above do complete purge and ibuf -merge -@return TRUE if some work was done. FALSE otherwise */ +/** Perform shutdown tasks. +@param[in] ibuf_merge whether to complete the change buffer merge */ static -ibool -srv_master_do_shutdown_tasks( -/*=========================*/ - ib_time_t* last_print_time)/*!< last time the function - print the message */ +void +srv_shutdown(bool ibuf_merge) { - ulint n_bytes_merged = 0; - ulint n_tables_to_drop = 0; - - ut_ad(!srv_read_only_mode); - - ++srv_main_shutdown_loops; + ulint n_bytes_merged = 0; + ulint n_tables_to_drop; + ib_time_t now = ut_time(); - ut_a(srv_shutdown_state > 0); - - /* In very fast shutdown none of the following is necessary */ - if (srv_fast_shutdown == 2) { - return(FALSE); - } - - /* ALTER TABLE in MySQL requires on Unix that the table handler - can drop tables lazily after there no longer are SELECT - queries to them. */ - srv_main_thread_op_info = "doing background drop tables"; - n_tables_to_drop = row_drop_tables_for_mysql_in_background(); - - /* make sure that there is enough reusable space in the redo - log files */ - srv_main_thread_op_info = "checking free log space"; - log_free_check(); - - /* In case of normal shutdown we don't do ibuf merge or purge */ - if (srv_fast_shutdown == 1) { - goto func_exit; - } - - /* Do an ibuf merge */ - srv_main_thread_op_info = "doing insert buffer merge"; - n_bytes_merged = ibuf_merge_in_background(true); - - /* Flush logs if needed */ - srv_sync_log_buffer_in_background(); - -func_exit: - /* Make a new checkpoint about once in 10 seconds */ - srv_main_thread_op_info = "making checkpoint"; - log_checkpoint(TRUE, FALSE, FALSE); - - /* Print progress message every 60 seconds during shutdown */ - if (srv_shutdown_state > 0 && srv_print_verbose_log) { - srv_shutdown_print_master_pending( - last_print_time, n_tables_to_drop, n_bytes_merged); - } + do { + ut_ad(!srv_read_only_mode); + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_CLEANUP); + ++srv_main_shutdown_loops; + + /* FIXME: Remove the background DROP TABLE queue; it is not + crash-safe and breaks ACID. 
*/ + srv_main_thread_op_info = "doing background drop tables"; + n_tables_to_drop = row_drop_tables_for_mysql_in_background(); + + if (ibuf_merge) { + srv_main_thread_op_info = "checking free log space"; + log_free_check(); + srv_main_thread_op_info = "doing insert buffer merge"; + n_bytes_merged = ibuf_merge_in_background(true); + + /* Flush logs if needed */ + srv_sync_log_buffer_in_background(); + } - return(n_bytes_merged || n_tables_to_drop); + /* Print progress message every 60 seconds during shutdown */ + if (srv_print_verbose_log) { + srv_shutdown_print_master_pending( + &now, n_tables_to_drop, n_bytes_merged); + } + } while (n_bytes_merged || n_tables_to_drop); } /*********************************************************************//** @@ -3048,7 +3016,6 @@ DECLARE_THREAD(srv_master_thread)( ulint old_activity_count = srv_get_activity_count(); ulint old_ibuf_merge_activity_count = srv_get_ibuf_merge_activity_count(); - ib_time_t last_print_time; ut_ad(!srv_read_only_mode); @@ -3071,7 +3038,6 @@ DECLARE_THREAD(srv_master_thread)( slot = srv_reserve_slot(SRV_MASTER); ut_a(slot == srv_sys.sys_threads); - last_print_time = ut_time(); loop: if (srv_force_recovery >= SRV_FORCE_NO_BACKGROUND) { goto suspend_thread; @@ -3097,13 +3063,26 @@ loop: } } - while (srv_master_do_shutdown_tasks(&last_print_time)) { - - /* Shouldn't loop here in case of very fast shutdown */ - ut_ad(srv_fast_shutdown < 2); +suspend_thread: + switch (srv_shutdown_state) { + case SRV_SHUTDOWN_NONE: + break; + case SRV_SHUTDOWN_FLUSH_PHASE: + case SRV_SHUTDOWN_LAST_PHASE: + ut_ad(0); + /* fall through */ + case SRV_SHUTDOWN_EXIT_THREADS: + /* srv_init_abort() must have been invoked */ + case SRV_SHUTDOWN_CLEANUP: + if (srv_shutdown_state == SRV_SHUTDOWN_CLEANUP + && srv_fast_shutdown < 2) { + srv_shutdown(srv_fast_shutdown == 1); + } + srv_suspend_thread(slot); + my_thread_end(); + os_thread_exit(NULL); } -suspend_thread: srv_main_thread_op_info = "suspending"; srv_suspend_thread(slot); @@ -3115,15 +3094,7 @@ suspend_thread: srv_main_thread_op_info = "waiting for server activity"; srv_resume_thread(slot); - - if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) { - my_thread_end(); - os_thread_exit(NULL); - } - goto loop; - - OS_THREAD_DUMMY_RETURN; /* Not reached, avoid compiler warning */ } /*********************************************************************//** -- cgit v1.2.1 From 2f29fc3c1c4262919633ab53c80f195ec67a32e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 23 May 2017 12:17:43 +0300 Subject: 10.1 additions for MDEV-12052 Shutdown crash presumably due to master thread activity btr_defragment_thread(): Create the thread in the same place as other threads. Do not invoke btr_defragment_shutdown(), because row_drop_tables_for_mysql_in_background() in the master thread can still keep invoking btr_defragment_remove_table(). logs_empty_and_mark_files_at_shutdown(): Wait for btr_defragment_thread() to exit. innobase_start_or_create_for_mysql(), innobase_shutdown_for_mysql(): Skip encryption and scrubbing in innodb_read_only_mode. srv_export_innodb_status(): Do not export encryption or scrubbing statistics in innodb_read_only mode, because the threads will not be running. 
--- storage/innobase/btr/btr0defragment.cc | 18 +++++++++--------- storage/innobase/include/btr0defragment.h | 14 ++++++-------- storage/innobase/log/log0log.cc | 7 +++++-- storage/innobase/srv/srv0srv.cc | 8 ++++++-- storage/innobase/srv/srv0start.cc | 17 +++++++++-------- storage/xtradb/btr/btr0defragment.cc | 18 +++++++++--------- storage/xtradb/include/btr0defragment.h | 15 +++++++-------- storage/xtradb/log/log0log.cc | 7 +++++-- storage/xtradb/srv/srv0srv.cc | 8 ++++++-- storage/xtradb/srv/srv0start.cc | 17 +++++++++-------- 10 files changed, 71 insertions(+), 58 deletions(-) (limited to 'storage') diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc index 9933884bbfe..ca4c90eef41 100644 --- a/storage/innobase/btr/btr0defragment.cc +++ b/storage/innobase/btr/btr0defragment.cc @@ -154,7 +154,6 @@ btr_defragment_init() 1000000.0 / srv_defragment_frequency); mutex_create(btr_defragment_mutex_key, &btr_defragment_mutex, SYNC_ANY_LATCH); - os_thread_create(btr_defragment_thread, NULL, NULL); } /******************************************************************//** @@ -735,14 +734,13 @@ btr_defragment_n_pages( return current_block; } -/******************************************************************//** -Thread that merges consecutive b-tree pages into fewer pages to defragment -the index. */ +/** Whether btr_defragment_thread is active */ +bool btr_defragment_thread_active; + +/** Merge consecutive b-tree pages into fewer pages to defragment indexes */ extern "C" UNIV_INTERN os_thread_ret_t -DECLARE_THREAD(btr_defragment_thread)( -/*==========================================*/ - void* arg) /*!< in: work queue */ +DECLARE_THREAD(btr_defragment_thread)(void*) { btr_pcur_t* pcur; btr_cur_t* cursor; @@ -752,6 +750,8 @@ DECLARE_THREAD(btr_defragment_thread)( buf_block_t* last_block; while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { + ut_ad(btr_defragment_thread_active); + /* If defragmentation is disabled, sleep before checking whether it's enabled. */ if (!srv_defragment) { @@ -825,9 +825,9 @@ DECLARE_THREAD(btr_defragment_thread)( btr_defragment_remove_item(item); } } - btr_defragment_shutdown(); + + btr_defragment_thread_active = false; os_thread_exit(NULL); OS_THREAD_DUMMY_RETURN; } - #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/include/btr0defragment.h b/storage/innobase/include/btr0defragment.h index 6257c4bc996..477824c1a35 100644 --- a/storage/innobase/include/btr0defragment.h +++ b/storage/innobase/include/btr0defragment.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (C) 2013, 2014 Facebook, Inc. All Rights Reserved. -Copyright (C) 2014, 2015, MariaDB Corporation. +Copyright (C) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -89,16 +89,14 @@ UNIV_INTERN void btr_defragment_save_defrag_stats_if_needed( dict_index_t* index); /*!< in: index */ -/******************************************************************//** -Thread that merges consecutive b-tree pages into fewer pages to defragment -the index. 
*/ + +/** Merge consecutive b-tree pages into fewer pages to defragment indexes */ extern "C" UNIV_INTERN os_thread_ret_t -DECLARE_THREAD(btr_defragment_thread)( -/*==========================================*/ - void* arg); /*!< in: a dummy parameter required by - os_thread_create */ +DECLARE_THREAD(btr_defragment_thread)(void*); +/** Whether btr_defragment_thread is active */ +extern bool btr_defragment_thread_active; #endif /* !UNIV_HOTBACKUP */ #endif diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 8b97ccf890e..14d1f4544cd 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -45,12 +45,13 @@ Created 12/9/1995 Heikki Tuuri #include "mem0mem.h" #include "buf0buf.h" #include "buf0flu.h" -#include "srv0srv.h" #include "lock0lock.h" #include "log0recv.h" #include "fil0fil.h" #include "dict0boot.h" -#include "dict0stats_bg.h" /* dict_stats_event */ +#include "dict0stats_bg.h" +#include "btr0defragment.h" +#include "srv0srv.h" #include "srv0start.h" #include "trx0sys.h" #include "trx0trx.h" @@ -3300,6 +3301,8 @@ loop: thread_name = "lock_wait_timeout_thread"; } else if (srv_buf_dump_thread_active) { thread_name = "buf_dump_thread"; + } else if (btr_defragment_thread_active) { + thread_name = "btr_defragment_thread"; } else if (srv_fast_shutdown != 2 && trx_rollback_or_clean_is_active) { thread_name = "rollback of recovered transactions"; } else { diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 21ba050b952..787ec97d5a9 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1457,8 +1457,10 @@ srv_export_innodb_status(void) buf_get_total_stat(&stat); buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len); buf_get_total_list_size_in_bytes(&buf_pools_list_size); - fil_crypt_total_stat(&crypt_stat); - btr_scrub_total_stat(&scrub_stat); + if (!srv_read_only_mode) { + fil_crypt_total_stat(&crypt_stat); + btr_scrub_total_stat(&scrub_stat); + } mutex_enter(&srv_innodb_monitor_mutex); @@ -1660,6 +1662,7 @@ srv_export_innodb_status(void) export_vars.innodb_sec_rec_cluster_reads_avoided = srv_stats.n_sec_rec_cluster_reads_avoided; + if (!srv_read_only_mode) { export_vars.innodb_encryption_rotation_pages_read_from_cache = crypt_stat.pages_read_from_cache; export_vars.innodb_encryption_rotation_pages_read_from_disk = @@ -1687,6 +1690,7 @@ srv_export_innodb_status(void) scrub_stat.page_split_failures_missing_index; export_vars.innodb_scrub_page_split_failures_unknown = scrub_stat.page_split_failures_unknown; + } mutex_exit(&srv_innodb_monitor_mutex); } diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index f3304c5dad8..be65e4a6d17 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -2981,13 +2981,15 @@ files_checked: fil_system_enter(); fil_crypt_threads_init(); fil_system_exit(); - } - /* Init data for datafile scrub threads */ - btr_scrub_init(); + /* Init data for datafile scrub threads */ + btr_scrub_init(); - /* Initialize online defragmentation. */ - btr_defragment_init(); + /* Initialize online defragmentation. 
*/ + btr_defragment_init(); + btr_defragment_thread_active = true; + os_thread_create(btr_defragment_thread, NULL, NULL); + } srv_was_started = TRUE; @@ -3158,11 +3160,10 @@ innobase_shutdown_for_mysql(void) if (!srv_read_only_mode) { dict_stats_thread_deinit(); fil_crypt_threads_cleanup(); + btr_scrub_cleanup(); + btr_defragment_shutdown(); } - /* Cleanup data for datafile scrubbing */ - btr_scrub_cleanup(); - #ifdef __WIN__ /* MDEV-361: ha_innodb.dll leaks handles on Windows MDEV-7403: should not pass recv_writer_thread_handle to diff --git a/storage/xtradb/btr/btr0defragment.cc b/storage/xtradb/btr/btr0defragment.cc index b3978722c1a..44acd9118d7 100644 --- a/storage/xtradb/btr/btr0defragment.cc +++ b/storage/xtradb/btr/btr0defragment.cc @@ -154,7 +154,6 @@ btr_defragment_init() 1000000.0 / srv_defragment_frequency); mutex_create(btr_defragment_mutex_key, &btr_defragment_mutex, SYNC_ANY_LATCH); - os_thread_create(btr_defragment_thread, NULL, NULL); } /******************************************************************//** @@ -735,14 +734,13 @@ btr_defragment_n_pages( return current_block; } -/******************************************************************//** -Thread that merges consecutive b-tree pages into fewer pages to defragment -the index. */ +/** Whether btr_defragment_thread is active */ +bool btr_defragment_thread_active; + +/** Merge consecutive b-tree pages into fewer pages to defragment indexes */ extern "C" UNIV_INTERN os_thread_ret_t -DECLARE_THREAD(btr_defragment_thread)( -/*==========================================*/ - void* arg) /*!< in: work queue */ +DECLARE_THREAD(btr_defragment_thread)(void*) { btr_pcur_t* pcur; btr_cur_t* cursor; @@ -752,6 +750,8 @@ DECLARE_THREAD(btr_defragment_thread)( buf_block_t* last_block; while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { + ut_ad(btr_defragment_thread_active); + /* If defragmentation is disabled, sleep before checking whether it's enabled. */ if (!srv_defragment) { @@ -825,9 +825,9 @@ DECLARE_THREAD(btr_defragment_thread)( btr_defragment_remove_item(item); } } - btr_defragment_shutdown(); + + btr_defragment_thread_active = false; os_thread_exit(NULL); OS_THREAD_DUMMY_RETURN; } - #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/xtradb/include/btr0defragment.h b/storage/xtradb/include/btr0defragment.h index 5c54b898e37..477824c1a35 100644 --- a/storage/xtradb/include/btr0defragment.h +++ b/storage/xtradb/include/btr0defragment.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (C) 2013, 2014 Facebook, Inc. All Rights Reserved. -Copyright (C) 2014, 2015, MariaDB Corporation. +Copyright (C) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -89,15 +89,14 @@ UNIV_INTERN void btr_defragment_save_defrag_stats_if_needed( dict_index_t* index); /*!< in: index */ -/******************************************************************//** -Thread that merges consecutive b-tree pages into fewer pages to defragment -the index. 
*/ + +/** Merge consecutive b-tree pages into fewer pages to defragment indexes */ extern "C" UNIV_INTERN os_thread_ret_t -DECLARE_THREAD(btr_defragment_thread)( -/*==========================================*/ - void* arg); /*!< in: a dummy parameter required by - os_thread_create */ +DECLARE_THREAD(btr_defragment_thread)(void*); + +/** Whether btr_defragment_thread is active */ +extern bool btr_defragment_thread_active; #endif /* !UNIV_HOTBACKUP */ #endif diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index d39bcb87117..eae7f2bb09b 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -55,12 +55,13 @@ Created 12/9/1995 Heikki Tuuri #include "mem0mem.h" #include "buf0buf.h" #include "buf0flu.h" -#include "srv0srv.h" #include "lock0lock.h" #include "log0recv.h" #include "fil0fil.h" #include "dict0boot.h" -#include "dict0stats_bg.h" /* dict_stats_event */ +#include "dict0stats_bg.h" +#include "dict0stats_bg.h" +#include "btr0defragment.h" #include "srv0srv.h" #include "srv0start.h" #include "trx0sys.h" @@ -3618,6 +3619,8 @@ loop: thread_name = "lock_wait_timeout_thread"; } else if (srv_buf_dump_thread_active) { thread_name = "buf_dump_thread"; + } else if (btr_defragment_thread_active) { + thread_name = "btr_defragment_thread"; } else if (srv_fast_shutdown != 2 && trx_rollback_or_clean_is_active) { thread_name = "rollback of recovered transactions"; } else { diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 8dbb7c3a498..5f16eca8531 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -1784,8 +1784,10 @@ srv_export_innodb_status(void) buf_get_total_stat(&stat); buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len); buf_get_total_list_size_in_bytes(&buf_pools_list_size); - fil_crypt_total_stat(&crypt_stat); - btr_scrub_total_stat(&scrub_stat); + if (!srv_read_only_mode) { + fil_crypt_total_stat(&crypt_stat); + btr_scrub_total_stat(&scrub_stat); + } mem_adaptive_hash = 0; @@ -2099,6 +2101,7 @@ srv_export_innodb_status(void) export_vars.innodb_sec_rec_cluster_reads_avoided = srv_stats.n_sec_rec_cluster_reads_avoided; + if (!srv_read_only_mode) { export_vars.innodb_encryption_rotation_pages_read_from_cache = crypt_stat.pages_read_from_cache; export_vars.innodb_encryption_rotation_pages_read_from_disk = @@ -2126,6 +2129,7 @@ srv_export_innodb_status(void) scrub_stat.page_split_failures_missing_index; export_vars.innodb_scrub_page_split_failures_unknown = scrub_stat.page_split_failures_unknown; + } mutex_exit(&srv_innodb_monitor_mutex); } diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index 46fb0017a9a..5247e80da1f 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -3089,13 +3089,15 @@ files_checked: fil_system_enter(); fil_crypt_threads_init(); fil_system_exit(); - } - /* Init data for datafile scrub threads */ - btr_scrub_init(); + /* Init data for datafile scrub threads */ + btr_scrub_init(); - /* Initialize online defragmentation. */ - btr_defragment_init(); + /* Initialize online defragmentation. 
*/ + btr_defragment_init(); + btr_defragment_thread_active = true; + os_thread_create(btr_defragment_thread, NULL, NULL); + } srv_was_started = TRUE; @@ -3260,11 +3262,10 @@ innobase_shutdown_for_mysql(void) if (!srv_read_only_mode) { dict_stats_thread_deinit(); fil_crypt_threads_cleanup(); + btr_scrub_cleanup(); + btr_defragment_shutdown(); } - /* Cleanup data for datafile scrubbing */ - btr_scrub_cleanup(); - #ifdef __WIN__ /* MDEV-361: ha_innodb.dll leaks handles on Windows MDEV-7403: should not pass recv_writer_thread_handle to -- cgit v1.2.1 From 808f18c748a98dcc7df0b8154563153c8527d3f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 26 May 2017 19:13:21 +0300 Subject: MDEV-12926 encryption.innodb_onlinealter_encryption, encryption.innodb-bad-key-change failed in buildbot with valgrind row_merge_write(): Pass the correct (possibly encrypted) buffer to os_file_write_int_fd(). This bug was introduced in commit 65e1399e64a306f1ce1d920e66206954f8630da8 which included a commit to merge changes from MySQL 5.6.36 to MariaDB Server 10.0. --- storage/innobase/row/row0merge.cc | 2 +- storage/xtradb/row/row0merge.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 85e053de961..45a8010d56d 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -1016,7 +1016,7 @@ row_merge_write( mach_write_to_4((byte *)out_buf, 0); } - ret = os_file_write_int_fd("(merge)", fd, buf, ofs, buf_len); + ret = os_file_write_int_fd("(merge)", fd, out_buf, ofs, buf_len); #ifdef UNIV_DEBUG if (row_merge_print_block_write) { diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc index 16348f3c8dd..408e94b3997 100644 --- a/storage/xtradb/row/row0merge.cc +++ b/storage/xtradb/row/row0merge.cc @@ -1023,7 +1023,7 @@ row_merge_write( mach_write_to_4((byte *)out_buf, 0); } - ret = os_file_write_int_fd("(merge)", fd, buf, ofs, buf_len); + ret = os_file_write_int_fd("(merge)", fd, out_buf, ofs, buf_len); #ifdef UNIV_DEBUG if (row_merge_print_block_write) { -- cgit v1.2.1 From 4114d1d45264065cccf721bcb752a1801505c68e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 May 2017 11:05:41 +0300 Subject: Fix WITH_INNODB_EXTRA_DEBUG --- storage/innobase/innodb.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/innobase/innodb.cmake b/storage/innobase/innodb.cmake index 9d6ac0eb0e8..cb62246c4ee 100644 --- a/storage/innobase/innodb.cmake +++ b/storage/innobase/innodb.cmake @@ -123,8 +123,8 @@ ENDIF() OPTION(WITH_INNODB_EXTRA_DEBUG "Enable extra InnoDB debug checks" OFF) IF(WITH_INNODB_EXTRA_DEBUG) - IF(NOT WITH_DEBUG) - MESSAGE(FATAL_ERROR "WITH_INNODB_EXTRA_DEBUG can be enabled only when WITH_DEBUG is enabled") + IF(NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + MESSAGE(FATAL_ERROR "WITH_INNODB_EXTRA_DEBUG can be enabled only in debug builds") ENDIF() SET(EXTRA_DEBUG_FLAGS "") -- cgit v1.2.1 From e32dc40c50e60e6ac194ab82e3a44b84eb66635f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 May 2017 12:42:25 +0300 Subject: Remove an unused variable --- storage/innobase/ut/ut0ut.cc | 2 -- 1 file changed, 2 deletions(-) (limited to 'storage') diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index e7489861473..f2019f21386 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -38,8 
+38,6 @@ Created 5/11/1994 Heikki Tuuri #include #include "log.h" -/** A constant to prevent the compiler from optimizing ut_delay() away. */ -ibool ut_always_false = FALSE; #ifdef _WIN32 /*****************************************************************//** NOTE: The Windows epoch starts from 1601/01/01 whereas the Unix -- cgit v1.2.1 From 3b68515bf2f6bcb74def8dfcef191dc0e612420f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 May 2017 13:25:27 +0300 Subject: Actually enable UNIV_DEBUG_VALGRIND --- storage/innobase/include/univ.i | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index c572538ce3b..88c13be5a8f 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -184,9 +184,9 @@ command. */ #define UNIV_ENABLE_UNIT_TEST_ROW_RAW_FORMAT_INT */ -#if defined HAVE_valgrind && defined HAVE_VALGRIND +#if defined HAVE_valgrind && defined HAVE_VALGRIND_MEMCHECK_H # define UNIV_DEBUG_VALGRIND -#endif /* HAVE_VALGRIND */ +#endif #ifdef DBUG_OFF # undef UNIV_DEBUG -- cgit v1.2.1 From 2fd840011c8937327189555ab662d6666cd70780 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 May 2017 13:30:01 +0300 Subject: Remove ut_allocator::m_oom_fatal ut_allocator: Move m_oom_fatal into a template parameter oom_fatal, to reduce the data and code size. --- storage/innobase/include/ut0new.h | 51 +++++++++------------------------------ 1 file changed, 12 insertions(+), 39 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/ut0new.h b/storage/innobase/include/ut0new.h index 5a9022e8a77..955e7b026c7 100644 --- a/storage/innobase/include/ut0new.h +++ b/storage/innobase/include/ut0new.h @@ -235,8 +235,10 @@ struct ut_new_pfx_t { #endif }; -/** Allocator class for allocating memory from inside std::* containers. */ -template +/** Allocator class for allocating memory from inside std::* containers. +@tparam T type of allocated object +@tparam oom_fatal whether to commit suicide when running out of memory */ +template class ut_allocator { public: typedef T* pointer; @@ -249,13 +251,10 @@ public: /** Default constructor. */ explicit - ut_allocator( - PSI_memory_key key = PSI_NOT_INSTRUMENTED) - : + ut_allocator(PSI_memory_key key = PSI_NOT_INSTRUMENTED) #ifdef UNIV_PFS_MEMORY - m_key(key), + : m_key(key) #endif /* UNIV_PFS_MEMORY */ - m_oom_fatal(true) { } @@ -263,30 +262,10 @@ public: template ut_allocator( const ut_allocator& other) - : m_oom_fatal(other.is_oom_fatal()) - { #ifdef UNIV_PFS_MEMORY - const PSI_memory_key other_key = other.get_mem_key(NULL); - - m_key = (other_key != mem_key_std) - ? other_key - : PSI_NOT_INSTRUMENTED; + : m_key(other.m_key) #endif /* UNIV_PFS_MEMORY */ - } - - /** When out of memory (OOM) happens, report error and do not - make it fatal. - @return a reference to the allocator. */ - ut_allocator& - set_oom_not_fatal() { - m_oom_fatal = false; - return(*this); - } - - /** Check if allocation failure is a fatal error. - @return true if allocation failure is fatal, false otherwise. 
*/ - bool is_oom_fatal() const { - return(m_oom_fatal); + { } /** Return the maximum number of objects that can be allocated by @@ -364,7 +343,7 @@ public: } if (ptr == NULL) { - ib::fatal_or_error(m_oom_fatal) + ib::fatal_or_error(oom_fatal) << "Cannot allocate " << total_bytes << " bytes of memory after " << alloc_max_retries << " retries over " @@ -499,14 +478,13 @@ public: } if (pfx_new == NULL) { - ib::fatal_or_error(m_oom_fatal) + ib::fatal_or_error(oom_fatal) << "Cannot reallocate " << total_bytes << " bytes of memory after " << alloc_max_retries << " retries over " << alloc_max_retries << " seconds. OS error: " << strerror(errno) << " (" << errno << "). " << OUT_OF_MEMORY_MSG; - /* not reached */ return(NULL); } @@ -739,10 +717,6 @@ private: void operator=( const ut_allocator&); - - /** A flag to indicate whether out of memory (OOM) error is considered - fatal. If true, it is fatal. */ - bool m_oom_fatal; }; /** Compare two allocators of the same type. @@ -882,9 +856,8 @@ ut_delete_array( n_bytes, NULL, __FILE__, true, false)) #define ut_zalloc_nokey_nofatal(n_bytes) static_cast( \ - ut_allocator(PSI_NOT_INSTRUMENTED). \ - set_oom_not_fatal(). \ - allocate(n_bytes, NULL, __FILE__, true, false)) + ut_allocator(PSI_NOT_INSTRUMENTED).allocate( \ + n_bytes, NULL, __FILE__, true, false)) #define ut_realloc(ptr, n_bytes) static_cast( \ ut_allocator(PSI_NOT_INSTRUMENTED).reallocate( \ -- cgit v1.2.1 From e54d521b663f5988bfc844397505bd9d10c0f158 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 May 2017 14:07:41 +0300 Subject: Remove some noise from ib::fatal() and ib::fatal_or_error() Avoid the redundant output from ut_dbg_assertion_failed by directly invoking abort(). --- storage/innobase/ut/ut0ut.cc | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'storage') diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index f2019f21386..4eb9d45b0dc 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -837,7 +837,7 @@ error::~error() fatal::~fatal() { sql_print_error("[FATAL] InnoDB: %s", m_oss.str().c_str()); - ut_error; + abort(); } error_or_warn::~error_or_warn() @@ -851,8 +851,11 @@ error_or_warn::~error_or_warn() fatal_or_error::~fatal_or_error() { - sql_print_error("InnoDB: %s", m_oss.str().c_str()); - ut_a(!m_fatal); + sql_print_error(m_fatal ? "[FATAL] InnoDB: %s" : "InnoDB: %s", + m_oss.str().c_str()); + if (m_fatal) { + abort(); + } } } // namespace ib -- cgit v1.2.1 From c0dec39da04d68b49fbab51eb87bd81df70d6d58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 May 2017 15:20:35 +0300 Subject: fts_is_charset_cjk(): Do not call strcmp() Remove a bogus reference to gb18030_chinese_ci, which was introduced in MySQL 5.7. It is roughly equivalent to utf8mb4_unicode_ci, just using a different internal encoding, and different collation. --- storage/innobase/include/fts0types.ic | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/fts0types.ic b/storage/innobase/include/fts0types.ic index 417a1010919..a8712751412 100644 --- a/storage/innobase/include/fts0types.ic +++ b/storage/innobase/include/fts0types.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -115,19 +116,14 @@ bool fts_is_charset_cjk( const CHARSET_INFO* cs) { - if (strcmp(cs->name, "gb2312_chinese_ci") == 0 - || strcmp(cs->name, "gbk_chinese_ci") == 0 - || strcmp(cs->name, "big5_chinese_ci") == 0 - || strcmp(cs->name, "gb18030_chinese_ci") == 0 - || strcmp(cs->name, "ujis_japanese_ci") == 0 - || strcmp(cs->name, "sjis_japanese_ci") == 0 - || strcmp(cs->name, "cp932_japanese_ci") == 0 - || strcmp(cs->name, "eucjpms_japanese_ci") == 0 - || strcmp(cs->name, "euckr_korean_ci") == 0) { - return(true); - } else { - return(false); - } + return cs == &my_charset_gb2312_chinese_ci + || cs == &my_charset_gbk_chinese_ci + || cs == &my_charset_big5_chinese_ci + || cs == &my_charset_ujis_japanese_ci + || cs == &my_charset_sjis_japanese_ci + || cs == &my_charset_cp932_japanese_ci + || cs == &my_charset_eucjpms_japanese_ci + || cs == &my_charset_euckr_korean_ci; } /** Select the FTS auxiliary index for the given character by range. -- cgit v1.2.1 From 73deafbc1780c367846fa5cd27f9db092faa2de8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 18 May 2017 15:45:05 +0300 Subject: Remove dict_index_t::is_ngram When MySQL 5.7 introduced fulltext parser plugins to InnoDB, it hard-coded the plugin name "ngram" to mean something special. Because -fsanitize=undefined was issuing warnings for the assignment in row_merge_create_index() that the value is out of range for Boolean, we remove this code that was not intended to be used in MariaDB 10.2. fts_check_token(): Remove the special logic for N-gram tokens. --- storage/innobase/fts/fts0fts.cc | 102 ++---------------------------- storage/innobase/fts/fts0que.cc | 2 - storage/innobase/handler/ha_innodb.cc | 5 -- storage/innobase/handler/ha_innodb.h | 2 - storage/innobase/handler/handler0alter.cc | 7 -- storage/innobase/include/dict0mem.h | 2 - storage/innobase/include/fts0priv.h | 7 +- storage/innobase/include/fts0types.h | 4 +- storage/innobase/include/row0merge.h | 1 - storage/innobase/row/row0ftsort.cc | 7 +- storage/innobase/row/row0merge.cc | 1 - 11 files changed, 12 insertions(+), 128 deletions(-) (limited to 'storage') diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 79020fb4442..1cce58dd9c5 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1200,7 +1200,6 @@ fts_tokenizer_word_get( /* If it is a stopword, do not index it */ if (!fts_check_token(text, cache->stopword_info.cached_stopword, - index_cache->index->is_ngram, index_cache->charset)) { return(NULL); @@ -3241,7 +3240,6 @@ fts_query_expansion_fetch_doc( } doc.charset = doc_charset; - doc.is_ngram = result_doc->is_ngram; if (dfield_is_ext(dfield)) { /* We ignore columns that are stored externally, this @@ -3347,7 +3345,6 @@ fts_fetch_doc_from_rec( doc->found = TRUE; doc->charset = get_doc->index_cache->charset; - doc->is_ngram = index->is_ngram; /* Null Field */ if (doc->text.f_len == UNIV_SQL_NULL || doc->text.f_len == 0) { @@ -4379,13 +4376,10 @@ fts_sync_table( return(err); } -/** Check fts token -1. for ngram token, check whether the token contains any words in stopwords -2. for non-ngram token, check if it's stopword or less than fts_min_token_size +/** Check if a fts token is a stopword or less than fts_min_token_size or greater than fts_max_token_size. 
@param[in] token token string @param[in] stopwords stopwords rb tree -@param[in] is_ngram is ngram parser @param[in] cs token charset @retval true if it is not stopword and length in range @retval false if it is stopword or lenght not in range */ @@ -4393,96 +4387,16 @@ bool fts_check_token( const fts_string_t* token, const ib_rbt_t* stopwords, - bool is_ngram, const CHARSET_INFO* cs) { ut_ad(cs != NULL || stopwords == NULL); - if (!is_ngram) { - ib_rbt_bound_t parent; + ib_rbt_bound_t parent; - if (token->f_n_char < fts_min_token_size - || token->f_n_char > fts_max_token_size - || (stopwords != NULL - && rbt_search(stopwords, &parent, token) == 0)) { - return(false); - } else { - return(true); - } - } - - /* Check token for ngram. */ - DBUG_EXECUTE_IF( - "fts_instrument_ignore_ngram_check", - return(true); - ); - - /* We ignore fts_min_token_size when ngram */ - ut_ad(token->f_n_char > 0 - && token->f_n_char <= fts_max_token_size); - - if (stopwords == NULL) { - return(true); - } - - /*Ngram checks whether the token contains any words in stopwords. - We can't simply use CONTAIN to search in stopwords, because it's - built on COMPARE. So we need to tokenize the token into words - from unigram to f_n_char, and check them separately. */ - for (ulint ngram_token_size = 1; ngram_token_size <= token->f_n_char; - ngram_token_size ++) { - const char* start; - const char* next; - const char* end; - ulint char_len; - ulint n_chars; - - start = reinterpret_cast(token->f_str); - next = start; - end = start + token->f_len; - n_chars = 0; - - while (next < end) { - char_len = my_charlen(cs, next, end); - - if (next + char_len > end || char_len == 0) { - break; - } else { - /* Skip SPACE */ - if (char_len == 1 && *next == ' ') { - start = next + 1; - next = start; - n_chars = 0; - - continue; - } - - next += char_len; - n_chars++; - } - - if (n_chars == ngram_token_size) { - fts_string_t ngram_token; - ngram_token.f_str = - reinterpret_cast( - const_cast(start)); - ngram_token.f_len = next - start; - ngram_token.f_n_char = ngram_token_size; - - ib_rbt_bound_t parent; - if (rbt_search(stopwords, &parent, - &ngram_token) == 0) { - return(false); - } - - /* Move a char forward */ - start += my_charlen(cs, start, end); - n_chars = ngram_token_size - 1; - } - } - } - - return(true); + return(token->f_n_char >= fts_min_token_size + && token->f_n_char <= fts_max_token_size + && (stopwords == NULL + || rbt_search(stopwords, &parent, token) != 0)); } /** Add the token and its start position to the token's list of positions. @@ -4499,8 +4413,7 @@ fts_add_token( /* Ignore string whose character number is less than "fts_min_token_size" or more than "fts_max_token_size" */ - if (fts_check_token(&str, NULL, result_doc->is_ngram, - result_doc->charset)) { + if (fts_check_token(&str, NULL, result_doc->charset)) { mem_heap_t* heap; fts_string_t t_str; @@ -7487,7 +7400,6 @@ fts_init_recover_doc( } doc.charset = get_doc->index_cache->charset; - doc.is_ngram = get_doc->index_cache->index->is_ngram; if (dfield_is_ext(dfield)) { dict_table_t* table = cache->sync->table; diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc index fc80c843412..594f337c978 100644 --- a/storage/innobase/fts/fts0que.cc +++ b/storage/innobase/fts/fts0que.cc @@ -2693,7 +2693,6 @@ fts_query_phrase_split( if (fts_check_token( &result_str, cache->stopword_info.cached_stopword, - query->index->is_ngram, query->fts_index_table.charset)) { /* Add the word to the RB tree so that we can calculate it's frequencey within a document. 
*/ @@ -4278,7 +4277,6 @@ fts_expand_query( result_doc.charset = index_cache->charset; result_doc.parser = index_cache->index->parser; - result_doc.is_ngram = index_cache->index->is_ngram; query->total_size += SIZEOF_RBT_CREATE; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 66b099ac348..c162c1f9f3f 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -6915,11 +6915,6 @@ ha_innobase::open( static_cast( plugin_decl(parser)->info); - index->is_ngram = strncmp( - plugin_name(parser)->str, - FTS_NGRAM_PARSER_NAME, - plugin_name(parser)->length) == 0; - DBUG_EXECUTE_IF("fts_instrument_use_default_parser", index->parser = &fts_default_parser;); } diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 8412f5ab53d..a9f063dc224 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -619,8 +619,6 @@ extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id); extern const struct _ft_vft ft_vft_result; -#define FTS_NGRAM_PARSER_NAME "ngram" - /** Structure Returned by ha_innobase::ft_init_ext() */ typedef struct new_ft_info { diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index e88a3554074..5cc3347ebbe 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -2203,7 +2203,6 @@ innobase_create_index_def( memset(index->fields, 0, n_fields * sizeof *index->fields); index->parser = NULL; - index->is_ngram = false; index->key_number = key_number; index->n_fields = n_fields; index->name = mem_heap_strdup(heap, key->name); @@ -2237,12 +2236,6 @@ innobase_create_index_def( static_cast( plugin_decl(parser)->info); - index->is_ngram = strncmp( - plugin_name(parser)->str, - FTS_NGRAM_PARSER_NAME, - plugin_name(parser)->length) - == 0; - break; } } diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index f94d5f2b1ca..87f415c8a04 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -918,8 +918,6 @@ struct dict_index_t{ dict_field_t* fields; /*!< array of field descriptions */ st_mysql_ftparser* parser; /*!< fulltext parser plugin */ - bool is_ngram; - /*!< true if it's ngram parser */ bool has_new_v_col; /*!< whether it has a newly added virtual column in ALTER */ diff --git a/storage/innobase/include/fts0priv.h b/storage/innobase/include/fts0priv.h index 80ebcf09d6d..f9d5d07a44c 100644 --- a/storage/innobase/include/fts0priv.h +++ b/storage/innobase/include/fts0priv.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -215,13 +216,10 @@ fts_write_node( fts_node_t* node) /*!< in: node columns */ MY_ATTRIBUTE((warn_unused_result)); -/** Check fts token -1. for ngram token, check whether the token contains any words in stopwords -2. for non-ngram token, check if it's stopword or less than fts_min_token_size +/** Check if a fts token is a stopword or less than fts_min_token_size or greater than fts_max_token_size. 
@param[in] token token string @param[in] stopwords stopwords rb tree -@param[in] is_ngram is ngram parser @param[in] cs token charset @retval true if it is not stopword and length in range @retval false if it is stopword or length not in range */ @@ -229,7 +227,6 @@ bool fts_check_token( const fts_string_t* token, const ib_rbt_t* stopwords, - bool is_ngram, const CHARSET_INFO* cs); /******************************************************************//** diff --git a/storage/innobase/include/fts0types.h b/storage/innobase/include/fts0types.h index c1db160602f..55a698e8b66 100644 --- a/storage/innobase/include/fts0types.h +++ b/storage/innobase/include/fts0types.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -277,8 +277,6 @@ struct fts_doc_t { st_mysql_ftparser* parser; /*!< fts plugin parser */ - bool is_ngram; /*!< Whether it is a ngram parser */ - ib_rbt_t* stopwords; /*!< Stopwords */ }; diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h index 47b5e32d3de..50c3361a3f9 100644 --- a/storage/innobase/include/row0merge.h +++ b/storage/innobase/include/row0merge.h @@ -129,7 +129,6 @@ struct index_def_t { index_field_t* fields; /*!< field definitions */ st_mysql_ftparser* parser; /*!< fulltext parser plugin */ - bool is_ngram; /*!< true if it's ngram parser */ }; /** Structure for reporting duplicate records. */ diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc index fca6ae5a1bf..321b55e9894 100644 --- a/storage/innobase/row/row0ftsort.cc +++ b/storage/innobase/row/row0ftsort.cc @@ -94,7 +94,6 @@ row_merge_create_fts_sort_index( new_index->n_def = FTS_NUM_FIELDS_SORT; new_index->cached = TRUE; new_index->parser = index->parser; - new_index->is_ngram = index->is_ngram; idx_field = dict_index_get_nth_field(index, 0); charset = fts_index_get_charset(index); @@ -515,7 +514,6 @@ row_merge_fts_doc_tokenize( ulint data_size[FTS_NUM_AUX_INDEX]; ulint n_tuple[FTS_NUM_AUX_INDEX]; st_mysql_ftparser* parser; - bool is_ngram; t_str.f_n_char = 0; t_ctx->buf_used = 0; @@ -524,7 +522,6 @@ row_merge_fts_doc_tokenize( memset(data_size, 0, FTS_NUM_AUX_INDEX * sizeof(ulint)); parser = sort_buf[0]->index->parser; - is_ngram = sort_buf[0]->index->is_ngram; /* Tokenize the data and add each word string, its corresponding doc id and position to sort buffer */ @@ -570,7 +567,7 @@ row_merge_fts_doc_tokenize( /* Ignore string whose character number is less than "fts_min_token_size" or more than "fts_max_token_size" */ - if (!fts_check_token(&str, NULL, is_ngram, NULL)) { + if (!fts_check_token(&str, NULL, NULL)) { if (parser != NULL) { UT_LIST_REMOVE(t_ctx->fts_token_list, fts_token); ut_free(fts_token); @@ -589,7 +586,7 @@ row_merge_fts_doc_tokenize( /* if "cached_stopword" is defined, ignore words in the stopword list */ - if (!fts_check_token(&str, t_ctx->cached_stopword, is_ngram, + if (!fts_check_token(&str, t_ctx->cached_stopword, doc->charset)) { if (parser != NULL) { UT_LIST_REMOVE(t_ctx->fts_token_list, fts_token); diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index eb0a58f4c28..ea332adfdc3 100644 --- 
a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -4504,7 +4504,6 @@ row_merge_create_index( ut_a(index); index->parser = index_def->parser; - index->is_ngram = index_def->is_ngram; index->has_new_v_col = has_new_v_col; /* Note the id of the transaction that created this -- cgit v1.2.1 From b53f6b97685a7da0e9c10eaa3675ff5b4326aff6 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Mon, 29 May 2017 15:30:21 +1000 Subject: MDEV-12924: re-add numa to innodb Commit 3a3b3d8ba8d8f32feb7cf6c2609639635ddb53b5 overly purged the numa checks. Re-add this. Signed-off-by: Daniel Black --- storage/innobase/innodb.cmake | 2 ++ 1 file changed, 2 insertions(+) (limited to 'storage') diff --git a/storage/innobase/innodb.cmake b/storage/innobase/innodb.cmake index cb62246c4ee..fe2d537c50e 100644 --- a/storage/innobase/innodb.cmake +++ b/storage/innobase/innodb.cmake @@ -24,12 +24,14 @@ INCLUDE(lzo.cmake) INCLUDE(lzma.cmake) INCLUDE(bzip2.cmake) INCLUDE(snappy.cmake) +INCLUDE(numa) MYSQL_CHECK_LZ4() MYSQL_CHECK_LZO() MYSQL_CHECK_LZMA() MYSQL_CHECK_BZIP2() MYSQL_CHECK_SNAPPY() +MYSQL_CHECK_NUMA() IF(CMAKE_CROSSCOMPILING) # Use CHECK_C_SOURCE_COMPILES instead of CHECK_C_SOURCE_RUNS when -- cgit v1.2.1 From 2cb94aa1b70fa01d9e5c1d8a7625415a0220a3d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 29 May 2017 13:07:23 +0300 Subject: MDEV-11626 innodb.innodb-change-buffer-recovery fails for xtradb buf_page_get_gen(): Remove the error log messages about page flushing and eviction when innodb_change_buffering_debug=1 is in effect. --- storage/innobase/buf/buf0buf.c | 6 ------ storage/xtradb/buf/buf0buf.c | 6 ------ 2 files changed, 12 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0buf.c b/storage/innobase/buf/buf0buf.c index a5e1c045b7b..80a15ffe0d5 100644 --- a/storage/innobase/buf/buf0buf.c +++ b/storage/innobase/buf/buf0buf.c @@ -2560,14 +2560,8 @@ wait_until_unfixed: } } buf_pool_mutex_exit(buf_pool); - fprintf(stderr, - "innodb_change_buffering_debug evict %u %u\n", - (unsigned) space, (unsigned) offset); return(NULL); } else if (buf_flush_page_try(buf_pool, block)) { - fprintf(stderr, - "innodb_change_buffering_debug flush %u %u\n", - (unsigned) space, (unsigned) offset); guess = block; goto loop; } diff --git a/storage/xtradb/buf/buf0buf.c b/storage/xtradb/buf/buf0buf.c index e68b0526d42..0b0a00cd1ed 100644 --- a/storage/xtradb/buf/buf0buf.c +++ b/storage/xtradb/buf/buf0buf.c @@ -2852,9 +2852,6 @@ wait_until_unfixed: } } //buf_pool_mutex_exit(buf_pool); - fprintf(stderr, - "innodb_change_buffering_debug evict %u %u\n", - (unsigned) space, (unsigned) offset); return(NULL); } else if (UNIV_UNLIKELY(buf_block_get_state(block) @@ -2875,9 +2872,6 @@ wait_until_unfixed: ut_ad(!mutex_own(&buf_pool->LRU_list_mutex)); if (buf_flush_page_try(buf_pool, block)) { - fprintf(stderr, - "innodb_change_buffering_debug flush %u %u\n", - (unsigned) space, (unsigned) offset); guess = block; goto loop; } -- cgit v1.2.1 From 22e5e64c0d62713b052badd87cb0910af5b67f91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 29 May 2017 14:16:57 +0300 Subject: MDEV-11623 merge fix: Use the correct flags in an error message --- storage/innobase/fil/fil0fil.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 32681f2d639..966c160f4bd 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ 
-673,7 +673,7 @@ retry: if (cflags == ULINT_UNDEFINED) { ib::error() << "Expected tablespace flags " - << ib::hex(flags) + << ib::hex(space->flags) << " but found " << ib::hex(flags) << " in the file " << node->name; return(false); -- cgit v1.2.1
From 1af8bf39ca2513bfdf43a55af0b6af10d32dcebb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 30 May 2017 11:30:43 +0300 Subject: MDEV-12113: install_db shows corruption for rest encryption with innodb_data_file_path=ibdata1:3M;
The problem was that the FIL_PAGE_FLUSH_LSN_OR_KEY_VERSION field, which on encrypted pages (even in the system datafiles, except the very first page 0:0) should contain the key_version, was overwritten with the flush LSN after encryption.
Ported WL#7990 "Repurpose FIL_PAGE_FLUSH_LSN" to 10.1.
The field FIL_PAGE_FLUSH_LSN_OR_KEY_VERSION is consulted during InnoDB startup: InnoDB reads it from the first page of each file in the InnoDB system tablespace, and if there are multiple files the minimum and maximum LSN can differ; these numbers are passed to InnoDB startup. Having the number in files other than the first file of the InnoDB system tablespace adds little value and conflicts with the other uses of the field, such as on InnoDB R-tree index pages and as the encryption key_version. This change therefore stops writing FIL_PAGE_FLUSH_LSN_OR_KEY_VERSION to files other than the first file of the InnoDB system tablespace (page 0:0) when the system tablespace is encrypted. If the tablespace is not encrypted, FIL_PAGE_FLUSH_LSN_OR_KEY_VERSION is still written to the first page of every system tablespace file, to avoid unnecessary warnings on downgrade.
open_or_create_data_files(): pass only one flushed_lsn parameter.
xb_load_tablespaces(): pass only one flushed_lsn parameter.
buf_page_create(): improve the comment about where FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION is set.
fil_write_flushed_lsn(): new function, merged from fil_write_lsn_and_arch_no_to_file() and fil_write_flushed_lsn_to_data_files(); write only to the first page of the system tablespace (page 0:0) if the tablespace is encrypted, otherwise write to all first pages of the system tablespace and invoke fil_flush_file_spaces(FIL_TYPE_TABLESPACE) afterwards.
fil_read_first_page(): read flush_lsn and crypt_data only from the first datafile.
fil_open_single_table_tablespace(): remove the output of the LSN, because it was only valid for the system tablespace and the undo tablespaces, not for user tablespaces.
fil_validate_single_table_tablespace(): remove the output of the LSN.
checkpoint_now_set(): use fil_write_flushed_lsn() and output an error if the operation fails.
Remove the lsn variable from fsp_open_info.
recv_recovery_from_checkpoint_start(): remove the unnecessary second flush_lsn parameter.
logs_empty_and_mark_files_at_shutdown(): use fil_write_flushed_lsn() and output an error if it fails.
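As a rough, stand-alone illustration of the repurposed field (not part of the patch): on disk it is an 8-byte big-endian value in the FIL page header, so the startup-time read described above boils down to pulling those bytes out of page 0 of the first system tablespace file. The sketch below assumes the standard header offset of 26 bytes for FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION and deliberately ignores page size checks, checksums and encryption; inside the server the equivalent read is the mach_read_from_8() call that this patch adds to fil_read_first_page(), and the write side is the new fil_write_flushed_lsn() in the hunks that follow.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <fstream>

/* Offset of FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION in the FIL page
   header (assumption: the standard InnoDB page header layout). */
static const std::size_t FLUSH_LSN_OR_KEY_VERSION_OFFSET = 26;

/* Read the 8-byte big-endian value from page 0 of a data file.  On the
   first system tablespace file this is the flushed LSN; on other
   encrypted pages the same bytes carry the key_version. */
static std::uint64_t read_flush_lsn_or_key_version(const char* path)
{
    unsigned char hdr[FLUSH_LSN_OR_KEY_VERSION_OFFSET + 8];
    std::ifstream f(path, std::ios::binary);

    if (!f.read(reinterpret_cast<char*>(hdr), sizeof hdr)) {
        return 0;   /* unreadable file: treat as "nothing stamped" */
    }

    std::uint64_t value = 0;

    for (int i = 0; i < 8; i++) {
        value = (value << 8) | hdr[FLUSH_LSN_OR_KEY_VERSION_OFFSET + i];
    }

    return value;
}

int main(int argc, char** argv)
{
    if (argc > 1) {
        std::printf("flush LSN or key_version: %llu\n",
                    static_cast<unsigned long long>(
                        read_flush_lsn_or_key_version(argv[1])));
    }
    return 0;
}

fil_write_flushed_lsn() is the mirror image of this read: when the system tablespace is encrypted it stamps only page 0:0, otherwise it stamps the first page of every system tablespace file and flushes the tablespace afterwards.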
--- storage/innobase/buf/buf0buf.cc | 10 +- storage/innobase/fil/fil0fil.cc | 269 +++++++++++++++------------------ storage/innobase/handler/ha_innodb.cc | 11 +- storage/innobase/include/fil0fil.h | 63 ++++---- storage/innobase/include/log0recv.h | 36 +++-- storage/innobase/log/log0log.cc | 13 +- storage/innobase/log/log0recv.cc | 37 +++-- storage/innobase/srv/srv0start.cc | 134 ++++++++--------- storage/xtradb/buf/buf0buf.cc | 10 +- storage/xtradb/fil/fil0fil.cc | 276 +++++++++++++++------------------- storage/xtradb/handler/ha_innodb.cc | 11 +- storage/xtradb/include/fil0fil.h | 53 ++++--- storage/xtradb/include/log0recv.h | 36 +++-- storage/xtradb/log/log0log.cc | 9 +- storage/xtradb/log/log0recv.cc | 37 +++-- storage/xtradb/srv/srv0start.cc | 138 ++++++++--------- 16 files changed, 549 insertions(+), 594 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 5cc09287350..2cb2673f7fb 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -4291,11 +4291,11 @@ buf_page_create( memset(frame + FIL_PAGE_NEXT, 0xff, 4); mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED); - /* Reset to zero the file flush lsn field in the page; if the first - page of an ibdata file is 'created' in this function into the buffer - pool then we lose the original contents of the file flush lsn stamp. - Then InnoDB could in a crash recovery print a big, false, corruption - warning if the stamp contains an lsn bigger than the ib_logfile lsn. */ + /* FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION is only used on the + following pages: + (1) The first page of the InnoDB system tablespace (page 0:0) + (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages + (3) key_version on encrypted pages (not page 0:0) */ memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8); diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index fa606e89828..9d4a464460f 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2211,99 +2211,70 @@ fil_set_max_space_id_if_bigger( mutex_exit(&fil_system->mutex); } -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page header -of the first page of a data file of the system tablespace (space 0), -which is uncompressed. */ -static MY_ATTRIBUTE((warn_unused_result)) +/** Write the flushed LSN to the page header of the first page in the +system tablespace. 
+@param[in] lsn flushed LSN +@return DB_SUCCESS or error number */ dberr_t -fil_write_lsn_and_arch_no_to_file( -/*==============================*/ - ulint space, /*!< in: space to write to */ - ulint sum_of_sizes, /*!< in: combined size of previous files - in space, in database pages */ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no MY_ATTRIBUTE((unused))) - /*!< in: archived log number to write */ +fil_write_flushed_lsn( + lsn_t lsn) { byte* buf1; byte* buf; dberr_t err; - buf1 = static_cast(mem_alloc(2 * UNIV_PAGE_SIZE)); + buf1 = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); buf = static_cast(ut_align(buf1, UNIV_PAGE_SIZE)); - err = fil_read(TRUE, space, 0, sum_of_sizes, 0, - UNIV_PAGE_SIZE, buf, NULL, 0); - if (err == DB_SUCCESS) { - mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, - lsn); - - err = fil_write(TRUE, space, 0, sum_of_sizes, 0, - UNIV_PAGE_SIZE, buf, NULL, 0); - } - - mem_free(buf1); - - return(err); -} + /* Acquire system tablespace */ + fil_space_t* space = fil_space_acquire(0); -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page -header of the first page of each data file in the system tablespace. -@return DB_SUCCESS or error number */ -UNIV_INTERN -dberr_t -fil_write_flushed_lsn_to_data_files( -/*================================*/ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no) /*!< in: latest archived log file number */ -{ - fil_space_t* space; - fil_node_t* node; - dberr_t err; + /* If tablespace is not encrypted, stamp flush_lsn to + first page of all system tablespace datafiles to avoid + unnecessary error messages on possible downgrade. */ + if (space->crypt_data->min_key_version == 0) { + fil_node_t* node; + ulint sum_of_sizes = 0; - mutex_enter(&fil_system->mutex); - - for (space = UT_LIST_GET_FIRST(fil_system->space_list); - space != NULL; - space = UT_LIST_GET_NEXT(space_list, space)) { - - /* We only write the lsn to all existing data files which have - been open during the lifetime of the mysqld process; they are - represented by the space objects in the tablespace memory - cache. Note that all data files in the system tablespace 0 - and the UNDO log tablespaces (if separate) are always open. */ - - if (space->purpose == FIL_TABLESPACE - && !fil_is_user_tablespace_id(space->id)) { - ulint sum_of_sizes = 0; - - for (node = UT_LIST_GET_FIRST(space->chain); - node != NULL; - node = UT_LIST_GET_NEXT(chain, node)) { - - mutex_exit(&fil_system->mutex); + for (node = UT_LIST_GET_FIRST(space->chain); + node != NULL; + node = UT_LIST_GET_NEXT(chain, node)) { - err = fil_write_lsn_and_arch_no_to_file( - space->id, sum_of_sizes, lsn, - arch_log_no); + err = fil_read(TRUE, 0, 0, sum_of_sizes, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); - if (err != DB_SUCCESS) { + if (err == DB_SUCCESS) { + mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, + lsn); - return(err); - } - - mutex_enter(&fil_system->mutex); + err = fil_write(TRUE, 0, 0, sum_of_sizes, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); sum_of_sizes += node->size; } } + } else { + /* When system tablespace is encrypted stamp flush_lsn to + only the first page of the first datafile (rest of pages + are encrypted). 
*/ + err = fil_read(TRUE, 0, 0, 0, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); + + if (err == DB_SUCCESS) { + mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, + lsn); + + err = fil_write(TRUE, 0, 0, 0, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); + } } - mutex_exit(&fil_system->mutex); + fil_flush_file_spaces(FIL_TABLESPACE); + fil_space_release(space); - return(DB_SUCCESS); + ut_free(buf1); + + return(err); } /** Check the consistency of the first data page of a tablespace @@ -2356,36 +2327,37 @@ fil_check_first_page(const page_t* page, ulint space_id, ulint flags) return("inconsistent data in space header"); } -/*******************************************************************//** -Reads the flushed lsn, arch no, space_id and tablespace flag fields from -the first page of a data file at database startup. +/** Reads the flushed lsn, arch no, space_id and tablespace flag fields from +the first page of a first data file at database startup. +@param[in] data_file open data file +@param[in] one_read_only true if first datafile is already + read +@param[out] flags FSP_SPACE_FLAGS +@param[out] space_id tablepspace ID +@param[out] min_arch_log_no min of archived log numbers in + data files +@param[out] max_arch_log_no max of archived log numbers in + data files +@param[out] flushed_lsn flushed lsn value +@param[out] crypt_data encryption crypt data @retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ UNIV_INTERN const char* fil_read_first_page( -/*================*/ - pfs_os_file_t data_file, /*!< in: open data file */ - ibool one_read_already, /*!< in: TRUE if min and max - parameters below already - contain sensible data */ - ulint* flags, /*!< out: FSP_SPACE_FLAGS */ - ulint* space_id, /*!< out: tablespace ID */ + pfs_os_file_t data_file, + ibool one_read_already, + ulint* flags, + ulint* space_id, #ifdef UNIV_LOG_ARCHIVE - ulint* min_arch_log_no, /*!< out: min of archived - log numbers in data files */ - ulint* max_arch_log_no, /*!< out: max of archived - log numbers in data files */ + ulint* min_arch_log_no, + ulint* max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t* min_flushed_lsn, /*!< out: min of flushed - lsn values in data files */ - lsn_t* max_flushed_lsn, /*!< out: max of flushed - lsn values in data files */ - fil_space_crypt_t** crypt_data) /*< out: crypt data */ + lsn_t* flushed_lsn, + fil_space_crypt_t** crypt_data) { byte* buf; byte* page; - lsn_t flushed_lsn; const char* check_msg = NULL; fil_space_crypt_t* cdata; @@ -2407,6 +2379,11 @@ fil_read_first_page( *space_id = fsp_header_get_space_id(page); *flags = fsp_header_get_flags(page); + if (flushed_lsn) { + *flushed_lsn = mach_read_from_8(page + + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + } + if (!fsp_flags_is_valid(*flags)) { ulint cflags = fsp_flags_convert_from_101(*flags); if (cflags == ULINT_UNDEFINED) { @@ -2420,33 +2397,33 @@ fil_read_first_page( } check_msg = fil_check_first_page(page, *space_id, *flags); - } - - flushed_lsn = mach_read_from_8(page + - FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); - ulint space = fsp_header_get_space_id(page); - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(*flags)); + /* Possible encryption crypt data is also stored only to first page + of the first datafile. 
*/ - cdata = fil_space_read_crypt_data(space, page, offset); + ulint offset = fsp_header_get_crypt_offset( + fsp_flags_get_zip_size(*flags)); - if (crypt_data) { - *crypt_data = cdata; - } + cdata = fil_space_read_crypt_data(*space_id, page, offset); - /* If file space is encrypted we need to have at least some - encryption service available where to get keys */ - if (cdata && cdata->should_encrypt()) { + if (crypt_data) { + *crypt_data = cdata; + } - if (!encryption_key_id_exists(cdata->key_id)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Tablespace id %ld is encrypted but encryption service" - " or used key_id %u is not available. Can't continue opening tablespace.", - space, cdata->key_id); + /* If file space is encrypted we need to have at least some + encryption service available where to get keys */ + if (cdata && cdata->should_encrypt()) { - return ("table encrypted but encryption service not available."); + if (!encryption_key_id_exists(cdata->key_id)) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Tablespace id " ULINTPF + " is encrypted but encryption service" + " or used key_id %u is not available. " + "Can't continue opening tablespace.", + *space_id, cdata->key_id); + return ("table encrypted but encryption service not available."); + } } } @@ -2457,8 +2434,6 @@ fil_read_first_page( } if (!one_read_already) { - *min_flushed_lsn = flushed_lsn; - *max_flushed_lsn = flushed_lsn; #ifdef UNIV_LOG_ARCHIVE *min_arch_log_no = arch_log_no; *max_arch_log_no = arch_log_no; @@ -2466,16 +2441,11 @@ fil_read_first_page( return(NULL); } - if (*min_flushed_lsn > flushed_lsn) { - *min_flushed_lsn = flushed_lsn; - } - if (*max_flushed_lsn < flushed_lsn) { - *max_flushed_lsn = flushed_lsn; - } #ifdef UNIV_LOG_ARCHIVE if (*min_arch_log_no > arch_log_no) { *min_arch_log_no = arch_log_no; } + if (*max_arch_log_no < arch_log_no) { *max_arch_log_no = arch_log_no; } @@ -4173,6 +4143,7 @@ fil_open_single_table_tablespace( def.file = os_file_create_simple_no_error_handling( innodb_file_data_key, def.filepath, OS_FILE_OPEN, OS_FILE_READ_ONLY, &def.success, atomic_writes); + if (def.success) { tablespaces_found++; } @@ -4187,11 +4158,11 @@ fil_open_single_table_tablespace( /* Read the first page of the datadir tablespace, if found. */ if (def.success) { def.check_msg = fil_read_first_page( - def.file, FALSE, &def.flags, &def.id, + def.file, false, &def.flags, &def.id, #ifdef UNIV_LOG_ARCHIVE &space_arch_log_no, &space_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - &def.lsn, &def.lsn, &def.crypt_data); + NULL, &def.crypt_data); if (table) { table->crypt_data = def.crypt_data; @@ -4200,6 +4171,7 @@ fil_open_single_table_tablespace( def.valid = !def.check_msg && def.id == id && fsp_flags_match(flags, def.flags); + if (def.valid) { valid_tablespaces_found++; } else { @@ -4213,11 +4185,11 @@ fil_open_single_table_tablespace( /* Read the first page of the remote tablespace */ if (remote.success) { remote.check_msg = fil_read_first_page( - remote.file, FALSE, &remote.flags, &remote.id, + remote.file, false, &remote.flags, &remote.id, #ifdef UNIV_LOG_ARCHIVE &remote.arch_log_no, &remote.arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - &remote.lsn, &remote.lsn, &remote.crypt_data); + NULL, &remote.crypt_data); if (table) { table->crypt_data = remote.crypt_data; @@ -4227,6 +4199,7 @@ fil_open_single_table_tablespace( /* Validate this single-table-tablespace with SYS_TABLES. 
*/ remote.valid = !remote.check_msg && remote.id == id && fsp_flags_match(flags, remote.flags); + if (remote.valid) { valid_tablespaces_found++; } else { @@ -4241,11 +4214,11 @@ fil_open_single_table_tablespace( /* Read the first page of the datadir tablespace, if found. */ if (dict.success) { dict.check_msg = fil_read_first_page( - dict.file, FALSE, &dict.flags, &dict.id, + dict.file, false, &dict.flags, &dict.id, #ifdef UNIV_LOG_ARCHIVE &dict.arch_log_no, &dict.arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - &dict.lsn, &dict.lsn, &dict.crypt_data); + NULL, &dict.crypt_data); if (table) { table->crypt_data = dict.crypt_data; @@ -4289,26 +4262,32 @@ fil_open_single_table_tablespace( ib_logf(IB_LOG_LEVEL_ERROR, "A tablespace for %s has been found in " "multiple places;", tablename); + if (def.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Default location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - def.filepath, def.lsn, - (ulong) def.id, (ulong) def.flags); + "Default location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + def.filepath, + def.id, + def.flags); } + if (remote.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Remote location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - remote.filepath, remote.lsn, - (ulong) remote.id, (ulong) remote.flags); + "Remote location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + remote.filepath, + remote.id, + remote.flags); } + if (dict.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Dictionary location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - dict.filepath, dict.lsn, - (ulong) dict.id, (ulong) dict.flags); + "Dictionary location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + dict.filepath, + dict.id, + dict.flags); } /* Force-recovery will allow some tablespaces to be @@ -4341,6 +4320,7 @@ fil_open_single_table_tablespace( os_file_close(def.file); tablespaces_found--; } + if (dict.success && !dict.valid) { dict.success = false; os_file_close(dict.file); @@ -4348,6 +4328,7 @@ fil_open_single_table_tablespace( can be corrected below. 
*/ tablespaces_found--; } + if (remote.success && !remote.valid) { remote.success = false; os_file_close(remote.file); @@ -4698,7 +4679,7 @@ check_first_page: #ifdef UNIV_LOG_ARCHIVE &fsp->arch_log_no, &fsp->arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - &fsp->lsn, &fsp->lsn, &fsp->crypt_data)) { + NULL, &fsp->crypt_data)) { ib_logf(IB_LOG_LEVEL_ERROR, "%s in tablespace %s (table %s)", check_msg, fsp->filepath, tablename); @@ -4928,11 +4909,11 @@ will_not_choose: if (def.success && remote.success) { ib_logf(IB_LOG_LEVEL_ERROR, "Tablespaces for %s have been found in two places;\n" - "Location 1: SpaceID: %lu LSN: %lu File: %s\n" - "Location 2: SpaceID: %lu LSN: %lu File: %s\n" + "Location 1: SpaceID: " ULINTPF " File: %s\n" + "Location 2: SpaceID: " ULINTPF " File: %s\n" "You must delete one of them.", - tablename, (ulong) def.id, (ulong) def.lsn, - def.filepath, (ulong) remote.id, (ulong) remote.lsn, + tablename, def.id, + def.filepath, remote.id, remote.filepath); def.success = FALSE; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 012d81380f1..012539d1ace 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -18221,8 +18221,15 @@ checkpoint_now_set( log_make_checkpoint_at(LSN_MAX, TRUE); fil_flush_file_spaces(FIL_LOG); } - fil_write_flushed_lsn_to_data_files(log_sys->lsn, 0); - fil_flush_file_spaces(FIL_TABLESPACE); + + dberr_t err = fil_write_flushed_lsn(log_sys->lsn); + + if (err != DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Failed to write flush lsn to the " + "system tablespace at checkpoint err=%s", + ut_strerr(err)); + } } } diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index d546bab6862..ba440cb2a1c 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -215,7 +215,6 @@ struct fsp_open_info { ibool valid; /*!< Is the tablespace valid? */ pfs_os_file_t file; /*!< File handle */ char* filepath; /*!< File path to open */ - lsn_t lsn; /*!< Flushed LSN from header page */ ulint id; /*!< Space ID */ ulint flags; /*!< Tablespace flags */ ulint encryption_error; /*!< if an encryption error occurs */ @@ -637,17 +636,17 @@ void fil_set_max_space_id_if_bigger( /*===========================*/ ulint max_id);/*!< in: maximum known id */ + #ifndef UNIV_HOTBACKUP -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page -header of the first page of each data file in the system tablespace. -@return DB_SUCCESS or error number */ -UNIV_INTERN + +/** Write the flushed LSN to the page header of the first page in the +system tablespace. +@param[in] lsn flushed LSN +@return DB_SUCCESS or error number */ dberr_t -fil_write_flushed_lsn_to_data_files( -/*================================*/ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no); /*!< in: latest archived log file number */ +fil_write_flushed_lsn( + lsn_t lsn) + MY_ATTRIBUTE((warn_unused_result)); /** Acquire a tablespace when it could be dropped concurrently. Used by background threads that do not necessarily hold proper locks @@ -793,35 +792,37 @@ private: fil_space_t* m_space; }; -/*******************************************************************//** -Reads the flushed lsn, arch no, and tablespace flag fields from a data -file at database startup. +/** Reads the flushed lsn, arch no, space_id and tablespace flag fields from +the first page of a first data file at database startup. 
+@param[in] data_file open data file +@param[in] one_read_only true if first datafile is already + read +@param[out] flags FSP_SPACE_FLAGS +@param[out] space_id tablepspace ID +@param[out] min_arch_log_no min of archived log numbers in + data files +@param[out] max_arch_log_no max of archived log numbers in + data files +@param[out] flushed_lsn flushed lsn value +@param[out] crypt_data encryption crypt data @retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ UNIV_INTERN const char* fil_read_first_page( -/*================*/ - pfs_os_file_t data_file, /*!< in: open data file */ - ibool one_read_already, /*!< in: TRUE if min and max - parameters below already - contain sensible data */ - ulint* flags, /*!< out: FSP_SPACE_FLAGS */ - ulint* space_id, /*!< out: tablespace ID */ + pfs_os_file_t data_file, + ibool one_read_already, + ulint* flags, + ulint* space_id, #ifdef UNIV_LOG_ARCHIVE - ulint* min_arch_log_no, /*!< out: min of archived - log numbers in data files */ - ulint* max_arch_log_no, /*!< out: max of archived - log numbers in data files */ + ulint* min_arch_log_no, + ulint* max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t* min_flushed_lsn, /*!< out: min of flushed - lsn values in data files */ - lsn_t* max_flushed_lsn, /*!< out: max of flushed - lsn values in data files */ - fil_space_crypt_t** crypt_data) /*!< out: crypt data */ - - __attribute__((warn_unused_result)); + lsn_t* flushed_lsn, + fil_space_crypt_t** crypt_data) + MY_ATTRIBUTE((warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ + /*******************************************************************//** Parses the body of a log record written about an .ibd file operation. That is, the log record part after the standard (type, space id, page no) header of the diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 9ca1c46d72b..dca99f2a7a5 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -123,26 +123,25 @@ a freshly read page) */ # define recv_recover_page(jri, block) recv_recover_page_func(block) #endif /* !UNIV_HOTBACKUP */ -/********************************************************//** -Recovers from a checkpoint. When this function returns, the database is able + +/** Recovers from a checkpoint. When this function returns, the database is able to start processing of new user transactions, but the function recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. +@param[in] type LOG_CHECKPOINT or LOG_ARCHIVE +@param[in] limit_lsn recover up to this lsn if possible +@param[in] flushed_lsn flushed lsn from first data file @return error code or DB_SUCCESS */ UNIV_INTERN dberr_t recv_recovery_from_checkpoint_start_func( -/*=====================================*/ #ifdef UNIV_LOG_ARCHIVE - ulint type, /*!< in: LOG_CHECKPOINT or - LOG_ARCHIVE */ - lsn_t limit_lsn, /*!< in: recover up to this lsn - if possible */ + ulint type, + lsn_t limit_lsn, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t min_flushed_lsn,/*!< in: min flushed lsn from - data files */ - lsn_t max_flushed_lsn);/*!< in: max flushed lsn from - data files */ + lsn_t flushed_lsn) + MY_ATTRIBUTE((warn_unused_result)); + #ifdef UNIV_LOG_ARCHIVE /** Wrapper for recv_recovery_from_checkpoint_start_func(). Recovers from a checkpoint. 
When this function returns, the database is able @@ -151,11 +150,10 @@ recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. @param type in: LOG_CHECKPOINT or LOG_ARCHIVE @param lim in: recover up to this log sequence number if possible -@param min in: minimum flushed log sequence number from data files -@param max in: maximum flushed log sequence number from data files +@param lsn in: flushed log sequence number from first data file @return error code or DB_SUCCESS */ -# define recv_recovery_from_checkpoint_start(type,lim,min,max) \ - recv_recovery_from_checkpoint_start_func(type,lim,min,max) +# define recv_recovery_from_checkpoint_start(type,lim,lsn) \ + recv_recovery_from_checkpoint_start_func(type,lim,lsn) #else /* UNIV_LOG_ARCHIVE */ /** Wrapper for recv_recovery_from_checkpoint_start_func(). Recovers from a checkpoint. When this function returns, the database is able @@ -164,12 +162,12 @@ recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. @param type ignored: LOG_CHECKPOINT or LOG_ARCHIVE @param lim ignored: recover up to this log sequence number if possible -@param min in: minimum flushed log sequence number from data files -@param max in: maximum flushed log sequence number from data files +@param lsn in: flushed log sequence number from first data file @return error code or DB_SUCCESS */ -# define recv_recovery_from_checkpoint_start(type,lim,min,max) \ - recv_recovery_from_checkpoint_start_func(min,max) +# define recv_recovery_from_checkpoint_start(type,lim,lsn) \ + recv_recovery_from_checkpoint_start_func(lsn) #endif /* UNIV_LOG_ARCHIVE */ + /********************************************************//** Completes recovery from a checkpoint. */ UNIV_INTERN diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 14d1f4544cd..0dc06fd4b1b 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -3246,7 +3246,9 @@ logs_empty_and_mark_files_at_shutdown(void) /*=======================================*/ { lsn_t lsn; +#ifdef UNIV_LOG_ARCHIVE ulint arch_log_no; +#endif ulint count = 0; ulint pending_io; ibool server_busy; @@ -3454,9 +3456,9 @@ wait_suspend_loop: goto loop; } +#ifdef UNIV_LOG_ARCHIVE arch_log_no = 0; -#ifdef UNIV_LOG_ARCHIVE UT_LIST_GET_FIRST(log_sys->log_groups)->archived_file_no; if (!UT_LIST_GET_FIRST(log_sys->log_groups)->archived_offset) { @@ -3511,9 +3513,14 @@ wait_suspend_loop: srv_shutdown_lsn = lsn; if (!srv_read_only_mode) { - fil_write_flushed_lsn_to_data_files(lsn, arch_log_no); + dberr_t err = fil_write_flushed_lsn(lsn); - fil_flush_file_spaces(FIL_TABLESPACE); + if (err != DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Failed to write flush lsn to the " + "system tablespace at shutdown err=%s", + ut_strerr(err)); + } } fil_close_all_files(); diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 2dc2d419e3f..bf00cd8e8d9 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2929,22 +2929,22 @@ recv_init_crash_recovery(void) } } -/********************************************************//** -Recovers from a checkpoint. When this function returns, the database is able +/** Recovers from a checkpoint. 
When this function returns, the database is able to start processing of new user transactions, but the function recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. +@param[in] type LOG_CHECKPOINT or LOG_ARCHIVE +@param[in] limit_lsn recover up to this lsn if possible +@param[in] flushed_lsn flushed lsn from first data file @return error code or DB_SUCCESS */ UNIV_INTERN dberr_t recv_recovery_from_checkpoint_start_func( -/*=====================================*/ #ifdef UNIV_LOG_ARCHIVE - ulint type, /*!< in: LOG_CHECKPOINT or LOG_ARCHIVE */ - lsn_t limit_lsn, /*!< in: recover up to this lsn if possible */ + ulint type, + lsn_t limit_lsn, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t min_flushed_lsn,/*!< in: min flushed lsn from data files */ - lsn_t max_flushed_lsn)/*!< in: max flushed lsn from data files */ + lsn_t flushed_lsn) { log_group_t* group; log_group_t* max_cp_group; @@ -3165,6 +3165,7 @@ recv_recovery_from_checkpoint_start_func( group = UT_LIST_GET_NEXT(log_groups, group); } + /* Done with startup scan. Clear the flag. */ recv_log_scan_is_startup_type = FALSE; @@ -3177,10 +3178,9 @@ recv_recovery_from_checkpoint_start_func( there is something wrong we will print a message to the user about recovery: */ - if (checkpoint_lsn != max_flushed_lsn - || checkpoint_lsn != min_flushed_lsn) { + if (checkpoint_lsn != flushed_lsn) { - if (checkpoint_lsn < max_flushed_lsn) { + if (checkpoint_lsn < flushed_lsn) { ib_logf(IB_LOG_LEVEL_WARN, "The log sequence number " @@ -3191,24 +3191,21 @@ recv_recovery_from_checkpoint_start_func( "ib_logfiles to start up the database. " "Log sequence number in the " "ib_logfiles is " LSN_PF ", log" - "sequence numbers stamped " - "to ibdata file headers are between " - "" LSN_PF " and " LSN_PF ".", + "sequence number stamped " + "to ibdata file header is " LSN_PF ".", checkpoint_lsn, - min_flushed_lsn, - max_flushed_lsn); + flushed_lsn); } if (!recv_needed_recovery) { ib_logf(IB_LOG_LEVEL_INFO, - "The log sequence numbers " - LSN_PF " and " LSN_PF - " in ibdata files do not match" + "The log sequence number " + LSN_PF + " in ibdata file do not match" " the log sequence number " LSN_PF " in the ib_logfiles!", - min_flushed_lsn, - max_flushed_lsn, + flushed_lsn, checkpoint_lsn); if (!srv_read_only_mode) { diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index be65e4a6d17..f601ff17ebc 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -786,27 +786,26 @@ open_log_file( return(DB_SUCCESS); } -/*********************************************************************//** -Creates or opens database data files and closes them. +/** Creates or opens database data files and closes them. 
+@param[out] create_new_db true = create new database +@param[out] min_arch_log_no min of archived log numbers in + data files +@param[out] max_arch_log_no max of archived log numbers in + data files +@param[out] flushed_lsn flushed lsn in fist datafile +@param[out] sum_of_new_sizes sum of sizes of the new files + added @return DB_SUCCESS or error code */ static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t open_or_create_data_files( -/*======================*/ - ibool* create_new_db, /*!< out: TRUE if new database should be - created */ + bool* create_new_db, #ifdef UNIV_LOG_ARCHIVE - ulint* min_arch_log_no,/*!< out: min of archived log - numbers in data files */ - ulint* max_arch_log_no,/*!< out: max of archived log - numbers in data files */ + ulint* min_arch_log_no, + ulint* max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t* min_flushed_lsn,/*!< out: min of flushed lsn - values in data files */ - lsn_t* max_flushed_lsn,/*!< out: max of flushed lsn - values in data files */ - ulint* sum_of_new_sizes)/*!< out: sum of sizes of the - new files added */ + lsn_t* flushed_lsn, + ulint* sum_of_new_sizes) { ibool ret; ulint i; @@ -830,7 +829,7 @@ open_or_create_data_files( *sum_of_new_sizes = 0; - *create_new_db = FALSE; + *create_new_db = false; srv_normalize_path_for_win(srv_data_home); @@ -918,12 +917,13 @@ open_or_create_data_files( } const char* check_msg; + check_msg = fil_read_first_page( files[i], FALSE, &flags, &space, #ifdef UNIV_LOG_ARCHIVE min_arch_log_no, max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - min_flushed_lsn, max_flushed_lsn, NULL); + flushed_lsn, NULL); /* If first page is valid, don't overwrite DB. It prevents overwriting DB when mysql_install_db @@ -954,6 +954,7 @@ open_or_create_data_files( name); return(DB_ERROR); } + if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) { ut_a(!srv_read_only_mode); files[i] = os_file_create( @@ -973,7 +974,6 @@ open_or_create_data_files( } if (!ret) { - os_file_get_last_error(true); ib_logf(IB_LOG_LEVEL_ERROR, @@ -983,7 +983,6 @@ open_or_create_data_files( } if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) { - goto skip_size_check; } @@ -1010,16 +1009,15 @@ size_check: "auto-extending " "data file %s is " "of a different size " - "%lu pages (rounded " + ULINTPF " pages (rounded " "down to MB) than specified " "in the .cnf file: " - "initial %lu pages, " - "max %lu (relevant if " + "initial " ULINTPF " pages, " + "max " ULINTPF " (relevant if " "non-zero) pages!", name, - (ulong) rounded_size_pages, - (ulong) srv_data_file_sizes[i], - (ulong) + rounded_size_pages, + srv_data_file_sizes[i], srv_last_file_size_max); return(DB_ERROR); @@ -1032,12 +1030,12 @@ size_check: ib_logf(IB_LOG_LEVEL_ERROR, "Data file %s is of a different " - "size %lu pages (rounded down to MB) " + "size " ULINTPF " pages (rounded down to MB) " "than specified in the .cnf file " - "%lu pages!", + ULINTPF " pages!", name, - (ulong) rounded_size_pages, - (ulong) srv_data_file_sizes[i]); + rounded_size_pages, + srv_data_file_sizes[i]); return(DB_ERROR); } @@ -1057,7 +1055,7 @@ check_first_page: #ifdef UNIV_LOG_ARCHIVE min_arch_log_no, max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - min_flushed_lsn, max_flushed_lsn, &crypt_data); + flushed_lsn, &crypt_data); if (check_msg) { @@ -1094,9 +1092,9 @@ check_first_page: != fsp_flags_get_page_size(flags)) { ib_logf(IB_LOG_LEVEL_ERROR, - "Data file \"%s\" uses page size %lu," + "Data file \"%s\" uses page size " ULINTPF " ," "but the start-up parameter " - "is --innodb-page-size=%lu", + "is 
--innodb-page-size=" ULINTPF " .", name, fsp_flags_get_page_size(flags), UNIV_PAGE_SIZE); @@ -1127,9 +1125,9 @@ check_first_page: } ib_logf(IB_LOG_LEVEL_INFO, - "Setting file %s size to %lu MB", + "Setting file %s size to " ULINTPF " MB", name, - (ulong) (srv_data_file_sizes[i] + (srv_data_file_sizes[i] >> (20 - UNIV_PAGE_SIZE_SHIFT))); ret = os_file_set_size( @@ -1592,9 +1590,8 @@ dberr_t innobase_start_or_create_for_mysql(void) /*====================================*/ { - ibool create_new_db; - lsn_t min_flushed_lsn; - lsn_t max_flushed_lsn; + bool create_new_db; + lsn_t flushed_lsn; #ifdef UNIV_LOG_ARCHIVE ulint min_arch_log_no; ulint max_arch_log_no; @@ -2152,7 +2149,7 @@ innobase_start_or_create_for_mysql(void) #ifdef UNIV_LOG_ARCHIVE &min_arch_log_no, &max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - &min_flushed_lsn, &max_flushed_lsn, + &flushed_lsn, &sum_of_new_sizes); if (err == DB_FAIL) { @@ -2197,12 +2194,12 @@ innobase_start_or_create_for_mysql(void) bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL); ut_a(success); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); err = create_log_files(create_new_db, logfilename, dirnamelen, - max_flushed_lsn, logfile0); + flushed_lsn, logfile0); if (err != DB_SUCCESS) { return(err); @@ -2222,19 +2219,8 @@ innobase_start_or_create_for_mysql(void) if (err == DB_NOT_FOUND) { if (i == 0) { - if (max_flushed_lsn - != min_flushed_lsn) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot create" - " log files because" - " data files are" - " corrupt or" - " not in sync" - " with each other"); - return(DB_ERROR); - } - if (max_flushed_lsn < (lsn_t) 1000) { + if (flushed_lsn < (lsn_t) 1000) { ib_logf(IB_LOG_LEVEL_ERROR, "Cannot create" " log files because" @@ -2249,14 +2235,14 @@ innobase_start_or_create_for_mysql(void) err = create_log_files( create_new_db, logfilename, - dirnamelen, max_flushed_lsn, + dirnamelen, flushed_lsn, logfile0); if (err == DB_SUCCESS) { err = create_log_files_rename( logfilename, dirnamelen, - max_flushed_lsn, + flushed_lsn, logfile0); } @@ -2266,8 +2252,7 @@ innobase_start_or_create_for_mysql(void) /* Suppress the message about crash recovery. */ - max_flushed_lsn = min_flushed_lsn - = log_get_lsn(); + flushed_lsn = log_get_lsn(); goto files_checked; } else if (i < 2) { /* must have at least 2 log files */ @@ -2422,17 +2407,19 @@ files_checked: bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL); ut_a(success); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); /* Stamp the LSN to the data files. */ - fil_write_flushed_lsn_to_data_files(max_flushed_lsn, 0); + err = fil_write_flushed_lsn(flushed_lsn); - fil_flush_file_spaces(FIL_TABLESPACE); + if (err != DB_SUCCESS) { + return(err); + } err = create_log_files_rename(logfilename, dirnamelen, - max_flushed_lsn, logfile0); + flushed_lsn, logfile0); if (err != DB_SUCCESS) { return(err); @@ -2504,7 +2491,7 @@ files_checked: err = recv_recovery_from_checkpoint_start( LOG_CHECKPOINT, LSN_MAX, - min_flushed_lsn, max_flushed_lsn); + flushed_lsn); if (err == DB_SUCCESS) { /* Initialize the change buffer. 
*/ @@ -2672,7 +2659,7 @@ files_checked: DBUG_EXECUTE_IF("innodb_log_abort_1", return(DB_ERROR);); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); ib_logf(IB_LOG_LEVEL_WARN, "Resizing redo log from %u*%u to %u*%u pages" @@ -2681,7 +2668,7 @@ files_checked: (unsigned) srv_log_file_size, (unsigned) srv_n_log_files, (unsigned) srv_log_file_size_requested, - max_flushed_lsn); + flushed_lsn); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); @@ -2691,7 +2678,7 @@ files_checked: we need to explicitly flush the log buffers. */ fil_flush(SRV_LOG_SPACE_FIRST_ID); - ut_ad(max_flushed_lsn == log_get_lsn()); + ut_ad(flushed_lsn == log_get_lsn()); /* Prohibit redo log writes from any other threads until creating a log checkpoint at the @@ -2703,8 +2690,7 @@ files_checked: return(DB_ERROR);); /* Stamp the LSN to the data files. */ - fil_write_flushed_lsn_to_data_files( - max_flushed_lsn, 0); + err = fil_write_flushed_lsn(flushed_lsn); DBUG_EXECUTE_IF("innodb_log_abort_4", err = DB_ERROR;); @@ -2712,8 +2698,6 @@ files_checked: return(err); } - fil_flush_file_spaces(FIL_TABLESPACE); - /* Close and free the redo log files, so that we can replace them. */ fil_close_log_files(true); @@ -2730,13 +2714,23 @@ files_checked: srv_log_file_size = srv_log_file_size_requested; err = create_log_files(create_new_db, logfilename, - dirnamelen, max_flushed_lsn, + dirnamelen, flushed_lsn, logfile0); + if (err != DB_SUCCESS) { + return(err); + } + + /* create_log_files() can increase system lsn that is + why FIL_PAGE_FILE_FLUSH_LSN have to be updated */ + flushed_lsn = log_get_lsn(); + + err = fil_write_flushed_lsn(flushed_lsn); + if (err == DB_SUCCESS) { err = create_log_files_rename( logfilename, dirnamelen, - max_flushed_lsn, logfile0); + flushed_lsn, logfile0); } if (err != DB_SUCCESS) { diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 8ee85d0ecd8..7c30ff92b69 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -4371,11 +4371,11 @@ buf_page_create( memset(frame + FIL_PAGE_NEXT, 0xff, 4); mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED); - /* Reset to zero the file flush lsn field in the page; if the first - page of an ibdata file is 'created' in this function into the buffer - pool then we lose the original contents of the file flush lsn stamp. - Then InnoDB could in a crash recovery print a big, false, corruption - warning if the stamp contains an lsn bigger than the ib_logfile lsn. */ + /* FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION is only used on the + following pages: + (1) The first page of the InnoDB system tablespace (page 0:0) + (2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages + (3) key_version on encrypted pages (not page 0:0) */ memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8); diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 09ba89459ad..17e9a1be973 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -2265,99 +2265,70 @@ fil_set_max_space_id_if_bigger( mutex_exit(&fil_system->mutex); } -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page header -of the first page of a data file of the system tablespace (space 0), -which is uncompressed. */ -static MY_ATTRIBUTE((warn_unused_result)) +/** Write the flushed LSN to the page header of the first page in the +system tablespace. 
+@param[in] lsn flushed LSN +@return DB_SUCCESS or error number */ dberr_t -fil_write_lsn_and_arch_no_to_file( -/*==============================*/ - ulint space, /*!< in: space to write to */ - ulint sum_of_sizes, /*!< in: combined size of previous files - in space, in database pages */ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no MY_ATTRIBUTE((unused))) - /*!< in: archived log number to write */ +fil_write_flushed_lsn( + lsn_t lsn) { byte* buf1; byte* buf; dberr_t err; - buf1 = static_cast(mem_alloc(2 * UNIV_PAGE_SIZE)); + buf1 = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); buf = static_cast(ut_align(buf1, UNIV_PAGE_SIZE)); - err = fil_read(TRUE, space, 0, sum_of_sizes, 0, - UNIV_PAGE_SIZE, buf, NULL, 0); - if (err == DB_SUCCESS) { - mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, - lsn); - - err = fil_write(TRUE, space, 0, sum_of_sizes, 0, - UNIV_PAGE_SIZE, buf, NULL, 0); - } - - mem_free(buf1); - - return(err); -} - -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page -header of the first page of each data file in the system tablespace. -@return DB_SUCCESS or error number */ -UNIV_INTERN -dberr_t -fil_write_flushed_lsn_to_data_files( -/*================================*/ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no) /*!< in: latest archived log file number */ -{ - fil_space_t* space; - fil_node_t* node; - dberr_t err; + /* Acquire system tablespace */ + fil_space_t* space = fil_space_acquire(0); - mutex_enter(&fil_system->mutex); + /* If tablespace is not encrypted, stamp flush_lsn to + first page of all system tablespace datafiles to avoid + unnecessary error messages on possible downgrade. */ + if (space->crypt_data->min_key_version == 0) { + fil_node_t* node; + ulint sum_of_sizes = 0; - for (space = UT_LIST_GET_FIRST(fil_system->space_list); - space != NULL; - space = UT_LIST_GET_NEXT(space_list, space)) { - - /* We only write the lsn to all existing data files which have - been open during the lifetime of the mysqld process; they are - represented by the space objects in the tablespace memory - cache. Note that all data files in the system tablespace 0 - and the UNDO log tablespaces (if separate) are always open. */ - - if (space->purpose == FIL_TABLESPACE - && !fil_is_user_tablespace_id(space->id)) { - ulint sum_of_sizes = 0; - - for (node = UT_LIST_GET_FIRST(space->chain); - node != NULL; - node = UT_LIST_GET_NEXT(chain, node)) { - - mutex_exit(&fil_system->mutex); - - err = fil_write_lsn_and_arch_no_to_file( - space->id, sum_of_sizes, lsn, - arch_log_no); + for (node = UT_LIST_GET_FIRST(space->chain); + node != NULL; + node = UT_LIST_GET_NEXT(chain, node)) { - if (err != DB_SUCCESS) { + err = fil_read(TRUE, 0, 0, sum_of_sizes, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); - return(err); - } + if (err == DB_SUCCESS) { + mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, + lsn); - mutex_enter(&fil_system->mutex); + err = fil_write(TRUE, 0, 0, sum_of_sizes, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); sum_of_sizes += node->size; } } + } else { + /* When system tablespace is encrypted stamp flush_lsn to + only the first page of the first datafile (rest of pages + are encrypted). 
*/ + err = fil_read(TRUE, 0, 0, 0, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); + + if (err == DB_SUCCESS) { + mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, + lsn); + + err = fil_write(TRUE, 0, 0, 0, 0, + UNIV_PAGE_SIZE, buf, NULL, 0); + } } - mutex_exit(&fil_system->mutex); + fil_flush_file_spaces(FIL_TABLESPACE); + fil_space_release(space); - return(DB_SUCCESS); + ut_free(buf1); + + return(err); } /** Check the consistency of the first data page of a tablespace @@ -2410,30 +2381,29 @@ fil_check_first_page(const page_t* page, ulint space_id, ulint flags) return("inconsistent data in space header"); } -/*******************************************************************//** -Reads the flushed lsn, arch no, space_id and tablespace flag fields from -the first page of a data file at database startup. +/** Reads the flushed lsn, arch no, space_id and tablespace flag fields from +the first page of a first data file at database startup. +@param[in] data_file open data file +@param[in] one_read_only true if first datafile is already + read +@param[out] flags FSP_SPACE_FLAGS +@param[out] space_id tablepspace ID +@param[out] flushed_lsn flushed lsn value +@param[out] crypt_data encryption crypt data @retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ UNIV_INTERN const char* fil_read_first_page( -/*================*/ - pfs_os_file_t data_file, /*!< in: open data file */ - ibool one_read_already, /*!< in: TRUE if min and max - parameters below already - contain sensible data */ - ulint* flags, /*!< out: FSP_SPACE_FLAGS */ - ulint* space_id, /*!< out: tablespace ID */ - lsn_t* min_flushed_lsn, /*!< out: min of flushed - lsn values in data files */ - lsn_t* max_flushed_lsn, /*!< out: max of flushed - lsn values in data files */ - fil_space_crypt_t** crypt_data) /*< out: crypt data */ + pfs_os_file_t data_file, + ibool one_read_already, + ulint* flags, + ulint* space_id, + lsn_t* flushed_lsn, + fil_space_crypt_t** crypt_data) { byte* buf; byte* page; - lsn_t flushed_lsn; const char* check_msg = NULL; fil_space_crypt_t* cdata; @@ -2450,6 +2420,7 @@ fil_read_first_page( return "File size is less than minimum"; } } + buf = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); /* Align the memory for a possible read from a raw device */ @@ -2468,6 +2439,11 @@ fil_read_first_page( *space_id = fsp_header_get_space_id(page); *flags = fsp_header_get_flags(page); + if (flushed_lsn) { + *flushed_lsn = mach_read_from_8(page + + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + } + if (!fsp_flags_is_valid(*flags)) { ulint cflags = fsp_flags_convert_from_101(*flags); if (cflags == ULINT_UNDEFINED) { @@ -2480,37 +2456,36 @@ fil_read_first_page( } } - if (!(IS_XTRABACKUP() && srv_backup_mode)) { - check_msg = fil_check_first_page(page, *space_id, *flags); + if (!(IS_XTRABACKUP() && srv_backup_mode)) { + check_msg = fil_check_first_page(page, *space_id, *flags); } - } - - flushed_lsn = mach_read_from_8(page + - FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + /* Possible encryption crypt data is also stored only to first page + of the first datafile. 
*/ - ulint space = fsp_header_get_space_id(page); - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(*flags)); + ulint offset = fsp_header_get_crypt_offset( + fsp_flags_get_zip_size(*flags)); - cdata = fil_space_read_crypt_data(space, page, offset); + cdata = fil_space_read_crypt_data(*space_id, page, offset); - if (crypt_data) { - *crypt_data = cdata; - } + if (crypt_data) { + *crypt_data = cdata; + } - /* If file space is encrypted we need to have at least some - encryption service available where to get keys */ - if (cdata && cdata->should_encrypt()) { + /* If file space is encrypted we need to have at least some + encryption service available where to get keys */ + if (cdata && cdata->should_encrypt()) { - if (!encryption_key_id_exists(cdata->key_id)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Tablespace id %ld is encrypted but encryption service" - " or used key_id %u is not available. Can't continue opening tablespace.", - space, cdata->key_id); - - return ("table encrypted but encryption service not available."); + if (!encryption_key_id_exists(cdata->key_id)) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Tablespace id " ULINTPF + " is encrypted but encryption service" + " or used key_id %u is not available. " + "Can't continue opening tablespace.", + *space_id, cdata->key_id); + return ("table encrypted but encryption service not available."); + } } } @@ -2520,20 +2495,6 @@ fil_read_first_page( return(check_msg); } - if (!one_read_already) { - *min_flushed_lsn = flushed_lsn; - *max_flushed_lsn = flushed_lsn; - - return(NULL); - } - - if (*min_flushed_lsn > flushed_lsn) { - *min_flushed_lsn = flushed_lsn; - } - if (*max_flushed_lsn < flushed_lsn) { - *max_flushed_lsn = flushed_lsn; - } - return(NULL); } @@ -4377,6 +4338,7 @@ fil_open_single_table_tablespace( def.file = os_file_create_simple_no_error_handling( innodb_file_data_key, def.filepath, OS_FILE_OPEN, OS_FILE_READ_ONLY, &def.success, atomic_writes); + if (def.success) { tablespaces_found++; } @@ -4391,8 +4353,8 @@ fil_open_single_table_tablespace( /* Read the first page of the datadir tablespace, if found. */ if (def.success) { def.check_msg = fil_read_first_page( - def.file, FALSE, &def.flags, &def.id, - &def.lsn, &def.lsn, &def.crypt_data); + def.file, false, &def.flags, &def.id, + NULL, &def.crypt_data); if (table) { table->crypt_data = def.crypt_data; @@ -4401,6 +4363,7 @@ fil_open_single_table_tablespace( def.valid = !def.check_msg && def.id == id && fsp_flags_match(flags, def.flags); + if (def.valid) { valid_tablespaces_found++; } else { @@ -4414,8 +4377,8 @@ fil_open_single_table_tablespace( /* Read the first page of the remote tablespace */ if (remote.success) { remote.check_msg = fil_read_first_page( - remote.file, FALSE, &remote.flags, &remote.id, - &remote.lsn, &remote.lsn, &remote.crypt_data); + remote.file, false, &remote.flags, &remote.id, + NULL, &remote.crypt_data); if (table) { table->crypt_data = remote.crypt_data; @@ -4425,6 +4388,7 @@ fil_open_single_table_tablespace( /* Validate this single-table-tablespace with SYS_TABLES. */ remote.valid = !remote.check_msg && remote.id == id && fsp_flags_match(flags, remote.flags); + if (remote.valid) { valid_tablespaces_found++; } else { @@ -4439,8 +4403,8 @@ fil_open_single_table_tablespace( /* Read the first page of the datadir tablespace, if found. 
*/ if (dict.success) { dict.check_msg = fil_read_first_page( - dict.file, FALSE, &dict.flags, &dict.id, - &dict.lsn, &dict.lsn, &dict.crypt_data); + dict.file, false, &dict.flags, &dict.id, + NULL, &dict.crypt_data); if (table) { table->crypt_data = dict.crypt_data; @@ -4472,14 +4436,16 @@ fil_open_single_table_tablespace( "See " REFMAN "innodb-troubleshooting-datadict.html " "for how to resolve the issue.", tablename); + if (IS_XTRABACKUP() && fix_dict) { ib_logf(IB_LOG_LEVEL_WARN, - "It will be removed from the data dictionary."); + "It will be removed from the data dictionary."); if (purge_sys) { fil_remove_invalid_table_from_data_dict(tablename); } } + err = DB_CORRUPTION; goto cleanup_and_exit; @@ -4491,26 +4457,32 @@ fil_open_single_table_tablespace( ib_logf(IB_LOG_LEVEL_ERROR, "A tablespace for %s has been found in " "multiple places;", tablename); + if (def.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Default location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - def.filepath, def.lsn, - (ulong) def.id, (ulong) def.flags); + "Default location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + def.filepath, + def.id, + def.flags); } + if (remote.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Remote location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - remote.filepath, remote.lsn, - (ulong) remote.id, (ulong) remote.flags); + "Remote location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + remote.filepath, + remote.id, + remote.flags); } + if (dict.success) { ib_logf(IB_LOG_LEVEL_ERROR, - "Dictionary location; %s, LSN=" LSN_PF - ", Space ID=%lu, Flags=%lu", - dict.filepath, dict.lsn, - (ulong) dict.id, (ulong) dict.flags); + "Dictionary location; %s" + ", Space ID=" ULINTPF " , Flags=" ULINTPF " .", + dict.filepath, + dict.id, + dict.flags); } /* Force-recovery will allow some tablespaces to be @@ -4543,6 +4515,7 @@ fil_open_single_table_tablespace( os_file_close(def.file); tablespaces_found--; } + if (dict.success && !dict.valid) { dict.success = false; os_file_close(dict.file); @@ -4895,8 +4868,8 @@ fil_validate_single_table_tablespace( check_first_page: fsp->success = TRUE; if (const char* check_msg = fil_read_first_page( - fsp->file, FALSE, &fsp->flags, &fsp->id, - &fsp->lsn, &fsp->lsn, &fsp->crypt_data)) { + fsp->file, false, &fsp->flags, &fsp->id, + NULL, &fsp->crypt_data)) { ib_logf(IB_LOG_LEVEL_ERROR, "%s in tablespace %s (table %s)", check_msg, fsp->filepath, tablename); @@ -4909,6 +4882,7 @@ check_first_page: in Xtrabackup, this does not work.*/ return; } + if (!restore_attempted) { if (!fil_user_tablespace_find_space_id(fsp)) { return; @@ -5152,11 +5126,11 @@ will_not_choose: if (def.success && remote.success) { ib_logf(IB_LOG_LEVEL_ERROR, "Tablespaces for %s have been found in two places;\n" - "Location 1: SpaceID: %lu LSN: %lu File: %s\n" - "Location 2: SpaceID: %lu LSN: %lu File: %s\n" + "Location 1: SpaceID: " ULINTPF " File: %s\n" + "Location 2: SpaceID: " ULINTPF " File: %s\n" "You must delete one of them.", - tablename, (ulong) def.id, (ulong) def.lsn, - def.filepath, (ulong) remote.id, (ulong) remote.lsn, + tablename, def.id, + def.filepath, remote.id, remote.filepath); def.success = FALSE; diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index f4576dac03a..dd5d041aed9 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -19238,8 +19238,15 @@ checkpoint_now_set( log_make_checkpoint_at(LSN_MAX, TRUE); fil_flush_file_spaces(FIL_LOG); } - 
fil_write_flushed_lsn_to_data_files(log_sys->lsn, 0); - fil_flush_file_spaces(FIL_TABLESPACE); + + dberr_t err = fil_write_flushed_lsn(log_sys->lsn); + + if (err != DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Failed to write flush lsn to the " + "system tablespace at checkpoint err=%s", + ut_strerr(err)); + } } } diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index a4e147b9797..b861225f562 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -212,7 +212,6 @@ struct fsp_open_info { ibool valid; /*!< Is the tablespace valid? */ pfs_os_file_t file; /*!< File handle */ char* filepath; /*!< File path to open */ - lsn_t lsn; /*!< Flushed LSN from header page */ ulint id; /*!< Space ID */ ulint flags; /*!< Tablespace flags */ ulint encryption_error; /*!< if an encryption error occurs */ @@ -643,17 +642,17 @@ void fil_set_max_space_id_if_bigger( /*===========================*/ ulint max_id);/*!< in: maximum known id */ + #ifndef UNIV_HOTBACKUP -/****************************************************************//** -Writes the flushed lsn and the latest archived log number to the page -header of the first page of each data file in the system tablespace. -@return DB_SUCCESS or error number */ -UNIV_INTERN + +/** Write the flushed LSN to the page header of the first page in the +system tablespace. +@param[in] lsn flushed LSN +@return DB_SUCCESS or error number */ dberr_t -fil_write_flushed_lsn_to_data_files( -/*================================*/ - lsn_t lsn, /*!< in: lsn to write */ - ulint arch_log_no); /*!< in: latest archived log file number */ +fil_write_flushed_lsn( + lsn_t lsn) + MY_ATTRIBUTE((warn_unused_result)); /** Acquire a tablespace when it could be dropped concurrently. Used by background threads that do not necessarily hold proper locks @@ -799,28 +798,28 @@ private: fil_space_t* m_space; }; -/*******************************************************************//** -Reads the flushed lsn, arch no, and tablespace flag fields from a data -file at database startup. +/** Reads the flushed lsn, arch no, space_id and tablespace flag fields from +the first page of a first data file at database startup. +@param[in] data_file open data file +@param[in] one_read_only true if first datafile is already + read +@param[out] flags FSP_SPACE_FLAGS +@param[out] space_id tablepspace ID +@param[out] flushed_lsn flushed lsn value +@param[out] crypt_data encryption crypt data @retval NULL on success, or if innodb_force_recovery is set @return pointer to an error message string */ UNIV_INTERN const char* fil_read_first_page( -/*================*/ - pfs_os_file_t data_file, /*!< in: open data file */ - ibool one_read_already, /*!< in: TRUE if min and max - parameters below already - contain sensible data */ - ulint* flags, /*!< out: FSP_SPACE_FLAGS */ - ulint* space_id, /*!< out: tablespace ID */ - lsn_t* min_flushed_lsn, /*!< out: min of flushed - lsn values in data files */ - lsn_t* max_flushed_lsn, /*!< out: max of flushed - lsn values in data files */ - fil_space_crypt_t** crypt_data) /*!< out: crypt data */ - - __attribute__((warn_unused_result)); + pfs_os_file_t data_file, + ibool one_read_already, + ulint* flags, + ulint* space_id, + lsn_t* flushed_lsn, + fil_space_crypt_t** crypt_data) + MY_ATTRIBUTE((warn_unused_result)); + #endif /* !UNIV_HOTBACKUP */ /*******************************************************************//** Parses the body of a log record written about an .ibd file operation. 
That is, diff --git a/storage/xtradb/include/log0recv.h b/storage/xtradb/include/log0recv.h index e7b6a937f01..73d53d2ddab 100644 --- a/storage/xtradb/include/log0recv.h +++ b/storage/xtradb/include/log0recv.h @@ -137,26 +137,25 @@ a freshly read page) */ # define recv_recover_page(jri, block) recv_recover_page_func(block) #endif /* !UNIV_HOTBACKUP */ -/********************************************************//** -Recovers from a checkpoint. When this function returns, the database is able + +/** Recovers from a checkpoint. When this function returns, the database is able to start processing of new user transactions, but the function recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. +@param[in] type LOG_CHECKPOINT or LOG_ARCHIVE +@param[in] limit_lsn recover up to this lsn if possible +@param[in] flushed_lsn flushed lsn from first data file @return error code or DB_SUCCESS */ UNIV_INTERN dberr_t recv_recovery_from_checkpoint_start_func( -/*=====================================*/ #ifdef UNIV_LOG_ARCHIVE - ulint type, /*!< in: LOG_CHECKPOINT or - LOG_ARCHIVE */ - lsn_t limit_lsn, /*!< in: recover up to this lsn - if possible */ + ulint type, + lsn_t limit_lsn, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t min_flushed_lsn,/*!< in: min flushed lsn from - data files */ - lsn_t max_flushed_lsn);/*!< in: max flushed lsn from - data files */ + lsn_t flushed_lsn) + MY_ATTRIBUTE((warn_unused_result)); + #ifdef UNIV_LOG_ARCHIVE /** Wrapper for recv_recovery_from_checkpoint_start_func(). Recovers from a checkpoint. When this function returns, the database is able @@ -165,11 +164,10 @@ recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. @param type in: LOG_CHECKPOINT or LOG_ARCHIVE @param lim in: recover up to this log sequence number if possible -@param min in: minimum flushed log sequence number from data files -@param max in: maximum flushed log sequence number from data files +@param lsn in: flushed log sequence number from first data file @return error code or DB_SUCCESS */ -# define recv_recovery_from_checkpoint_start(type,lim,min,max) \ - recv_recovery_from_checkpoint_start_func(type,lim,min,max) +# define recv_recovery_from_checkpoint_start(type,lim,lsn) \ + recv_recovery_from_checkpoint_start_func(type,lim,lsn) #else /* UNIV_LOG_ARCHIVE */ /** Wrapper for recv_recovery_from_checkpoint_start_func(). Recovers from a checkpoint. When this function returns, the database is able @@ -178,12 +176,12 @@ recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. @param type ignored: LOG_CHECKPOINT or LOG_ARCHIVE @param lim ignored: recover up to this log sequence number if possible -@param min in: minimum flushed log sequence number from data files -@param max in: maximum flushed log sequence number from data files +@param lsn in: flushed log sequence number from first data file @return error code or DB_SUCCESS */ -# define recv_recovery_from_checkpoint_start(type,lim,min,max) \ - recv_recovery_from_checkpoint_start_func(min,max) +# define recv_recovery_from_checkpoint_start(type,lim,lsn) \ + recv_recovery_from_checkpoint_start_func(lsn) #endif /* UNIV_LOG_ARCHIVE */ + /********************************************************//** Completes recovery from a checkpoint. 
*/ UNIV_INTERN diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc index eae7f2bb09b..3252cd793c9 100644 --- a/storage/xtradb/log/log0log.cc +++ b/storage/xtradb/log/log0log.cc @@ -3836,9 +3836,14 @@ wait_suspend_loop: srv_shutdown_lsn = lsn; if (!srv_read_only_mode) { - fil_write_flushed_lsn_to_data_files(lsn, 0); + dberr_t err = fil_write_flushed_lsn(lsn); - fil_flush_file_spaces(FIL_TABLESPACE); + if (err != DB_SUCCESS) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Failed to write flush lsn to the " + "system tablespace at shutdown err=%s", + ut_strerr(err)); + } } fil_close_all_files(); diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index 0b797fd6c23..037bc9d35d3 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -3019,22 +3019,22 @@ recv_init_crash_recovery(void) } } -/********************************************************//** -Recovers from a checkpoint. When this function returns, the database is able +/** Recovers from a checkpoint. When this function returns, the database is able to start processing of new user transactions, but the function recv_recovery_from_checkpoint_finish should be called later to complete the recovery and free the resources used in it. +@param[in] type LOG_CHECKPOINT or LOG_ARCHIVE +@param[in] limit_lsn recover up to this lsn if possible +@param[in] flushed_lsn flushed lsn from first data file @return error code or DB_SUCCESS */ UNIV_INTERN dberr_t recv_recovery_from_checkpoint_start_func( -/*=====================================*/ #ifdef UNIV_LOG_ARCHIVE - ulint type, /*!< in: LOG_CHECKPOINT or LOG_ARCHIVE */ - lsn_t limit_lsn, /*!< in: recover up to this lsn if possible */ + ulint type, + lsn_t limit_lsn, #endif /* UNIV_LOG_ARCHIVE */ - lsn_t min_flushed_lsn,/*!< in: min flushed lsn from data files */ - lsn_t max_flushed_lsn)/*!< in: max flushed lsn from data files */ + lsn_t flushed_lsn) { log_group_t* group; log_group_t* max_cp_group; @@ -3262,6 +3262,7 @@ recv_recovery_from_checkpoint_start_func( group = UT_LIST_GET_NEXT(log_groups, group); } + /* Done with startup scan. Clear the flag. 
*/ recv_log_scan_is_startup_type = FALSE; @@ -3274,10 +3275,9 @@ recv_recovery_from_checkpoint_start_func( there is something wrong we will print a message to the user about recovery: */ - if (checkpoint_lsn != max_flushed_lsn - || checkpoint_lsn != min_flushed_lsn) { + if (checkpoint_lsn != flushed_lsn) { - if (checkpoint_lsn < max_flushed_lsn) { + if (checkpoint_lsn > (20 - UNIV_PAGE_SIZE_SHIFT))); ret = os_file_set_size( @@ -1655,9 +1654,8 @@ dberr_t innobase_start_or_create_for_mysql(void) /*====================================*/ { - ibool create_new_db; - lsn_t min_flushed_lsn; - lsn_t max_flushed_lsn; + bool create_new_db; + lsn_t flushed_lsn; #ifdef UNIV_LOG_ARCHIVE lsn_t min_arch_log_no = LSN_MAX; lsn_t max_arch_log_no = LSN_MAX; @@ -2237,7 +2235,7 @@ innobase_start_or_create_for_mysql(void) #ifdef UNIV_LOG_ARCHIVE &min_arch_log_no, &max_arch_log_no, #endif /* UNIV_LOG_ARCHIVE */ - &min_flushed_lsn, &max_flushed_lsn, + &flushed_lsn, &sum_of_new_sizes); if (err == DB_FAIL) { @@ -2281,12 +2279,12 @@ innobase_start_or_create_for_mysql(void) bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL); ut_a(success); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); err = create_log_files(create_new_db, logfilename, dirnamelen, - max_flushed_lsn, logfile0); + flushed_lsn, logfile0); if (err != DB_SUCCESS) { return(err); @@ -2306,19 +2304,8 @@ innobase_start_or_create_for_mysql(void) if (err == DB_NOT_FOUND) { if (i == 0) { - if (max_flushed_lsn - != min_flushed_lsn) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Cannot create" - " log files because" - " data files are" - " corrupt or" - " not in sync" - " with each other"); - return(DB_ERROR); - } - if (max_flushed_lsn < (lsn_t) 1000) { + if (flushed_lsn < (lsn_t) 1000) { ib_logf(IB_LOG_LEVEL_ERROR, "Cannot create" " log files because" @@ -2333,14 +2320,14 @@ innobase_start_or_create_for_mysql(void) err = create_log_files( create_new_db, logfilename, - dirnamelen, max_flushed_lsn, + dirnamelen, flushed_lsn, logfile0); if (err == DB_SUCCESS) { err = create_log_files_rename( logfilename, dirnamelen, - max_flushed_lsn, + flushed_lsn, logfile0); } @@ -2350,8 +2337,7 @@ innobase_start_or_create_for_mysql(void) /* Suppress the message about crash recovery. */ - max_flushed_lsn = min_flushed_lsn - = log_get_lsn(); + flushed_lsn = log_get_lsn(); goto files_checked; } else if (i < 2 && !IS_XTRABACKUP()) { /* must have at least 2 log files */ @@ -2509,17 +2495,19 @@ files_checked: bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL); ut_a(success); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); /* Stamp the LSN to the data files. 
*/ - fil_write_flushed_lsn_to_data_files(max_flushed_lsn, 0); + err = fil_write_flushed_lsn(flushed_lsn); - fil_flush_file_spaces(FIL_TABLESPACE); + if (err != DB_SUCCESS) { + return(err); + } err = create_log_files_rename(logfilename, dirnamelen, - max_flushed_lsn, logfile0); + flushed_lsn, logfile0); if (err != DB_SUCCESS) { return(err); @@ -2574,7 +2562,7 @@ files_checked: err = recv_recovery_from_checkpoint_start( LOG_CHECKPOINT, LSN_MAX, - min_flushed_lsn, max_flushed_lsn); + flushed_lsn); if (err != DB_SUCCESS) { return(err); @@ -2757,7 +2745,7 @@ files_checked: DBUG_EXECUTE_IF("innodb_log_abort_1", return(DB_ERROR);); - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); + flushed_lsn = log_get_lsn(); ib_logf(IB_LOG_LEVEL_WARN, "Resizing redo log from %u*%u to %u*%u pages" @@ -2766,7 +2754,7 @@ files_checked: (unsigned) srv_log_file_size, (unsigned) srv_n_log_files, (unsigned) srv_log_file_size_requested, - max_flushed_lsn); + flushed_lsn); buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST); @@ -2776,7 +2764,7 @@ files_checked: we need to explicitly flush the log buffers. */ fil_flush(SRV_LOG_SPACE_FIRST_ID); - ut_ad(max_flushed_lsn == log_get_lsn()); + ut_ad(flushed_lsn == log_get_lsn()); /* Prohibit redo log writes from any other threads until creating a log checkpoint at the @@ -2788,8 +2776,7 @@ files_checked: return(DB_ERROR);); /* Stamp the LSN to the data files. */ - fil_write_flushed_lsn_to_data_files( - max_flushed_lsn, 0); + err = fil_write_flushed_lsn(flushed_lsn); DBUG_EXECUTE_IF("innodb_log_abort_4", err = DB_ERROR;); @@ -2797,8 +2784,6 @@ files_checked: return(err); } - fil_flush_file_spaces(FIL_TABLESPACE); - /* Close and free the redo log files, so that we can replace them. */ fil_close_log_files(true); @@ -2815,7 +2800,7 @@ files_checked: srv_log_file_size = srv_log_file_size_requested; err = create_log_files(create_new_db, logfilename, - dirnamelen, max_flushed_lsn, + dirnamelen, flushed_lsn, logfile0); if (err != DB_SUCCESS) { @@ -2824,12 +2809,17 @@ files_checked: /* create_log_files() can increase system lsn that is why FIL_PAGE_FILE_FLUSH_LSN have to be updated */ - min_flushed_lsn = max_flushed_lsn = log_get_lsn(); - fil_write_flushed_lsn_to_data_files(min_flushed_lsn, 0); - fil_flush_file_spaces(FIL_TABLESPACE); + flushed_lsn = log_get_lsn(); + + err = fil_write_flushed_lsn(flushed_lsn); + + if (err != DB_SUCCESS) { + return(err); + } err = create_log_files_rename(logfilename, dirnamelen, log_get_lsn(), logfile0); + if (err != DB_SUCCESS) { return(err); } -- cgit v1.2.1 From 6b6987154a23a8eba72fd58cbff915ae6d17189f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 30 May 2017 12:02:42 +0300 Subject: MDEV-12114: install_db shows corruption for rest encryption and innodb_checksum_algorithm=strict_none Problem was that checksum check resulted false positives that page is both not encrypted and encryted when checksum_algorithm was strict_none. Encrypton checksum will use only crc32 regardless of setting. buf_zip_decompress: If compression fails report a error message containing the space name if available (not available during import). And note if space could be encrypted. buf_page_get_gen: Do not assert if decompression fails, instead unfix the page and return NULL to upper layer. fil_crypt_calculate_checksum: Use only crc32 method. fil_space_verify_crypt_checksum: Here we need to check crc32, innodb and none method for old datafiles. fil_space_release_for_io: Allow null space. 
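A minimal self-contained sketch of the crc32-only rule described above. zlib's crc32() stands in here for InnoDB's own checksum helpers (buf_calc_page_crc32() and page_zip_calc_checksum() with SRV_CHECKSUM_ALGORITHM_CRC32, as used in the fil0crypt.cc hunks below), and the byte ranges hashed by the real helpers are more involved than shown; only the dispatch logic mirrors the patch.

    #include <zlib.h>
    #include <cstdint>

    using ulint = unsigned long;

    /* Stand-ins for the real InnoDB checksum routines, defined with zlib's
       crc32() only so that this sketch compiles on its own. */
    static uint32_t page_crc32(const unsigned char* frame, ulint page_size)
    {
            return (uint32_t) crc32(0L, frame, (uInt) page_size);
    }

    static uint32_t page_zip_crc32(const unsigned char* frame, ulint zip_size)
    {
            return (uint32_t) crc32(0L, frame, (uInt) zip_size);
    }

    /* The dispatch installed by this fix: the post-encryption checksum no
       longer follows innodb_checksum_algorithm and is always a crc32
       variant, both for uncompressed pages (zip_size == 0) and for
       ROW_FORMAT=COMPRESSED pages. */
    uint32_t encrypted_page_checksum(const unsigned char* frame,
                                     ulint page_size, ulint zip_size)
    {
            return zip_size == 0 ? page_crc32(frame, page_size)
                                 : page_zip_crc32(frame, zip_size);
    }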
encryption.innodb-compressed-blob is now run with crc32 and none combinations. Note that with none and strict_none method there is not really a way to detect page corruptions and page corruptions after decrypting the page with incorrect key. New test innodb-checksum-algorithm to test different checksum algorithms with encrypted, row compressed and page compressed tables. --- storage/innobase/buf/buf0buf.cc | 101 +++++++++++++++++++++++++++++--------- storage/innobase/fil/fil0crypt.cc | 34 ++----------- storage/xtradb/buf/buf0buf.cc | 101 +++++++++++++++++++++++++++++--------- storage/xtradb/fil/fil0crypt.cc | 34 ++----------- 4 files changed, 164 insertions(+), 106 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 2cb2673f7fb..9b9d0b37f13 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -354,6 +354,15 @@ bool buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) MY_ATTRIBUTE((nonnull)); +/********************************************************************//** +Mark a table with the specified space pointed by bpage->space corrupted. +Also remove the bpage from LRU list. +@param[in,out] bpage Block */ +static +void +buf_mark_space_corrupt( + buf_page_t* bpage); + /* prototypes for new functions added to ha_innodb.cc */ trx_t* innobase_get_trx(); @@ -2526,17 +2535,26 @@ buf_zip_decompress( { const byte* frame = block->page.zip.data; ulint size = page_zip_get_size(&block->page.zip); + /* Space is not found if this function is called during IMPORT */ + fil_space_t* space = fil_space_acquire_for_io(block->page.space); + const unsigned key_version = mach_read_from_4(frame + + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + fil_space_crypt_t* crypt_data = space ? space->crypt_data : NULL; + const bool encrypted = crypt_data + && crypt_data->type != CRYPT_SCHEME_UNENCRYPTED + && (!crypt_data->is_default_encryption() + || srv_encrypt_tables); ut_ad(buf_block_get_zip_size(block)); ut_a(buf_block_get_space(block) != 0); if (UNIV_UNLIKELY(check && !page_zip_verify_checksum(frame, size))) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: compressed page checksum mismatch" - " (space %u page %u): stored: %lu, crc32: %lu " - "innodb: %lu, none: %lu\n", + ib_logf(IB_LOG_LEVEL_ERROR, + "Compressed page checksum mismatch" + " for %s [%u:%u]: stored: " ULINTPF ", crc32: " ULINTPF + " innodb: " ULINTPF ", none: " ULINTPF ".", + space ? space->chain.start->name : "N/A", block->page.space, block->page.offset, mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM), page_zip_calc_checksum(frame, size, @@ -2545,22 +2563,28 @@ buf_zip_decompress( SRV_CHECKSUM_ALGORITHM_INNODB), page_zip_calc_checksum(frame, size, SRV_CHECKSUM_ALGORITHM_NONE)); - return(FALSE); + goto err_exit; } switch (fil_page_get_type(frame)) { - case FIL_PAGE_INDEX: + case FIL_PAGE_INDEX: { + if (page_zip_decompress(&block->page.zip, block->frame, TRUE)) { + if (space) { + fil_space_release_for_io(space); + } return(TRUE); } - fprintf(stderr, - "InnoDB: unable to decompress space %u page %u\n", + ib_logf(IB_LOG_LEVEL_ERROR, + "Unable to decompress space %s [%u:%u]", + space ? space->chain.start->name : "N/A", block->page.space, block->page.offset); - return(FALSE); + goto err_exit; + } case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: case FIL_PAGE_IBUF_BITMAP: @@ -2571,14 +2595,36 @@ buf_zip_decompress( /* Copy to uncompressed storage. 
*/ memcpy(block->frame, frame, buf_block_get_zip_size(block)); + + if (space) { + fil_space_release_for_io(space); + } + return(TRUE); } - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: unknown compressed page" - " type %lu\n", - fil_page_get_type(frame)); + ib_logf(IB_LOG_LEVEL_ERROR, + "Unknown compressed page in %s [%u:%u]" + " type %s [" ULINTPF "].", + space ? space->chain.start->name : "N/A", + block->page.space, block->page.offset, + fil_get_page_type_name(fil_page_get_type(frame)), fil_page_get_type(frame)); + +err_exit: + if (encrypted) { + ib_logf(IB_LOG_LEVEL_INFO, + "Row compressed page could be encrypted with key_version %u.", + key_version); + block->page.encrypted = true; + dict_set_encrypted_by_space(block->page.space); + } else { + dict_set_corrupted_by_space(block->page.space); + } + + if (space) { + fil_space_release_for_io(space); + } + return(FALSE); } @@ -3031,9 +3077,9 @@ loop: } ib_logf(IB_LOG_LEVEL_FATAL, "Unable" - " to read tablespace %lu page no" - " %lu into the buffer pool after" - " %lu attempts" + " to read tablespace " ULINTPF " page no " + ULINTPF " into the buffer pool after " + ULINTPF " attempts." " The most probable cause" " of this error may be that the" " table has been corrupted." @@ -3232,12 +3278,21 @@ got_block: /* Decompress the page while not holding buf_pool->mutex or block->mutex. */ - /* Page checksum verification is already done when - the page is read from disk. Hence page checksum - verification is not necessary when decompressing the page. */ { - bool success = buf_zip_decompress(block, FALSE); - ut_a(success); + bool success = buf_zip_decompress(block, TRUE); + + if (!success) { + buf_pool_mutex_enter(buf_pool); + buf_block_mutex_enter(fix_block); + buf_block_set_io_fix(fix_block, BUF_IO_NONE); + buf_block_mutex_exit(fix_block); + + --buf_pool->n_pend_unzip; + buf_block_unfix(fix_block); + buf_pool_mutex_exit(buf_pool); + rw_lock_x_unlock(&fix_block->lock); + return NULL; + } } if (!recv_no_ibuf_operations) { diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index a6d85fb89bf..2131a936656 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -887,7 +887,7 @@ fil_space_decrypt( Calculate post encryption checksum @param[in] zip_size zip_size or 0 @param[in] dst_frame Block where checksum is calculated -@return page checksum or BUF_NO_CHECKSUM_MAGIC +@return page checksum not needed. 
*/ UNIV_INTERN ulint @@ -896,30 +896,13 @@ fil_crypt_calculate_checksum( const byte* dst_frame) { ib_uint32_t checksum = 0; - srv_checksum_algorithm_t algorithm = - static_cast(srv_checksum_algorithm); + /* For encrypted tables we use only crc32 and strict_crc32 */ if (zip_size == 0) { - switch (algorithm) { - case SRV_CHECKSUM_ALGORITHM_CRC32: - case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: - checksum = buf_calc_page_crc32(dst_frame); - break; - case SRV_CHECKSUM_ALGORITHM_INNODB: - case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: - checksum = (ib_uint32_t) buf_calc_page_new_checksum( - dst_frame); - break; - case SRV_CHECKSUM_ALGORITHM_NONE: - case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: - checksum = BUF_NO_CHECKSUM_MAGIC; - break; - /* no default so the compiler will emit a warning - * if new enum is added and not handled here */ - } + checksum = buf_calc_page_crc32(dst_frame); } else { checksum = page_zip_calc_checksum(dst_frame, zip_size, - algorithm); + SRV_CHECKSUM_ALGORITHM_CRC32); } return checksum; @@ -953,14 +936,6 @@ fil_space_verify_crypt_checksum( return(false); } - srv_checksum_algorithm_t algorithm = - static_cast(srv_checksum_algorithm); - - /* If no checksum is used, can't continue checking. */ - if (algorithm == SRV_CHECKSUM_ALGORITHM_NONE) { - return(true); - } - /* Read stored post encryption checksum. */ ib_uint32_t checksum = mach_read_from_4( page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4); @@ -1044,7 +1019,6 @@ fil_space_verify_crypt_checksum( checksum2 = mach_read_from_4( page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM); valid = (buf_page_is_checksum_valid_crc32(page,checksum1,checksum2) - || buf_page_is_checksum_valid_none(page,checksum1,checksum2) || buf_page_is_checksum_valid_innodb(page,checksum1, checksum2)); } diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 7c30ff92b69..da4ab059d07 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -86,6 +86,15 @@ bool buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) MY_ATTRIBUTE((nonnull)); +/********************************************************************//** +Mark a table with the specified space pointed by bpage->space corrupted. +Also remove the bpage from LRU list. +@param[in,out] bpage Block */ +static +void +buf_mark_space_corrupt( + buf_page_t* bpage); + /* prototypes for new functions added to ha_innodb.cc */ trx_t* innobase_get_trx(); @@ -2538,17 +2547,26 @@ buf_zip_decompress( { const byte* frame = block->page.zip.data; ulint size = page_zip_get_size(&block->page.zip); + /* Space is not found if this function is called during IMPORT */ + fil_space_t* space = fil_space_acquire_for_io(block->page.space); + const unsigned key_version = mach_read_from_4(frame + + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); + fil_space_crypt_t* crypt_data = space ? space->crypt_data : NULL; + const bool encrypted = crypt_data + && crypt_data->type != CRYPT_SCHEME_UNENCRYPTED + && (!crypt_data->is_default_encryption() + || srv_encrypt_tables); ut_ad(buf_block_get_zip_size(block)); ut_a(buf_block_get_space(block) != 0); if (UNIV_UNLIKELY(check && !page_zip_verify_checksum(frame, size))) { - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: compressed page checksum mismatch" - " (space %u page %u): stored: %lu, crc32: %lu " - "innodb: %lu, none: %lu\n", + ib_logf(IB_LOG_LEVEL_ERROR, + "Compressed page checksum mismatch" + " for %s [%u:%u]: stored: " ULINTPF ", crc32: " ULINTPF + " innodb: " ULINTPF ", none: " ULINTPF ".", + space ? 
space->chain.start->name : "N/A", block->page.space, block->page.offset, mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM), page_zip_calc_checksum(frame, size, @@ -2557,22 +2575,28 @@ buf_zip_decompress( SRV_CHECKSUM_ALGORITHM_INNODB), page_zip_calc_checksum(frame, size, SRV_CHECKSUM_ALGORITHM_NONE)); - return(FALSE); + goto err_exit; } switch (fil_page_get_type(frame)) { - case FIL_PAGE_INDEX: + case FIL_PAGE_INDEX: { + if (page_zip_decompress(&block->page.zip, block->frame, TRUE)) { + if (space) { + fil_space_release_for_io(space); + } return(TRUE); } - fprintf(stderr, - "InnoDB: unable to decompress space %u page %u\n", + ib_logf(IB_LOG_LEVEL_ERROR, + "Unable to decompress space %s [%u:%u]", + space ? space->chain.start->name : "N/A", block->page.space, block->page.offset); - return(FALSE); + goto err_exit; + } case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: case FIL_PAGE_IBUF_BITMAP: @@ -2583,14 +2607,36 @@ buf_zip_decompress( /* Copy to uncompressed storage. */ memcpy(block->frame, frame, buf_block_get_zip_size(block)); + + if (space) { + fil_space_release_for_io(space); + } + return(TRUE); } - ut_print_timestamp(stderr); - fprintf(stderr, - " InnoDB: unknown compressed page" - " type %lu\n", - fil_page_get_type(frame)); + ib_logf(IB_LOG_LEVEL_ERROR, + "Unknown compressed page in %s [%u:%u]" + " type %s [" ULINTPF "].", + space ? space->chain.start->name : "N/A", + block->page.space, block->page.offset, + fil_get_page_type_name(fil_page_get_type(frame)), fil_page_get_type(frame)); + +err_exit: + if (encrypted) { + ib_logf(IB_LOG_LEVEL_INFO, + "Row compressed page could be encrypted with key_version %u.", + key_version); + block->page.encrypted = true; + dict_set_encrypted_by_space(block->page.space); + } else { + dict_set_corrupted_by_space(block->page.space); + } + + if (space) { + fil_space_release_for_io(space); + } + return(FALSE); } @@ -3073,9 +3119,9 @@ loop: } ib_logf(IB_LOG_LEVEL_FATAL, "Unable" - " to read tablespace %lu page no" - " %lu into the buffer pool after" - " %lu attempts" + " to read tablespace " ULINTPF " page no " + ULINTPF " into the buffer pool after " + ULINTPF " attempts." " The most probable cause" " of this error may be that the" " table has been corrupted." @@ -3288,12 +3334,21 @@ got_block: /* Decompress the page while not holding any buf_pool or block->mutex. */ - /* Page checksum verification is already done when - the page is read from disk. Hence page checksum - verification is not necessary when decompressing the page. */ { - bool success = buf_zip_decompress(block, FALSE); - ut_a(success); + bool success = buf_zip_decompress(block, TRUE); + + if (!success) { + buf_block_mutex_enter(fix_block); + buf_block_set_io_fix(fix_block, BUF_IO_NONE); + buf_block_mutex_exit(fix_block); + + os_atomic_decrement_ulint(&buf_pool->n_pend_unzip, 1); + rw_lock_x_unlock(&fix_block->lock); + mutex_enter(&buf_pool->LRU_list_mutex); + buf_block_unfix(fix_block); + mutex_exit(&buf_pool->LRU_list_mutex); + return NULL; + } } if (!recv_no_ibuf_operations) { diff --git a/storage/xtradb/fil/fil0crypt.cc b/storage/xtradb/fil/fil0crypt.cc index da78d4dd85d..21c1e3b730e 100644 --- a/storage/xtradb/fil/fil0crypt.cc +++ b/storage/xtradb/fil/fil0crypt.cc @@ -887,7 +887,7 @@ fil_space_decrypt( Calculate post encryption checksum @param[in] zip_size zip_size or 0 @param[in] dst_frame Block where checksum is calculated -@return page checksum or BUF_NO_CHECKSUM_MAGIC +@return page checksum not needed. 
*/ UNIV_INTERN ulint @@ -896,30 +896,13 @@ fil_crypt_calculate_checksum( const byte* dst_frame) { ib_uint32_t checksum = 0; - srv_checksum_algorithm_t algorithm = - static_cast(srv_checksum_algorithm); + /* For encrypted tables we use only crc32 and strict_crc32 */ if (zip_size == 0) { - switch (algorithm) { - case SRV_CHECKSUM_ALGORITHM_CRC32: - case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32: - checksum = buf_calc_page_crc32(dst_frame); - break; - case SRV_CHECKSUM_ALGORITHM_INNODB: - case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB: - checksum = (ib_uint32_t) buf_calc_page_new_checksum( - dst_frame); - break; - case SRV_CHECKSUM_ALGORITHM_NONE: - case SRV_CHECKSUM_ALGORITHM_STRICT_NONE: - checksum = BUF_NO_CHECKSUM_MAGIC; - break; - /* no default so the compiler will emit a warning - * if new enum is added and not handled here */ - } + checksum = buf_calc_page_crc32(dst_frame); } else { checksum = page_zip_calc_checksum(dst_frame, zip_size, - algorithm); + SRV_CHECKSUM_ALGORITHM_CRC32); } return checksum; @@ -953,14 +936,6 @@ fil_space_verify_crypt_checksum( return(false); } - srv_checksum_algorithm_t algorithm = - static_cast(srv_checksum_algorithm); - - /* If no checksum is used, can't continue checking. */ - if (algorithm == SRV_CHECKSUM_ALGORITHM_NONE) { - return(true); - } - /* Read stored post encryption checksum. */ ib_uint32_t checksum = mach_read_from_4( page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4); @@ -1044,7 +1019,6 @@ fil_space_verify_crypt_checksum( checksum1 = mach_read_from_4( page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM); valid = (buf_page_is_checksum_valid_crc32(page,checksum1,checksum2) - || buf_page_is_checksum_valid_none(page,checksum1,checksum2) || buf_page_is_checksum_valid_innodb(page,checksum1, checksum2)); } -- cgit v1.2.1 From 112b21da37dad0fbb28bc65f9ab5a3ba6c0c2186 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 30 May 2017 11:55:11 +0300 Subject: MDEV-12600: crash during install_db with innodb_page_size=32K and ibdata1=3M; Problem was that all doublewrite buffer pages must fit to first system datafile. Ported commit 27a34df7882b1f8ed283f22bf83e8bfc523cbfde Author: Shaohua Wang Date: Wed Aug 12 15:55:19 2015 +0800 BUG#21551464 - SEGFAULT WHILE INITIALIZING DATABASE WHEN INNODB_DATA_FILE SIZE IS SMALL To 10.1 (with extended error printout). btr_create(): If ibuf header page allocation fails report error and return FIL_NULL. Similarly if root page allocation fails return a error. dict_build_table_def_step: If fsp_header_init fails return error code. fsp_header_init: returns true if header initialization succeeds and false if not. fseg_create_general: report error if segment or page allocation fails. innobase_init: If first datafile is smaller than 3M and could not contain all doublewrite buffer pages report error and fail to initialize InnoDB plugin. row_truncate_table_for_mysql: report error if fsp header init fails. srv_init_abort: New function to report database initialization errors. srv_undo_tablespaces_init, innobase_start_or_create_for_mysql: If database initialization fails report error and abort. trx_rseg_create: If segment header creation fails return. 
--- storage/innobase/btr/btr0btr.cc | 19 +++++++++++++ storage/innobase/dict/dict0crea.cc | 6 +++- storage/innobase/fil/fil0fil.cc | 42 ++++++++++++++-------------- storage/innobase/fsp/fsp0fsp.cc | 37 ++++++++++++++++++------- storage/innobase/handler/ha_innodb.cc | 14 ++++++++++ storage/innobase/include/fsp0fsp.h | 20 ++++++++------ storage/innobase/row/row0mysql.cc | 8 +++++- storage/innobase/srv/srv0start.cc | 52 +++++++++++++++++++++++++++++++++-- storage/innobase/trx/trx0rseg.cc | 5 +++- storage/xtradb/btr/btr0btr.cc | 19 +++++++++++++ storage/xtradb/dict/dict0crea.cc | 6 +++- storage/xtradb/fil/fil0fil.cc | 42 ++++++++++++++-------------- storage/xtradb/fsp/fsp0fsp.cc | 37 ++++++++++++++++++------- storage/xtradb/handler/ha_innodb.cc | 14 ++++++++++ storage/xtradb/include/fsp0fsp.h | 20 ++++++++------ storage/xtradb/row/row0mysql.cc | 8 +++++- storage/xtradb/srv/srv0start.cc | 52 +++++++++++++++++++++++++++++++++-- storage/xtradb/trx/trx0rseg.cc | 5 +++- 18 files changed, 318 insertions(+), 88 deletions(-) (limited to 'storage') diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index cccae031b88..e200a2b9677 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -1701,6 +1701,12 @@ btr_create( space, 0, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr); + if (ibuf_hdr_block == NULL) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of the first ibuf header page failed."); + return (FIL_NULL); + } + buf_block_dbg_add_level( ibuf_hdr_block, SYNC_IBUF_TREE_NODE_NEW); @@ -1714,6 +1720,11 @@ btr_create( + IBUF_HEADER + IBUF_TREE_SEG_HEADER, IBUF_TREE_ROOT_PAGE_NO, FSP_UP, mtr); + + if (!block) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of the tree root page segment failed."); + } ut_ad(buf_block_get_page_no(block) == IBUF_TREE_ROOT_PAGE_NO); } else { #ifdef UNIV_BLOB_DEBUG @@ -1726,6 +1737,12 @@ btr_create( #endif /* UNIV_BLOB_DEBUG */ block = fseg_create(space, 0, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); + + if (!block) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of the btree segment failed."); + } + } if (block == NULL) { @@ -1754,6 +1771,8 @@ btr_create( segment before return. 
*/ btr_free_root(space, zip_size, page_no, mtr); + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of the non-ibuf tree segment for leaf pages failed."); return(FIL_NULL); } diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc index f6cd294884b..1ec7123bbc2 100644 --- a/storage/innobase/dict/dict0crea.cc +++ b/storage/innobase/dict/dict0crea.cc @@ -321,9 +321,13 @@ dict_build_table_def_step( mtr_start(&mtr); - fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); + bool res = fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); + + if (!res) { + return (DB_ERROR); + } } else { /* Create in the system tablespace: disallow Barracuda features by keeping only the first bit which says whether diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 9d4a464460f..e19ef110b4f 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -5812,19 +5812,21 @@ fil_report_invalid_page_access( ulint len, /*!< in: I/O length */ ulint type) /*!< in: I/O type */ { - fprintf(stderr, - "InnoDB: Error: trying to access page number %lu" - " in space %lu,\n" - "InnoDB: space name %s,\n" - "InnoDB: which is outside the tablespace bounds.\n" - "InnoDB: Byte offset %lu, len %lu, i/o type %lu.\n" - "InnoDB: If you get this error at mysqld startup," - " please check that\n" - "InnoDB: your my.cnf matches the ibdata files" - " that you have in the\n" - "InnoDB: MySQL server.\n", - (ulong) block_offset, (ulong) space_id, space_name, - (ulong) byte_offset, (ulong) len, (ulong) type); + ib_logf(IB_LOG_LEVEL_ERROR, + "Trying to access page number " ULINTPF + " in space " ULINTPF + " space name %s," + " which is outside the tablespace bounds." + " Byte offset " ULINTPF ", len " ULINTPF " i/o type " ULINTPF ".", + block_offset, space_id, space_name, + byte_offset, len, type); + + ib_logf(IB_LOG_LEVEL_FATAL, + "If you get this error at mysqld startup," + " please check that" + " your my.cnf matches the ibdata files" + " that you have in the" + " MySQL server."); } /********************************************************************//** @@ -6043,11 +6045,10 @@ fil_io( mutex_exit(&fil_system->mutex); return(DB_ERROR); } + fil_report_invalid_page_access( block_offset, space_id, space->name, byte_offset, len, type); - - ut_error; } /* Open file if closed */ @@ -6059,10 +6060,11 @@ fil_io( ib_logf(IB_LOG_LEVEL_ERROR, "Trying to do i/o to a tablespace which " "exists without .ibd data file. " - "i/o type %lu, space id %lu, page no %lu, " - "i/o length %lu bytes", - (ulong) type, (ulong) space_id, - (ulong) block_offset, (ulong) len); + "i/o type " ULINTPF ", space id " + ULINTPF ", page no " ULINTPF ", " + "i/o length " ULINTPF " bytes", + type, space_id, + block_offset, len); return(DB_TABLESPACE_DELETED); } @@ -6082,8 +6084,6 @@ fil_io( fil_report_invalid_page_access( block_offset, space_id, space->name, byte_offset, len, type); - - ut_error; } /* Now we have made the changes in the data structures of fil_system */ diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index a40283412e2..98cd11f3369 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -670,16 +670,18 @@ fsp_header_init_fields( } #ifndef UNIV_HOTBACKUP -/**********************************************************************//** -Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. 
*/ +/** Initializes the space header of a new created space and creates also the +insert buffer tree root if space == 0. +@param[in] space_id space id +@param[in] size current size in blocks +@param[in,out] mtr min-transaction +@return true on success, otherwise false. */ UNIV_INTERN -void +bool fsp_header_init( -/*============*/ - ulint space_id, /*!< in: space id */ - ulint size, /*!< in: current size in blocks */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + ulint space_id, + ulint size, + mtr_t* mtr) { fsp_header_t* header; buf_block_t* block; @@ -722,11 +724,15 @@ fsp_header_init( flst_init(header + FSP_SEG_INODES_FREE, mtr); mlog_write_ull(header + FSP_SEG_ID, 1, mtr); + if (space_id == 0) { fsp_fill_free_list(FALSE, space_id, header, mtr); - btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, + + if (btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, 0, 0, DICT_IBUF_ID_MIN + space_id, - dict_ind_redundant, mtr); + dict_ind_redundant, mtr) == FIL_NULL) { + return (false); + } } else { fsp_fill_free_list(TRUE, space_id, header, mtr); } @@ -739,6 +745,8 @@ fsp_header_init( } fil_space_release(space); + + return (true); } #endif /* !UNIV_HOTBACKUP */ @@ -2057,6 +2065,10 @@ fseg_create_general( success = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_NORMAL, mtr); if (!success) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Reserving %d free extents failed" + " could reserve only " ULINTPF " extents.", + 2, n_reserved); return(NULL); } } @@ -2066,6 +2078,8 @@ fseg_create_general( inode = fsp_alloc_seg_inode(space_header, mtr); if (inode == NULL) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of a new file segment inode page failed."); goto funct_exit; } @@ -2095,6 +2109,9 @@ fseg_create_general( inode, 0, FSP_UP, mtr, mtr); if (block == NULL) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of a free page from space " ULINTPF " failed.", + space); fsp_free_seg_inode(space, zip_size, inode, mtr); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 012539d1ace..b68a96c8846 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -3353,6 +3353,7 @@ innobase_init( char *default_path; uint format_id; ulong num_pll_degree; + ulint min_size = 0; DBUG_ENTER("innobase_init"); handlerton *innobase_hton= (handlerton*) p; @@ -3563,6 +3564,19 @@ mem_free_and_error: goto error; } + /* All doublewrite buffer pages must fit to first system + datafile and first datafile must be at least 3M. */ + min_size = ut_max((3*1024*1024U), (192U*UNIV_PAGE_SIZE)); + + if ((srv_data_file_sizes[0]*1024*1024) < min_size) { + sql_print_error( + "InnoDB: first datafile is too small current=" ULINTPF + "M it should be at least " ULINTPF "M.", + srv_data_file_sizes[0], + min_size / (1024 * 1024)); + goto mem_free_and_error; + } + /* -------------- All log files ---------------------------*/ /* The default dir for log files is the datadir of MySQL */ diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 2a162100dc1..905e98cc1e6 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -520,16 +520,20 @@ fsp_header_init_fields( ulint space_id, /*!< in: space id */ ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS): 0, or table->flags if newer than COMPACT */ -/**********************************************************************//** -Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. 
*/ +/** Initializes the space header of a new created space and creates also the +insert buffer tree root if space == 0. +@param[in] space_id space id +@param[in] size current size in blocks +@param[in,out] mtr min-transaction +@return true on success, otherwise false. */ UNIV_INTERN -void +bool fsp_header_init( -/*============*/ - ulint space, /*!< in: space id */ - ulint size, /*!< in: current size in blocks */ - mtr_t* mtr); /*!< in/out: mini-transaction */ + ulint space_id, + ulint size, + mtr_t* mtr) + MY_ATTRIBUTE((warn_unused_result)); + /**********************************************************************//** Increases the space size field of a space. */ UNIV_INTERN diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 82938995e93..6ca9443dc7d 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -3590,9 +3590,15 @@ row_truncate_table_for_mysql( } while (index); mtr_start_trx(&mtr, trx); - fsp_header_init(space_id, + bool ret = fsp_header_init(space_id, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); + + if (!ret) { + table->file_unreadable = true; + err = DB_ERROR; + goto funct_exit; + } } } else { /* Lock all index trees for this table, as we will diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index f601ff17ebc..032b902c633 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -188,6 +188,39 @@ UNIV_INTERN mysql_pfs_key_t srv_master_thread_key; UNIV_INTERN mysql_pfs_key_t srv_purge_thread_key; #endif /* UNIV_PFS_THREAD */ +/** Innobase start-up aborted. Perform cleanup actions. +@param[in] create_new_db TRUE if new db is being created +@param[in] file File name +@param[in] line Line number +@param[in] err Reason for aborting InnoDB startup +@return DB_SUCCESS or error code. */ +static +dberr_t +srv_init_abort( + bool create_new_db, + const char* file, + ulint line, + dberr_t err) +{ + if (create_new_db) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Database creation was aborted" + " at %s [" ULINTPF "]" + " with error %s. You may need" + " to delete the ibdata1 file before trying to start" + " up again.", + file, line, ut_strerr(err)); + } else { + ib_logf(IB_LOG_LEVEL_ERROR, + "Plugin initialization aborted" + " at %s [" ULINTPF "]" + " with error %s.", + file, line, ut_strerr(err)); + } + + return(err); +} + /*********************************************************************//** Convert a numeric string that optionally ends in G or M or K, to a number containing megabytes. @@ -1528,18 +1561,26 @@ srv_undo_tablespaces_init( if (create_new_db) { mtr_t mtr; + bool ret=true; mtr_start(&mtr); /* The undo log tablespace */ for (i = 0; i < n_undo_tablespaces; ++i) { - fsp_header_init( + ret = fsp_header_init( undo_tablespace_ids[i], SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); + if (!ret) { + break; + } } mtr_commit(&mtr); + + if (!ret) { + return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); + } } return(DB_SUCCESS); @@ -2378,10 +2419,14 @@ files_checked: mtr_start(&mtr); - fsp_header_init(0, sum_of_new_sizes, &mtr); + bool ret = fsp_header_init(0, sum_of_new_sizes, &mtr); mtr_commit(&mtr); + if (!ret) { + return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); + } + /* To maintain backward compatibility we create only the first rollback segment before the double write buffer. All the remaining rollback segments will be created later, @@ -2809,6 +2854,9 @@ files_checked: /* Can only happen if server is read only. 
*/ ut_a(srv_read_only_mode); srv_undo_logs = ULONG_UNDEFINED; + } else if (srv_available_undo_logs < srv_undo_logs) { + /* Should due to out of file space. */ + return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); } if (!srv_read_only_mode) { diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc index 003d1036a8c..21dbab98e48 100644 --- a/storage/innobase/trx/trx0rseg.cc +++ b/storage/innobase/trx/trx0rseg.cc @@ -323,7 +323,10 @@ trx_rseg_create( page_no = trx_rseg_header_create( space, 0, ULINT_MAX, slot_no, &mtr); - ut_a(page_no != FIL_NULL); + if (page_no == FIL_NULL) { + mtr_commit(&mtr); + return (rseg); + } sys_header = trx_sysf_get(&mtr); diff --git a/storage/xtradb/btr/btr0btr.cc b/storage/xtradb/btr/btr0btr.cc index d84c93f8b3e..7b5b3bb6cba 100644 --- a/storage/xtradb/btr/btr0btr.cc +++ b/storage/xtradb/btr/btr0btr.cc @@ -1720,6 +1720,12 @@ btr_create( space, 0, IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr); + if (ibuf_hdr_block == NULL) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of the first ibuf header page failed."); + return (FIL_NULL); + } + buf_block_dbg_add_level( ibuf_hdr_block, SYNC_IBUF_TREE_NODE_NEW); @@ -1733,6 +1739,11 @@ btr_create( + IBUF_HEADER + IBUF_TREE_SEG_HEADER, IBUF_TREE_ROOT_PAGE_NO, FSP_UP, mtr); + + if (!block) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of the tree root page segment failed."); + } ut_ad(buf_block_get_page_no(block) == IBUF_TREE_ROOT_PAGE_NO); } else { #ifdef UNIV_BLOB_DEBUG @@ -1745,6 +1756,12 @@ btr_create( #endif /* UNIV_BLOB_DEBUG */ block = fseg_create(space, 0, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); + + if (!block) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of the btree segment failed."); + } + } if (block == NULL) { @@ -1773,6 +1790,8 @@ btr_create( segment before return. */ btr_free_root(space, zip_size, page_no, mtr); + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of the non-ibuf tree segment for leaf pages failed."); return(FIL_NULL); } diff --git a/storage/xtradb/dict/dict0crea.cc b/storage/xtradb/dict/dict0crea.cc index 6d5b12474eb..faaa6d1bdc7 100644 --- a/storage/xtradb/dict/dict0crea.cc +++ b/storage/xtradb/dict/dict0crea.cc @@ -323,9 +323,13 @@ dict_build_table_def_step( mtr_start(&mtr); - fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); + bool res = fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); + + if(!res) { + return (DB_ERROR); + } } else { /* Create in the system tablespace: disallow Barracuda features by keeping only the first bit which says whether diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 17e9a1be973..b669d35ff15 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -6115,19 +6115,21 @@ fil_report_invalid_page_access( ulint len, /*!< in: I/O length */ ulint type) /*!< in: I/O type */ { - fprintf(stderr, - "InnoDB: Error: trying to access page number %lu" - " in space %lu,\n" - "InnoDB: space name %s,\n" - "InnoDB: which is outside the tablespace bounds.\n" - "InnoDB: Byte offset %lu, len %lu, i/o type %lu.\n" - "InnoDB: If you get this error at mysqld startup," - " please check that\n" - "InnoDB: your my.cnf matches the ibdata files" - " that you have in the\n" - "InnoDB: MySQL server.\n", - (ulong) block_offset, (ulong) space_id, space_name, - (ulong) byte_offset, (ulong) len, (ulong) type); + ib_logf(IB_LOG_LEVEL_ERROR, + "Trying to access page number " ULINTPF + " in space " ULINTPF + " space name %s," + " which is outside the tablespace bounds." 
+ " Byte offset " ULINTPF ", len " ULINTPF " i/o type " ULINTPF ".", + block_offset, space_id, space_name, + byte_offset, len, type); + + ib_logf(IB_LOG_LEVEL_FATAL, + "If you get this error at mysqld startup," + " please check that" + " your my.cnf matches the ibdata files" + " that you have in the" + " MySQL server."); } /********************************************************************//** @@ -6347,11 +6349,10 @@ _fil_io( mutex_exit(&fil_system->mutex); return(DB_ERROR); } + fil_report_invalid_page_access( block_offset, space_id, space->name, byte_offset, len, type); - - ut_error; } /* Open file if closed */ @@ -6363,10 +6364,11 @@ _fil_io( ib_logf(IB_LOG_LEVEL_ERROR, "Trying to do i/o to a tablespace which " "exists without .ibd data file. " - "i/o type %lu, space id %lu, page no %lu, " - "i/o length %lu bytes", - (ulong) type, (ulong) space_id, - (ulong) block_offset, (ulong) len); + "i/o type " ULINTPF ", space id " + ULINTPF ", page no " ULINTPF ", " + "i/o length " ULINTPF " bytes", + type, space_id, + block_offset, len); return(DB_TABLESPACE_DELETED); } @@ -6386,8 +6388,6 @@ _fil_io( fil_report_invalid_page_access( block_offset, space_id, space->name, byte_offset, len, type); - - ut_error; } /* Now we have made the changes in the data structures of fil_system */ diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc index 934824c6462..7929372ae6c 100644 --- a/storage/xtradb/fsp/fsp0fsp.cc +++ b/storage/xtradb/fsp/fsp0fsp.cc @@ -673,16 +673,18 @@ fsp_header_init_fields( } #ifndef UNIV_HOTBACKUP -/**********************************************************************//** -Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. */ +/** Initializes the space header of a new created space and creates also the +insert buffer tree root if space == 0. +@param[in] space_id space id +@param[in] size current size in blocks +@param[in,out] mtr min-transaction +@return true on success, otherwise false. 
*/ UNIV_INTERN -void +bool fsp_header_init( -/*============*/ - ulint space_id, /*!< in: space id */ - ulint size, /*!< in: current size in blocks */ - mtr_t* mtr) /*!< in/out: mini-transaction */ + ulint space_id, + ulint size, + mtr_t* mtr) { fsp_header_t* header; buf_block_t* block; @@ -725,11 +727,15 @@ fsp_header_init( flst_init(header + FSP_SEG_INODES_FREE, mtr); mlog_write_ull(header + FSP_SEG_ID, 1, mtr); + if (space_id == 0) { fsp_fill_free_list(FALSE, space_id, header, mtr); - btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, + + if (btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, 0, 0, DICT_IBUF_ID_MIN + space_id, - dict_ind_redundant, mtr); + dict_ind_redundant, mtr) == FIL_NULL) { + return (false); + } } else { fsp_fill_free_list(TRUE, space_id, header, mtr); } @@ -742,6 +748,8 @@ fsp_header_init( } fil_space_release(space); + + return (true); } #endif /* !UNIV_HOTBACKUP */ @@ -2066,6 +2074,10 @@ fseg_create_general( success = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_NORMAL, mtr); if (!success) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Reserving %d free extents failed" + " could reserve only " ULINTPF " extents.", + 2, n_reserved); return(NULL); } } @@ -2075,6 +2087,8 @@ fseg_create_general( inode = fsp_alloc_seg_inode(space_header, mtr); if (inode == NULL) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of a new file segment inode page failed."); goto funct_exit; } @@ -2104,6 +2118,9 @@ fseg_create_general( inode, 0, FSP_UP, mtr, mtr); if (block == NULL) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Allocation of a free page from space " ULINTPF " failed.", + space); fsp_free_seg_inode(space, zip_size, inode, mtr); diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index dd5d041aed9..7be6c64407b 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -3770,6 +3770,7 @@ innobase_init( char *default_path; uint format_id; ulong num_pll_degree; + ulint min_size = 0; DBUG_ENTER("innobase_init"); handlerton *innobase_hton= (handlerton*) p; @@ -4020,6 +4021,19 @@ mem_free_and_error: goto error; } + /* All doublewrite buffer pages must fit to first system + datafile and first datafile must be at least 3M. */ + min_size = ut_max((3*1024*1024U), (192U*UNIV_PAGE_SIZE)); + + if ((srv_data_file_sizes[0]*1024*1024) < min_size) { + sql_print_error( + "InnoDB: first datafile is too small current=" ULINTPF + "M it should be at least " ULINTPF "M.", + srv_data_file_sizes[0], + min_size / (1024 * 1024)); + goto mem_free_and_error; + } + /* -------------- All log files ---------------------------*/ /* The default dir for log files is the datadir of MySQL */ diff --git a/storage/xtradb/include/fsp0fsp.h b/storage/xtradb/include/fsp0fsp.h index 6ed78eba6f9..d8f851a0846 100644 --- a/storage/xtradb/include/fsp0fsp.h +++ b/storage/xtradb/include/fsp0fsp.h @@ -519,16 +519,20 @@ fsp_header_init_fields( ulint space_id, /*!< in: space id */ ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS): 0, or table->flags if newer than COMPACT */ -/**********************************************************************//** -Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. */ +/** Initializes the space header of a new created space and creates also the +insert buffer tree root if space == 0. +@param[in] space_id space id +@param[in] size current size in blocks +@param[in,out] mtr min-transaction +@return true on success, otherwise false. 
*/ UNIV_INTERN -void +bool fsp_header_init( -/*============*/ - ulint space, /*!< in: space id */ - ulint size, /*!< in: current size in blocks */ - mtr_t* mtr); /*!< in/out: mini-transaction */ + ulint space_id, + ulint size, + mtr_t* mtr) + MY_ATTRIBUTE((warn_unused_result)); + /**********************************************************************//** Increases the space size field of a space. */ UNIV_INTERN diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index 59568f5c91b..9f1f216bccb 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -3614,9 +3614,15 @@ row_truncate_table_for_mysql( } while (index); mtr_start_trx(&mtr, trx); - fsp_header_init(space_id, + bool ret = fsp_header_init(space_id, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); + + if (!ret) { + table->file_unreadable = true; + err = DB_ERROR; + goto funct_exit; + } } } else { /* Lock all index trees for this table, as we will diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index 9e2bd483511..9491d5328e7 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -206,6 +206,39 @@ UNIV_INTERN mysql_pfs_key_t srv_purge_thread_key; UNIV_INTERN mysql_pfs_key_t srv_log_tracking_thread_key; #endif /* UNIV_PFS_THREAD */ +/** Innobase start-up aborted. Perform cleanup actions. +@param[in] create_new_db TRUE if new db is being created +@param[in] file File name +@param[in] line Line number +@param[in] err Reason for aborting InnoDB startup +@return DB_SUCCESS or error code. */ +static +dberr_t +srv_init_abort( + bool create_new_db, + const char* file, + ulint line, + dberr_t err) +{ + if (create_new_db) { + ib_logf(IB_LOG_LEVEL_ERROR, + "Database creation was aborted" + " at %s [" ULINTPF "]" + " with error %s. You may need" + " to delete the ibdata1 file before trying to start" + " up again.", + file, line, ut_strerr(err)); + } else { + ib_logf(IB_LOG_LEVEL_ERROR, + "Plugin initialization aborted" + " at %s [" ULINTPF "]" + " with error %s.", + file, line, ut_strerr(err)); + } + + return(err); +} + /*********************************************************************//** Convert a numeric string that optionally ends in G or M or K, to a number containing megabytes. @@ -1568,18 +1601,26 @@ srv_undo_tablespaces_init( if (create_new_db) { mtr_t mtr; + bool ret=true; mtr_start(&mtr); /* The undo log tablespace */ for (i = 0; i < n_undo_tablespaces; ++i) { - fsp_header_init( + ret = fsp_header_init( undo_tablespace_ids[i], SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); + if (!ret) { + break; + } } mtr_commit(&mtr); + + if (!ret) { + return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); + } } return(DB_SUCCESS); @@ -2466,10 +2507,14 @@ files_checked: mtr_start(&mtr); - fsp_header_init(0, sum_of_new_sizes, &mtr); + bool ret = fsp_header_init(0, sum_of_new_sizes, &mtr); mtr_commit(&mtr); + if (!ret) { + return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); + } + /* To maintain backward compatibility we create only the first rollback segment before the double write buffer. All the remaining rollback segments will be created later, @@ -2899,6 +2944,9 @@ files_checked: /* Can only happen if server is read only. */ ut_a(srv_read_only_mode); srv_undo_logs = ULONG_UNDEFINED; + } else if (srv_available_undo_logs < srv_undo_logs) { + /* Should due to out of file space. 
*/ + return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); } if (!srv_read_only_mode) { diff --git a/storage/xtradb/trx/trx0rseg.cc b/storage/xtradb/trx/trx0rseg.cc index 003d1036a8c..21dbab98e48 100644 --- a/storage/xtradb/trx/trx0rseg.cc +++ b/storage/xtradb/trx/trx0rseg.cc @@ -323,7 +323,10 @@ trx_rseg_create( page_no = trx_rseg_header_create( space, 0, ULINT_MAX, slot_no, &mtr); - ut_a(page_no != FIL_NULL); + if (page_no == FIL_NULL) { + mtr_commit(&mtr); + return (rseg); + } sys_header = trx_sysf_get(&mtr); -- cgit v1.2.1 From 3356e42d01dbef2bfa9a02ec08c7756760385e1e Mon Sep 17 00:00:00 2001 From: Monty Date: Fri, 2 Jun 2017 13:52:47 +0300 Subject: Improved warning "xxx is not BASE TABLE/SEQUENCE" - Changed warning to "'%-.192s.%-.192s' is not of type '%s'" to make the english a bit more correct --- storage/myisammrg/mysql-test/storage_engine/create_table.rdiff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff index 585e5c915ba..007fc0906a7 100644 --- a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff @@ -36,7 +36,7 @@ -1 -2 -DROP TABLE t1; -+ERROR HY000: 'test.t1' is not BASE TABLE ++ERROR HY000: 'test.t1' is not of type 'BASE TABLE' +# ERROR: Statement ended with errno 1347, errname ER_WRONG_OBJECT (expected to succeed) +# ------------ UNEXPECTED RESULT ------------ +# The statement|command finished with ER_WRONG_OBJECT. -- cgit v1.2.1 From 36e020a5d678103544b66dfaefe53c41b5af4af9 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Sat, 3 Jun 2017 02:47:52 +0300 Subject: Adjust storage_engine suite according to server changes in 10.2 --- .../mysql-test/storage_engine/disabled.def | 4 +- .../mysql-test/storage_engine/repair_table.rdiff | 2 +- .../innobase/mysql-test/storage_engine/suite.opt | 2 +- .../storage_engine/tbl_opt_data_index_dir.rdiff | 23 - .../storage_engine/tbl_opt_index_dir.rdiff | 23 + .../storage_engine/tbl_opt_row_format.rdiff | 48 +- .../storage_engine/type_spatial_indexes.rdiff | 712 --------------------- .../storage_engine/alter_tablespace.rdiff | 2 +- .../storage_engine/alter_tablespace.rdiff | 2 +- .../mysql-test/storage_engine/create_table.rdiff | 2 +- .../mysql-test/storage_engine/disabled.def | 3 + .../storage_engine/parts/repair_table.rdiff | 7 +- .../mysql-test/storage_engine/repair_table.rdiff | 8 +- .../storage_engine/tbl_opt_data_dir.rdiff | 18 + .../storage_engine/tbl_opt_data_index_dir.rdiff | 18 - .../storage_engine/tbl_opt_index_dir.rdiff | 18 + .../storage_engine/tbl_opt_row_format.rdiff | 28 +- .../myisammrg/mysql-test/storage_engine/vcol.rdiff | 30 +- storage/xtradb/mysql-test/storage_engine/suite.pm | 8 + .../storage_engine/tbl_opt_data_index_dir.rdiff | 23 - .../storage_engine/tbl_opt_index_dir.rdiff | 23 + 21 files changed, 187 insertions(+), 817 deletions(-) delete mode 100644 storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff create mode 100644 storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff delete mode 100644 storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff create mode 100644 storage/myisammrg/mysql-test/storage_engine/disabled.def create mode 100644 storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff delete mode 100644 storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff create mode 
100644 storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff create mode 100644 storage/xtradb/mysql-test/storage_engine/suite.pm delete mode 100644 storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff create mode 100644 storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff (limited to 'storage') diff --git a/storage/innobase/mysql-test/storage_engine/disabled.def b/storage/innobase/mysql-test/storage_engine/disabled.def index bad10099bbf..1d67f9311ca 100644 --- a/storage/innobase/mysql-test/storage_engine/disabled.def +++ b/storage/innobase/mysql-test/storage_engine/disabled.def @@ -4,4 +4,6 @@ insert_high_prio : InnoDB does not use table-level locking insert_low_prio : InnoDB does not use table-level locking select_high_prio : InnoDB does not use table-level locking update_low_prio : InnoDB does not use table-level locking - +insert_delayed : MDEV-12880 - INSERT DELAYED is not detected as inapplicable to a table under lock +lock_concurrent : MDEV-12882 - Assertion failure +tbl_opt_index_dir : INDEX DIRECTORY option is not supported anymore diff --git a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff index be3709c5833..717d437b2d1 100644 --- a/storage/innobase/mysql-test/storage_engine/repair_table.rdiff +++ b/storage/innobase/mysql-test/storage_engine/repair_table.rdiff @@ -111,7 +111,7 @@ -test.t1 check error Corrupt +test.t1 check status OK SELECT a,b FROM t1; --ERROR HY000: Incorrect key file for table 't1'; try to repair it +-ERROR HY000: Index for table 't1' is corrupt; try to repair it -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). -# If you got a difference in error message, just add it to rdiff file -INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); diff --git a/storage/innobase/mysql-test/storage_engine/suite.opt b/storage/innobase/mysql-test/storage_engine/suite.opt index 034b58f2628..627becdbfb5 100644 --- a/storage/innobase/mysql-test/storage_engine/suite.opt +++ b/storage/innobase/mysql-test/storage_engine/suite.opt @@ -1 +1 @@ ---innodb --ignore-builtin-innodb --plugin-load=ha_innodb +--innodb diff --git a/storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff deleted file mode 100644 index e09e50b17ec..00000000000 --- a/storage/innobase/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff +++ /dev/null @@ -1,23 +0,0 @@ ---- suite/storage_engine/tbl_opt_data_index_dir.result 2013-10-03 20:35:06.000000000 +0400 -+++ suite/storage_engine/tbl_opt_data_index_dir.reject 2013-11-08 22:06:54.000000000 +0400 -@@ -1,10 +1,12 @@ - DROP TABLE IF EXISTS t1; -+Warnings: -+Warning 1618 option ignored - SHOW CREATE TABLE t1; - Table Create Table - t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL, - `b` char(8) DEFAULT NULL --) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' -+) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' - Warnings: - Warning 1618 option ignored - SHOW CREATE TABLE t1; -@@ -12,5 +14,5 @@ - t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL, - `b` char(8) DEFAULT NULL --) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' -+) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' - DROP TABLE t1; diff --git a/storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff b/storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff new file mode 100644 index 00000000000..e09e50b17ec --- /dev/null 
+++ b/storage/innobase/mysql-test/storage_engine/tbl_opt_index_dir.rdiff @@ -0,0 +1,23 @@ +--- suite/storage_engine/tbl_opt_data_index_dir.result 2013-10-03 20:35:06.000000000 +0400 ++++ suite/storage_engine/tbl_opt_data_index_dir.reject 2013-11-08 22:06:54.000000000 +0400 +@@ -1,10 +1,12 @@ + DROP TABLE IF EXISTS t1; ++Warnings: ++Warning 1618 option ignored + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' ++) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' + Warnings: + Warning 1618 option ignored + SHOW CREATE TABLE t1; +@@ -12,5 +14,5 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' ++) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' + DROP TABLE t1; diff --git a/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff index a6572ffa7f0..daa5fc67dec 100644 --- a/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff +++ b/storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.rdiff @@ -1,10 +1,44 @@ ---- suite/storage_engine/tbl_opt_row_format.result 2012-06-24 23:55:19.539380000 +0400 -+++ suite/storage_engine/tbl_opt_row_format.reject 2012-07-15 19:26:02.235049157 +0400 -@@ -1,5 +1,7 @@ - DROP TABLE IF EXISTS t1; - CREATE TABLE t1 (a , b ) ENGINE= ROW_FORMAT=FIXED; -+Warnings: -+Warning 1478 : assuming ROW_FORMAT=COMPACT. +--- ../storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.result~ 2017-05-24 00:40:12.854181048 +0300 ++++ ../storage/innobase/mysql-test/storage_engine/tbl_opt_row_format.reject 2017-05-24 00:49:06.578191030 +0300 +@@ -7,19 +7,39 @@ + `b` char(8) DEFAULT NULL + ) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC + ALTER TABLE t1 ROW_FORMAT=FIXED; ++ERROR HY000: Table storage engine '' does not support the create option 'ROW_TYPE' ++# ERROR: Statement ended with errno 1478, errname ER_ILLEGAL_HA_CREATE_OPTION (expected to succeed) ++# ------------ UNEXPECTED RESULT ------------ ++# [ ALTER TABLE t1 ROW_FORMAT=FIXED ] ++# The statement|command finished with ER_ILLEGAL_HA_CREATE_OPTION. ++# ALTER TABLE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. ++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. ++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. ++# Also, this problem may cause a chain effect (more errors of different kinds in the test). ++# ------------------------------------------- SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED ++) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC + ALTER TABLE t1 ROW_FORMAT=PAGE; ++ERROR HY000: Table storage engine '' does not support the create option 'ROW_TYPE' ++# ERROR: Statement ended with errno 1478, errname ER_ILLEGAL_HA_CREATE_OPTION (expected to succeed) ++# ------------ UNEXPECTED RESULT ------------ ++# [ ALTER TABLE t1 ROW_FORMAT=PAGE ] ++# The statement|command finished with ER_ILLEGAL_HA_CREATE_OPTION. ++# ALTER TABLE or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. 
++# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. ++# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. ++# Also, this problem may cause a chain effect (more errors of different kinds in the test). ++# ------------------------------------------- + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE ++) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC + ALTER TABLE t1 ROW_FORMAT=COMPACT; + SHOW CREATE TABLE t1; + Table Create Table diff --git a/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff b/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff deleted file mode 100644 index 154116b748c..00000000000 --- a/storage/innobase/mysql-test/storage_engine/type_spatial_indexes.rdiff +++ /dev/null @@ -1,712 +0,0 @@ ---- suite/storage_engine/type_spatial_indexes.result 2013-08-05 18:08:49.000000000 +0400 -+++ suite/storage_engine/type_spatial_indexes.reject 2013-08-05 18:25:24.000000000 +0400 -@@ -702,699 +702,15 @@ - DROP DATABASE IF EXISTS gis_ogs; - CREATE DATABASE gis_ogs; - CREATE TABLE gis_point (fid , g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE= ; --CREATE TABLE gis_line (fid , g LINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE= ; --CREATE TABLE gis_polygon (fid , g POLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE= ; --CREATE TABLE gis_multi_point (fid , g MULTIPOINT NOT NULL, SPATIAL INDEX(g)) ENGINE= ; --CREATE TABLE gis_multi_line (fid , g MULTILINESTRING NOT NULL, SPATIAL INDEX(g)) ENGINE= ; --CREATE TABLE gis_multi_polygon (fid , g MULTIPOLYGON NOT NULL, SPATIAL INDEX(g)) ENGINE= ; --CREATE TABLE gis_geometrycollection (fid , g GEOMETRYCOLLECTION NOT NULL, SPATIAL INDEX(g)) ENGINE= ; --CREATE TABLE gis_geometry (fid , g GEOMETRY NOT NULL) ENGINE= ; --USE gis_ogs; --CREATE TABLE lakes (fid INT , --name CHAR(64) , --shore POLYGON NOT NULL, SPATIAL INDEX s(shore)) ENGINE= ; --CREATE TABLE road_segments (fid INT , --name CHAR(64) , --aliases CHAR(64) , --num_lanes INT , --centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE= ; --CREATE TABLE divided_routes (fid INT , --name CHAR(64) , --num_lanes INT , --centerlines MULTILINESTRING NOT NULL, SPATIAL INDEX c(centerlines)) ENGINE= ; --CREATE TABLE forests (fid INT , --name CHAR(64) , --boundary MULTIPOLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE= ; --CREATE TABLE bridges (fid INT , --name CHAR(64) , --position POINT NOT NULL, SPATIAL INDEX p(position)) ENGINE= ; --CREATE TABLE streams (fid INT , --name CHAR(64) , --centerline LINESTRING NOT NULL, SPATIAL INDEX c(centerline)) ENGINE= ; --CREATE TABLE buildings (fid INT , --name CHAR(64) , --position POINT NOT NULL, --footprint POLYGON NOT NULL, SPATIAL INDEX p(position), SPATIAL INDEX f(footprint)) ENGINE= ; --CREATE TABLE ponds (fid INT , --name CHAR(64) , --type CHAR(64) , --shores MULTIPOLYGON NOT NULL, SPATIAL INDEX s(shores)) ENGINE= ; --CREATE TABLE named_places (fid INT , --name CHAR(64) , --boundary POLYGON NOT NULL, SPATIAL INDEX b(boundary)) ENGINE= ; --CREATE TABLE map_neatlines (fid INT , --neatline POLYGON NOT NULL, SPATIAL INDEX n(neatline)) ENGINE= ; --USE test; --SHOW FIELDS FROM gis_point; --Field Type Null Key Default Extra --fid int(11) YES NULL --g point NO MUL NULL --SHOW FIELDS FROM gis_line; --Field Type Null Key Default Extra --fid int(11) YES NULL --g linestring NO MUL NULL --SHOW FIELDS FROM 
gis_polygon; --Field Type Null Key Default Extra --fid int(11) YES NULL --g polygon NO MUL NULL --SHOW FIELDS FROM gis_multi_point; --Field Type Null Key Default Extra --fid int(11) YES NULL --g multipoint NO MUL NULL --SHOW FIELDS FROM gis_multi_line; --Field Type Null Key Default Extra --fid int(11) YES NULL --g multilinestring NO MUL NULL --SHOW FIELDS FROM gis_multi_polygon; --Field Type Null Key Default Extra --fid int(11) YES NULL --g multipolygon NO MUL NULL --SHOW FIELDS FROM gis_geometrycollection; --Field Type Null Key Default Extra --fid int(11) YES NULL --g geometrycollection NO MUL NULL --SHOW FIELDS FROM gis_geometry; --Field Type Null Key Default Extra --fid int(11) YES NULL --g geometry NO NULL --INSERT INTO gis_point (fid,g) VALUES --(101, PointFromText('POINT(10 10)')), --(102, PointFromText('POINT(20 10)')), --(103, PointFromText('POINT(20 20)')), --(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)')))); --INSERT INTO gis_line (fid,g) VALUES --(105, LineFromText('LINESTRING(0 0,0 10,10 0)')), --(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')), --(107, LineStringFromWKB(AsWKB(LineString(Point(10, 10), Point(40, 10))))); --INSERT INTO gis_polygon (fid,g) VALUES --(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')), --(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')), --(110, PolyFromWKB(AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0)))))); --INSERT INTO gis_multi_point (fid,g) VALUES --(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')), --(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')), --(113, MPointFromWKB(AsWKB(MultiPoint(Point(3, 6), Point(4, 10))))); --INSERT INTO gis_multi_line (fid,g) VALUES --(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')), --(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')), --(116, MLineFromWKB(AsWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7)))))); --INSERT INTO gis_multi_polygon (fid,g) VALUES --(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')), --(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')), --(119, MPolyFromWKB(AsWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3))))))); --INSERT INTO gis_geometrycollection (fid,g) VALUES --(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')), --(121, GeometryFromWKB(AsWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))))), --(122, GeomFromText('GeometryCollection()')), --(123, GeomFromText('GeometryCollection EMPTY')); --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_point; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_line; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_polygon; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_point; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_line; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_multi_polygon; --INSERT into gis_geometry (fid,g) SELECT fid,g FROM gis_geometrycollection; --SELECT fid, AsText(g) FROM gis_point; --fid AsText(g) --101 POINT(10 10) --102 POINT(20 10) --103 POINT(20 20) --104 POINT(10 20) --SELECT fid, AsText(g) FROM gis_line; --fid AsText(g) --105 LINESTRING(0 0,0 10,10 0) 
--106 LINESTRING(10 10,20 10,20 20,10 20,10 10) --107 LINESTRING(10 10,40 10) --SELECT fid, AsText(g) FROM gis_polygon; --fid AsText(g) --108 POLYGON((10 10,20 10,20 20,10 20,10 10)) --109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10)) --110 POLYGON((0 0,30 0,30 30,0 0)) --SELECT fid, AsText(g) FROM gis_multi_point; --fid AsText(g) --111 MULTIPOINT(0 0,10 10,10 20,20 20) --112 MULTIPOINT(1 1,11 11,11 21,21 21) --113 MULTIPOINT(3 6,4 10) --SELECT fid, AsText(g) FROM gis_multi_line; --fid AsText(g) --114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48)) --115 MULTILINESTRING((10 48,10 21,10 0)) --116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7)) --SELECT fid, AsText(g) FROM gis_multi_polygon; --fid AsText(g) --117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) --118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) --119 MULTIPOLYGON(((0 3,3 3,3 0,0 3))) --SELECT fid, AsText(g) FROM gis_geometrycollection; --fid AsText(g) --120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10)) --121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9)) --122 GEOMETRYCOLLECTION EMPTY --123 GEOMETRYCOLLECTION EMPTY --SELECT fid, AsText(g) FROM gis_geometry; --fid AsText(g) --101 POINT(10 10) --102 POINT(20 10) --103 POINT(20 20) --104 POINT(10 20) --105 LINESTRING(0 0,0 10,10 0) --106 LINESTRING(10 10,20 10,20 20,10 20,10 10) --107 LINESTRING(10 10,40 10) --108 POLYGON((10 10,20 10,20 20,10 20,10 10)) --109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10)) --110 POLYGON((0 0,30 0,30 30,0 0)) --111 MULTIPOINT(0 0,10 10,10 20,20 20) --112 MULTIPOINT(1 1,11 11,11 21,21 21) --113 MULTIPOINT(3 6,4 10) --114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48)) --115 MULTILINESTRING((10 48,10 21,10 0)) --116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7)) --117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) --118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18))) --119 MULTIPOLYGON(((0 3,3 3,3 0,0 3))) --120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10)) --121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9)) --122 GEOMETRYCOLLECTION EMPTY --123 GEOMETRYCOLLECTION EMPTY --SELECT fid, Dimension(g) FROM gis_geometry; --fid Dimension(g) --101 0 --102 0 --103 0 --104 0 --105 1 --106 1 --107 1 --108 2 --109 2 --110 2 --111 0 --112 0 --113 0 --114 1 --115 1 --116 1 --117 2 --118 2 --119 2 --120 1 --121 1 --122 0 --123 0 --SELECT fid, GeometryType(g) FROM gis_geometry; --fid GeometryType(g) --101 POINT --102 POINT --103 POINT --104 POINT --105 LINESTRING --106 LINESTRING --107 LINESTRING --108 POLYGON --109 POLYGON --110 POLYGON --111 MULTIPOINT --112 MULTIPOINT --113 MULTIPOINT --114 MULTILINESTRING --115 MULTILINESTRING --116 MULTILINESTRING --117 MULTIPOLYGON --118 MULTIPOLYGON --119 MULTIPOLYGON --120 GEOMETRYCOLLECTION --121 GEOMETRYCOLLECTION --122 GEOMETRYCOLLECTION --123 GEOMETRYCOLLECTION --SELECT fid, IsEmpty(g) FROM gis_geometry; --fid IsEmpty(g) --101 0 --102 0 --103 0 --104 0 --105 0 --106 0 --107 0 --108 0 --109 0 --110 0 --111 0 --112 0 --113 0 --114 0 --115 0 --116 0 --117 0 --118 0 --119 0 --120 0 --121 0 --122 0 --123 0 --SELECT fid, AsText(Envelope(g)) FROM gis_geometry; --fid AsText(Envelope(g)) --101 POLYGON((10 10,10 10,10 10,10 10,10 10)) --102 POLYGON((20 10,20 10,20 10,20 10,20 10)) --103 POLYGON((20 20,20 20,20 
20,20 20,20 20)) --104 POLYGON((10 20,10 20,10 20,10 20,10 20)) --105 POLYGON((0 0,10 0,10 10,0 10,0 0)) --106 POLYGON((10 10,20 10,20 20,10 20,10 10)) --107 POLYGON((10 10,40 10,40 10,10 10,10 10)) --108 POLYGON((10 10,20 10,20 20,10 20,10 10)) --109 POLYGON((0 0,50 0,50 50,0 50,0 0)) --110 POLYGON((0 0,30 0,30 30,0 30,0 0)) --111 POLYGON((0 0,20 0,20 20,0 20,0 0)) --112 POLYGON((1 1,21 1,21 21,1 21,1 1)) --113 POLYGON((3 6,4 6,4 10,3 10,3 6)) --114 POLYGON((10 0,16 0,16 48,10 48,10 0)) --115 POLYGON((10 0,10 0,10 48,10 48,10 0)) --116 POLYGON((1 2,21 2,21 8,1 8,1 2)) --117 POLYGON((28 0,84 0,84 42,28 42,28 0)) --118 POLYGON((28 0,84 0,84 42,28 42,28 0)) --119 POLYGON((0 0,3 0,3 3,0 3,0 0)) --120 POLYGON((0 0,10 0,10 10,0 10,0 0)) --121 POLYGON((3 6,44 6,44 9,3 9,3 6)) --122 GEOMETRYCOLLECTION EMPTY --123 GEOMETRYCOLLECTION EMPTY --SELECT fid, X(g) FROM gis_point; --fid X(g) --101 10 --102 20 --103 20 --104 10 --SELECT fid, Y(g) FROM gis_point; --fid Y(g) --101 10 --102 10 --103 20 --104 20 --SELECT fid, AsText(StartPoint(g)) FROM gis_line; --fid AsText(StartPoint(g)) --105 POINT(0 0) --106 POINT(10 10) --107 POINT(10 10) --SELECT fid, AsText(EndPoint(g)) FROM gis_line; --fid AsText(EndPoint(g)) --105 POINT(10 0) --106 POINT(10 10) --107 POINT(40 10) --SELECT fid, GLength(g) FROM gis_line; --fid GLength(g) --105 24.14213562373095 --106 40 --107 30 --SELECT fid, NumPoints(g) FROM gis_line; --fid NumPoints(g) --105 3 --106 5 --107 2 --SELECT fid, AsText(PointN(g, 2)) FROM gis_line; --fid AsText(PointN(g, 2)) --105 POINT(0 10) --106 POINT(20 10) --107 POINT(40 10) --SELECT fid, IsClosed(g) FROM gis_line; --fid IsClosed(g) --105 0 --106 1 --107 0 --SELECT fid, AsText(Centroid(g)) FROM gis_polygon; --fid AsText(Centroid(g)) --108 POINT(15 15) --109 POINT(25.416666666666668 25.416666666666668) --110 POINT(20 10) --SELECT fid, Area(g) FROM gis_polygon; --fid Area(g) --108 100 --109 2400 --110 450 --SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon; --fid AsText(ExteriorRing(g)) --108 LINESTRING(10 10,20 10,20 20,10 20,10 10) --109 LINESTRING(0 0,50 0,50 50,0 50,0 0) --110 LINESTRING(0 0,30 0,30 30,0 0) --SELECT fid, NumInteriorRings(g) FROM gis_polygon; --fid NumInteriorRings(g) --108 0 --109 1 --110 0 --SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon; --fid AsText(InteriorRingN(g, 1)) --108 NULL --109 LINESTRING(10 10,20 10,20 20,10 20,10 10) --110 NULL --SELECT fid, IsClosed(g) FROM gis_multi_line; --fid IsClosed(g) --114 0 --115 0 --116 0 --SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon; --fid AsText(Centroid(g)) --117 POINT(55.58852775304245 17.426536064113982) --118 POINT(55.58852775304245 17.426536064113982) --119 POINT(2 2) --SELECT fid, Area(g) FROM gis_multi_polygon; --fid Area(g) --117 1684.5 --118 1684.5 --119 4.5 --SELECT fid, NumGeometries(g) from gis_multi_point; --fid NumGeometries(g) --111 4 --112 4 --113 2 --SELECT fid, NumGeometries(g) from gis_multi_line; --fid NumGeometries(g) --114 2 --115 1 --116 2 --SELECT fid, NumGeometries(g) from gis_multi_polygon; --fid NumGeometries(g) --117 2 --118 2 --119 1 --SELECT fid, NumGeometries(g) from gis_geometrycollection; --fid NumGeometries(g) --120 2 --121 2 --122 0 --123 0 --SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point; --fid AsText(GeometryN(g, 2)) --111 POINT(10 10) --112 POINT(11 11) --113 POINT(4 10) --SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line; --fid AsText(GeometryN(g, 2)) --114 LINESTRING(16 0,16 23,16 48) --115 NULL --116 LINESTRING(2 5,5 8,21 7) --SELECT fid, AsText(GeometryN(g, 2)) 
from gis_multi_polygon; --fid AsText(GeometryN(g, 2)) --117 POLYGON((59 18,67 18,67 13,59 13,59 18)) --118 POLYGON((59 18,67 18,67 13,59 13,59 18)) --119 NULL --SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection; --fid AsText(GeometryN(g, 2)) --120 LINESTRING(0 0,10 10) --121 LINESTRING(3 6,7 9) --122 NULL --123 NULL --SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection; --fid AsText(GeometryN(g, 1)) --120 POINT(0 0) --121 POINT(44 6) --122 NULL --123 NULL --SELECT g1.fid as first, g2.fid as second, --Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o, --Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t, --Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r --FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second; --first second w c o e d t i r --120 120 1 1 0 1 0 0 1 0 --120 121 0 0 1 0 0 0 1 0 --120 122 NULL NULL NULL NULL NULL NULL NULL NULL --120 123 NULL NULL NULL NULL NULL NULL NULL NULL --121 120 0 0 1 0 0 0 1 0 --121 121 1 1 0 1 0 0 1 0 --121 122 NULL NULL NULL NULL NULL NULL NULL NULL --121 123 NULL NULL NULL NULL NULL NULL NULL NULL --122 120 NULL NULL NULL NULL NULL NULL NULL NULL --122 121 NULL NULL NULL NULL NULL NULL NULL NULL --122 122 NULL NULL NULL NULL NULL NULL NULL NULL --122 123 NULL NULL NULL NULL NULL NULL NULL NULL --123 120 NULL NULL NULL NULL NULL NULL NULL NULL --123 121 NULL NULL NULL NULL NULL NULL NULL NULL --123 122 NULL NULL NULL NULL NULL NULL NULL NULL --123 123 NULL NULL NULL NULL NULL NULL NULL NULL --DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry; --USE gis_ogs; --# Lakes --INSERT INTO lakes (fid,name,shore) VALUES ( --101, 'BLUE LAKE', --PolyFromText( --'POLYGON( -- (52 18,66 23,73 9,48 6,52 18), -- (59 18,67 18,67 13,59 13,59 18) -- )', --101)); --# Road Segments --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(102, 'Route 5', NULL, 2, --LineFromText( --'LINESTRING( 0 18, 10 21, 16 23, 28 26, 44 31 )' ,101)); --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(103, 'Route 5', 'Main Street', 4, --LineFromText( --'LINESTRING( 44 31, 56 34, 70 38 )' ,101)); --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(104, 'Route 5', NULL, 2, --LineFromText( --'LINESTRING( 70 38, 72 48 )' ,101)); --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(105, 'Main Street', NULL, 4, --LineFromText( --'LINESTRING( 70 38, 84 42 )' ,101)); --INSERT INTO road_segments (fid,name,aliases,num_lanes,centerline) VALUES(106, 'Dirt Road by Green Forest', NULL, --1, --LineFromText( --'LINESTRING( 28 26, 28 0 )',101)); --# DividedRoutes --INSERT INTO divided_routes (fid,name,num_lanes,centerlines) VALUES(119, 'Route 75', 4, --MLineFromText( --'MULTILINESTRING((10 48,10 21,10 0), -- (16 0,16 23,16 48))', 101)); --# Forests --INSERT INTO forests (fid,name,boundary) VALUES(109, 'Green Forest', --MPolyFromText( --'MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26), -- (52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))', --101)); --# Bridges --INSERT INTO bridges (fid,name,position) VALUES(110, 'Cam Bridge', PointFromText( --'POINT( 44 31 )', 101)); --# Streams --INSERT INTO streams (fid,name,centerline) VALUES(111, 'Cam Stream', --LineFromText( --'LINESTRING( 38 48, 44 41, 41 36, 44 31, 52 18 )', 101)); --INSERT INTO streams (fid,name,centerline) VALUES(112, NULL, --LineFromText( --'LINESTRING( 
76 0, 78 4, 73 9 )', 101)); --# Buildings --INSERT INTO buildings (fid,name,position,footprint) VALUES(113, '123 Main Street', --PointFromText( --'POINT( 52 30 )', 101), --PolyFromText( --'POLYGON( ( 50 31, 54 31, 54 29, 50 29, 50 31) )', 101)); --INSERT INTO buildings (fid,name,position,footprint) VALUES(114, '215 Main Street', --PointFromText( --'POINT( 64 33 )', 101), --PolyFromText( --'POLYGON( ( 66 34, 62 34, 62 32, 66 32, 66 34) )', 101)); --# Ponds --INSERT INTO ponds (fid,name,type,shores) VALUES(120, NULL, 'Stock Pond', --MPolyFromText( --'MULTIPOLYGON( ( ( 24 44, 22 42, 24 40, 24 44) ), -- ( ( 26 44, 26 40, 28 42, 26 44) ) )', 101)); --# Named Places --INSERT INTO named_places (fid,name,boundary) VALUES(117, 'Ashton', --PolyFromText( --'POLYGON( ( 62 48, 84 48, 84 30, 56 30, 56 34, 62 48) )', 101)); --INSERT INTO named_places (fid,name,boundary) VALUES(118, 'Goose Island', --PolyFromText( --'POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )', 101)); --# Map Neatlines --INSERT INTO map_neatlines (fid,neatline) VALUES(115, --PolyFromText( --'POLYGON( ( 0 0, 0 48, 84 48, 84 0, 0 0 ) )', 101)); --SELECT Dimension(shore) --FROM lakes --WHERE name = 'Blue Lake'; --Dimension(shore) --2 --SELECT GeometryType(centerlines) --FROM divided_routes --WHERE name = 'Route 75'; --GeometryType(centerlines) --MULTILINESTRING --SELECT AsText(boundary) --FROM named_places --WHERE name = 'Goose Island'; --AsText(boundary) --POLYGON((67 13,67 18,59 18,59 13,67 13)) --SELECT AsText(PolyFromWKB(AsBinary(boundary),101)) --FROM named_places --WHERE name = 'Goose Island'; --AsText(PolyFromWKB(AsBinary(boundary),101)) --POLYGON((67 13,67 18,59 18,59 13,67 13)) --SELECT SRID(boundary) --FROM named_places --WHERE name = 'Goose Island'; --SRID(boundary) --101 --SELECT IsEmpty(centerline) --FROM road_segments --WHERE name = 'Route 5' --AND aliases = 'Main Street'; --IsEmpty(centerline) --0 --SELECT AsText(Envelope(boundary)) --FROM named_places --WHERE name = 'Goose Island'; --AsText(Envelope(boundary)) --POLYGON((59 13,67 13,67 18,59 18,59 13)) --SELECT X(position) --FROM bridges --WHERE name = 'Cam Bridge'; --X(position) --44 --SELECT Y(position) --FROM bridges --WHERE name = 'Cam Bridge'; --Y(position) --31 --SELECT AsText(StartPoint(centerline)) --FROM road_segments --WHERE fid = 102; --AsText(StartPoint(centerline)) --POINT(0 18) --SELECT AsText(EndPoint(centerline)) --FROM road_segments --WHERE fid = 102; --AsText(EndPoint(centerline)) --POINT(44 31) --SELECT GLength(centerline) --FROM road_segments --WHERE fid = 106; --GLength(centerline) --26 --SELECT NumPoints(centerline) --FROM road_segments --WHERE fid = 102; --NumPoints(centerline) --5 --SELECT AsText(PointN(centerline, 1)) --FROM road_segments --WHERE fid = 102; --AsText(PointN(centerline, 1)) --POINT(0 18) --SELECT AsText(Centroid(boundary)) --FROM named_places --WHERE name = 'Goose Island'; --AsText(Centroid(boundary)) --POINT(63 15.5) --SELECT Area(boundary) --FROM named_places --WHERE name = 'Goose Island'; --Area(boundary) --40 --SELECT AsText(ExteriorRing(shore)) --FROM lakes --WHERE name = 'Blue Lake'; --AsText(ExteriorRing(shore)) --LINESTRING(52 18,66 23,73 9,48 6,52 18) --SELECT NumInteriorRings(shore) --FROM lakes --WHERE name = 'Blue Lake'; --NumInteriorRings(shore) --1 --SELECT AsText(InteriorRingN(shore, 1)) --FROM lakes --WHERE name = 'Blue Lake'; --AsText(InteriorRingN(shore, 1)) --LINESTRING(59 18,67 18,67 13,59 13,59 18) --SELECT NumGeometries(centerlines) --FROM divided_routes --WHERE name = 'Route 75'; --NumGeometries(centerlines) 
--2 --SELECT AsText(GeometryN(centerlines, 2)) --FROM divided_routes --WHERE name = 'Route 75'; --AsText(GeometryN(centerlines, 2)) --LINESTRING(16 0,16 23,16 48) --SELECT IsClosed(centerlines) --FROM divided_routes --WHERE name = 'Route 75'; --IsClosed(centerlines) --0 --SELECT GLength(centerlines) --FROM divided_routes --WHERE name = 'Route 75'; --GLength(centerlines) --96 --SELECT AsText(Centroid(shores)) --FROM ponds --WHERE fid = 120; --AsText(Centroid(shores)) --POINT(25 42) --SELECT Area(shores) --FROM ponds --WHERE fid = 120; --Area(shores) --8 --SELECT ST_Equals(boundary, --PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1)) --FROM named_places --WHERE name = 'Goose Island'; --ST_Equals(boundary, --PolyFromText('POLYGON( ( 67 13, 67 18, 59 18, 59 13, 67 13) )',1)) --1 --SELECT ST_Disjoint(centerlines, boundary) --FROM divided_routes, named_places --WHERE divided_routes.name = 'Route 75' --AND named_places.name = 'Ashton'; --ST_Disjoint(centerlines, boundary) --1 --SELECT ST_Touches(centerline, shore) --FROM streams, lakes --WHERE streams.name = 'Cam Stream' --AND lakes.name = 'Blue Lake'; --ST_Touches(centerline, shore) --1 --SELECT Crosses(road_segments.centerline, divided_routes.centerlines) --FROM road_segments, divided_routes --WHERE road_segments.fid = 102 --AND divided_routes.name = 'Route 75'; --Crosses(road_segments.centerline, divided_routes.centerlines) --1 --SELECT ST_Intersects(road_segments.centerline, divided_routes.centerlines) --FROM road_segments, divided_routes --WHERE road_segments.fid = 102 --AND divided_routes.name = 'Route 75'; --ST_Intersects(road_segments.centerline, divided_routes.centerlines) --1 --SELECT ST_Contains(forests.boundary, named_places.boundary) --FROM forests, named_places --WHERE forests.name = 'Green Forest' --AND named_places.name = 'Ashton'; --ST_Contains(forests.boundary, named_places.boundary) --0 --SELECT ST_Distance(position, boundary) --FROM bridges, named_places --WHERE bridges.name = 'Cam Bridge' --AND named_places.name = 'Ashton'; --ST_Distance(position, boundary) --12 --SELECT AsText(ST_Difference(named_places.boundary, forests.boundary)) --FROM named_places, forests --WHERE named_places.name = 'Ashton' --AND forests.name = 'Green Forest'; --AsText(ST_Difference(named_places.boundary, forests.boundary)) --POLYGON((56 34,62 48,84 48,84 42,56 34)) --SELECT AsText(ST_Union(shore, boundary)) --FROM lakes, named_places --WHERE lakes.name = 'Blue Lake' --AND named_places.name = 'Goose Island'; --AsText(ST_Union(shore, boundary)) --POLYGON((48 6,52 18,66 23,73 9,48 6)) --SELECT AsText(ST_SymDifference(shore, boundary)) --FROM lakes, named_places --WHERE lakes.name = 'Blue Lake' --AND named_places.name = 'Ashton'; --AsText(ST_SymDifference(shore, boundary)) --MULTIPOLYGON(((48 6,52 18,66 23,73 9,48 6),(59 13,59 18,67 18,67 13,59 13)),((56 30,56 34,62 48,84 48,84 30,56 30))) --SELECT count(*) --FROM buildings, bridges --WHERE ST_Contains(ST_Buffer(bridges.position, 15.0), buildings.footprint) = 1; --count(*) --1 -+ERROR HY000: The storage engine doesn't support SPATIAL indexes -+# ERROR: Statement ended with errno 1464, errname ER_TABLE_CANT_HANDLE_SPKEYS (expected to succeed) -+# ------------ UNEXPECTED RESULT ------------ -+# [ CREATE TABLE gis_point (fid INT(11) /*!*/ /*Custom column options*/, g POINT NOT NULL, SPATIAL INDEX(g)) ENGINE=InnoDB /*!*/ /*Custom table options*/ ] -+# The statement|command finished with ER_TABLE_CANT_HANDLE_SPKEYS. 
-+# Geometry types or spatial indexes or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. -+# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. -+# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. -+# Also, this problem may cause a chain effect (more errors of different kinds in the test). -+# ------------------------------------------- - DROP DATABASE gis_ogs; - USE test; diff --git a/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff index 4215af58011..a8c78b117a9 100644 --- a/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff +++ b/storage/myisam/mysql-test/storage_engine/alter_tablespace.rdiff @@ -13,7 +13,7 @@ -2 -ALTER TABLE t1 DISCARD TABLESPACE; -SELECT a FROM t1; --ERROR HY000: Tablespace has been discarded for table 't1' +-ERROR HY000: Tablespace has been discarded for table `t1` -ALTER TABLE t1 IMPORT TABLESPACE; -Warnings: -Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification diff --git a/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff b/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff index 19ca1a1b6e1..e5462f8cb1f 100644 --- a/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/alter_tablespace.rdiff @@ -13,7 +13,7 @@ -2 -ALTER TABLE t1 DISCARD TABLESPACE; -SELECT a FROM t1; --ERROR HY000: Tablespace has been discarded for table 't1' +-ERROR HY000: Tablespace has been discarded for table `t1` -ALTER TABLE t1 IMPORT TABLESPACE; -Warnings: -Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t1.cfg', will attempt to import without schema verification diff --git a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff index 585e5c915ba..5f5c2528a95 100644 --- a/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/create_table.rdiff @@ -29,7 +29,7 @@ -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( -- `1` bigint(20) NOT NULL DEFAULT '0' +- `1` bigint(20) NOT NULL DEFAULT 0 -) ENGINE= DEFAULT CHARSET=latin1 -SELECT * FROM t1; -1 diff --git a/storage/myisammrg/mysql-test/storage_engine/disabled.def b/storage/myisammrg/mysql-test/storage_engine/disabled.def new file mode 100644 index 00000000000..227e33029d8 --- /dev/null +++ b/storage/myisammrg/mysql-test/storage_engine/disabled.def @@ -0,0 +1,3 @@ +insert_delayed : MDEV-12880 - INSERT DELAYED is not detected as inapplicable to a table under lock +lock_concurrent : MDEV-12882 - Assertion failure +select_high_prio : MDEV-12885 - MDL_SHARED_READ_ONLY is taken instead of MDL_SHARED_READ diff --git a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff index d7bf99fd674..4346545abcf 100644 --- a/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/parts/repair_table.rdiff @@ -1,6 +1,6 @@ ---- repair_table.result 2013-01-23 01:35:44.388267080 +0400 -+++ repair_table.reject 2013-01-23 03:16:26.468307847 +0400 -@@ -1,234 +1,114 @@ +--- suite/storage_engine/parts/repair_table.result 
2017-05-20 03:58:19.451939791 +0300 ++++ ../storage/myisammrg/mysql-test/storage_engine/parts/repair_table.reject 2017-05-24 02:42:31.130318292 +0300 +@@ -1,234 +1,115 @@ call mtr.add_suppression("Table '.*t1.*' is marked as crashed and should be repaired"); DROP TABLE IF EXISTS t1, t2; CREATE TABLE t1 (a , b ) ENGINE= PARTITION BY HASH(a) PARTITIONS 2; @@ -144,6 +144,7 @@ call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table"); call mtr.add_suppression(" '\..test.t1'"); call mtr.add_suppression("Couldn't repair table: test.t1"); ++call mtr.add_suppression("Table 't1' is marked as crashed.*"); CREATE TABLE t1 (a , b , (a)) ENGINE= PARTITION BY HASH(a) PARTITIONS 2; +ERROR HY000: Engine cannot be used in partitioned tables +# ERROR: Statement ended with errno 1572, errname ER_PARTITION_MERGE_ERROR (expected to succeed) diff --git a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff index 9ff8f906511..79f6c7040e0 100644 --- a/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/repair_table.rdiff @@ -1,5 +1,5 @@ ---- repair_table.result 2013-01-23 01:26:05.995538460 +0400 -+++ repair_table.reject 2013-01-23 02:50:55.035560564 +0400 +--- suite/storage_engine/repair_table.result 2017-05-24 01:09:07.274213486 +0300 ++++ suite/storage_engine/repair_table.reject 2017-05-24 01:10:25.466214949 +0300 @@ -4,56 +4,50 @@ CREATE TABLE t2 (a , b ) ENGINE= ; REPAIR TABLE t1; @@ -71,7 +71,7 @@ DROP TABLE t1, t2; call mtr.add_suppression("Got an error from thread_id=.*"); call mtr.add_suppression("MySQL thread id .*, query id .* localhost.*root Checking table"); -@@ -62,45 +56,32 @@ +@@ -63,45 +57,32 @@ CREATE TABLE t1 (a , b , (a)) ENGINE= ; REPAIR TABLE t1; Table Op Msg_type Msg_text @@ -104,7 +104,7 @@ -test.t1 check error Corrupt +test.t1 check status OK SELECT a,b FROM t1; --ERROR HY000: Incorrect key file for table 't1'; try to repair it +-ERROR HY000: Index for table 't1' is corrupt; try to repair it -# Statement ended with one of expected results (0,ER_NOT_KEYFILE,144). -# If you got a difference in error message, just add it to rdiff file -INSERT INTO t1 (a,b) VALUES (14,'n'),(15,'o'); diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff new file mode 100644 index 00000000000..671e26ec617 --- /dev/null +++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.rdiff @@ -0,0 +1,18 @@ +--- suite/storage_engine/tbl_opt_data_dir.result 2017-05-24 00:21:15.550159778 +0300 ++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_dir.reject 2017-05-24 00:25:45.506164827 +0300 +@@ -5,7 +5,7 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' ++) ENGINE= DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + # For ALTER TABLE the option is ignored + # Running ALTER TABLE .. 
DATA DIRECTORY = <> + Warnings: +@@ -15,5 +15,5 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' ++) ENGINE= DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + DROP TABLE t1; diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff deleted file mode 100644 index e6055278b3c..00000000000 --- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff +++ /dev/null @@ -1,18 +0,0 @@ ---- tbl_opt_data_index_dir.result 2013-01-22 22:05:05.246633000 +0400 -+++ tbl_opt_data_index_dir.reject 2013-01-23 02:50:59.951498762 +0400 -@@ -4,7 +4,7 @@ - t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL, - `b` char(8) DEFAULT NULL --) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' -+) ENGINE= DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) - Warnings: - Warning 1618 option ignored - SHOW CREATE TABLE t1; -@@ -12,5 +12,5 @@ - t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL, - `b` char(8) DEFAULT NULL --) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' -+) ENGINE= DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) - DROP TABLE t1; diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff new file mode 100644 index 00000000000..ca025861f68 --- /dev/null +++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.rdiff @@ -0,0 +1,18 @@ +--- suite/storage_engine/tbl_opt_index_dir.result 2017-05-24 00:21:15.550159778 +0300 ++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_index_dir.reject 2017-05-24 00:25:45.506164827 +0300 +@@ -5,7 +5,7 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 INDEX DIRECTORY='' ++) ENGINE= DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + # For ALTER TABLE the option is ignored + # Running ALTER TABLE .. 
INDEX DIRECTORY = <> + Warnings: +@@ -15,5 +15,5 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 INDEX DIRECTORY='' ++) ENGINE= DEFAULT CHARSET=latin1 INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + DROP TABLE t1; diff --git a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff index f7e0905d4e7..6c756e7b8e1 100644 --- a/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.rdiff @@ -1,17 +1,33 @@ ---- tbl_opt_row_format.result 2013-01-22 22:05:05.246633000 +0400 -+++ tbl_opt_row_format.reject 2013-01-23 02:51:04.743438518 +0400 -@@ -5,12 +5,12 @@ +--- ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.result~ 2017-05-24 00:50:44.254192857 +0300 ++++ ../storage/myisammrg/mysql-test/storage_engine/tbl_opt_row_format.reject 2017-05-24 00:50:44.334192859 +0300 +@@ -5,26 +5,26 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC ++) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + ALTER TABLE t1 ROW_FORMAT=FIXED; + SHOW CREATE TABLE t1; + Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL -) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED +) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=FIXED INSERT_METHOD=LAST UNION=(`mrg`.`t1`) - ALTER TABLE t1 ROW_FORMAT=DYNAMIC; + ALTER TABLE t1 ROW_FORMAT=PAGE; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` char(8) DEFAULT NULL --) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC -+) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC INSERT_METHOD=LAST UNION=(`mrg`.`t1`) +-) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE ++) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=PAGE INSERT_METHOD=LAST UNION=(`mrg`.`t1`) + ALTER TABLE t1 ROW_FORMAT=COMPACT; + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT ++) ENGINE= DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT INSERT_METHOD=LAST UNION=(`mrg`.`t1`) DROP TABLE t1; diff --git a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff index c7372326fad..d537967ef99 100644 --- a/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff +++ b/storage/myisammrg/mysql-test/storage_engine/vcol.rdiff @@ -6,12 +6,12 @@ -SHOW COLUMNS IN t1; -Field Type Null Key Default Extra -a int(11) # # --b int(11) # # VIRTUAL +-b int(11) # # VIRTUAL GENERATED -INSERT INTO t1 (a) VALUES (1),(2); -INSERT INTO t1 (a,b) VALUES (3,3),(4,4); -Warnings: --Warning 1906 The value specified for computed column 'b' in table 't1' ignored --Warning 1906 The value specified for computed column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored -SELECT a,b FROM t1; -a b -1 2 @@ -23,12 +23,12 @@ -SHOW COLUMNS IN t1; -Field Type Null Key Default Extra -a int(11) # # --b int(11) # # PERSISTENT +-b int(11) # # STORED GENERATED -INSERT INTO t1 (a) VALUES (1),(2); -INSERT INTO t1 (a,b) VALUES (3,3),(4,4); -Warnings: --Warning 1906 The value specified for computed column 'b' in table 't1' ignored 
--Warning 1906 The value specified for computed column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored -SELECT a,b FROM t1; -a b -1 2 @@ -40,12 +40,12 @@ -SHOW COLUMNS IN t1; -Field Type Null Key Default Extra -a int(11) # # --b int(11) # # VIRTUAL +-b int(11) # # VIRTUAL GENERATED -INSERT INTO t1 (a) VALUES (1),(2); -INSERT INTO t1 (a,b) VALUES (3,3),(4,4); -Warnings: --Warning 1906 The value specified for computed column 'b' in table 't1' ignored --Warning 1906 The value specified for computed column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored -SELECT a,b FROM t1; -a b -1 2 @@ -57,12 +57,12 @@ -SHOW COLUMNS IN t1; -Field Type Null Key Default Extra -a int(11) # # --b int(11) # # PERSISTENT +-b int(11) # # STORED GENERATED -INSERT INTO t1 (a) VALUES (1),(2); -INSERT INTO t1 (a,b) VALUES (3,3),(4,4); -Warnings: --Warning 1906 The value specified for computed column 'b' in table 't1' ignored --Warning 1906 The value specified for computed column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored +-Warning 1906 The value specified for generated column 'b' in table 't1' ignored -SELECT a,b FROM t1; -a b -1 2 @@ -70,11 +70,11 @@ -3 4 -4 5 -DROP TABLE t1; -+ERROR HY000: MRG_MyISAM storage engine does not support computed columns -+# ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS (expected to succeed) ++ERROR HY000: MRG_MyISAM storage engine does not support generated columns ++# ERROR: Statement ended with errno 1910, errname ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS (expected to succeed) +# ------------ UNEXPECTED RESULT ------------ +# [ CREATE TABLE t1 (a INT(11) /*!*/ /*Custom column options*/, b INT(11) /*!*/ /*Custom column options*/ GENERATED ALWAYS AS (a+1)) ENGINE=MRG_MYISAM /*!*/ /*Custom table options*/ UNION(mrg.t1) INSERT_METHOD=LAST ] -+# The statement|command finished with ER_UNSUPPORTED_ENGINE_FOR_VIRTUAL_COLUMNS. ++# The statement|command finished with ER_UNSUPPORTED_ENGINE_FOR_GENERATED_COLUMNS. +# Virtual columns or the mix could be unsupported|malfunctioning, or the problem was caused by previous errors. +# You can change the engine code, or create an rdiff, or disable the test by adding it to disabled.def. +# Further in this test, the message might sometimes be suppressed; a part of the test might be skipped. 
diff --git a/storage/xtradb/mysql-test/storage_engine/suite.pm b/storage/xtradb/mysql-test/storage_engine/suite.pm new file mode 100644 index 00000000000..e186a532dcc --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/suite.pm @@ -0,0 +1,8 @@ +package My::Suite::SE::XtraDB; + +@ISA = qw(My::Suite); + +return "Need XtraDB engine"; + +bless { }; + diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff deleted file mode 100644 index e09e50b17ec..00000000000 --- a/storage/xtradb/mysql-test/storage_engine/tbl_opt_data_index_dir.rdiff +++ /dev/null @@ -1,23 +0,0 @@ ---- suite/storage_engine/tbl_opt_data_index_dir.result 2013-10-03 20:35:06.000000000 +0400 -+++ suite/storage_engine/tbl_opt_data_index_dir.reject 2013-11-08 22:06:54.000000000 +0400 -@@ -1,10 +1,12 @@ - DROP TABLE IF EXISTS t1; -+Warnings: -+Warning 1618 option ignored - SHOW CREATE TABLE t1; - Table Create Table - t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL, - `b` char(8) DEFAULT NULL --) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' -+) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' - Warnings: - Warning 1618 option ignored - SHOW CREATE TABLE t1; -@@ -12,5 +14,5 @@ - t1 CREATE TABLE `t1` ( - `a` int(11) DEFAULT NULL, - `b` char(8) DEFAULT NULL --) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' -+) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' - DROP TABLE t1; diff --git a/storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff b/storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff new file mode 100644 index 00000000000..e09e50b17ec --- /dev/null +++ b/storage/xtradb/mysql-test/storage_engine/tbl_opt_index_dir.rdiff @@ -0,0 +1,23 @@ +--- suite/storage_engine/tbl_opt_data_index_dir.result 2013-10-03 20:35:06.000000000 +0400 ++++ suite/storage_engine/tbl_opt_data_index_dir.reject 2013-11-08 22:06:54.000000000 +0400 +@@ -1,10 +1,12 @@ + DROP TABLE IF EXISTS t1; ++Warnings: ++Warning 1618 option ignored + SHOW CREATE TABLE t1; + Table Create Table + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' ++) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' + Warnings: + Warning 1618 option ignored + SHOW CREATE TABLE t1; +@@ -12,5 +14,5 @@ + t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(8) DEFAULT NULL +-) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' INDEX DIRECTORY='' ++) ENGINE= DEFAULT CHARSET=latin1 DATA DIRECTORY='' + DROP TABLE t1; -- cgit v1.2.1 From 151daaf4805d4e8ed30d2d871ac99ff1c1873bdc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 5 Jun 2017 15:16:15 +0300 Subject: MDEV-12994 innodb_fast_shutdown=0 skips change buffer merge; fast shutdown does it srv_master_thread(): Pass the correct parameter to srv_shutdown(). This bug was introduced in MDEV-12052, and it affects the MariaDB 10.1.24 release. 
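A minimal sketch of the intended mapping, assuming (as the subject line implies) that the boolean passed to srv_shutdown() requests the slow-shutdown work such as the change buffer merge; the helper name below is illustrative only, not code from the tree:

	/* innodb_fast_shutdown = 0 -> slow shutdown -> do the change buffer merge
	   innodb_fast_shutdown = 1 -> fast shutdown -> skip the merge
	   innodb_fast_shutdown = 2 -> srv_shutdown() is never reached, because
	                               of the srv_fast_shutdown < 2 guard */
	static bool shutdown_wants_ibuf_merge(ulint fast_shutdown)
	{
		return(fast_shutdown == 0);	/* the corrected condition */
	}

The old code passed srv_fast_shutdown == 1, which inverted this mapping for the values 0 and 1, exactly as described in the subject line.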
--- storage/innobase/srv/srv0srv.cc | 2 +- storage/xtradb/srv/srv0srv.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 93ed302a236..bad1579070c 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -2403,7 +2403,7 @@ suspend_thread: case SRV_SHUTDOWN_CLEANUP: if (srv_shutdown_state == SRV_SHUTDOWN_CLEANUP && srv_fast_shutdown < 2) { - srv_shutdown(srv_fast_shutdown == 1); + srv_shutdown(srv_fast_shutdown == 0); } srv_suspend_thread(slot); my_thread_end(); diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index d18abbe0574..2fa3be014a5 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -3076,7 +3076,7 @@ suspend_thread: case SRV_SHUTDOWN_CLEANUP: if (srv_shutdown_state == SRV_SHUTDOWN_CLEANUP && srv_fast_shutdown < 2) { - srv_shutdown(srv_fast_shutdown == 1); + srv_shutdown(srv_fast_shutdown == 0); } srv_suspend_thread(slot); my_thread_end(); -- cgit v1.2.1 From 6e0e5eefbc86a79f1dce756c4f3bfb5d91de95f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 6 Jun 2017 11:50:42 +0300 Subject: Fix some printf format type mismatch --- storage/innobase/buf/buf0dump.cc | 6 +++--- storage/innobase/buf/buf0rea.cc | 8 ++++---- storage/innobase/dict/dict0dict.cc | 5 +++-- storage/innobase/os/os0file.cc | 2 +- storage/innobase/que/que0que.cc | 3 ++- storage/innobase/row/row0upd.cc | 2 +- storage/maria/ma_pagecache.c | 16 ++++++++-------- 7 files changed, 22 insertions(+), 20 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index ce7488e3d1f..b318d1e9a3a 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -394,7 +394,7 @@ buf_dump( buf_dump_status( STATUS_VERBOSE, "Dumping buffer pool" - " " ULINTPF "/" ULINTPF "," + " " ULINTPF "/%lu," " page " ULINTPF "/" ULINTPF, i + 1, srv_buf_pool_instances, j + 1, n_pages); @@ -595,8 +595,8 @@ buf_load() if (dump == NULL) { fclose(f); buf_load_status(STATUS_ERR, - "Cannot allocate %lu bytes: %s", - (ulint) (dump_n * sizeof(*dump)), + "Cannot allocate " ULINTPF " bytes: %s", + dump_n * sizeof(*dump), strerror(errno)); return; } diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 789918c6a35..20603021072 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -773,11 +773,11 @@ buf_read_ahead_linear( os_aio_simulated_wake_handler_threads(); if (count) { - DBUG_PRINT("ib_buf", ("linear read-ahead %lu pages, " - "%lu:%lu", + DBUG_PRINT("ib_buf", ("linear read-ahead " ULINTPF " pages, " + "%u:%u", count, - (ulint)page_id.space(), - (ulint)page_id.page_no())); + page_id.space(), + page_id.page_no())); } /* Read ahead is considered one I/O operation for the purpose of diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index d1fd3b35061..ec8ecaafee7 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -4366,7 +4366,7 @@ dict_table_get_highest_foreign_id( } DBUG_PRINT("dict_table_get_highest_foreign_id", - ("id: %lu", biggest_id)); + ("id: " ULINTPF, biggest_id)); DBUG_RETURN(biggest_id); } @@ -6611,7 +6611,8 @@ dict_table_schema_check( if ((ulint) table->n_def - n_sys_cols != req_schema->n_cols) { /* the table has a different number of columns than required */ ut_snprintf(errstr, errstr_sz, - "%s has %lu 
columns but should have " ULINTPF ".", + "%s has " ULINTPF " columns but should have " + ULINTPF ".", ut_format_name(req_schema->table_name, buf, sizeof(buf)), table->n_def - n_sys_cols, diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index a8fde2e370e..5afb4527916 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -7550,7 +7550,7 @@ AIO::to_file(FILE* file) const fprintf(file, "%s IO for %s (offset=" UINT64PF - ", size=%lu)\n", + ", size=" ULINTPF ")\n", slot.type.is_read() ? "read" : "write", slot.name, slot.offset, slot.len); } diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc index 1aebef93ce5..5f9f57dc08a 100644 --- a/storage/innobase/que/que0que.cc +++ b/storage/innobase/que/que0que.cc @@ -417,7 +417,8 @@ que_graph_free_recursive( } DBUG_PRINT("que_graph_free_recursive", - ("node: %p, type: %lu", node, que_node_get_type(node))); + ("node: %p, type: " ULINTPF, node, + que_node_get_type(node))); switch (que_node_get_type(node)) { diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc index 16cbda1cc2b..0ca5d14084c 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -3225,7 +3225,7 @@ row_upd( ut_ad(!thr_get_trx(thr)->in_rollback); DBUG_PRINT("row_upd", ("table: %s", node->table->name.m_name)); - DBUG_PRINT("row_upd", ("info bits in update vector: 0x%lx", + DBUG_PRINT("row_upd", ("info bits in update vector: 0x" ULINTPFx, node->update ? node->update->info_bits: 0)); DBUG_PRINT("row_upd", ("foreign_id: %s", node->foreign ? node->foreign->id: "NULL")); diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c index 1eebfac03f8..caaaacfc9b1 100644 --- a/storage/maria/ma_pagecache.c +++ b/storage/maria/ma_pagecache.c @@ -1187,14 +1187,14 @@ void end_pagecache(PAGECACHE *pagecache, my_bool cleanup) pagecache->blocks_changed= 0; } - DBUG_PRINT("status", ("used: %zu changed: %zu w_requests: %lu " - "writes: %lu r_requests: %lu reads: %lu", - (ulong) pagecache->blocks_used, - (ulong) pagecache->global_blocks_changed, - (ulong) pagecache->global_cache_w_requests, - (ulong) pagecache->global_cache_write, - (ulong) pagecache->global_cache_r_requests, - (ulong) pagecache->global_cache_read)); + DBUG_PRINT("status", ("used: %zu changed: %zu w_requests: %llu " + "writes: %llu r_requests: %llu reads: %llu", + pagecache->blocks_used, + pagecache->global_blocks_changed, + pagecache->global_cache_w_requests, + pagecache->global_cache_write, + pagecache->global_cache_r_requests, + pagecache->global_cache_read)); if (cleanup) { -- cgit v1.2.1 From da9c30a7b0774e043a9cfd1ca144cec74de07182 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 31 May 2017 14:42:55 +0300 Subject: Remove unused declarations --- storage/innobase/include/srv0start.h | 22 ---------------------- 1 file changed, 22 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h index f2f28c7c86d..04ee6b42867 100644 --- a/storage/innobase/include/srv0start.h +++ b/storage/innobase/include/srv0start.h @@ -38,28 +38,6 @@ struct dict_table_t; only one buffer pool instance is used. */ #define BUF_POOL_SIZE_THRESHOLD (1024 * 1024 * 1024) -/*********************************************************************//** -Parse temporary tablespace configuration. 
-@return true if ok, false on parse error */ -bool -srv_parse_temp_data_file_paths_and_sizes( -/*=====================================*/ - char* str); /*!< in/out: the data file path string */ -/*********************************************************************//** -Frees the memory allocated by srv_parse_data_file_paths_and_sizes() -and srv_parse_log_group_home_dirs(). */ -void -srv_free_paths_and_sizes(void); -/*==========================*/ -/*********************************************************************//** -Adds a slash or a backslash to the end of a string if it is missing -and the string is not empty. -@return string which has the separator if the string is not empty */ -char* -srv_add_path_separator_if_needed( -/*=============================*/ - char* str); /*!< in: null-terminated character string */ - /****************************************************************//** Starts Innobase and creates a new database if database files are not found and the user wants. -- cgit v1.2.1 From ed2976caf005e65bc2b5eae1d174dfe72535683f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 5 Jun 2017 13:13:42 +0300 Subject: Remove an orphan declaration lock_rec_enqueue_waiting() MySQL WL#6835 replaced the function lock_rec_enqueue_waiting() with RecLock::enqueue() and later RecLock::add_to_waitq(). --- storage/innobase/include/lock0lock.h | 26 -------------------------- 1 file changed, 26 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index e718189062d..b3de1bf27f2 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -302,32 +302,6 @@ lock_rec_insert_check_and_lock( record */ MY_ATTRIBUTE((warn_unused_result)); -/*********************************************************************//** -Enqueues a waiting request for a lock which cannot be granted immediately. -Checks for deadlocks. -@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED, or -DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that -there was a deadlock, but another transaction was chosen as a victim, -and we got the lock immediately: no need to wait then */ -dberr_t -lock_rec_enqueue_waiting( -/*=====================*/ - ulint type_mode,/*!< in: lock mode this - transaction is requesting: - LOCK_S or LOCK_X, possibly - ORed with LOCK_GAP or - LOCK_REC_NOT_GAP, ORed with - LOCK_INSERT_INTENTION if this - waiting lock request is set - when performing an insert of - an index record */ - const buf_block_t* block, /*!< in: buffer block containing - the record */ - ulint heap_no,/*!< in: heap number of the record */ - dict_index_t* index, /*!< in: index of record */ - que_thr_t* thr, /*!< in: query thread */ - lock_prdt_t* prdt); /*!< in: Minimum Bounding Box */ - /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (update, delete mark, or delete unmark) of a clustered index record. If they do, -- cgit v1.2.1 From 86927cc712b6589f15fb1306a5fc42bb705c5302 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 26 May 2017 14:04:19 +0300 Subject: Remove traces of multiple InnoDB redo logs InnoDB never supported more than one copy of a redo log. There were provisions to do that. For Mariabackup, let us clean up this code. log_sys_init(): Renamed from log_init(). log_set_capacity(): Renamed from log_calc_max_ages(). log_init(): Renamed from log_group_init(). 
Remove the parameters id, space_id. Let the caller invoke log_set_capacity() when needed. log_group_t: Remove id, space_id, log_groups. log_t: Replace log_groups with a single log. recv_find_max_checkpoint(): Declare globally. Remove the first parameter. xtrabackup_choose_lsn_offset(): Remove (dead code). --- storage/innobase/include/log0log.h | 46 ++++---- storage/innobase/include/log0recv.h | 7 ++ storage/innobase/log/log0log.cc | 133 +++++++---------------- storage/innobase/log/log0recv.cc | 210 ++++++++++++++++-------------------- storage/innobase/srv/srv0start.cc | 12 +-- 5 files changed, 161 insertions(+), 247 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h index d1aae64227e..05e53e23f28 100644 --- a/storage/innobase/include/log0log.h +++ b/storage/innobase/include/log0log.h @@ -151,24 +151,24 @@ UNIV_INLINE lsn_t log_get_max_modified_age_async(void); /*================================*/ -/******************************************************//** -Initializes the log. */ +/** Initializes the redo logging subsystem. */ void -log_init(void); -/*==========*/ -/******************************************************************//** -Inits a log group to the log system. -@return true if success, false if not */ -MY_ATTRIBUTE((warn_unused_result)) +log_sys_init(); + +/** Initialize the redo log. +@param[in] n_files number of files +@param[in] file_size file size in bytes */ +void +log_init(ulint n_files, lsn_t file_size); +/** Calculate the recommended highest values for lsn - last_checkpoint_lsn +and lsn - buf_get_oldest_modification(). +@retval true on success +@retval false if the smallest log group is too small to +accommodate the number of OS threads in the database server */ bool -log_group_init( -/*===========*/ - ulint id, /*!< in: group id */ - ulint n_files, /*!< in: number of log files */ - lsn_t file_size, /*!< in: log file size in bytes */ - ulint space_id); /*!< in: space id of the file space - which contains the log files of this - group */ +log_set_capacity() + MY_ATTRIBUTE((warn_unused_result)); + /******************************************************//** Completes an i/o to a log file. */ void @@ -552,16 +552,12 @@ Currently, this is only protected by log_sys->mutex. However, in the case of log_write_up_to(), we will access some members only with the protection of log_sys->write_mutex, which should affect nothing for now. 
*/ struct log_group_t{ - /** log group identifier (always 0) */ - ulint id; /** number of files in the group */ ulint n_files; /** format of the redo log: e.g., LOG_HEADER_FORMAT_CURRENT */ ulint format; /** individual log file size in bytes, including the header */ - lsn_t file_size - /** file space which implements the log group */; - ulint space_id; + lsn_t file_size; /** corruption status */ log_group_state_t state; /** lsn used to fix coordinates within the log group */ @@ -580,8 +576,6 @@ struct log_group_t{ byte* checkpoint_buf_ptr; /** buffer for writing a checkpoint header */ byte* checkpoint_buf; - /** list of log groups */ - UT_LIST_NODE_T(log_group_t) log_groups; /** @return whether the redo log is encrypted */ bool is_encrypted() const @@ -639,8 +633,8 @@ struct log_t{ max_checkpoint_age; this flag is peeked at by log_free_check(), which does not reserve the log mutex */ - UT_LIST_BASE_NODE_T(log_group_t) - log_groups; /*!< log groups */ + /** the redo log */ + log_group_t log; /** The fields involved in the log buffer flush @{ */ @@ -729,7 +723,7 @@ struct log_t{ /** @return whether the redo log is encrypted */ bool is_encrypted() const { - return(UT_LIST_GET_FIRST(log_groups)->is_encrypted()); + return(log.is_encrypted()); } }; diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 74ea6c95036..784699279d4 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -41,6 +41,13 @@ Created 9/20/1997 Heikki Tuuri /** @return whether recovery is currently running. */ #define recv_recovery_is_on() recv_recovery_on +/** Find the latest checkpoint in the log header. +@param[out] max_field LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2 +@return error code or DB_SUCCESS */ +dberr_t +recv_find_max_checkpoint(ulint* max_field) + MY_ATTRIBUTE((nonnull, warn_unused_result)); + /** Apply the hashed log records to the page, if the page lsn is less than the lsn of a log record. @param just_read_in whether the page recently arrived to the I/O handler diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 072338187a3..d8e4ba6fe98 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -667,18 +667,14 @@ log_group_set_fields( group->lsn = lsn; } -/*****************************************************************//** -Calculates the recommended highest values for lsn - last_checkpoint_lsn +/** Calculate the recommended highest values for lsn - last_checkpoint_lsn and lsn - buf_get_oldest_modification(). @retval true on success @retval false if the smallest log group is too small to accommodate the number of OS threads in the database server */ -static MY_ATTRIBUTE((warn_unused_result)) bool -log_calc_max_ages(void) -/*===================*/ +log_set_capacity() { - log_group_t* group; lsn_t margin; ulint free; bool success = true; @@ -686,21 +682,7 @@ log_calc_max_ages(void) log_mutex_enter(); - group = UT_LIST_GET_FIRST(log_sys->log_groups); - - ut_ad(group); - - smallest_capacity = LSN_MAX; - - while (group) { - if (log_group_get_capacity(group) < smallest_capacity) { - - smallest_capacity = log_group_get_capacity(group); - } - - group = UT_LIST_GET_NEXT(log_groups, group); - } - + smallest_capacity = log_group_get_capacity(&log_sys->log); /* Add extra safety */ smallest_capacity = smallest_capacity - smallest_capacity / 10; @@ -747,11 +729,9 @@ failure: return(success); } -/******************************************************//** -Initializes the log. 
*/ +/** Initializes the redo logging subsystem. */ void -log_init(void) -/*==========*/ +log_sys_init() { log_sys = static_cast(ut_zalloc_nokey(sizeof(log_t))); @@ -780,7 +760,6 @@ log_init(void) log_sys->max_buf_free = log_sys->buf_size / LOG_BUF_FLUSH_RATIO - LOG_BUF_FLUSH_MARGIN; log_sys->check_flush_or_checkpoint = true; - UT_LIST_INIT(log_sys->log_groups, &log_group_t::log_groups); log_sys->n_log_ios_old = log_sys->n_log_ios; log_sys->last_printout_time = time(NULL); @@ -824,32 +803,20 @@ log_init(void) } } -/******************************************************************//** -Inits a log group to the log system. -@return true if success, false if not */ -MY_ATTRIBUTE((warn_unused_result)) -bool -log_group_init( -/*===========*/ - ulint id, /*!< in: group id */ - ulint n_files, /*!< in: number of log files */ - lsn_t file_size, /*!< in: log file size in bytes */ - ulint space_id) /*!< in: space id of the file space - which contains the log files of this - group */ +/** Initialize the redo log. +@param[in] n_files number of files +@param[in] file_size file size in bytes */ +void +log_init(ulint n_files, lsn_t file_size) { ulint i; - log_group_t* group; + log_group_t* group = &log_sys->log; - group = static_cast(ut_malloc_nokey(sizeof(log_group_t))); - - group->id = id; group->n_files = n_files; group->format = srv_encrypt_log ? LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED : LOG_HEADER_FORMAT_CURRENT; group->file_size = file_size; - group->space_id = space_id; group->state = LOG_GROUP_OK; group->lsn = LOG_START_LSN; group->lsn_offset = LOG_FILE_HDR_SIZE; @@ -875,9 +842,6 @@ log_group_init( group->checkpoint_buf = static_cast( ut_align(group->checkpoint_buf_ptr,OS_FILE_LOG_BLOCK_SIZE)); - - UT_LIST_ADD_LAST(log_sys->log_groups, group); - return(log_calc_max_ages()); } /******************************************************//** @@ -900,12 +864,11 @@ log_io_complete( case SRV_O_DIRECT: case SRV_O_DIRECT_NO_FSYNC: case SRV_ALL_O_DIRECT_FSYNC: - fil_flush(group->space_id); + fil_flush(SRV_LOG_SPACE_FIRST_ID); } - DBUG_PRINT("ib_log", ("checkpoint info written to group %u", - unsigned(group->id))); + DBUG_PRINT("ib_log", ("checkpoint info written")); log_io_complete_checkpoint(); return; @@ -932,7 +895,6 @@ log_group_file_header_flush( ut_ad(log_write_mutex_own()); ut_ad(!recv_no_log_write); - ut_ad(group->id == 0); ut_a(nth_file < group->n_files); ut_ad((group->format & ~LOG_HEADER_FORMAT_ENCRYPTED) == LOG_HEADER_FORMAT_CURRENT); @@ -951,9 +913,8 @@ log_group_file_header_flush( dest_offset = nth_file * group->file_size; DBUG_PRINT("ib_log", ("write " LSN_PF - " group " ULINTPF " file " ULINTPF " header", - start_lsn, group->id, nth_file)); + start_lsn, nth_file)); log_sys->n_log_ios++; @@ -965,7 +926,7 @@ log_group_file_header_flush( = (ulint) (dest_offset / univ_page_size.physical()); fil_io(IORequestLogWrite, true, - page_id_t(group->space_id, page_no), + page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, (ulint) (dest_offset % univ_page_size.physical()), OS_FILE_LOG_BLOCK_SIZE, buf, group); @@ -1051,10 +1012,10 @@ loop: DBUG_PRINT("ib_log", ("write " LSN_PF " to " LSN_PF - ": group " ULINTPF " len " ULINTPF + ": len " ULINTPF " blocks " ULINTPF ".." 
ULINTPF, start_lsn, next_offset, - group->id, write_len, + write_len, log_block_get_hdr_no(buf), log_block_get_hdr_no( buf + write_len @@ -1092,7 +1053,7 @@ loop: = (ulint) (next_offset / univ_page_size.physical()); fil_io(IORequestLogWrite, true, - page_id_t(group->space_id, page_no), + page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, (ulint) (next_offset % UNIV_PAGE_SIZE), write_len, buf, group); @@ -1260,7 +1221,6 @@ loop: return; } - log_group_t* group; ulint start_offset; ulint end_offset; ulint area_start; @@ -1304,9 +1264,7 @@ loop: log_buffer_switch(); - group = UT_LIST_GET_FIRST(log_sys->log_groups); - - log_group_set_fields(group, log_sys->write_lsn); + log_group_set_fields(&log_sys->log, log_sys->write_lsn); log_mutex_exit(); /* Calculate pad_size if needed. */ @@ -1317,7 +1275,7 @@ loop: end_offset = log_group_calc_lsn_offset( ut_uint64_align_up(write_lsn, OS_FILE_LOG_BLOCK_SIZE), - group); + &log_sys->log); end_offset_in_unit = (ulint) (end_offset % write_ahead_size); if (end_offset_in_unit > 0 @@ -1336,7 +1294,7 @@ loop: } /* Do the write to the log files */ log_group_write_buf( - group, write_buf + area_start, + &log_sys->log, write_buf + area_start, area_end - area_start + pad_size, #ifdef UNIV_DEBUG pad_size, @@ -1539,11 +1497,10 @@ log_io_complete_checkpoint(void) } /** Write checkpoint info to the log header. -@param[in,out] group redo log @param[in] end_lsn start LSN of the MLOG_CHECKPOINT mini-transaction */ static void -log_group_checkpoint(log_group_t* group, lsn_t end_lsn) +log_group_checkpoint(lsn_t end_lsn) { lsn_t lsn_offset; byte* buf; @@ -1556,10 +1513,11 @@ log_group_checkpoint(log_group_t* group, lsn_t end_lsn) || srv_shutdown_state != SRV_SHUTDOWN_NONE); DBUG_PRINT("ib_log", ("checkpoint " UINT64PF " at " LSN_PF - " written to group " ULINTPF, + " written", log_sys->next_checkpoint_no, - log_sys->next_checkpoint_lsn, - group->id)); + log_sys->next_checkpoint_lsn)); + + log_group_t* group = &log_sys->log; buf = group->checkpoint_buf; memset(buf, 0, OS_FILE_LOG_BLOCK_SIZE); @@ -1601,7 +1559,7 @@ log_group_checkpoint(log_group_t* group, lsn_t end_lsn) file write and a checkpoint field write */ fil_io(IORequestLogWrite, false, - page_id_t(group->space_id, 0), + page_id_t(SRV_LOG_SPACE_FIRST_ID, 0), univ_page_size, (log_sys->next_checkpoint_no & 1) ? LOG_CHECKPOINT_2 : LOG_CHECKPOINT_1, @@ -1626,7 +1584,8 @@ log_group_header_read( MONITOR_INC(MONITOR_LOG_IO); fil_io(IORequestLogRead, true, - page_id_t(group->space_id, header / univ_page_size.physical()), + page_id_t(SRV_LOG_SPACE_FIRST_ID, + header / univ_page_size.physical()), univ_page_size, header % univ_page_size.physical(), OS_FILE_LOG_BLOCK_SIZE, log_sys->checkpoint_buf, NULL); } @@ -1640,12 +1599,7 @@ log_write_checkpoint_info(bool sync, lsn_t end_lsn) ut_ad(log_mutex_own()); ut_ad(!srv_read_only_mode); - for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); - group; - group = UT_LIST_GET_NEXT(log_groups, group)) { - - log_group_checkpoint(group, end_lsn); - } + log_group_checkpoint(end_lsn); log_mutex_exit(); @@ -2283,13 +2237,11 @@ log_refresh_stats(void) log_sys->last_printout_time = time(NULL); } -/********************************************************//** -Closes a log group. */ +/** Close a log group. 
+@param[in,out] group log group to close */ static void -log_group_close( -/*===========*/ - log_group_t* group) /* in,own: log group to close */ +log_group_close(log_group_t* group) { ulint i; @@ -2300,7 +2252,10 @@ log_group_close( ut_free(group->file_header_bufs_ptr); ut_free(group->file_header_bufs); ut_free(group->checkpoint_buf_ptr); - ut_free(group); + group->n_files = 0; + group->file_header_bufs_ptr = NULL; + group->file_header_bufs = NULL; + group->checkpoint_buf_ptr = NULL; } /********************************************************//** @@ -2309,19 +2264,7 @@ void log_group_close_all(void) /*=====================*/ { - log_group_t* group; - - group = UT_LIST_GET_FIRST(log_sys->log_groups); - - while (UT_LIST_GET_LEN(log_sys->log_groups) > 0) { - log_group_t* prev_group = group; - - group = UT_LIST_GET_NEXT(log_groups, group); - - UT_LIST_REMOVE(log_sys->log_groups, prev_group); - - log_group_close(prev_group); - } + log_group_close(&log_sys->log); } /********************************************************//** diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index f3a00e7b5e6..e48e185274a 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -717,7 +717,7 @@ loop: = (ulint) (source_offset / univ_page_size.physical()); fil_io(IORequestLogRead, true, - page_id_t(group->space_id, page_no), + page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, (ulint) (source_offset % univ_page_size.physical()), len, buf, NULL); @@ -787,20 +787,13 @@ recv_synchronize_groups() const lsn_t start_lsn = ut_uint64_align_down(recovered_lsn, OS_FILE_LOG_BLOCK_SIZE); - log_group_read_log_seg(log_sys->buf, - UT_LIST_GET_FIRST(log_sys->log_groups), + log_group_read_log_seg(log_sys->buf, &log_sys->log, start_lsn, start_lsn + OS_FILE_LOG_BLOCK_SIZE); - ut_ad(UT_LIST_GET_LEN(log_sys->log_groups) == 1); + /* Update the fields in the group struct to correspond to + recovered_lsn */ - for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); - group; - group = UT_LIST_GET_NEXT(log_groups, group)) { - /* Update the fields in the group struct to correspond to - recovered_lsn */ - - log_group_set_fields(group, recovered_lsn); - } + log_group_set_fields(&log_sys->log, recovered_lsn); /* Copy the checkpoint info to the log; remember that we have incremented checkpoint_no by one, and the info will not be written @@ -831,17 +824,14 @@ recv_check_log_header_checksum( @return error code or DB_SUCCESS */ static MY_ATTRIBUTE((warn_unused_result)) dberr_t -recv_find_max_checkpoint_0( - log_group_t** max_group, - ulint* max_field) +recv_find_max_checkpoint_0(log_group_t** max_group, ulint* max_field) { - log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); + log_group_t* group = &log_sys->log; ib_uint64_t max_no = 0; ib_uint64_t checkpoint_no; byte* buf = log_sys->checkpoint_buf; ut_ad(group->format == 0); - ut_ad(UT_LIST_GET_NEXT(log_groups, group) == NULL); /** Offset of the first checkpoint checksum */ static const uint CHECKSUM_1 = 288; @@ -852,6 +842,8 @@ recv_find_max_checkpoint_0( /** Least significant bits of the checkpoint offset */ static const uint OFFSET_LOW32 = 16; + *max_group = NULL; + for (ulint field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { log_group_header_read(group, field); @@ -883,9 +875,8 @@ recv_find_max_checkpoint_0( } DBUG_PRINT("ib_log", - ("checkpoint " UINT64PF " at " LSN_PF - " found in group " ULINTPF, - checkpoint_no, group->lsn, group->id)); + 
("checkpoint " UINT64PF " at " LSN_PF " found", + checkpoint_no, group->lsn)); if (checkpoint_no >= max_no) { *max_group = group; @@ -916,7 +907,7 @@ dberr_t recv_log_format_0_recover(lsn_t lsn) { log_mutex_enter(); - log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); + log_group_t* group = &log_sys->log; const lsn_t source_offset = log_group_calc_lsn_offset(lsn, group); log_mutex_exit(); @@ -932,7 +923,7 @@ recv_log_format_0_recover(lsn_t lsn) REFMAN "upgrading.html"; fil_io(IORequestLogRead, true, - page_id_t(group->space_id, page_no), + page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no), univ_page_size, (ulint) ((source_offset & ~(OS_FILE_LOG_BLOCK_SIZE - 1)) % univ_page_size.physical()), @@ -968,14 +959,10 @@ recv_log_format_0_recover(lsn_t lsn) } /** Find the latest checkpoint in the log header. -@param[out] max_group log group, or NULL @param[out] max_field LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2 @return error code or DB_SUCCESS */ -static MY_ATTRIBUTE((warn_unused_result)) dberr_t -recv_find_max_checkpoint( - log_group_t** max_group, - ulint* max_field) +recv_find_max_checkpoint(ulint* max_field) { log_group_t* group; ib_uint64_t max_no; @@ -983,101 +970,92 @@ recv_find_max_checkpoint( ulint field; byte* buf; - group = UT_LIST_GET_FIRST(log_sys->log_groups); + group = &log_sys->log; max_no = 0; - *max_group = NULL; *max_field = 0; buf = log_sys->checkpoint_buf; - while (group) { - group->state = LOG_GROUP_CORRUPTED; + group->state = LOG_GROUP_CORRUPTED; - log_group_header_read(group, 0); - /* Check the header page checksum. There was no - checksum in the first redo log format (version 0). */ - group->format = mach_read_from_4(buf + LOG_HEADER_FORMAT); - if (group->format != 0 - && !recv_check_log_header_checksum(buf)) { - ib::error() << "Invalid redo log header checksum."; - return(DB_CORRUPTION); - } + log_group_header_read(group, 0); + /* Check the header page checksum. There was no + checksum in the first redo log format (version 0). */ + group->format = mach_read_from_4(buf + LOG_HEADER_FORMAT); + if (group->format != 0 + && !recv_check_log_header_checksum(buf)) { + ib::error() << "Invalid redo log header checksum."; + return(DB_CORRUPTION); + } - switch (group->format) { - case 0: - return(recv_find_max_checkpoint_0( - max_group, max_field)); - case LOG_HEADER_FORMAT_CURRENT: - case LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED: - break; - default: - /* Ensure that the string is NUL-terminated. */ - buf[LOG_HEADER_CREATOR_END] = 0; - ib::error() << "Unsupported redo log format." - " The redo log was created" - " with " << buf + LOG_HEADER_CREATOR << - ". Please follow the instructions at " - REFMAN "upgrading-downgrading.html"; - /* Do not issue a message about a possibility - to cleanly shut down the newer server version - and to remove the redo logs, because the - format of the system data structures may - radically change after MySQL 5.7. */ - return(DB_ERROR); - } + switch (group->format) { + case 0: + return(recv_find_max_checkpoint_0(&group, max_field)); + case LOG_HEADER_FORMAT_CURRENT: + case LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED: + break; + default: + /* Ensure that the string is NUL-terminated. */ + buf[LOG_HEADER_CREATOR_END] = 0; + ib::error() << "Unsupported redo log format." + " The redo log was created" + " with " << buf + LOG_HEADER_CREATOR << + ". 
Please follow the instructions at " + REFMAN "upgrading-downgrading.html"; + /* Do not issue a message about a possibility + to cleanly shut down the newer server version + and to remove the redo logs, because the + format of the system data structures may + radically change after MySQL 5.7. */ + return(DB_ERROR); + } - for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; - field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { + for (field = LOG_CHECKPOINT_1; field <= LOG_CHECKPOINT_2; + field += LOG_CHECKPOINT_2 - LOG_CHECKPOINT_1) { - log_group_header_read(group, field); + log_group_header_read(group, field); - const ulint crc32 = log_block_calc_checksum_crc32(buf); - const ulint cksum = log_block_get_checksum(buf); + const ulint crc32 = log_block_calc_checksum_crc32(buf); + const ulint cksum = log_block_get_checksum(buf); - if (crc32 != cksum) { - DBUG_PRINT("ib_log", - ("invalid checkpoint," - " group " ULINTPF " at " ULINTPF - ", checksum %x expected %x", - group->id, field, - (unsigned) cksum, - (unsigned) crc32)); - continue; - } + if (crc32 != cksum) { + DBUG_PRINT("ib_log", + ("invalid checkpoint," + " at " ULINTPF + ", checksum " ULINTPFx + " expected " ULINTPFx, + field, cksum, crc32)); + continue; + } - if (group->is_encrypted() - && !log_crypt_read_checkpoint_buf(buf)) { - ib::error() << "Reading checkpoint" - " encryption info failed."; - continue; - } + if (group->is_encrypted() + && !log_crypt_read_checkpoint_buf(buf)) { + ib::error() << "Reading checkpoint" + " encryption info failed."; + continue; + } + + group->state = LOG_GROUP_OK; - group->state = LOG_GROUP_OK; + group->lsn = mach_read_from_8( + buf + LOG_CHECKPOINT_LSN); + group->lsn_offset = mach_read_from_8( + buf + LOG_CHECKPOINT_OFFSET); + checkpoint_no = mach_read_from_8( + buf + LOG_CHECKPOINT_NO); - group->lsn = mach_read_from_8( - buf + LOG_CHECKPOINT_LSN); - group->lsn_offset = mach_read_from_8( - buf + LOG_CHECKPOINT_OFFSET); - checkpoint_no = mach_read_from_8( - buf + LOG_CHECKPOINT_NO); + DBUG_PRINT("ib_log", + ("checkpoint " UINT64PF " at " LSN_PF " found ", + checkpoint_no, group->lsn)); - DBUG_PRINT("ib_log", - ("checkpoint " UINT64PF " at " LSN_PF - " found in group " ULINTPF, - checkpoint_no, group->lsn, group->id)); - - if (checkpoint_no >= max_no) { - *max_group = group; - *max_field = field; - max_no = checkpoint_no; - } + if (checkpoint_no >= max_no) { + *max_field = field; + max_no = checkpoint_no; } - - group = UT_LIST_GET_NEXT(log_groups, group); } - if (*max_group == NULL) { + if (*max_field == 0) { /* Before 5.7.9, we could get here during database initialization if we created an ib_logfile0 file that was filled with zeroes, and were killed. After @@ -1798,8 +1776,7 @@ recv_recover_page(bool just_read_in, buf_block_t* block) while (recv) { end_lsn = recv->end_lsn; - ut_ad(end_lsn - <= UT_LIST_GET_FIRST(log_sys->log_groups)->scanned_lsn); + ut_ad(end_lsn <= log_sys->log.scanned_lsn); if (recv->len > RECV_DATA_BLOCK_SIZE) { /* We have to copy the record body to a separate @@ -2988,10 +2965,9 @@ recv_group_scan_log_recs( DBUG_RETURN(false); } - DBUG_PRINT("ib_log", ("%s " LSN_PF - " completed for log group " ULINTPF, + DBUG_PRINT("ib_log", ("%s " LSN_PF " completed", last_phase ? 
"rescan" : "scan", - group->scanned_lsn, group->id)); + group->scanned_lsn)); DBUG_RETURN(store_to_hash == STORE_NO); } @@ -3142,11 +3118,9 @@ recv_init_crash_recovery_spaces() of first system tablespace page @return error code or DB_SUCCESS */ dberr_t -recv_recovery_from_checkpoint_start( - lsn_t flush_lsn) +recv_recovery_from_checkpoint_start(lsn_t flush_lsn) { log_group_t* group; - log_group_t* max_cp_group; ulint max_cp_field; lsn_t checkpoint_lsn; bool rescan; @@ -3172,14 +3146,14 @@ recv_recovery_from_checkpoint_start( /* Look for the latest checkpoint from any of the log groups */ - err = recv_find_max_checkpoint(&max_cp_group, &max_cp_field); + err = recv_find_max_checkpoint(&max_cp_field); if (err != DB_SUCCESS) { log_mutex_exit(); return(err); } - log_group_header_read(max_cp_group, max_cp_field); + log_group_header_read(&log_sys->log, max_cp_field); buf = log_sys->checkpoint_buf; @@ -3194,8 +3168,7 @@ recv_recovery_from_checkpoint_start( ut_ad(RECV_SCAN_SIZE <= log_sys->buf_size); - ut_ad(UT_LIST_GET_LEN(log_sys->log_groups) == 1); - group = UT_LIST_GET_FIRST(log_sys->log_groups); + group = &log_sys->log; const lsn_t end_lsn = mach_read_from_8( buf + LOG_CHECKPOINT_END_LSN); @@ -3483,11 +3456,8 @@ recv_reset_logs( log_sys->lsn = ut_uint64_align_up(lsn, OS_FILE_LOG_BLOCK_SIZE); - for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups); - group; group = UT_LIST_GET_NEXT(log_groups, group)) { - group->lsn = log_sys->lsn; - group->lsn_offset = LOG_FILE_HDR_SIZE; - } + log_sys->log.lsn = log_sys->lsn; + log_sys->log.lsn_offset = LOG_FILE_HDR_SIZE; log_sys->buf_next_to_write = 0; log_sys->write_lsn = log_sys->lsn; diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 95ce941d8a1..c89a11d821b 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -478,9 +478,8 @@ create_log_files( } } - if (!log_group_init(0, srv_n_log_files, - srv_log_file_size * UNIV_PAGE_SIZE, - SRV_LOG_SPACE_FIRST_ID)) { + log_init(srv_n_log_files, srv_log_file_size * UNIV_PAGE_SIZE); + if (!log_set_capacity()) { return(DB_ERROR); } @@ -1835,7 +1834,7 @@ innobase_start_or_create_for_mysql(void) #endif /* UNIV_DEBUG */ fsp_init(); - log_init(); + log_sys_init(); recv_sys_create(); recv_sys_init(buf_pool_get_curr_size()); @@ -2091,8 +2090,9 @@ innobase_start_or_create_for_mysql(void) } } - if (!log_group_init(0, i, srv_log_file_size * UNIV_PAGE_SIZE, - SRV_LOG_SPACE_FIRST_ID)) { + log_init(i, srv_log_file_size * UNIV_PAGE_SIZE); + + if (!log_set_capacity()) { return(srv_init_abort(DB_ERROR)); } } -- cgit v1.2.1 From bb1f41423a0c26313a7bd4b7704ccc4ad66e348a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 2 Jun 2017 16:04:50 +0300 Subject: InnoDB: Remove thread_mutex Use my_atomic for updating os_thread_count. os_thread_init(): Remove. 
--- storage/innobase/include/os0thread.h | 8 ------ storage/innobase/include/sync0types.h | 1 - storage/innobase/os/os0thread.cc | 46 +++++------------------------------ storage/innobase/srv/srv0srv.cc | 1 - storage/innobase/sync/sync0debug.cc | 2 -- 5 files changed, 6 insertions(+), 52 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/os0thread.h b/storage/innobase/include/os0thread.h index 6f521b5a2ec..5c3a62e59a0 100644 --- a/storage/innobase/include/os0thread.h +++ b/storage/innobase/include/os0thread.h @@ -151,23 +151,15 @@ os_thread_sleep( /*============*/ ulint tm); /*!< in: time in microseconds */ -/** -Initializes OS thread management data structures. */ -void -os_thread_init(); -/*============*/ - /** Frees OS thread management data structures. */ void os_thread_free(); -/*============*/ /*****************************************************************//** Check if there are threads active. @return true if the thread count > 0. */ bool os_thread_active(); -/*==============*/ #endif diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h index cb1b36d5962..ec4503daa64 100644 --- a/storage/innobase/include/sync0types.h +++ b/storage/innobase/include/sync0types.h @@ -354,7 +354,6 @@ enum latch_id_t { LATCH_ID_EVENT_MANAGER, LATCH_ID_EVENT_MUTEX, LATCH_ID_SYNC_ARRAY_MUTEX, - LATCH_ID_THREAD_MUTEX, LATCH_ID_ZIP_PAD_MUTEX, LATCH_ID_OS_AIO_READ_MUTEX, LATCH_ID_OS_AIO_WRITE_MUTEX, diff --git a/storage/innobase/os/os0thread.cc b/storage/innobase/os/os0thread.cc index 72199b4cf0b..dc6e54f0162 100644 --- a/storage/innobase/os/os0thread.cc +++ b/storage/innobase/os/os0thread.cc @@ -31,14 +31,9 @@ Created 9/8/1995 Heikki Tuuri #include "os0event.h" #include -/** Mutex that tracks the thread count. Used by innorwlocktest.cc -FIXME: the unit tests should use APIs */ -SysMutex thread_mutex; - /** Number of threads active. */ ulint os_thread_count; - /***************************************************************//** Compares two thread ids for equality. @return TRUE if equal */ @@ -127,11 +122,7 @@ os_thread_create_func( CloseHandle(handle); - mutex_enter(&thread_mutex); - - os_thread_count++; - - mutex_exit(&thread_mutex); + my_atomic_addlint(&os_thread_count, 1); return((os_thread_t)new_thread_id); #else /* _WIN32 else */ @@ -140,9 +131,7 @@ os_thread_create_func( pthread_attr_init(&attr); - mutex_enter(&thread_mutex); - ++os_thread_count; - mutex_exit(&thread_mutex); + my_atomic_addlint(&os_thread_count, 1); int ret = pthread_create(&new_thread_id, &attr, func, arg); @@ -197,16 +186,11 @@ os_thread_exit( pfs_delete_thread(); #endif - mutex_enter(&thread_mutex); - - os_thread_count--; + my_atomic_addlint(&os_thread_count, -1); #ifdef _WIN32 - mutex_exit(&thread_mutex); - ExitThread(0); #else - mutex_exit(&thread_mutex); if (detach) { pthread_detach(pthread_self()); } @@ -260,10 +244,6 @@ bool os_thread_active() /*==============*/ { - mutex_enter(&thread_mutex); - - bool active = (os_thread_count > 0); - /* All the threads have exited or are just exiting; NOTE that the threads may not have completed their exit yet. Should we use pthread_join() to make sure @@ -272,30 +252,16 @@ os_thread_active() os_thread_exit(). Now we just sleep 0.1 seconds and hope that is enough! */ - mutex_exit(&thread_mutex); - - return(active); -} - -/** -Initializes OS thread management data structures. 
*/ -void -os_thread_init() -/*============*/ -{ - mutex_create(LATCH_ID_THREAD_MUTEX, &thread_mutex); + return(my_atomic_loadlint(&os_thread_count) > 0); } /** Frees OS thread management data structures. */ void os_thread_free() -/*============*/ { - if (os_thread_count != 0) { - ib::warn() << "Some (" << os_thread_count << ") threads are" + if (ulint count = my_atomic_loadlint(&os_thread_count)) { + ib::warn() << "Some (" << count << ") threads are" " still active"; } - - mutex_destroy(&thread_mutex); } diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 0a84afa5106..06ed058f18e 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1183,7 +1183,6 @@ srv_boot(void) srv_normalize_init_values(); sync_check_init(); - os_thread_init(); /* Reset the system variables in the recovery module. */ recv_sys_var_init(); trx_pool_init(); diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc index 4fff24a77f1..11743e14be2 100644 --- a/storage/innobase/sync/sync0debug.cc +++ b/storage/innobase/sync/sync0debug.cc @@ -1498,8 +1498,6 @@ sync_latch_meta_init() LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK, sync_array_mutex_key); - LATCH_ADD_MUTEX(THREAD_MUTEX, SYNC_NO_ORDER_CHECK, thread_mutex_key); - LATCH_ADD_MUTEX(ZIP_PAD_MUTEX, SYNC_NO_ORDER_CHECK, zip_pad_mutex_key); LATCH_ADD_MUTEX(OS_AIO_READ_MUTEX, SYNC_NO_ORDER_CHECK, -- cgit v1.2.1 From 6439238c539577588e36961aeb8005bb102a1fbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 6 Jun 2017 14:57:20 +0300 Subject: Correct a merge error --- storage/xtradb/buf/buf0buf.cc | 1 + 1 file changed, 1 insertion(+) (limited to 'storage') diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 759e1441ee3..c72900bd082 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -3450,6 +3450,7 @@ got_block: } if (buf_flush_page_try(buf_pool, fix_block)) { + guess = fix_block; goto loop; } -- cgit v1.2.1 From 68890fe7d48b6fee505f294400224fe01107950c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 6 Jun 2017 15:24:43 +0300 Subject: Revert part of MDEV-12113 commit 1af8bf39ca2513bfdf43a55af0b6af10d32dcebb added unnecessary calls to fil_write_flushed_lsn() during redo log resizing at InnoDB server startup. Because fil_write_flushed_lsn() is neither redo-logged nor doublewrite buffered, the call is risky and should be avoided, because if the server killed during the write call, the whole InnoDB instance can become inaccessible (corrupted page 0 in the system tablespace). In the best case, this call might prevent a diagnostic message from being emitted to the error log on the next startup. 
--- storage/innobase/srv/srv0start.cc | 13 ++----------- storage/xtradb/srv/srv0start.cc | 10 ---------- 2 files changed, 2 insertions(+), 21 deletions(-) (limited to 'storage') diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 032b902c633..4e59ddd1f87 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -2766,17 +2766,8 @@ files_checked: return(err); } - /* create_log_files() can increase system lsn that is - why FIL_PAGE_FILE_FLUSH_LSN have to be updated */ - flushed_lsn = log_get_lsn(); - - err = fil_write_flushed_lsn(flushed_lsn); - - if (err == DB_SUCCESS) { - err = create_log_files_rename( - logfilename, dirnamelen, - flushed_lsn, logfile0); - } + err = create_log_files_rename(logfilename, dirnamelen, + log_get_lsn(), logfile0); if (err != DB_SUCCESS) { return(err); diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index 9491d5328e7..cbab9b2f8c4 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -2852,16 +2852,6 @@ files_checked: return(err); } - /* create_log_files() can increase system lsn that is - why FIL_PAGE_FILE_FLUSH_LSN have to be updated */ - flushed_lsn = log_get_lsn(); - - err = fil_write_flushed_lsn(flushed_lsn); - - if (err != DB_SUCCESS) { - return(err); - } - err = create_log_files_rename(logfilename, dirnamelen, log_get_lsn(), logfile0); -- cgit v1.2.1 From d03abc71a490f4ee6d173d07c95a2adb9c60457c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Thu, 8 Jun 2017 10:34:10 +0300 Subject: MDEV-12609: Allow suppression of InnoDB log messages about reserving extents Removed this output. --- storage/innobase/fsp/fsp0fsp.cc | 11 ----------- storage/xtradb/fsp/fsp0fsp.cc | 11 ----------- 2 files changed, 22 deletions(-) (limited to 'storage') diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 1732a02aeac..d04cd7168ed 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -2729,7 +2729,6 @@ fsp_reserve_free_extents( ibool success; ulint n_pages_added; size_t total_reserved = 0; - ulint rounds = 0; ut_ad(mtr); *n_reserved = n_ext; @@ -2809,17 +2808,7 @@ try_to_extend: space_header, mtr); if (success && n_pages_added > 0) { - - rounds++; total_reserved += n_pages_added; - - if (rounds > 50) { - ib_logf(IB_LOG_LEVEL_INFO, - "Space id %lu trying to reserve %lu extents actually reserved %lu " - " reserve %lu free %lu size %lu rounds %lu total_reserved %llu", - space, n_ext, n_pages_added, reserve, n_free, size, rounds, (ullint) total_reserved); - } - goto try_again; } diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc index 9292f84ab02..cf6fa4211ca 100644 --- a/storage/xtradb/fsp/fsp0fsp.cc +++ b/storage/xtradb/fsp/fsp0fsp.cc @@ -2738,7 +2738,6 @@ fsp_reserve_free_extents( ibool success; ulint n_pages_added; size_t total_reserved = 0; - ulint rounds = 0; ut_ad(mtr); *n_reserved = n_ext; @@ -2817,17 +2816,7 @@ try_to_extend: success = fsp_try_extend_data_file(&n_pages_added, space, space_header, mtr); if (success && n_pages_added > 0) { - - rounds++; total_reserved += n_pages_added; - - if (rounds > 50) { - ib_logf(IB_LOG_LEVEL_INFO, - "Space id %lu trying to reserve %lu extents actually reserved %lu " - " reserve %lu free %lu size %lu rounds %lu total_reserved %llu", - space, n_ext, n_pages_added, reserve, n_free, size, rounds, (ullint) total_reserved); - } - goto try_again; } -- cgit v1.2.1 From fbeb9489cd7d6ad859a49ae5ab8f876f3d988470 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 6 Jun 2017 14:59:42 +0300 Subject: Cleanup of MDEV-12600: crash during install_db with innodb_page_size=32K and ibdata1=3M The doublewrite buffer pages must fit in the first InnoDB system tablespace data file. The checks that were added in the initial patch (commit 112b21da37dad0fbb28bc65f9ab5a3ba6c0c2186) were at too high level and did not cover all cases. innodb.log_data_file_size: Test all innodb_page_size combinations. fsp_header_init(): Never return an error. Move the change buffer creation to the only caller that needs to do it. btr_create(): Clean up the logic. Remove the error log messages. buf_dblwr_create(): Try to return an error on non-fatal failure. Check that the first data file is big enough for creating the doublewrite buffers. buf_dblwr_process(): Check if the doublewrite buffer is available. Display the message only if it is available. recv_recovery_from_checkpoint_start_func(): Remove a redundant message about FIL_PAGE_FILE_FLUSH_LSN mismatch when crash recovery has already been initiated. fil_report_invalid_page_access(): Simplify the message. fseg_create_general(): Do not emit messages to the error log. innobase_init(): Revert the changes. trx_rseg_create(): Refactor (no functional change). --- storage/innobase/btr/btr0btr.cc | 54 ++++++++----------------- storage/innobase/buf/buf0dblwr.cc | 74 ++++++++++++++++++++++++----------- storage/innobase/dict/dict0crea.cc | 6 +-- storage/innobase/fil/fil0fil.cc | 20 +++++----- storage/innobase/fsp/fsp0fsp.cc | 37 +++--------------- storage/innobase/handler/ha_innodb.cc | 14 ------- storage/innobase/include/buf0dblwr.h | 16 ++++---- storage/innobase/include/fsp0fsp.h | 14 ++----- storage/innobase/include/trx0rseg.h | 11 +++--- storage/innobase/log/log0recv.cc | 23 ----------- storage/innobase/row/row0mysql.cc | 8 +--- storage/innobase/srv/srv0start.cc | 33 ++++++++-------- storage/innobase/trx/trx0rseg.cc | 34 +++++++--------- storage/xtradb/btr/btr0btr.cc | 54 ++++++++----------------- storage/xtradb/buf/buf0dblwr.cc | 74 ++++++++++++++++++++++++----------- storage/xtradb/dict/dict0crea.cc | 6 +-- storage/xtradb/fil/fil0fil.cc | 20 +++++----- storage/xtradb/fsp/fsp0fsp.cc | 37 +++--------------- storage/xtradb/handler/ha_innodb.cc | 14 ------- storage/xtradb/include/buf0dblwr.h | 16 ++++---- storage/xtradb/include/fsp0fsp.h | 14 ++----- storage/xtradb/include/trx0rseg.h | 11 +++--- storage/xtradb/log/log0recv.cc | 23 ----------- storage/xtradb/row/row0mysql.cc | 8 +--- storage/xtradb/srv/srv0start.cc | 33 ++++++++-------- storage/xtradb/trx/trx0rseg.cc | 34 +++++++--------- 26 files changed, 268 insertions(+), 420 deletions(-) (limited to 'storage') diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index e200a2b9677..b7afcf12b39 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2017, MariaDB Corporation +Copyright (c) 2014, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1684,9 +1684,7 @@ btr_create( dict_index_t* index, /*!< in: index */ mtr_t* mtr) /*!< in: mini-transaction handle */ { - ulint page_no; buf_block_t* block; - buf_frame_t* frame; page_t* page; page_zip_des_t* page_zip; @@ -1702,9 +1700,7 @@ btr_create( IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr); if (ibuf_hdr_block == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of the first ibuf header page failed."); - return (FIL_NULL); + return(FIL_NULL); } buf_block_dbg_add_level( @@ -1721,11 +1717,16 @@ btr_create( IBUF_TREE_ROOT_PAGE_NO, FSP_UP, mtr); - if (!block) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of the tree root page segment failed."); + if (block == NULL) { + return(FIL_NULL); } + ut_ad(buf_block_get_page_no(block) == IBUF_TREE_ROOT_PAGE_NO); + + buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW); + + flst_init(block->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, + mtr); } else { #ifdef UNIV_BLOB_DEBUG if ((type & DICT_CLUSTERED) && !index->blobs) { @@ -1738,41 +1739,18 @@ btr_create( block = fseg_create(space, 0, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); - if (!block) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of the btree segment failed."); + if (block == NULL) { + return(FIL_NULL); } - } - - if (block == NULL) { - - return(FIL_NULL); - } - - page_no = buf_block_get_page_no(block); - frame = buf_block_get_frame(block); - - if (type & DICT_IBUF) { - /* It is an insert buffer tree: initialize the free list */ - buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW); - - ut_ad(page_no == IBUF_TREE_ROOT_PAGE_NO); - - flst_init(frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr); - } else { - /* It is a non-ibuf tree: create a file segment for leaf - pages */ buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW); - if (!fseg_create(space, page_no, + if (!fseg_create(space, buf_block_get_page_no(block), PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) { /* Not enough space for new segment, free root segment before return. */ - btr_free_root(space, zip_size, page_no, mtr); - - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of the non-ibuf tree segment for leaf pages failed."); + btr_free_root(space, zip_size, + buf_block_get_page_no(block), mtr); return(FIL_NULL); } @@ -1816,7 +1794,7 @@ btr_create( ut_ad(page_get_max_insert_size(page, 2) > 2 * BTR_PAGE_MAX_REC_SIZE); - return(page_no); + return(buf_block_get_page_no(block)); } /************************************************************//** diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index e2c7ae9bae1..8ff5428d3a5 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -175,13 +175,14 @@ buf_dblwr_init( mem_zalloc(buf_size * sizeof(void*))); } -/****************************************************************//** -Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. */ +/** Create the doublewrite buffer if the doublewrite buffer header +is not present in the TRX_SYS page. 
+@return whether the operation succeeded +@retval true if the doublewrite buffer exists or was created +@retval false if the creation failed (too small first data file) */ UNIV_INTERN -void -buf_dblwr_create(void) -/*==================*/ +bool +buf_dblwr_create() { buf_block_t* block2; buf_block_t* new_block; @@ -194,8 +195,7 @@ buf_dblwr_create(void) if (buf_dblwr) { /* Already inited */ - - return; + return(true); } start_again: @@ -213,39 +213,59 @@ start_again: mtr_commit(&mtr); buf_dblwr_being_created = FALSE; - return; + return(true); } - ib_logf(IB_LOG_LEVEL_INFO, - "Doublewrite buffer not found: creating new"); - if (buf_pool_get_curr_size() < ((TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE + FSP_EXTENT_SIZE / 2 + 100) * UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Cannot create doublewrite buffer: you must " - "increase your buffer pool size. Cannot continue " - "operation."); + ib_logf(IB_LOG_LEVEL_ERROR, + "Cannot create doublewrite buffer: " + "innodb_buffer_pool_size is too small."); + mtr_commit(&mtr); + return(false); + } else { + fil_space_t* space = fil_space_acquire(TRX_SYS_SPACE); + const bool fail = UT_LIST_GET_FIRST(space->chain)->size + < 3 * FSP_EXTENT_SIZE; + fil_space_release(space); + + if (fail) { + goto too_small; + } } block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO, TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG, &mtr); + if (block2 == NULL) { +too_small: + ib_logf(IB_LOG_LEVEL_ERROR, + "Cannot create doublewrite buffer: " + "the first file in innodb_data_file_path" + " must be at least %luM.", + 3 * (FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) >> 20); + mtr_commit(&mtr); + return(false); + } + + ib_logf(IB_LOG_LEVEL_INFO, + "Doublewrite buffer not found: creating new"); + + /* FIXME: After this point, the doublewrite buffer creation + is not atomic. The doublewrite buffer should not exist in + the InnoDB system tablespace file in the first place. + It could be located in separate optional file(s) in a + user-specified location. */ + /* fseg_create acquires a second latch on the page, therefore we must declare it: */ buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK); - if (block2 == NULL) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Cannot create doublewrite buffer: you must " - "increase your tablespace size. 
" - "Cannot continue operation."); - } - fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG; prev_page_no = 0; @@ -482,6 +502,14 @@ buf_dblwr_process() byte* unaligned_read_buf; recv_dblwr_t& recv_dblwr = recv_sys->dblwr; + if (!buf_dblwr) { + return; + } + + ib_logf(IB_LOG_LEVEL_INFO, + "Restoring possible half-written data pages " + "from the doublewrite buffer..."); + unaligned_read_buf = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); read_buf = static_cast( diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc index 1ec7123bbc2..f6cd294884b 100644 --- a/storage/innobase/dict/dict0crea.cc +++ b/storage/innobase/dict/dict0crea.cc @@ -321,13 +321,9 @@ dict_build_table_def_step( mtr_start(&mtr); - bool res = fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); + fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); - - if (!res) { - return (DB_ERROR); - } } else { /* Create in the system tablespace: disallow Barracuda features by keeping only the first bit which says whether diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index e19ef110b4f..f88bb2add59 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -5812,21 +5812,19 @@ fil_report_invalid_page_access( ulint len, /*!< in: I/O length */ ulint type) /*!< in: I/O type */ { - ib_logf(IB_LOG_LEVEL_ERROR, + ib_logf(IB_LOG_LEVEL_FATAL, "Trying to access page number " ULINTPF " in space " ULINTPF " space name %s," " which is outside the tablespace bounds." - " Byte offset " ULINTPF ", len " ULINTPF " i/o type " ULINTPF ".", + " Byte offset " ULINTPF ", len " ULINTPF + " i/o type " ULINTPF ".%s", block_offset, space_id, space_name, - byte_offset, len, type); - - ib_logf(IB_LOG_LEVEL_FATAL, - "If you get this error at mysqld startup," - " please check that" - " your my.cnf matches the ibdata files" - " that you have in the" - " MySQL server."); + byte_offset, len, type, + space_id == 0 && !srv_was_started + ? "Please check that the configuration matches" + " the InnoDB system tablespace location (ibdata files)" + : ""); } /********************************************************************//** diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 98cd11f3369..4f05549bc1c 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -670,18 +670,13 @@ fsp_header_init_fields( } #ifndef UNIV_HOTBACKUP -/** Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. +/** Initialize a tablespace header. @param[in] space_id space id @param[in] size current size in blocks -@param[in,out] mtr min-transaction -@return true on success, otherwise false. 
*/ +@param[in,out] mtr mini-transaction */ UNIV_INTERN -bool -fsp_header_init( - ulint space_id, - ulint size, - mtr_t* mtr) +void +fsp_header_init(ulint space_id, ulint size, mtr_t* mtr) { fsp_header_t* header; buf_block_t* block; @@ -725,17 +720,7 @@ fsp_header_init( mlog_write_ull(header + FSP_SEG_ID, 1, mtr); - if (space_id == 0) { - fsp_fill_free_list(FALSE, space_id, header, mtr); - - if (btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, - 0, 0, DICT_IBUF_ID_MIN + space_id, - dict_ind_redundant, mtr) == FIL_NULL) { - return (false); - } - } else { - fsp_fill_free_list(TRUE, space_id, header, mtr); - } + fsp_fill_free_list(space_id != TRX_SYS_SPACE, space_id, header, mtr); fil_space_t* space = fil_space_acquire(space_id); ut_ad(space); @@ -745,8 +730,6 @@ fsp_header_init( } fil_space_release(space); - - return (true); } #endif /* !UNIV_HOTBACKUP */ @@ -2065,10 +2048,6 @@ fseg_create_general( success = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_NORMAL, mtr); if (!success) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Reserving %d free extents failed" - " could reserve only " ULINTPF " extents.", - 2, n_reserved); return(NULL); } } @@ -2078,9 +2057,6 @@ fseg_create_general( inode = fsp_alloc_seg_inode(space_header, mtr); if (inode == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of a new file segment inode page failed."); - goto funct_exit; } @@ -2109,9 +2085,6 @@ fseg_create_general( inode, 0, FSP_UP, mtr, mtr); if (block == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of a free page from space " ULINTPF " failed.", - space); fsp_free_seg_inode(space, zip_size, inode, mtr); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index b68a96c8846..012539d1ace 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -3353,7 +3353,6 @@ innobase_init( char *default_path; uint format_id; ulong num_pll_degree; - ulint min_size = 0; DBUG_ENTER("innobase_init"); handlerton *innobase_hton= (handlerton*) p; @@ -3564,19 +3563,6 @@ mem_free_and_error: goto error; } - /* All doublewrite buffer pages must fit to first system - datafile and first datafile must be at least 3M. */ - min_size = ut_max((3*1024*1024U), (192U*UNIV_PAGE_SIZE)); - - if ((srv_data_file_sizes[0]*1024*1024) < min_size) { - sql_print_error( - "InnoDB: first datafile is too small current=" ULINTPF - "M it should be at least " ULINTPF "M.", - srv_data_file_sizes[0], - min_size / (1024 * 1024)); - goto mem_free_and_error; - } - /* -------------- All log files ---------------------------*/ /* The default dir for log files is the datadir of MySQL */ diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h index 8e1b00db83c..7b7464761cc 100644 --- a/storage/innobase/include/buf0dblwr.h +++ b/storage/innobase/include/buf0dblwr.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -39,13 +39,15 @@ extern buf_dblwr_t* buf_dblwr; /** Set to TRUE when the doublewrite buffer is being created */ extern ibool buf_dblwr_being_created; -/****************************************************************//** -Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. */ +/** Create the doublewrite buffer if the doublewrite buffer header +is not present in the TRX_SYS page. +@return whether the operation succeeded +@retval true if the doublewrite buffer exists or was created +@retval false if the creation failed (too small first data file) */ UNIV_INTERN -void -buf_dblwr_create(void); -/*==================*/ +bool +buf_dblwr_create() + MY_ATTRIBUTE((warn_unused_result)); /****************************************************************//** At a database startup initializes the doublewrite buffer memory structure if diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 905e98cc1e6..d8d044ba2ec 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -520,19 +520,13 @@ fsp_header_init_fields( ulint space_id, /*!< in: space id */ ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS): 0, or table->flags if newer than COMPACT */ -/** Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. +/** Initialize a tablespace header. @param[in] space_id space id @param[in] size current size in blocks -@param[in,out] mtr min-transaction -@return true on success, otherwise false. */ +@param[in,out] mtr mini-transaction */ UNIV_INTERN -bool -fsp_header_init( - ulint space_id, - ulint size, - mtr_t* mtr) - MY_ATTRIBUTE((warn_unused_result)); +void +fsp_header_init(ulint space_id, ulint size, mtr_t* mtr); /**********************************************************************//** Increases the space size field of a space. */ diff --git a/storage/innobase/include/trx0rseg.h b/storage/innobase/include/trx0rseg.h index 185b05876b4..2fe5df14cee 100644 --- a/storage/innobase/include/trx0rseg.h +++ b/storage/innobase/include/trx0rseg.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -124,13 +125,13 @@ trx_rseg_mem_free( /*==============*/ trx_rseg_t* rseg); /*!< in, own: instance to free */ -/********************************************************************* -Creates a rollback segment. */ +/** Create a rollback segment. +@param[in] space undo tablespace ID +@return pointer to new rollback segment +@retval NULL on failure */ UNIV_INTERN trx_rseg_t* -trx_rseg_create( -/*============*/ - ulint space); /*!< in: id of UNDO tablespace */ +trx_rseg_create(ulint space); /******************************************************************** Get the number of unique rollback tablespaces in use except space id 0. 
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index bf00cd8e8d9..4e6e66e808e 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2914,11 +2914,6 @@ recv_init_crash_recovery(void) possible */ if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) { - - ib_logf(IB_LOG_LEVEL_INFO, - "Restoring possible half-written data pages " - "from the doublewrite buffer..."); - buf_dblwr_process(); /* Spawn the background thread to flush dirty pages @@ -3179,24 +3174,6 @@ recv_recovery_from_checkpoint_start_func( user about recovery: */ if (checkpoint_lsn != flushed_lsn) { - - if (checkpoint_lsn < flushed_lsn) { - - ib_logf(IB_LOG_LEVEL_WARN, - "The log sequence number " - "in the ibdata files is higher " - "than the log sequence number " - "in the ib_logfiles! Are you sure " - "you are using the right " - "ib_logfiles to start up the database. " - "Log sequence number in the " - "ib_logfiles is " LSN_PF ", log" - "sequence number stamped " - "to ibdata file header is " LSN_PF ".", - checkpoint_lsn, - flushed_lsn); - } - if (!recv_needed_recovery) { ib_logf(IB_LOG_LEVEL_INFO, "The log sequence number " diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 6ca9443dc7d..82938995e93 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -3590,15 +3590,9 @@ row_truncate_table_for_mysql( } while (index); mtr_start_trx(&mtr, trx); - bool ret = fsp_header_init(space_id, + fsp_header_init(space_id, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); - - if (!ret) { - table->file_unreadable = true; - err = DB_ERROR; - goto funct_exit; - } } } else { /* Lock all index trees for this table, as we will diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 4e59ddd1f87..c7026b4f04a 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -1561,26 +1561,18 @@ srv_undo_tablespaces_init( if (create_new_db) { mtr_t mtr; - bool ret=true; mtr_start(&mtr); /* The undo log tablespace */ for (i = 0; i < n_undo_tablespaces; ++i) { - ret = fsp_header_init( + fsp_header_init( undo_tablespace_ids[i], SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); - if (!ret) { - break; - } } mtr_commit(&mtr); - - if (!ret) { - return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); - } } return(DB_SUCCESS); @@ -2419,14 +2411,24 @@ files_checked: mtr_start(&mtr); - bool ret = fsp_header_init(0, sum_of_new_sizes, &mtr); + fsp_header_init(0, sum_of_new_sizes, &mtr); + compile_time_assert(TRX_SYS_SPACE == 0); + compile_time_assert(IBUF_SPACE_ID == 0); + + ulint ibuf_root = btr_create( + DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, + 0, 0, DICT_IBUF_ID_MIN, + dict_ind_redundant, &mtr); mtr_commit(&mtr); - if (!ret) { - return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); + if (ibuf_root == FIL_NULL) { + return(srv_init_abort(true, __FILE__, __LINE__, + DB_ERROR)); } + ut_ad(ibuf_root == IBUF_TREE_ROOT_PAGE_NO); + /* To maintain backward compatibility we create only the first rollback segment before the double write buffer. 
All the remaining rollback segments will be created later, @@ -2812,10 +2814,9 @@ files_checked: /* fprintf(stderr, "Max allowed record size %lu\n", page_get_free_space_of_empty() / 2); */ - if (buf_dblwr == NULL) { - /* Create the doublewrite buffer to a new tablespace */ - - buf_dblwr_create(); + if (!buf_dblwr_create()) { + return(srv_init_abort(create_new_db, __FILE__, __LINE__, + DB_ERROR)); } /* Here the double write buffer has already been created and so diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc index 21dbab98e48..16fa334872b 100644 --- a/storage/innobase/trx/trx0rseg.cc +++ b/storage/innobase/trx/trx0rseg.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -293,14 +294,13 @@ trx_rseg_create_instance( } } -/********************************************************************* -Creates a rollback segment. -@return pointer to new rollback segment if create successful */ +/** Create a rollback segment. +@param[in] space undo tablespace ID +@return pointer to new rollback segment +@retval NULL on failure */ UNIV_INTERN trx_rseg_t* -trx_rseg_create( -/*============*/ - ulint space) /*!< in: id of UNDO tablespace */ +trx_rseg_create(ulint space) { mtr_t mtr; ulint slot_no; @@ -323,25 +323,21 @@ trx_rseg_create( page_no = trx_rseg_header_create( space, 0, ULINT_MAX, slot_no, &mtr); - if (page_no == FIL_NULL) { - mtr_commit(&mtr); - return (rseg); - } - - sys_header = trx_sysf_get(&mtr); + if (page_no != FIL_NULL) { + sys_header = trx_sysf_get(&mtr); - id = trx_sysf_rseg_get_space(sys_header, slot_no, &mtr); - ut_a(id == space); + id = trx_sysf_rseg_get_space(sys_header, slot_no, &mtr); + ut_a(id == space); - zip_size = space ? fil_space_get_zip_size(space) : 0; + zip_size = space ? fil_space_get_zip_size(space) : 0; - rseg = trx_rseg_mem_create( - slot_no, space, zip_size, page_no, - purge_sys->ib_bh, &mtr); + rseg = trx_rseg_mem_create( + slot_no, space, zip_size, page_no, + purge_sys->ib_bh, &mtr); + } } mtr_commit(&mtr); - return(rseg); } diff --git a/storage/xtradb/btr/btr0btr.cc b/storage/xtradb/btr/btr0btr.cc index 7b5b3bb6cba..e66599e206d 100644 --- a/storage/xtradb/btr/btr0btr.cc +++ b/storage/xtradb/btr/btr0btr.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2014, 2017, MariaDB Corporation +Copyright (c) 2014, 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -1703,9 +1703,7 @@ btr_create( dict_index_t* index, /*!< in: index */ mtr_t* mtr) /*!< in: mini-transaction handle */ { - ulint page_no; buf_block_t* block; - buf_frame_t* frame; page_t* page; page_zip_des_t* page_zip; @@ -1721,9 +1719,7 @@ btr_create( IBUF_HEADER + IBUF_TREE_SEG_HEADER, mtr); if (ibuf_hdr_block == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of the first ibuf header page failed."); - return (FIL_NULL); + return(FIL_NULL); } buf_block_dbg_add_level( @@ -1740,11 +1736,16 @@ btr_create( IBUF_TREE_ROOT_PAGE_NO, FSP_UP, mtr); - if (!block) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of the tree root page segment failed."); + if (block == NULL) { + return(FIL_NULL); } + ut_ad(buf_block_get_page_no(block) == IBUF_TREE_ROOT_PAGE_NO); + + buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW); + + flst_init(block->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, + mtr); } else { #ifdef UNIV_BLOB_DEBUG if ((type & DICT_CLUSTERED) && !index->blobs) { @@ -1757,41 +1758,18 @@ btr_create( block = fseg_create(space, 0, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr); - if (!block) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of the btree segment failed."); + if (block == NULL) { + return(FIL_NULL); } - } - - if (block == NULL) { - - return(FIL_NULL); - } - - page_no = buf_block_get_page_no(block); - frame = buf_block_get_frame(block); - - if (type & DICT_IBUF) { - /* It is an insert buffer tree: initialize the free list */ - buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW); - - ut_ad(page_no == IBUF_TREE_ROOT_PAGE_NO); - - flst_init(frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr); - } else { - /* It is a non-ibuf tree: create a file segment for leaf - pages */ buf_block_dbg_add_level(block, SYNC_TREE_NODE_NEW); - if (!fseg_create(space, page_no, + if (!fseg_create(space, buf_block_get_page_no(block), PAGE_HEADER + PAGE_BTR_SEG_LEAF, mtr)) { /* Not enough space for new segment, free root segment before return. */ - btr_free_root(space, zip_size, page_no, mtr); - - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of the non-ibuf tree segment for leaf pages failed."); + btr_free_root(space, zip_size, + buf_block_get_page_no(block), mtr); return(FIL_NULL); } @@ -1835,7 +1813,7 @@ btr_create( ut_ad(page_get_max_insert_size(page, 2) > 2 * BTR_PAGE_MAX_REC_SIZE); - return(page_no); + return(buf_block_get_page_no(block)); } /************************************************************//** diff --git a/storage/xtradb/buf/buf0dblwr.cc b/storage/xtradb/buf/buf0dblwr.cc index c0c52deb57f..49371f9a6f1 100644 --- a/storage/xtradb/buf/buf0dblwr.cc +++ b/storage/xtradb/buf/buf0dblwr.cc @@ -175,13 +175,14 @@ buf_dblwr_init( mem_zalloc(buf_size * sizeof(void*))); } -/****************************************************************//** -Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. */ +/** Create the doublewrite buffer if the doublewrite buffer header +is not present in the TRX_SYS page. 
+@return whether the operation succeeded +@retval true if the doublewrite buffer exists or was created +@retval false if the creation failed (too small first data file) */ UNIV_INTERN -void -buf_dblwr_create(void) -/*==================*/ +bool +buf_dblwr_create() { buf_block_t* block2; buf_block_t* new_block; @@ -194,8 +195,7 @@ buf_dblwr_create(void) if (buf_dblwr) { /* Already inited */ - - return; + return(true); } start_again: @@ -213,39 +213,59 @@ start_again: mtr_commit(&mtr); buf_dblwr_being_created = FALSE; - return; + return(true); } - ib_logf(IB_LOG_LEVEL_INFO, - "Doublewrite buffer not found: creating new"); - if (buf_pool_get_curr_size() < ((TRX_SYS_DOUBLEWRITE_BLOCKS * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE + FSP_EXTENT_SIZE / 2 + 100) * UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Cannot create doublewrite buffer: you must " - "increase your buffer pool size. Cannot continue " - "operation."); + ib_logf(IB_LOG_LEVEL_ERROR, + "Cannot create doublewrite buffer: " + "innodb_buffer_pool_size is too small."); + mtr_commit(&mtr); + return(false); + } else { + fil_space_t* space = fil_space_acquire(TRX_SYS_SPACE); + const bool fail = UT_LIST_GET_FIRST(space->chain)->size + < 3 * FSP_EXTENT_SIZE; + fil_space_release(space); + + if (fail) { + goto too_small; + } } block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO, TRX_SYS_DOUBLEWRITE + TRX_SYS_DOUBLEWRITE_FSEG, &mtr); + if (block2 == NULL) { +too_small: + ib_logf(IB_LOG_LEVEL_ERROR, + "Cannot create doublewrite buffer: " + "the first file in innodb_data_file_path" + " must be at least %luM.", + 3 * (FSP_EXTENT_SIZE * UNIV_PAGE_SIZE) >> 20); + mtr_commit(&mtr); + return(false); + } + + ib_logf(IB_LOG_LEVEL_INFO, + "Doublewrite buffer not found: creating new"); + + /* FIXME: After this point, the doublewrite buffer creation + is not atomic. The doublewrite buffer should not exist in + the InnoDB system tablespace file in the first place. + It could be located in separate optional file(s) in a + user-specified location. */ + /* fseg_create acquires a second latch on the page, therefore we must declare it: */ buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK); - if (block2 == NULL) { - ib_logf(IB_LOG_LEVEL_FATAL, - "Cannot create doublewrite buffer: you must " - "increase your tablespace size. 
" - "Cannot continue operation."); - } - fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG; prev_page_no = 0; @@ -482,6 +502,14 @@ buf_dblwr_process() byte* unaligned_read_buf; recv_dblwr_t& recv_dblwr = recv_sys->dblwr; + if (!buf_dblwr) { + return; + } + + ib_logf(IB_LOG_LEVEL_INFO, + "Restoring possible half-written data pages " + "from the doublewrite buffer..."); + unaligned_read_buf = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); read_buf = static_cast( diff --git a/storage/xtradb/dict/dict0crea.cc b/storage/xtradb/dict/dict0crea.cc index faaa6d1bdc7..6d5b12474eb 100644 --- a/storage/xtradb/dict/dict0crea.cc +++ b/storage/xtradb/dict/dict0crea.cc @@ -323,13 +323,9 @@ dict_build_table_def_step( mtr_start(&mtr); - bool res = fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); + fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr); mtr_commit(&mtr); - - if(!res) { - return (DB_ERROR); - } } else { /* Create in the system tablespace: disallow Barracuda features by keeping only the first bit which says whether diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index b669d35ff15..d979c05c9a6 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2014, 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -6115,21 +6115,19 @@ fil_report_invalid_page_access( ulint len, /*!< in: I/O length */ ulint type) /*!< in: I/O type */ { - ib_logf(IB_LOG_LEVEL_ERROR, + ib_logf(IB_LOG_LEVEL_FATAL, "Trying to access page number " ULINTPF " in space " ULINTPF " space name %s," " which is outside the tablespace bounds." - " Byte offset " ULINTPF ", len " ULINTPF " i/o type " ULINTPF ".", + " Byte offset " ULINTPF ", len " ULINTPF + " i/o type " ULINTPF ".%s", block_offset, space_id, space_name, - byte_offset, len, type); - - ib_logf(IB_LOG_LEVEL_FATAL, - "If you get this error at mysqld startup," - " please check that" - " your my.cnf matches the ibdata files" - " that you have in the" - " MySQL server."); + byte_offset, len, type, + space_id == 0 && !srv_was_started + ? "Please check that the configuration matches" + " the InnoDB system tablespace location (ibdata files)" + : ""); } /********************************************************************//** diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc index 7929372ae6c..bd87b88f58d 100644 --- a/storage/xtradb/fsp/fsp0fsp.cc +++ b/storage/xtradb/fsp/fsp0fsp.cc @@ -673,18 +673,13 @@ fsp_header_init_fields( } #ifndef UNIV_HOTBACKUP -/** Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. +/** Initialize a tablespace header. @param[in] space_id space id @param[in] size current size in blocks -@param[in,out] mtr min-transaction -@return true on success, otherwise false. 
*/ +@param[in,out] mtr mini-transaction */ UNIV_INTERN -bool -fsp_header_init( - ulint space_id, - ulint size, - mtr_t* mtr) +void +fsp_header_init(ulint space_id, ulint size, mtr_t* mtr) { fsp_header_t* header; buf_block_t* block; @@ -728,17 +723,7 @@ fsp_header_init( mlog_write_ull(header + FSP_SEG_ID, 1, mtr); - if (space_id == 0) { - fsp_fill_free_list(FALSE, space_id, header, mtr); - - if (btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, - 0, 0, DICT_IBUF_ID_MIN + space_id, - dict_ind_redundant, mtr) == FIL_NULL) { - return (false); - } - } else { - fsp_fill_free_list(TRUE, space_id, header, mtr); - } + fsp_fill_free_list(space_id != TRX_SYS_SPACE, space_id, header, mtr); fil_space_t* space = fil_space_acquire(space_id); ut_ad(space); @@ -748,8 +733,6 @@ fsp_header_init( } fil_space_release(space); - - return (true); } #endif /* !UNIV_HOTBACKUP */ @@ -2074,10 +2057,6 @@ fseg_create_general( success = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_NORMAL, mtr); if (!success) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Reserving %d free extents failed" - " could reserve only " ULINTPF " extents.", - 2, n_reserved); return(NULL); } } @@ -2087,9 +2066,6 @@ fseg_create_general( inode = fsp_alloc_seg_inode(space_header, mtr); if (inode == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of a new file segment inode page failed."); - goto funct_exit; } @@ -2118,9 +2094,6 @@ fseg_create_general( inode, 0, FSP_UP, mtr, mtr); if (block == NULL) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Allocation of a free page from space " ULINTPF " failed.", - space); fsp_free_seg_inode(space, zip_size, inode, mtr); diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 7be6c64407b..dd5d041aed9 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -3770,7 +3770,6 @@ innobase_init( char *default_path; uint format_id; ulong num_pll_degree; - ulint min_size = 0; DBUG_ENTER("innobase_init"); handlerton *innobase_hton= (handlerton*) p; @@ -4021,19 +4020,6 @@ mem_free_and_error: goto error; } - /* All doublewrite buffer pages must fit to first system - datafile and first datafile must be at least 3M. */ - min_size = ut_max((3*1024*1024U), (192U*UNIV_PAGE_SIZE)); - - if ((srv_data_file_sizes[0]*1024*1024) < min_size) { - sql_print_error( - "InnoDB: first datafile is too small current=" ULINTPF - "M it should be at least " ULINTPF "M.", - srv_data_file_sizes[0], - min_size / (1024 * 1024)); - goto mem_free_and_error; - } - /* -------------- All log files ---------------------------*/ /* The default dir for log files is the datadir of MySQL */ diff --git a/storage/xtradb/include/buf0dblwr.h b/storage/xtradb/include/buf0dblwr.h index 8e1b00db83c..7b7464761cc 100644 --- a/storage/xtradb/include/buf0dblwr.h +++ b/storage/xtradb/include/buf0dblwr.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -39,13 +39,15 @@ extern buf_dblwr_t* buf_dblwr; /** Set to TRUE when the doublewrite buffer is being created */ extern ibool buf_dblwr_being_created; -/****************************************************************//** -Creates the doublewrite buffer to a new InnoDB installation. The header of the -doublewrite buffer is placed on the trx system header page. */ +/** Create the doublewrite buffer if the doublewrite buffer header +is not present in the TRX_SYS page. +@return whether the operation succeeded +@retval true if the doublewrite buffer exists or was created +@retval false if the creation failed (too small first data file) */ UNIV_INTERN -void -buf_dblwr_create(void); -/*==================*/ +bool +buf_dblwr_create() + MY_ATTRIBUTE((warn_unused_result)); /****************************************************************//** At a database startup initializes the doublewrite buffer memory structure if diff --git a/storage/xtradb/include/fsp0fsp.h b/storage/xtradb/include/fsp0fsp.h index d8f851a0846..715572199ab 100644 --- a/storage/xtradb/include/fsp0fsp.h +++ b/storage/xtradb/include/fsp0fsp.h @@ -519,19 +519,13 @@ fsp_header_init_fields( ulint space_id, /*!< in: space id */ ulint flags); /*!< in: tablespace flags (FSP_SPACE_FLAGS): 0, or table->flags if newer than COMPACT */ -/** Initializes the space header of a new created space and creates also the -insert buffer tree root if space == 0. +/** Initialize a tablespace header. @param[in] space_id space id @param[in] size current size in blocks -@param[in,out] mtr min-transaction -@return true on success, otherwise false. */ +@param[in,out] mtr mini-transaction */ UNIV_INTERN -bool -fsp_header_init( - ulint space_id, - ulint size, - mtr_t* mtr) - MY_ATTRIBUTE((warn_unused_result)); +void +fsp_header_init(ulint space_id, ulint size, mtr_t* mtr); /**********************************************************************//** Increases the space size field of a space. */ diff --git a/storage/xtradb/include/trx0rseg.h b/storage/xtradb/include/trx0rseg.h index b9c84ef2b06..e2853df7045 100644 --- a/storage/xtradb/include/trx0rseg.h +++ b/storage/xtradb/include/trx0rseg.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -124,13 +125,13 @@ trx_rseg_mem_free( /*==============*/ trx_rseg_t* rseg); /*!< in, own: instance to free */ -/********************************************************************* -Creates a rollback segment. */ +/** Create a rollback segment. +@param[in] space undo tablespace ID +@return pointer to new rollback segment +@retval NULL on failure */ UNIV_INTERN trx_rseg_t* -trx_rseg_create( -/*============*/ - ulint space); /*!< in: id of UNDO tablespace */ +trx_rseg_create(ulint space); /******************************************************************** Get the number of unique rollback tablespaces in use except space id 0. 
diff --git a/storage/xtradb/log/log0recv.cc b/storage/xtradb/log/log0recv.cc index 037bc9d35d3..fb64309cee4 100644 --- a/storage/xtradb/log/log0recv.cc +++ b/storage/xtradb/log/log0recv.cc @@ -3004,11 +3004,6 @@ recv_init_crash_recovery(void) possible */ if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) { - - ib_logf(IB_LOG_LEVEL_INFO, - "Restoring possible half-written data pages " - "from the doublewrite buffer..."); - buf_dblwr_process(); /* Spawn the background thread to flush dirty pages @@ -3276,24 +3271,6 @@ recv_recovery_from_checkpoint_start_func( user about recovery: */ if (checkpoint_lsn != flushed_lsn) { - - if (checkpoint_lsn file_unreadable = true; - err = DB_ERROR; - goto funct_exit; - } } } else { /* Lock all index trees for this table, as we will diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index cbab9b2f8c4..6fd8e8944f5 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -1601,26 +1601,18 @@ srv_undo_tablespaces_init( if (create_new_db) { mtr_t mtr; - bool ret=true; mtr_start(&mtr); /* The undo log tablespace */ for (i = 0; i < n_undo_tablespaces; ++i) { - ret = fsp_header_init( + fsp_header_init( undo_tablespace_ids[i], SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr); - if (!ret) { - break; - } } mtr_commit(&mtr); - - if (!ret) { - return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); - } } return(DB_SUCCESS); @@ -2507,14 +2499,24 @@ files_checked: mtr_start(&mtr); - bool ret = fsp_header_init(0, sum_of_new_sizes, &mtr); + fsp_header_init(0, sum_of_new_sizes, &mtr); + compile_time_assert(TRX_SYS_SPACE == 0); + compile_time_assert(IBUF_SPACE_ID == 0); + + ulint ibuf_root = btr_create( + DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, + 0, 0, DICT_IBUF_ID_MIN, + dict_ind_redundant, &mtr); mtr_commit(&mtr); - if (!ret) { - return (srv_init_abort(create_new_db, __FILE__, __LINE__, DB_ERROR)); + if (ibuf_root == FIL_NULL) { + return(srv_init_abort(true, __FILE__, __LINE__, + DB_ERROR)); } + ut_ad(ibuf_root == IBUF_TREE_ROOT_PAGE_NO); + /* To maintain backward compatibility we create only the first rollback segment before the double write buffer. All the remaining rollback segments will be created later, @@ -2901,10 +2903,9 @@ files_checked: /* fprintf(stderr, "Max allowed record size %lu\n", page_get_free_space_of_empty() / 2); */ - if (buf_dblwr == NULL) { - /* Create the doublewrite buffer to a new tablespace */ - - buf_dblwr_create(); + if (!buf_dblwr_create()) { + return(srv_init_abort(create_new_db, __FILE__, __LINE__, + DB_ERROR)); } /* Here the double write buffer has already been created and so diff --git a/storage/xtradb/trx/trx0rseg.cc b/storage/xtradb/trx/trx0rseg.cc index 21dbab98e48..16fa334872b 100644 --- a/storage/xtradb/trx/trx0rseg.cc +++ b/storage/xtradb/trx/trx0rseg.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -293,14 +294,13 @@ trx_rseg_create_instance( } } -/********************************************************************* -Creates a rollback segment. -@return pointer to new rollback segment if create successful */ +/** Create a rollback segment. 
+@param[in] space undo tablespace ID +@return pointer to new rollback segment +@retval NULL on failure */ UNIV_INTERN trx_rseg_t* -trx_rseg_create( -/*============*/ - ulint space) /*!< in: id of UNDO tablespace */ +trx_rseg_create(ulint space) { mtr_t mtr; ulint slot_no; @@ -323,25 +323,21 @@ trx_rseg_create( page_no = trx_rseg_header_create( space, 0, ULINT_MAX, slot_no, &mtr); - if (page_no == FIL_NULL) { - mtr_commit(&mtr); - return (rseg); - } - - sys_header = trx_sysf_get(&mtr); + if (page_no != FIL_NULL) { + sys_header = trx_sysf_get(&mtr); - id = trx_sysf_rseg_get_space(sys_header, slot_no, &mtr); - ut_a(id == space); + id = trx_sysf_rseg_get_space(sys_header, slot_no, &mtr); + ut_a(id == space); - zip_size = space ? fil_space_get_zip_size(space) : 0; + zip_size = space ? fil_space_get_zip_size(space) : 0; - rseg = trx_rseg_mem_create( - slot_no, space, zip_size, page_no, - purge_sys->ib_bh, &mtr); + rseg = trx_rseg_mem_create( + slot_no, space, zip_size, page_no, + purge_sys->ib_bh, &mtr); + } } mtr_commit(&mtr); - return(rseg); } -- cgit v1.2.1 From 58c56dd7f8e787c48b6f09a4d03cddd7c922588b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Thu, 8 Jun 2017 15:40:25 +0300 Subject: MDEV-12610: MariaDB start is slow Problem appears to be that the function fsp_flags_try_adjust() is being unconditionally invoked on every .ibd file on startup. Based on performance investigation also the top function fsp_header_get_crypt_offset() needs to addressed. Ported implementation of fsp_header_get_encryption_offset() function from 10.2 to fsp_header_get_crypt_offset(). Introduced a new function fil_crypt_read_crypt_data() to read page 0 if it is not yet read. fil_crypt_find_space_to_rotate(): Now that page 0 for every .ibd file is not read on startup we need to check has page 0 read from space that we investigate for key rotation, if it is not read we read it. fil_space_crypt_get_status(): Now that page 0 for every .ibd file is not read on startup here also we need to read page 0 if it is not yet read it. This is needed as tests use IS query to wait until background encryption or decryption has finished and this function is used to produce results. fil_crypt_thread(): Add is_stopping condition for tablespace so that we do not rotate pages if usage of tablespace should be stopped. This was needed for failure seen on regression testing. fil_space_create: Remove page_0_crypt_read and extra unnecessary info output. fil_open_single_table_tablespace(): We call fsp_flags_try_adjust only when when no errors has happened and server was not started on read only mode and tablespace validation was requested or flags contain other table options except low order bits to FSP_FLAGS_POS_PAGE_SSIZE position. fil_space_t::page_0_crypt_read removed. Added test case innodb-first-page-read to test startup when encryption is on and when encryption is off to check that not for all tables page 0 is read on startup. 
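The new fil_crypt_read_crypt_data() added in the diff below follows a common lazy-initialization shape: check under fil_system->mutex whether the crypt data is already cached, release the mutex for the expensive page-0 read, then re-check before storing the result so that crypt data filled in concurrently (for example by fil_node_open_file(), which also reads it) is not overwritten. A minimal stand-alone C sketch of that shape follows; it uses a pthread mutex and a strdup() call as stand-ins for fil_system->mutex and the buf_page_get() read of page 0, and every name in it is illustrative rather than an InnoDB API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;
static char* cached;                      /* stands in for space->crypt_data */

/* Expensive work done without holding the mutex; stands in for reading
   page 0 of the tablespace and parsing its crypt header. */
static char* read_page_zero(void) { return strdup("crypt data"); }

static void lazy_read(void)
{
    pthread_mutex_lock(&cache_mutex);
    if (cached == NULL) {                 /* not read yet */
        pthread_mutex_unlock(&cache_mutex);
        char* data = read_page_zero();    /* slow part, mutex released */
        pthread_mutex_lock(&cache_mutex);
        if (cached == NULL) {             /* re-check after re-acquiring */
            cached = data;
        } else {
            free(data);                   /* lost the race; keep the winner */
        }
    }
    pthread_mutex_unlock(&cache_mutex);
}

int main(void)
{
    lazy_read();
    lazy_read();                          /* second call finds the cache */
    printf("%s\n", cached);
    return 0;
}

The point of doing the read lazily is what the commit message describes: page 0 of every .ibd file no longer has to be read at startup, only when the encryption threads or a status query actually need the crypt data.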
--- storage/innobase/buf/buf0buf.cc | 1 + storage/innobase/fil/fil0crypt.cc | 61 +++++++++++++++++++++++++++++++++++--- storage/innobase/fil/fil0fil.cc | 41 +++++++++++-------------- storage/innobase/fsp/fsp0fsp.cc | 16 ++-------- storage/innobase/include/fil0fil.h | 3 -- storage/xtradb/buf/buf0buf.cc | 1 + storage/xtradb/fil/fil0crypt.cc | 61 +++++++++++++++++++++++++++++++++++--- storage/xtradb/fil/fil0fil.cc | 41 +++++++++++-------------- storage/xtradb/fsp/fsp0fsp.cc | 16 ++-------- storage/xtradb/include/fil0fil.h | 3 -- 10 files changed, 154 insertions(+), 90 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index b4ee3ebc067..334b2fbddb5 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -4549,6 +4549,7 @@ buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space) !bpage->encrypted && fil_space_verify_crypt_checksum(dst_frame, zip_size, space, bpage->offset)); + if (!still_encrypted) { /* If traditional checksums match, we assume that page is not anymore encrypted. */ diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index 2131a936656..70d8558ede2 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -1115,6 +1115,43 @@ fil_crypt_needs_rotation( return false; } +/** Read page 0 and possible crypt data from there. +@param[in] space Tablespace */ +static inline +void +fil_crypt_read_crypt_data(fil_space_t* space) +{ + mutex_enter(&fil_system->mutex); + + /* If space does not contain crypt data and space size is 0 + we have not yet read first page of tablespace. We need to + read it to find out tablespace current encryption status. */ + if (!space->crypt_data && space->size == 0) { + mtr_t mtr; + mtr_start(&mtr); + ulint zip_size = fsp_flags_get_zip_size(space->flags); + ulint offset = fsp_header_get_crypt_offset(zip_size); + mutex_exit(&fil_system->mutex); + if (buf_block_t* block = buf_page_get(space->id, zip_size, 0, + RW_X_LATCH, &mtr)) { + byte* frame = buf_block_get_frame(block); + + mutex_enter(&fil_system->mutex); + + if (!space->crypt_data) { + space->crypt_data = fil_space_read_crypt_data(space->id, + frame, offset); + } + + mutex_exit(&fil_system->mutex); + } + + mtr_commit(&mtr); + } else { + mutex_exit(&fil_system->mutex); + } +} + /*********************************************************************** Start encrypting a space @param[in,out] space Tablespace @@ -1125,6 +1162,7 @@ fil_crypt_start_encrypting_space( fil_space_t* space) { bool recheck = false; + mutex_enter(&fil_crypt_threads_mutex); fil_space_crypt_t *crypt_data = space->crypt_data; @@ -1191,8 +1229,6 @@ fil_crypt_start_encrypting_space( byte* frame = buf_block_get_frame(block); crypt_data->type = CRYPT_SCHEME_1; crypt_data->write_page0(frame, &mtr); - - mtr_commit(&mtr); /* record lsn of update */ @@ -1620,6 +1656,13 @@ fil_crypt_find_space_to_rotate( } while (!state->should_shutdown() && state->space) { + /* If there is no crypt data and we have not yet read + page 0 for this tablespace, we need to read it before + we can continue. 
*/ + if (!state->space->crypt_data) { + fil_crypt_read_crypt_data(state->space); + } + if (fil_crypt_space_needs_rotation(state, key_state, recheck)) { ut_ad(key_state->key_id); /* init state->min_key_version_found before @@ -2314,8 +2357,10 @@ DECLARE_THREAD(fil_crypt_thread)( while (!thr.should_shutdown() && fil_crypt_find_page_to_rotate(&new_state, &thr)) { - /* rotate a (set) of pages */ - fil_crypt_rotate_pages(&new_state, &thr); + if (!thr.space->is_stopping()) { + /* rotate a (set) of pages */ + fil_crypt_rotate_pages(&new_state, &thr); + } /* If space is marked as stopping, release space and stop rotation. */ @@ -2544,6 +2589,14 @@ fil_space_crypt_get_status( memset(status, 0, sizeof(*status)); ut_ad(space->n_pending_ops > 0); + + /* If there is no crypt data and we have not yet read + page 0 for this tablespace, we need to read it before + we can continue. */ + if (!space->crypt_data) { + fil_crypt_read_crypt_data(const_cast(space)); + } + fil_space_crypt_t* crypt_data = space->crypt_data; status->space = space->id; diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index f88bb2add59..ffb01312fdb 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -653,12 +653,10 @@ fil_node_open_file( /* Try to read crypt_data from page 0 if it is not yet read. */ - if (!node->space->page_0_crypt_read) { - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(flags)); - ut_ad(node->space->crypt_data == NULL); + if (!node->space->crypt_data) { + const ulint offset = fsp_header_get_crypt_offset( + fsp_flags_get_zip_size(flags)); node->space->crypt_data = fil_space_read_crypt_data(space_id, page, offset); - node->space->page_0_crypt_read = true; } ut_free(buf2); @@ -1557,22 +1555,6 @@ fil_space_create( space->magic_n = FIL_SPACE_MAGIC_N; space->crypt_data = crypt_data; - /* In create table we write page 0 so we have already - "read" it and for system tablespaces we have read - crypt data at startup. */ - if (create_table || crypt_data != NULL) { - space->page_0_crypt_read = true; - } - -#ifdef UNIV_DEBUG - ib_logf(IB_LOG_LEVEL_INFO, - "Created tablespace for space %lu name %s key_id %u encryption %d.", - space->id, - space->name, - space->crypt_data ? space->crypt_data->key_id : 0, - space->crypt_data ? space->crypt_data->encryption : 0); -#endif - rw_lock_create(fil_space_latch_key, &space->latch, SYNC_FSP); HASH_INSERT(fil_space_t, hash, fil_system->spaces, id, space); @@ -2401,8 +2383,8 @@ fil_read_first_page( /* Possible encryption crypt data is also stored only to first page of the first datafile. */ - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(*flags)); + const ulint offset = fsp_header_get_crypt_offset( + fsp_flags_get_zip_size(*flags)); cdata = fil_space_read_crypt_data(*space_id, page, offset); @@ -4018,6 +4000,7 @@ fsp_flags_try_adjust(ulint space_id, ulint flags) flags, MLOG_4BYTES, &mtr); } } + mtr_commit(&mtr); } @@ -4437,7 +4420,17 @@ cleanup_and_exit: mem_free(def.filepath); - if (err == DB_SUCCESS && !srv_read_only_mode) { + /* We need to check fsp flags when no errors has happened and + server was not started on read only mode and tablespace validation + was requested or flags contain other table options except + low order bits to FSP_FLAGS_POS_PAGE_SSIZE position. + Note that flag comparison is pessimistic. Adjust is required + only when flags contain buggy MariaDB 10.1.0 - + MariaDB 10.1.20 flags. 
*/ + if (err == DB_SUCCESS + && !srv_read_only_mode + && (validate + || flags >= (1U << FSP_FLAGS_POS_PAGE_SSIZE))) { fsp_flags_try_adjust(id, flags & ~FSP_FLAGS_MEM_MASK); } diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 4f05549bc1c..878b8d824c7 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -4124,20 +4124,8 @@ ulint fsp_header_get_crypt_offset( const ulint zip_size) { - ulint pageno = 0; - /* compute first page_no that will have xdes stored on page != 0*/ - for (ulint i = 0; - (pageno = xdes_calc_descriptor_page(zip_size, i)) == 0; ) - i++; - - /* use pageno prior to this...i.e last page on page 0 */ - ut_ad(pageno > 0); - pageno--; - - ulint iv_offset = XDES_ARR_OFFSET + - XDES_SIZE * (1 + xdes_calc_descriptor_index(zip_size, pageno)); - - return FSP_HEADER_OFFSET + iv_offset; + return (FSP_HEADER_OFFSET + (XDES_ARR_OFFSET + XDES_SIZE * + (zip_size ? zip_size : UNIV_PAGE_SIZE) / FSP_EXTENT_SIZE)); } /**********************************************************************//** diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index ba440cb2a1c..e16c7cb102e 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -351,9 +351,6 @@ struct fil_space_t { compression failure */ fil_space_crypt_t* crypt_data; /*!< tablespace crypt data or NULL */ - bool page_0_crypt_read; - /*!< tablespace crypt data has been - read */ ulint file_block_size; /*!< file system block size */ diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index c72900bd082..54f3ac311be 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -4636,6 +4636,7 @@ buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space) !bpage->encrypted && fil_space_verify_crypt_checksum(dst_frame, zip_size, space, bpage->offset)); + if (!still_encrypted) { /* If traditional checksums match, we assume that page is not anymore encrypted. */ diff --git a/storage/xtradb/fil/fil0crypt.cc b/storage/xtradb/fil/fil0crypt.cc index 21c1e3b730e..e24278dd102 100644 --- a/storage/xtradb/fil/fil0crypt.cc +++ b/storage/xtradb/fil/fil0crypt.cc @@ -1115,6 +1115,43 @@ fil_crypt_needs_rotation( return false; } +/** Read page 0 and possible crypt data from there. +@param[in] space Tablespace */ +static inline +void +fil_crypt_read_crypt_data(fil_space_t* space) +{ + mutex_enter(&fil_system->mutex); + + /* If space does not contain crypt data and space size is 0 + we have not yet read first page of tablespace. We need to + read it to find out tablespace current encryption status. 
*/ + if (!space->crypt_data && space->size == 0) { + mtr_t mtr; + mtr_start(&mtr); + ulint zip_size = fsp_flags_get_zip_size(space->flags); + ulint offset = fsp_header_get_crypt_offset(zip_size); + mutex_exit(&fil_system->mutex); + if (buf_block_t* block = buf_page_get(space->id, zip_size, 0, + RW_X_LATCH, &mtr)) { + byte* frame = buf_block_get_frame(block); + + mutex_enter(&fil_system->mutex); + + if (!space->crypt_data) { + space->crypt_data = fil_space_read_crypt_data(space->id, + frame, offset); + } + + mutex_exit(&fil_system->mutex); + } + + mtr_commit(&mtr); + } else { + mutex_exit(&fil_system->mutex); + } +} + /*********************************************************************** Start encrypting a space @param[in,out] space Tablespace @@ -1125,6 +1162,7 @@ fil_crypt_start_encrypting_space( fil_space_t* space) { bool recheck = false; + mutex_enter(&fil_crypt_threads_mutex); fil_space_crypt_t *crypt_data = space->crypt_data; @@ -1191,8 +1229,6 @@ fil_crypt_start_encrypting_space( byte* frame = buf_block_get_frame(block); crypt_data->type = CRYPT_SCHEME_1; crypt_data->write_page0(frame, &mtr); - - mtr_commit(&mtr); /* record lsn of update */ @@ -1620,6 +1656,13 @@ fil_crypt_find_space_to_rotate( } while (!state->should_shutdown() && state->space) { + /* If there is no crypt data and we have not yet read + page 0 for this tablespace, we need to read it before + we can continue. */ + if (!state->space->crypt_data) { + fil_crypt_read_crypt_data(state->space); + } + if (fil_crypt_space_needs_rotation(state, key_state, recheck)) { ut_ad(key_state->key_id); /* init state->min_key_version_found before @@ -2314,8 +2357,10 @@ DECLARE_THREAD(fil_crypt_thread)( while (!thr.should_shutdown() && fil_crypt_find_page_to_rotate(&new_state, &thr)) { - /* rotate a (set) of pages */ - fil_crypt_rotate_pages(&new_state, &thr); + if (!thr.space->is_stopping()) { + /* rotate a (set) of pages */ + fil_crypt_rotate_pages(&new_state, &thr); + } /* If space is marked as stopping, release space and stop rotation. */ @@ -2545,6 +2590,14 @@ fil_space_crypt_get_status( memset(status, 0, sizeof(*status)); ut_ad(space->n_pending_ops > 0); + + /* If there is no crypt data and we have not yet read + page 0 for this tablespace, we need to read it before + we can continue. */ + if (!space->crypt_data) { + fil_crypt_read_crypt_data(const_cast(space)); + } + fil_space_crypt_t* crypt_data = space->crypt_data; status->space = space->id; diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index d979c05c9a6..12048bc479f 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -661,12 +661,10 @@ fil_node_open_file( /* Try to read crypt_data from page 0 if it is not yet read. */ - if (!node->space->page_0_crypt_read) { - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(flags)); - ut_ad(node->space->crypt_data == NULL); + if (!node->space->crypt_data) { + const ulint offset = fsp_header_get_crypt_offset( + fsp_flags_get_zip_size(flags)); node->space->crypt_data = fil_space_read_crypt_data(space_id, page, offset); - node->space->page_0_crypt_read = true; } ut_free(buf2); @@ -1600,22 +1598,6 @@ fil_space_create( space->magic_n = FIL_SPACE_MAGIC_N; space->crypt_data = crypt_data; - /* In create table we write page 0 so we have already - "read" it and for system tablespaces we have read - crypt data at startup. 
*/ - if (create_table || crypt_data != NULL) { - space->page_0_crypt_read = true; - } - -#ifdef UNIV_DEBUG - ib_logf(IB_LOG_LEVEL_INFO, - "Created tablespace for space %lu name %s key_id %u encryption %d.", - space->id, - space->name, - space->crypt_data ? space->crypt_data->key_id : 0, - space->crypt_data ? space->crypt_data->encryption : 0); -#endif - rw_lock_create(fil_space_latch_key, &space->latch, SYNC_FSP); HASH_INSERT(fil_space_t, hash, fil_system->spaces, id, space); @@ -2463,8 +2445,8 @@ fil_read_first_page( /* Possible encryption crypt data is also stored only to first page of the first datafile. */ - ulint offset = fsp_header_get_crypt_offset( - fsp_flags_get_zip_size(*flags)); + const ulint offset = fsp_header_get_crypt_offset( + fsp_flags_get_zip_size(*flags)); cdata = fil_space_read_crypt_data(*space_id, page, offset); @@ -4211,6 +4193,7 @@ fsp_flags_try_adjust(ulint space_id, ulint flags) flags, MLOG_4BYTES, &mtr); } } + mtr_commit(&mtr); } @@ -4631,7 +4614,17 @@ cleanup_and_exit: mem_free(def.filepath); - if (err == DB_SUCCESS && !srv_read_only_mode) { + /* We need to check fsp flags when no errors has happened and + server was not started on read only mode and tablespace validation + was requested or flags contain other table options except + low order bits to FSP_FLAGS_POS_PAGE_SSIZE position. + Note that flag comparison is pessimistic. Adjust is required + only when flags contain buggy MariaDB 10.1.0 - + MariaDB 10.1.20 flags. */ + if (err == DB_SUCCESS + && !srv_read_only_mode + && (validate + || flags >= (1U << FSP_FLAGS_POS_PAGE_SSIZE))) { fsp_flags_try_adjust(id, flags & ~FSP_FLAGS_MEM_MASK); } diff --git a/storage/xtradb/fsp/fsp0fsp.cc b/storage/xtradb/fsp/fsp0fsp.cc index bd87b88f58d..40a9faa6914 100644 --- a/storage/xtradb/fsp/fsp0fsp.cc +++ b/storage/xtradb/fsp/fsp0fsp.cc @@ -4150,20 +4150,8 @@ ulint fsp_header_get_crypt_offset( const ulint zip_size) { - ulint pageno = 0; - /* compute first page_no that will have xdes stored on page != 0*/ - for (ulint i = 0; - (pageno = xdes_calc_descriptor_page(zip_size, i)) == 0; ) - i++; - - /* use pageno prior to this...i.e last page on page 0 */ - ut_ad(pageno > 0); - pageno--; - - ulint iv_offset = XDES_ARR_OFFSET + - XDES_SIZE * (1 + xdes_calc_descriptor_index(zip_size, pageno)); - - return FSP_HEADER_OFFSET + iv_offset; + return (FSP_HEADER_OFFSET + (XDES_ARR_OFFSET + XDES_SIZE * + (zip_size ? 
zip_size : UNIV_PAGE_SIZE) / FSP_EXTENT_SIZE)); } /**********************************************************************//** diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index b861225f562..a09833c3a73 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -350,9 +350,6 @@ struct fil_space_t { compression failure */ fil_space_crypt_t* crypt_data; /*!< tablespace crypt data or NULL */ - bool page_0_crypt_read; - /*!< tablespace crypt data has been - read */ ulint file_block_size; /*!< file system block size */ -- cgit v1.2.1 From a9117c90083d3f26e43c460073e06f9562b66f44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 9 Jun 2017 13:44:04 +0300 Subject: Correct a merge error of MDEV-11626 --- storage/xtradb/buf/buf0buf.cc | 1 + 1 file changed, 1 insertion(+) (limited to 'storage') diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index 26b95926e86..1b031f5eabc 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -3201,6 +3201,7 @@ got_block: } if (buf_flush_page_try(buf_pool, fix_block)) { + guess = fix_block; goto loop; } -- cgit v1.2.1 From 417434f12dba3ccf5b79ca4e7afe72fe27807fd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 8 Jun 2017 15:43:06 +0300 Subject: MDEV-13039 innodb_fast_shutdown=0 may fail to purge all undo log When a slow shutdown is performed soon after spawning some work for background threads that can create or commit transactions, it is possible that new transactions are started or committed after the purge has finished. This is violating the specification of innodb_fast_shutdown=0, namely that the purge must be completed. (None of the history of the recent transactions would be purged.) Also, it is possible that the purge threads would exit in slow shutdown while there exist active transactions, such as recovered incomplete transactions that are being rolled back. Thus, the slow shutdown could fail to purge some undo log that becomes purgeable after the transaction commit or rollback. srv_undo_sources: A flag that indicates if undo log can be generated or the persistent, whether by background threads or by user SQL. Even when this flag is clear, active transactions that already exist in the system may be committed or rolled back. innodb_shutdown(): Renamed from innobase_shutdown_for_mysql(). Do not return an error code; the operation never fails. Clear the srv_undo_sources flag, and also ensure that the background DROP TABLE queue is empty. srv_purge_should_exit(): Do not allow the purge to exit if srv_undo_sources are active or the background DROP TABLE queue is not empty, or in slow shutdown, if any active transactions exist (and are being rolled back). srv_purge_coordinator_thread(): Remove some previous workarounds for this bug. innobase_start_or_create_for_mysql(): Set buf_page_cleaner_is_active and srv_dict_stats_thread_active directly. Set srv_undo_sources before starting the purge subsystem, to prevent immediate shutdown of the purge. Create dict_stats_thread and fts_optimize_thread immediately after setting srv_undo_sources, so that shutdown can use this flag to determine if these subsystems were started. dict_stats_shutdown(): Shut down dict_stats_thread. Backported from 10.2. srv_shutdown_table_bg_threads(): Remove (unused). 
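Before the hunks below, here is a minimal standalone C++ sketch of the purge-exit rule this commit message describes. The flag names mirror the message (srv_undo_sources, srv_fast_shutdown), but the scaffolding and the active-transaction helper are illustrative assumptions, not the actual server code.

#include <atomic>

// Illustrative stand-in for trx_sys_any_active_transactions(); assumption only.
static unsigned long active_transactions() { return 0; }

// Set while undo log can still be generated, by background threads or user SQL.
static std::atomic<bool> srv_undo_sources{true};
// 0 = slow shutdown (innodb_fast_shutdown=0); nonzero = fast shutdown.
static std::atomic<unsigned> srv_fast_shutdown{0};

// Purge may exit only when no new undo can appear; in a slow shutdown it must
// also have completed an empty batch while no active (for example, recovered)
// transactions remain to be rolled back.
static bool purge_should_exit(unsigned long n_purged_in_last_batch)
{
	if (srv_undo_sources.load())  return false; // undo can still be generated
	if (srv_fast_shutdown.load()) return true;  // fast shutdown: exit at once
	if (n_purged_in_last_batch)   return false; // previous round still did work
	return active_transactions() == 0;          // wait out remaining rollbacks
}

int main()
{
	srv_undo_sources = false;            // as innodb_shutdown() would do
	return purge_should_exit(0) ? 0 : 1; // slow shutdown, nothing left: exit
}

In the patch itself, innodb_shutdown() drains the background DROP TABLE queue before clearing srv_undo_sources, so the exit condition cannot become true while background work can still add undo records.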
--- storage/innobase/buf/buf0flu.cc | 8 +- storage/innobase/dict/dict0stats_bg.cc | 35 ++++++--- storage/innobase/handler/ha_innodb.cc | 29 ++----- storage/innobase/include/buf0flu.h | 2 +- storage/innobase/include/dict0stats_bg.h | 6 +- storage/innobase/include/srv0srv.h | 2 +- storage/innobase/include/srv0start.h | 20 ++--- storage/innobase/srv/srv0srv.cc | 71 +++++------------ storage/innobase/srv/srv0start.cc | 128 +++++++------------------------ storage/innobase/trx/trx0purge.cc | 13 ++++ storage/xtradb/buf/buf0flu.cc | 8 +- storage/xtradb/dict/dict0stats_bg.cc | 35 ++++++--- storage/xtradb/handler/ha_innodb.cc | 29 ++----- storage/xtradb/include/buf0flu.h | 2 +- storage/xtradb/include/dict0stats_bg.h | 6 +- storage/xtradb/include/srv0srv.h | 2 +- storage/xtradb/include/srv0start.h | 20 ++--- storage/xtradb/srv/srv0srv.cc | 71 +++++------------ storage/xtradb/srv/srv0start.cc | 128 +++++++------------------------ storage/xtradb/trx/trx0purge.cc | 13 ++++ 20 files changed, 226 insertions(+), 402 deletions(-) (limited to 'storage') diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 486b7b4db0c..643c5287e17 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -58,7 +58,7 @@ is set to TRUE by the page_cleaner thread when it is spawned and is set back to FALSE at shutdown by the page_cleaner as well. Therefore no need to protect it by a mutex. It is only ever read by the thread doing the shutdown */ -UNIV_INTERN ibool buf_page_cleaner_is_active = FALSE; +UNIV_INTERN bool buf_page_cleaner_is_active; /** LRU flush batch is further divided into this chunk size to reduce the wait time for the threads waiting for a clean block */ @@ -2422,8 +2422,6 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( os_thread_pf(os_thread_get_curr_id())); #endif /* UNIV_DEBUG_THREAD_CREATION */ - buf_page_cleaner_is_active = TRUE; - while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { page_cleaner_sleep_if_needed(next_loop_time); @@ -2517,7 +2515,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( /* We have lived our life. Time to die. */ thread_exit: - buf_page_cleaner_is_active = FALSE; + buf_page_cleaner_is_active = false; my_thread_end(); /* We count the number of threads in os_thread_exit(). A created diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc index 40a2a1488f8..75214a1b2e9 100644 --- a/storage/innobase/dict/dict0stats_bg.cc +++ b/storage/innobase/dict/dict0stats_bg.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -38,12 +38,18 @@ Created Apr 25, 2012 Vasil Dimov /** Minimum time interval between stats recalc for a given table */ #define MIN_RECALC_INTERVAL 10 /* seconds */ -#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE) - /** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add() or shutdown. Not protected by any mutex. */ UNIV_INTERN os_event_t dict_stats_event; +/** Variable to initiate shutdown the dict stats thread. Note we don't +use 'srv_shutdown_state' because we want to shutdown dict stats thread +before purge thread. */ +static bool dict_stats_start_shutdown; + +/** Event to wait for shutdown of the dict stats thread */ +static os_event_t dict_stats_shutdown_event; + /** This mutex protects the "recalc_pool" variable. */ static ib_mutex_t recalc_pool_mutex; #ifdef HAVE_PSI_INTERFACE @@ -217,11 +223,11 @@ Must be called before dict_stats_thread() is started. */ UNIV_INTERN void dict_stats_thread_init() -/*====================*/ { ut_a(!srv_read_only_mode); dict_stats_event = os_event_create(); + dict_stats_shutdown_event = os_event_create(); /* The recalc_pool_mutex is acquired from: 1) the background stats gathering thread before any other latch @@ -260,6 +266,9 @@ dict_stats_thread_deinit() os_event_free(dict_stats_event); dict_stats_event = NULL; + os_event_free(dict_stats_shutdown_event); + dict_stats_shutdown_event = NULL; + dict_stats_start_shutdown = false; } /*****************************************************************//** @@ -349,9 +358,7 @@ DECLARE_THREAD(dict_stats_thread)( my_thread_init(); ut_a(!srv_read_only_mode); - srv_dict_stats_thread_active = TRUE; - - while (!SHUTTING_DOWN()) { + while (!dict_stats_start_shutdown) { /* Wake up periodically even if not signaled. This is because we may lose an event - if the below call to @@ -361,7 +368,7 @@ DECLARE_THREAD(dict_stats_thread)( os_event_wait_time( dict_stats_event, MIN_RECALC_INTERVAL * 1000000); - if (SHUTTING_DOWN()) { + if (dict_stats_start_shutdown) { break; } @@ -370,8 +377,9 @@ DECLARE_THREAD(dict_stats_thread)( os_event_reset(dict_stats_event); } - srv_dict_stats_thread_active = FALSE; + srv_dict_stats_thread_active = false; + os_event_set(dict_stats_shutdown_event); my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit instead of return(). */ @@ -379,3 +387,12 @@ DECLARE_THREAD(dict_stats_thread)( OS_THREAD_DUMMY_RETURN; } + +/** Shut down the dict_stats_thread. */ +void +dict_stats_shutdown() +{ + dict_stats_start_shutdown = true; + os_event_set(dict_stats_event); + os_event_wait(dict_stats_shutdown_event); +} diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 091f660dcee..e77c9bb4caf 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1142,14 +1142,11 @@ innobase_drop_database( the path is used as the database name: for example, in 'mysql/data/test' the database name is 'test' */ -/*******************************************************************//** -Closes an InnoDB database. */ +/** Shut down the InnoDB storage engine. 
+@return 0 */ static int -innobase_end( -/*=========*/ - handlerton* hton, /* in: Innodb handlerton */ - ha_panic_function type); +innobase_end(handlerton*, ha_panic_function); /*****************************************************************//** Creates an InnoDB transaction struct for the thd if it does not yet have one. @@ -3651,21 +3648,13 @@ error: DBUG_RETURN(TRUE); } -/*******************************************************************//** -Closes an InnoDB database. -@return TRUE if error */ +/** Shut down the InnoDB storage engine. +@return 0 */ static int -innobase_end( -/*=========*/ - handlerton* hton, /*!< in/out: InnoDB handlerton */ - ha_panic_function type MY_ATTRIBUTE((unused))) - /*!< in: ha_panic() parameter */ +innobase_end(handlerton*, ha_panic_function) { - int err= 0; - DBUG_ENTER("innobase_end"); - DBUG_ASSERT(hton == innodb_hton_ptr); if (innodb_inited) { @@ -3682,9 +3671,7 @@ innobase_end( innodb_inited = 0; hash_table_free(innobase_open_tables); innobase_open_tables = NULL; - if (innobase_shutdown_for_mysql() != DB_SUCCESS) { - err = 1; - } + innodb_shutdown(); srv_free_paths_and_sizes(); my_free(internal_innobase_data_file_path); mysql_mutex_destroy(&innobase_share_mutex); @@ -3693,7 +3680,7 @@ innobase_end( mysql_mutex_destroy(&pending_checkpoint_mutex); } - DBUG_RETURN(err); + DBUG_RETURN(0); } /****************************************************************//** diff --git a/storage/innobase/include/buf0flu.h b/storage/innobase/include/buf0flu.h index 0c5812f802c..4a0781ffeda 100644 --- a/storage/innobase/include/buf0flu.h +++ b/storage/innobase/include/buf0flu.h @@ -34,7 +34,7 @@ Created 11/5/1995 Heikki Tuuri #include "buf0types.h" /** Flag indicating if the page_cleaner is in active state. */ -extern ibool buf_page_cleaner_is_active; +extern bool buf_page_cleaner_is_active; /********************************************************************//** Remove a block from the flush list of modified blocks. */ diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h index 1973380d197..2bd441d5f5e 100644 --- a/storage/innobase/include/dict0stats_bg.h +++ b/storage/innobase/include/dict0stats_bg.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -122,6 +122,10 @@ DECLARE_THREAD(dict_stats_thread)( void* arg); /*!< in: a dummy parameter required by os_thread_create */ +/** Shut down the dict_stats_thread. 
*/ +void +dict_stats_shutdown(); + # ifndef UNIV_NONINL # include "dict0stats_bg.ic" # endif diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 696d3892db2..190397595ee 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -398,7 +398,7 @@ extern ibool srv_error_monitor_active; extern ibool srv_buf_dump_thread_active; /* TRUE during the lifetime of the stats thread */ -extern ibool srv_dict_stats_thread_active; +extern bool srv_dict_stats_thread_active; extern ulong srv_n_spin_wait_rounds; extern ulong srv_n_free_tickets_to_enter; diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h index a60776a4665..b055a9d834f 100644 --- a/storage/innobase/include/srv0start.h +++ b/storage/innobase/include/srv0start.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -75,22 +76,12 @@ are not found and the user wants. @return DB_SUCCESS or error code */ UNIV_INTERN dberr_t -innobase_start_or_create_for_mysql(void); -/*====================================*/ -/****************************************************************//** -Shuts down the Innobase database. -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -innobase_shutdown_for_mysql(void); +innobase_start_or_create_for_mysql(); -/******************************************************************** -Signal all per-table background threads to shutdown, and wait for them to do -so. */ +/** Shut down InnoDB. */ UNIV_INTERN void -srv_shutdown_table_bg_threads(void); -/*=============================*/ +innodb_shutdown(); /*************************************************************//** Copy the file path component of the physical file to parameter. It will @@ -158,6 +149,9 @@ enum srv_shutdown_state { SRV_SHUTDOWN_EXIT_THREADS/*!< Exit all threads */ }; +/** Whether any undo log records can be generated */ +extern bool srv_undo_sources; + /** At a shutdown this value climbs from SRV_SHUTDOWN_NONE to SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ extern enum srv_shutdown_state srv_shutdown_state; diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index bad1579070c..10baf546e5b 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -84,7 +84,7 @@ UNIV_INTERN ibool srv_error_monitor_active = FALSE; UNIV_INTERN ibool srv_buf_dump_thread_active = FALSE; -UNIV_INTERN ibool srv_dict_stats_thread_active = FALSE; +UNIV_INTERN bool srv_dict_stats_thread_active; UNIV_INTERN const char* srv_main_thread_op_info = ""; @@ -2424,31 +2424,29 @@ suspend_thread: goto loop; } -/*********************************************************************//** -Check if purge should stop. -@return true if it should shutdown. */ +/** Check if purge should stop. +@param[in] n_purged pages purged in the last batch +@return whether purge should exit */ static bool -srv_purge_should_exit( -/*==============*/ - ulint n_purged) /*!< in: pages purged in last batch */ +srv_purge_should_exit(ulint n_purged) { - switch (srv_shutdown_state) { - case SRV_SHUTDOWN_NONE: - /* Normal operation. 
*/ - break; + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_NONE + || srv_shutdown_state == SRV_SHUTDOWN_CLEANUP); - case SRV_SHUTDOWN_CLEANUP: - case SRV_SHUTDOWN_EXIT_THREADS: - /* Exit unless slow shutdown requested or all done. */ - return(srv_fast_shutdown != 0 || n_purged == 0); - - case SRV_SHUTDOWN_LAST_PHASE: - case SRV_SHUTDOWN_FLUSH_PHASE: - ut_error; + if (srv_undo_sources) { + return(false); } - - return(false); + if (srv_fast_shutdown) { + return(true); + } + /* Slow shutdown was requested. */ + if (n_purged) { + /* The previous round still did some work. */ + return(false); + } + /* Exit if there are no active transactions to roll back. */ + return(trx_sys_any_active_transactions() == 0); } /*********************************************************************//** @@ -2709,7 +2707,7 @@ srv_purge_coordinator_suspend( } rw_lock_x_unlock(&purge_sys->latch); - } while (stop); + } while (stop && srv_undo_sources); srv_resume_thread(slot, 0, false); } @@ -2760,6 +2758,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( purge didn't purge any records then wait for activity. */ if (srv_shutdown_state == SRV_SHUTDOWN_NONE + && srv_undo_sources && (purge_sys->state == PURGE_STATE_STOP || n_total_purged == 0)) { @@ -2776,36 +2775,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( rseg_history_len = srv_do_purge( srv_n_purge_threads, &n_total_purged); - } while (!srv_purge_should_exit(n_total_purged)); - /* Ensure that we don't jump out of the loop unless the - exit condition is satisfied. */ - - ut_a(srv_purge_should_exit(n_total_purged)); - - ulint n_pages_purged = ULINT_MAX; - - /* Ensure that all records are purged if it is not a fast shutdown. - This covers the case where a record can be added after we exit the - loop above. */ - while (srv_fast_shutdown == 0 && n_pages_purged > 0) { - n_pages_purged = trx_purge(1, srv_purge_batch_size, false); - } - - /* This trx_purge is called to remove any undo records (added by - background threads) after completion of the above loop. When - srv_fast_shutdown != 0, a large batch size can cause significant - delay in shutdown ,so reducing the batch size to magic number 20 - (which was default in 5.5), which we hope will be sufficient to - remove all the undo records */ - const uint temp_batch_size = 20; - - n_pages_purged = trx_purge(1, srv_purge_batch_size <= temp_batch_size - ? srv_purge_batch_size : temp_batch_size, - true); - ut_a(n_pages_purged == 0 || srv_fast_shutdown != 0); - /* The task queue should always be empty, independent of fast shutdown state. */ ut_a(srv_get_task_queue_length() == 0); diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index c70260ff1a3..789fe50d337 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -121,7 +121,10 @@ UNIV_INTERN ibool srv_is_being_started = FALSE; /** TRUE if the server was successfully started */ UNIV_INTERN ibool srv_was_started = FALSE; /** TRUE if innobase_start_or_create_for_mysql() has been called */ -static ibool srv_start_has_been_called = FALSE; +static ibool srv_start_has_been_called; + +/** Whether any undo log records can be generated */ +UNIV_INTERN bool srv_undo_sources; /** At a shutdown this value climbs from SRV_SHUTDOWN_NONE to SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ @@ -1565,8 +1568,7 @@ are not found and the user wants. 
@return DB_SUCCESS or error code */ UNIV_INTERN dberr_t -innobase_start_or_create_for_mysql(void) -/*====================================*/ +innobase_start_or_create_for_mysql() { ibool create_new_db; lsn_t min_flushed_lsn; @@ -2705,8 +2707,8 @@ files_checked: } } - srv_startup_is_before_trx_rollback_phase = FALSE; recv_recovery_rollback_active(); + srv_startup_is_before_trx_rollback_phase = FALSE; /* It is possible that file_format tag has never been set. In this case we initialize it to minimum @@ -2827,6 +2829,16 @@ files_checked: srv_master_thread, NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS)); thread_started[1 + SRV_MAX_N_IO_THREADS] = true; + + srv_undo_sources = true; + /* Create the dict stats gathering thread */ + srv_dict_stats_thread_active = true; + dict_stats_thread_handle = os_thread_create( + dict_stats_thread, NULL, NULL); + dict_stats_thread_started = true; + + /* Create the thread that will optimize the FTS sub-system. */ + fts_optimize_init(); } if (!srv_read_only_mode @@ -2856,6 +2868,7 @@ files_checked: } if (!srv_read_only_mode) { + buf_page_cleaner_is_active = true; buf_flush_page_cleaner_thread_handle = os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL); buf_flush_page_cleaner_thread_started = true; } @@ -2885,13 +2898,6 @@ files_checked: /* Create the buffer pool dump/load thread */ buf_dump_thread_handle = os_thread_create(buf_dump_thread, NULL, NULL); buf_dump_thread_started = true; - - /* Create the dict stats gathering thread */ - dict_stats_thread_handle = os_thread_create(dict_stats_thread, NULL, NULL); - dict_stats_thread_started = true; - - /* Create the thread that will optimize the FTS sub-system. */ - fts_optimize_init(); } srv_was_started = TRUE; @@ -2929,13 +2935,10 @@ srv_fts_close(void) } #endif -/****************************************************************//** -Shuts down the InnoDB database. -@return DB_SUCCESS or error code */ +/** Shut down InnoDB. */ UNIV_INTERN -dberr_t -innobase_shutdown_for_mysql(void) -/*=============================*/ +void +innodb_shutdown() { ulint i; @@ -2945,15 +2948,20 @@ innobase_shutdown_for_mysql(void) "Shutting down an improperly started, " "or created database!"); } - - return(DB_SUCCESS); } - if (!srv_read_only_mode) { + if (srv_undo_sources) { + ut_ad(!srv_read_only_mode); /* Shutdown the FTS optimize sub system. */ fts_optimize_start_shutdown(); fts_optimize_end(); + dict_stats_shutdown(); + while (row_get_background_drop_list_len_low()) { + srv_wake_master_thread(); + os_thread_yield(); + } + srv_undo_sources = false; } /* 1. Flush the buffer pool to disk, write the current lsn to @@ -3156,89 +3164,9 @@ innobase_shutdown_for_mysql(void) srv_was_started = FALSE; srv_start_has_been_called = FALSE; - - return(DB_SUCCESS); } #endif /* !UNIV_HOTBACKUP */ - -/******************************************************************** -Signal all per-table background threads to shutdown, and wait for them to do -so. */ -UNIV_INTERN -void -srv_shutdown_table_bg_threads(void) -/*===============================*/ -{ - dict_table_t* table; - dict_table_t* first; - dict_table_t* last = NULL; - - mutex_enter(&dict_sys->mutex); - - /* Signal all threads that they should stop. 
*/ - table = UT_LIST_GET_FIRST(dict_sys->table_LRU); - first = table; - while (table) { - dict_table_t* next; - fts_t* fts = table->fts; - - if (fts != NULL) { - fts_start_shutdown(table, fts); - } - - next = UT_LIST_GET_NEXT(table_LRU, table); - - if (!next) { - last = table; - } - - table = next; - } - - /* We must release dict_sys->mutex here; if we hold on to it in the - loop below, we will deadlock if any of the background threads try to - acquire it (for example, the FTS thread by calling que_eval_sql). - - Releasing it here and going through dict_sys->table_LRU without - holding it is safe because: - - a) MySQL only starts the shutdown procedure after all client - threads have been disconnected and no new ones are accepted, so no - new tables are added or old ones dropped. - - b) Despite its name, the list is not LRU, and the order stays - fixed. - - To safeguard against the above assumptions ever changing, we store - the first and last items in the list above, and then check that - they've stayed the same below. */ - - mutex_exit(&dict_sys->mutex); - - /* Wait for the threads of each table to stop. This is not inside - the above loop, because by signaling all the threads first we can - overlap their shutting down delays. */ - table = UT_LIST_GET_FIRST(dict_sys->table_LRU); - ut_a(first == table); - while (table) { - dict_table_t* next; - fts_t* fts = table->fts; - - if (fts != NULL) { - fts_shutdown(table, fts); - } - - next = UT_LIST_GET_NEXT(table_LRU, table); - - if (table == last) { - ut_a(!next); - } - - table = next; - } -} - /*****************************************************************//** Get the meta-data filename from the table name. */ UNIV_INTERN diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 8aae2969b39..cfe6309c36b 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -243,6 +243,19 @@ trx_purge_add_update_undo_to_history( hist_size + undo->size, MLOG_4BYTES, mtr); } + /* Before any transaction-generating background threads or the + purge have been started, recv_recovery_rollback_active() can + start transactions in row_merge_drop_temp_indexes() and + fts_drop_orphaned_tables(), and roll back recovered transactions. + After the purge thread has been given permission to exit, + in fast shutdown, we may roll back transactions (trx->undo_no==0) + in THD::cleanup() invoked from unlink_thd(). */ + ut_ad(srv_undo_sources + || ((srv_startup_is_before_trx_rollback_phase + || trx_rollback_or_clean_is_active) + && purge_sys->state == PURGE_STATE_INIT) + || (trx->undo_no == 0 && srv_fast_shutdown)); + /* Add the log as the first in the history list */ flst_add_first(rseg_header + TRX_RSEG_HISTORY, undo_header + TRX_UNDO_HISTORY_NODE, mtr); diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc index 4781b874032..955be5a6161 100644 --- a/storage/xtradb/buf/buf0flu.cc +++ b/storage/xtradb/buf/buf0flu.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -58,7 +58,7 @@ is set to TRUE by the page_cleaner thread when it is spawned and is set back to FALSE at shutdown by the page_cleaner as well. 
Therefore no need to protect it by a mutex. It is only ever read by the thread doing the shutdown */ -UNIV_INTERN ibool buf_page_cleaner_is_active = FALSE; +UNIV_INTERN bool buf_page_cleaner_is_active; /** Flag indicating if the lru_manager is in active state. */ UNIV_INTERN bool buf_lru_manager_is_active = false; @@ -2718,8 +2718,6 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( os_thread_pf(os_thread_get_curr_id())); #endif /* UNIV_DEBUG_THREAD_CREATION */ - buf_page_cleaner_is_active = TRUE; - while (srv_shutdown_state == SRV_SHUTDOWN_NONE) { ulint page_cleaner_sleep_time; @@ -2829,7 +2827,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)( /* We have lived our life. Time to die. */ thread_exit: - buf_page_cleaner_is_active = FALSE; + buf_page_cleaner_is_active = false; my_thread_end(); /* We count the number of threads in os_thread_exit(). A created diff --git a/storage/xtradb/dict/dict0stats_bg.cc b/storage/xtradb/dict/dict0stats_bg.cc index 8cb419c6c37..4d646ed3081 100644 --- a/storage/xtradb/dict/dict0stats_bg.cc +++ b/storage/xtradb/dict/dict0stats_bg.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -38,12 +38,18 @@ Created Apr 25, 2012 Vasil Dimov /** Minimum time interval between stats recalc for a given table */ #define MIN_RECALC_INTERVAL 10 /* seconds */ -#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE) - /** Event to wake up dict_stats_thread on dict_stats_recalc_pool_add() or shutdown. Not protected by any mutex. */ UNIV_INTERN os_event_t dict_stats_event; +/** Variable to initiate shutdown the dict stats thread. Note we don't +use 'srv_shutdown_state' because we want to shutdown dict stats thread +before purge thread. */ +static bool dict_stats_start_shutdown; + +/** Event to wait for shutdown of the dict stats thread */ +static os_event_t dict_stats_shutdown_event; + /** This mutex protects the "recalc_pool" variable. */ static ib_mutex_t recalc_pool_mutex; #ifdef HAVE_PSI_INTERFACE @@ -207,11 +213,11 @@ Must be called before dict_stats_thread() is started. */ UNIV_INTERN void dict_stats_thread_init() -/*====================*/ { ut_a(!srv_read_only_mode); dict_stats_event = os_event_create(); + dict_stats_shutdown_event = os_event_create(); /* The recalc_pool_mutex is acquired from: 1) the background stats gathering thread before any other latch @@ -250,6 +256,9 @@ dict_stats_thread_deinit() os_event_free(dict_stats_event); dict_stats_event = NULL; + os_event_free(dict_stats_shutdown_event); + dict_stats_shutdown_event = NULL; + dict_stats_start_shutdown = false; } /*****************************************************************//** @@ -339,9 +348,7 @@ DECLARE_THREAD(dict_stats_thread)( my_thread_init(); ut_a(!srv_read_only_mode); - srv_dict_stats_thread_active = TRUE; - - while (!SHUTTING_DOWN()) { + while (!dict_stats_start_shutdown) { /* Wake up periodically even if not signaled. 
This is because we may lose an event - if the below call to @@ -351,7 +358,7 @@ DECLARE_THREAD(dict_stats_thread)( os_event_wait_time( dict_stats_event, MIN_RECALC_INTERVAL * 1000000); - if (SHUTTING_DOWN()) { + if (dict_stats_start_shutdown) { break; } @@ -360,8 +367,9 @@ DECLARE_THREAD(dict_stats_thread)( os_event_reset(dict_stats_event); } - srv_dict_stats_thread_active = FALSE; + srv_dict_stats_thread_active = false; + os_event_set(dict_stats_shutdown_event); my_thread_end(); /* We count the number of threads in os_thread_exit(). A created thread should always use that to exit instead of return(). */ @@ -369,3 +377,12 @@ DECLARE_THREAD(dict_stats_thread)( OS_THREAD_DUMMY_RETURN; } + +/** Shut down the dict_stats_thread. */ +void +dict_stats_shutdown() +{ + dict_stats_start_shutdown = true; + os_event_set(dict_stats_event); + os_event_wait(dict_stats_shutdown_event); +} diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 7d1ed3da5fd..9717136e030 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -1366,14 +1366,11 @@ innobase_drop_database( the path is used as the database name: for example, in 'mysql/data/test' the database name is 'test' */ -/*******************************************************************//** -Closes an InnoDB database. */ +/** Shut down the InnoDB storage engine. +@return 0 */ static int -innobase_end( -/*=========*/ - handlerton* hton, /* in: Innodb handlerton */ - ha_panic_function type); +innobase_end(handlerton*, ha_panic_function); #if NOT_USED /*****************************************************************//** @@ -4140,21 +4137,13 @@ error: DBUG_RETURN(TRUE); } -/*******************************************************************//** -Closes an InnoDB database. -@return TRUE if error */ +/** Shut down the InnoDB storage engine. +@return 0 */ static int -innobase_end( -/*=========*/ - handlerton* hton, /*!< in/out: InnoDB handlerton */ - ha_panic_function type MY_ATTRIBUTE((unused))) - /*!< in: ha_panic() parameter */ +innobase_end(handlerton*, ha_panic_function) { - int err= 0; - DBUG_ENTER("innobase_end"); - DBUG_ASSERT(hton == innodb_hton_ptr); if (innodb_inited) { @@ -4171,9 +4160,7 @@ innobase_end( innodb_inited = 0; hash_table_free(innobase_open_tables); innobase_open_tables = NULL; - if (innobase_shutdown_for_mysql() != DB_SUCCESS) { - err = 1; - } + innodb_shutdown(); srv_free_paths_and_sizes(); my_free(internal_innobase_data_file_path); mysql_mutex_destroy(&innobase_share_mutex); @@ -4182,7 +4169,7 @@ innobase_end( mysql_mutex_destroy(&pending_checkpoint_mutex); } - DBUG_RETURN(err); + DBUG_RETURN(0); } /****************************************************************//** diff --git a/storage/xtradb/include/buf0flu.h b/storage/xtradb/include/buf0flu.h index 96035452e6e..384014939ae 100644 --- a/storage/xtradb/include/buf0flu.h +++ b/storage/xtradb/include/buf0flu.h @@ -34,7 +34,7 @@ Created 11/5/1995 Heikki Tuuri #include "buf0types.h" /** Flag indicating if the page_cleaner is in active state. */ -extern ibool buf_page_cleaner_is_active; +extern bool buf_page_cleaner_is_active; /** Flag indicating if the lru_manager is in active state. 
*/ extern bool buf_lru_manager_is_active; diff --git a/storage/xtradb/include/dict0stats_bg.h b/storage/xtradb/include/dict0stats_bg.h index 1973380d197..2bd441d5f5e 100644 --- a/storage/xtradb/include/dict0stats_bg.h +++ b/storage/xtradb/include/dict0stats_bg.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -122,6 +122,10 @@ DECLARE_THREAD(dict_stats_thread)( void* arg); /*!< in: a dummy parameter required by os_thread_create */ +/** Shut down the dict_stats_thread. */ +void +dict_stats_shutdown(); + # ifndef UNIV_NONINL # include "dict0stats_bg.ic" # endif diff --git a/storage/xtradb/include/srv0srv.h b/storage/xtradb/include/srv0srv.h index cfd961d9c46..f27ed9857ec 100644 --- a/storage/xtradb/include/srv0srv.h +++ b/storage/xtradb/include/srv0srv.h @@ -499,7 +499,7 @@ extern ibool srv_error_monitor_active; extern ibool srv_buf_dump_thread_active; /* TRUE during the lifetime of the stats thread */ -extern ibool srv_dict_stats_thread_active; +extern bool srv_dict_stats_thread_active; extern ulong srv_n_spin_wait_rounds; extern ulong srv_n_free_tickets_to_enter; diff --git a/storage/xtradb/include/srv0start.h b/storage/xtradb/include/srv0start.h index a60776a4665..b055a9d834f 100644 --- a/storage/xtradb/include/srv0start.h +++ b/storage/xtradb/include/srv0start.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -75,22 +76,12 @@ are not found and the user wants. @return DB_SUCCESS or error code */ UNIV_INTERN dberr_t -innobase_start_or_create_for_mysql(void); -/*====================================*/ -/****************************************************************//** -Shuts down the Innobase database. -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -innobase_shutdown_for_mysql(void); +innobase_start_or_create_for_mysql(); -/******************************************************************** -Signal all per-table background threads to shutdown, and wait for them to do -so. */ +/** Shut down InnoDB. */ UNIV_INTERN void -srv_shutdown_table_bg_threads(void); -/*=============================*/ +innodb_shutdown(); /*************************************************************//** Copy the file path component of the physical file to parameter. 
It will @@ -158,6 +149,9 @@ enum srv_shutdown_state { SRV_SHUTDOWN_EXIT_THREADS/*!< Exit all threads */ }; +/** Whether any undo log records can be generated */ +extern bool srv_undo_sources; + /** At a shutdown this value climbs from SRV_SHUTDOWN_NONE to SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ extern enum srv_shutdown_state srv_shutdown_state; diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 2fa3be014a5..e11697081c6 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -92,7 +92,7 @@ UNIV_INTERN ibool srv_error_monitor_active = FALSE; UNIV_INTERN ibool srv_buf_dump_thread_active = FALSE; -UNIV_INTERN ibool srv_dict_stats_thread_active = FALSE; +UNIV_INTERN bool srv_dict_stats_thread_active; UNIV_INTERN const char* srv_main_thread_op_info = ""; @@ -3097,31 +3097,29 @@ suspend_thread: goto loop; } -/*********************************************************************//** -Check if purge should stop. -@return true if it should shutdown. */ +/** Check if purge should stop. +@param[in] n_purged pages purged in the last batch +@return whether purge should exit */ static bool -srv_purge_should_exit( -/*==============*/ - ulint n_purged) /*!< in: pages purged in last batch */ +srv_purge_should_exit(ulint n_purged) { - switch (srv_shutdown_state) { - case SRV_SHUTDOWN_NONE: - /* Normal operation. */ - break; + ut_ad(srv_shutdown_state == SRV_SHUTDOWN_NONE + || srv_shutdown_state == SRV_SHUTDOWN_CLEANUP); - case SRV_SHUTDOWN_CLEANUP: - case SRV_SHUTDOWN_EXIT_THREADS: - /* Exit unless slow shutdown requested or all done. */ - return(srv_fast_shutdown != 0 || n_purged == 0); - - case SRV_SHUTDOWN_LAST_PHASE: - case SRV_SHUTDOWN_FLUSH_PHASE: - ut_error; + if (srv_undo_sources) { + return(false); } - - return(false); + if (srv_fast_shutdown) { + return(true); + } + /* Slow shutdown was requested. */ + if (n_purged) { + /* The previous round still did some work. */ + return(false); + } + /* Exit if there are no active transactions to roll back. */ + return(trx_sys_any_active_transactions() == 0); } /*********************************************************************//** @@ -3396,7 +3394,7 @@ srv_purge_coordinator_suspend( } rw_lock_x_unlock(&purge_sys->latch); - } while (stop); + } while (stop && srv_undo_sources); srv_resume_thread(slot, 0, false); } @@ -3450,6 +3448,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( purge didn't purge any records then wait for activity. */ if (srv_shutdown_state == SRV_SHUTDOWN_NONE + && srv_undo_sources && (purge_sys->state == PURGE_STATE_STOP || n_total_purged == 0)) { @@ -3470,36 +3469,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)( srv_n_purge_threads, &n_total_purged); srv_inc_activity_count(); - } while (!srv_purge_should_exit(n_total_purged)); - /* Ensure that we don't jump out of the loop unless the - exit condition is satisfied. */ - - ut_a(srv_purge_should_exit(n_total_purged)); - - ulint n_pages_purged = ULINT_MAX; - - /* Ensure that all records are purged if it is not a fast shutdown. - This covers the case where a record can be added after we exit the - loop above. */ - while (srv_fast_shutdown == 0 && n_pages_purged > 0) { - n_pages_purged = trx_purge(1, srv_purge_batch_size, false); - } - - /* This trx_purge is called to remove any undo records (added by - background threads) after completion of the above loop. 
When - srv_fast_shutdown != 0, a large batch size can cause significant - delay in shutdown ,so reducing the batch size to magic number 20 - (which was default in 5.5), which we hope will be sufficient to - remove all the undo records */ - const uint temp_batch_size = 20; - - n_pages_purged = trx_purge(1, srv_purge_batch_size <= temp_batch_size - ? srv_purge_batch_size : temp_batch_size, - true); - ut_a(n_pages_purged == 0 || srv_fast_shutdown != 0); - /* The task queue should always be empty, independent of fast shutdown state. */ ut_a(srv_get_task_queue_length() == 0); diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index e5b06a4919c..525c985f71b 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -123,7 +123,10 @@ UNIV_INTERN ibool srv_is_being_started = FALSE; /** TRUE if the server was successfully started */ UNIV_INTERN ibool srv_was_started = FALSE; /** TRUE if innobase_start_or_create_for_mysql() has been called */ -static ibool srv_start_has_been_called = FALSE; +static ibool srv_start_has_been_called; + +/** Whether any undo log records can be generated */ +UNIV_INTERN bool srv_undo_sources; /** At a shutdown this value climbs from SRV_SHUTDOWN_NONE to SRV_SHUTDOWN_CLEANUP and then to SRV_SHUTDOWN_LAST_PHASE, and so on */ @@ -1623,8 +1626,7 @@ are not found and the user wants. @return DB_SUCCESS or error code */ UNIV_INTERN dberr_t -innobase_start_or_create_for_mysql(void) -/*====================================*/ +innobase_start_or_create_for_mysql() { ibool create_new_db; lsn_t min_flushed_lsn; @@ -2777,8 +2779,8 @@ files_checked: } } - srv_startup_is_before_trx_rollback_phase = FALSE; recv_recovery_rollback_active(); + srv_startup_is_before_trx_rollback_phase = FALSE; /* It is possible that file_format tag has never been set. In this case we initialize it to minimum @@ -2905,6 +2907,16 @@ files_checked: srv_master_thread, NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS)); thread_started[1 + SRV_MAX_N_IO_THREADS] = true; + + srv_undo_sources = true; + /* Create the dict stats gathering thread */ + srv_dict_stats_thread_active = true; + dict_stats_thread_handle = os_thread_create( + dict_stats_thread, NULL, NULL); + dict_stats_thread_started = true; + + /* Create the thread that will optimize the FTS sub-system. */ + fts_optimize_init(); } if (!srv_read_only_mode @@ -2934,6 +2946,7 @@ files_checked: } if (!srv_read_only_mode) { + buf_page_cleaner_is_active = true; buf_flush_page_cleaner_thread_handle = os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL); buf_flush_page_cleaner_thread_started = true; } @@ -2973,13 +2986,6 @@ files_checked: /* Create the buffer pool dump/load thread */ buf_dump_thread_handle = os_thread_create(buf_dump_thread, NULL, NULL); buf_dump_thread_started = true; - - /* Create the dict stats gathering thread */ - dict_stats_thread_handle = os_thread_create(dict_stats_thread, NULL, NULL); - dict_stats_thread_started = true; - - /* Create the thread that will optimize the FTS sub-system. */ - fts_optimize_init(); } srv_was_started = TRUE; @@ -3017,13 +3023,10 @@ srv_fts_close(void) } #endif -/****************************************************************//** -Shuts down the InnoDB database. -@return DB_SUCCESS or error code */ +/** Shut down InnoDB. 
*/ UNIV_INTERN -dberr_t -innobase_shutdown_for_mysql(void) -/*=============================*/ +void +innodb_shutdown() { ulint i; @@ -3033,15 +3036,20 @@ innobase_shutdown_for_mysql(void) "Shutting down an improperly started, " "or created database!"); } - - return(DB_SUCCESS); } - if (!srv_read_only_mode) { + if (srv_undo_sources) { + ut_ad(!srv_read_only_mode); /* Shutdown the FTS optimize sub system. */ fts_optimize_start_shutdown(); fts_optimize_end(); + dict_stats_shutdown(); + while (row_get_background_drop_list_len_low()) { + srv_wake_master_thread(); + os_thread_yield(); + } + srv_undo_sources = false; } /* 1. Flush the buffer pool to disk, write the current lsn to @@ -3245,89 +3253,9 @@ innobase_shutdown_for_mysql(void) srv_was_started = FALSE; srv_start_has_been_called = FALSE; - - return(DB_SUCCESS); } #endif /* !UNIV_HOTBACKUP */ - -/******************************************************************** -Signal all per-table background threads to shutdown, and wait for them to do -so. */ -UNIV_INTERN -void -srv_shutdown_table_bg_threads(void) -/*===============================*/ -{ - dict_table_t* table; - dict_table_t* first; - dict_table_t* last = NULL; - - mutex_enter(&dict_sys->mutex); - - /* Signal all threads that they should stop. */ - table = UT_LIST_GET_FIRST(dict_sys->table_LRU); - first = table; - while (table) { - dict_table_t* next; - fts_t* fts = table->fts; - - if (fts != NULL) { - fts_start_shutdown(table, fts); - } - - next = UT_LIST_GET_NEXT(table_LRU, table); - - if (!next) { - last = table; - } - - table = next; - } - - /* We must release dict_sys->mutex here; if we hold on to it in the - loop below, we will deadlock if any of the background threads try to - acquire it (for example, the FTS thread by calling que_eval_sql). - - Releasing it here and going through dict_sys->table_LRU without - holding it is safe because: - - a) MySQL only starts the shutdown procedure after all client - threads have been disconnected and no new ones are accepted, so no - new tables are added or old ones dropped. - - b) Despite its name, the list is not LRU, and the order stays - fixed. - - To safeguard against the above assumptions ever changing, we store - the first and last items in the list above, and then check that - they've stayed the same below. */ - - mutex_exit(&dict_sys->mutex); - - /* Wait for the threads of each table to stop. This is not inside - the above loop, because by signaling all the threads first we can - overlap their shutting down delays. */ - table = UT_LIST_GET_FIRST(dict_sys->table_LRU); - ut_a(first == table); - while (table) { - dict_table_t* next; - fts_t* fts = table->fts; - - if (fts != NULL) { - fts_shutdown(table, fts); - } - - next = UT_LIST_GET_NEXT(table_LRU, table); - - if (table == last) { - ut_a(!next); - } - - table = next; - } -} - /*****************************************************************//** Get the meta-data filename from the table name. */ UNIV_INTERN diff --git a/storage/xtradb/trx/trx0purge.cc b/storage/xtradb/trx/trx0purge.cc index 474ca077ce5..a4463744a69 100644 --- a/storage/xtradb/trx/trx0purge.cc +++ b/storage/xtradb/trx/trx0purge.cc @@ -247,6 +247,19 @@ trx_purge_add_update_undo_to_history( hist_size + undo->size, MLOG_4BYTES, mtr); } + /* Before any transaction-generating background threads or the + purge have been started, recv_recovery_rollback_active() can + start transactions in row_merge_drop_temp_indexes() and + fts_drop_orphaned_tables(), and roll back recovered transactions. 
+ After the purge thread has been given permission to exit, + in fast shutdown, we may roll back transactions (trx->undo_no==0) + in THD::cleanup() invoked from unlink_thd(). */ + ut_ad(srv_undo_sources + || ((srv_startup_is_before_trx_rollback_phase + || trx_rollback_or_clean_is_active) + && purge_sys->state == PURGE_STATE_INIT) + || (trx->undo_no == 0 && srv_fast_shutdown)); + /* Add the log as the first in the history list */ flst_add_first(rseg_header + TRX_RSEG_HISTORY, undo_header + TRX_UNDO_HISTORY_NODE, mtr); -- cgit v1.2.1 From 4325041df6cf13ed1e62749fdb43fb8f138d1e78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 12 Jun 2017 11:08:06 +0300 Subject: MDEV-13057 innodb_read_only=1 should avoid creating buf_flush_page_cleaner_thread When the server is started in innodb_read_only mode, there cannot be any writes to persistent InnoDB/XtraDB files. Just like the creation of buf_flush_page_cleaner_thread is skipped in this case, also the creation of the XtraDB-specific buf_flush_lru_manager_thread should be skipped. --- storage/xtradb/buf/buf0flu.cc | 4 +--- storage/xtradb/handler/ha_innodb.cc | 8 ++++---- storage/xtradb/srv/srv0start.cc | 11 +++++++---- 3 files changed, 12 insertions(+), 11 deletions(-) (limited to 'storage') diff --git a/storage/xtradb/buf/buf0flu.cc b/storage/xtradb/buf/buf0flu.cc index 955be5a6161..17bfb92580e 100644 --- a/storage/xtradb/buf/buf0flu.cc +++ b/storage/xtradb/buf/buf0flu.cc @@ -61,7 +61,7 @@ doing the shutdown */ UNIV_INTERN bool buf_page_cleaner_is_active; /** Flag indicating if the lru_manager is in active state. */ -UNIV_INTERN bool buf_lru_manager_is_active = false; +UNIV_INTERN bool buf_lru_manager_is_active; #ifdef UNIV_PFS_THREAD UNIV_INTERN mysql_pfs_key_t buf_page_cleaner_thread_key; @@ -2868,8 +2868,6 @@ DECLARE_THREAD(buf_flush_lru_manager_thread)( os_thread_pf(os_thread_get_curr_id())); #endif /* UNIV_DEBUG_THREAD_CREATION */ - buf_lru_manager_is_active = true; - /* On server shutdown, the LRU manager thread runs through cleanup phase to provide free pages for the master and purge threads. 
*/ while (srv_shutdown_state == SRV_SHUTDOWN_NONE diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 9717136e030..e4238af7747 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -17032,6 +17032,10 @@ innodb_sched_priority_cleaner_update( const void* save) /*!< in: immediate result from check function */ { + if (srv_read_only_mode) { + return; + } + ulint priority = *static_cast(save); ulint actual_priority; ulint nice = 0; @@ -17058,10 +17062,6 @@ innodb_sched_priority_cleaner_update( } /* Set the priority for the page cleaner thread */ - if (srv_read_only_mode) { - - return; - } ut_ad(buf_page_cleaner_is_active); nice = os_thread_get_priority(srv_cleaner_tid); diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index 525c985f71b..f12f895488d 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -2947,12 +2947,15 @@ files_checked: if (!srv_read_only_mode) { buf_page_cleaner_is_active = true; - buf_flush_page_cleaner_thread_handle = os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL); + buf_flush_page_cleaner_thread_handle = os_thread_create( + buf_flush_page_cleaner_thread, NULL, NULL); buf_flush_page_cleaner_thread_started = true; - } - buf_flush_lru_manager_thread_handle = os_thread_create(buf_flush_lru_manager_thread, NULL, NULL); - buf_flush_lru_manager_thread_started = true; + buf_lru_manager_is_active = true; + buf_flush_lru_manager_thread_handle = os_thread_create( + buf_flush_lru_manager_thread, NULL, NULL); + buf_flush_lru_manager_thread_started = true; + } if (!srv_file_per_table && srv_pass_corrupt_table) { fprintf(stderr, "InnoDB: Warning:" -- cgit v1.2.1 From 3005cebc96ef65dc54bf6caaa5e089d52d52e7ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 12 Jun 2017 17:10:56 +0300 Subject: Post-push fix for MDEV-12610 MariaDB start is slow fil_crypt_read_crypt_data(): Remove an unnecessary acquisition of fil_system->mutex. Remove a duplicated condition from the callers. --- storage/innobase/fil/fil0crypt.cc | 64 ++++++++++++++------------------------- storage/xtradb/fil/fil0crypt.cc | 64 ++++++++++++++------------------------- 2 files changed, 44 insertions(+), 84 deletions(-) (limited to 'storage') diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc index 70d8558ede2..df5c250df90 100644 --- a/storage/innobase/fil/fil0crypt.cc +++ b/storage/innobase/fil/fil0crypt.cc @@ -1116,40 +1116,33 @@ fil_crypt_needs_rotation( } /** Read page 0 and possible crypt data from there. -@param[in] space Tablespace */ +@param[in,out] space Tablespace */ static inline void fil_crypt_read_crypt_data(fil_space_t* space) { - mutex_enter(&fil_system->mutex); - - /* If space does not contain crypt data and space size is 0 - we have not yet read first page of tablespace. We need to - read it to find out tablespace current encryption status. 
*/ - if (!space->crypt_data && space->size == 0) { - mtr_t mtr; - mtr_start(&mtr); - ulint zip_size = fsp_flags_get_zip_size(space->flags); - ulint offset = fsp_header_get_crypt_offset(zip_size); - mutex_exit(&fil_system->mutex); - if (buf_block_t* block = buf_page_get(space->id, zip_size, 0, - RW_X_LATCH, &mtr)) { - byte* frame = buf_block_get_frame(block); - - mutex_enter(&fil_system->mutex); - - if (!space->crypt_data) { - space->crypt_data = fil_space_read_crypt_data(space->id, - frame, offset); - } + if (space->crypt_data || space->size) { + /* The encryption metadata has already been read, or + the tablespace is not encrypted and the file has been + opened already. */ + return; + } - mutex_exit(&fil_system->mutex); + mtr_t mtr; + mtr_start(&mtr); + ulint zip_size = fsp_flags_get_zip_size(space->flags); + ulint offset = fsp_header_get_crypt_offset(zip_size); + if (buf_block_t* block = buf_page_get(space->id, zip_size, 0, + RW_S_LATCH, &mtr)) { + mutex_enter(&fil_system->mutex); + if (!space->crypt_data) { + space->crypt_data = fil_space_read_crypt_data( + space->id, block->frame, offset); } - - mtr_commit(&mtr); - } else { mutex_exit(&fil_system->mutex); } + + mtr_commit(&mtr); } /*********************************************************************** @@ -1656,12 +1649,7 @@ fil_crypt_find_space_to_rotate( } while (!state->should_shutdown() && state->space) { - /* If there is no crypt data and we have not yet read - page 0 for this tablespace, we need to read it before - we can continue. */ - if (!state->space->crypt_data) { - fil_crypt_read_crypt_data(state->space); - } + fil_crypt_read_crypt_data(state->space); if (fil_crypt_space_needs_rotation(state, key_state, recheck)) { ut_ad(key_state->key_id); @@ -2589,18 +2577,10 @@ fil_space_crypt_get_status( memset(status, 0, sizeof(*status)); ut_ad(space->n_pending_ops > 0); - - /* If there is no crypt data and we have not yet read - page 0 for this tablespace, we need to read it before - we can continue. */ - if (!space->crypt_data) { - fil_crypt_read_crypt_data(const_cast(space)); - } - - fil_space_crypt_t* crypt_data = space->crypt_data; + fil_crypt_read_crypt_data(const_cast(space)); status->space = space->id; - if (crypt_data != NULL) { + if (fil_space_crypt_t* crypt_data = space->crypt_data) { mutex_enter(&crypt_data->mutex); status->scheme = crypt_data->type; status->keyserver_requests = crypt_data->keyserver_requests; diff --git a/storage/xtradb/fil/fil0crypt.cc b/storage/xtradb/fil/fil0crypt.cc index e24278dd102..6d3c2e98010 100644 --- a/storage/xtradb/fil/fil0crypt.cc +++ b/storage/xtradb/fil/fil0crypt.cc @@ -1116,40 +1116,33 @@ fil_crypt_needs_rotation( } /** Read page 0 and possible crypt data from there. -@param[in] space Tablespace */ +@param[in,out] space Tablespace */ static inline void fil_crypt_read_crypt_data(fil_space_t* space) { - mutex_enter(&fil_system->mutex); - - /* If space does not contain crypt data and space size is 0 - we have not yet read first page of tablespace. We need to - read it to find out tablespace current encryption status. 
*/ - if (!space->crypt_data && space->size == 0) { - mtr_t mtr; - mtr_start(&mtr); - ulint zip_size = fsp_flags_get_zip_size(space->flags); - ulint offset = fsp_header_get_crypt_offset(zip_size); - mutex_exit(&fil_system->mutex); - if (buf_block_t* block = buf_page_get(space->id, zip_size, 0, - RW_X_LATCH, &mtr)) { - byte* frame = buf_block_get_frame(block); - - mutex_enter(&fil_system->mutex); - - if (!space->crypt_data) { - space->crypt_data = fil_space_read_crypt_data(space->id, - frame, offset); - } + if (space->crypt_data || space->size) { + /* The encryption metadata has already been read, or + the tablespace is not encrypted and the file has been + opened already. */ + return; + } - mutex_exit(&fil_system->mutex); + mtr_t mtr; + mtr_start(&mtr); + ulint zip_size = fsp_flags_get_zip_size(space->flags); + ulint offset = fsp_header_get_crypt_offset(zip_size); + if (buf_block_t* block = buf_page_get(space->id, zip_size, 0, + RW_S_LATCH, &mtr)) { + mutex_enter(&fil_system->mutex); + if (!space->crypt_data) { + space->crypt_data = fil_space_read_crypt_data( + space->id, block->frame, offset); } - - mtr_commit(&mtr); - } else { mutex_exit(&fil_system->mutex); } + + mtr_commit(&mtr); } /*********************************************************************** @@ -1656,12 +1649,7 @@ fil_crypt_find_space_to_rotate( } while (!state->should_shutdown() && state->space) { - /* If there is no crypt data and we have not yet read - page 0 for this tablespace, we need to read it before - we can continue. */ - if (!state->space->crypt_data) { - fil_crypt_read_crypt_data(state->space); - } + fil_crypt_read_crypt_data(state->space); if (fil_crypt_space_needs_rotation(state, key_state, recheck)) { ut_ad(key_state->key_id); @@ -2590,18 +2578,10 @@ fil_space_crypt_get_status( memset(status, 0, sizeof(*status)); ut_ad(space->n_pending_ops > 0); - - /* If there is no crypt data and we have not yet read - page 0 for this tablespace, we need to read it before - we can continue. */ - if (!space->crypt_data) { - fil_crypt_read_crypt_data(const_cast(space)); - } - - fil_space_crypt_t* crypt_data = space->crypt_data; + fil_crypt_read_crypt_data(const_cast(space)); status->space = space->id; - if (crypt_data != NULL) { + if (fil_space_crypt_t* crypt_data = space->crypt_data) { mutex_enter(&crypt_data->mutex); status->scheme = crypt_data->type; status->keyserver_requests = crypt_data->keyserver_requests; -- cgit v1.2.1 From 35248fed10becf08368a3dee547c938963ffc986 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 8 Jun 2017 15:43:06 +0300 Subject: 10.2 follow-up to MDEV-13039 innodb_fast_shutdown=0 crash due premature purge shutdown before fts_optimize_shutdown() srv_start_state_t: Document the flags. Replace SRV_START_STATE_STAT with SRV_START_STATE_REDO. The srv_bg_undo_sources replaces the original use of SRV_START_STATE_STAT. dict_stats_thread_started, buf_dump_thread_started, buf_flush_page_cleaner_thread_started: Remove (unused). srv_shutdown_all_bg_threads(): Always wait for the I/O threads to exit, also in read-only mode. os_thread_free(): Remove. 
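A brief aside to the description above, not InnoDB code: a minimal sketch of the start-state bookkeeping that srv_start_state_t implements, using invented names ending in _demo. It only illustrates how power-of-two flag values recorded at startup let shutdown code act on exactly the thread groups that were actually started; the real flag values and their users are in storage/innobase/srv/srv0start.cc.

#include <stdio.h>

/* Invented stand-ins for a few srv_start_state_t style flags. */
enum {
	START_STATE_NONE_DEMO	= 0,
	START_STATE_PURGE_DEMO	= 16,	/* purge threads started */
	START_STATE_REDO_DEMO	= 32	/* redo-generating background threads */
};

static unsigned start_state_demo = START_STATE_NONE_DEMO;

/* Pretend to start background threads and record what was started. */
static void demo_start(int read_only)
{
	if (!read_only) {
		start_state_demo |= START_STATE_PURGE_DEMO
			| START_STATE_REDO_DEMO;
	}
}

/* At shutdown, only the recorded thread groups need to be stopped. */
static void demo_shutdown(void)
{
	if (start_state_demo & START_STATE_REDO_DEMO) {
		printf("stopping redo-generating background threads\n");
	}

	if (start_state_demo & START_STATE_PURGE_DEMO) {
		printf("stopping purge threads\n");
	}
}

int main(void)
{
	demo_start(0);
	demo_shutdown();
	return 0;
}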
--- storage/innobase/include/os0thread.h | 5 ---- storage/innobase/include/srv0start.h | 3 +-- storage/innobase/os/os0thread.cc | 12 +-------- storage/innobase/srv/srv0start.cc | 49 +++++++++++++++--------------------- 4 files changed, 22 insertions(+), 47 deletions(-) (limited to 'storage') diff --git a/storage/innobase/include/os0thread.h b/storage/innobase/include/os0thread.h index 5c3a62e59a0..071e7422894 100644 --- a/storage/innobase/include/os0thread.h +++ b/storage/innobase/include/os0thread.h @@ -151,11 +151,6 @@ os_thread_sleep( /*============*/ ulint tm); /*!< in: time in microseconds */ -/** -Frees OS thread management data structures. */ -void -os_thread_free(); - /*****************************************************************//** Check if there are threads active. @return true if the thread count > 0. */ diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h index d7d988f5282..47b42725541 100644 --- a/storage/innobase/include/srv0start.h +++ b/storage/innobase/include/srv0start.h @@ -49,8 +49,7 @@ innobase_start_or_create_for_mysql(); void innodb_shutdown(); -/****************************************************************//** -Shuts down background threads that can generate undo pages. */ +/** Shut down background threads that can generate undo log. */ void srv_shutdown_bg_undo_sources(); diff --git a/storage/innobase/os/os0thread.cc b/storage/innobase/os/os0thread.cc index dc6e54f0162..0462c62f4b2 100644 --- a/storage/innobase/os/os0thread.cc +++ b/storage/innobase/os/os0thread.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -254,14 +255,3 @@ os_thread_active() return(my_atomic_loadlint(&os_thread_count) > 0); } - -/** -Frees OS thread management data structures. */ -void -os_thread_free() -{ - if (ulint count = my_atomic_loadlint(&os_thread_count)) { - ib::warn() << "Some (" << count << ") threads are" - " still active"; - } -} diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 0398e36eaf8..2aea7b8ccb3 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -153,15 +153,23 @@ UNIV_INTERN uint srv_sys_space_size_debug; determine which threads need to be stopped if we need to abort during the initialisation step. */ enum srv_start_state_t { + /** No thread started */ SRV_START_STATE_NONE = 0, /*!< No thread started */ + /** lock_wait_timeout_thread started */ SRV_START_STATE_LOCK_SYS = 1, /*!< Started lock-timeout thread. */ - SRV_START_STATE_IO = 2, /*!< Started IO threads */ - SRV_START_STATE_MONITOR = 4, /*!< Started montior thread */ - SRV_START_STATE_MASTER = 8, /*!< Started master threadd. */ - SRV_START_STATE_PURGE = 16, /*!< Started purge thread(s) */ - SRV_START_STATE_STAT = 32 /*!< Started bufdump + dict stat - and FTS optimize thread. 
*/ + /** buf_flush_page_cleaner_coordinator, + buf_flush_page_cleaner_worker started */ + SRV_START_STATE_IO = 2, + /** srv_error_monitor_thread, srv_monitor_thread started */ + SRV_START_STATE_MONITOR = 4, + /** srv_master_thread started */ + SRV_START_STATE_MASTER = 8, + /** srv_purge_coordinator_thread, srv_worker_thread started */ + SRV_START_STATE_PURGE = 16, + /** fil_crypt_thread, btr_defragment_thread started + (all background threads that can generate redo log but not undo log */ + SRV_START_STATE_REDO = 32 }; /** Track server thrd starting phases */ @@ -189,9 +197,6 @@ static os_thread_t buf_dump_thread_handle; static os_thread_t dict_stats_thread_handle; /** Status variables, is thread started ?*/ static bool thread_started[SRV_MAX_N_IO_THREADS + 6 + 32] = {false}; -static bool buf_dump_thread_started = false; -static bool dict_stats_thread_started = false; -static bool buf_flush_page_cleaner_thread_started = false; /** Name of srv_monitor_file */ static char* srv_monitor_file_name; @@ -1228,10 +1233,6 @@ srv_shutdown_all_bg_threads() { srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS; - if (!srv_start_state) { - return; - } - /* All threads end up waiting for certain events. Put those events to the signaled state. Then the threads will exit themselves after os_event_wait(). */ @@ -1841,7 +1842,6 @@ innobase_start_or_create_for_mysql() recv_sys_create(); recv_sys_init(buf_pool_get_curr_size()); lock_sys_create(srv_lock_table_size); - srv_start_state_set(SRV_START_STATE_LOCK_SYS); /* Create i/o-handler threads: */ @@ -1860,8 +1860,6 @@ innobase_start_or_create_for_mysql() os_thread_create(buf_flush_page_cleaner_coordinator, NULL, NULL); - buf_flush_page_cleaner_thread_started = true; - for (i = 1; i < srv_n_page_cleaners; ++i) { os_thread_create(buf_flush_page_cleaner_worker, NULL, NULL); @@ -2559,7 +2557,8 @@ files_checked: srv_monitor_thread, NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS); thread_started[4 + SRV_MAX_N_IO_THREADS] = true; - srv_start_state_set(SRV_START_STATE_MONITOR); + srv_start_state |= SRV_START_STATE_LOCK_SYS + | SRV_START_STATE_MONITOR; } /* Create the SYS_FOREIGN and SYS_FOREIGN_COLS system tables */ @@ -2615,7 +2614,6 @@ files_checked: srv_dict_stats_thread_active = true; dict_stats_thread_handle = os_thread_create( dict_stats_thread, NULL, NULL); - dict_stats_thread_started = true; /* Create the thread that will optimize the FTS sub-system. */ fts_optimize_init(); @@ -2703,7 +2701,6 @@ files_checked: buf_dump_thread_handle= os_thread_create(buf_dump_thread, NULL, NULL); - buf_dump_thread_started = true; #ifdef WITH_WSREP } else { ib::warn() << @@ -2735,7 +2732,7 @@ files_checked: btr_defragment_thread_active = true; os_thread_create(btr_defragment_thread, NULL, NULL); - srv_start_state_set(SRV_START_STATE_STAT); + srv_start_state |= SRV_START_STATE_REDO; } /* Create the buffer pool resize thread */ @@ -2775,8 +2772,7 @@ srv_fts_close(void) } #endif -/****************************************************************//** -Shuts down background threads that can generate undo pages. */ +/** Shut down background threads that can generate undo log. */ void srv_shutdown_bg_undo_sources() { @@ -2797,8 +2793,7 @@ void innodb_shutdown() { ut_ad(!srv_running); - - srv_shutdown_bg_undo_sources(); + ut_ad(!srv_undo_sources); /* 1. Flush the buffer pool to disk, write the current lsn to the tablespace header(s), and copy all log data to archive. 
@@ -2851,7 +2846,7 @@ innodb_shutdown() dict_stats_thread_deinit(); } - if (srv_start_state_is_set(SRV_START_STATE_STAT)) { + if (srv_start_state_is_set(SRV_START_STATE_REDO)) { ut_ad(!srv_read_only_mode); /* srv_shutdown_bg_undo_sources() already invoked fts_optimize_shutdown(); dict_stats_shutdown(); */ @@ -2924,10 +2919,6 @@ innodb_shutdown() buf_pool_free(srv_buf_pool_instances); } - /* 6. Free the thread management resoruces. */ - os_thread_free(); - - /* 7. Free the synchronisation infrastructure. */ sync_check_close(); if (dict_foreign_err_file) { -- cgit v1.2.1 From a4efeabc43c5c4d20b452cb578e68b349e11a0e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 12 Jun 2017 19:09:04 +0300 Subject: MDEV-13061 innodb_encrypt_log recovery is spamming the error log log_crypt(): Remove the useless error log output that was accidentally introduced in MDEV-11782. These messages could be emitted to the server error log during crash recovery. --- storage/innobase/log/log0crypt.cc | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'storage') diff --git a/storage/innobase/log/log0crypt.cc b/storage/innobase/log/log0crypt.cc index 9ddd20db163..69cfec10fed 100644 --- a/storage/innobase/log/log0crypt.cc +++ b/storage/innobase/log/log0crypt.cc @@ -156,19 +156,6 @@ log_crypt(byte* buf, ulint size, bool decrypt) ut_a(rc == MY_AES_OK); ut_a(dst_len == sizeof dst); - if (decrypt) { - std::ostringstream s; - ut_print_buf_hex(s, buf + LOG_CRYPT_HDR_SIZE, - OS_FILE_LOG_BLOCK_SIZE - - LOG_CRYPT_HDR_SIZE); - ib::info() << "S: " << s.str(); - std::ostringstream d; - ut_print_buf_hex(d, dst, - OS_FILE_LOG_BLOCK_SIZE - - LOG_CRYPT_HDR_SIZE); - ib::info() << "c: " << d.str(); - } - memcpy(buf + LOG_CRYPT_HDR_SIZE, dst, sizeof dst); } } -- cgit v1.2.1 From 74e4cf70d09bbe7a0a530bc50895820e04466194 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Mon, 12 Jun 2017 18:43:23 +0000 Subject: MDEV-13059 XtraDB hangs on Windows due to failing to release block->lock X-latch in innodb_read_only mode. The reason for the hang is that there was no notification received about completed read io. File handles are bound to completion_port, and there were no background "write" threads that would be waiting on completion_port, only 2 "read" threads waiting on read_completion_port were active. The fix is to use a single IO completion port for all IOs, if innodb_read_only is set. --- storage/xtradb/os/os0file.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index 5e67d2473f0..9be9c64b759 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -4163,7 +4163,7 @@ os_aio_init( #ifdef _WIN32 ut_a(completion_port == 0 && read_completion_port == 0); completion_port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); - read_completion_port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); + read_completion_port = srv_read_only_mode? completion_port : CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); ut_a(completion_port && read_completion_port); #endif -- cgit v1.2.1 From 9f0ed6c67e96e9b7fa4222c2c36640c0f691f43d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Jun 2017 17:21:15 +0300 Subject: MDEV-13009 10.1.24 does not compile on architectures without 64-bit atomics Add a missing #include "sync0types.h" that was removed in MDEV-12674. 
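A generic sketch, not taken from the patch, of the fallback that the missing #include supports: on platforms without 64-bit atomic builtins, a 64-bit counter can be protected by a mutex instead. The _demo names and the use of POSIX threads are assumptions made only for this illustration; InnoDB declares an ib_mutex_t (hence the need for sync0types.h) rather than a pthread mutex.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* A 64-bit counter and the mutex that stands in for atomic access. */
static uint64_t counter_demo;
static pthread_mutex_t counter_mutex_demo = PTHREAD_MUTEX_INITIALIZER;

static void counter_add_demo(uint64_t n)
{
	pthread_mutex_lock(&counter_mutex_demo);
	counter_demo += n;
	pthread_mutex_unlock(&counter_mutex_demo);
}

int main(void)
{
	counter_add_demo(1);
	printf("%llu\n", (unsigned long long) counter_demo);
	return 0;
}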
--- storage/innobase/include/srv0mon.h | 1 + 1 file changed, 1 insertion(+) (limited to 'storage') diff --git a/storage/innobase/include/srv0mon.h b/storage/innobase/include/srv0mon.h index 2d5ff22cc21..316a467b6ff 100644 --- a/storage/innobase/include/srv0mon.h +++ b/storage/innobase/include/srv0mon.h @@ -644,6 +644,7 @@ Use MONITOR_DEC if appropriate mutex protection exists. # define srv_mon_create() ((void) 0) # define srv_mon_free() ((void) 0) #else /* HAVE_ATOMIC_BUILTINS_64 */ +# include "sync0types.h" /** Mutex protecting atomic operations on platforms that lack built-in operations for atomic memory access */ extern ib_mutex_t monitor_mutex; -- cgit v1.2.1 From e813fe862226554cfe31754b3dfeafbb2b9a7159 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Jun 2017 23:04:01 +0300 Subject: MDEV-13084 MariaDB 10.2 crashes on corrupted SYS_TABLES.MIX_LEN field innodb.row_format_redundant: Really corrupt the SYS_TABLES.MIX_LEN, and do not use any debug instrumentation. For tables created in the system tablespace, the contents of the column will be ignored. Only the table t1 will refuse to load. dict_load_table_one(): Remove the DBUG_EXECUTE_IF instrumentation. Omit a redundant error message "incorrect flags in SYS_TABLES". dict_sys_tables_rec_read(): Partially revert the Oracle Bug#21644827 fix, and always report errors by the return value. fts_create_in_mem_aux_table(): Do not rely on dict_table_t::flags2, but instead evaluate the tablespace ID. DICT_TF2_BITS: Reduce to the correct value of 7. The two extra high-order bits were specific to MySQL 5.7. --- storage/innobase/dict/dict0load.cc | 89 +++++++++++++++++-------------------- storage/innobase/fts/fts0fts.cc | 19 ++------ storage/innobase/include/dict0mem.h | 7 +-- 3 files changed, 45 insertions(+), 70 deletions(-) (limited to 'storage') diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index ba1fbc5eb5d..0ca9ebfc622 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -185,17 +185,6 @@ dict_load_field_low( for temporary storage */ const rec_t* rec); /*!< in: SYS_FIELDS record */ -/** Load a table definition from a SYS_TABLES record to dict_table_t. -Do not load any columns or indexes. -@param[in] name Table name -@param[in] rec SYS_TABLES record -@param[out,own] table table, or NULL -@return error message -@retval NULL on success */ -static -const char* -dict_load_table_low(table_name_t& name, const rec_t* rec, dict_table_t** table); - /* If this flag is TRUE, then we will load the cluster index's (and tables') metadata even if it is marked as "corrupted". */ my_bool srv_load_corrupted; @@ -1149,6 +1138,7 @@ dict_sys_tablespaces_rec_read( @param[out] flags Pointer to table flags @param[out] flags2 Pointer to table flags2 @return true if the record was read correctly, false if not. */ +MY_ATTRIBUTE((warn_unused_result)) static bool dict_sys_tables_rec_read( @@ -1164,8 +1154,6 @@ dict_sys_tables_rec_read( ulint len; ulint type; - *flags2 = 0; - field = rec_get_nth_field_old( rec, DICT_FLD__SYS_TABLES__ID, &len); ut_ad(len == 8); @@ -1202,22 +1190,40 @@ dict_sys_tables_rec_read( " data dictionary contains invalid flags." 
" SYS_TABLES.TYPE=" << type << " SYS_TABLES.N_COLS=" << *n_cols; - *flags = ULINT_UNDEFINED; return(false); } *flags = dict_sys_tables_type_to_tf(type, *n_cols); - /* Get flags2 from SYS_TABLES.MIX_LEN */ - field = rec_get_nth_field_old( - rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len); - *flags2 = mach_read_from_4(field); + /* For tables created before MySQL 4.1, there may be + garbage in SYS_TABLES.MIX_LEN where flags2 are found. Such tables + would always be in ROW_FORMAT=REDUNDANT which do not have the + high bit set in n_cols, and flags would be zero. + MySQL 4.1 was the first version to support innodb_file_per_table, + that is, *space_id != 0. */ + if (*flags != 0 || *space_id != 0 || *n_cols & DICT_N_COLS_COMPACT) { - /* DICT_TF2_FTS will be set when indexes are being loaded */ - *flags2 &= ~DICT_TF2_FTS; + /* Get flags2 from SYS_TABLES.MIX_LEN */ + field = rec_get_nth_field_old( + rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len); + *flags2 = mach_read_from_4(field); + + if (!dict_tf2_is_valid(*flags, *flags2)) { + ib::error() << "Table " << table_name << " in InnoDB" + " data dictionary contains invalid flags." + " SYS_TABLES.MIX_LEN=" << *flags2; + return(false); + } + + /* DICT_TF2_FTS will be set when indexes are being loaded */ + *flags2 &= ~DICT_TF2_FTS; + + /* Now that we have used this bit, unset it. */ + *n_cols &= ~DICT_N_COLS_COMPACT; + } else { + *flags2 = 0; + } - /* Now that we have used this bit, unset it. */ - *n_cols &= ~DICT_N_COLS_COMPACT; return(true); } @@ -1280,11 +1286,10 @@ dict_check_sys_tables( ("name: %p, '%s'", table_name.m_name, table_name.m_name)); - dict_sys_tables_rec_read(rec, table_name, - &table_id, &space_id, - &n_cols, &flags, &flags2); - if (flags == ULINT_UNDEFINED - || is_system_tablespace(space_id)) { + if (!dict_sys_tables_rec_read(rec, table_name, + &table_id, &space_id, + &n_cols, &flags, &flags2) + || space_id == TRX_SYS_SPACE) { ut_free(table_name.m_name); continue; } @@ -2091,6 +2096,8 @@ func_exit: static const char* dict_load_index_del = "delete-marked record in SYS_INDEXES"; /** Error message for table->id mismatch in dict_load_index_low() */ static const char* dict_load_index_id_err = "SYS_INDEXES.TABLE_ID mismatch"; +/** Error message for SYS_TABLES flags mismatch in dict_load_table_low() */ +static const char* dict_load_table_flags = "incorrect flags in SYS_TABLES"; /** Load an index definition from a SYS_INDEXES record to dict_index_t. If allocate=TRUE, we will create a dict_index_t structure and fill it @@ -2539,11 +2546,9 @@ dict_load_table_low(table_name_t& name, const rec_t* rec, dict_table_t** table) return(error_text); } - dict_sys_tables_rec_read(rec, name, &table_id, &space_id, - &t_num, &flags, &flags2); - - if (flags == ULINT_UNDEFINED) { - return("incorrect flags in SYS_TABLES"); + if (!dict_sys_tables_rec_read(rec, name, &table_id, &space_id, + &t_num, &flags, &flags2)) { + return(dict_load_table_flags); } dict_table_decode_n_col(t_num, &n_cols, &n_v_col); @@ -2857,8 +2862,9 @@ err_exit: err_msg = dict_load_table_low(name, rec, &table); if (err_msg) { - - ib::error() << err_msg; + if (err_msg != dict_load_table_flags) { + ib::error() << err_msg; + } goto err_exit; } @@ -2914,21 +2920,6 @@ err_exit: } } - /* We don't trust the table->flags2(retrieved from SYS_TABLES.MIX_LEN - field) if the datafiles are from 3.23.52 version. To identify this - version, we do the below check and reset the flags. 
*/ - if (!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID) - && table->space == srv_sys_space.space_id() - && table->flags == 0) { - table->flags2 = 0; - } - - DBUG_EXECUTE_IF("ib_table_invalid_flags", - if(strcmp(table->name.m_name, "test/t1") == 0) { - table->flags2 = 255; - table->flags = 255; - }); - if (!dict_tf2_is_valid(table->flags, table->flags2)) { ib::error() << "Table " << table->name << " in InnoDB" " data dictionary contains invalid flags." diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 1cce58dd9c5..60cc3c91fef 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1715,21 +1715,6 @@ fts_drop_tables( return(error); } -/** Extract only the required flags from table->flags2 for FTS Aux -tables. -@param[in] in_flags2 Table flags2 -@return extracted flags2 for FTS aux tables */ -static inline -ulint -fts_get_table_flags2_for_aux_tables( - ulint flags2) -{ - /* Extract the file_per_table flag & temporary file flag - from the main FTS table flags2 */ - return((flags2 & DICT_TF2_USE_FILE_PER_TABLE) | - (flags2 & DICT_TF2_TEMPORARY)); -} - /** Create dict_table_t object for FTS Aux tables. @param[in] aux_table_name FTS Aux table name @param[in] table table object of FTS Index @@ -1744,7 +1729,9 @@ fts_create_in_mem_aux_table( { dict_table_t* new_table = dict_mem_table_create( aux_table_name, table->space, n_cols, 0, table->flags, - fts_get_table_flags2_for_aux_tables(table->flags2)); + table->space == TRX_SYS_SPACE + ? 0 : table->space == SRV_TMP_SPACE_ID + ? DICT_TF2_TEMPORARY : DICT_TF2_USE_FILE_PER_TABLE); if (DICT_TF_HAS_DATA_DIR(table->flags)) { ut_ad(table->data_dir_path != NULL); diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index 87f415c8a04..42396f089b2 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -294,9 +294,8 @@ ROW_FORMAT=REDUNDANT. InnoDB engines do not check these flags for unknown bits in order to protect backward incompatibility. */ /* @{ */ /** Total number of bits in table->flags2. */ -#define DICT_TF2_BITS 9 -#define DICT_TF2_UNUSED_BIT_MASK (~0U << DICT_TF2_BITS | \ - 1U << DICT_TF_POS_SHARED_SPACE) +#define DICT_TF2_BITS 7 +#define DICT_TF2_UNUSED_BIT_MASK (~0U << DICT_TF2_BITS) #define DICT_TF2_BIT_MASK ~DICT_TF2_UNUSED_BIT_MASK /** TEMPORARY; TRUE for tables from CREATE TEMPORARY TABLE. */ @@ -1413,8 +1412,6 @@ struct dict_table_t { 5 whether the table is being created its own tablespace, 6 whether the table has been DISCARDed, 7 whether the aux FTS tables names are in hex. - 8 whether the table is instinc table. - 9 whether the table has encryption setting. Use DICT_TF2_FLAG_IS_SET() to parse this flag. */ unsigned flags2:DICT_TF2_BITS; -- cgit v1.2.1 From 58f87a41bd8de7370cc05c41977fadc685826c9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 15 Jun 2017 12:41:02 +0300 Subject: Remove some fields from dict_table_t dict_table_t::thd: Remove. This was only used by btr_root_block_get() for reporting decryption failures, and it was only assigned by ha_innobase::open(), and never cleared. This could mean that if a connection is closed, the pointer would become stale, and the server could crash while trying to report the error. It could also mean that an error is being reported to the wrong client. 
It is better to use current_thd in this case, even though it could mean that if the code is invoked from an InnoDB background operation, there would be no connection to which to send the error message. Remove dict_table_t::crypt_data and dict_table_t::page_0_read. These fields were never read. fil_open_single_table_tablespace(): Remove the parameter "table". --- storage/innobase/btr/btr0btr.cc | 3 ++- storage/innobase/dict/dict0load.cc | 4 ++-- storage/innobase/fil/fil0fil.cc | 18 +----------------- storage/innobase/handler/ha_innodb.cc | 2 -- storage/innobase/include/dict0mem.h | 4 ---- storage/innobase/include/fil0fil.h | 3 +-- storage/innobase/row/row0import.cc | 2 +- storage/innobase/row/row0mysql.cc | 12 ------------ storage/xtradb/btr/btr0btr.cc | 3 ++- storage/xtradb/dict/dict0load.cc | 4 ++-- storage/xtradb/fil/fil0fil.cc | 18 +----------------- storage/xtradb/handler/ha_innodb.cc | 2 -- storage/xtradb/include/dict0mem.h | 4 ---- storage/xtradb/include/fil0fil.h | 3 +-- storage/xtradb/row/row0import.cc | 2 +- storage/xtradb/row/row0mysql.cc | 12 ------------ 16 files changed, 14 insertions(+), 82 deletions(-) (limited to 'storage') diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index b7afcf12b39..c519baef58c 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -740,7 +740,8 @@ btr_root_block_get( if (index && index->table) { index->table->file_unreadable = true; - ib_push_warning(index->table->thd, DB_DECRYPTION_FAILED, + ib_push_warning( + static_cast(NULL), DB_DECRYPTION_FAILED, "Table %s in tablespace %lu is encrypted but encryption service or" " used key_id is not available. " " Can't continue reading table.", diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index a1d18690476..4a2b6d43bbd 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -1173,7 +1173,7 @@ loop: dberr_t err = fil_open_single_table_tablespace( read_page_0, srv_read_only_mode ? false : true, space_id, dict_tf_to_fsp_flags(flags), - name, filepath, NULL); + name, filepath); if (err != DB_SUCCESS) { ib_logf(IB_LOG_LEVEL_ERROR, @@ -2415,7 +2415,7 @@ err_exit: err = fil_open_single_table_tablespace( true, false, table->space, dict_tf_to_fsp_flags(table->flags), - name, filepath, table); + name, filepath); if (err != DB_SUCCESS) { /* We failed to find a sensible diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index ffb01312fdb..d78a8b20f33 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -4036,8 +4036,7 @@ fil_open_single_table_tablespace( ulint flags, /*!< in: expected FSP_SPACE_FLAGS */ const char* tablename, /*!< in: table name in the databasename/tablename format */ - const char* path_in, /*!< in: tablespace filepath */ - dict_table_t* table) /*!< in: table */ + const char* path_in) /*!< in: table */ { dberr_t err = DB_SUCCESS; bool dict_filepath_same_as_default = false; @@ -4147,11 +4146,6 @@ fil_open_single_table_tablespace( #endif /* UNIV_LOG_ARCHIVE */ NULL, &def.crypt_data); - if (table) { - table->crypt_data = def.crypt_data; - table->page_0_read = true; - } - def.valid = !def.check_msg && def.id == id && fsp_flags_match(flags, def.flags); @@ -4174,11 +4168,6 @@ fil_open_single_table_tablespace( #endif /* UNIV_LOG_ARCHIVE */ NULL, &remote.crypt_data); - if (table) { - table->crypt_data = remote.crypt_data; - table->page_0_read = true; - } - /* Validate this single-table-tablespace with SYS_TABLES. 
*/ remote.valid = !remote.check_msg && remote.id == id && fsp_flags_match(flags, remote.flags); @@ -4203,11 +4192,6 @@ fil_open_single_table_tablespace( #endif /* UNIV_LOG_ARCHIVE */ NULL, &dict.crypt_data); - if (table) { - table->crypt_data = dict.crypt_data; - table->page_0_read = true; - } - /* Validate this single-table-tablespace with SYS_TABLES. */ dict.valid = !dict.check_msg && dict.id == id && fsp_flags_match(flags, dict.flags); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index bd89744c1f0..6c43e637318 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -5860,8 +5860,6 @@ table_opened: innobase_copy_frm_flags_from_table_share(ib_table, table->s); - ib_table->thd = (void*)thd; - /* No point to init any statistics if tablespace is still encrypted. */ if (ib_table->is_readable()) { dict_stats_init(ib_table); diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index cea75d6ab47..1c0f4d73006 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -1032,10 +1032,6 @@ struct dict_table_t{ table_id_t id; /*!< id of the table */ mem_heap_t* heap; /*!< memory heap */ char* name; /*!< table name */ - void* thd; /*!< thd */ - bool page_0_read; /*!< true if page 0 has - been already read */ - fil_space_crypt_t *crypt_data; /*!< crypt data if present */ const char* dir_path_of_temp_table;/*!< NULL or the directory path where a TEMPORARY table that was explicitly created by a user should be placed if diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index e16c7cb102e..8033d0cc839 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -1045,8 +1045,7 @@ fil_open_single_table_tablespace( ulint flags, /*!< in: expected FSP_SPACE_FLAGS */ const char* tablename, /*!< in: table name in the databasename/tablename format */ - const char* filepath, /*!< in: tablespace filepath */ - dict_table_t* table) /*!< in: table */ + const char* filepath) /*!< in: tablespace filepath */ __attribute__((nonnull(5), warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index f0db79be17e..43830725978 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -3617,7 +3617,7 @@ row_import_for_mysql( err = fil_open_single_table_tablespace( true, true, table->space, dict_tf_to_fsp_flags(table->flags), - table->name, filepath, table); + table->name, filepath); DBUG_EXECUTE_IF("ib_import_open_tablespace_failure", err = DB_TABLESPACE_NOT_FOUND;); diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 82938995e93..1d64d66cd49 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -4242,18 +4242,6 @@ row_drop_table_for_mysql( rw_lock_x_unlock(dict_index_get_lock(index)); } - /* If table has not yet have crypt_data, try to read it to - make freeing the table easier. */ - if (!table->crypt_data) { - - if (fil_space_t* space = fil_space_acquire_silent(table->space)) { - /* We use crypt data in dict_table_t in ha_innodb.cc - to push warnings to user thread. */ - table->crypt_data = space->crypt_data; - fil_space_release(space); - } - } - /* We use the private SQL parser of Innobase to generate the query graphs needed in deleting the dictionary data from system tables in Innobase. 
Deleting a row from SYS_INDEXES table also diff --git a/storage/xtradb/btr/btr0btr.cc b/storage/xtradb/btr/btr0btr.cc index e66599e206d..85a083aaee0 100644 --- a/storage/xtradb/btr/btr0btr.cc +++ b/storage/xtradb/btr/btr0btr.cc @@ -745,7 +745,8 @@ btr_root_block_get( if (index && index->table) { index->table->file_unreadable = true; - ib_push_warning(index->table->thd, DB_DECRYPTION_FAILED, + ib_push_warning( + static_cast(NULL), DB_DECRYPTION_FAILED, "Table %s in tablespace %lu is encrypted but encryption service or" " used key_id is not available. " " Can't continue reading table.", diff --git a/storage/xtradb/dict/dict0load.cc b/storage/xtradb/dict/dict0load.cc index dc3f9c85bba..6171bbd80d0 100644 --- a/storage/xtradb/dict/dict0load.cc +++ b/storage/xtradb/dict/dict0load.cc @@ -1193,7 +1193,7 @@ loop: dberr_t err = fil_open_single_table_tablespace( read_page_0, srv_read_only_mode ? false : true, space_id, dict_tf_to_fsp_flags(flags), - name, filepath, NULL); + name, filepath); if (err != DB_SUCCESS) { ib_logf(IB_LOG_LEVEL_ERROR, @@ -2437,7 +2437,7 @@ err_exit: err = fil_open_single_table_tablespace( true, false, table->space, dict_tf_to_fsp_flags(table->flags), - name, filepath, table); + name, filepath); if (err != DB_SUCCESS) { /* We failed to find a sensible diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index 12048bc479f..fdd09a6034e 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -4229,8 +4229,7 @@ fil_open_single_table_tablespace( ulint flags, /*!< in: expected FSP_SPACE_FLAGS */ const char* tablename, /*!< in: table name in the databasename/tablename format */ - const char* path_in, /*!< in: tablespace filepath */ - dict_table_t* table) /*!< in: table */ + const char* path_in) /*!< in: table */ { dberr_t err = DB_SUCCESS; bool dict_filepath_same_as_default = false; @@ -4339,11 +4338,6 @@ fil_open_single_table_tablespace( def.file, false, &def.flags, &def.id, NULL, &def.crypt_data); - if (table) { - table->crypt_data = def.crypt_data; - table->page_0_read = true; - } - def.valid = !def.check_msg && def.id == id && fsp_flags_match(flags, def.flags); @@ -4363,11 +4357,6 @@ fil_open_single_table_tablespace( remote.file, false, &remote.flags, &remote.id, NULL, &remote.crypt_data); - if (table) { - table->crypt_data = remote.crypt_data; - table->page_0_read = true; - } - /* Validate this single-table-tablespace with SYS_TABLES. */ remote.valid = !remote.check_msg && remote.id == id && fsp_flags_match(flags, remote.flags); @@ -4389,11 +4378,6 @@ fil_open_single_table_tablespace( dict.file, false, &dict.flags, &dict.id, NULL, &dict.crypt_data); - if (table) { - table->crypt_data = dict.crypt_data; - table->page_0_read = true; - } - /* Validate this single-table-tablespace with SYS_TABLES. */ dict.valid = !dict.check_msg && dict.id == id && fsp_flags_match(flags, dict.flags); diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index ee1e306b441..eb02ed6699e 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -6429,8 +6429,6 @@ table_opened: innobase_copy_frm_flags_from_table_share(ib_table, table->s); - ib_table->thd = (void*)thd; - /* No point to init any statistics if tablespace is still encrypted. 
*/ if (ib_table->is_readable()) { dict_stats_init(ib_table); diff --git a/storage/xtradb/include/dict0mem.h b/storage/xtradb/include/dict0mem.h index a32581a0e90..2a4422fc18b 100644 --- a/storage/xtradb/include/dict0mem.h +++ b/storage/xtradb/include/dict0mem.h @@ -1048,10 +1048,6 @@ struct dict_table_t{ table_id_t id; /*!< id of the table */ mem_heap_t* heap; /*!< memory heap */ char* name; /*!< table name */ - void* thd; /*!< thd */ - bool page_0_read; /*!< true if page 0 has - been already read */ - fil_space_crypt_t *crypt_data; /*!< crypt data if present */ const char* dir_path_of_temp_table;/*!< NULL or the directory path where a TEMPORARY table that was explicitly created by a user should be placed if diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index a09833c3a73..6eab5db6883 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -1043,8 +1043,7 @@ fil_open_single_table_tablespace( ulint flags, /*!< in: expected FSP_SPACE_FLAGS */ const char* tablename, /*!< in: table name in the databasename/tablename format */ - const char* filepath, /*!< in: tablespace filepath */ - dict_table_t* table) /*!< in: table */ + const char* filepath) /*!< in: tablespace filepath */ __attribute__((nonnull(5), warn_unused_result)); #endif /* !UNIV_HOTBACKUP */ diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 2f7aece665a..86b2d782b7b 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -3618,7 +3618,7 @@ row_import_for_mysql( err = fil_open_single_table_tablespace( true, true, table->space, dict_tf_to_fsp_flags(table->flags), - table->name, filepath, table); + table->name, filepath); DBUG_EXECUTE_IF("ib_import_open_tablespace_failure", err = DB_TABLESPACE_NOT_FOUND;); diff --git a/storage/xtradb/row/row0mysql.cc b/storage/xtradb/row/row0mysql.cc index 59568f5c91b..0079fc79a0e 100644 --- a/storage/xtradb/row/row0mysql.cc +++ b/storage/xtradb/row/row0mysql.cc @@ -4272,18 +4272,6 @@ row_drop_table_for_mysql( rw_lock_x_unlock(dict_index_get_lock(index)); } - /* If table has not yet have crypt_data, try to read it to - make freeing the table easier. */ - if (!table->crypt_data) { - - if (fil_space_t* space = fil_space_acquire_silent(table->space)) { - /* We use crypt data in dict_table_t in ha_innodb.cc - to push warnings to user thread. */ - table->crypt_data = space->crypt_data; - fil_space_release(space); - } - } - /* We use the private SQL parser of Innobase to generate the query graphs needed in deleting the dictionary data from system tables in Innobase. Deleting a row from SYS_INDEXES table also -- cgit v1.2.1 From 72378a25830184f91005be7e80cfb28381c79f23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 14 Jun 2017 14:08:49 +0300 Subject: MDEV-12873 InnoDB SYS_TABLES.TYPE incompatibility for PAGE_COMPRESSED=YES in MariaDB 10.2.2 to 10.2.6 Remove the SHARED_SPACE flag that was erroneously introduced in MariaDB 10.2.2, and shift the SYS_TABLES.TYPE flags back to where they were before MariaDB 10.2.2. While doing this, ensure that tables created with affected MariaDB versions can be loaded, and also ensure that tables created with MySQL 5.7 using the TABLESPACE attribute cannot be loaded. MariaDB 10.2.2 picked the SHARED_SPACE flag from MySQL 5.7, shifting the MariaDB 10.1 flags PAGE_COMPRESSION, PAGE_COMPRESSION_LEVEL, ATOMIC_WRITES by one bit. 
The SHARED_SPACE flag would always be written as 0 by MariaDB, because MariaDB does not support CREATE TABLESPACE or CREATE TABLE...TABLESPACE for InnoDB. So, instead of the bits AALLLLCxxxxxxx we would have AALLLLC0xxxxxxx if the table was created with MariaDB 10.2.2 to 10.2.6. (AA=ATOMIC_WRITES, LLLL=PAGE_COMPRESSION_LEVEL, C=PAGE_COMPRESSED, xxxxxxx=7 bits that were not moved.) PAGE_COMPRESSED=NO implies LLLLC=00000. That is not a problem. If someone created a table in MariaDB 10.2.2 or 10.2.3 with the attribute ATOMIC_WRITES=OFF (value 2; AA=10) and without PAGE_COMPRESSED=YES or PAGE_COMPRESSION_LEVEL, the table should be rejected. We ignore this problem, because it should be unlikely for anyone to specify ATOMIC_WRITES=OFF, and because 10.2.2 and 10.2.2 were not mature releases. The value ATOMIC_WRITES=ON (1) would be interpreted as ATOMIC_WRITES=OFF, but starting with MariaDB 10.2.4 the ATOMIC_WRITES attribute is ignored. PAGE_COMPRESSED=YES implies that PAGE_COMPRESSION_LEVEL be between 1 and 9 and that ROW_FORMAT be COMPACT or DYNAMIC. Thus, the affected wrong bit pattern in SYS_TABLES.TYPE is of the form AALLLL10DB00001 where D signals the presence of a DATA DIRECTORY attribute and B is 1 for ROW_FORMAT=DYNAMIC and 0 for ROW_FORMAT=COMPACT. We must interpret this bit pattern as AALLLL1DB00001 (discarding the extraneous 0 bit). dict_sys_tables_rec_read(): Adjust the affected bit pattern when reading the SYS_TABLES.TYPE column. In case of invalid flags, report both SYS_TABLES.TYPE (after possible adjustment) and SYS_TABLES.MIX_LEN. dict_load_table_one(): Replace an unreachable condition on !dict_tf2_is_valid() with a debug assertion. The flags will already have been validated by dict_sys_tables_rec_read(); if that validation fails, dict_load_table_low() will have failed. fil_ibd_create(): Shorten an error message about a file pre-existing. Datafile::validate_to_dd(): Clarify an error message about tablespace flags mismatch. ha_innobase::open(): Remove an unnecessary warning message. dict_tf_is_valid(): Simplify and stricten the logic. Validate the values of PAGE_COMPRESSION. Remove error log output; let the callers handle that. DICT_TF_BITS: Remove ATOMIC_WRITES, PAGE_ENCRYPTION, PAGE_ENCRYPTION_KEY. The ATOMIC_WRITES is ignored once the SYS_TABLES.TYPE has been validated; there is no need to store it in dict_table_t::flags. The PAGE_ENCRYPTION and PAGE_ENCRYPTION_KEY are unused since MariaDB 10.1.4 (the GA release was 10.1.8). DICT_TF_BIT_MASK: Remove (unused). FSP_FLAGS_MEM_ATOMIC_WRITES: Remove (the flags are never read). row_import_read_v1(): Display an error if dict_tf_is_valid() fails. 
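A worked example of the bit adjustment described above, written as a small standalone C program rather than InnoDB code. The two masks (0x19f, 0x101) and the adjustment expression are the ones the patch adds to dict_sys_tables_rec_read(); the patch additionally range-checks the level bits, which is omitted here. The sample value is an assumption chosen for illustration: ROW_FORMAT=DYNAMIC, PAGE_COMPRESSED=YES, PAGE_COMPRESSION_LEVEL=6, no DATA DIRECTORY, as written by MariaDB 10.2.2 to 10.2.6.

#include <stdio.h>

int main(void)
{
	/* AALLLL10DB00001 with AA=00, LLLL=0110 (level 6), D=0, B=1:
	   bits 0 and 5 are as in 10.1, but PAGE_COMPRESSED sits at bit 8
	   and the compression level at bits 9..12 because of the unused
	   SHARED_SPACE bit at position 7. */
	unsigned type = 0xd21;

	/* The affected pattern has exactly bits 0 and 8 set within the
	   low-order mask 0x19f. */
	if ((type & 0x19f) == 0x101) {
		/* Keep the low 7 bits and shift the rest down by one,
		   discarding the extraneous 0 bit at position 7. */
		type = (type & 0x7fU) | (type >> 1 & ~0x7fU);
	}

	/* Prints 0x6a1: PAGE_COMPRESSED at bit 7 and
	   PAGE_COMPRESSION_LEVEL=6 at bits 8..11, matching the
	   10.1/10.2.7 format that dict_tf_is_valid() accepts. */
	printf("0x%x\n", type);
	return 0;
}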
--- storage/innobase/dict/dict0load.cc | 98 ++++++++++++++++++++++++++++------- storage/innobase/fil/fil0fil.cc | 10 +--- storage/innobase/fsp/fsp0file.cc | 11 ++-- storage/innobase/handler/ha_innodb.cc | 4 -- storage/innobase/include/dict0dict.ic | 96 +++++++++++++--------------------- storage/innobase/include/dict0mem.h | 50 ++---------------- storage/innobase/include/fsp0types.h | 12 +---- storage/innobase/row/row0import.cc | 16 +++--- 8 files changed, 135 insertions(+), 162 deletions(-) (limited to 'storage') diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index 0ca9ebfc622..5c8449512e6 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -1170,6 +1170,80 @@ dict_sys_tables_rec_read( ut_a(len == 4); type = mach_read_from_4(field); + /* Handle MDEV-12873 InnoDB SYS_TABLES.TYPE incompatibility + for PAGE_COMPRESSED=YES in MariaDB 10.2.2 to 10.2.6. + + MariaDB 10.2.2 introduced the SHARED_SPACE flag from MySQL 5.7, + shifting the flags PAGE_COMPRESSION, PAGE_COMPRESSION_LEVEL, + ATOMIC_WRITES by one bit. The SHARED_SPACE flag would always + be written as 0 by MariaDB, because MariaDB does not support + CREATE TABLESPACE or CREATE TABLE...TABLESPACE for InnoDB. + + So, instead of the bits AALLLLCxxxxxxx we would have + AALLLLC0xxxxxxx if the table was created with MariaDB 10.2.2 + to 10.2.6. (AA=ATOMIC_WRITES, LLLL=PAGE_COMPRESSION_LEVEL, + C=PAGE_COMPRESSED, xxxxxxx=7 bits that were not moved.) + + The case LLLLC=00000 is not a problem. The problem is the case + AALLLL10DB00001 where D is the (mostly ignored) DATA_DIRECTORY + flag and B is the ATOMIC_BLOBS flag (1 for ROW_FORMAT=DYNAMIC + and 0 for ROW_FORMAT=COMPACT in this case). Other low-order + bits must be so, because PAGE_COMPRESSED=YES is only allowed + for ROW_FORMAT=DYNAMIC and ROW_FORMAT=COMPACT, not for + ROW_FORMAT=REDUNDANT or ROW_FORMAT=COMPRESSED. + + Starting with MariaDB 10.2.4, the flags would be + 00LLLL10DB00001, because ATOMIC_WRITES is always written as 0. + + We will concentrate on the PAGE_COMPRESSION_LEVEL and + PAGE_COMPRESSED=YES. PAGE_COMPRESSED=NO implies + PAGE_COMPRESSION_LEVEL=0, and in that case all the affected + bits will be 0. For PAGE_COMPRESSED=YES, the values 1..9 are + allowed for PAGE_COMPRESSION_LEVEL. That is, we must interpret + the bits AALLLL10DB00001 as AALLLL1DB00001. + + If someone created a table in MariaDB 10.2.2 or 10.2.3 with + the attribute ATOMIC_WRITES=OFF (value 2) and without + PAGE_COMPRESSED=YES or PAGE_COMPRESSION_LEVEL, that should be + rejected. The value ATOMIC_WRITES=ON (1) would look like + ATOMIC_WRITES=OFF, but it would be ignored starting with + MariaDB 10.2.4. */ + compile_time_assert(DICT_TF_POS_PAGE_COMPRESSION == 7); + compile_time_assert(DICT_TF_POS_UNUSED == 14); + + if ((type & 0x19f) != 0x101) { + /* The table cannot have been created with MariaDB + 10.2.2 to 10.2.6, because they would write the + low-order bits of SYS_TABLES.TYPE as 0b10xx00001 for + PAGE_COMPRESSED=YES. No adjustment is applicable. */ + } else if (type >= 3 << 13) { + /* 10.2.2 and 10.2.3 write ATOMIC_WRITES less than 3, + and no other flags above that can be set for the + SYS_TABLES.TYPE to be in the 10.2.2..10.2.6 format. + This would in any case be invalid format for 10.2 and + earlier releases. */ + ut_ad(ULINT_UNDEFINED == dict_sys_tables_type_validate( + type, DICT_N_COLS_COMPACT)); + } else { + /* SYS_TABLES.TYPE is of the form AALLLL10DB00001. 
We + must still validate that the LLLL bits are between 0 + and 9 before we can discard the extraneous 0 bit. */ + ut_ad(!DICT_TF_GET_PAGE_COMPRESSION(type)); + + if ((((type >> 9) & 0xf) - 1) < 9) { + ut_ad(DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type) & 1); + + type = (type & 0x7fU) | (type >> 1 & ~0x7fU); + + ut_ad(DICT_TF_GET_PAGE_COMPRESSION(type)); + ut_ad(DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type) >= 1); + ut_ad(DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type) <= 9); + } else { + ut_ad(ULINT_UNDEFINED == dict_sys_tables_type_validate( + type, DICT_N_COLS_COMPACT)); + } + } + /* The low order bit of SYS_TABLES.TYPE is always set to 1. But in dict_table_t::flags the low order bit is used to determine if the row format is Redundant (0) or Compact (1) when the format is Antelope. @@ -1211,7 +1285,8 @@ dict_sys_tables_rec_read( if (!dict_tf2_is_valid(*flags, *flags2)) { ib::error() << "Table " << table_name << " in InnoDB" " data dictionary contains invalid flags." - " SYS_TABLES.MIX_LEN=" << *flags2; + " SYS_TABLES.TYPE=" << type + << " SYS_TABLES.MIX_LEN=" << *flags2; return(false); } @@ -2541,8 +2616,7 @@ dict_load_table_low(table_name_t& name, const rec_t* rec, dict_table_t** table) ulint flags2; ulint n_v_col; - const char* error_text = dict_sys_tables_rec_check(rec); - if (error_text != NULL) { + if (const char* error_text = dict_sys_tables_rec_check(rec)) { return(error_text); } @@ -2802,7 +2876,6 @@ dict_load_table_one( const rec_t* rec; const byte* field; ulint len; - const char* err_msg; mtr_t mtr; DBUG_ENTER("dict_load_table_one"); @@ -2859,9 +2932,7 @@ err_exit: goto err_exit; } - err_msg = dict_load_table_low(name, rec, &table); - - if (err_msg) { + if (const char* err_msg = dict_load_table_low(name, rec, &table)) { if (err_msg != dict_load_table_flags) { ib::error() << err_msg; } @@ -2885,6 +2956,8 @@ err_exit: mem_heap_empty(heap); + ut_ad(dict_tf2_is_valid(table->flags, table->flags2)); + /* If there is no tablespace for the table then we only need to load the index definitions. So that we can IMPORT the tablespace later. When recovering table locks for resurrected incomplete @@ -2920,17 +2993,6 @@ err_exit: } } - if (!dict_tf2_is_valid(table->flags, table->flags2)) { - ib::error() << "Table " << table->name << " in InnoDB" - " data dictionary contains invalid flags." - " SYS_TABLES.MIX_LEN=" << table->flags2; - table->flags2 &= ~DICT_TF2_TEMPORARY; - dict_table_remove_from_cache(table); - table = NULL; - err = DB_FAIL; - goto func_exit; - } - /* Initialize table foreign_child value. Its value could be changed when dict_load_foreigns() is called below */ table->fk_max_recusive_level = 0; diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 2cbc863102c..cc03524dbbc 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -3819,18 +3819,12 @@ fil_ibd_create( ib::error() << "Cannot create file '" << path << "'"; if (error == OS_FILE_ALREADY_EXISTS) { - ib::error() << "The file '" << path << "'" + ib::info() << "The file '" << path << "'" " already exists though the" " corresponding table did not exist" " in the InnoDB data dictionary." - " Have you moved InnoDB .ibd files" - " around without using the SQL commands" - " DISCARD TABLESPACE and IMPORT TABLESPACE," - " or did mysqld crash in the middle of" - " CREATE TABLE?" 
" You can resolve the problem by removing" - " the file '" << path - << "' under the 'datadir' of MySQL."; + " the file."; return(DB_TABLESPACE_EXISTS); } diff --git a/storage/innobase/fsp/fsp0file.cc b/storage/innobase/fsp/fsp0file.cc index b8ad49a254f..346962567d6 100644 --- a/storage/innobase/fsp/fsp0file.cc +++ b/storage/innobase/fsp/fsp0file.cc @@ -425,13 +425,10 @@ Datafile::validate_to_dd(ulint space_id, ulint flags) /* else do not use this tablespace. */ m_is_valid = false; - ib::error() << "In file '" << m_filepath << "', tablespace id and" - " flags are " << m_space_id << " and " << ib::hex(m_flags) - << ", but in the InnoDB data dictionary they are " - << space_id << " and " << ib::hex(flags) - << ". Have you moved InnoDB .ibd files around without" - " using the commands DISCARD TABLESPACE and IMPORT TABLESPACE?" - " " << TROUBLESHOOT_DATADICT_MSG; + ib::error() << "Refusing to load '" << m_filepath << "' (id=" + << m_space_id << ", flags=" << ib::hex(m_flags) + << "); dictionary contains id=" + << space_id << ", flags=" << ib::hex(flags); return(DB_ERROR); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 23edecda245..670e170e9cc 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -6624,10 +6624,6 @@ ha_innobase::open( norm_name); } - ib::warn() << "Cannot open table " << norm_name << " from the" - " internal data dictionary of InnoDB though the .frm" - " file for the table exists. " << TROUBLESHOOTING_MSG; - free_share(m_share); set_my_errno(ENOENT); diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index e9507302e66..deb48d0cd31 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -646,74 +646,54 @@ bool dict_tf_is_valid( ulint flags) { - bool compact = DICT_TF_GET_COMPACT(flags); - ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags); - bool atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(flags); - bool data_dir = DICT_TF_HAS_DATA_DIR(flags); - ulint unused = DICT_TF_GET_UNUSED(flags); - bool page_compression = DICT_TF_GET_PAGE_COMPRESSION(flags); - ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(flags); - bool flags_corrupt = false; - /* Make sure there are no bits that we do not know about. */ - if (unused != 0) { - flags_corrupt = true; + if (flags >= 1U << DICT_TF_BITS) { + return(false); } - if (atomic_blobs) { - /* Barracuda row formats COMPRESSED and DYNAMIC both use + const bool not_redundant = DICT_TF_GET_COMPACT(flags); + const bool atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(flags); + + if (atomic_blobs && !not_redundant) { + /* ROW_FORMAT=COMPRESSED and ROW_FORMAT=DYNAMIC both use atomic_blobs, which build on the page structure introduced - for the COMPACT row format by allowing keys in secondary + for ROW_FORMAT=COMPACT by allowing keys in secondary indexes to be made from data stored off-page in the clustered index. */ - - if (!compact) { - flags_corrupt = true; - } - - } else if (zip_ssize) { - /* Antelope does not support COMPRESSED row format. */ - flags_corrupt = true; + return(false); } - if (zip_ssize) { - - /* COMPRESSED row format must have compact and atomic_blobs - bits set and validate the number is within allowed range. 
*/ - - if (!compact - || !atomic_blobs - || zip_ssize > PAGE_ZIP_SSIZE_MAX) { - flags_corrupt = true; - } - } + ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags); - if (page_compression || page_compression_level) { - /* Page compression format must have compact and - atomic_blobs and page_compression_level requires - page_compression */ - if (!compact - || !page_compression - || !atomic_blobs) { - flags_corrupt = true; - } + if (!zip_ssize) { + /* Not ROW_FORMAT=COMPRESSED */ + } else if (!atomic_blobs) { + /* ROW_FORMAT=COMPRESSED implies ROW_FORMAT=DYNAMIC + for the uncompressed page format */ + return(false); + } else if (zip_ssize > PAGE_ZIP_SSIZE_MAX + || zip_ssize > UNIV_PAGE_SIZE_SHIFT + || UNIV_PAGE_SIZE_SHIFT > UNIV_ZIP_SIZE_SHIFT_MAX) { + /* KEY_BLOCK_SIZE is out of bounds, or + ROW_FORMAT=COMPRESSED is not supported with this + innodb_page_size (only up to 16KiB) */ + return(false); } - - if (flags_corrupt) { - ib::error() - << "InnoDB: Error: table unused flags are:" << flags - << " in the data dictionary and are corrupted:" - << " compact:" << compact - << " atomic_blobs:" << atomic_blobs - << " unused:" << unused - << " data_dir:" << data_dir - << " zip_ssize:" << zip_ssize - << " page_compression:" << page_compression - << " page_compression_level:" << page_compression_level; - return (false); - } else { - return(true); + switch (DICT_TF_GET_PAGE_COMPRESSION_LEVEL(flags)) { + case 0: + /* PAGE_COMPRESSION_LEVEL=0 should imply PAGE_COMPRESSED=NO */ + return(!DICT_TF_GET_PAGE_COMPRESSION(flags)); + case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: + /* PAGE_COMPRESSION_LEVEL requires + ROW_FORMAT=COMPACT or ROW_FORMAT=DYNAMIC + (not ROW_FORMAT=COMPRESSED or ROW_FORMAT=REDUNDANT) + and PAGE_COMPRESSED=YES */ + return(!zip_ssize && not_redundant + && DICT_TF_GET_PAGE_COMPRESSION(flags)); + default: + /* Invalid PAGE_COMPRESSION_LEVEL value */ + return(false); } } @@ -978,7 +958,6 @@ dict_tf_to_fsp_flags(ulint table_flags) ulint fsp_flags; ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL( table_flags); - ulint atomic_writes = DICT_TF_GET_ATOMIC_WRITES(table_flags); ut_ad((DICT_TF_GET_PAGE_COMPRESSION(table_flags) == 0) == (page_compression_level == 0)); @@ -1005,7 +984,6 @@ dict_tf_to_fsp_flags(ulint table_flags) fsp_flags |= 1U << FSP_FLAGS_MEM_DATA_DIR; } - fsp_flags |= atomic_writes << FSP_FLAGS_MEM_ATOMIC_WRITES; fsp_flags |= page_compression_level << FSP_FLAGS_MEM_COMPRESSION_LEVEL; return(fsp_flags); diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index 42396f089b2..eb2b7b9d785 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -137,22 +137,12 @@ This flag prevents older engines from attempting to open the table and allows InnoDB to update_create_info() accordingly. */ #define DICT_TF_WIDTH_DATA_DIR 1 -/** Width of the SHARED tablespace flag (Oracle MYSQL 5.7). -Not supported by MariaDB. 
*/ -#define DICT_TF_WIDTH_SHARED_SPACE 1 - /** Width of the page compression flag */ #define DICT_TF_WIDTH_PAGE_COMPRESSION 1 #define DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL 4 -/** -Width of the page encryption flag -*/ -#define DICT_TF_WIDTH_PAGE_ENCRYPTION 1 -#define DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY 8 - /** Width of atomic writes flag DEFAULT=0, ON = 1, OFF = 2 @@ -164,15 +154,8 @@ DEFAULT=0, ON = 1, OFF = 2 + DICT_TF_WIDTH_ZIP_SSIZE \ + DICT_TF_WIDTH_ATOMIC_BLOBS \ + DICT_TF_WIDTH_DATA_DIR \ - + DICT_TF_WIDTH_SHARED_SPACE \ + DICT_TF_WIDTH_PAGE_COMPRESSION \ - + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL \ - + DICT_TF_WIDTH_ATOMIC_WRITES \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY) - -/** A mask of all the known/used bits in table flags */ -#define DICT_TF_BIT_MASK (~(~0U << DICT_TF_BITS)) + + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL) /** Zero relative shift position of the COMPACT field */ #define DICT_TF_POS_COMPACT 0 @@ -185,26 +168,17 @@ DEFAULT=0, ON = 1, OFF = 2 /** Zero relative shift position of the DATA_DIR field */ #define DICT_TF_POS_DATA_DIR (DICT_TF_POS_ATOMIC_BLOBS \ + DICT_TF_WIDTH_ATOMIC_BLOBS) -/** Zero relative shift position of the SHARED TABLESPACE field */ -#define DICT_TF_POS_SHARED_SPACE (DICT_TF_POS_DATA_DIR \ - + DICT_TF_WIDTH_DATA_DIR) /** Zero relative shift position of the PAGE_COMPRESSION field */ -#define DICT_TF_POS_PAGE_COMPRESSION (DICT_TF_POS_SHARED_SPACE \ - + DICT_TF_WIDTH_SHARED_SPACE) +#define DICT_TF_POS_PAGE_COMPRESSION (DICT_TF_POS_DATA_DIR \ + + DICT_TF_WIDTH_DATA_DIR) /** Zero relative shift position of the PAGE_COMPRESSION_LEVEL field */ #define DICT_TF_POS_PAGE_COMPRESSION_LEVEL (DICT_TF_POS_PAGE_COMPRESSION \ + DICT_TF_WIDTH_PAGE_COMPRESSION) /** Zero relative shift position of the ATOMIC_WRITES field */ #define DICT_TF_POS_ATOMIC_WRITES (DICT_TF_POS_PAGE_COMPRESSION_LEVEL \ + DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL) -/** Zero relative shift position of the PAGE_ENCRYPTION field */ -#define DICT_TF_POS_PAGE_ENCRYPTION (DICT_TF_POS_ATOMIC_WRITES \ +#define DICT_TF_POS_UNUSED (DICT_TF_POS_ATOMIC_WRITES \ + DICT_TF_WIDTH_ATOMIC_WRITES) -/** Zero relative shift position of the PAGE_ENCRYPTION_KEY field */ -#define DICT_TF_POS_PAGE_ENCRYPTION_KEY (DICT_TF_POS_PAGE_ENCRYPTION \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION) -#define DICT_TF_POS_UNUSED (DICT_TF_POS_PAGE_ENCRYPTION_KEY \ - + DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY) /** Bit mask of the COMPACT field */ #define DICT_TF_MASK_COMPACT \ @@ -234,14 +208,6 @@ DEFAULT=0, ON = 1, OFF = 2 #define DICT_TF_MASK_ATOMIC_WRITES \ ((~(~0U << DICT_TF_WIDTH_ATOMIC_WRITES)) \ << DICT_TF_POS_ATOMIC_WRITES) -/** Bit mask of the PAGE_ENCRYPTION field */ -#define DICT_TF_MASK_PAGE_ENCRYPTION \ - ((~(~0U << DICT_TF_WIDTH_PAGE_ENCRYPTION)) \ - << DICT_TF_POS_PAGE_ENCRYPTION) -/** Bit mask of the PAGE_ENCRYPTION_KEY field */ -#define DICT_TF_MASK_PAGE_ENCRYPTION_KEY \ - ((~(~0U << DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY)) \ - << DICT_TF_POS_PAGE_ENCRYPTION_KEY) /** Return the value of the COMPACT field */ #define DICT_TF_GET_COMPACT(flags) \ @@ -271,14 +237,6 @@ DEFAULT=0, ON = 1, OFF = 2 #define DICT_TF_GET_ATOMIC_WRITES(flags) \ ((flags & DICT_TF_MASK_ATOMIC_WRITES) \ >> DICT_TF_POS_ATOMIC_WRITES) -/** Return the contents of the PAGE_ENCRYPTION field */ -#define DICT_TF_GET_PAGE_ENCRYPTION(flags) \ - ((flags & DICT_TF_MASK_PAGE_ENCRYPTION) \ - >> DICT_TF_POS_PAGE_ENCRYPTION) -/** Return the contents of the PAGE_ENCRYPTION KEY field */ -#define DICT_TF_GET_PAGE_ENCRYPTION_KEY(flags) \ - ((flags & 
DICT_TF_MASK_PAGE_ENCRYPTION_KEY) \ - >> DICT_TF_POS_PAGE_ENCRYPTION_KEY) /** Return the contents of the UNUSED bits */ #define DICT_TF_GET_UNUSED(flags) \ diff --git a/storage/innobase/include/fsp0types.h b/storage/innobase/include/fsp0types.h index cd2a07af4f0..129fd102f0f 100644 --- a/storage/innobase/include/fsp0types.h +++ b/storage/innobase/include/fsp0types.h @@ -278,10 +278,8 @@ The flags below only exist in fil_space_t::flags, not in FSP_SPACE_FLAGS: /** Zero relative shift position of the DATA_DIR flag */ #define FSP_FLAGS_MEM_DATA_DIR 25 -/** Zero relative shift position of the ATOMIC_WRITES field */ -#define FSP_FLAGS_MEM_ATOMIC_WRITES 26 /** Zero relative shift position of the COMPRESSION_LEVEL field */ -#define FSP_FLAGS_MEM_COMPRESSION_LEVEL 28 +#define FSP_FLAGS_MEM_COMPRESSION_LEVEL 26 /** Zero relative shift position of the POST_ANTELOPE field */ #define FSP_FLAGS_POS_POST_ANTELOPE 0 @@ -327,10 +325,6 @@ these are only used in MySQL 5.7 and used for compatibility. */ ((~(~0U << FSP_FLAGS_WIDTH_PAGE_COMPRESSION)) \ << FSP_FLAGS_POS_PAGE_COMPRESSION) -/** Bit mask of the in-memory ATOMIC_WRITES field */ -#define FSP_FLAGS_MASK_MEM_ATOMIC_WRITES \ - (3U << FSP_FLAGS_MEM_ATOMIC_WRITES) - /** Bit mask of the in-memory COMPRESSION_LEVEL field */ #define FSP_FLAGS_MASK_MEM_COMPRESSION_LEVEL \ (15U << FSP_FLAGS_MEM_COMPRESSION_LEVEL) @@ -371,10 +365,6 @@ these are only used in MySQL 5.7 and used for compatibility. */ #define FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(flags) \ ((flags & FSP_FLAGS_MASK_MEM_COMPRESSION_LEVEL) \ >> FSP_FLAGS_MEM_COMPRESSION_LEVEL) -/** @return the ATOMIC_WRITES field */ -#define FSP_FLAGS_GET_ATOMIC_WRITES(flags) \ - ((flags & FSP_FLAGS_MASK_MEM_ATOMIC_WRITES) \ - >> FSP_FLAGS_MEM_ATOMIC_WRITES) /* @} */ diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 82e17c6fb0a..9bbe6968de1 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -3002,21 +3002,19 @@ row_import_read_v1( cfg->m_n_cols = mach_read_from_4(ptr); if (!dict_tf_is_valid(cfg->m_flags)) { + ib_errf(thd, IB_LOG_LEVEL_ERROR, + ER_TABLE_SCHEMA_MISMATCH, + "Invalid table flags: " ULINTPF, cfg->m_flags); return(DB_CORRUPTION); + } - } else if ((err = row_import_read_columns(file, thd, cfg)) - != DB_SUCCESS) { - - return(err); - - } else if ((err = row_import_read_indexes(file, thd, cfg)) - != DB_SUCCESS) { + err = row_import_read_columns(file, thd, cfg); - return(err); + if (err == DB_SUCCESS) { + err = row_import_read_indexes(file, thd, cfg); } - ut_a(err == DB_SUCCESS); return(err); } -- cgit v1.2.1 From 6b71b3e3484818b667790b0f7426c37138fbbe53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 15 Jun 2017 17:42:49 +0300 Subject: Follow-up to MDEV-12873: Refactor SYS_TABLES.TYPE validation dict_sys_tables_type_to_tf(): Change the parameter n_cols to not_redundant. dict_tf_is_valid_not_redundant(): Refactored from dict_tf_is_valid(). dict_sys_tables_type_valid(): Replaces dict_sys_tables_type_validate(). Use the common function dict_tf_is_valid_not_redundant(), which validates PAGE_COMPRESSION_LEVEL more strictly. DICT_TF_GET_UNUSED(flags): Remove. 
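The refactoring described above rests on one invariant: ROW_FORMAT=REDUNDANT and ROW_FORMAT=COMPACT both store SYS_TABLES.TYPE=1 and are told apart only by the DICT_N_COLS_COMPACT bit of SYS_TABLES.N_COLS, while every newer row format stores the full dict_table_t::flags value in TYPE. The following minimal standalone sketch (not the MariaDB source) illustrates that translation; the N_COLS high-bit constant and the simplified masking are assumptions for illustration only, the real conversion is dict_sys_tables_type_to_tf() in the diff below.

/* Standalone sketch (not the MariaDB source): models the SYS_TABLES.TYPE
   to dict_table_t::flags translation described above.  The N_COLS high-bit
   constant and the simplified masking are illustrative assumptions only. */
#include <cstdint>
#include <cassert>

/* assumption: DICT_N_COLS_COMPACT is the high bit of SYS_TABLES.N_COLS */
static const uint32_t N_COLS_COMPACT_BIT = 1U << 31;

/* Sketch of dict_sys_tables_type_to_tf(type, not_redundant): bit 0 of the
   flags comes from N_COLS, the remaining (already validated) bits from TYPE. */
static uint32_t type_to_flags(uint32_t type, uint32_t n_cols)
{
	const bool not_redundant = (n_cols & N_COLS_COMPACT_BIT) != 0;
	return (type & ~1U) | (not_redundant ? 1U : 0U);
}

int main()
{
	/* ROW_FORMAT=REDUNDANT: TYPE=1, high bit of N_COLS clear -> flags=0 */
	assert(type_to_flags(1, 5) == 0);
	/* ROW_FORMAT=COMPACT: TYPE=1, high bit of N_COLS set -> flags=1 */
	assert(type_to_flags(1, 5 | N_COLS_COMPACT_BIT) == 1);
	/* Newer formats: TYPE carries the full flags value (low bit set) */
	assert(type_to_flags(0x21, 5 | N_COLS_COMPACT_BIT) == 0x21);
	return 0;
}

In the actual dict_sys_tables_rec_read() change below, the same not_redundant value is first passed to dict_sys_tables_type_valid() before the conversion, so only validated TYPE bits ever reach the translation.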
--- storage/innobase/dict/dict0load.cc | 81 ++++++++++++++-- storage/innobase/include/dict0dict.ic | 177 ++++++---------------------------- storage/innobase/include/dict0mem.h | 3 - 3 files changed, 100 insertions(+), 161 deletions(-) (limited to 'storage') diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index 7572e7a00f5..3dc95b1956a 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -1129,6 +1129,71 @@ dict_sys_tablespaces_rec_read( return(true); } +/** Check if SYS_TABLES.TYPE is valid +@param[in] type SYS_TABLES.TYPE +@param[in] not_redundant whether ROW_FORMAT=REDUNDANT is not used +@return whether the SYS_TABLES.TYPE value is valid */ +static +bool +dict_sys_tables_type_valid(ulint type, bool not_redundant) +{ + /* The DATA_DIRECTORY flag can be assigned fully independently + of all other persistent table flags. */ + type &= ~DICT_TF_MASK_DATA_DIR; + + if (type == 1) { + return(true); /* ROW_FORMAT=REDUNDANT or ROW_FORMAT=COMPACT */ + } + + if (!(type & 1)) { + /* For ROW_FORMAT=REDUNDANT and ROW_FORMAT=COMPACT, + SYS_TABLES.TYPE=1. Else, it is the same as + dict_table_t::flags, and the least significant bit + would be set. So, the bit never can be 0. */ + return(false); + } + + if (!not_redundant) { + /* SYS_TABLES.TYPE must be 1 for ROW_FORMAT=REDUNDANT. */ + return(false); + } + + if (type >= 1U << DICT_TF_POS_UNUSED) { + /* Some unknown bits are set. */ + return(false); + } + + /* ATOMIC_WRITES cannot be 3; it is the 10.3 NO_ROLLBACK flag. */ + if (!(~type & DICT_TF_MASK_ATOMIC_WRITES)) { + return(false); + } + + return(dict_tf_is_valid_not_redundant(type)); +} + +/** Convert SYS_TABLES.TYPE to dict_table_t::flags. +@param[in] type SYS_TABLES.TYPE +@param[in] not_redundant whether ROW_FORMAT=REDUNDANT is not used +@return table flags */ +static +ulint +dict_sys_tables_type_to_tf(ulint type, bool not_redundant) +{ + ut_ad(dict_sys_tables_type_valid(type, not_redundant)); + ulint flags = not_redundant ? 1 : 0; + + /* ZIP_SSIZE, ATOMIC_BLOBS, DATA_DIR, PAGE_COMPRESSION, + PAGE_COMPRESSION_LEVEL are the same. */ + flags |= type & (DICT_TF_MASK_ZIP_SSIZE + | DICT_TF_MASK_ATOMIC_BLOBS + | DICT_TF_MASK_DATA_DIR + | DICT_TF_MASK_PAGE_COMPRESSION + | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL); + + ut_ad(dict_tf_is_valid(flags)); + return(flags); +} + /** Read and return 5 integer fields from a SYS_TABLES record. @param[in] rec A record of SYS_TABLES @param[in] name Table Name, the same as SYS_TABLES.NAME @@ -1222,8 +1287,7 @@ dict_sys_tables_rec_read( SYS_TABLES.TYPE to be in the 10.2.2..10.2.6 format. This would in any case be invalid format for 10.2 and earlier releases. */ - ut_ad(ULINT_UNDEFINED == dict_sys_tables_type_validate( - type, DICT_N_COLS_COMPACT)); + ut_ad(!dict_sys_tables_type_valid(type, true)); } else { /* SYS_TABLES.TYPE is of the form AALLLL10DB00001. We must still validate that the LLLL bits are between 0 @@ -1239,8 +1303,7 @@ dict_sys_tables_rec_read( ut_ad(DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type) >= 1); ut_ad(DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type) <= 9); } else { - ut_ad(ULINT_UNDEFINED == dict_sys_tables_type_validate( - type, DICT_N_COLS_COMPACT)); + ut_ad(!dict_sys_tables_type_valid(type, true)); } } @@ -1255,11 +1318,9 @@ dict_sys_tables_rec_read( ut_a(len == 4); *n_cols = mach_read_from_4(field); - /* This validation function also combines the DICT_N_COLS_COMPACT - flag in n_cols into the type field to effectively make it a - dict_table_t::flags. 
*/ + const bool not_redundant = 0 != (*n_cols & DICT_N_COLS_COMPACT); - if (ULINT_UNDEFINED == dict_sys_tables_type_validate(type, *n_cols)) { + if (!dict_sys_tables_type_valid(type, not_redundant)) { ib::error() << "Table " << table_name << " in InnoDB" " data dictionary contains invalid flags." " SYS_TABLES.TYPE=" << type << @@ -1267,7 +1328,7 @@ dict_sys_tables_rec_read( return(false); } - *flags = dict_sys_tables_type_to_tf(type, *n_cols); + *flags = dict_sys_tables_type_to_tf(type, not_redundant); /* For tables created before MySQL 4.1, there may be garbage in SYS_TABLES.MIX_LEN where flags2 are found. Such tables @@ -1275,7 +1336,7 @@ dict_sys_tables_rec_read( high bit set in n_cols, and flags would be zero. MySQL 4.1 was the first version to support innodb_file_per_table, that is, *space_id != 0. */ - if (*flags != 0 || *space_id != 0 || *n_cols & DICT_N_COLS_COMPACT) { + if (not_redundant || *space_id != 0 || *n_cols & DICT_N_COLS_COMPACT) { /* Get flags2 from SYS_TABLES.MIX_LEN */ field = rec_get_nth_field_old( diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic index 1200b62f92f..0244bb784e0 100644 --- a/storage/innobase/include/dict0dict.ic +++ b/storage/innobase/include/dict0dict.ic @@ -638,31 +638,15 @@ dict_table_has_fts_index( return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS)); } -/** Validate the table flags. -@param[in] flags Table flags -@return true if valid. */ -UNIV_INLINE +/** Validate the flags for tables that are not ROW_FORMAT=REDUNDANT. +@param[in] flags table flags +@return whether the flags are valid */ +inline bool -dict_tf_is_valid( - ulint flags) +dict_tf_is_valid_not_redundant(ulint flags) { - /* Make sure there are no bits that we do not know about. */ - if (flags >= 1U << DICT_TF_BITS) { - return(false); - } - - const bool not_redundant = DICT_TF_GET_COMPACT(flags); const bool atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(flags); - if (atomic_blobs && !not_redundant) { - /* ROW_FORMAT=COMPRESSED and ROW_FORMAT=DYNAMIC both use - atomic_blobs, which build on the page structure introduced - for ROW_FORMAT=COMPACT by allowing keys in secondary - indexes to be made from data stored off-page in the - clustered index. */ - return(false); - } - ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags); if (!zip_ssize) { @@ -689,14 +673,36 @@ dict_tf_is_valid( ROW_FORMAT=COMPACT or ROW_FORMAT=DYNAMIC (not ROW_FORMAT=COMPRESSED or ROW_FORMAT=REDUNDANT) and PAGE_COMPRESSED=YES */ - return(!zip_ssize && not_redundant - && DICT_TF_GET_PAGE_COMPRESSION(flags)); + return(!zip_ssize && DICT_TF_GET_PAGE_COMPRESSION(flags)); default: /* Invalid PAGE_COMPRESSION_LEVEL value */ return(false); } } +/** Validate the table flags. +@param[in] flags Table flags +@return true if valid. */ +UNIV_INLINE +bool +dict_tf_is_valid( + ulint flags) +{ + ut_ad(flags < 1U << DICT_TF_BITS); + /* The DATA_DIRECTORY flag can be assigned fully independently + of all other persistent table flags. */ + flags &= ~DICT_TF_MASK_DATA_DIR; + if (!(flags & 1)) { + /* Only ROW_FORMAT=REDUNDANT has 0 in the least significant + bit. For ROW_FORMAT=REDUNDANT, only the DATA_DIR flag + (which we cleared above) can be set. If any other flags + are set, the flags are invalid. */ + return(flags == 0); + } + + return(dict_tf_is_valid_not_redundant(flags)); +} + /** Validate both table flags and table flags2 and make sure they are compatible. 
@param[in] flags Table flags @@ -719,96 +725,6 @@ dict_tf2_is_valid( return(true); } -/********************************************************************//** -Validate a SYS_TABLES TYPE field and return it. -@return Same as input after validating it as a SYS_TABLES TYPE field. -If there is an error, return ULINT_UNDEFINED. */ -UNIV_INLINE -ulint -dict_sys_tables_type_validate( -/*==========================*/ - ulint type, /*!< in: SYS_TABLES.TYPE */ - ulint n_cols) /*!< in: SYS_TABLES.N_COLS */ -{ - ulint low_order_bit = DICT_TF_GET_COMPACT(type); - ulint redundant = !(n_cols & DICT_N_COLS_COMPACT); - ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(type); - ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(type); - ulint unused = DICT_TF_GET_UNUSED(type); - bool page_compression = DICT_TF_GET_PAGE_COMPRESSION(type); - ulint page_compression_level = DICT_TF_GET_PAGE_COMPRESSION_LEVEL(type); - - /* The low order bit of SYS_TABLES.TYPE is always set to 1. - If the format is UNIV_FORMAT_B or higher, this field is the same - as dict_table_t::flags. Zero is not allowed here. */ - if (!low_order_bit) { - return(ULINT_UNDEFINED); - } - - if (redundant) { - if (zip_ssize || atomic_blobs) { - return(ULINT_UNDEFINED); - } - } - - /* Make sure there are no bits that we do not know about. */ - if (unused) { - return(ULINT_UNDEFINED); - } - - if (atomic_blobs) { - /* Barracuda row formats COMPRESSED and DYNAMIC build on - the page structure introduced for the COMPACT row format - by allowing keys in secondary indexes to be made from - data stored off-page in the clustered index. - - The DICT_N_COLS_COMPACT flag should be in N_COLS, - but we already know that. */ - } else if (zip_ssize) { - /* Antelope does not support COMPRESSED format. */ - return(ULINT_UNDEFINED); - } - - if (zip_ssize) { - /* COMPRESSED row format must have low_order_bit and - atomic_blobs bits set and the DICT_N_COLS_COMPACT flag - should be in N_COLS, but we already know about the - low_order_bit and DICT_N_COLS_COMPACT flags. */ - if (!atomic_blobs) { - return(ULINT_UNDEFINED); - } - - /* Validate that the number is within allowed range. */ - if (zip_ssize > PAGE_ZIP_SSIZE_MAX) { - return(ULINT_UNDEFINED); - } - } - - /* There is nothing to validate for the data_dir field. - CREATE TABLE ... DATA DIRECTORY is supported for any row - format, so the DATA_DIR flag is compatible with any other - table flags. However, it is not used with TEMPORARY tables. */ - - if (page_compression || page_compression_level) { - /* page compressed row format must have low_order_bit and - atomic_blobs bits set and the DICT_N_COLS_COMPACT flag - should be in N_COLS, but we already know about the - low_order_bit and DICT_N_COLS_COMPACT flags. */ - - if (!atomic_blobs || !page_compression) { - return(ULINT_UNDEFINED); - } - } - - /* Validate that the atomic writes number is within allowed range. */ - if (DICT_TF_GET_ATOMIC_WRITES(type) == 3) { - return(ULINT_UNDEFINED); - } - - /* Return the validated SYS_TABLES.TYPE. */ - return(type); -} - /********************************************************************//** Determine the file format from dict_table_t::flags The low order bit will be zero for REDUNDANT and 1 for COMPACT. For any @@ -973,41 +889,6 @@ dict_tf_to_fsp_flags(ulint table_flags) return(fsp_flags); } -/********************************************************************//** -Convert a 32 bit integer from SYS_TABLES.TYPE to dict_table_t::flags -The following chart shows the translation of the low order bit. -Other bits are the same. 
-========================= Low order bit ========================== - | REDUNDANT | COMPACT | COMPRESSED and DYNAMIC -SYS_TABLES.TYPE | 1 | 1 | 1 -dict_table_t::flags | 0 | 1 | 1 -================================================================== -@return ulint containing SYS_TABLES.TYPE */ -UNIV_INLINE -ulint -dict_sys_tables_type_to_tf( -/*=======================*/ - ulint type, /*!< in: SYS_TABLES.TYPE field */ - ulint n_cols) /*!< in: SYS_TABLES.N_COLS field */ -{ - ulint flags; - ulint redundant = !(n_cols & DICT_N_COLS_COMPACT); - - /* Adjust bit zero. */ - flags = redundant ? 0 : 1; - - /* ZIP_SSIZE, ATOMIC_BLOBS, DATA_DIR, PAGE_COMPRESSION, - PAGE_COMPRESSION_LEVEL are the same. */ - flags |= type & (DICT_TF_MASK_ZIP_SSIZE - | DICT_TF_MASK_ATOMIC_BLOBS - | DICT_TF_MASK_DATA_DIR - | DICT_TF_MASK_PAGE_COMPRESSION - | DICT_TF_MASK_PAGE_COMPRESSION_LEVEL); - - ut_ad(!DICT_TF_GET_ZIP_SSIZE(flags) || DICT_TF_HAS_ATOMIC_BLOBS(flags)); - return(flags); -} - /********************************************************************//** Convert a 32 bit integer table flags to the 32bit integer that is written to a SYS_TABLES.TYPE field. The following chart shows the translation of diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index c4f5ab8a15c..daa5af95a71 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -238,9 +238,6 @@ DEFAULT=0, ON = 1, OFF = 2 ((flags & DICT_TF_MASK_ATOMIC_WRITES) \ >> DICT_TF_POS_ATOMIC_WRITES) -/** Return the contents of the UNUSED bits */ -#define DICT_TF_GET_UNUSED(flags) \ - (flags >> DICT_TF_POS_UNUSED) /* @} */ /** @brief Table Flags set number 2. -- cgit v1.2.1 From e5980bf1b12f6dc4fa4ab44feb1b6a734dcc84d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 16 Jun 2017 11:05:09 +0300 Subject: Remove the unnecessary method handlerton::release_temporary_latches() The sole purpose of handlerton::release_temporary_latches and its wrapper function was to release the InnoDB adaptive hash index latch (btr_search_latch). When the btr_search_latch was split into an array of latches in MySQL 5.7.8 as part of the Oracle Bug#20985298 fix, the "caching" of the latch across storage engine API calls was removed. As part of that, the function trx_search_latch_release_if_reserved() was changed to an assertion and the function trx_reserve_search_latch_if_not_reserved() was removed, and handlerton::release_temporary_latches() practically became a no-op. Note: MDEV-12121 replaced the function trx_search_latch_release_if_reserved() with the more appropriately named macro trx_assert_no_search_latch(). --- storage/innobase/handler/ha_innodb.cc | 60 ++++----------------------------- storage/innobase/handler/ha_innodb.h | 12 ------- storage/innobase/handler/ha_innopart.cc | 14 +------- storage/myisam/ha_myisam.cc | 3 -- storage/xtradb/handler/ha_innodb.cc | 42 ----------------------- 5 files changed, 7 insertions(+), 124 deletions(-) (limited to 'storage') diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 902e55fc02f..8f5a17738f5 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -2035,28 +2035,6 @@ thd_to_trx_id( } #endif /* WITH_WSREP */ -/********************************************************************//** -Call this function when mysqld passes control to the client. That is to -avoid deadlocks on the adaptive hash S-latch possibly held by thd. 
For more -documentation, see handler.cc. -@return 0 */ -inline -int -innobase_release_temporary_latches( -/*===============================*/ - handlerton* hton, /*!< in: handlerton */ - THD* thd) /*!< in: MySQL thread */ -{ - DBUG_ASSERT(hton == innodb_hton_ptr); - - if (!srv_was_started) { - } else if (trx_t* trx = thd_to_trx(thd)) { - trx_assert_no_search_latch(trx); - } - - return(0); -} - /********************************************************************//** Increments innobase_active_counter and every INNOBASE_WAKE_INTERVALth time calls srv_active_wake_master_thread. This function should be used @@ -3911,9 +3889,6 @@ innobase_init( innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS; - innobase_hton->release_temporary_latches = - innobase_release_temporary_latches; - #ifdef MYSQL_REPLACE_TRX_IN_THD innobase_hton->replace_native_transaction_in_thd = innodb_replace_trx_in_thd; @@ -6538,37 +6513,19 @@ initialize_auto_increment(dict_table_t* table, const Field* field) dict_table_autoinc_unlock(table); } -/*****************************************************************//** -Creates and opens a handle to a table which already exists in an InnoDB -database. -@return 1 if error, 0 if success */ - +/** Open an InnoDB table +@param[in] name table name +@return error code +@retval 0 on success */ int -ha_innobase::open( -/*==============*/ - const char* name, /*!< in: table name */ - int mode, /*!< in: not used */ - uint test_if_locked) /*!< in: not used */ +ha_innobase::open(const char* name, int, uint) { dict_table_t* ib_table; char norm_name[FN_REFLEN]; - THD* thd; dict_err_ignore_t ignore_err = DICT_ERR_IGNORE_NONE; DBUG_ENTER("ha_innobase::open"); - UT_NOT_USED(mode); - UT_NOT_USED(test_if_locked); - - thd = ha_thd(); - - /* Under some cases MySQL seems to call this function while - holding search latch(es). This breaks the latching order as - we acquire dict_sys->mutex below and leads to a deadlock. */ - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); - } - normalize_table_name(norm_name, name); m_user_thd = NULL; @@ -6583,6 +6540,7 @@ ha_innobase::open( m_upd_buf_size = 0; char* is_part = is_partition(norm_name); + THD* thd = ha_thd(); /* Check whether FOREIGN_KEY_CHECKS is set to 0. If so, the table can be opened even if some FK indexes are missing. If not, the table @@ -7053,12 +7011,6 @@ ha_innobase::close() { DBUG_ENTER("ha_innobase::close"); - THD* thd = ha_thd(); - - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); - } - row_prebuilt_free(m_prebuilt, FALSE); if (m_upd_buf != NULL) { diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index a9f063dc224..769d4ae2a59 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -906,18 +906,6 @@ innodb_base_col_setup_for_stored( /** whether this is a computed virtual column */ #define innobase_is_v_fld(field) ((field)->vcol_info && !(field)->stored_in_db()) -/** Release temporary latches. -Call this function when mysqld passes control to the client. That is to -avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more -documentation, see handler.cc. -@param[in] hton Handlerton. -@param[in] thd MySQL thread. 
-@return 0 */ -int -innobase_release_temporary_latches( - handlerton* hton, - THD* thd); - /** Always normalize table name to lower case on Windows */ #ifdef _WIN32 #define normalize_table_name(norm_name, name) \ diff --git a/storage/innobase/handler/ha_innopart.cc b/storage/innobase/handler/ha_innopart.cc index 1041871805e..d5bea656f8a 100644 --- a/storage/innobase/handler/ha_innopart.cc +++ b/storage/innobase/handler/ha_innopart.cc @@ -932,7 +932,6 @@ ha_innopart::open( { dict_table_t* ib_table; char norm_name[FN_REFLEN]; - THD* thd; DBUG_ENTER("ha_innopart::open"); @@ -942,16 +941,11 @@ ha_innopart::open( ut_ad(table->part_info != NULL); m_part_info = table->part_info; } - thd = ha_thd(); /* Under some cases MySQL seems to call this function while holding search latch(es). This breaks the latching order as we acquire dict_sys->mutex below and leads to a deadlock. */ - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); - } - normalize_table_name(norm_name, name); m_user_thd = NULL; @@ -1017,6 +1011,7 @@ share_error: MONITOR_INC(MONITOR_TABLE_OPEN); bool no_tablespace; + THD* thd = ha_thd(); /* TODO: Should we do this check for every partition during ::open()? */ /* TODO: refactor this in ha_innobase so it can increase code reuse. */ @@ -1372,15 +1367,8 @@ void ha_innopart::clear_ins_upd_nodes() int ha_innopart::close() { - THD* thd; - DBUG_ENTER("ha_innopart::close"); - thd = ha_thd(); - if (thd != NULL) { - innobase_release_temporary_latches(ht, thd); - } - ut_ad(m_pcur_parts == NULL); ut_ad(m_clust_pcur_parts == NULL); close_partitioning(); diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index d46b4df8e05..77d098cd9f1 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -1182,9 +1182,6 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) share->state.dupp_key= MI_MAX_KEY; strmov(fixed_name,file->filename); - // Release latches since this can take a long time - ha_release_temporary_latches(thd); - /* Don't lock tables if we have used LOCK TABLE or if we come from enable_index() diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 591df34a578..b797c0a2114 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -2122,39 +2122,6 @@ ha_innobase::is_fake_change_enabled(THD* thd) return(trx && UNIV_UNLIKELY(trx->fake_changes)); } -/********************************************************************//** -In XtraDB it is impossible for a transaction to own a search latch outside of -InnoDB code, so there is nothing to release on demand. We keep this function to -simplify maintenance. -@return 0 */ -static -int -innobase_release_temporary_latches( -/*===============================*/ - handlerton* hton MY_ATTRIBUTE((unused)), /*!< in: handlerton */ - THD* thd MY_ATTRIBUTE((unused))) /*!< in: MySQL thread */ -{ -#ifdef UNIV_DEBUG - DBUG_ASSERT(hton == innodb_hton_ptr); - - if (!innodb_inited || thd == NULL) { - - return(0); - } - - trx_t* trx = thd_to_trx(thd); - - if (trx != NULL) { -#ifdef UNIV_SYNC_DEBUG - ut_ad(!btr_search_own_any()); -#endif - trx_search_latch_release_if_reserved(trx); - } -#endif - - return(0); -} - /********************************************************************//** Increments innobase_active_counter and every INNOBASE_WAKE_INTERVALth time calls srv_active_wake_master_thread. 
This function should be used @@ -3813,9 +3780,6 @@ innobase_init( innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS; - innobase_hton->release_temporary_latches = - innobase_release_temporary_latches; - innobase_hton->kill_query = innobase_kill_connection; if (srv_file_per_table) @@ -6262,9 +6226,6 @@ ha_innobase::open( thd = ha_thd(); - /* No-op in XtraDB */ - innobase_release_temporary_latches(ht, thd); - normalize_table_name(norm_name, name); user_thd = NULL; @@ -6724,9 +6685,6 @@ ha_innobase::close() thd = ha_thd(); - /* No-op in XtraDB */ - innobase_release_temporary_latches(ht, thd); - row_prebuilt_free(prebuilt, FALSE); if (upd_buf != NULL) { -- cgit v1.2.1 From 50faeda4d6dfa67b08022e74f6d4e6e7a69a5edc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 16 Jun 2017 12:21:46 +0300 Subject: Remove trx_t::has_search_latch and simplify debug code When the btr_search_latch was split into an array of latches in MySQL 5.7.8 as part of the Oracle Bug#20985298 fix, the "caching" of the latch across storage engine API calls was removed, and the field trx->has_search_latch would only be set during a short time frame in the execution of row_search_mvcc(), which was formerly called row_search_for_mysql(). This means that the column INFORMATION_SCHEMA.INNODB_TRX.TRX_ADAPTIVE_HASH_LATCHED will always report 0. That column cannot be removed in MariaDB 10.2, but it can be removed in future releases. trx_t::has_search_latch: Remove. trx_assert_no_search_latch(): Remove. row_sel_try_search_shortcut_for_mysql(): Remove a redundant condition on trx->has_search_latch (it was always true). sync_check_iterate(): Make the parameter const. sync_check_functor_t: Make the operator() const, and remove result() and the virtual destructor. There is no need to have mutable state in the functors. sync_checker: Replaces dict_sync_check and btrsea_sync_check. sync_check: Replaces btrsea_sync_check. dict_sync_check: Instantiated from sync_checker. sync_allowed_latches: Use std::find() directly on the array. Remove the std::vector. TrxInInnoDB::enter(), TrxInInnoDB::exit(): Remove obviously redundant debug assertions on trx->in_depth, and use equality comparison against 0 because it could be more efficient on some architectures. --- storage/innobase/btr/btr0bulk.cc | 7 +- storage/innobase/buf/buf0flu.cc | 11 +- storage/innobase/dict/dict0dict.cc | 9 +- storage/innobase/handler/ha_innodb.cc | 84 ++------------- storage/innobase/handler/ha_innopart.cc | 17 +--- storage/innobase/handler/handler0alter.cc | 1 - storage/innobase/handler/i_s.cc | 3 +- storage/innobase/include/log0log.ic | 12 +-- storage/innobase/include/sync0debug.h | 8 +- storage/innobase/include/sync0types.h | 163 +++++++----------------------- storage/innobase/include/trx0i_s.h | 5 +- storage/innobase/include/trx0trx.h | 30 +----- storage/innobase/row/row0sel.cc | 64 ++---------- storage/innobase/srv/srv0conc.cc | 35 +------ storage/innobase/sync/sync0debug.cc | 39 +++---- storage/innobase/trx/trx0i_s.cc | 4 - storage/innobase/trx/trx0trx.cc | 17 ---- 17 files changed, 89 insertions(+), 420 deletions(-) (limited to 'storage') diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc index 9ff3bc5f6d1..8a954f9d3c3 100644 --- a/storage/innobase/btr/btr0bulk.cc +++ b/storage/innobase/btr/btr0bulk.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. 
+Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -991,11 +992,7 @@ BtrBulk::finish(dberr_t err) ut_ad(err == DB_SUCCESS); } -#ifdef UNIV_DEBUG - dict_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); -#endif /* UNIV_DEBUG */ + ut_ad(!sync_check_iterate(dict_sync_check())); ut_ad(err != DB_SUCCESS || btr_validate_index(m_index, NULL, false)); return(err); diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 2c660c76c33..89fe74d59b8 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -1870,15 +1870,8 @@ buf_flush_batch( counts */ { ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST); - -#ifdef UNIV_DEBUG - { - dict_sync_check check(true); - - ut_ad(flush_type != BUF_FLUSH_LIST - || !sync_check_iterate(check)); - } -#endif /* UNIV_DEBUG */ + ut_ad(flush_type == BUF_FLUSH_LRU + || !sync_check_iterate(dict_sync_check())); buf_pool_mutex_enter(buf_pool); diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index ec8ecaafee7..b0ba04c7fbf 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -6113,14 +6113,7 @@ dict_set_corrupted( ut_ad(mutex_own(&dict_sys->mutex)); ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); - -#ifdef UNIV_DEBUG - { - dict_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); - } -#endif /* UNIV_DEBUG */ + ut_ad(!sync_check_iterate(dict_sync_check())); /* Mark the table as corrupted only if the clustered index is corrupted */ diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 8f5a17738f5..21422dfaac0 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1869,6 +1869,8 @@ void innobase_srv_conc_exit_innodb( row_prebuilt_t* prebuilt) { + ut_ad(!sync_check_iterate(sync_check())); + #ifdef WITH_WSREP if (wsrep_on(prebuilt->trx->mysql_thd) && wsrep_thd_is_BF(prebuilt->trx->mysql_thd, FALSE)) { @@ -1877,13 +1879,6 @@ innobase_srv_conc_exit_innodb( #endif /* WITH_WSREP */ trx_t* trx = prebuilt->trx; -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ /* This is to avoid making an unnecessary function call. */ if (trx->declared_to_be_inside_innodb @@ -1901,13 +1896,7 @@ innobase_srv_conc_force_exit_innodb( /*================================*/ trx_t* trx) /*!< in: transaction handle */ { -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); /* This is to avoid making an unnecessary function call. 
*/ if (trx->declared_to_be_inside_innodb) { @@ -1994,13 +1983,7 @@ const char* thd_innodb_tmpdir( THD* thd) { -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - trx_t* trx = thd_to_trx(thd); - btrsea_sync_check check(trx->has_search_latch); - ut_ad(!sync_check_iterate(check)); -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); const char* tmp_dir = THDVAR(thd, tmpdir); @@ -3360,7 +3343,6 @@ innobase_query_caching_of_table_permitted( return(false); } - trx_assert_no_search_latch(trx); innobase_srv_conc_force_exit_innodb(trx); if (!thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) @@ -3668,8 +3650,6 @@ ha_innobase::init_table_handle_for_HANDLER(void) /* Initialize the m_prebuilt struct much like it would be inited in external_lock */ - trx_assert_no_search_latch(m_prebuilt->trx); - innobase_srv_conc_force_exit_innodb(m_prebuilt->trx); /* If the transaction is not started yet, start it */ @@ -4781,12 +4761,6 @@ innobase_commit_ordered( trx = check_trx_exists(thd); TrxInInnoDB trx_in_innodb(trx); - /* Since we will reserve the kernel mutex, we must not be holding the - search system latch, or we will disobey the latching order. But we - already released it in innobase_xa_prepare() (if not before), so just - have an assert here.*/ - trx_assert_no_search_latch(trx); - if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { /* We cannot throw error here; instead we will catch this error again in innobase_commit() and report it from there. */ @@ -4998,12 +4972,6 @@ innobase_rollback_trx( DBUG_ENTER("innobase_rollback_trx"); DBUG_PRINT("trans", ("aborting transaction")); - /* Release a possible FIFO ticket and search latch. Since we will - reserve the trx_sys->mutex, we have to release the search system - latch first to obey the latching order. 
*/ - - trx_assert_no_search_latch(trx); - innobase_srv_conc_force_exit_innodb(trx); /* If we had reserved the auto-inc lock for some table (if @@ -13101,10 +13069,6 @@ create_table_info_t::initialize() parent_trx = check_trx_exists(m_thd); - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ - - trx_assert_no_search_latch(parent_trx); DBUG_RETURN(0); } @@ -13953,12 +13917,6 @@ innobase_drop_database( THD* thd = current_thd; - /* In case MySQL calls this in the middle of a SELECT - query, release possible adaptive hash latch to avoid - deadlocks of threads */ - - trx_assert_no_search_latch(check_trx_exists(thd)); - ulint len = 0; char* ptr = strend(path) - 2; @@ -14692,12 +14650,7 @@ ha_innobase::info_low( update_thd(ha_thd()); - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ - - m_prebuilt->trx->op_info = (char*)"returning various info to MariaDB"; - - trx_assert_no_search_latch(m_prebuilt->trx); + m_prebuilt->trx->op_info = "returning various info to MariaDB"; ib_table = m_prebuilt->table; DBUG_ASSERT(ib_table->n_ref_count > 0); @@ -15544,12 +15497,7 @@ ha_innobase::update_table_comment( update_thd(ha_thd()); - m_prebuilt->trx->op_info = (char*)"returning table comment"; - - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads */ - - trx_assert_no_search_latch(m_prebuilt->trx); + m_prebuilt->trx->op_info = "returning table comment"; #define SSTR( x ) reinterpret_cast< std::ostringstream & >( \ ( std::ostringstream() << std::dec << x ) ).str() @@ -15605,22 +15553,14 @@ ha_innobase::get_foreign_key_create_info(void) update_thd(ha_thd()); - m_prebuilt->trx->op_info = (char*)"getting info on foreign keys"; - - /* In case MySQL calls this in the middle of a SELECT query, - release possible adaptive hash latch to avoid - deadlocks of threads */ - - trx_assert_no_search_latch(m_prebuilt->trx); - - + m_prebuilt->trx->op_info = "getting info on foreign keys"; /* Output the data to a temporary string */ std::string str = dict_print_info_on_foreign_keys( TRUE, m_prebuilt->trx, m_prebuilt->table); - m_prebuilt->trx->op_info = (char*)""; + m_prebuilt->trx->op_info = ""; /* Allocate buffer for the string */ char* fk_str = (char*) my_malloc(str.length() + 1, MYF(0)); @@ -16624,8 +16564,6 @@ innodb_show_status( trx_t* trx = check_trx_exists(thd); - trx_assert_no_search_latch(trx); - innobase_srv_conc_force_exit_innodb(trx); TrxInInnoDB trx_in_innodb(trx); @@ -17936,12 +17874,6 @@ innobase_xa_prepare( thd_get_xid(thd, (MYSQL_XID*) trx->xid); - /* Release a possible FIFO ticket and search latch. Since we will - reserve the trx_sys->mutex, we have to release the search system - latch first to obey the latching order. */ - - trx_assert_no_search_latch(trx); - innobase_srv_conc_force_exit_innodb(trx); TrxInInnoDB trx_in_innodb(trx); diff --git a/storage/innobase/handler/ha_innopart.cc b/storage/innobase/handler/ha_innopart.cc index d5bea656f8a..fb6f4b89a41 100644 --- a/storage/innobase/handler/ha_innopart.cc +++ b/storage/innobase/handler/ha_innopart.cc @@ -3020,11 +3020,6 @@ ha_innopart::records_in_range( m_prebuilt->trx->op_info = (char*)"estimating records in index range"; - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads. 
*/ - - trx_assert_no_search_latch(m_prebuilt->trx); - active_index = keynr; key = table->key_info + active_index; @@ -3159,11 +3154,6 @@ ha_innopart::estimate_rows_upper_bound() m_prebuilt->trx->op_info = "calculating upper bound for table rows"; - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads. */ - - trx_assert_no_search_latch(m_prebuilt->trx); - for (uint i = m_part_info->get_first_used_partition(); i < m_tot_parts; i = m_part_info->get_next_used_partition(i)) { @@ -3275,12 +3265,7 @@ ha_innopart::info_low( update_thd(ha_thd()); - /* In case MySQL calls this in the middle of a SELECT query, release - possible adaptive hash latch to avoid deadlocks of threads. */ - - m_prebuilt->trx->op_info = (char*)"returning various info to MySQL"; - - trx_assert_no_search_latch(m_prebuilt->trx); + m_prebuilt->trx->op_info = "returning various info to MySQL"; ut_ad(m_part_share->get_table_part(0)->n_ref_count > 0); diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 5cc3347ebbe..107205e0375 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -591,7 +591,6 @@ ha_innobase::check_if_supported_inplace_alter( } update_thd(); - trx_assert_no_search_latch(m_prebuilt->trx); /* Change on engine specific table options require rebuild of the table */ diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 36ecc0b8a29..8f04f9d15ee 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -699,8 +699,7 @@ fill_innodb_trx_from_cache( #ifdef BTR_CUR_HASH_ADAPT /* trx_adaptive_hash_latched */ - OK(fields[IDX_TRX_ADAPTIVE_HASH_LATCHED]->store( - row->trx_has_search_latch, true)); + OK(fields[IDX_TRX_ADAPTIVE_HASH_LATCHED]->store(0, true)); #endif /* BTR_CUR_HASH_ADAPT */ /* trx_is_read_only*/ diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic index 82a94265776..1e09c263975 100644 --- a/storage/innobase/include/log0log.ic +++ b/storage/innobase/include/log0log.ic @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -479,11 +480,11 @@ void log_free_check(void) /*================*/ { -#ifdef UNIV_DEBUG /* During row_log_table_apply(), this function will be called while we are holding some latches. This is OK, as long as we are not holding any latches on buffer blocks. 
*/ +#ifdef UNIV_DEBUG static const latch_level_t latches[] = { SYNC_DICT, /* dict_sys->mutex during commit_try_rebuild() */ @@ -491,13 +492,12 @@ log_free_check(void) commit_try_rebuild() */ SYNC_INDEX_TREE /* index->lock */ }; - - sync_allowed_latches check( - latches, latches + sizeof(latches)/sizeof(*latches)); - - ut_ad(!sync_check_iterate(check)); #endif /* UNIV_DEBUG */ + ut_ad(!sync_check_iterate( + sync_allowed_latches(latches, + latches + UT_ARR_SIZE(latches)))); + if (log_sys->check_flush_or_checkpoint) { log_check_margins(); diff --git a/storage/innobase/include/sync0debug.h b/storage/innobase/include/sync0debug.h index ba697b70e13..ecc742918f0 100644 --- a/storage/innobase/include/sync0debug.h +++ b/storage/innobase/include/sync0debug.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -84,10 +84,10 @@ sync_check_find(latch_level_t level); /** Checks that the level array for the current thread is empty. Terminate iteration if the functor returns true. -@param[in,out] functor called for each element. -@return true if the functor returns true */ +@param[in] functor called for each element. +@return true if the functor returns true for any element */ bool -sync_check_iterate(sync_check_functor_t& functor); +sync_check_iterate(const sync_check_functor_t& functor); /** Acquires the debug mutex. We cannot use the mutex defined in sync0sync, because the debug mutex is also acquired in sync0arr while holding the OS diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h index ec4503daa64..8a68f3d0cc6 100644 --- a/storage/innobase/include/sync0types.h +++ b/storage/innobase/include/sync0types.h @@ -1078,108 +1078,43 @@ struct latch_t { /** Subclass this to iterate over a thread's acquired latch levels. */ struct sync_check_functor_t { - virtual ~sync_check_functor_t() { } - virtual bool operator()(const latch_level_t) = 0; - virtual bool result() const = 0; + virtual bool operator()(const latch_level_t) const = 0; }; -#ifdef BTR_CUR_HASH_ADAPT -/** Functor to check whether the calling thread owns the btr search mutex. */ -struct btrsea_sync_check : public sync_check_functor_t { - - /** Constructor - @param[in] has_search_latch true if owns the latch */ - explicit btrsea_sync_check(bool has_search_latch) - : - m_result(), - m_has_search_latch(has_search_latch) { } - - /** Destructor */ - virtual ~btrsea_sync_check() { } - - /** Called for every latch owned by the calling thread. - @param[in] level Level of the existing latch - @return true if the predicate check is successful */ - virtual bool operator()(const latch_level_t level) - { - /* If calling thread doesn't hold search latch then - check if there are latch level exception provided. 
*/ - - if (!m_has_search_latch - && (level != SYNC_SEARCH_SYS - && level != SYNC_FTS_CACHE)) { - - m_result = true; - - return(m_result); - } - - return(false); - } - - /** @return result from the check */ - virtual bool result() const - { - return(m_result); - } - -private: - /** True if all OK */ - bool m_result; - - /** If the caller owns the search latch */ - const bool m_has_search_latch; -}; -#endif /* BTR_CUR_HASH_ADAPT */ - -/** Functor to check for dictionay latching constraints. */ -struct dict_sync_check : public sync_check_functor_t { - - /** Constructor - @param[in] dict_mutex_allow true if the dict mutex - is allowed */ - explicit dict_sync_check(bool dict_mutex_allowed) - : - m_result(), - m_dict_mutex_allowed(dict_mutex_allowed) { } - - /** Destructor */ - virtual ~dict_sync_check() { } - +/** Check that no latch is being held. +@tparam some_allowed whether some latches are allowed to be held */ +template +struct sync_checker : public sync_check_functor_t +{ /** Check the latching constraints - @param[in] level The level held by the thread */ - virtual bool operator()(const latch_level_t level) + @param[in] level The level held by the thread + @return whether a latch violation was detected */ + bool operator()(const latch_level_t level) const { - if (!m_dict_mutex_allowed - || (level != SYNC_DICT - && level != SYNC_DICT_OPERATION - && level != SYNC_FTS_CACHE - /* This only happens in recv_apply_hashed_log_recs. */ - && level != SYNC_RECV_WRITER - && level != SYNC_NO_ORDER_CHECK)) { - - m_result = true; - - return(true); + if (some_allowed) { + switch (level) { + case SYNC_RECV_WRITER: + /* This only happens in + recv_apply_hashed_log_recs. */ + case SYNC_DICT: + case SYNC_DICT_OPERATION: + case SYNC_FTS_CACHE: + case SYNC_NO_ORDER_CHECK: + return(false); + default: + return(true); + } } - return(false); - } - - /** @return the result of the check */ - virtual bool result() const - { - return(m_result); + return(true); } - -private: - /** True if all OK */ - bool m_result; - - /** True if it is OK to hold the dict mutex */ - const bool m_dict_mutex_allowed; }; +/** The strict latch checker (no InnoDB latches may be held) */ +typedef struct sync_checker sync_check; +/** The sloppy latch checker (can hold InnoDB dictionary or SQL latches) */ +typedef struct sync_checker dict_sync_check; + /** Functor to check for given latching constraints. */ struct sync_allowed_latches : public sync_check_functor_t { @@ -1189,9 +1124,7 @@ struct sync_allowed_latches : public sync_check_functor_t { sync_allowed_latches( const latch_level_t* from, const latch_level_t* to) - : - m_result(), - m_latches(from, to) { } + : begin(from), end(to) { } /** Checks whether the given latch_t violates the latch constraint. This object maintains a list of allowed latch levels, and if the given @@ -1199,41 +1132,17 @@ struct sync_allowed_latches : public sync_check_functor_t { then it is a violation. 
@param[in] latch The latch level to check - @return true if there is a latch ordering violation */ - virtual bool operator()(const latch_level_t level) + @return true if there is a latch violation */ + bool operator()(const latch_level_t level) const { - for (latches_t::const_iterator it = m_latches.begin(); - it != m_latches.end(); - ++it) { - - if (level == *it) { - - m_result = false; - - /* No violation */ - return(false); - } - } - - return(true); - } - - /** @return the result of the check */ - virtual bool result() const - { - return(m_result); + return(std::find(begin, end, level) == end); } private: - /** Save the result of validation check here - True if all OK */ - bool m_result; - - typedef std::vector > - latches_t; - - /** List of latch levels that are allowed to be held */ - latches_t m_latches; + /** First element in an array of allowed latch levels */ + const latch_level_t* const begin; + /** First element after the end of the array of allowed latch levels */ + const latch_level_t* const end; }; /** Get the latch id from a latch name. diff --git a/storage/innobase/include/trx0i_s.h b/storage/innobase/include/trx0i_s.h index 17a297527af..e02c5d88a29 100644 --- a/storage/innobase/include/trx0i_s.h +++ b/storage/innobase/include/trx0i_s.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -162,10 +163,6 @@ struct i_s_trx_row_t { /*!< check_foreigns in trx_t */ const char* trx_foreign_key_error; /*!< detailed_error in trx_t */ -#ifdef BTR_CUR_HASH_ADAPT - ibool trx_has_search_latch; - /*!< has_search_latch in trx_t */ -#endif /* BTR_CUR_HASH_ADAPT */ ulint trx_is_read_only; /*!< trx_t::read_only */ ulint trx_is_autocommit_non_locking; diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 6274dec9f9d..b2d4952318c 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -58,15 +58,6 @@ class FlushObserver; /** Dummy session used currently in MySQL interface */ extern sess_t* trx_dummy_sess; -#ifdef BTR_CUR_HASH_ADAPT -/** Assert that the transaction is not holding the adaptive hash index latch. -@param[in] trx transaction */ -# define trx_assert_no_search_latch(trx) \ - ut_ad(!trx->has_search_latch) -#else /* BTR_CUR_HASH_ADAPT */ -# define trx_assert_no_search_latch(trx) -#endif - /** Set flush observer for the transaction @param[in/out] trx transaction struct @param[in] observer flush observer */ @@ -1072,11 +1063,6 @@ struct trx_t { flush the log in trx_commit_complete_for_mysql() */ ulint duplicates; /*!< TRX_DUP_IGNORE | TRX_DUP_REPLACE */ -#ifdef BTR_CUR_HASH_ADAPT - bool has_search_latch; - /*!< TRUE if this trx has latched the - search system latch in S-mode */ -#endif /* BTR_CUR_HASH_ADAPT */ trx_dict_op_t dict_operation; /**< @see enum trx_dict_op_t */ /* Fields protected by the srv_conc_mutex. */ @@ -1508,17 +1494,11 @@ private: } /* Avoid excessive mutex acquire/release */ - if (++trx->in_depth > 1) { + if (trx->in_depth++) { /* The transaction is already inside InnoDB. */ - ut_ad(trx->in_depth > 1); return; } - /* Only the owning thread should release the latch. 
*/ - - ut_ad(trx->in_depth == 1); - trx_assert_no_search_latch(trx); - trx_mutex_enter(trx); wait(trx); @@ -1543,16 +1523,10 @@ private: ut_ad(trx->in_depth > 0); - if (--trx->in_depth > 0) { - ut_ad(trx->in_depth); + if (--trx->in_depth) { return; } - /* Only the owning thread should release the latch. */ - - ut_ad(trx->in_depth == 0); - trx_assert_no_search_latch(trx); - trx_mutex_enter(trx); ut_ad((trx->in_innodb & TRX_FORCE_ROLLBACK_MASK) > 0); diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 098d70ae7e3..52af4bd1828 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -2231,16 +2231,7 @@ stop_for_a_while: btr_pcur_store_position(&(plan->pcur), &mtr); mtr_commit(&mtr); - -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); err = DB_SUCCESS; goto func_exit; @@ -2258,14 +2249,7 @@ commit_mtr_for_a_while: mtr_commit(&mtr); mtr_has_extra_clust_latch = FALSE; - -#ifdef UNIV_DEBUG - { - dict_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); - } -#endif /* UNIV_DEBUG */ + ut_ad(!sync_check_iterate(dict_sync_check())); goto table_loop; @@ -2280,20 +2264,13 @@ lock_wait_or_error: mtr_commit(&mtr); -#ifdef UNIV_DEBUG - { - dict_sync_check check(true); - - ut_ad(!sync_check_iterate(check)); - } -#endif /* UNIV_DEBUG */ - func_exit: #ifdef BTR_CUR_HASH_ADAPT if (search_latch_locked) { btr_search_s_unlock(index); } #endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(dict_sync_check())); if (heap != NULL) { mem_heap_free(heap); @@ -3041,7 +3018,6 @@ row_sel_store_mysql_field_func( mem_heap_t* heap; /* Copy an externally stored field to a temporary heap */ - trx_assert_no_search_latch(prebuilt->trx); ut_ad(field_no == templ->clust_rec_field_no); ut_ad(templ->type != DATA_POINT); @@ -3928,11 +3904,7 @@ row_sel_try_search_shortcut_for_mysql( ut_ad(!prebuilt->templ_contains_blob); btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE, - BTR_SEARCH_LEAF, pcur, - (trx->has_search_latch) - ? RW_S_LATCH - : 0, - mtr); + BTR_SEARCH_LEAF, pcur, RW_S_LATCH, mtr); rec = btr_pcur_get_rec(pcur); if (!page_rec_is_user_rec(rec)) { @@ -4189,14 +4161,7 @@ row_search_mvcc( DBUG_RETURN(DB_END_OF_INDEX); } -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); if (dict_table_is_discarded(prebuilt->table)) { @@ -4221,8 +4186,6 @@ row_search_mvcc( && (prebuilt->read_just_key || prebuilt->m_read_virtual_key); - trx_assert_no_search_latch(trx); - /* Reset the new record lock info if srv_locks_unsafe_for_binlog is set or session is using a READ COMMITED isolation level. Then we are able to remove the record locks set here on an individual @@ -4377,9 +4340,7 @@ row_search_mvcc( and if we try that, we can deadlock on the adaptive hash index semaphore! 
*/ - trx_assert_no_search_latch(trx); rw_lock_s_lock(btr_get_search_latch(index)); - trx->has_search_latch = true; switch (row_sel_try_search_shortcut_for_mysql( &rec, prebuilt, &offsets, &heap, @@ -4434,7 +4395,6 @@ row_search_mvcc( err = DB_SUCCESS; rw_lock_s_unlock(btr_get_search_latch(index)); - trx->has_search_latch = false; goto func_exit; @@ -4445,7 +4405,6 @@ row_search_mvcc( err = DB_RECORD_NOT_FOUND; rw_lock_s_unlock(btr_get_search_latch(index)); - trx->has_search_latch = false; /* NOTE that we do NOT store the cursor position */ @@ -4463,7 +4422,6 @@ row_search_mvcc( mtr_start(&mtr); rw_lock_s_unlock(btr_get_search_latch(index)); - trx->has_search_latch = false; } } #endif /* BTR_CUR_HASH_ADAPT */ @@ -4471,8 +4429,6 @@ row_search_mvcc( /*-------------------------------------------------------------*/ /* PHASE 3: Open or restore index cursor position */ - trx_assert_no_search_latch(trx); - spatial_search = dict_index_is_spatial(index) && mode >= PAGE_CUR_CONTAIN; @@ -5777,15 +5733,7 @@ func_exit: } } -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); DEBUG_SYNC_C("innodb_row_search_for_mysql_exit"); diff --git a/storage/innobase/srv/srv0conc.cc b/storage/innobase/srv/srv0conc.cc index bf8a326a633..9f589b57d9c 100644 --- a/storage/innobase/srv/srv0conc.cc +++ b/storage/innobase/srv/srv0conc.cc @@ -197,11 +197,6 @@ srv_conc_enter_innodb_with_atomics( (void) my_atomic_addlint( &srv_conc.n_waiting, 1); - /* Release possible search system latch this - thread has */ - - trx_assert_no_search_latch(trx); - thd_wait_begin(trx->mysql_thd, THD_WAIT_USER_LOCK); notified_mysql = TRUE; @@ -257,15 +252,7 @@ srv_conc_enter_innodb( { trx_t* trx = prebuilt->trx; -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); srv_conc_enter_innodb_with_atomics(trx); } @@ -279,15 +266,7 @@ srv_conc_force_enter_innodb( trx_t* trx) /*!< in: transaction object associated with the thread */ { -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); if (!srv_thread_concurrency) { @@ -320,15 +299,7 @@ srv_conc_force_exit_innodb( srv_conc_exit_innodb_with_atomics(trx); -#ifdef BTR_CUR_HASH_ADAPT -# ifdef UNIV_DEBUG - { - btrsea_sync_check check(trx->has_search_latch); - - ut_ad(!sync_check_iterate(check)); - } -# endif /* UNIV_DEBUG */ -#endif /* BTR_CUR_HASH_ADAPT */ + ut_ad(!sync_check_iterate(sync_check())); } /*********************************************************************//** diff --git a/storage/innobase/sync/sync0debug.cc b/storage/innobase/sync/sync0debug.cc index 11743e14be2..e43e87eb14e 100644 --- a/storage/innobase/sync/sync0debug.cc +++ b/storage/innobase/sync/sync0debug.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, MariaDB Corporation. 
 
 Portions of this file contain modifications contributed and copyrighted by
 Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -298,29 +298,23 @@ struct LatchDebug {
 	}
 
 	/** Iterate over a thread's latches.
-	@param[in,out]	functor	The callback
+	@param[in]	functor	The callback
 	@return true if the functor returns true. */
-	bool for_each(sync_check_functor_t& functor)
+	bool for_each(const sync_check_functor_t& functor)
 		UNIV_NOTHROW
 	{
-		const Latches*	latches = thread_latches();
-
-		if (latches == 0) {
-			return(functor.result());
-		}
-
-		Latches::const_iterator	end = latches->end();
-
-		for (Latches::const_iterator it = latches->begin();
-		     it != end;
-		     ++it) {
-
-			if (functor(it->m_level)) {
-				break;
+		if (const Latches* latches = thread_latches()) {
+			Latches::const_iterator	end = latches->end();
+			for (Latches::const_iterator it = latches->begin();
+			     it != end; ++it) {
+
+				if (functor(it->m_level)) {
+					return(true);
+				}
 			}
 		}
 
-		return(functor.result());
+		return(false);
 	}
 
 	/** Removes a latch from the thread level array if it is found there.
@@ -1215,13 +1209,12 @@ sync_check_find(latch_level_t level)
 
 /** Iterate over the thread's latches.
 @param[in,out]	functor	called for each element.
-@return false if the sync debug hasn't been initialised
-@return the value returned by the functor */
+@return true if the functor returns true for any element */
 bool
-sync_check_iterate(sync_check_functor_t& functor)
+sync_check_iterate(const sync_check_functor_t& functor)
 {
-	if (LatchDebug::instance() != NULL) {
-		return(LatchDebug::instance()->for_each(functor));
+	if (LatchDebug* debug = LatchDebug::instance()) {
+		return(debug->for_each(functor));
 	}
 
 	return(false);
diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc
index fcc50b8c76d..7854ad2ab5a 100644
--- a/storage/innobase/trx/trx0i_s.cc
+++ b/storage/innobase/trx/trx0i_s.cc
@@ -589,10 +589,6 @@ thd_done:
 		row->trx_foreign_key_error = NULL;
 	}
 
-#ifdef BTR_CUR_HASH_ADAPT
-	row->trx_has_search_latch = (ibool) trx->has_search_latch;
-#endif /* BTR_CUR_HASH_ADAPT */
-
 	row->trx_is_read_only = trx->read_only;
 
 	row->trx_is_autocommit_non_locking = trx_is_autocommit_non_locking(trx);
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 315697403c4..7fdbd808a60 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -272,8 +272,6 @@ struct TrxFactory {
 		ut_a(trx->lock.wait_lock == NULL);
 		ut_a(trx->lock.wait_thr == NULL);
-
-		trx_assert_no_search_latch(trx);
 
 		ut_a(trx->dict_operation_lock_mode == 0);
 
 		if (trx->lock.lock_heap != NULL) {
@@ -341,9 +339,6 @@ struct TrxFactory {
 
 		ut_a(trx->lock.wait_thr == NULL);
 		ut_a(trx->lock.wait_lock == NULL);
-
-		trx_assert_no_search_latch(trx);
-
 		ut_a(trx->dict_operation_lock_mode == 0);
 
 		ut_a(UT_LIST_GET_LEN(trx->lock.trx_locks) == 0);
@@ -2413,13 +2408,6 @@ state_ok:
 			(ulong) n_rec_locks);
 	}
 
-#ifdef BTR_CUR_HASH_ADAPT
-	if (trx->has_search_latch) {
-		newline = TRUE;
-		fputs(", holds adaptive hash latch", f);
-	}
-#endif /* BTR_CUR_HASH_ADAPT */
-
 	if (trx->undo_no != 0) {
 		newline = TRUE;
 		fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no);
@@ -2551,11 +2539,6 @@ state_ok:
 		fprintf(f, "que state %lu ", (ulong) trx->lock.que_state);
 	}
 
-	if (trx->has_search_latch) {
-		newline = TRUE;
-		fputs(", holds adaptive hash latch", f);
-	}
-
 	if (trx->undo_no != 0) {
 		newline = TRUE;
 		fprintf(f, ", undo log entries " TRX_ID_FMT, trx->undo_no);
-- 
cgit v1.2.1

From d1e182d603c73e42a18667f3984d6487c9a3b090 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= 
Date: Mon, 19 Jun 2017 15:59:19 +0300
Subject: MDEV-12975 InnoDB redo log minimum size check uses detected file
 size instead of requested innodb_log_file_size

log_calc_max_ages(): Use the requested size in the check, instead of
the detected redo log size. The redo log will be resized at startup
if it differs from what has been requested.
---
 storage/innobase/log/log0log.cc | 71 ++++++++++++-----------------------------
 storage/xtradb/log/log0log.cc   | 71 ++++++++++++-----------------------------
 2 files changed, 40 insertions(+), 102 deletions(-)

(limited to 'storage')

diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 0dc06fd4b1b..1c9cbc3a219 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -754,43 +754,16 @@ ibool
 log_calc_max_ages(void)
 /*===================*/
 {
-	log_group_t*	group;
 	lsn_t		margin;
 	ulint		free;
-	ibool		success		= TRUE;
-	lsn_t		smallest_capacity;
-	lsn_t		archive_margin;
-	lsn_t		smallest_archive_margin;
-
-	mutex_enter(&(log_sys->mutex));
-
-	group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
-	ut_ad(group);
-
-	smallest_capacity = LSN_MAX;
-	smallest_archive_margin = LSN_MAX;
-
-	while (group) {
-		if (log_group_get_capacity(group) < smallest_capacity) {
-			smallest_capacity = log_group_get_capacity(group);
-		}
-
-		archive_margin = log_group_get_capacity(group)
-			- (group->file_size - LOG_FILE_HDR_SIZE)
-			- LOG_ARCHIVE_EXTRA_MARGIN;
-
-		if (archive_margin < smallest_archive_margin) {
-
-			smallest_archive_margin = archive_margin;
-		}
-
-		group = UT_LIST_GET_NEXT(log_groups, group);
-	}
+	lsn_t		smallest_capacity = ((srv_log_file_size_requested
+					      << srv_page_size_shift)
+					     - LOG_FILE_HDR_SIZE)
+		* srv_n_log_files;
 
 	/* Add extra safety */
-	smallest_capacity = smallest_capacity - smallest_capacity / 10;
+	smallest_capacity -= smallest_capacity / 10;
 
 	/* For each OS thread we must reserve so much free space in the
 	smallest log group that it can accommodate the log entries produced
@@ -800,15 +773,16 @@ log_calc_max_ages(void)
 	free = LOG_CHECKPOINT_FREE_PER_THREAD * (10 + srv_thread_concurrency)
 		+ LOG_CHECKPOINT_EXTRA_FREE;
 	if (free >= smallest_capacity / 2) {
-		success = FALSE;
-
-		goto failure;
-	} else {
-		margin = smallest_capacity - free;
+		ib_logf(IB_LOG_LEVEL_FATAL,
+			"The combined size of ib_logfiles"
+			" should be bigger than\n"
+			"InnoDB: 200 kB * innodb_thread_concurrency.");
 	}
-
+	margin = smallest_capacity - free;
 	margin = margin - margin / 10;	/* Add still some extra safety */
 
+	mutex_enter(&log_sys->mutex);
+
 	log_sys->log_group_capacity = smallest_capacity;
 
 	log_sys->max_modified_age_async = margin
@@ -821,22 +795,17 @@ log_calc_max_ages(void)
 	log_sys->max_checkpoint_age = margin;
 
 #ifdef UNIV_LOG_ARCHIVE
-	log_sys->max_archived_lsn_age = smallest_archive_margin;
+	lsn_t archive_margin = smallest_capacity
+		- (srv_log_file_size_requested - LOG_FILE_HDR_SIZE)
+		- LOG_ARCHIVE_EXTRA_MARGIN;
+	log_sys->max_archived_lsn_age = archive_margin;
 
-	log_sys->max_archived_lsn_age_async = smallest_archive_margin
-		- smallest_archive_margin / LOG_ARCHIVE_RATIO_ASYNC;
+	log_sys->max_archived_lsn_age_async = archive_margin
+		- archive_margin / LOG_ARCHIVE_RATIO_ASYNC;
 #endif /* UNIV_LOG_ARCHIVE */
 
-failure:
-	mutex_exit(&(log_sys->mutex));
-
-	if (!success) {
-		ib_logf(IB_LOG_LEVEL_FATAL,
-			"The combined size of ib_logfiles"
-			" should be bigger than\n"
-			"InnoDB: 200 kB * innodb_thread_concurrency.");
-	}
+	mutex_exit(&log_sys->mutex);
 
-	return(success);
+	return(true);
 }
 
 /******************************************************//**
diff --git a/storage/xtradb/log/log0log.cc b/storage/xtradb/log/log0log.cc
index 3252cd793c9..833f3240369 100644
--- a/storage/xtradb/log/log0log.cc
+++ b/storage/xtradb/log/log0log.cc
@@ -860,43 +860,16 @@ ibool
 log_calc_max_ages(void)
 /*===================*/
 {
-	log_group_t*	group;
 	lsn_t		margin;
 	ulint		free;
-	ibool		success		= TRUE;
-	lsn_t		smallest_capacity;
-	lsn_t		archive_margin;
-	lsn_t		smallest_archive_margin;
-
-	mutex_enter(&(log_sys->mutex));
-
-	group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
-	ut_ad(group);
-
-	smallest_capacity = LSN_MAX;
-	smallest_archive_margin = LSN_MAX;
-
-	while (group) {
-		if (log_group_get_capacity(group) < smallest_capacity) {
-			smallest_capacity = log_group_get_capacity(group);
-		}
-
-		archive_margin = log_group_get_capacity(group)
-			- (group->file_size - LOG_FILE_HDR_SIZE)
-			- LOG_ARCHIVE_EXTRA_MARGIN;
-
-		if (archive_margin < smallest_archive_margin) {
-
-			smallest_archive_margin = archive_margin;
-		}
-
-		group = UT_LIST_GET_NEXT(log_groups, group);
-	}
+	lsn_t		smallest_capacity = ((srv_log_file_size_requested
+					      << srv_page_size_shift)
+					     - LOG_FILE_HDR_SIZE)
+		* srv_n_log_files;
 
 	/* Add extra safety */
-	smallest_capacity = smallest_capacity - smallest_capacity / 10;
+	smallest_capacity -= smallest_capacity / 10;
 
 	/* For each OS thread we must reserve so much free space in the
 	smallest log group that it can accommodate the log entries produced
@@ -906,15 +879,16 @@ log_calc_max_ages(void)
 	free = LOG_CHECKPOINT_FREE_PER_THREAD * (10 + srv_thread_concurrency)
 		+ LOG_CHECKPOINT_EXTRA_FREE;
 	if (free >= smallest_capacity / 2) {
-		success = FALSE;
-
-		goto failure;
-	} else {
-		margin = smallest_capacity - free;
+		ib_logf(IB_LOG_LEVEL_FATAL,
+			"The combined size of ib_logfiles"
+			" should be bigger than\n"
+			"InnoDB: 200 kB * innodb_thread_concurrency.");
 	}
-
+	margin = smallest_capacity - free;
 	margin = margin - margin / 10;	/* Add still some extra safety */
 
+	mutex_enter(&log_sys->mutex);
+
 	log_sys->log_group_capacity = smallest_capacity;
 
 	log_sys->max_modified_age_async = margin
@@ -927,22 +901,17 @@ log_calc_max_ages(void)
 	log_sys->max_checkpoint_age = margin;
 
 #ifdef UNIV_LOG_ARCHIVE
-	log_sys->max_archived_lsn_age = smallest_archive_margin;
+	lsn_t archive_margin = smallest_capacity
+		- (srv_log_file_size_requested - LOG_FILE_HDR_SIZE)
+		- LOG_ARCHIVE_EXTRA_MARGIN;
+	log_sys->max_archived_lsn_age = archive_margin;
 
-	log_sys->max_archived_lsn_age_async = smallest_archive_margin
-		- smallest_archive_margin / LOG_ARCHIVE_RATIO_ASYNC;
+	log_sys->max_archived_lsn_age_async = archive_margin
+		- archive_margin / LOG_ARCHIVE_RATIO_ASYNC;
 #endif /* UNIV_LOG_ARCHIVE */
 
-failure:
-	mutex_exit(&(log_sys->mutex));
-
-	if (!success) {
-		ib_logf(IB_LOG_LEVEL_FATAL,
-			"The combined size of ib_logfiles"
-			" should be bigger than\n"
-			"InnoDB: 200 kB * innodb_thread_concurrency.");
-	}
+	mutex_exit(&log_sys->mutex);
 
-	return(success);
+	return(true);
 }
 
 /******************************************************//**
-- 
cgit v1.2.1
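
Note: the following is a minimal standalone sketch of the arithmetic that the patched log_calc_max_ages() performs on the *requested* redo log size; it is not InnoDB code, and the numeric values are hypothetical stand-ins for srv_log_file_size_requested, srv_page_size_shift, LOG_FILE_HDR_SIZE, srv_n_log_files, LOG_CHECKPOINT_FREE_PER_THREAD, LOG_CHECKPOINT_EXTRA_FREE and srv_thread_concurrency. The point of the change is visible here: the capacity is derived purely from the requested configuration, not from whatever ib_logfiles happen to exist on disk, because the log will be resized to the requested size at startup anyway.

	// sketch.cc - illustrative only, assumed values, not InnoDB defaults
	#include <cstdint>
	#include <cstdio>

	int main()
	{
		const uint64_t log_file_size_pages = 3072;	// e.g. 48 MiB in 16 KiB pages
		const unsigned page_size_shift     = 14;	// 16 KiB pages
		const uint64_t log_file_hdr_size   = 2048;	// stand-in for LOG_FILE_HDR_SIZE
		const uint64_t n_log_files         = 2;
		const uint64_t free_per_thread     = 4 * 16384;	// hypothetical reserve
		const uint64_t extra_free          = 8 * 16384;	// hypothetical reserve
		const uint64_t thread_concurrency  = 0;

		// Capacity of the redo log as requested by the configuration.
		uint64_t smallest_capacity =
			((log_file_size_pages << page_size_shift) - log_file_hdr_size)
			* n_log_files;
		smallest_capacity -= smallest_capacity / 10;	// 10% extra safety

		// Space that must stay free for concurrent threads plus a fixed reserve.
		const uint64_t free_space =
			free_per_thread * (10 + thread_concurrency) + extra_free;

		if (free_space >= smallest_capacity / 2) {
			std::puts("requested ib_logfiles too small: fatal at startup");
		} else {
			uint64_t margin = smallest_capacity - free_space;
			margin -= margin / 10;	// still some extra safety
			std::printf("log_group_capacity=%llu margin=%llu\n",
				    (unsigned long long) smallest_capacity,
				    (unsigned long long) margin);
		}
		return 0;
	}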