author	Eugene Kosov <claprix@yandex.ru>	2019-11-03 00:15:29 +0300
committer	Eugene Kosov <claprix@yandex.ru>	2019-11-04 15:52:39 +0300
commit	5afd8fade9aa6465386bedc2197fe043f76f5c59 (patch)
tree	f0c67d8886903b89e591d939f89ec39535921116
parent	0b1bc4bf76fcb3c63b208c8e1449809e6d492fd2 (diff)
download	mariadb-git-bb-10.2-MDEV-20494-row-size-ddl.tar.gz

MDEV-20949 Stop issuing 'row size' error on DML (branch bb-10.2-MDEV-20494-row-size-ddl)
Move the row size check to the early CREATE/ALTER TABLE phase. Stop checking
on table open.

dict_index_add_to_cache(): remove parameter 'strict', stop checking row size

dict_index_t::record_size_info_t: this is the result of a row size check
operation

create_table_info_t::row_size_is_acceptable(): performs the row size check.
Issues an error or a warning. Writes the first overflow field to the InnoDB
log.

create_table_info_t::create_table(): add row size check

dict_index_t::record_size_info(): this is a refactored version of
dict_index_t::rec_potentially_too_big(). The new version doesn't change the
global state of the program but returns all the interesting info, and it's
the callers who decide how to handle a row size overflow.

dict_index_t::rec_potentially_too_big(): removed
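The heart of the refactoring is that the size check itself becomes
side-effect free: it returns a result object, and it is the caller that
decides whether an overflow is a hard error (strict mode) or a warning.
Below is a minimal, self-contained sketch of that pattern; the names
(size_check_result, check_row_size) are illustrative stand-ins, not the
actual InnoDB API.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Result of a pure row size check, modeled on record_size_info_t.
    struct size_check_result {
        bool too_big = false;
        size_t first_overrun_field = SIZE_MAX;
        size_t overrun_size = 0;
    };

    // Accumulates worst-case field sizes; no logging, no warning pushing.
    size_check_result check_row_size(const size_t* fields, size_t n,
                                     size_t max_leaf_size) {
        size_check_result r;
        size_t total = 0;
        for (size_t i = 0; i < n; i++) {
            total += fields[i];
            if (total >= max_leaf_size && !r.too_big) {
                r.too_big = true;
                r.first_overrun_field = i;
                r.overrun_size = total;
            }
        }
        return r;
    }

    int main() {
        const size_t fields[] = { 4000, 4000, 4000 };
        const size_check_result r = check_row_size(fields, 3, 8126);
        const bool strict = true; // cf. innodb_strict_mode
        if (r.too_big) {
            // The caller, not the checker, picks the severity.
            fprintf(stderr, "%s: field %zu makes the row %zu bytes\n",
                    strict ? "ERROR" : "Warning",
                    r.first_overrun_field, r.overrun_size);
        }
        return strict && r.too_big ? 1 : 0;
    }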
-rw-r--r--	mysql-test/suite/innodb/r/innodb-32k-crash.result	2
-rw-r--r--	mysql-test/suite/innodb_zip/r/bug53591.result	1
-rw-r--r--	mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result	1
-rw-r--r--	mysql-test/suite/innodb_zip/t/bug53591.test	2
-rw-r--r--	mysql-test/suite/innodb_zip/t/prefix_index_liftedlimit.test	2
-rw-r--r--	storage/innobase/dict/dict0boot.cc	18
-rw-r--r--	storage/innobase/dict/dict0crea.cc	3
-rw-r--r--	storage/innobase/dict/dict0dict.cc	24
-rw-r--r--	storage/innobase/dict/dict0mem.cc	144
-rw-r--r--	storage/innobase/handler/ha_innodb.cc	129
-rw-r--r--	storage/innobase/handler/ha_innodb.h	7
-rw-r--r--	storage/innobase/handler/handler0alter.cc	9
-rw-r--r--	storage/innobase/include/dict0dict.h	6
-rw-r--r--	storage/innobase/include/dict0mem.h	55
-rw-r--r--	storage/innobase/row/row0mysql.cc	3
15 files changed, 328 insertions, 78 deletions
diff --git a/mysql-test/suite/innodb/r/innodb-32k-crash.result b/mysql-test/suite/innodb/r/innodb-32k-crash.result
index 25b19310481..6c6b7c7fd35 100644
--- a/mysql-test/suite/innodb/r/innodb-32k-crash.result
+++ b/mysql-test/suite/innodb/r/innodb-32k-crash.result
@@ -136,8 +136,6 @@ v=@h,w=@h,x=@b,y=@h,z=@h,
aa=@h,ba=@h,ca=@h,da=@h,ea=@h,fa=@h,ga=@h,ha=@h,ia=@h,ja=@h,
ka=@h,la=@h,ma=@h,na=@h,oa=@h,pa=@h,qa=@h,ra=@h,sa=@h,ta=@b,ua=@h,
va=@h,wa=@h,xa=@h,ya=@h,za=@h;
-Warnings:
-Warning 139 Row size too large (> 16318). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline.
BEGIN;
UPDATE t1 SET a=@g,b=@g,c=@g,d=@g,e=@g;
UPDATE t1 SET f=@g,g=@g,h=@g,i=@g,j=@g;
diff --git a/mysql-test/suite/innodb_zip/r/bug53591.result b/mysql-test/suite/innodb_zip/r/bug53591.result
index e14a1942750..0e0a5a05e4b 100644
--- a/mysql-test/suite/innodb_zip/r/bug53591.result
+++ b/mysql-test/suite/innodb_zip/r/bug53591.result
@@ -1,3 +1,4 @@
+call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
SET GLOBAL innodb_file_per_table=on;
SET GLOBAL innodb_strict_mode=on;
set old_alter_table=0;
diff --git a/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result b/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
index a7a9917b6f4..92a5cd51c8b 100644
--- a/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
+++ b/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result
@@ -1,3 +1,4 @@
+call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
SET @large_prefix_orig = @@GLOBAL.innodb_large_prefix;
CREATE TABLE worklog5743 (
col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) ,
diff --git a/mysql-test/suite/innodb_zip/t/bug53591.test b/mysql-test/suite/innodb_zip/t/bug53591.test
index 67223027bad..17c79e0f6f8 100644
--- a/mysql-test/suite/innodb_zip/t/bug53591.test
+++ b/mysql-test/suite/innodb_zip/t/bug53591.test
@@ -1,5 +1,7 @@
-- source include/innodb_page_size_small.inc
+call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
+
let $file_per_table=`select @@innodb_file_per_table`;
SET GLOBAL innodb_file_per_table=on;
diff --git a/mysql-test/suite/innodb_zip/t/prefix_index_liftedlimit.test b/mysql-test/suite/innodb_zip/t/prefix_index_liftedlimit.test
index ac4946e08c6..c52ef09fe90 100644
--- a/mysql-test/suite/innodb_zip/t/prefix_index_liftedlimit.test
+++ b/mysql-test/suite/innodb_zip/t/prefix_index_liftedlimit.test
@@ -15,6 +15,8 @@
--source include/have_innodb.inc
--source include/have_innodb_16k.inc
+call mtr.add_suppression('InnoDB: Cannot add field.*because after adding it, the row size is');
+
SET @large_prefix_orig = @@GLOBAL.innodb_large_prefix;
# Prefix index with VARCHAR data type , primary/secondary index and DML ops
diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc
index 52c384f118d..2f27d45a3d7 100644
--- a/storage/innobase/dict/dict0boot.cc
+++ b/storage/innobase/dict/dict0boot.cc
@@ -362,8 +362,7 @@ dict_boot(void)
error = dict_index_add_to_cache(table, index,
mach_read_from_4(dict_hdr
- + DICT_HDR_TABLES),
- FALSE);
+ + DICT_HDR_TABLES));
ut_a(error == DB_SUCCESS);
/*-------------------------*/
@@ -372,10 +371,8 @@ dict_boot(void)
dict_mem_index_add_field(index, "ID", 0);
index->id = DICT_TABLE_IDS_ID;
- error = dict_index_add_to_cache(table, index,
- mach_read_from_4(dict_hdr
- + DICT_HDR_TABLE_IDS),
- FALSE);
+ error = dict_index_add_to_cache(
+ table, index, mach_read_from_4(dict_hdr + DICT_HDR_TABLE_IDS));
ut_a(error == DB_SUCCESS);
/*-------------------------*/
@@ -406,8 +403,7 @@ dict_boot(void)
index->id = DICT_COLUMNS_ID;
error = dict_index_add_to_cache(table, index,
mach_read_from_4(dict_hdr
- + DICT_HDR_COLUMNS),
- FALSE);
+ + DICT_HDR_COLUMNS));
ut_a(error == DB_SUCCESS);
/*-------------------------*/
@@ -439,8 +435,7 @@ dict_boot(void)
index->id = DICT_INDEXES_ID;
error = dict_index_add_to_cache(table, index,
mach_read_from_4(dict_hdr
- + DICT_HDR_INDEXES),
- FALSE);
+ + DICT_HDR_INDEXES));
ut_a(error == DB_SUCCESS);
/*-------------------------*/
@@ -466,8 +461,7 @@ dict_boot(void)
index->id = DICT_FIELDS_ID;
error = dict_index_add_to_cache(table, index,
mach_read_from_4(dict_hdr
- + DICT_HDR_FIELDS),
- FALSE);
+ + DICT_HDR_FIELDS));
ut_a(error == DB_SUCCESS);
mtr_commit(&mtr);
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index d277b593a9c..65a2643635c 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -1477,8 +1477,7 @@ dict_create_index_step(
index_id_t index_id = node->index->id;
err = dict_index_add_to_cache(
- node->table, node->index, FIL_NULL,
- trx_is_strict(trx), node->add_v);
+ node->table, node->index, FIL_NULL, node->add_v);
node->index = dict_index_get_if_in_cache_low(index_id);
ut_a((node->index == NULL) == (err != DB_SUCCESS));
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 08afaddcf8b..dc0d4ab8747 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -45,11 +45,6 @@ dict_index_t* dict_ind_redundant;
extern uint ibuf_debug;
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
-/**********************************************************************
-Issue a warning that the row is too big. */
-void
-ib_warn_row_too_big(const dict_table_t* table);
-
#include "btr0btr.h"
#include "btr0cur.h"
#include "btr0sea.h"
@@ -2350,18 +2345,14 @@ added column.
@param[in,out] index index; NOTE! The index memory
object is freed in this function!
@param[in] page_no root page number of the index
-@param[in] strict TRUE=refuse to create the index
- if records could be too big to fit in
- an B-tree page
@param[in] add_v new virtual column that being added along with
an add index call
-@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
+@return DB_SUCCESS, or DB_CORRUPTION */
dberr_t
dict_index_add_to_cache(
dict_table_t* table,
dict_index_t* index,
ulint page_no,
- ibool strict,
const dict_add_v_col_t* add_v)
{
dict_index_t* new_index;
@@ -2406,19 +2397,6 @@ dict_index_add_to_cache(
new_index->disable_ahi = index->disable_ahi;
#endif
- if (new_index->rec_potentially_too_big(table, strict)) {
-
- if (strict) {
- dict_mem_index_free(new_index);
- dict_mem_index_free(index);
- return(DB_TOO_BIG_RECORD);
- } else if (current_thd != NULL) {
- /* Avoid the warning to be printed
- during recovery. */
- ib_warn_row_too_big((const dict_table_t*)table);
- }
- }
-
n_ord = new_index->n_uniq;
/* Flag the ordering columns and also set column max_prefix */
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index fe01948b70d..0f373e84f6b 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -1185,3 +1185,147 @@ dict_mem_table_is_system(
return true;
}
}
+
+dict_index_t::record_size_info_t dict_index_t::record_size_info() const
+{
+ ut_ad(table);
+ ut_ad(!(type & DICT_FTS));
+
+ /* maximum allowed size of a node pointer record */
+ ulint page_ptr_max;
+
+ const bool comp = dict_table_is_comp(table);
+ const page_size_t page_size(dict_table_page_size(table));
+ record_size_info_t result;
+
+ if (page_size.is_compressed()
+ && page_size.physical() < univ_page_size.physical()) {
+ /* On a compressed page, two records must fit in the
+ uncompressed page modification log. On compressed pages
+ with size.physical() == univ_page_size.physical(),
+ this limit will never be reached. */
+ ut_ad(comp);
+ /* The maximum allowed record size is the size of
+ an empty page, minus a byte for recording the heap
+ number in the page modification log. The maximum
+ allowed node pointer size is half that. */
+ result.max_leaf_size = page_zip_empty_size(n_fields,
+ page_size.physical());
+ if (result.max_leaf_size) {
+ result.max_leaf_size--;
+ }
+ page_ptr_max = result.max_leaf_size / 2;
+ /* On a compressed page, there is a two-byte entry in
+ the dense page directory for every record. But there
+ is no record header. */
+ result.shortest_size = 2;
+ } else {
+ /* The maximum allowed record size is half a B-tree
+ page (16k for 64k page size). No additional sparse
+ page directory entry will be generated for the first
+ few user records. */
+ result.max_leaf_size = (comp || srv_page_size < UNIV_PAGE_SIZE_MAX)
+ ? page_get_free_space_of_empty(comp) / 2
+ : REDUNDANT_REC_MAX_DATA_SIZE;
+
+ page_ptr_max = result.max_leaf_size;
+ /* Each record has a header. */
+ result.shortest_size = comp
+ ? REC_N_NEW_EXTRA_BYTES
+ : REC_N_OLD_EXTRA_BYTES;
+ }
+
+ if (comp) {
+ /* Include the "null" flags in the
+ maximum possible record size. */
+ result.shortest_size += UT_BITS_IN_BYTES(n_nullable);
+ } else {
+ /* For each column, include a 2-byte offset and a
+ "null" flag. The 1-byte format is only used in short
+ records that do not contain externally stored columns.
+ Such records could never exceed the page limit, even
+ when using the 2-byte format. */
+ result.shortest_size += 2 * n_fields;
+ }
+
+ const ulint max_local_len = table->get_overflow_field_local_len();
+
+ /* Compute the maximum possible record size. */
+ for (unsigned i = 0; i < n_fields; i++) {
+ const dict_field_t* field
+ = dict_index_get_nth_field(this, i);
+ const dict_col_t* col
+ = dict_field_get_col(field);
+
+ /* In dtuple_convert_big_rec(), variable-length columns
+ that are longer than BTR_EXTERN_LOCAL_STORED_MAX_SIZE
+ may be chosen for external storage.
+
+ Fixed-length columns, and all columns of secondary
+ index records are always stored inline. */
+
+ /* Determine the maximum length of the index field.
+ The field_ext_max_size should be computed as the worst
+ case in rec_get_converted_size_comp() for
+ REC_STATUS_ORDINARY records. */
+
+ size_t field_max_size = dict_col_get_fixed_size(col, comp);
+ if (field_max_size && field->fixed_len != 0) {
+ /* dict_index_add_col() should guarantee this */
+ ut_ad(!field->prefix_len
+ || field->fixed_len == field->prefix_len);
+ /* Fixed lengths are not encoded
+ in ROW_FORMAT=COMPACT. */
+ goto add_field_size;
+ }
+
+ field_max_size = dict_col_get_max_size(col);
+
+ if (field->prefix_len) {
+ if (field->prefix_len < field_max_size) {
+ field_max_size = field->prefix_len;
+ }
+
+ /* These conditions were copied from dtuple_convert_big_rec(). */
+ } else if (field_max_size > max_local_len
+ && field_max_size > BTR_EXTERN_LOCAL_STORED_MAX_SIZE
+ && DATA_BIG_COL(col)
+ && dict_index_is_clust(this)) {
+
+ /* In the worst case, we have a locally stored
+ column of BTR_EXTERN_LOCAL_STORED_MAX_SIZE bytes.
+ The length can be stored in one byte. If the
+ column were stored externally, the lengths in
+ the clustered index page would be
+ BTR_EXTERN_FIELD_REF_SIZE and 2. */
+ field_max_size = max_local_len;
+ }
+
+ if (comp) {
+ /* Add the extra size for ROW_FORMAT=COMPACT.
+ For ROW_FORMAT=REDUNDANT, these bytes were
+ added to result.shortest_size before this loop. */
+ result.shortest_size += field_max_size < 256 ? 1 : 2;
+ }
+add_field_size:
+ result.shortest_size += field_max_size;
+
+ /* Check the size limit on leaf pages. */
+ if (result.shortest_size >= result.max_leaf_size) {
+ result.set_too_big(i);
+ }
+
+ /* Check the size limit on non-leaf pages. Records
+ stored in non-leaf B-tree pages consist of the unique
+ columns of the record (the key columns of the B-tree)
+ and a node pointer field. When we have processed the
+ unique columns, result.shortest_size equals the size of the
+ node pointer record minus the node pointer column. */
+ if (i + 1 == dict_index_get_n_unique_in_tree(this)
+ && result.shortest_size + REC_NODE_PTR_SIZE >= page_ptr_max) {
+
+ result.set_too_big(i);
+ }
+ }
+
+ return result;
+}
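As a concrete illustration of the arithmetic in record_size_info() above,
the following toy program reproduces the COMPACT worst case on an
uncompressed 16KiB page: a 5-byte record header (REC_N_NEW_EXTRA_BYTES),
one null-flag byte per 8 nullable columns, 1 or 2 length bytes per field,
plus each field's maximum size, accumulated and compared against the
8126-byte leaf limit. The three 3000-byte fields are hypothetical, and the
constants are spelled out rather than taken from InnoDB headers.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        const size_t rec_n_new_extra_bytes = 5; // COMPACT record header
        const size_t max_leaf_size = 8126;      // half an empty 16KiB page
        const size_t n_nullable = 3;
        // Hypothetical index: three fields of up to 3000 bytes each.
        const std::vector<size_t> field_max(3, 3000);

        size_t shortest = rec_n_new_extra_bytes + (n_nullable + 7) / 8;
        for (size_t i = 0; i < field_max.size(); i++) {
            shortest += (field_max[i] < 256 ? 1 : 2); // length bytes
            shortest += field_max[i];
            if (shortest >= max_leaf_size) {
                // record_size_info() would remember the first such i
                // via set_too_big(i) and keep scanning the rest.
                printf("field %zu overruns: %zu >= %zu\n",
                       i, shortest, max_leaf_size);
            }
        }
        return 0;
    }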
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 8a6a3e3d8fb..547a3b056f9 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -5578,7 +5578,7 @@ normalize_table_name_c_low(
create_table_info_t::create_table_info_t(
THD* thd,
- TABLE* form,
+ const TABLE* form,
HA_CREATE_INFO* create_info,
char* table_name,
char* remote_path,
@@ -12738,16 +12738,109 @@ int create_table_info_t::create_table(bool create_fk)
}
}
- innobase_table = dict_table_open_on_name(
- m_table_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE);
+ innobase_table = dict_table_open_on_name(m_table_name, true, false,
+ DICT_ERR_IGNORE_NONE);
+ ut_ad(innobase_table);
- if (innobase_table != NULL) {
- dict_table_close(innobase_table, TRUE, FALSE);
+ if (!row_size_is_acceptable(*innobase_table)) {
+ dict_table_close(innobase_table, true, false);
+ DBUG_RETURN(convert_error_code_to_mysql(
+ DB_TOO_BIG_RECORD, m_flags, NULL));
+ }
+ dict_table_close(innobase_table, true, false);
+
DBUG_RETURN(0);
}
+bool create_table_info_t::row_size_is_acceptable(
+ const dict_table_t& table) const
+{
+ for (dict_index_t* index = dict_table_get_first_index(&table); index;
+ index = dict_table_get_next_index(index)) {
+
+ if (!row_size_is_acceptable(*index)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/** Issue a warning that the row is too big. */
+void ib_warn_row_too_big(THD* thd, const dict_table_t* table)
+{
+ /* If prefix is true then a 768-byte prefix is stored
+ locally for BLOB fields. Refer to dict_table_get_format() */
+ const bool prefix = (dict_tf_get_format(table->flags)
+ == UNIV_FORMAT_A);
+
+ const ulint free_space = page_get_free_space_of_empty(
+ table->flags & DICT_TF_COMPACT) / 2;
+
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW,
+ "Row size too large (> " ULINTPF ")."
+ " Changing some columns to TEXT"
+ " or BLOB %smay help. In current row format, BLOB prefix of"
+ " %d bytes is stored inline.", free_space
+ , prefix ? "or using ROW_FORMAT=DYNAMIC or"
+ " ROW_FORMAT=COMPRESSED ": ""
+ , prefix ? DICT_MAX_FIXED_COL_LEN : 0);
+}
+
+
+bool create_table_info_t::row_size_is_acceptable(
+ const dict_index_t& index) const
+{
+ if (index.type & DICT_FTS
+ /* Ignore the check for system tables because the innodb_table_stats
+ maximum row size cannot fit on a 4k page. */
+     || index.table->is_system_db) {
+ return true;
+ }
+
+ const bool strict = THDVAR(m_thd, strict_mode);
+
+ dict_index_t::record_size_info_t info = index.record_size_info();
+
+ if (info.row_is_too_big()) {
+ const size_t idx = info.get_first_overrun_field_index();
+ if (idx != SIZE_T_MAX) {
+ ut_ad(info.get_overrun_size() != 0);
+ ut_ad(info.max_leaf_size != 0);
+
+ const dict_field_t* field
+ = dict_index_get_nth_field(&index, idx);
+
+ ib::error_or_warn(strict)
+ << "Cannot add field " << field->name
+ << " in table " << index.table->name
+ << " because after adding it, the row size is "
+ << info.get_overrun_size()
+ << " which is greater than maximum allowed "
+ "size ("
+ << info.max_leaf_size
+ << " bytes) for a record on index leaf page.";
+ }
+
+ if (strict) {
+ return false;
+ }
+
+ ib_warn_row_too_big(m_thd, index.table);
+ }
+
+ return true;
+}
+
/** Update a new table in an InnoDB database.
@return error number */
int
@@ -22165,32 +22258,6 @@ innobase_convert_to_system_charset(
cs2, to, static_cast<uint>(len), errors)));
}
-/**********************************************************************
-Issue a warning that the row is too big. */
-void
-ib_warn_row_too_big(const dict_table_t* table)
-{
- /* If prefix is true then a 768-byte prefix is stored
- locally for BLOB fields. Refer to dict_table_get_format() */
- const bool prefix = (dict_tf_get_format(table->flags)
- == UNIV_FORMAT_A);
-
- const ulint free_space = page_get_free_space_of_empty(
- table->flags & DICT_TF_COMPACT) / 2;
-
- THD* thd = current_thd;
-
- push_warning_printf(
- thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_TO_BIG_ROW,
- "Row size too large (> " ULINTPF ")."
- " Changing some columns to TEXT"
- " or BLOB %smay help. In current row format, BLOB prefix of"
- " %d bytes is stored inline.", free_space
- , prefix ? "or using ROW_FORMAT=DYNAMIC or"
- " ROW_FORMAT=COMPRESSED ": ""
- , prefix ? DICT_MAX_FIXED_COL_LEN : 0);
-}
-
/** Validate the requested buffer pool size. Also, reserve the necessary
memory needed for buffer pool resize.
@param[in] thd thread handle
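For reference, the free_space/2 bound used by ib_warn_row_too_big() above
works out to 8126 on the default 16KiB page size: an empty page carries 132
bytes of fixed overhead (38-byte FIL header, 56-byte page header, 26 bytes
for the COMPACT infimum and supremum records, two 2-byte directory slots,
and an 8-byte FIL trailer), leaving 16252 bytes of free space, half of which
a record may use. A sketch of that arithmetic, with the overheads written
out as assumed constants rather than calls to the real
page_get_free_space_of_empty():

    #include <cstdio>

    int main() {
        const unsigned page_size = 16384;     // default innodb_page_size
        const unsigned fil_header = 38;       // FIL page header
        const unsigned page_header = 56;      // index page header + FSEG headers
        const unsigned infimum_supremum = 26; // COMPACT system records
        const unsigned dir_slots = 2 * 2;     // two 2-byte directory slots
        const unsigned fil_trailer = 8;       // checksum trailer

        const unsigned free_space = page_size - fil_header - page_header
            - infimum_supremum - dir_slots - fil_trailer; // 16252
        // A record must fit in half the free space of an empty page.
        printf("Row size too large (> %u).\n", free_space / 2); // 8126
        return 0;
    }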
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index b812a6f3d59..312da6451f0 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -631,7 +631,7 @@ public:
- all but name/path is used, when validating options and using flags. */
create_table_info_t(
THD* thd,
- TABLE* form,
+ const TABLE* form,
HA_CREATE_INFO* create_info,
char* table_name,
char* remote_path,
@@ -679,6 +679,11 @@ public:
void allocate_trx();
+ /** Checks that every index has a sane size. Depends on strict mode */
+ bool row_size_is_acceptable(const dict_table_t& table) const;
+ /** Checks that the given index has a sane size. Depends on strict mode */
+ bool row_size_is_acceptable(const dict_index_t& index) const;
+
/** Determines InnoDB table flags.
If strict_mode=OFF, this will adjust the flags to what should be assumed.
@retval true if successful, false if error */
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 9ed13f5ac0d..bae93e399e9 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -4400,6 +4400,10 @@ prepare_inplace_alter_table_dict(
new_clustered = DICT_CLUSTERED & index_defs[0].ind_type;
+ create_table_info_t info(ctx->prebuilt->trx->mysql_thd, altered_table,
+ ha_alter_info->create_info, NULL, NULL,
+ srv_file_per_table);
+
if (num_fts_index > 1) {
my_error(ER_INNODB_FT_LIMIT, MYF(0));
goto error_handled;
@@ -4840,6 +4844,11 @@ index_created:
goto error_handling;
}
+ if (!info.row_size_is_acceptable(*ctx->add_index[a])) {
+ error = DB_TOO_BIG_RECORD;
+ goto error_handling;
+ }
+
DBUG_ASSERT(ctx->add_index[a]->is_committed()
== !!new_clustered);
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 52ce261a521..ad7d6bc62cb 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -1099,18 +1099,14 @@ added column.
@param[in] index index; NOTE! The index memory
object is freed in this function!
@param[in] page_no root page number of the index
-@param[in] strict TRUE=refuse to create the index
- if records could be too big to fit in
- an B-tree page
@param[in] add_v new virtual column that being added along with
an add index call
-@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
+@return DB_SUCCESS, or DB_CORRUPTION */
dberr_t
dict_index_add_to_cache(
dict_table_t* table,
dict_index_t* index,
ulint page_no,
- ibool strict,
const dict_add_v_col_t* add_v=NULL)
MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index a6047d6f8c3..b42ddeacd0e 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -1036,6 +1036,61 @@ struct dict_index_t{
@return true if the index record could become too big */
bool rec_potentially_too_big(const dict_table_t* candidate_table,
bool strict) const;
+
+ /** This ad-hoc class is used by record_size_info only. */
+ class record_size_info_t {
+ public:
+ record_size_info_t()
+ : max_leaf_size(0), shortest_size(0), too_big(false),
+ first_overrun_field_index(SIZE_T_MAX), overrun_size(0)
+ {
+ }
+
+ /** Mark row potentially too big for page and set up first
+ overflow field index. */
+ void set_too_big(size_t field_index = SIZE_T_MAX)
+ {
+ too_big = true;
+ if (first_overrun_field_index > field_index) {
+ first_overrun_field_index = field_index;
+ overrun_size = shortest_size;
+ }
+ }
+
+ size_t get_first_overrun_field_index() const
+ {
+ ut_ad(row_is_too_big());
+ return first_overrun_field_index;
+ }
+
+ size_t get_overrun_size() const
+ {
+ ut_ad(row_is_too_big());
+ return overrun_size;
+ }
+
+ bool row_is_too_big() const { return too_big; }
+
+ size_t max_leaf_size; /** Maximum allowed record size
+ on a leaf page */
+ size_t shortest_size; /** Size of the longest possible row,
+ counting every column that can be
+ stored off-page as in overflow pages
+ (hence the shortest such row) */
+
+ private:
+ bool too_big; /** True when the maximum row size this
+ index can produce is bigger than the
+ maximum row size the page can hold. */
+ size_t first_overrun_field_index; /** Index of the field
+ whose addition overflowed the
+ maximum allowed row size.
+ Useful for reporting back to
+ the user. */
+ size_t overrun_size; /** Row size at the point of
+ overflow */
+ };
+
+ /** Performs a row size check: computes the maximum allowed record
+ size for this index, the size of the longest possible row (counting
+ everything that can go to overflow pages as stored there), and the
+ index of the field which made the records too big to fit on a
+ page. */
+ record_size_info_t record_size_info() const;
};
/** Detach a column from an index.
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index fe2a012c323..ffe070426b7 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -2453,8 +2453,7 @@ row_create_index_for_mysql(
index_id_t index_id = index->id;
/* add index to dictionary cache and also free index object. */
- err = dict_index_add_to_cache(
- table, index, FIL_NULL, trx_is_strict(trx));
+ err = dict_index_add_to_cache(table, index, FIL_NULL);
if (err != DB_SUCCESS) {
goto error_handling;