summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarko Mäkelä <marko.makela@mariadb.com>2020-05-16 07:39:15 +0300
committerMarko Mäkelä <marko.makela@mariadb.com>2020-05-16 07:39:15 +0300
commit9e6e43551fc61bc34152f8d60f5d72f0d3814787 (patch)
tree67a834e1fee12ed5d4bf9897aa00146d728d936b
parent4f29d776c756ac522ae49c481ea8975dee8787fe (diff)
parent3d0bb2b7f1eb39d1b3773e634499ff98576679f7 (diff)
downloadmariadb-git-9e6e43551fc61bc34152f8d60f5d72f0d3814787.tar.gz
Merge 10.3 into 10.4
We will expose some more std::atomic internals in Atomic_counter, so that dict_index_t::lock will support the default assignment operator.
-rw-r--r--client/mysqltest.cc9
-rw-r--r--include/my_counter.h13
-rw-r--r--include/my_valgrind.h34
-rw-r--r--include/span.h62
m---------libmariadb0
-rw-r--r--mysql-test/suite/encryption/t/innodb_encryption_tables.test2
-rw-r--r--mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result45
-rw-r--r--mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test64
-rw-r--r--mysql-test/suite/innodb/r/analyze_table.result18
-rw-r--r--mysql-test/suite/innodb/t/analyze_table.test33
-rw-r--r--mysql-test/suite/rpl/r/rpl_failed_drop_tbl_binlog.result32
-rw-r--r--mysql-test/suite/rpl/t/rpl_failed_drop_tbl_binlog.test64
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_innodb.result12
-rw-r--r--sql/field.cc11
-rw-r--r--sql/field.h18
-rw-r--r--sql/log_event.cc2
-rw-r--r--sql/sql_statistics.cc1
-rw-r--r--storage/innobase/btr/btr0btr.cc14
-rw-r--r--storage/innobase/btr/btr0cur.cc2
-rw-r--r--storage/innobase/btr/btr0pcur.cc2
-rw-r--r--storage/innobase/btr/btr0sea.cc260
-rw-r--r--storage/innobase/buf/buf0buf.cc97
-rw-r--r--storage/innobase/buf/buf0lru.cc160
-rw-r--r--storage/innobase/dict/dict0crea.cc9
-rw-r--r--storage/innobase/dict/dict0dict.cc143
-rw-r--r--storage/innobase/dict/dict0mem.cc7
-rw-r--r--storage/innobase/dict/dict0stats.cc9
-rw-r--r--storage/innobase/fil/fil0pagecompress.cc1
-rw-r--r--storage/innobase/fsp/fsp0fsp.cc114
-rw-r--r--storage/innobase/handler/ha_innodb.cc38
-rw-r--r--storage/innobase/handler/handler0alter.cc16
-rw-r--r--storage/innobase/ibuf/ibuf0ibuf.cc2
-rw-r--r--storage/innobase/include/btr0pcur.h2
-rw-r--r--storage/innobase/include/btr0pcur.ic2
-rw-r--r--storage/innobase/include/btr0sea.h23
-rw-r--r--storage/innobase/include/btr0sea.ic2
-rw-r--r--storage/innobase/include/buf0buf.h10
-rw-r--r--storage/innobase/include/buf0lru.h11
-rw-r--r--storage/innobase/include/dict0mem.h58
-rw-r--r--storage/innobase/include/fsp0fsp.h48
-rw-r--r--storage/innobase/include/srv0srv.h7
-rw-r--r--storage/innobase/include/sync0rw.h10
-rw-r--r--storage/innobase/include/sync0rw.ic54
-rw-r--r--storage/innobase/row/row0import.cc14
-rw-r--r--storage/innobase/row/row0merge.cc10
-rw-r--r--storage/innobase/row/row0mysql.cc32
-rw-r--r--storage/innobase/row/row0purge.cc21
-rw-r--r--storage/innobase/row/row0sel.cc2
-rw-r--r--storage/innobase/row/row0vers.cc1
-rw-r--r--storage/innobase/srv/srv0srv.cc13
-rw-r--r--storage/innobase/sync/sync0arr.cc10
-rw-r--r--storage/innobase/sync/sync0rw.cc44
-rw-r--r--storage/innobase/trx/trx0purge.cc6
-rw-r--r--storage/innobase/trx/trx0undo.cc6
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result8
55 files changed, 606 insertions, 1082 deletions
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index a29835d9b36..40c0a4ae3fe 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -587,9 +587,10 @@ ATTRIBUTE_NORETURN
static void cleanup_and_exit(int exit_code);
ATTRIBUTE_NORETURN
-void really_die(const char *msg);
+static void really_die(const char *msg);
void report_or_die(const char *fmt, ...);
-void die(const char *fmt, ...);
+ATTRIBUTE_NORETURN
+static void die(const char *fmt, ...);
static void make_error_message(char *buf, size_t len, const char *fmt, va_list args);
ATTRIBUTE_NORETURN ATTRIBUTE_FORMAT(printf, 1, 2)
void abort_not_supported_test(const char *fmt, ...);
@@ -1540,7 +1541,7 @@ static void make_error_message(char *buf, size_t len, const char *fmt, va_list a
s+= my_snprintf(s, end -s, "\n");
}
-void die(const char *fmt, ...)
+static void die(const char *fmt, ...)
{
char buff[DIE_BUFF_SIZE];
va_list args;
@@ -1549,7 +1550,7 @@ void die(const char *fmt, ...)
really_die(buff);
}
-void really_die(const char *msg)
+static void really_die(const char *msg)
{
static int dying= 0;
fflush(stdout);
diff --git a/include/my_counter.h b/include/my_counter.h
index c5cbe296df0..432dc7dda3d 100644
--- a/include/my_counter.h
+++ b/include/my_counter.h
@@ -1,7 +1,7 @@
#ifndef MY_COUNTER_H_INCLUDED
#define MY_COUNTER_H_INCLUDED
/*
- Copyright (C) 2018 MariaDB Foundation
+ Copyright (C) 2018, 2020, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -45,5 +45,16 @@ public:
operator Type() const { return m_counter.load(std::memory_order_relaxed); }
Type operator=(const Type val)
{ m_counter.store(val, std::memory_order_relaxed); return val; }
+ Type operator=(const Atomic_counter<Type> &rhs) { return *this= Type{rhs}; }
+
+ Type fetch_add(const Type i, std::memory_order m)
+ { return m_counter.fetch_add(i, m); }
+ Type fetch_sub(const Type i, std::memory_order m)
+ { return m_counter.fetch_sub(i, m); }
+ bool compare_exchange_strong(Type& i1, const Type i2,
+ std::memory_order m1, std::memory_order m2)
+ { return m_counter.compare_exchange_strong(i1, i2, m1, m2); }
+ Type exchange(const Type i, std::memory_order m)
+ { return m_counter.exchange(i, m); }
};
#endif /* MY_COUNTER_H_INCLUDED */
diff --git a/include/my_valgrind.h b/include/my_valgrind.h
index 08ad3f46b96..8aaa261fd5b 100644
--- a/include/my_valgrind.h
+++ b/include/my_valgrind.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010, 2019, MariaDB Corporation.
+/* Copyright (C) 2010, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -32,7 +32,9 @@
#if defined(HAVE_VALGRIND_MEMCHECK_H) && defined(HAVE_valgrind)
# include <valgrind/memcheck.h>
+# define HAVE_valgrind_or_MSAN
# define MEM_UNDEFINED(a,len) VALGRIND_MAKE_MEM_UNDEFINED(a,len)
+# define MEM_MAKE_DEFINED(a,len) VALGRIND_MAKE_MEM_DEFINED(a,len)
# define MEM_NOACCESS(a,len) VALGRIND_MAKE_MEM_NOACCESS(a,len)
# define MEM_CHECK_ADDRESSABLE(a,len) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(a,len)
# define MEM_CHECK_DEFINED(a,len) VALGRIND_CHECK_MEM_IS_DEFINED(a,len)
@@ -42,28 +44,42 @@
/* How to do manual poisoning:
https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */
# define MEM_UNDEFINED(a,len) ASAN_UNPOISON_MEMORY_REGION(a,len)
+# define MEM_MAKE_DEFINED(a,len) ((void) 0)
# define MEM_NOACCESS(a,len) ASAN_POISON_MEMORY_REGION(a,len)
# define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0)
# define MEM_CHECK_DEFINED(a,len) ((void) 0)
# define REDZONE_SIZE 8
+#elif __has_feature(memory_sanitizer)
+# include <sanitizer/msan_interface.h>
+# define HAVE_valgrind_or_MSAN
+# define MEM_UNDEFINED(a,len) __msan_allocated_memory(a,len)
+# define MEM_MAKE_DEFINED(a,len) __msan_unpoison(a,len)
+# define MEM_NOACCESS(a,len) ((void) 0)
+# define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0)
+# define MEM_CHECK_DEFINED(a,len) __msan_check_mem_is_initialized(a,len)
+# define REDZONE_SIZE 8
#else
# define MEM_UNDEFINED(a,len) ((void) (a), (void) (len))
+# define MEM_MAKE_DEFINED(a,len) ((void) 0)
# define MEM_NOACCESS(a,len) ((void) 0)
# define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0)
# define MEM_CHECK_DEFINED(a,len) ((void) 0)
# define REDZONE_SIZE 0
#endif /* HAVE_VALGRIND_MEMCHECK_H */
-#if defined(TRASH_FREED_MEMORY)
-/* NOTE: Do not invoke TRASH_FILL directly! Use TRASH_ALLOC or TRASH_FREE.
-
-The MEM_UNDEFINED() call before memset() is for canceling the effect
-of any previous MEM_NOACCESS(). We must invoke MEM_UNDEFINED() after
-writing the dummy pattern, unless MEM_NOACCESS() is going to be invoked.
-On AddressSanitizer, the MEM_UNDEFINED() in TRASH_ALLOC() has no effect. */
+#ifdef TRASH_FREED_MEMORY
+/*
+ TRASH_FILL() has to call MEM_UNDEFINED() to cancel any effect of TRASH_FREE().
+ This can happen in the case one does
+ TRASH_ALLOC(A,B) ; TRASH_FREE(A,B) ; TRASH_ALLOC(A,B)
+ to reuse the same memory in an internal memory allocator like MEM_ROOT.
+ For my_malloc() and safemalloc() the extra MEM_UNDEFINED is bit of an
+ overkill.
+ TRASH_FILL() is an internal function and should not be used externally.
+*/
#define TRASH_FILL(A,B,C) do { const size_t trash_tmp= (B); MEM_UNDEFINED(A, trash_tmp); memset(A, C, trash_tmp); } while (0)
#else
-#define TRASH_FILL(A,B,C) while (0)
+#define TRASH_FILL(A,B,C) do { MEM_UNDEFINED((A), (B)); } while (0)
#endif
/** Note that some memory became allocated or uninitialized. */
#define TRASH_ALLOC(A,B) do { TRASH_FILL(A,B,0xA5); MEM_UNDEFINED(A,B); } while(0)
diff --git a/include/span.h b/include/span.h
index 0ed0158088c..0e8516933c6 100644
--- a/include/span.h
+++ b/include/span.h
@@ -24,11 +24,33 @@ this program; if not, write to the Free Software Foundation, Inc.,
namespace st_
{
+namespace detail
+{
+
+template <class T> struct remove_cv
+{
+ typedef T type;
+};
+template <class T> struct remove_cv<const T>
+{
+ typedef T type;
+};
+template <class T> struct remove_cv<volatile T>
+{
+ typedef T type;
+};
+template <class T> struct remove_cv<const volatile T>
+{
+ typedef T type;
+};
+
+} // namespace detail
+
template <class ElementType> class span
{
public:
typedef ElementType element_type;
- typedef ElementType value_type;
+ typedef typename detail::remove_cv<ElementType>::type value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef element_type *pointer;
@@ -38,7 +60,6 @@ public:
typedef pointer iterator;
typedef const_pointer const_iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
span() : data_(NULL), size_(0) {}
@@ -64,73 +85,72 @@ public:
span &operator=(const span &other)
{
- data_= other.data_;
- size_= other.size_;
+ data_= other.data();
+ size_= other.size();
return *this;
}
template <size_t Count> span<element_type> first() const
{
assert(!empty());
- return span(data_, 1);
+ return span(data(), 1);
}
template <size_t Count> span<element_type> last() const
{
assert(!empty());
- return span(data_ + size() - 1, 1);
+ return span(data() + size() - 1, 1);
}
span<element_type> first(size_type count) const
{
assert(!empty());
- return span(data_, 1);
+ return span(data(), 1);
}
span<element_type> last(size_type count) const
{
assert(!empty());
- return span(data_ + size() - 1, 1);
+ return span(data() + size() - 1, 1);
}
span<element_type> subspan(size_type offset, size_type count) const
{
assert(!empty());
assert(size() >= offset + count);
- return span(data_ + offset, count);
+ return span(data() + offset, count);
}
size_type size() const { return size_; }
- size_type size_bytes() const { return size_ * sizeof(ElementType); }
- bool empty() const __attribute__((warn_unused_result)) { return size_ == 0; }
+ size_type size_bytes() const { return size() * sizeof(ElementType); }
+ bool empty() const __attribute__((warn_unused_result))
+ {
+ return size() == 0;
+ }
reference operator[](size_type idx) const
{
assert(size() > idx);
- return data_[idx];
+ return data()[idx];
}
reference front() const
{
assert(!empty());
- return data_[0];
+ return data()[0];
}
reference back() const
{
assert(!empty());
- return data_[size() - 1];
- }
- pointer data() const
- {
- assert(!empty());
- return data_;
+ return data()[size() - 1];
}
+ pointer data() const { return data_; }
iterator begin() const { return data_; }
iterator end() const { return data_ + size_; }
reverse_iterator rbegin() const
{
- return std::reverse_iterator<iterator>(std::advance(end(), -1));
+ return std::reverse_iterator<iterator>(end());
}
reverse_iterator rend() const
{
- return std::reverse_iterator<iterator>(std::advance(begin(), -1));
+ return std::reverse_iterator<iterator>(begin());
}
private:
diff --git a/libmariadb b/libmariadb
-Subproject 2759b87d72926b7c9b5426437a7c8dd15ff5794
+Subproject cdfecebc9932a0dd5516c10505bfe78d79132e7
diff --git a/mysql-test/suite/encryption/t/innodb_encryption_tables.test b/mysql-test/suite/encryption/t/innodb_encryption_tables.test
index bc762faf12e..d03bc890ba4 100644
--- a/mysql-test/suite/encryption/t/innodb_encryption_tables.test
+++ b/mysql-test/suite/encryption/t/innodb_encryption_tables.test
@@ -1,6 +1,8 @@
-- source include/have_innodb.inc
-- source include/have_example_key_management_plugin.inc
-- source include/not_embedded.inc
+# We can't run this test under valgrind as it 'takes forever'
+-- source include/not_valgrind.inc
create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb;
create table innodb_compact(c1 bigint not null, b char(200)) engine=innodb row_format=compact;
diff --git a/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result b/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result
index e3e5ee6857c..a545b66a473 100644
--- a/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result
+++ b/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result
@@ -234,48 +234,3 @@ set global debug_dbug= @saved_dbug;
drop table t1;
set debug_sync=reset;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
-#
-# MDEV-18546 ASAN heap-use-after-free
-# in innobase_get_computed_value / row_purge
-#
-CREATE TABLE t1 (
-pk INT AUTO_INCREMENT,
-b BIT(15),
-v BIT(15) AS (b) VIRTUAL,
-PRIMARY KEY(pk),
-UNIQUE(v)
-) ENGINE=InnoDB;
-INSERT IGNORE INTO t1 (b) VALUES
-(NULL),(b'011'),(b'000110100'),
-(b'01101101010'),(b'01111001001011'),(NULL);
-SET GLOBAL innodb_debug_sync = "ib_clust_v_col_before_row_allocated "
- "SIGNAL before_row_allocated "
- "WAIT_FOR flush_unlock";
-SET GLOBAL innodb_debug_sync = "ib_open_after_dict_open "
- "SIGNAL purge_open "
- "WAIT_FOR select_open";
-SET @saved_dbug= @@GLOBAL.debug_dbug;
-set global debug_dbug= "d,ib_purge_virtual_index_callback";
-connect purge_waiter,localhost,root;
-SET debug_sync= "now WAIT_FOR before_row_allocated";
-connection default;
-REPLACE INTO t1 (pk, b) SELECT pk, b FROM t1;
-connection purge_waiter;
-connection default;
-disconnect purge_waiter;
-FLUSH TABLES;
-SET GLOBAL innodb_debug_sync = reset;
-SET debug_sync= "now SIGNAL flush_unlock WAIT_FOR purge_open";
-SET GLOBAL innodb_debug_sync = reset;
-SET debug_sync= "ib_open_after_dict_open SIGNAL select_open";
-SELECT * FROM t1;
-pk b v
-1 NULL NULL
-2
-3
-4 j j
-5 K K
-6 NULL NULL
-DROP TABLE t1;
-SET debug_sync= reset;
-set global debug_dbug= @saved_dbug;
diff --git a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test
index 69b784d497a..3d91cd87cb9 100644
--- a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test
+++ b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test
@@ -311,67 +311,3 @@ drop table t1;
--source include/wait_until_count_sessions.inc
set debug_sync=reset;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
-
---echo #
---echo # MDEV-18546 ASAN heap-use-after-free
---echo # in innobase_get_computed_value / row_purge
---echo #
-
-CREATE TABLE t1 (
- pk INT AUTO_INCREMENT,
- b BIT(15),
- v BIT(15) AS (b) VIRTUAL,
- PRIMARY KEY(pk),
- UNIQUE(v)
-) ENGINE=InnoDB;
-INSERT IGNORE INTO t1 (b) VALUES
- (NULL),(b'011'),(b'000110100'),
- (b'01101101010'),(b'01111001001011'),(NULL);
-
-SET GLOBAL innodb_debug_sync = "ib_clust_v_col_before_row_allocated "
- "SIGNAL before_row_allocated "
- "WAIT_FOR flush_unlock";
-SET GLOBAL innodb_debug_sync = "ib_open_after_dict_open "
- "SIGNAL purge_open "
- "WAIT_FOR select_open";
-
-# In 10.2 trx_undo_roll_ptr_is_insert(t_roll_ptr) condition never pass in purge,
-# so this condition is forced to pass in row_vers_old_has_index_entry
-SET @saved_dbug= @@GLOBAL.debug_dbug;
-set global debug_dbug= "d,ib_purge_virtual_index_callback";
-
-# The purge starts from REPLACE command. To avoid possible race, separate
-# connection is used.
---connect(purge_waiter,localhost,root)
---send
-SET debug_sync= "now WAIT_FOR before_row_allocated";
-
---connection default
-REPLACE INTO t1 (pk, b) SELECT pk, b FROM t1;
-
---connection purge_waiter
-# Now we will definitely catch ib_clust_v_col_before_row_allocated
---reap
---connection default
---disconnect purge_waiter
-
-# purge hangs on the sync point. table is purged, ref_count is set to 0
-FLUSH TABLES;
-
-# Avoid hang on repeating purge.
-# Reset Will be applied after first record is purged
-SET GLOBAL innodb_debug_sync = reset;
-
-SET debug_sync= "now SIGNAL flush_unlock WAIT_FOR purge_open";
-
-# Avoid hang on repeating purge
-SET GLOBAL innodb_debug_sync = reset;
-
-# select unblocks purge thread
-SET debug_sync= "ib_open_after_dict_open SIGNAL select_open";
-SELECT * FROM t1;
-
-# Cleanup
-DROP TABLE t1;
-SET debug_sync= reset;
-set global debug_dbug= @saved_dbug;
diff --git a/mysql-test/suite/innodb/r/analyze_table.result b/mysql-test/suite/innodb/r/analyze_table.result
index 57095b725eb..830130821da 100644
--- a/mysql-test/suite/innodb/r/analyze_table.result
+++ b/mysql-test/suite/innodb/r/analyze_table.result
@@ -1,26 +1,16 @@
-CREATE PROCEDURE populate_t1()
-BEGIN
-DECLARE i int DEFAULT 1;
-START TRANSACTION;
-WHILE (i <= 1000000) DO
-INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
-SET i = i + 1;
-END WHILE;
-COMMIT;
-END|
+set use_stat_tables='preferably';
CREATE TABLE t1(
class INT,
id INT,
title VARCHAR(100)
) ENGINE=InnoDB;
+insert into t1 select seq, seq, concat('a', seq) from seq_1_to_500;
SELECT COUNT(*) FROM t1;
COUNT(*)
-1000000
-SET GLOBAL innodb_stats_persistent_sample_pages=2000;
+500
+set @@max_heap_table_size=16384;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
-DROP PROCEDURE populate_t1;
-SET GLOBAL innodb_stats_persistent_sample_pages=default;
diff --git a/mysql-test/suite/innodb/t/analyze_table.test b/mysql-test/suite/innodb/t/analyze_table.test
index e9db3668f02..538eed04ba4 100644
--- a/mysql-test/suite/innodb/t/analyze_table.test
+++ b/mysql-test/suite/innodb/t/analyze_table.test
@@ -1,23 +1,11 @@
-#
-# BUG#22385442 - INNODB: DIFFICULT TO FIND FREE BLOCKS IN THE BUFFER POOL
-#
-
--source include/have_innodb.inc
---source include/big_test.inc
+--source include/have_sequence.inc
-DELIMITER |;
-CREATE PROCEDURE populate_t1()
-BEGIN
- DECLARE i int DEFAULT 1;
+#
+# MDEV-22073 MSAN use-of-uninitialized-value in collect_statistics_for_table()
+#
- START TRANSACTION;
- WHILE (i <= 1000000) DO
- INSERT INTO t1 VALUES (i, i, CONCAT('a', i));
- SET i = i + 1;
- END WHILE;
- COMMIT;
-END|
-DELIMITER ;|
+set use_stat_tables='preferably';
CREATE TABLE t1(
class INT,
@@ -25,18 +13,11 @@ CREATE TABLE t1(
title VARCHAR(100)
) ENGINE=InnoDB;
--- disable_query_log
-CALL populate_t1();
--- enable_query_log
+insert into t1 select seq, seq, concat('a', seq) from seq_1_to_500;
SELECT COUNT(*) FROM t1;
-SET GLOBAL innodb_stats_persistent_sample_pages=2000;
-
+set @@max_heap_table_size=16384;
ANALYZE TABLE t1;
DROP TABLE t1;
-
-DROP PROCEDURE populate_t1;
-
-SET GLOBAL innodb_stats_persistent_sample_pages=default;
diff --git a/mysql-test/suite/rpl/r/rpl_failed_drop_tbl_binlog.result b/mysql-test/suite/rpl/r/rpl_failed_drop_tbl_binlog.result
deleted file mode 100644
index df36fa82e0f..00000000000
--- a/mysql-test/suite/rpl/r/rpl_failed_drop_tbl_binlog.result
+++ /dev/null
@@ -1,32 +0,0 @@
-include/master-slave.inc
-[connection master]
-create table t1 (a int) engine=innodb;
-create table t2 (b longblob) engine=innodb;
-create table t3 (c int) engine=innodb;
-insert into t2 values (repeat('b',1024*1024));
-insert into t2 select * from t2;
-insert into t2 select * from t2;
-insert into t2 select * from t2;
-insert into t2 select * from t2;
-set debug_sync='rm_table_no_locks_before_delete_table SIGNAL nogo WAIT_FOR go EXECUTE 2';
-drop table t1, t2, t3;
-connect foo,localhost,root;
-set debug_sync='now SIGNAL go';
-kill query CONNECTION_ID;
-connection master;
-ERROR 70100: Query execution was interrupted
-"Tables t2 and t3 should be listed"
-SHOW TABLES;
-Tables_in_test
-t2
-t3
-include/show_binlog_events.inc
-Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
-connection slave;
-drop table t2, t3;
-connection master;
-set debug_sync='RESET';
-drop table t2, t3;
-include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_failed_drop_tbl_binlog.test b/mysql-test/suite/rpl/t/rpl_failed_drop_tbl_binlog.test
deleted file mode 100644
index 281e2a2ab47..00000000000
--- a/mysql-test/suite/rpl/t/rpl_failed_drop_tbl_binlog.test
+++ /dev/null
@@ -1,64 +0,0 @@
-# ==== Purpose ====
-#
-# Check that when the execution of a DROP TABLE command with single table
-# fails it should not be written to the binary log. Also test that when the
-# execution of DROP TABLE command with multiple tables fails the command
-# should be written into the binary log.
-#
-# ==== Implementation ====
-#
-# Steps:
-# 0 - Create tables named t1, t2, t3
-# 1 - Execute DROP TABLE t1,t2,t3 command.
-# 2 - Kill the DROP TABLE command while it is trying to drop table 't2'.
-# 3 - Verify that tables t2,t3 are present after the DROP command execution
-# was interrupted.
-# 4 - Check that table 't1' is present in binary log as part of DROP
-# command.
-#
-# ==== References ====
-#
-# MDEV-20348: DROP TABLE IF EXISTS killed on master but was replicated.
-#
-
---source include/have_innodb.inc
---source include/have_debug_sync.inc
---source include/have_binlog_format_statement.inc
---source include/master-slave.inc
-
-create table t1 (a int) engine=innodb;
-create table t2 (b longblob) engine=innodb;
-create table t3 (c int) engine=innodb;
-insert into t2 values (repeat('b',1024*1024));
-insert into t2 select * from t2;
-insert into t2 select * from t2;
-insert into t2 select * from t2;
-insert into t2 select * from t2;
-let $binlog_start= query_get_value(SHOW MASTER STATUS, Position, 1);
-
-let $id=`select connection_id()`;
-set debug_sync='rm_table_no_locks_before_delete_table SIGNAL nogo WAIT_FOR go EXECUTE 2';
-send drop table t1, t2, t3;
-
-connect foo,localhost,root;
-set debug_sync='now SIGNAL go';
-let $wait_condition=select 1 from information_schema.processlist where state like 'debug sync point:%';
-source include/wait_condition.inc;
---replace_result $id CONNECTION_ID
-eval kill query $id;
-
-connection master;
-error ER_QUERY_INTERRUPTED;
-reap;
-
---echo "Tables t2 and t3 should be listed"
-SHOW TABLES;
---source include/show_binlog_events.inc
---sync_slave_with_master
-drop table t2, t3;
-
-connection master;
-set debug_sync='RESET';
-drop table t2, t3;
-
-source include/rpl_end.inc;
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
index 4b8d3b3e60f..c63e8afa709 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
@@ -537,18 +537,6 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
-VARIABLE_NAME INNODB_DEBUG_SYNC
-SESSION_VALUE NULL
-DEFAULT_VALUE
-VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE VARCHAR
-VARIABLE_COMMENT debug_sync for innodb purge threads. Use it to set up sync points for all purge threads at once. The commands will be applied sequentially at the beginning of purging the next undo record.
-NUMERIC_MIN_VALUE NULL
-NUMERIC_MAX_VALUE NULL
-NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST NULL
-READ_ONLY NO
-COMMAND_LINE_ARGUMENT NONE
VARIABLE_NAME INNODB_DEFAULT_ENCRYPTION_KEY_ID
SESSION_VALUE 1
DEFAULT_VALUE 1
diff --git a/sql/field.cc b/sql/field.cc
index 9d118a15748..0a8fdc3d3f5 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1392,7 +1392,7 @@ void Field::error_generated_column_function_is_not_allowed(THD *thd,
QT_ITEM_IDENT_SKIP_TABLE_NAMES));
my_error(ER_GENERATED_COLUMN_FUNCTION_IS_NOT_ALLOWED,
MYF(error ? 0 : ME_WARNING),
- tmp.c_ptr(), vcol_info->get_vcol_type_name(),
+ tmp.c_ptr_safe(), vcol_info->get_vcol_type_name(),
const_cast<const char*>(field_name.str));
}
@@ -7708,6 +7708,15 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
}
+#ifdef HAVE_valgrind_or_MSAN
+void Field_varstring::mark_unused_memory_as_defined()
+{
+ uint used_length= get_length();
+ MEM_MAKE_DEFINED(get_data() + used_length, field_length - used_length);
+}
+#endif
+
+
int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
uint max_len)
{
diff --git a/sql/field.h b/sql/field.h
index 773f0e05468..0715981431b 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1,7 +1,7 @@
#ifndef FIELD_INCLUDED
#define FIELD_INCLUDED
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2019, MariaDB Corporation.
+ Copyright (c) 2008, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -825,6 +825,19 @@ public:
DBUG_ASSERT(ls.length < UINT_MAX32);
return store(ls.str, (uint) ls.length, cs);
}
+
+#ifdef HAVE_valgrind_or_MSAN
+ /**
+ Mark unused memory in the field as defined. Mainly used to ensure
+ that if we write full field to disk (for example in
+ Count_distinct_field::add(), we don't write unitalized data to
+ disk which would confuse valgrind or MSAN.
+ */
+ virtual void mark_unused_memory_as_defined() {}
+#else
+ void mark_unused_memory_as_defined() {}
+#endif
+
virtual double val_real(void)=0;
virtual longlong val_int(void)=0;
/*
@@ -3679,6 +3692,9 @@ public:
}
int store(const char *to,size_t length,CHARSET_INFO *charset);
using Field_str::store;
+#ifdef HAVE_valgrind_or_MSAN
+ void mark_unused_memory_as_defined();
+#endif
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 1a77397bf79..3707f73a716 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -10775,6 +10775,8 @@ const char *sql_ex_info::init(const char *buf, const char *buf_end,
}
else
{
+ if (buf_end - buf < 7)
+ return 0; // Wrong data
field_term_len= enclosed_len= line_term_len= line_start_len= escaped_len=1;
field_term = buf++; // Use first byte in string
enclosed= buf++;
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index a94fb1196b4..2681a9b286f 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -1671,6 +1671,7 @@ public:
*/
virtual bool add()
{
+ table_field->mark_unused_memory_as_defined();
return tree->unique_add(table_field->ptr);
}
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index c180bbb8831..a8e319dd321 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -733,8 +733,10 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
{
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
#ifdef BTR_CUR_HASH_ADAPT
- ut_ad(!block->index || !blob);
- ut_ad(!block->index || page_is_leaf(block->frame));
+ if (block->index && !block->index->freed()) {
+ ut_ad(!blob);
+ ut_ad(page_is_leaf(block->frame));
+ }
#endif
ut_ad(index->table->space_id == block->page.id.space());
/* The root page is freed by btr_free_root(). */
@@ -760,7 +762,7 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
: PAGE_HEADER + PAGE_BTR_SEG_TOP];
fseg_free_page(seg_header,
index->table->space, block->page.id.page_no(),
- block->index != NULL, !block->page.flush_observer, mtr);
+ !block->page.flush_observer, mtr);
/* The page was marked free in the allocation bitmap, but it
should remain exclusively latched until mtr_t::commit() or until it
@@ -1011,7 +1013,7 @@ static void btr_free_root(buf_block_t* block, mtr_t* mtr, bool invalidate)
BTR_FREED_INDEX_ID, mtr);
}
- while (!fseg_free_step(header, true, mtr)) {
+ while (!fseg_free_step(header, mtr)) {
/* Free the entire segment in small steps. */
}
}
@@ -1232,7 +1234,7 @@ leaf_loop:
fsp0fsp. */
finished = fseg_free_step(root + PAGE_HEADER + PAGE_BTR_SEG_LEAF,
- true, &mtr);
+ &mtr);
mtr_commit(&mtr);
if (!finished) {
@@ -1252,7 +1254,7 @@ top_loop:
#endif /* UNIV_BTR_DEBUG */
finished = fseg_free_step_not_header(
- root + PAGE_HEADER + PAGE_BTR_SEG_TOP, true, &mtr);
+ root + PAGE_HEADER + PAGE_BTR_SEG_TOP, &mtr);
mtr_commit(&mtr);
if (!finished) {
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 5a9c953cfba..1233f97ed9e 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -3406,7 +3406,7 @@ btr_cur_optimistic_insert(
page_t* page;
rec_t* dummy;
bool leaf;
- bool reorg;
+ bool reorg __attribute__((unused));
bool inherit = true;
ulint rec_size;
dberr_t err;
diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc
index 2a5a5c59b61..11e822f9e54 100644
--- a/storage/innobase/btr/btr0pcur.cc
+++ b/storage/innobase/btr/btr0pcur.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2019, MariaDB Corporation.
+Copyright (c) 2016, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 046e0795916..fbd3d5a4b1f 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -2,7 +2,7 @@
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -191,15 +191,8 @@ static
void
btr_search_check_free_space_in_heap(const dict_index_t* index)
{
- hash_table_t* table;
- mem_heap_t* heap;
-
- ut_ad(!btr_search_own_any(RW_LOCK_S));
- ut_ad(!btr_search_own_any(RW_LOCK_X));
-
- table = btr_get_search_table(index);
-
- heap = table->heap;
+ hash_table_t* table = btr_get_search_table(index);
+ mem_heap_t* heap = table->heap;
/* Note that we peek the value of heap->free_block without reserving
the latch: this is ok, because we will not guarantee that there will
@@ -335,20 +328,86 @@ void btr_search_sys_free()
/** Set index->ref_count = 0 on all indexes of a table.
@param[in,out] table table handler */
-static
-void
-btr_search_disable_ref_count(
- dict_table_t* table)
+static void btr_search_disable_ref_count(dict_table_t *table)
{
- dict_index_t* index;
+ for (dict_index_t *index= dict_table_get_first_index(table); index;
+ index= dict_table_get_next_index(index))
+ index->search_info->ref_count= 0;
+}
- ut_ad(mutex_own(&dict_sys.mutex));
+/** Lazily free detached metadata when removing the last reference. */
+ATTRIBUTE_COLD static void btr_search_lazy_free(dict_index_t *index)
+{
+ ut_ad(index->freed());
+ dict_table_t *table= index->table;
+ /* Perform the skipped steps of dict_index_remove_from_cache_low(). */
+ UT_LIST_REMOVE(table->freed_indexes, index);
+ rw_lock_free(&index->lock);
+ dict_mem_index_free(index);
+
+ if (!UT_LIST_GET_LEN(table->freed_indexes) &&
+ !UT_LIST_GET_LEN(table->indexes))
+ {
+ ut_ad(table->id == 0);
+ dict_mem_table_free(table);
+ }
+}
- for (index = dict_table_get_first_index(table);
- index != NULL;
- index = dict_table_get_next_index(index)) {
- index->search_info->ref_count = 0;
- }
+/** Clear the adaptive hash index on all pages in the buffer pool. */
+static void buf_pool_clear_hash_index()
+{
+ ut_ad(btr_search_own_all(RW_LOCK_X));
+ ut_ad(!btr_search_enabled);
+
+ std::set<dict_index_t*> garbage;
+
+ for (ulong p = 0; p < srv_buf_pool_instances; p++)
+ {
+ buf_pool_t *buf_pool= buf_pool_from_array(p);
+ buf_chunk_t *chunks= buf_pool->chunks;
+ buf_chunk_t *chunk= chunks + buf_pool->n_chunks;
+
+ while (--chunk >= chunks)
+ {
+ buf_block_t *block= chunk->blocks;
+ for (ulint i= chunk->size; i--; block++)
+ {
+ dict_index_t *index= block->index;
+ assert_block_ahi_valid(block);
+
+ /* We can clear block->index and block->n_pointers when
+ btr_search_own_all(RW_LOCK_X); see the comments in buf0buf.h */
+
+ if (!index)
+ {
+# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+ ut_a(!block->n_pointers);
+# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+ continue;
+ }
+
+ ut_d(buf_page_state state= buf_block_get_state(block));
+ /* Another thread may have set the state to
+ BUF_BLOCK_REMOVE_HASH in buf_LRU_block_remove_hashed().
+
+ The state change in buf_page_realloc() is not observable here,
+ because in that case we would have !block->index.
+
+ In the end, the entire adaptive hash index will be removed. */
+ ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
+# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
+ block->n_pointers= 0;
+# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
+ if (index->freed())
+ garbage.insert(index);
+ block->index= NULL;
+ }
+ }
+ }
+
+ for (std::set<dict_index_t*>::iterator i= garbage.begin();
+ i != garbage.end(); i++)
+ btr_search_lazy_free(*i);
}
/** Disable the adaptive hash search system and empty the index.
@@ -420,31 +479,6 @@ void btr_search_enable()
btr_search_x_unlock_all();
}
-/** Returns the value of ref_count. The value is protected by latch.
-@param[in] info search info
-@param[in] index index identifier
-@return ref_count value. */
-ulint
-btr_search_info_get_ref_count(
- btr_search_t* info,
- dict_index_t* index)
-{
- ulint ret = 0;
-
- if (!btr_search_enabled) {
- return(ret);
- }
-
- ut_ad(info);
-
- rw_lock_t* ahi_latch = btr_get_search_latch(index);
- rw_lock_s_lock(ahi_latch);
- ret = info->ref_count;
- rw_lock_s_unlock(ahi_latch);
-
- return(ret);
-}
-
/** Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent.
@@ -641,28 +675,25 @@ btr_search_update_hash_ref(
buf_block_t* block,
const btr_cur_t* cursor)
{
- dict_index_t* index;
- ulint fold;
- rec_t* rec;
-
ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
- ut_ad(rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X));
+
ut_ad(rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
ut_ad(page_align(btr_cur_get_rec(cursor)) == block->frame);
ut_ad(page_is_leaf(block->frame));
assert_block_ahi_valid(block);
- index = block->index;
+ dict_index_t* index = block->index;
if (!index) {
-
return;
}
ut_ad(block->page.id.space() == index->table->space_id);
ut_ad(index == cursor->index);
ut_ad(!dict_index_is_ibuf(index));
+ rw_lock_t* const latch = btr_get_search_latch(index);
+ rw_lock_x_lock(latch);
if ((info->n_hash_potential > 0)
&& (block->curr_n_fields == info->n_fields)
@@ -672,18 +703,18 @@ btr_search_update_hash_ref(
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
- rec = btr_cur_get_rec(cursor);
+ const rec_t* rec = btr_cur_get_rec(cursor);
if (!page_rec_is_user_rec(rec)) {
-
- return;
+ goto func_exit;
}
- fold = rec_fold(rec,
- rec_get_offsets(rec, index, offsets_, true,
- ULINT_UNDEFINED, &heap),
- block->curr_n_fields,
- block->curr_n_bytes, index->id);
+ ulint fold = rec_fold(
+ rec,
+ rec_get_offsets(rec, index, offsets_, true,
+ ULINT_UNDEFINED, &heap),
+ block->curr_n_fields,
+ block->curr_n_bytes, index->id);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
@@ -693,6 +724,9 @@ btr_search_update_hash_ref(
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
}
+
+func_exit:
+ rw_lock_x_unlock(latch);
}
/** Checks if a guessed position for a tree cursor is right. Note that if
@@ -873,7 +907,6 @@ btr_search_guess_on_hash(
rw_lock_t* ahi_latch,
mtr_t* mtr)
{
- const rec_t* rec;
ulint fold;
index_id_t index_id;
#ifdef notdefined
@@ -884,7 +917,7 @@ btr_search_guess_on_hash(
ahi_latch, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
if (!btr_search_enabled) {
- return(FALSE);
+ return false;
}
ut_ad(index && info && tuple && cursor && mtr);
@@ -900,16 +933,14 @@ btr_search_guess_on_hash(
any latch here! */
if (info->n_hash_potential == 0) {
-
- return(FALSE);
+ return false;
}
cursor->n_fields = info->n_fields;
cursor->n_bytes = info->n_bytes;
if (dtuple_get_n_fields(tuple) < btr_search_get_n_fields(cursor)) {
-
- return(FALSE);
+ return false;
}
index_id = index->id;
@@ -923,6 +954,7 @@ btr_search_guess_on_hash(
cursor->flag = BTR_CUR_HASH;
rw_lock_t* use_latch = ahi_latch ? NULL : btr_get_search_latch(index);
+ const rec_t* rec;
if (use_latch) {
rw_lock_s_lock(use_latch);
@@ -935,47 +967,55 @@ btr_search_guess_on_hash(
ut_ad(rw_lock_own(ahi_latch, RW_LOCK_S));
}
- rec = (rec_t*) ha_search_and_get_data(
- btr_get_search_table(index), fold);
+ rec = static_cast<const rec_t*>(
+ ha_search_and_get_data(btr_get_search_table(index), fold));
- if (rec == NULL) {
+ if (!rec) {
if (use_latch) {
fail:
rw_lock_s_unlock(use_latch);
}
btr_search_failure(info, cursor);
-
- return(FALSE);
+ return false;
}
buf_block_t* block = buf_block_from_ahi(rec);
if (use_latch) {
-
if (!buf_page_get_known_nowait(
latch_mode, block, BUF_MAKE_YOUNG,
__FILE__, __LINE__, mtr)) {
goto fail;
}
+ const bool fail = index != block->index
+ && index_id == block->index->id;
+ ut_a(!fail || block->index->freed());
rw_lock_s_unlock(use_latch);
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
+ if (UNIV_UNLIKELY(fail)) {
+ btr_search_drop_page_hash_index(block);
+ goto fail_and_release_page;
+ }
+ } else if (UNIV_UNLIKELY(index != block->index
+ && index_id == block->index->id)) {
+ ut_a(block->index->freed());
+ goto fail_and_release_page;
}
if (buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
ut_ad(buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH);
+fail_and_release_page:
if (!ahi_latch) {
-
btr_leaf_page_release(block, latch_mode, mtr);
}
btr_search_failure(info, cursor);
-
- return(FALSE);
+ return false;
}
ut_ad(page_rec_is_user_rec(rec));
@@ -991,14 +1031,7 @@ fail:
right. */
if (index_id != btr_page_get_index_id(block->frame)
|| !btr_search_check_guess(cursor, !!ahi_latch, tuple, mode)) {
-
- if (!ahi_latch) {
- btr_leaf_page_release(block, latch_mode, mtr);
- }
-
- btr_search_failure(info, cursor);
-
- return(FALSE);
+ goto fail_and_release_page;
}
if (info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5) {
@@ -1059,7 +1092,7 @@ fail:
++buf_pool->stat.n_page_gets;
}
- return(TRUE);
+ return true;
}
/** Drop any adaptive hash index entries that point to an index page.
@@ -1081,32 +1114,28 @@ void btr_search_drop_page_hash_index(buf_block_t* block)
ulint* folds;
ulint i;
mem_heap_t* heap;
- const dict_index_t* index;
rec_offs* offsets;
rw_lock_t* latch;
- btr_search_t* info;
retry:
- /* Do a dirty check on block->index, return if the block is
- not in the adaptive hash index. */
- index = block->index;
/* This debug check uses a dirty read that could theoretically cause
false positives while buf_pool_clear_hash_index() is executing. */
assert_block_ahi_valid(block);
ut_ad(!btr_search_own_any(RW_LOCK_S));
ut_ad(!btr_search_own_any(RW_LOCK_X));
- if (index == NULL) {
+ if (!block->index) {
return;
}
ut_ad(block->page.buf_fix_count == 0
|| buf_block_get_state(block) == BUF_BLOCK_REMOVE_HASH
|| rw_lock_own_flagged(&block->lock,
- RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
+ RW_LOCK_FLAG_X | RW_LOCK_FLAG_S
+ | RW_LOCK_FLAG_SX));
ut_ad(page_is_leaf(block->frame));
- /* We must not dereference index here, because it could be freed
+ /* We must not dereference block->index here, because it could be freed
if (index->table->n_ref_count == 0 && !mutex_own(&dict_sys.mutex)).
Determine the ahi_slot based on the block contents. */
@@ -1121,18 +1150,12 @@ retry:
rw_lock_s_lock(latch);
assert_block_ahi_valid(block);
- if (block->index == NULL) {
+ if (!block->index) {
rw_lock_s_unlock(latch);
return;
}
- /* The index associated with a block must remain the
- same, because we are holding block->lock or the block is
- not accessible by other threads (BUF_BLOCK_REMOVE_HASH),
- or the index is not accessible to other threads
- (buf_fix_count == 0 when DROP TABLE or similar is executing
- buf_LRU_drop_page_hash_for_tablespace()). */
- ut_a(index == block->index);
+ dict_index_t* index = block->index;
#ifdef MYSQL_INDEX_DISABLE_AHI
ut_ad(!index->disable_ahi);
#endif
@@ -1140,7 +1163,7 @@ retry:
ut_ad(block->page.id.space() == index->table->space_id);
ut_a(index_id == index->id);
- ut_a(!dict_index_is_ibuf(index));
+ ut_ad(!dict_index_is_ibuf(index));
#ifdef UNIV_DEBUG
switch (dict_index_get_online_status(index)) {
case ONLINE_INDEX_CREATION:
@@ -1248,9 +1271,14 @@ next_rec:
folds[i], page);
}
- info = btr_search_get_info(block->index);
- ut_a(info->ref_count > 0);
- info->ref_count--;
+ switch (index->search_info->ref_count--) {
+ case 0:
+ ut_error;
+ case 1:
+ if (index->freed()) {
+ btr_search_lazy_free(index);
+ }
+ }
block->index = NULL;
@@ -1351,11 +1379,12 @@ btr_search_build_page_hash_index(
ut_ad(ahi_latch == btr_get_search_latch(index));
ut_ad(index);
ut_ad(block->page.id.space() == index->table->space_id);
- ut_a(!dict_index_is_ibuf(index));
+ ut_ad(!dict_index_is_ibuf(index));
ut_ad(page_is_leaf(block->frame));
ut_ad(rw_lock_own_flagged(&block->lock,
RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
+ ut_ad(block->page.id.page_no() >= 3);
rw_lock_s_lock(ahi_latch);
@@ -1548,11 +1577,7 @@ btr_search_info_update_slow(btr_search_t* info, btr_cur_t* cursor)
btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */
- rw_lock_x_lock(ahi_latch);
-
btr_search_update_hash_ref(info, block, cursor);
-
- rw_lock_x_unlock(ahi_latch);
}
if (build_index) {
@@ -1670,7 +1695,7 @@ void btr_search_update_hash_on_delete(btr_cur_t* cursor)
ut_ad(block->page.id.space() == index->table->space_id);
ut_a(index == cursor->index);
ut_a(block->curr_n_fields > 0 || block->curr_n_bytes > 0);
- ut_a(!dict_index_is_ibuf(index));
+ ut_ad(!dict_index_is_ibuf(index));
table = btr_get_search_table(index);
@@ -1741,7 +1766,7 @@ btr_search_update_hash_node_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
}
ut_a(cursor->index == index);
- ut_a(!dict_index_is_ibuf(index));
+ ut_ad(!dict_index_is_ibuf(index));
rw_lock_x_lock(ahi_latch);
if (!block->index) {
@@ -1794,8 +1819,6 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
ulint next_fold = 0; /* remove warning (??? bug ???) */
ulint n_fields;
ulint n_bytes;
- ibool left_side;
- bool locked = false;
mem_heap_t* heap = NULL;
rec_offs offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs* offsets = offsets_;
@@ -1835,11 +1858,11 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
ut_a(!index->disable_ahi);
#endif
ut_a(index == cursor->index);
- ut_a(!dict_index_is_ibuf(index));
+ ut_ad(!dict_index_is_ibuf(index));
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
- left_side = block->curr_left_side;
+ const bool left_side = block->curr_left_side;
ins_rec = page_rec_get_next_const(rec);
next_rec = page_rec_get_next_const(ins_rec);
@@ -1856,6 +1879,8 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
n_bytes, index->id);
}
+ bool locked = false;
+
if (!page_rec_is_infimum(rec) && !rec_is_metadata(rec, *index)) {
offsets = rec_get_offsets(
rec, index, offsets, true,
@@ -1914,7 +1939,6 @@ check_next_rec:
}
if (ins_fold != next_fold) {
-
if (!locked) {
locked = true;
rw_lock_x_lock(ahi_latch);
@@ -2044,7 +2068,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
== BUF_BLOCK_REMOVE_HASH);
}
- ut_a(!dict_index_is_ibuf(block->index));
+ ut_ad(!dict_index_is_ibuf(block->index));
ut_ad(block->page.id.space()
== block->index->table->space_id);
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 7d898d967bc..f23fe9b17b4 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -3184,66 +3184,6 @@ DECLARE_THREAD(buf_resize_thread)(void*)
OS_THREAD_DUMMY_RETURN;
}
-#ifdef BTR_CUR_HASH_ADAPT
-/** Clear the adaptive hash index on all pages in the buffer pool. */
-void
-buf_pool_clear_hash_index()
-{
- ulint p;
-
- ut_ad(btr_search_own_all(RW_LOCK_X));
- ut_ad(!buf_pool_resizing);
- ut_ad(!btr_search_enabled);
-
- for (p = 0; p < srv_buf_pool_instances; p++) {
- buf_pool_t* buf_pool = buf_pool_from_array(p);
- buf_chunk_t* chunks = buf_pool->chunks;
- buf_chunk_t* chunk = chunks + buf_pool->n_chunks;
-
- while (--chunk >= chunks) {
- buf_block_t* block = chunk->blocks;
- ulint i = chunk->size;
-
- for (; i--; block++) {
- dict_index_t* index = block->index;
- assert_block_ahi_valid(block);
-
- /* We can set block->index = NULL
- and block->n_pointers = 0
- when btr_search_own_all(RW_LOCK_X);
- see the comments in buf0buf.h */
-
- if (!index) {
-# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
- ut_a(!block->n_pointers);
-# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- continue;
- }
-
- ut_d(buf_page_state state
- = buf_block_get_state(block));
- /* Another thread may have set the
- state to BUF_BLOCK_REMOVE_HASH in
- buf_LRU_block_remove_hashed().
-
- The state change in buf_page_realloc()
- is not observable here, because in
- that case we would have !block->index.
-
- In the end, the entire adaptive hash
- index will be removed. */
- ut_ad(state == BUF_BLOCK_FILE_PAGE
- || state == BUF_BLOCK_REMOVE_HASH);
-# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
- block->n_pointers = 0;
-# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- block->index = NULL;
- }
- }
- }
-}
-#endif /* BTR_CUR_HASH_ADAPT */
-
/********************************************************************//**
Relocate a buffer control block. Relocates the block on the LRU list
and in buf_pool->page_hash. Does not relocate bpage->list.
@@ -4221,7 +4161,7 @@ static buf_block_t* buf_page_mtr_lock(buf_block_t *block,
{
case RW_NO_LATCH:
fix_type= MTR_MEMO_BUF_FIX;
- break;
+ goto done;
case RW_S_LATCH:
rw_lock_s_lock_inline(&block->lock, 0, file, line);
fix_type= MTR_MEMO_PAGE_S_FIX;
@@ -4237,6 +4177,15 @@ static buf_block_t* buf_page_mtr_lock(buf_block_t *block,
break;
}
+#ifdef BTR_CUR_HASH_ADAPT
+ {
+ dict_index_t *index= block->index;
+ if (index && index->freed())
+ btr_search_drop_page_hash_index(block);
+ }
+#endif /* BTR_CUR_HASH_ADAPT */
+
+done:
mtr_memo_push(mtr, block, fix_type);
return block;
}
@@ -4562,6 +4511,7 @@ evict_from_pool:
buf_pool_mutex_exit(buf_pool);
return(NULL);
}
+
break;
case BUF_BLOCK_ZIP_PAGE:
@@ -5105,9 +5055,11 @@ buf_page_get_known_nowait(
buf_pool = buf_pool_from_block(block);
+#ifdef BTR_CUR_HASH_ADAPT
if (mode == BUF_MAKE_YOUNG) {
buf_page_make_young_if_needed(&block->page);
}
+#endif /* BTR_CUR_HASH_ADAPT */
ut_ad(!ibuf_inside(mtr) || mode == BUF_KEEP_OLD);
@@ -5150,9 +5102,12 @@ buf_page_get_known_nowait(
deleting a record from SYS_INDEXES. This check will be
skipped in recv_recover_page() as well. */
- buf_page_mutex_enter(block);
- ut_a(!block->page.file_page_was_freed);
- buf_page_mutex_exit(block);
+# ifdef BTR_CUR_HASH_ADAPT
+ ut_ad(!block->page.file_page_was_freed
+ || (block->index && block->index->freed()));
+# else /* BTR_CUR_HASH_ADAPT */
+ ut_ad(!block->page.file_page_was_freed);
+# endif /* BTR_CUR_HASH_ADAPT */
}
#endif /* UNIV_DEBUG */
@@ -5638,6 +5593,12 @@ buf_page_create(
rw_lock_x_unlock(hash_lock);
buf_block_free(free_block);
+#ifdef BTR_CUR_HASH_ADAPT
+ if (block->page.state == BUF_BLOCK_FILE_PAGE
+ && UNIV_LIKELY_NULL(block->index)) {
+ btr_search_drop_page_hash_index(block);
+ }
+#endif /* BTR_CUR_HASH_ADAPT */
if (!recv_recovery_is_on()) {
return buf_page_get_with_no_latch(page_id, zip_size,
@@ -6118,9 +6079,8 @@ buf_page_io_complete(buf_page_t* bpage, bool dblwr, bool evict)
err = buf_page_check_corrupt(bpage, space);
-database_corrupted:
-
if (err != DB_SUCCESS) {
+database_corrupted:
/* Not a real corruption if it was triggered by
error injection */
DBUG_EXECUTE_IF(
@@ -6137,6 +6097,11 @@ database_corrupted:
goto page_not_corrupt;
);
+ if (uncompressed && bpage->zip.data) {
+ memset(reinterpret_cast<buf_block_t*>(bpage)
+ ->frame, 0, srv_page_size);
+ }
+
if (err == DB_PAGE_CORRUPTED) {
ib::error()
<< "Database page corruption on disk"
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index f2a475e2046..6aa47fd1ec7 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -219,166 +219,6 @@ buf_LRU_evict_from_unzip_LRU(
}
#ifdef BTR_CUR_HASH_ADAPT
-/** Attempts to drop page hash index on a batch of pages belonging to a
-particular space id.
-@param[in] space_id space id
-@param[in] arr array of page_no
-@param[in] count number of entries in array */
-static
-void
-buf_LRU_drop_page_hash_batch(ulint space_id, const ulint* arr, ulint count)
-{
- ut_ad(count <= BUF_LRU_DROP_SEARCH_SIZE);
-
- for (const ulint* const end = arr + count; arr != end; ) {
- /* While our only caller
- buf_LRU_drop_page_hash_for_tablespace()
- is being executed for DROP TABLE or similar,
- the table cannot be evicted from the buffer pool. */
- btr_search_drop_page_hash_when_freed(
- page_id_t(space_id, *arr++));
- }
-}
-
-/******************************************************************//**
-When doing a DROP TABLE/DISCARD TABLESPACE we have to drop all page
-hash index entries belonging to that table. This function tries to
-do that in batch. Note that this is a 'best effort' attempt and does
-not guarantee that ALL hash entries will be removed. */
-static
-void
-buf_LRU_drop_page_hash_for_tablespace(
-/*==================================*/
- buf_pool_t* buf_pool, /*!< in: buffer pool instance */
- ulint id) /*!< in: space id */
-{
- ulint* page_arr = static_cast<ulint*>(ut_malloc_nokey(
- sizeof(ulint) * BUF_LRU_DROP_SEARCH_SIZE));
-
- ulint num_entries = 0;
-
- buf_pool_mutex_enter(buf_pool);
-
-scan_again:
- for (buf_page_t* bpage = UT_LIST_GET_LAST(buf_pool->LRU);
- bpage != NULL;
- /* No op */) {
-
- buf_page_t* prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
-
- ut_a(buf_page_in_file(bpage));
-
- if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE
- || bpage->id.space() != id
- || bpage->io_fix != BUF_IO_NONE) {
- /* Compressed pages are never hashed.
- Skip blocks of other tablespaces.
- Skip I/O-fixed blocks (to be dealt with later). */
-next_page:
- bpage = prev_bpage;
- continue;
- }
-
- buf_block_t* block = reinterpret_cast<buf_block_t*>(bpage);
-
- mutex_enter(&block->mutex);
-
- /* This debug check uses a dirty read that could
- theoretically cause false positives while
- buf_pool_clear_hash_index() is executing.
- (Other conflicting access paths to the adaptive hash
- index should not be possible, because when a
- tablespace is being discarded or dropped, there must
- be no concurrect access to the contained tables.) */
- assert_block_ahi_valid(block);
-
- bool skip = bpage->buf_fix_count > 0 || !block->index;
-
- mutex_exit(&block->mutex);
-
- if (skip) {
- /* Skip this block, because there are
- no adaptive hash index entries
- pointing to it, or because we cannot
- drop them due to the buffer-fix. */
- goto next_page;
- }
-
- /* Store the page number so that we can drop the hash
- index in a batch later. */
- page_arr[num_entries] = bpage->id.page_no();
- ut_a(num_entries < BUF_LRU_DROP_SEARCH_SIZE);
- ++num_entries;
-
- if (num_entries < BUF_LRU_DROP_SEARCH_SIZE) {
- goto next_page;
- }
-
- /* Array full. We release the buf_pool->mutex to obey
- the latching order. */
- buf_pool_mutex_exit(buf_pool);
-
- buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
-
- num_entries = 0;
-
- buf_pool_mutex_enter(buf_pool);
-
- /* Note that we released the buf_pool mutex above
- after reading the prev_bpage during processing of a
- page_hash_batch (i.e.: when the array was full).
- Because prev_bpage could belong to a compressed-only
- block, it may have been relocated, and thus the
- pointer cannot be trusted. Because bpage is of type
- buf_block_t, it is safe to dereference.
-
- bpage can change in the LRU list. This is OK because
- this function is a 'best effort' to drop as many
- search hash entries as possible and it does not
- guarantee that ALL such entries will be dropped. */
-
- /* If, however, bpage has been removed from LRU list
- to the free list then we should restart the scan.
- bpage->state is protected by buf_pool mutex. */
- if (bpage != NULL
- && buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
-
- goto scan_again;
- }
- }
-
- buf_pool_mutex_exit(buf_pool);
-
- /* Drop any remaining batch of search hashed pages. */
- buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
- ut_free(page_arr);
-}
-
-/** Try to drop the adaptive hash index for a tablespace.
-@param[in,out] table table
-@return whether anything was dropped */
-bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
-{
- for (dict_index_t* index = dict_table_get_first_index(table);
- index != NULL;
- index = dict_table_get_next_index(index)) {
- if (btr_search_info_get_ref_count(btr_search_get_info(index),
- index)) {
- goto drop_ahi;
- }
- }
-
- return false;
-drop_ahi:
- ulint id = table->space_id;
- for (ulint i = 0; i < srv_buf_pool_instances; i++) {
- buf_LRU_drop_page_hash_for_tablespace(buf_pool_from_array(i),
- id);
- }
-
- return true;
-}
-
/******************************************************************//**
While flushing (or removing dirty) pages from a tablespace we don't
want to hog the CPU and resources. Release the buffer pool and block
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index aa7913b3cbe..be570b3fda7 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -26,7 +26,9 @@ Created 1/8/1996 Heikki Tuuri
#include "dict0crea.h"
#include "btr0pcur.h"
-#include "btr0btr.h"
+#ifdef BTR_CUR_HASH_ADAPT
+# include "btr0sea.h"
+#endif /* BTR_CUR_HASH_ADAPT */
#include "page0page.h"
#include "mach0data.h"
#include "dict0boot.h"
@@ -1300,6 +1302,9 @@ dict_create_index_step(
&node->table->fts->cache->init_lock);
}
+#ifdef BTR_CUR_HASH_ADAPT
+ ut_ad(!node->index->search_info->ref_count);
+#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(node->table, node->index);
node->index = NULL;
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index c092d78fa0d..c9abc03f682 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1057,25 +1057,12 @@ dict_table_can_be_evicted(
}
#ifdef BTR_CUR_HASH_ADAPT
+ /* We cannot really evict the table if adaptive hash
+ index entries are pointing to any of its indexes. */
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
-
- btr_search_t* info = btr_search_get_info(index);
-
- /* We are not allowed to free the in-memory index
- struct dict_index_t until all entries in the adaptive
- hash index that point to any of the page belonging to
- his b-tree index are dropped. This is so because
- dropping of these entries require access to
- dict_index_t struct. To avoid such scenario we keep
- a count of number of such pages in the search_info and
- only free the dict_index_t struct when this count
- drops to zero.
-
- See also: dict_index_remove_from_cache_low() */
-
- if (btr_search_info_get_ref_count(info, index) > 0) {
+ if (index->n_ahi_pages()) {
return(FALSE);
}
}
@@ -1087,6 +1074,71 @@ dict_table_can_be_evicted(
return(FALSE);
}
+#ifdef BTR_CUR_HASH_ADAPT
+/** @return a clone of this */
+dict_index_t *dict_index_t::clone() const
+{
+ ut_ad(n_fields);
+ ut_ad(!(type & (DICT_IBUF | DICT_SPATIAL | DICT_FTS)));
+ ut_ad(online_status == ONLINE_INDEX_COMPLETE);
+ ut_ad(is_committed());
+ ut_ad(!is_dummy);
+ ut_ad(!parser);
+ ut_ad(!index_fts_syncing);
+ ut_ad(!online_log);
+ ut_ad(!rtr_track);
+
+ const size_t size= sizeof *this + n_fields * sizeof(*fields) +
+#ifdef BTR_CUR_ADAPT
+ sizeof *search_info +
+#endif
+ 1 + strlen(name) +
+ n_uniq * (sizeof *stat_n_diff_key_vals +
+ sizeof *stat_n_sample_sizes +
+ sizeof *stat_n_non_null_key_vals);
+
+ mem_heap_t* heap= mem_heap_create(size);
+ dict_index_t *index= static_cast<dict_index_t*>(mem_heap_dup(heap, this,
+ sizeof *this));
+ *index= *this;
+ rw_lock_create(index_tree_rw_lock_key, &index->lock, SYNC_INDEX_TREE);
+ index->heap= heap;
+ index->name= mem_heap_strdup(heap, name);
+ index->fields= static_cast<dict_field_t*>
+ (mem_heap_dup(heap, fields, n_fields * sizeof *fields));
+#ifdef BTR_CUR_ADAPT
+ index->search_info= btr_search_info_create(index->heap);
+#endif /* BTR_CUR_ADAPT */
+ index->stat_n_diff_key_vals= static_cast<ib_uint64_t*>
+ (mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_diff_key_vals));
+ index->stat_n_sample_sizes= static_cast<ib_uint64_t*>
+ (mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_sample_sizes));
+ index->stat_n_non_null_key_vals= static_cast<ib_uint64_t*>
+ (mem_heap_zalloc(heap, n_uniq * sizeof *stat_n_non_null_key_vals));
+ mutex_create(LATCH_ID_ZIP_PAD_MUTEX, &index->zip_pad.mutex);
+ return index;
+}
+
+/** Clone this index for lazy dropping of the adaptive hash.
+@return this or a clone */
+dict_index_t *dict_index_t::clone_if_needed()
+{
+ if (!search_info->ref_count)
+ return this;
+ dict_index_t *prev= UT_LIST_GET_PREV(indexes, this);
+
+ UT_LIST_REMOVE(table->indexes, this);
+ UT_LIST_ADD_LAST(table->freed_indexes, this);
+ dict_index_t *index= clone();
+ set_freed();
+ if (prev)
+ UT_LIST_INSERT_AFTER(table->indexes, prev, index);
+ else
+ UT_LIST_ADD_FIRST(table->indexes, index);
+ return index;
+}
+#endif /* BTR_CUR_HASH_ADAPT */
+
/**********************************************************************//**
Make room in the table cache by evicting an unused table. The unused table
should not be part of FK relationship and currently not used in any user
@@ -1727,9 +1779,19 @@ void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep)
mutex_free(&table->autoinc_mutex);
- if (!keep) {
- dict_mem_table_free(table);
+ if (keep) {
+ return;
}
+
+#ifdef BTR_CUR_HASH_ADAPT
+ if (UNIV_UNLIKELY(UT_LIST_GET_LEN(table->freed_indexes) != 0)) {
+ table->vc_templ = NULL;
+ table->id = 0;
+ return;
+ }
+#endif /* BTR_CUR_HASH_ADAPT */
+
+ dict_mem_table_free(table);
}
/****************************************************************//**
@@ -1907,6 +1969,8 @@ dict_index_remove_from_cache_low(
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
ut_ad(mutex_own(&dict_sys.mutex));
+ ut_ad(table->id);
+ ut_ad(!index->freed());
/* No need to acquire the dict_index_t::lock here because
there can't be any active operations on this index (or table). */
@@ -1916,13 +1980,22 @@ dict_index_remove_from_cache_low(
row_log_free(index->online_log);
}
+ /* Remove the index from the list of indexes of the table */
+ UT_LIST_REMOVE(table->indexes, index);
+
+ /* The index is being dropped, remove any compression stats for it. */
+ if (!lru_evict && DICT_TF_GET_ZIP_SSIZE(index->table->flags)) {
+ mutex_enter(&page_zip_stat_per_index_mutex);
+ page_zip_stat_per_index.erase(index->id);
+ mutex_exit(&page_zip_stat_per_index_mutex);
+ }
+
+ /* Remove the index from affected virtual column index list */
+ index->detach_columns();
+
#ifdef BTR_CUR_HASH_ADAPT
/* We always create search info whether or not adaptive
hash index is enabled or not. */
- btr_search_t* info = btr_search_get_info(index);
- ulint retries = 0;
- ut_ad(info);
-
/* We are not allowed to free the in-memory index struct
dict_index_t until all entries in the adaptive hash index
that point to any of the page belonging to his b-tree index
@@ -1932,31 +2005,15 @@ dict_index_remove_from_cache_low(
only free the dict_index_t struct when this count drops to
zero. See also: dict_table_can_be_evicted() */
- do {
- if (!btr_search_info_get_ref_count(info, index)
- || !buf_LRU_drop_page_hash_for_tablespace(table)) {
- break;
- }
-
- ut_a(++retries < 10000);
- } while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict);
+ if (index->n_ahi_pages()) {
+ index->set_freed();
+ UT_LIST_ADD_LAST(table->freed_indexes, index);
+ return;
+ }
#endif /* BTR_CUR_HASH_ADAPT */
rw_lock_free(&index->lock);
- /* The index is being dropped, remove any compression stats for it. */
- if (!lru_evict && DICT_TF_GET_ZIP_SSIZE(index->table->flags)) {
- mutex_enter(&page_zip_stat_per_index_mutex);
- page_zip_stat_per_index.erase(index->id);
- mutex_exit(&page_zip_stat_per_index_mutex);
- }
-
- /* Remove the index from the list of indexes of the table */
- UT_LIST_REMOVE(table->indexes, index);
-
- /* Remove the index from affected virtual column index list */
- index->detach_columns();
-
dict_mem_index_free(index);
}
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index 49c80cb4f5b..bbc5535668c 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -157,6 +157,9 @@ dict_mem_table_create(
lock_table_lock_list_init(&table->locks);
UT_LIST_INIT(table->indexes, &dict_index_t::indexes);
+#ifdef BTR_CUR_HASH_ADAPT
+ UT_LIST_INIT(table->freed_indexes, &dict_index_t::indexes);
+#endif /* BTR_CUR_HASH_ADAPT */
table->heap = heap;
@@ -210,6 +213,10 @@ dict_mem_table_free(
{
ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
+ ut_ad(UT_LIST_GET_LEN(table->indexes) == 0);
+#ifdef BTR_CUR_HASH_ADAPT
+ ut_ad(UT_LIST_GET_LEN(table->freed_indexes) == 0);
+#endif /* BTR_CUR_HASH_ADAPT */
ut_d(table->cached = FALSE);
if (dict_table_has_fts_index(table)
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index 98172d1d0a6..fbd9b1f9bc6 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -419,6 +419,9 @@ dict_stats_table_clone_create(
t->corrupted = table->corrupted;
UT_LIST_INIT(t->indexes, &dict_index_t::indexes);
+#ifdef BTR_CUR_HASH_ADAPT
+ UT_LIST_INIT(t->freed_indexes, &dict_index_t::indexes);
+#endif /* BTR_CUR_HASH_ADAPT */
for (index = dict_table_get_first_index(table);
index != NULL;
@@ -4023,6 +4026,9 @@ test_dict_stats_save()
table.stat_clustered_index_size = TEST_CLUSTERED_INDEX_SIZE;
table.stat_sum_of_other_index_sizes = TEST_SUM_OF_OTHER_INDEX_SIZES;
UT_LIST_INIT(table.indexes, &dict_index_t::indexes);
+#ifdef BTR_CUR_HASH_ADAPT
+ UT_LIST_INIT(table.freed_indexes, &dict_index_t::indexes);
+#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(table.indexes, &index1);
UT_LIST_ADD_LAST(table.indexes, &index2);
ut_d(table.magic_n = DICT_TABLE_MAGIC_N);
@@ -4172,6 +4178,9 @@ test_dict_stats_fetch_from_ps()
/* craft a dummy dict_table_t */
table.name.m_name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME);
UT_LIST_INIT(table.indexes, &dict_index_t::indexes);
+#ifdef BTR_CUR_HASH_ADAPT
+ UT_LIST_INIT(table.freed_indexes, &dict_index_t::indexes);
+#endif /* BTR_CUR_HASH_ADAPT */
UT_LIST_ADD_LAST(table.indexes, &index1);
UT_LIST_ADD_LAST(table.indexes, &index2);
ut_d(table.magic_n = DICT_TABLE_MAGIC_N);
diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc
index e6f23c852dc..b3390a4cd12 100644
--- a/storage/innobase/fil/fil0pagecompress.cc
+++ b/storage/innobase/fil/fil0pagecompress.cc
@@ -27,7 +27,6 @@ Updated 14/02/2015
#include "fil0fil.h"
#include "fil0pagecompress.h"
-#include <debug_sync.h>
#include <my_dbug.h>
#include "mem0mem.h"
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index 809ad3ded4f..dcaf4138c89 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2721,8 +2721,6 @@ fseg_mark_page_used(
@param[in] seg_inode segment inode
@param[in,out] space tablespace
@param[in] offset page number
-@param[in] ahi whether we may need to drop the adaptive
-hash index
@param[in] log whether to write MLOG_INIT_FREE_PAGE record
@param[in,out] mtr mini-transaction */
static
@@ -2731,9 +2729,6 @@ fseg_free_page_low(
fseg_inode_t* seg_inode,
fil_space_t* space,
page_no_t offset,
-#ifdef BTR_CUR_HASH_ADAPT
- bool ahi,
-#endif /* BTR_CUR_HASH_ADAPT */
bool log,
mtr_t* mtr)
{
@@ -2749,15 +2744,6 @@ fseg_free_page_low(
== FSEG_MAGIC_N_VALUE);
ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_d(space->modify_check(*mtr));
-#ifdef BTR_CUR_HASH_ADAPT
- /* Drop search system page hash index if the page is found in
- the pool and is hashed */
-
- if (ahi) {
- btr_search_drop_page_hash_when_freed(
- page_id_t(space->id, offset));
- }
-#endif /* BTR_CUR_HASH_ADAPT */
descr = xdes_get_descriptor(space, offset, mtr);
@@ -2842,27 +2828,17 @@ fseg_free_page_low(
}
}
-#ifndef BTR_CUR_HASH_ADAPT
-# define fseg_free_page_low(inode, space, offset, ahi, log, mtr) \
- fseg_free_page_low(inode, space, offset, log, mtr)
-#endif /* !BTR_CUR_HASH_ADAPT */
-
/** Free a page in a file segment.
@param[in,out] seg_header file segment header
@param[in,out] space tablespace
@param[in] offset page number
-@param[in] ahi whether we may need to drop the adaptive
-hash index
@param[in] log whether to write MLOG_INIT_FREE_PAGE record
@param[in,out] mtr mini-transaction */
void
-fseg_free_page_func(
+fseg_free_page(
fseg_header_t* seg_header,
fil_space_t* space,
ulint offset,
-#ifdef BTR_CUR_HASH_ADAPT
- bool ahi,
-#endif /* BTR_CUR_HASH_ADAPT */
bool log,
mtr_t* mtr)
{
@@ -2881,7 +2857,7 @@ fseg_free_page_func(
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
}
- fseg_free_page_low(seg_inode, space, offset, ahi, log, mtr);
+ fseg_free_page_low(seg_inode, space, offset, log, mtr);
ut_d(buf_page_set_file_page_was_freed(page_id_t(space->id, offset)));
@@ -2921,8 +2897,6 @@ fseg_page_is_free(fil_space_t* space, unsigned page)
@param[in,out] seg_inode segment inode
@param[in,out] space tablespace
@param[in] page page number in the extent
-@param[in] ahi whether we may need to drop
- the adaptive hash index
@param[in,out] mtr mini-transaction */
MY_ATTRIBUTE((nonnull))
static
@@ -2931,12 +2905,8 @@ fseg_free_extent(
fseg_inode_t* seg_inode,
fil_space_t* space,
ulint page,
-#ifdef BTR_CUR_HASH_ADAPT
- bool ahi,
-#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr)
{
- ulint first_page_in_extent;
xdes_t* descr;
ulint not_full_n_used;
ulint descr_n_used;
@@ -2951,24 +2921,9 @@ fseg_free_extent(
== FSEG_MAGIC_N_VALUE);
ut_d(space->modify_check(*mtr));
- first_page_in_extent = page - (page % FSP_EXTENT_SIZE);
-
-#ifdef BTR_CUR_HASH_ADAPT
- if (ahi) {
- for (ulint i = 0; i < FSP_EXTENT_SIZE; i++) {
- if (!xdes_mtr_get_bit(descr, XDES_FREE_BIT, i, mtr)) {
-
- /* Drop search system page hash index
- if the page is found in the pool and
- is hashed */
-
- btr_search_drop_page_hash_when_freed(
- page_id_t(space->id,
- first_page_in_extent + i));
- }
- }
- }
-#endif /* BTR_CUR_HASH_ADAPT */
+#if defined BTR_CUR_HASH_ADAPT || defined UNIV_DEBUG
+ const ulint first_page_in_extent = page - (page % FSP_EXTENT_SIZE);
+#endif /* BTR_CUR_HASH_ADAPT || UNIV_DEBUG */
if (xdes_is_full(descr, mtr)) {
flst_remove(seg_inode + FSEG_FULL,
@@ -3000,27 +2955,18 @@ fseg_free_extent(
#endif /* UNIV_DEBUG */
}
-#ifndef BTR_CUR_HASH_ADAPT
-# define fseg_free_extent(inode, space, page, ahi, mtr) \
- fseg_free_extent(inode, space, page, mtr)
-#endif /* !BTR_CUR_HASH_ADAPT */
-
/**********************************************************************//**
Frees part of a segment. This function can be used to free a segment by
repeatedly calling this function in different mini-transactions. Doing
the freeing in a single mini-transaction might result in too big a
mini-transaction.
-@return TRUE if freeing completed */
-ibool
-fseg_free_step_func(
+@return whether the freeing was completed */
+bool
+fseg_free_step(
fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list
of the segment, this pointer becomes obsolete
after the last freeing step */
-#ifdef BTR_CUR_HASH_ADAPT
- bool ahi, /*!< in: whether we may need to drop
- the adaptive hash index */
-#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint n;
@@ -3051,7 +2997,7 @@ fseg_free_step_func(
if (inode == NULL) {
ib::info() << "Double free of inode from "
<< page_id_t(space_id, header_page);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
if (!space->full_crc32()) {
@@ -3062,8 +3008,8 @@ fseg_free_step_func(
if (descr != NULL) {
/* Free the extent held by the segment */
page = xdes_get_offset(descr);
- fseg_free_extent(inode, space, page, ahi, mtr);
- DBUG_RETURN(FALSE);
+ fseg_free_extent(inode, space, page, mtr);
+ DBUG_RETURN(false);
}
/* Free a frag page */
@@ -3073,13 +3019,13 @@ fseg_free_step_func(
/* Freeing completed: free the segment inode */
fsp_free_seg_inode(space, inode, mtr);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
fseg_free_page_low(
inode, space,
fseg_get_nth_frag_page_no(inode, n, mtr),
- ahi, true, mtr);
+ true, mtr);
n = fseg_find_last_used_frag_page_slot(inode, mtr);
@@ -3087,24 +3033,20 @@ fseg_free_step_func(
/* Freeing completed: free the segment inode */
fsp_free_seg_inode(space, inode, mtr);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(false);
}
/**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed.
-@return TRUE if freeing completed, except the header page */
-ibool
-fseg_free_step_not_header_func(
+@return whether the freeing was completed, except for the header page */
+bool
+fseg_free_step_not_header(
fseg_header_t* header, /*!< in: segment header which must reside on
the first fragment page of the segment */
-#ifdef BTR_CUR_HASH_ADAPT
- bool ahi, /*!< in: whether we may need to drop
- the adaptive hash index */
-#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint n;
@@ -3131,30 +3073,24 @@ fseg_free_step_not_header_func(
if (descr != NULL) {
/* Free the extent held by the segment */
page = xdes_get_offset(descr);
-
- fseg_free_extent(inode, space, page, ahi, mtr);
-
- return(FALSE);
+ fseg_free_extent(inode, space, page, mtr);
+ return false;
}
/* Free a frag page */
n = fseg_find_last_used_frag_page_slot(inode, mtr);
- if (n == ULINT_UNDEFINED) {
- ut_error;
- }
+ ut_a(n != ULINT_UNDEFINED);
page_no = fseg_get_nth_frag_page_no(inode, n, mtr);
if (page_no == page_get_page_no(page_align(header))) {
-
- return(TRUE);
+ return true;
}
- fseg_free_page_low(inode, space, page_no, ahi, true, mtr);
-
- return(FALSE);
+ fseg_free_page_low(inode, space, page_no, true, mtr);
+ return false;
}
/** Returns the first extent descriptor for a segment.
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 4fa405c5dc2..45c115e8535 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -18600,33 +18600,6 @@ innodb_log_checksums_update(THD* thd, st_mysql_sys_var*, void* var_ptr,
thd, *static_cast<const my_bool*>(save));
}
-#ifdef UNIV_DEBUG
-static
-void
-innobase_debug_sync_callback(srv_slot_t *slot, const void *value)
-{
- const char *value_str = *static_cast<const char* const*>(value);
- size_t len = strlen(value_str) + 1;
-
-
- // One allocatoin for list node object and value.
- void *buf = ut_malloc_nokey(sizeof(srv_slot_t::debug_sync_t) + len);
- srv_slot_t::debug_sync_t *sync = new(buf) srv_slot_t::debug_sync_t();
- strcpy(reinterpret_cast<char*>(&sync[1]), value_str);
-
- rw_lock_x_lock(&slot->debug_sync_lock);
- UT_LIST_ADD_LAST(slot->debug_sync, sync);
- rw_lock_x_unlock(&slot->debug_sync_lock);
-}
-static
-void
-innobase_debug_sync_set(THD *thd, st_mysql_sys_var*, void *, const void *value)
-{
- srv_for_each_thread(SRV_WORKER, innobase_debug_sync_callback, value);
- srv_for_each_thread(SRV_PURGE, innobase_debug_sync_callback, value);
-}
-#endif
-
static SHOW_VAR innodb_status_variables_export[]= {
{"Innodb", (char*) &show_innodb_vars, SHOW_FUNC},
{NullS, NullS, SHOW_LONG}
@@ -20022,16 +19995,6 @@ static MYSQL_SYSVAR_BOOL(debug_force_scrubbing,
0,
"Perform extra scrubbing to increase test exposure",
NULL, NULL, FALSE);
-
-char *innobase_debug_sync;
-static MYSQL_SYSVAR_STR(debug_sync, innobase_debug_sync,
- PLUGIN_VAR_NOCMDARG,
- "debug_sync for innodb purge threads. "
- "Use it to set up sync points for all purge threads "
- "at once. The commands will be applied sequentially at "
- "the beginning of purging the next undo record.",
- NULL,
- innobase_debug_sync_set, NULL);
#endif /* UNIV_DEBUG */
static MYSQL_SYSVAR_BOOL(encrypt_temporary_tables, innodb_encrypt_temporary_tables,
@@ -20245,7 +20208,6 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(background_scrub_data_check_interval),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(debug_force_scrubbing),
- MYSQL_SYSVAR(debug_sync),
#endif
MYSQL_SYSVAR(buf_dump_status_frequency),
MYSQL_SYSVAR(background_thread),
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 5f8d56c2c62..b497951a26d 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -1062,6 +1062,7 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
for (ulint i = 0; i < num_to_add_index; i++) {
if (!add_index[i]->is_committed()) {
add_index[i]->detach_columns();
+ add_index[i]->n_fields = 0;
}
}
}
@@ -11283,21 +11284,14 @@ foreign_fail:
|| (ctx0->is_instant()
&& m_prebuilt->table->n_v_cols
&& ha_alter_info->handler_flags & ALTER_STORED_COLUMN_ORDER)) {
+ /* FIXME: this workaround does not seem to work with
+ partitioned tables */
DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1);
trx_commit_for_mysql(m_prebuilt->trx);
-#ifdef BTR_CUR_HASH_ADAPT
- if (btr_search_enabled) {
- btr_search_disable(false);
- btr_search_enable();
- }
-#endif /* BTR_CUR_HASH_ADAPT */
-
- char tb_name[FN_REFLEN];
- ut_strcpy(tb_name, m_prebuilt->table->name.m_name);
-
- tb_name[strlen(m_prebuilt->table->name.m_name)] = 0;
+ char tb_name[NAME_LEN * 2 + 1 + 1];
+ strcpy(tb_name, m_prebuilt->table->name.m_name);
dict_table_close(m_prebuilt->table, true, false);
if (ctx0->is_instant()) {
for (unsigned i = ctx0->old_n_v_cols; i--; ) {
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 80f09ce9c31..b04959e2cc1 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -2052,7 +2052,7 @@ ibuf_remove_free_page(void)
compile_time_assert(IBUF_SPACE_ID == 0);
fseg_free_page(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
- fil_system.sys_space, page_no, false, true, &mtr);
+ fil_system.sys_space, page_no, true, &mtr);
const page_id_t page_id(IBUF_SPACE_ID, page_no);
diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h
index 20f4730f48f..c20b971de98 100644
--- a/storage/innobase/include/btr0pcur.h
+++ b/storage/innobase/include/btr0pcur.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/btr0pcur.ic b/storage/innobase/include/btr0pcur.ic
index 4c3c3359262..9c99da42b97 100644
--- a/storage/innobase/include/btr0pcur.ic
+++ b/storage/innobase/include/btr0pcur.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2019, MariaDB Corporation.
+Copyright (c) 2015, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index 8bbfe18c5d9..b1ba5f6a31b 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -48,15 +48,6 @@ void btr_search_disable(bool need_mutex);
/** Enable the adaptive hash search system. */
void btr_search_enable();
-/** Returns the value of ref_count. The value is protected by latch.
-@param[in] info search info
-@param[in] index index identifier
-@return ref_count value. */
-ulint
-btr_search_info_get_ref_count(
- btr_search_t* info,
- dict_index_t* index);
-
/*********************************************************************//**
Updates the search info. */
UNIV_INLINE
@@ -272,6 +263,18 @@ struct btr_search_t{
};
#ifdef BTR_CUR_HASH_ADAPT
+/** @return number of leaf pages pointed to by the adaptive hash index */
+inline ulint dict_index_t::n_ahi_pages() const
+{
+ if (!btr_search_enabled)
+ return 0;
+ rw_lock_t *latch = btr_get_search_latch(this);
+ rw_lock_s_lock(latch);
+ ulint ref_count= search_info->ref_count;
+ rw_lock_s_unlock(latch);
+ return ref_count;
+}
+
/** The hash index system */
struct btr_search_sys_t{
hash_table_t** hash_tables; /*!< the adaptive hash tables,
diff --git a/storage/innobase/include/btr0sea.ic b/storage/innobase/include/btr0sea.ic
index 17bc7e555bd..9db0084ce59 100644
--- a/storage/innobase/include/btr0sea.ic
+++ b/storage/innobase/include/btr0sea.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, MariaDB Corporation.
+Copyright (c) 2018, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 19c18e4105d..fdf1980df36 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -70,11 +70,13 @@ struct fil_addr_t;
/* @} */
/** @name Modes for buf_page_get_known_nowait */
/* @{ */
-#define BUF_MAKE_YOUNG 51 /*!< Move the block to the
+#ifdef BTR_CUR_HASH_ADAPT
+# define BUF_MAKE_YOUNG 51 /*!< Move the block to the
start of the LRU list if there
is a danger that the block
would drift out of the buffer
pool*/
+#endif /* BTR_CUR_HASH_ADAPT */
#define BUF_KEEP_OLD 52 /*!< Preserve the current LRU
position of the block. */
/* @} */
@@ -281,12 +283,6 @@ extern "C"
os_thread_ret_t
DECLARE_THREAD(buf_resize_thread)(void*);
-#ifdef BTR_CUR_HASH_ADAPT
-/** Clear the adaptive hash index on all pages in the buffer pool. */
-void
-buf_pool_clear_hash_index();
-#endif /* BTR_CUR_HASH_ADAPT */
-
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index 2468efb193f..e7707ffd6dc 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -50,17 +50,6 @@ These are low-level functions
/** Minimum LRU list length for which the LRU_old pointer is defined */
#define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
-#ifdef BTR_CUR_HASH_ADAPT
-struct dict_table_t;
-/** Try to drop the adaptive hash index for a tablespace.
-@param[in,out] table table
-@return whether anything was dropped */
-bool buf_LRU_drop_page_hash_for_tablespace(dict_table_t* table)
- MY_ATTRIBUTE((warn_unused_result,nonnull));
-#else
-# define buf_LRU_drop_page_hash_for_tablespace(table)
-#endif /* BTR_CUR_HASH_ADAPT */
-
/** Empty the flush list for all pages belonging to a tablespace.
@param[in] id tablespace identifier
@param[in,out] observer flush observer,
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 9d73a48f4e6..b2be01ec637 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -956,7 +956,10 @@ struct dict_index_t {
mem_heap_t* heap; /*!< memory heap */
id_name_t name; /*!< index name */
dict_table_t* table; /*!< back pointer to table */
- unsigned page:32;/*!< index tree root page number */
+ /** root page number, or FIL_NULL if the index has been detached
+ from storage (DISCARD TABLESPACE or similar),
+ or 1 if the index is in table->freed_indexes */
+ unsigned page:32;
unsigned merge_threshold:6;
/*!< In the pessimistic delete, if the page
data size drops below this limit in percent,
@@ -1104,19 +1107,11 @@ struct dict_index_t {
/* @} */
private:
/** R-tree split sequence number */
- std::atomic<node_seq_t> rtr_ssn;
+ Atomic_counter<node_seq_t> rtr_ssn;
public:
-
- void set_ssn(node_seq_t ssn)
- {
- rtr_ssn.store(ssn, std::memory_order_relaxed);
- }
- node_seq_t assign_ssn()
- {
- node_seq_t ssn= rtr_ssn.fetch_add(1, std::memory_order_relaxed);
- return ssn + 1;
- }
- node_seq_t ssn() const { return rtr_ssn.load(std::memory_order_relaxed); }
+ void set_ssn(node_seq_t ssn) { rtr_ssn= ssn; }
+ node_seq_t assign_ssn() { return ++rtr_ssn; }
+ node_seq_t ssn() const { return rtr_ssn; }
rtr_info_track_t*
rtr_track;/*!< tracking all R-Tree search cursors */
@@ -1209,8 +1204,6 @@ public:
for (unsigned i = 0; i < n_fields; i++) {
fields[i].col->detach(*this);
}
-
- n_fields = 0;
}
}
@@ -1273,15 +1266,29 @@ public:
bool
vers_history_row(const rec_t* rec, bool &history_row);
- /** Reconstruct the clustered index fields. */
- inline void reconstruct_fields();
+ /** Reconstruct the clustered index fields. */
+ inline void reconstruct_fields();
- /** Check if the index contains a column or a prefix of that column.
- @param[in] n column number
- @param[in] is_virtual whether it is a virtual col
- @return whether the index contains the column or its prefix */
- bool contains_col_or_prefix(ulint n, bool is_virtual) const
- MY_ATTRIBUTE((warn_unused_result));
+ /** Check if the index contains a column or a prefix of that column.
+ @param[in] n column number
+ @param[in] is_virtual whether it is a virtual col
+ @return whether the index contains the column or its prefix */
+ bool contains_col_or_prefix(ulint n, bool is_virtual) const
+ MY_ATTRIBUTE((warn_unused_result));
+
+#ifdef BTR_CUR_HASH_ADAPT
+ /** @return a clone of this */
+ dict_index_t* clone() const;
+ /** Clone this index for lazy dropping of the adaptive hash index.
+ @return this or a clone */
+ dict_index_t* clone_if_needed();
+ /** @return number of leaf pages pointed to by the adaptive hash index */
+ inline ulint n_ahi_pages() const;
+	/** @return whether set_freed() had been invoked */
+ bool freed() const { return UNIV_UNLIKELY(page == 1); }
+ /** Note that the index is waiting for btr_search_lazy_free() */
+ void set_freed() { ut_ad(!freed()); page= 1; }
+#endif /* BTR_CUR_HASH_ADAPT */
/** This ad-hoc class is used by record_size_info only. */
class record_size_info_t {
@@ -2043,6 +2050,11 @@ public:
/** List of indexes of the table. */
UT_LIST_BASE_NODE_T(dict_index_t) indexes;
+#ifdef BTR_CUR_HASH_ADAPT
+ /** List of detached indexes that are waiting to be freed along with
+ the last adaptive hash index entry */
+ UT_LIST_BASE_NODE_T(dict_index_t) freed_indexes;
+#endif /* BTR_CUR_HASH_ADAPT */
/** List of foreign key constraints in the table. These refer to
columns in other tables. */
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index 75c448c956d..8e1acfe1805 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2019, MariaDB Corporation.
+Copyright (c) 2013, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -488,27 +488,15 @@ fsp_reserve_free_extents(
@param[in,out] seg_header file segment header
@param[in,out] space tablespace
@param[in] offset page number
-@param[in] ahi whether we may need to drop the adaptive
-hash index
@param[in] log whether to write MLOG_INIT_FREE_PAGE record
@param[in,out] mtr mini-transaction */
void
-fseg_free_page_func(
+fseg_free_page(
fseg_header_t* seg_header,
fil_space_t* space,
ulint offset,
-#ifdef BTR_CUR_HASH_ADAPT
- bool ahi,
-#endif /* BTR_CUR_HASH_ADAPT */
bool log,
mtr_t* mtr);
-#ifdef BTR_CUR_HASH_ADAPT
-# define fseg_free_page(header, space, offset, ahi, log, mtr) \
- fseg_free_page_func(header, space, offset, ahi, log, mtr)
-#else /* BTR_CUR_HASH_ADAPT */
-# define fseg_free_page(header, space, offset, ahi, log, mtr) \
- fseg_free_page_func(header, space, offset, log, mtr)
-#endif /* BTR_CUR_HASH_ADAPT */
/** Determine whether a page is free.
@param[in,out] space tablespace
@param[in] page page number
@@ -521,45 +509,25 @@ Frees part of a segment. This function can be used to free a segment
by repeatedly calling this function in different mini-transactions.
Doing the freeing in a single mini-transaction might result in
too big a mini-transaction.
-@return TRUE if freeing completed */
-ibool
-fseg_free_step_func(
+@return whether the freeing was completed */
+bool
+fseg_free_step(
fseg_header_t* header, /*!< in, own: segment header; NOTE: if the header
resides on the first page of the frag list
of the segment, this pointer becomes obsolete
after the last freeing step */
-#ifdef BTR_CUR_HASH_ADAPT
- bool ahi, /*!< in: whether we may need to drop
- the adaptive hash index */
-#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((warn_unused_result));
-#ifdef BTR_CUR_HASH_ADAPT
-# define fseg_free_step(header, ahi, mtr) fseg_free_step_func(header, ahi, mtr)
-#else /* BTR_CUR_HASH_ADAPT */
-# define fseg_free_step(header, ahi, mtr) fseg_free_step_func(header, mtr)
-#endif /* BTR_CUR_HASH_ADAPT */
/**********************************************************************//**
Frees part of a segment. Differs from fseg_free_step because this function
leaves the header page unfreed.
-@return TRUE if freeing completed, except the header page */
-ibool
-fseg_free_step_not_header_func(
+@return whether the freeing was completed, except for the header page */
+bool
+fseg_free_step_not_header(
fseg_header_t* header, /*!< in: segment header which must reside on
the first fragment page of the segment */
-#ifdef BTR_CUR_HASH_ADAPT
- bool ahi, /*!< in: whether we may need to drop
- the adaptive hash index */
-#endif /* BTR_CUR_HASH_ADAPT */
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((warn_unused_result));
-#ifdef BTR_CUR_HASH_ADAPT
-# define fseg_free_step_not_header(header, ahi, mtr) \
- fseg_free_step_not_header_func(header, ahi, mtr)
-#else /* BTR_CUR_HASH_ADAPT */
-# define fseg_free_step_not_header(header, ahi, mtr) \
- fseg_free_step_not_header_func(header, mtr)
-#endif /* BTR_CUR_HASH_ADAPT */
/** Reset the page type.
Data files created before MySQL 5.1.48 may contain garbage in FIL_PAGE_TYPE.
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 9456b5a1685..e1d37613dc9 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -1102,13 +1102,6 @@ struct srv_slot_t{
to do */
que_thr_t* thr; /*!< suspended query thread
(only used for user threads) */
-#ifdef UNIV_DEBUG
- struct debug_sync_t {
- UT_LIST_NODE_T(debug_sync_t) debug_sync_list;
- };
- UT_LIST_BASE_NODE_T(debug_sync_t) debug_sync;
- rw_lock_t debug_sync_lock;
-#endif
};
#ifdef UNIV_DEBUG
diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h
index bf47cb8fe88..6592988def8 100644
--- a/storage/innobase/include/sync0rw.h
+++ b/storage/innobase/include/sync0rw.h
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -568,11 +568,11 @@ struct rw_lock_t
: public latch_t
#endif /* UNIV_DEBUG */
{
- /** Holds the state of the lock. */
- std::atomic<int32_t> lock_word;
+ /** Holds the state of the lock. */
+ Atomic_counter<int32_t> lock_word;
- /** 1: there are waiters */
- std::atomic<int32_t> waiters;
+ /** 1: there are waiters */
+ Atomic_counter<uint32_t> waiters;
/** number of granted SX locks. */
volatile ulint sx_recursive;
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index a1bbf719b7d..2a7b008a532 100644
--- a/storage/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -77,7 +77,7 @@ rw_lock_get_writer(
/*===============*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_word = lock->lock_word;
ut_ad(lock_word <= X_LOCK_DECR);
if (lock_word > X_LOCK_HALF_DECR) {
@@ -109,7 +109,7 @@ rw_lock_get_reader_count(
/*=====================*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_word = lock->lock_word;
ut_ad(lock_word <= X_LOCK_DECR);
if (lock_word > X_LOCK_HALF_DECR) {
@@ -145,7 +145,7 @@ rw_lock_get_x_lock_count(
/*=====================*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_copy = lock->lock_word;
ut_ad(lock_copy <= X_LOCK_DECR);
if (lock_copy == 0 || lock_copy == -X_LOCK_HALF_DECR) {
@@ -178,7 +178,7 @@ rw_lock_get_sx_lock_count(
const rw_lock_t* lock) /*!< in: rw-lock */
{
#ifdef UNIV_DEBUG
- auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_copy = lock->lock_word;
ut_ad(lock_copy <= X_LOCK_DECR);
@@ -209,7 +209,7 @@ rw_lock_lock_word_decr(
int32_t amount, /*!< in: amount to decrement */
int32_t threshold) /*!< in: threshold of judgement */
{
- auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_copy = lock->lock_word;
while (lock_copy > threshold) {
if (lock->lock_word.compare_exchange_strong(
@@ -352,26 +352,21 @@ rw_lock_s_unlock_func(
#endif /* UNIV_DEBUG */
rw_lock_t* lock) /*!< in/out: rw-lock */
{
-#ifdef UNIV_DEBUG
- auto dbg_lock_word = lock->lock_word.load(std::memory_order_relaxed);
- ut_ad(dbg_lock_word > -X_LOCK_DECR);
- ut_ad(dbg_lock_word != 0);
- ut_ad(dbg_lock_word < X_LOCK_DECR);
-#endif
-
ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));
/* Increment lock_word to indicate 1 less reader */
- auto lock_word = lock->lock_word.fetch_add(
- 1, std::memory_order_release) + 1;
- if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
+ int32_t lock_word = ++lock->lock_word;
+ if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
/* wait_ex waiter exists. It may not be asleep, but we signal
anyway. We do not wake other waiters, because they can't
exist without wait_ex waiter and wait_ex waiter goes first.*/
os_event_set(lock->wait_ex_event);
sync_array_object_signalled();
-
+ } else {
+ ut_ad(--lock_word);
+ ut_ad(lock_word > -X_LOCK_DECR);
+ ut_ad(lock_word < X_LOCK_DECR);
}
ut_ad(rw_lock_validate(lock));
@@ -389,10 +384,7 @@ rw_lock_x_unlock_func(
#endif /* UNIV_DEBUG */
rw_lock_t* lock) /*!< in/out: rw-lock */
{
- auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
-
- ut_ad(lock_word == 0 || lock_word == -X_LOCK_HALF_DECR
- || lock_word <= -X_LOCK_DECR);
+ int32_t lock_word = lock->lock_word;
if (lock_word == 0) {
/* Last caller in a possible recursive chain. */
@@ -414,21 +406,19 @@ rw_lock_x_unlock_func(
We need to signal read/write waiters.
We do not need to signal wait_ex waiters, since they cannot
exist when there is a writer. */
- if (lock->waiters.load(std::memory_order_relaxed)) {
- lock->waiters.store(0, std::memory_order_relaxed);
+ if (lock->waiters) {
+ lock->waiters = 0;
os_event_set(lock->event);
sync_array_object_signalled();
}
} else if (lock_word == -X_LOCK_DECR
|| lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
/* There are 2 x-locks */
- lock->lock_word.fetch_add(X_LOCK_DECR,
- std::memory_order_relaxed);
+ lock->lock_word += X_LOCK_DECR;
} else {
/* There are more than 2 x-locks. */
ut_ad(lock_word < -X_LOCK_DECR);
- lock->lock_word.fetch_add(1,
- std::memory_order_relaxed);
+ lock->lock_word++;
}
ut_ad(rw_lock_validate(lock));
@@ -454,8 +444,7 @@ rw_lock_sx_unlock_func(
ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_SX));
if (lock->sx_recursive == 0) {
- auto lock_word =
- lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_word = lock->lock_word;
/* Last caller in a possible recursive chain. */
if (lock_word > 0) {
lock->writer_thread = 0;
@@ -472,8 +461,8 @@ rw_lock_sx_unlock_func(
waiters. We do not need to signal wait_ex waiters,
since they cannot exist when there is an sx-lock
holder. */
- if (lock->waiters.load(std::memory_order_relaxed)) {
- lock->waiters.store(0, std::memory_order_relaxed);
+ if (lock->waiters) {
+ lock->waiters = 0;
os_event_set(lock->event);
sync_array_object_signalled();
}
@@ -481,8 +470,7 @@ rw_lock_sx_unlock_func(
/* still has x-lock */
ut_ad(lock_word == -X_LOCK_HALF_DECR ||
lock_word <= -(X_LOCK_DECR + X_LOCK_HALF_DECR));
- lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
- std::memory_order_relaxed);
+ lock->lock_word += X_LOCK_HALF_DECR;
}
}
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index de9a9553ef5..8b45884476f 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -26,6 +26,9 @@ Created 2012-02-08 by Sunny Bains.
#include "row0import.h"
#include "btr0pcur.h"
+#ifdef BTR_CUR_HASH_ADAPT
+# include "btr0sea.h"
+#endif
#include "que0que.h"
#include "dict0boot.h"
#include "dict0load.h"
@@ -4033,15 +4036,12 @@ row_import_for_mysql(
index entries that point to cached garbage pages in the buffer
pool, because PageConverter::operator() only evicted those
pages that were replaced by the imported pages. We must
- discard all remaining adaptive hash index entries, because the
+ detach any remaining adaptive hash index entries, because the
adaptive hash index must be a subset of the table contents;
false positives are not tolerated. */
- while (buf_LRU_drop_page_hash_for_tablespace(table)) {
- if (trx_is_interrupted(trx)
- || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
- err = DB_INTERRUPTED;
- break;
- }
+ for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); index;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+ index = index->clone_if_needed();
}
#endif /* BTR_CUR_HASH_ADAPT */
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 77695ec54ae..6a428c9ed0b 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -46,6 +46,9 @@ Completed by Sunny Bains and Marko Makela
#include "row0vers.h"
#include "handler0alter.h"
#include "btr0bulk.h"
+#ifdef BTR_CUR_ADAPT
+# include "btr0sea.h"
+#endif /* BTR_CUR_ADAPT */
#include "ut0stage.h"
#include "fil0crypt.h"
@@ -203,7 +206,6 @@ public:
&ins_cur, 0,
__FILE__, __LINE__, &mtr);
-
error = btr_cur_pessimistic_insert(
flag, &ins_cur, &ins_offsets,
&row_heap, dtuple, &rec,
@@ -1968,8 +1970,7 @@ row_merge_read_clustered_index(
goto scan_next;
}
- if (clust_index->lock.waiters.load(
- std::memory_order_relaxed)) {
+ if (clust_index->lock.waiters) {
/* There are waiters on the clustered
index tree lock, likely the purge
thread. Store and restore the cursor
@@ -3908,6 +3909,9 @@ row_merge_drop_indexes(
we should exclude FTS entries from
prebuilt->ins_node->entry_list
in ins_node_create_entry_list(). */
+#ifdef BTR_CUR_HASH_ADAPT
+ ut_ad(!index->search_info->ref_count);
+#endif /* BTR_CUR_HASH_ADAPT */
dict_index_remove_from_cache(
table, index);
index = prev;
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 23ad4088b13..4a273cd635e 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -2573,6 +2573,9 @@ row_create_index_for_mysql(
unsigned(index->n_nullable));
err = dict_create_index_tree_in_mem(index, trx);
+#ifdef BTR_CUR_HASH_ADAPT
+ ut_ad(!index->search_info->ref_count);
+#endif /* BTR_CUR_HASH_ADAPT */
if (err != DB_SUCCESS) {
dict_index_remove_from_cache(table, index);
@@ -3420,35 +3423,6 @@ row_drop_table_for_mysql(
ut_ad(!(table->stats_bg_flag & BG_STAT_IN_PROGRESS));
if (!table->no_rollback()) {
if (table->space != fil_system.sys_space) {
-#ifdef BTR_CUR_HASH_ADAPT
- /* On DISCARD TABLESPACE, we would not drop the
- adaptive hash index entries. If the tablespace is
- missing here, delete-marking the record in SYS_INDEXES
- would not free any pages in the buffer pool. Thus,
- dict_index_remove_from_cache() would hang due to
- adaptive hash index entries existing in the buffer
- pool. To prevent this hang, and also to guarantee
- that btr_search_drop_page_hash_when_freed() will avoid
- calling btr_search_drop_page_hash_index() while we
- hold the InnoDB dictionary lock, we will drop any
- adaptive hash index entries upfront. */
- const bool immune = is_temp_name
- || create_failed
- || sqlcom == SQLCOM_CREATE_TABLE
- || strstr(table->name.m_name, "/FTS");
-
- while (buf_LRU_drop_page_hash_for_tablespace(table)) {
- if ((!immune && trx_is_interrupted(trx))
- || srv_shutdown_state
- != SRV_SHUTDOWN_NONE) {
- err = DB_INTERRUPTED;
- table->to_be_dropped = false;
- dict_table_close(table, true, false);
- goto funct_exit;
- }
- }
-#endif /* BTR_CUR_HASH_ADAPT */
-
/* Delete the link file if used. */
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
RemoteDatafile::delete_link_file(name);
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 727fcbae421..9c49410209a 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -46,7 +46,6 @@ Created 3/14/1997 Heikki Tuuri
#include "handler.h"
#include "ha_innodb.h"
#include "fil0fil.h"
-#include "debug_sync.h"
/*************************************************************************
IMPORTANT NOTE: Any operation that generates redo MUST check that there
@@ -1310,26 +1309,6 @@ row_purge_step(
node->start();
-#ifdef UNIV_DEBUG
- srv_slot_t *slot = thr->thread_slot;
- ut_ad(slot);
-
- rw_lock_x_lock(&slot->debug_sync_lock);
- while (UT_LIST_GET_LEN(slot->debug_sync)) {
- srv_slot_t::debug_sync_t *sync =
- UT_LIST_GET_FIRST(slot->debug_sync);
- const char* sync_str = reinterpret_cast<char*>(&sync[1]);
- bool result = debug_sync_set_action(current_thd,
- sync_str,
- strlen(sync_str));
- ut_a(!result);
-
- UT_LIST_REMOVE(slot->debug_sync, sync);
- ut_free(sync);
- }
- rw_lock_x_unlock(&slot->debug_sync_lock);
-#endif
-
if (!(node->undo_recs == NULL || ib_vector_is_empty(node->undo_recs))) {
trx_purge_rec_t*purge_rec;
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 394689225e5..d194a6929a3 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -2,7 +2,7 @@
Copyright (c) 1997, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2015, 2019, MariaDB Corporation.
+Copyright (c) 2015, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc
index 5ebc25863ff..4bbe0c8d717 100644
--- a/storage/innobase/row/row0vers.cc
+++ b/storage/innobase/row/row0vers.cc
@@ -465,7 +465,6 @@ row_vers_build_clust_v_col(
vcol_info->set_used();
maria_table = vcol_info->table();
}
- DEBUG_SYNC(current_thd, "ib_clust_v_col_before_row_allocated");
innobase_allocate_row_for_vcol(thd, index,
&local_heap,
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index d7f7e127fea..59b1ef80cee 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -2499,13 +2499,6 @@ DECLARE_THREAD(srv_worker_thread)(
slot = srv_reserve_slot(SRV_WORKER);
-#ifdef UNIV_DEBUG
- UT_LIST_INIT(slot->debug_sync,
- &srv_slot_t::debug_sync_t::debug_sync_list);
- rw_lock_create(PFS_NOT_INSTRUMENTED, &slot->debug_sync_lock,
- SYNC_NO_ORDER_CHECK);
-#endif
-
ut_a(srv_n_purge_threads > 1);
ut_a(ulong(srv_sys.n_threads_active[SRV_WORKER])
< srv_n_purge_threads);
@@ -2721,12 +2714,6 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
slot = srv_reserve_slot(SRV_PURGE);
-#ifdef UNIV_DEBUG
- UT_LIST_INIT(slot->debug_sync,
- &srv_slot_t::debug_sync_t::debug_sync_list);
- rw_lock_create(PFS_NOT_INSTRUMENTED, &slot->debug_sync_lock,
- SYNC_NO_ORDER_CHECK);
-#endif
uint32_t rseg_history_len = trx_sys.rseg_history_len;
do {
diff --git a/storage/innobase/sync/sync0arr.cc b/storage/innobase/sync/sync0arr.cc
index 6e858254ad4..5dc9be4b8b8 100644
--- a/storage/innobase/sync/sync0arr.cc
+++ b/storage/innobase/sync/sync0arr.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2013, 2019, MariaDB Corporation.
+Copyright (c) 2013, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -593,8 +593,8 @@ sync_array_cell_print(
#endif
"\n",
rw_lock_get_reader_count(rwlock),
- rwlock->waiters.load(std::memory_order_relaxed),
- rwlock->lock_word.load(std::memory_order_relaxed),
+ uint32_t{rwlock->waiters},
+ int32_t{rwlock->lock_word},
innobase_basename(rwlock->last_x_file_name),
rwlock->last_x_line
#if 0 /* JAN: TODO: FIX LATER */
@@ -1381,9 +1381,9 @@ sync_arr_fill_sys_semphore_waits_table(
//fields[SYS_SEMAPHORE_WAITS_HOLDER_LINE]->set_notnull();
OK(field_store_ulint(fields[SYS_SEMAPHORE_WAITS_READERS], rw_lock_get_reader_count(rwlock)));
OK(field_store_ulint(fields[SYS_SEMAPHORE_WAITS_WAITERS_FLAG],
- rwlock->waiters.load(std::memory_order_relaxed)));
+ rwlock->waiters));
OK(field_store_ulint(fields[SYS_SEMAPHORE_WAITS_LOCK_WORD],
- rwlock->lock_word.load(std::memory_order_relaxed)));
+ rwlock->lock_word));
OK(field_store_string(fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_FILE], innobase_basename(rwlock->last_x_file_name)));
OK(fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_LINE]->store(rwlock->last_x_line, true));
fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_LINE]->set_notnull();
diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc
index 2795fc902fc..983507fab0d 100644
--- a/storage/innobase/sync/sync0rw.cc
+++ b/storage/innobase/sync/sync0rw.cc
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -202,11 +202,8 @@ rw_lock_create_func(
new(lock) rw_lock_t();
#endif /* UNIV_DEBUG */
- /* If this is the very first time a synchronization object is
- created, then the following call initializes the sync system. */
-
- lock->lock_word.store(X_LOCK_DECR, std::memory_order_relaxed);
- lock->waiters.store(0, std::memory_order_relaxed);
+ lock->lock_word = X_LOCK_DECR;
+ lock->waiters = 0;
lock->sx_recursive = 0;
lock->writer_thread= 0;
@@ -252,7 +249,7 @@ rw_lock_free_func(
rw_lock_t* lock) /*!< in/out: rw-lock */
{
ut_ad(rw_lock_validate(lock));
- ut_a(lock->lock_word.load(std::memory_order_relaxed) == X_LOCK_DECR);
+ ut_a(lock->lock_word == X_LOCK_DECR);
mutex_enter(&rw_lock_list_mutex);
@@ -294,7 +291,7 @@ lock_loop:
/* Spin waiting for the writer field to become free */
HMT_low();
while (i < srv_n_spin_wait_rounds &&
- lock->lock_word.load(std::memory_order_relaxed) <= 0) {
+ lock->lock_word <= 0) {
ut_delay(srv_spin_wait_delay);
i++;
}
@@ -412,10 +409,10 @@ rw_lock_x_lock_wait_func(
sync_array_t* sync_arr;
int64_t count_os_wait = 0;
- ut_ad(lock->lock_word.load(std::memory_order_relaxed) <= threshold);
+ ut_ad(lock->lock_word <= threshold);
HMT_low();
- while (lock->lock_word.load(std::memory_order_relaxed) < threshold) {
+ while (lock->lock_word < threshold) {
ut_delay(srv_spin_wait_delay);
if (i < srv_n_spin_wait_rounds) {
@@ -434,8 +431,7 @@ rw_lock_x_lock_wait_func(
i = 0;
/* Check lock_word to ensure wake-up isn't missed.*/
- if (lock->lock_word.load(std::memory_order_relaxed) < threshold) {
-
+ if (lock->lock_word < threshold) {
++count_os_wait;
/* Add debug info as it is needed to detect possible
@@ -524,17 +520,15 @@ rw_lock_x_lock_low(
file_name, line);
} else {
- auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_word = lock->lock_word;
/* At least one X lock by this thread already
exists. Add another. */
if (lock_word == 0
|| lock_word == -X_LOCK_HALF_DECR) {
- lock->lock_word.fetch_sub(X_LOCK_DECR,
- std::memory_order_relaxed);
+ lock->lock_word -= X_LOCK_DECR;
} else {
ut_ad(lock_word <= -X_LOCK_DECR);
- lock->lock_word.fetch_sub(1,
- std::memory_order_relaxed);
+ lock->lock_word--;
}
}
@@ -677,7 +671,7 @@ lock_loop:
/* Spin waiting for the lock_word to become free */
HMT_low();
while (i < srv_n_spin_wait_rounds
- && lock->lock_word.load(std::memory_order_relaxed) <= X_LOCK_HALF_DECR) {
+ && lock->lock_word <= X_LOCK_HALF_DECR) {
ut_delay(srv_spin_wait_delay);
i++;
}
@@ -778,7 +772,7 @@ lock_loop:
/* Spin waiting for the lock_word to become free */
while (i < srv_n_spin_wait_rounds
- && lock->lock_word.load(std::memory_order_relaxed) <= X_LOCK_HALF_DECR) {
+ && lock->lock_word <= X_LOCK_HALF_DECR) {
ut_delay(srv_spin_wait_delay);
i++;
}
@@ -841,13 +835,11 @@ rw_lock_validate(
/*=============*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- int32_t lock_word;
-
ut_ad(lock);
- lock_word = lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_word = lock->lock_word;
- ut_ad(lock->waiters.load(std::memory_order_relaxed) < 2);
+ ut_ad(lock->waiters < 2);
ut_ad(lock_word > -(2 * X_LOCK_DECR));
ut_ad(lock_word <= X_LOCK_DECR);
@@ -910,7 +902,7 @@ rw_lock_add_debug_info(
rw_lock_debug_mutex_exit();
if (pass == 0 && lock_type != RW_LOCK_X_WAIT) {
- auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
+ int32_t lock_word = lock->lock_word;
/* Recursive x while holding SX
(lock_type == RW_LOCK_X && lock_word == -X_LOCK_HALF_DECR)
@@ -1096,11 +1088,11 @@ rw_lock_list_print_info(
count++;
- if (lock->lock_word.load(std::memory_order_relaxed) != X_LOCK_DECR) {
+ if (lock->lock_word != X_LOCK_DECR) {
fprintf(file, "RW-LOCK: %p ", (void*) lock);
- if (int32_t waiters= lock->waiters.load(std::memory_order_relaxed)) {
+ if (int32_t waiters= lock->waiters) {
fprintf(file, " (%d waiters)\n", waiters);
} else {
putc('\n', file);
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index 39035dc545e..b95a2ac565c 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2019, MariaDB Corporation.
+Copyright (c) 2017, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -365,7 +365,7 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
while (!fseg_free_step_not_header(
TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
- + undo_page, false, &mtr)) {
+ + undo_page, &mtr)) {
mutex_exit(&rseg->mutex);
mtr.commit();
@@ -401,7 +401,7 @@ trx_purge_free_segment(trx_rseg_t* rseg, fil_addr_t hdr_addr)
fsp0fsp.cc. */
} while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
- + undo_page, false, &mtr));
+ + undo_page, &mtr));
const ulint hist_size = mach_read_from_4(rseg_hdr
+ TRX_RSEG_HISTORY_SIZE);
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 5cb7ec3ae0e..54e3f4de467 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2019, MariaDB Corporation.
+Copyright (c) 2014, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -853,7 +853,7 @@ trx_undo_free_page(
TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + undo_page, mtr);
fseg_free_page(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + header_page,
- rseg->space, page_no, false, true, mtr);
+ rseg->space, page_no, true, mtr);
const fil_addr_t last_addr = flst_get_last(
TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + header_page, mtr);
@@ -1055,7 +1055,7 @@ trx_undo_seg_free(
file_seg = seg_header + TRX_UNDO_FSEG_HEADER;
- finished = fseg_free_step(file_seg, false, &mtr);
+ finished = fseg_free_step(file_seg, &mtr);
if (finished) {
/* Update the rseg header */
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
index b18f970d2ce..14b6052a7d3 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_auto_increment_tokudb.result
@@ -1115,5 +1115,13 @@ SELECT * FROM t1;
a
0
DROP TABLE t1;
+#
+# MDEV-19622 Assertion failures in
+# ha_partition::set_auto_increment_if_higher upon UPDATE on Aria table
+#
+CREATE OR REPLACE TABLE t1 (pk INT AUTO_INCREMENT, a INT, KEY(pk)) ENGINE=myisam PARTITION BY HASH(a);
+INSERT INTO t1 VALUES (1,1),(2,2);
+UPDATE t1 SET pk = 0;
+DROP TABLE t1;
##############################################################################
SET GLOBAL tokudb_prelock_empty = @tokudb_prelock_empty_saved;