diff options
author | law <law@138bc75d-0d04-0410-961f-82ee72b054a4> | 2011-11-30 05:16:36 +0000 |
---|---|---|
committer | law <law@138bc75d-0d04-0410-961f-82ee72b054a4> | 2011-11-30 05:16:36 +0000 |
commit | bc4381667a31bd5f1e677d64c789b1e959df00d5 (patch) | |
tree | a6b80dca8f72a2e7640e4d535901e42325b2a954 /libstdc++-v3/include/bits | |
parent | 1cd66bce1663eb648638d311b493de0dcc4146c3 (diff) | |
parent | 738c50b853f5ba0eaf93e23f6d29cbac761eef9e (diff) | |
download | gcc-reload-v2a.tar.gz |
Weekly merge from trunk. No regressions.reload-v2a
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/reload-v2a@181834 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libstdc++-v3/include/bits')
-rw-r--r-- | libstdc++-v3/include/bits/atomic_base.h | 3 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/basic_string.h | 8 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/functional_hash.h | 75 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/hashtable.h | 810 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/hashtable_policy.h | 231 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/shared_ptr.h | 2 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/shared_ptr_base.h | 2 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/stl_bvector.h | 2 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/stl_map.h | 5 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/stl_multimap.h | 5 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/stl_tree.h | 10 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/unique_ptr.h | 2 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/unordered_map.h | 8 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/unordered_set.h | 8 | ||||
-rw-r--r-- | libstdc++-v3/include/bits/vector.tcc | 2 |
15 files changed, 734 insertions, 439 deletions
diff --git a/libstdc++-v3/include/bits/atomic_base.h b/libstdc++-v3/include/bits/atomic_base.h index f0336611d3f..cf292a85385 100644 --- a/libstdc++-v3/include/bits/atomic_base.h +++ b/libstdc++-v3/include/bits/atomic_base.h @@ -93,6 +93,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION #define LOCKFREE_PROP(T) (__atomic_always_lock_free (sizeof (T), 0) ? 2 : 1) +#define ATOMIC_BOOL_LOCK_FREE LOCKFREE_PROP (bool) #define ATOMIC_CHAR_LOCK_FREE LOCKFREE_PROP (char) #define ATOMIC_CHAR16_T_LOCK_FREE LOCKFREE_PROP (char16_t) #define ATOMIC_CHAR32_T_LOCK_FREE LOCKFREE_PROP (char32_t) @@ -101,7 +102,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION #define ATOMIC_INT_LOCK_FREE LOCKFREE_PROP (int) #define ATOMIC_LONG_LOCK_FREE LOCKFREE_PROP (long) #define ATOMIC_LLONG_LOCK_FREE LOCKFREE_PROP (long long) - +#define ATOMIC_POINTER_LOCK_FREE LOCKFREE_PROP (void *) // Base types for atomics. template<typename _IntTp> diff --git a/libstdc++-v3/include/bits/basic_string.h b/libstdc++-v3/include/bits/basic_string.h index 00f9bccd419..169daf58613 100644 --- a/libstdc++-v3/include/bits/basic_string.h +++ b/libstdc++-v3/include/bits/basic_string.h @@ -3044,7 +3044,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public __hash_base<size_t, string> { size_t - operator()(const string& __s) const + operator()(const string& __s) const noexcept { return std::_Hash_impl::hash(__s.data(), __s.length()); } }; @@ -3055,7 +3055,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public __hash_base<size_t, wstring> { size_t - operator()(const wstring& __s) const + operator()(const wstring& __s) const noexcept { return std::_Hash_impl::hash(__s.data(), __s.length() * sizeof(wchar_t)); } }; @@ -3069,7 +3069,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public __hash_base<size_t, u16string> { size_t - operator()(const u16string& __s) const + operator()(const u16string& __s) const noexcept { return std::_Hash_impl::hash(__s.data(), __s.length() * sizeof(char16_t)); } }; @@ -3080,7 +3080,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public 
__hash_base<size_t, u32string> { size_t - operator()(const u32string& __s) const + operator()(const u32string& __s) const noexcept { return std::_Hash_impl::hash(__s.data(), __s.length() * sizeof(char32_t)); } }; diff --git a/libstdc++-v3/include/bits/functional_hash.h b/libstdc++-v3/include/bits/functional_hash.h index e77cb4e17bf..2b82b21f716 100644 --- a/libstdc++-v3/include/bits/functional_hash.h +++ b/libstdc++-v3/include/bits/functional_hash.h @@ -66,61 +66,64 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION struct hash<_Tp*> : public __hash_base<size_t, _Tp*> { size_t - operator()(_Tp* __p) const + operator()(_Tp* __p) const noexcept { return reinterpret_cast<size_t>(__p); } }; // Explicit specializations for integer types. #define _Cxx_hashtable_define_trivial_hash(_Tp) \ template<> \ - inline size_t \ - hash<_Tp>::operator()(_Tp __val) const \ - { return static_cast<size_t>(__val); } + struct hash<_Tp> : public __hash_base<size_t, _Tp> \ + { \ + size_t \ + operator()(_Tp __val) const noexcept \ + { return static_cast<size_t>(__val); } \ + }; /// Explicit specialization for bool. - _Cxx_hashtable_define_trivial_hash(bool); + _Cxx_hashtable_define_trivial_hash(bool) /// Explicit specialization for char. - _Cxx_hashtable_define_trivial_hash(char); + _Cxx_hashtable_define_trivial_hash(char) /// Explicit specialization for signed char. - _Cxx_hashtable_define_trivial_hash(signed char); + _Cxx_hashtable_define_trivial_hash(signed char) /// Explicit specialization for unsigned char. - _Cxx_hashtable_define_trivial_hash(unsigned char); + _Cxx_hashtable_define_trivial_hash(unsigned char) /// Explicit specialization for wchar_t. - _Cxx_hashtable_define_trivial_hash(wchar_t); + _Cxx_hashtable_define_trivial_hash(wchar_t) /// Explicit specialization for char16_t. - _Cxx_hashtable_define_trivial_hash(char16_t); + _Cxx_hashtable_define_trivial_hash(char16_t) /// Explicit specialization for char32_t. 
- _Cxx_hashtable_define_trivial_hash(char32_t); + _Cxx_hashtable_define_trivial_hash(char32_t) /// Explicit specialization for short. - _Cxx_hashtable_define_trivial_hash(short); + _Cxx_hashtable_define_trivial_hash(short) /// Explicit specialization for int. - _Cxx_hashtable_define_trivial_hash(int); + _Cxx_hashtable_define_trivial_hash(int) /// Explicit specialization for long. - _Cxx_hashtable_define_trivial_hash(long); + _Cxx_hashtable_define_trivial_hash(long) /// Explicit specialization for long long. - _Cxx_hashtable_define_trivial_hash(long long); + _Cxx_hashtable_define_trivial_hash(long long) /// Explicit specialization for unsigned short. - _Cxx_hashtable_define_trivial_hash(unsigned short); + _Cxx_hashtable_define_trivial_hash(unsigned short) /// Explicit specialization for unsigned int. - _Cxx_hashtable_define_trivial_hash(unsigned int); + _Cxx_hashtable_define_trivial_hash(unsigned int) /// Explicit specialization for unsigned long. - _Cxx_hashtable_define_trivial_hash(unsigned long); + _Cxx_hashtable_define_trivial_hash(unsigned long) /// Explicit specialization for unsigned long long. - _Cxx_hashtable_define_trivial_hash(unsigned long long); + _Cxx_hashtable_define_trivial_hash(unsigned long long) #undef _Cxx_hashtable_define_trivial_hash @@ -162,26 +165,36 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION /// Specialization for float. template<> - inline size_t - hash<float>::operator()(float __val) const + struct hash<float> : public __hash_base<size_t, float> { - // 0 and -0 both hash to zero. - return __val != 0.0f ? std::_Hash_impl::hash(__val) : 0; - } + size_t + operator()(float __val) const noexcept + { + // 0 and -0 both hash to zero. + return __val != 0.0f ? std::_Hash_impl::hash(__val) : 0; + } + }; /// Specialization for double. template<> - inline size_t - hash<double>::operator()(double __val) const + struct hash<double> : public __hash_base<size_t, double> { - // 0 and -0 both hash to zero. - return __val != 0.0 ? 
std::_Hash_impl::hash(__val) : 0; - } + size_t + operator()(double __val) const noexcept + { + // 0 and -0 both hash to zero. + return __val != 0.0 ? std::_Hash_impl::hash(__val) : 0; + } + }; /// Specialization for long double. template<> - _GLIBCXX_PURE size_t - hash<long double>::operator()(long double __val) const; + struct hash<long double> + : public __hash_base<size_t, long double> + { + _GLIBCXX_PURE size_t + operator()(long double __val) const noexcept; + }; // @} group hashes diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h index 95d06b21262..dfa91f7cc8c 100644 --- a/libstdc++-v3/include/bits/hashtable.h +++ b/libstdc++-v3/include/bits/hashtable.h @@ -48,7 +48,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION // value type is Value. As a conforming extension, we allow for // value type != Value. - // _ExtractKey: function object that takes a object of type Value + // _ExtractKey: function object that takes an object of type Value // and returns a value of type _Key. // _Equal: function object that takes two objects of type k and returns @@ -78,9 +78,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION // count. If so, returns make_pair(true, n), where n is the new // bucket count. If not, returns make_pair(false, <anything>). - // ??? Right now it is hard-wired that the number of buckets never - // shrinks. Should we allow _RehashPolicy to change that? - // __cache_hash_code: bool. true if we store the value of the hash // function along with the value. This is a time-space tradeoff. // Storing it may improve lookup speed by reducing the number of times @@ -94,6 +91,53 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION // is always at most one, false if it may be an arbitrary number. This // true for unordered_set and unordered_map, false for unordered_multiset // and unordered_multimap. 
+ /** + * Here's _Hashtable data structure, each _Hashtable has: + * - _Bucket[] _M_buckets + * - size_type _M_bucket_count + * - size_type _M_begin_bucket_index + * - size_type _M_element_count + * + * with _Bucket being _Node* and _Node: + * - _Node* _M_next + * - Tp _M_value + * - size_t _M_code if cache_hash_code is true + * + * In terms of Standard containers the hastable is like the aggregation of: + * - std::forward_list<_Node> containing the elements + * - std::vector<std::forward_list<_Node>::iterator> representing the buckets + * + * The first non-empty bucket with index _M_begin_bucket_index contains the + * first container node which is also the first bucket node whereas other + * non-empty buckets contain the node before the first bucket node. This is so + * to implement something like a std::forward_list::erase_after on container + * erase calls. + * + * Access to the bucket last element require a check on the hash code to see + * if the node is still in the bucket. Such a design impose a quite efficient + * hash functor and is one of the reasons it is highly advise to set + * __cache_hash_code to true. + * + * The container iterators are simply built from nodes. This way incrementing + * the iterator is perfectly efficient no matter how many empty buckets there + * are in the container. + * + * On insert we compute element hash code and thanks to it find the bucket + * index. If the element is the first one in the bucket we must find the + * previous non-empty bucket where the previous node rely. To keep this loop + * minimal it is important that the number of bucket is not too high compared + * to the number of elements. So the hash policy must be carefully design so + * that it computes a bucket count large enough to respect the user defined + * load factor but also not too large to limit impact on the insert operation. + * + * On erase, the simple iterator design impose to use the hash functor to get + * the index of the bucket to update. 
For this reason, when __cache_hash_code + * is set to false, there is a static assertion that the hash functor cannot + * throw. + * + * _M_begin_bucket_index is used to offer contant time access to the container + * begin iterator. + */ template<typename _Key, typename _Value, typename _Allocator, typename _ExtractKey, typename _Equal, @@ -130,6 +174,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __constant_iterators, __unique_keys> > { + static_assert(__or_<integral_constant<bool, __cache_hash_code>, + __detail::__is_noexcept_hash<_Key, _H1>>::value, + "Cache the hash code or qualify your hash functor with noexcept"); public: typedef _Allocator allocator_type; typedef _Value value_type; @@ -152,30 +199,28 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __cache_hash_code> const_local_iterator; - typedef __detail::_Hashtable_iterator<value_type, __constant_iterators, - __cache_hash_code> - iterator; - typedef __detail::_Hashtable_const_iterator<value_type, - __constant_iterators, - __cache_hash_code> - const_iterator; + typedef local_iterator iterator; + typedef const_local_iterator const_iterator; template<typename _Key2, typename _Value2, typename _Ex2, bool __unique2, typename _Hashtable2> friend struct __detail::_Map_base; private: + typedef typename _RehashPolicy::_State _RehashPolicyState; typedef __detail::_Hash_node<_Value, __cache_hash_code> _Node; typedef typename _Allocator::template rebind<_Node>::other _Node_allocator_type; - typedef typename _Allocator::template rebind<_Node*>::other + typedef _Node* _Bucket; + //typedef __detail::_Bucket<_Value, __cache_hash_code> _Bucket; + typedef typename _Allocator::template rebind<_Bucket>::other _Bucket_allocator_type; typedef typename _Allocator::template rebind<_Value>::other _Value_allocator_type; _Node_allocator_type _M_node_allocator; - _Node** _M_buckets; + _Bucket* _M_buckets; size_type _M_bucket_count; size_type _M_begin_bucket_index; // First non-empty bucket. 
size_type _M_element_count; @@ -188,14 +233,38 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION void _M_deallocate_node(_Node* __n); + // Deallocate all nodes contained in the bucket array, buckets' nodes + // are not linked to each other + void + _M_deallocate_nodes(_Bucket*, size_type); + + // Deallocate the linked list of nodes pointed to by __n void - _M_deallocate_nodes(_Node**, size_type); + _M_deallocate_nodes(_Node* __n); - _Node** + _Bucket* _M_allocate_buckets(size_type __n); void - _M_deallocate_buckets(_Node**, size_type __n); + _M_deallocate_buckets(_Bucket*, size_type __n); + + // Gets bucket begin dealing with the difference between first non-empty + // bucket containing the first container node and the other non-empty + // buckets containing the node before the one belonging to the bucket. + _Node* + _M_bucket_begin(size_type __bkt) const; + + // Gets the bucket last node if any + _Node* + _M_bucket_end(size_type __bkt) const; + + // Gets the bucket node after the last if any + _Node* + _M_bucket_past_the_end(size_type __bkt) const + { + _Node* __end = _M_bucket_end(__bkt); + return __end ? 
__end->_M_next : nullptr; + } public: // Constructor, destructor, assignment, swap @@ -240,27 +309,27 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION // Basic container operations iterator begin() noexcept - { return iterator(_M_buckets + _M_begin_bucket_index); } + { return iterator(_M_buckets[_M_begin_bucket_index]); } const_iterator begin() const noexcept - { return const_iterator(_M_buckets + _M_begin_bucket_index); } + { return const_iterator(_M_buckets[_M_begin_bucket_index]); } iterator end() noexcept - { return iterator(_M_buckets + _M_bucket_count); } + { return iterator(nullptr); } const_iterator end() const noexcept - { return const_iterator(_M_buckets + _M_bucket_count); } + { return const_iterator(nullptr); } const_iterator cbegin() const noexcept - { return const_iterator(_M_buckets + _M_begin_bucket_index); } + { return const_iterator(_M_buckets[_M_begin_bucket_index]); } const_iterator cend() const noexcept - { return const_iterator(_M_buckets + _M_bucket_count); } + { return const_iterator(nullptr); } size_type size() const noexcept @@ -307,28 +376,28 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION local_iterator begin(size_type __n) - { return local_iterator(_M_buckets[__n]); } + { return local_iterator(_M_bucket_begin(__n)); } local_iterator - end(size_type) - { return local_iterator(0); } + end(size_type __n) + { return local_iterator(_M_bucket_past_the_end(__n)); } const_local_iterator begin(size_type __n) const - { return const_local_iterator(_M_buckets[__n]); } + { return const_local_iterator(_M_bucket_begin(__n)); } const_local_iterator - end(size_type) const - { return const_local_iterator(0); } + end(size_type __n) const + { return const_local_iterator(_M_bucket_past_the_end(__n)); } // DR 691. 
const_local_iterator cbegin(size_type __n) const - { return const_local_iterator(_M_buckets[__n]); } + { return const_local_iterator(_M_bucket_begin(__n)); } const_local_iterator - cend(size_type) const - { return const_local_iterator(0); } + cend(size_type __n) const + { return const_local_iterator(_M_bucket_past_the_end(__n)); } float load_factor() const noexcept @@ -366,9 +435,26 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION private: // Find and insert helper functions and types _Node* - _M_find_node(_Node*, const key_type&, + _M_find_node(size_type, const key_type&, typename _Hashtable::_Hash_code_type) const; + // Insert a node in an empty bucket + void + _M_insert_bucket_begin(size_type, _Node*); + + // Insert a node after an other one in a non-empty bucket + void + _M_insert_after(size_type, _Node*, _Node*); + + // Remove the bucket first node + void + _M_remove_bucket_begin(size_type __bkt, _Node* __next_n, + size_type __next_bkt); + + // Get the node before __n in the bucket __bkt + _Node* + _M_get_previous_node(size_type __bkt, _Node* __n); + template<typename _Arg> iterator _M_insert_bucket(_Arg&&, size_type, @@ -432,6 +518,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION iterator erase(const_iterator); + // LWG 2059. + iterator + erase(iterator __it) + { return erase(const_iterator(__it)); } + size_type erase(const key_type&); @@ -449,8 +540,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION private: // Unconditionally change size of bucket array to n, restore hash policy - // resize value to __next_resize on exception. - void _M_rehash(size_type __n, size_type __next_resize); + // state to __state on exception. + void _M_rehash(size_type __n, const _RehashPolicyState& __state); }; @@ -471,7 +562,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __try { _M_node_allocator.construct(__n, std::forward<_Args>(__args)...); - __n->_M_next = 0; return __n; } __catch(...) 
@@ -501,18 +591,26 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: - _M_deallocate_nodes(_Node** __array, size_type __n) + _M_deallocate_nodes(_Bucket* __buckets, size_type __n) + { + for (size_type __i = 0; __i != __n; ++__i) + _M_deallocate_nodes(__buckets[__i]); + } + + template<typename _Key, typename _Value, + typename _Allocator, typename _ExtractKey, typename _Equal, + typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, + bool __chc, bool __cit, bool __uk> + void + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_deallocate_nodes(_Node* __n) { - for (size_type __i = 0; __i < __n; ++__i) + while (__n) { - _Node* __p = __array[__i]; - while (__p) - { - _Node* __tmp = __p; - __p = __p->_M_next; - _M_deallocate_node(__tmp); - } - __array[__i] = 0; + _Node* __tmp = __n; + __n = __n->_M_next; + _M_deallocate_node(__tmp); } } @@ -522,18 +620,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION bool __chc, bool __cit, bool __uk> typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, - __chc, __cit, __uk>::_Node** + __chc, __cit, __uk>::_Bucket* _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_allocate_buckets(size_type __n) { _Bucket_allocator_type __alloc(_M_node_allocator); - // We allocate one extra bucket to hold a sentinel, an arbitrary - // non-null pointer. Iterator increment relies on this. 
- _Node** __p = __alloc.allocate(__n + 1); - std::fill(__p, __p + __n, (_Node*) 0); - __p[__n] = reinterpret_cast<_Node*>(0x1000); + // We allocate one extra bucket to have _M_begin_bucket_index + // point to it as long as container is empty + _Bucket* __p = __alloc.allocate(__n + 1); + __builtin_memset(__p, 0, (__n + 1) * sizeof(_Bucket)); return __p; } @@ -544,7 +641,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: - _M_deallocate_buckets(_Node** __p, size_type __n) + _M_deallocate_buckets(_Bucket* __p, size_type __n) { _Bucket_allocator_type __alloc(_M_node_allocator); __alloc.deallocate(__p, __n + 1); @@ -554,6 +651,44 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION typename _Allocator, typename _ExtractKey, typename _Equal, typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, bool __chc, bool __cit, bool __uk> + typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, + _Equal, _H1, _H2, _Hash, _RehashPolicy, + __chc, __cit, __uk>::_Node* + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_bucket_begin(size_type __bkt) const + { + if (__bkt == _M_begin_bucket_index) + return _M_buckets[__bkt]; + _Node* __n = _M_buckets[__bkt]; + return __n ? 
__n->_M_next : nullptr; + } + + template<typename _Key, typename _Value, + typename _Allocator, typename _ExtractKey, typename _Equal, + typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, + bool __chc, bool __cit, bool __uk> + typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, + _Equal, _H1, _H2, _Hash, _RehashPolicy, + __chc, __cit, __uk>::_Node* + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_bucket_end(size_type __bkt) const + { + _Node* __n = _M_bucket_begin(__bkt); + if (__n) + for (;; __n = __n->_M_next) + if (!__n->_M_next + || this->_M_bucket_index(__n->_M_next, _M_bucket_count) + != __bkt) + break; + return __n; + } + + template<typename _Key, typename _Value, + typename _Allocator, typename _ExtractKey, typename _Equal, + typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, + bool __chc, bool __cit, bool __uk> _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _Hashtable(size_type __bucket_hint, @@ -571,6 +706,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_rehash_policy() { _M_bucket_count = _M_rehash_policy._M_next_bkt(__bucket_hint); + // We don't want the rehash policy to ask for the hashtable to shrink + // on the first insertion so we need to reset its previous resize level. + _M_rehash_policy._M_prev_resize = 0; _M_buckets = _M_allocate_buckets(_M_bucket_count); _M_begin_bucket_index = _M_bucket_count; } @@ -602,6 +740,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_bkt_for_elements(__detail:: __distance_fw(__f, __l))); + // We don't want the rehash policy to ask for the hashtable to shrink + // on the first insertion so we need to reset its previous resize + // level. 
+ _M_rehash_policy._M_prev_resize = 0; _M_buckets = _M_allocate_buckets(_M_bucket_count); _M_begin_bucket_index = _M_bucket_count; __try @@ -637,17 +779,41 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_buckets = _M_allocate_buckets(_M_bucket_count); __try { - for (size_type __i = 0; __i < __ht._M_bucket_count; ++__i) + const _Node* __ht_n = __ht._M_buckets[__ht._M_begin_bucket_index]; + if (!__ht_n) + return; + + // Note that the copy constructor do not rely on hash code usage. + // First deal with the special first node that is directly store in + // the first non-empty bucket + _Node* __this_n = _M_allocate_node(__ht_n->_M_v); + this->_M_copy_code(__this_n, __ht_n); + _M_buckets[_M_begin_bucket_index] = __this_n; + __ht_n = __ht_n->_M_next; + // Second deal with following non-empty buckets containing previous + // nodes node. + for (size_type __i = __ht._M_begin_bucket_index + 1; + __i != __ht._M_bucket_count; ++__i) { - _Node* __n = __ht._M_buckets[__i]; - _Node** __tail = _M_buckets + __i; - while (__n) + if (!__ht._M_buckets[__i]) + continue; + + for (; __ht_n != __ht._M_buckets[__i]->_M_next; + __ht_n = __ht_n->_M_next) { - *__tail = _M_allocate_node(__n->_M_v); - this->_M_copy_code(*__tail, __n); - __tail = &((*__tail)->_M_next); - __n = __n->_M_next; + __this_n->_M_next = _M_allocate_node(__ht_n->_M_v); + this->_M_copy_code(__this_n->_M_next, __ht_n); + __this_n = __this_n->_M_next; } + + _M_buckets[__i] = __this_n; + } + // Last finalize copy of the nodes of the last non-empty bucket + for (; __ht_n; __ht_n = __ht_n->_M_next) + { + __this_n->_M_next = _M_allocate_node(__ht_n->_M_v); + this->_M_copy_code(__this_n->_M_next, __ht_n); + __this_n = __this_n->_M_next; } } __catch(...) 
@@ -732,8 +898,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __rehash_policy(const _RehashPolicy& __pol) { size_type __n_bkt = __pol._M_bkt_for_elements(_M_element_count); - if (__n_bkt > _M_bucket_count) - _M_rehash(__n_bkt, _M_rehash_policy._M_next_resize); + if (__n_bkt != _M_bucket_count) + _M_rehash(__n_bkt, _M_rehash_policy._M_state()); _M_rehash_policy = __pol; } @@ -750,8 +916,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - _Node* __p = _M_find_node(_M_buckets[__n], __k, __code); - return __p ? iterator(__p, _M_buckets + __n) : this->end(); + _Node* __p = _M_find_node(__n, __k, __code); + return __p ? iterator(__p) : this->end(); } template<typename _Key, typename _Value, @@ -767,8 +933,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - _Node* __p = _M_find_node(_M_buckets[__n], __k, __code); - return __p ? const_iterator(__p, _M_buckets + __n) : this->end(); + _Node* __p = _M_find_node(__n, __k, __code); + return __p ? const_iterator(__p) : this->end(); } template<typename _Key, typename _Value, @@ -784,10 +950,25 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); + _Node* __p = _M_bucket_begin(__n); + if (!__p) + return 0; + std::size_t __result = 0; - for (_Node* __p = _M_buckets[__n]; __p; __p = __p->_M_next) - if (this->_M_compare(__k, __code, __p)) - ++__result; + for (;; __p = __p->_M_next) + { + if (this->_M_compare(__k, __code, __p)) + ++__result; + else if (__result) + // All equivalent values are next to each other; if we find a + // non-equivalent value after an equivalent one it means that we + // won't find any more equivalent values. 
+ break; + if (!__p->_M_next + || this->_M_bucket_index(__p->_M_next, _M_bucket_count) + != __n) + break; + } return __result; } @@ -809,21 +990,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - _Node** __head = _M_buckets + __n; - _Node* __p = _M_find_node(*__head, __k, __code); + _Node* __p = _M_find_node(__n, __k, __code); if (__p) { _Node* __p1 = __p->_M_next; - for (; __p1; __p1 = __p1->_M_next) - if (!this->_M_compare(__k, __code, __p1)) - break; + while (__p1 + && this->_M_bucket_index(__p1, _M_bucket_count) == __n + && this->_M_compare(__k, __code, __p1)) + __p1 = __p1->_M_next; - iterator __first(__p, __head); - iterator __last(__p1, __head); - if (!__p1) - __last._M_incr_bucket(); - return std::make_pair(__first, __last); + return std::make_pair(iterator(__p), iterator(__p1)); } else return std::make_pair(this->end(), this->end()); @@ -847,28 +1024,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - _Node** __head = _M_buckets + __n; - _Node* __p = _M_find_node(*__head, __k, __code); + _Node* __p = _M_find_node(__n, __k, __code); if (__p) { _Node* __p1 = __p->_M_next; - for (; __p1; __p1 = __p1->_M_next) - if (!this->_M_compare(__k, __code, __p1)) - break; + while (__p1 + && this->_M_bucket_index(__p1, _M_bucket_count) == __n + && this->_M_compare(__k, __code, __p1)) + __p1 = __p1->_M_next; - const_iterator __first(__p, __head); - const_iterator __last(__p1, __head); - if (!__p1) - __last._M_incr_bucket(); - return std::make_pair(__first, __last); + return std::make_pair(const_iterator(__p), const_iterator(__p1)); } else return std::make_pair(this->end(), this->end()); } - // Find the node whose key compares equal to k, beginning the search - // at p (usually the head of a bucket). 
Return nil if no node is found. + // Find the node whose key compares equal to k in the bucket n. Return nullptr + // if no node is found. template<typename _Key, typename _Value, typename _Allocator, typename _ExtractKey, typename _Equal, typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, @@ -878,13 +1051,131 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __chc, __cit, __uk>::_Node* _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: - _M_find_node(_Node* __p, const key_type& __k, + _M_find_node(size_type __n, const key_type& __k, typename _Hashtable::_Hash_code_type __code) const { - for (; __p; __p = __p->_M_next) - if (this->_M_compare(__k, __code, __p)) - return __p; - return false; + _Node* __p = _M_bucket_begin(__n); + if (!__p) + return nullptr; + for (;; __p = __p->_M_next) + { + if (this->_M_compare(__k, __code, __p)) + return __p; + if (!(__p->_M_next) + || this->_M_bucket_index(__p->_M_next, _M_bucket_count) != __n) + break; + } + return nullptr; + } + + template<typename _Key, typename _Value, + typename _Allocator, typename _ExtractKey, typename _Equal, + typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, + bool __chc, bool __cit, bool __uk> + void + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_insert_bucket_begin(size_type __bkt, _Node* __new_node) + { + _Node* __prev_n; + if (__bkt < _M_begin_bucket_index) + { + if (_M_begin_bucket_index != _M_bucket_count) + { + __new_node->_M_next = _M_buckets[_M_begin_bucket_index]; + _M_buckets[_M_begin_bucket_index] = __new_node; + } + __prev_n = __new_node; + _M_begin_bucket_index = __bkt; + } + else + { + // We need to find previous non-empty bucket to link the new node. + // There are several ways to find this previous bucket: + // 1. Move backward until we find it (the current method) + // 2. 
Start from the begin bucket index and move forward until we + // cross __n position. + // 3. Move forward until we find a non-empty bucket that will + // contain the previous node. + size_type __prev_bkt; + for (__prev_bkt = __bkt; __prev_bkt-- != 0;) + if (_M_buckets[__prev_bkt]) + break; + __prev_n = _M_bucket_end(__prev_bkt); + _M_insert_after(__prev_bkt, __prev_n, __new_node); + } + _M_buckets[__bkt] = __prev_n; + } + + template<typename _Key, typename _Value, + typename _Allocator, typename _ExtractKey, typename _Equal, + typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, + bool __chc, bool __cit, bool __uk> + void + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_insert_after(size_type __bkt, _Node* __prev_n, _Node* __new_n) + { + if (__prev_n->_M_next) + { + size_type __next_bkt = + this->_M_bucket_index(__prev_n->_M_next, _M_bucket_count); + if (__next_bkt != __bkt) + _M_buckets[__next_bkt] = __new_n; + } + __new_n->_M_next = __prev_n->_M_next; + __prev_n->_M_next = __new_n; + } + + template<typename _Key, typename _Value, + typename _Allocator, typename _ExtractKey, typename _Equal, + typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, + bool __chc, bool __cit, bool __uk> + void + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_remove_bucket_begin(size_type __bkt, _Node* __next, size_type __next_bkt) + { + if (!__next || __next_bkt != __bkt) + { + // Bucket is now empty + if (__next && __next_bkt != __bkt) + // Update next non-empty bucket before begin node + _M_buckets[__next_bkt] = _M_buckets[__bkt]; + _M_buckets[__bkt] = nullptr; + if (__bkt == _M_begin_bucket_index) + // We need to update begin bucket index + if (__next) + { + _M_begin_bucket_index = __next_bkt; + _M_buckets[_M_begin_bucket_index] = __next; + } + else + _M_begin_bucket_index = _M_bucket_count; + } + else if (__bkt 
== _M_begin_bucket_index) + _M_buckets[__bkt] = __next; + } + + template<typename _Key, typename _Value, + typename _Allocator, typename _ExtractKey, typename _Equal, + typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, + bool __chc, bool __cit, bool __uk> + typename _Hashtable<_Key, _Value, _Allocator, _ExtractKey, + _Equal, _H1, _H2, _Hash, _RehashPolicy, + __chc, __cit, __uk>::_Node* + _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, + _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: + _M_get_previous_node(size_type __bkt, _Node* __n) + { + _Node* __prev_n = nullptr; + if (__bkt != _M_begin_bucket_index || __n != _M_buckets[__bkt]) + { + __prev_n = _M_buckets[__bkt]; + while (__prev_n->_M_next != __n) + __prev_n = __prev_n->_M_next; + } + return __prev_n; } // Insert v in bucket n (assumes no element with its key already present). @@ -901,7 +1192,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_insert_bucket(_Arg&& __v, size_type __n, typename _Hashtable::_Hash_code_type __code) { - const size_type __saved_next_resize = _M_rehash_policy._M_next_resize; + const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state(); std::pair<bool, std::size_t> __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1); @@ -912,27 +1203,27 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION __n = this->_M_bucket_index(__k, __code, __do_rehash.second); } - _Node* __new_node = 0; + _Node* __new_node = nullptr; __try { // Allocate the new node before doing the rehash so that we // don't do a rehash if the allocation throws. 
__new_node = _M_allocate_node(std::forward<_Arg>(__v)); + this->_M_store_code(__new_node, __code); if (__do_rehash.first) - _M_rehash(__do_rehash.second, __saved_next_resize); + _M_rehash(__do_rehash.second, __saved_state); - __new_node->_M_next = _M_buckets[__n]; - this->_M_store_code(__new_node, __code); - _M_buckets[__n] = __new_node; + if (_M_buckets[__n]) + _M_insert_after(__n, _M_buckets[__n], __new_node); + else + _M_insert_bucket_begin(__n, __new_node); ++_M_element_count; - if (__n < _M_begin_bucket_index) - _M_begin_bucket_index = __n; - return iterator(__new_node, _M_buckets + __n); + return iterator(__new_node); } __catch(...) { if (!__new_node) - _M_rehash_policy._M_next_resize = __saved_next_resize; + _M_rehash_policy._M_reset(__saved_state); else _M_deallocate_node(__new_node); __throw_exception_again; @@ -957,8 +1248,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); size_type __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - if (_Node* __p = _M_find_node(_M_buckets[__n], __k, __code)) - return std::make_pair(iterator(__p, _M_buckets + __n), false); + if (_Node* __p = _M_find_node(__n, __k, __code)) + return std::make_pair(iterator(__p), false); return std::make_pair(_M_insert_bucket(std::forward<_Arg>(__v), __n, __code), true); } @@ -976,37 +1267,58 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: _M_insert(_Arg&& __v, std::false_type) { - const size_type __saved_next_resize = _M_rehash_policy._M_next_resize; + const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state(); std::pair<bool, std::size_t> __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, 1); - if (__do_rehash.first) - _M_rehash(__do_rehash.second, __saved_next_resize); const key_type& __k = this->_M_extract(__v); typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); size_type __n = this->_M_bucket_index(__k, __code, 
_M_bucket_count); // First find the node, avoid leaking new_node if compare throws. - _Node* __prev = _M_find_node(_M_buckets[__n], __k, __code); - _Node* __new_node = _M_allocate_node(std::forward<_Arg>(__v)); - - if (__prev) + _Node* __prev = _M_find_node(__n, __k, __code); + _Node* __new_node = nullptr; + __try { - __new_node->_M_next = __prev->_M_next; - __prev->_M_next = __new_node; + // Second allocate new node so that we don't rehash if it throws + __new_node = _M_allocate_node(std::forward<_Arg>(__v)); + this->_M_store_code(__new_node, __code); + if (__do_rehash.first) + { + _M_rehash(__do_rehash.second, __saved_state); + __n = this->_M_bucket_index(__k, __code, _M_bucket_count); + // __prev is still valid because rehash do not invalidate nodes + } + + if (__prev) + // Insert after the previous equivalent node + _M_insert_after(__n, __prev, __new_node); + else if (_M_buckets[__n]) + // Bucket is not empty and the inserted node has no equivalent in + // the hashtable. We must insert the new node at the beginning or + // end of the bucket to preserve equivalent elements relative + // positions. + if (__n != _M_begin_bucket_index) + // We insert the new node at the beginning + _M_insert_after(__n, _M_buckets[__n], __new_node); + else + // We insert the new node at the end + _M_insert_after(__n, _M_bucket_end(__n), __new_node); + else + _M_insert_bucket_begin(__n, __new_node); + ++_M_element_count; + return iterator(__new_node); } - else + __catch(...) 
{ - __new_node->_M_next = _M_buckets[__n]; - _M_buckets[__n] = __new_node; - if (__n < _M_begin_bucket_index) - _M_begin_bucket_index = __n; + if (!__new_node) + _M_rehash_policy._M_reset(__saved_state); + else + _M_deallocate_node(__new_node); + __throw_exception_again; } - this->_M_store_code(__new_node, __code); - ++_M_element_count; - return iterator(__new_node, _M_buckets + __n); } template<typename _Key, typename _Value, @@ -1020,12 +1332,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION insert(_InputIterator __first, _InputIterator __last) { size_type __n_elt = __detail::__distance_fw(__first, __last); - const size_type __saved_next_resize = _M_rehash_policy._M_next_resize; + const _RehashPolicyState& __saved_state = _M_rehash_policy._M_state(); std::pair<bool, std::size_t> __do_rehash = _M_rehash_policy._M_need_rehash(_M_bucket_count, _M_element_count, __n_elt); if (__do_rehash.first) - _M_rehash(__do_rehash.second, __saved_next_resize); + _M_rehash(__do_rehash.second, __saved_state); for (; __first != __last; ++__first) this->insert(*__first); @@ -1042,31 +1354,29 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: erase(const_iterator __it) { - iterator __result(__it._M_cur_node, __it._M_cur_bucket); - ++__result; - - _Node* __cur = *__it._M_cur_bucket; - if (__cur == __it._M_cur_node) + _Node* __n = __it._M_cur; + std::size_t __bkt = this->_M_bucket_index(__n, _M_bucket_count); + + // Look for previous node to unlink it from the erased one, this is why + // we need buckets to contain the before begin node of the bucket to make + // this research fast. + _Node* __prev_n = _M_get_previous_node(__bkt, __n); + if (__n == _M_bucket_begin(__bkt)) + _M_remove_bucket_begin(__bkt, __n->_M_next, + __n->_M_next ? 
this->_M_bucket_index(__n->_M_next, _M_bucket_count) + : 0); + else if (__n->_M_next) { - *__it._M_cur_bucket = __cur->_M_next; - - // If _M_begin_bucket_index no longer indexes the first non-empty - // bucket - its single node is being erased - update it. - if (!_M_buckets[_M_begin_bucket_index]) - _M_begin_bucket_index = __result._M_cur_bucket - _M_buckets; - } - else - { - _Node* __next = __cur->_M_next; - while (__next != __it._M_cur_node) - { - __cur = __next; - __next = __cur->_M_next; - } - __cur->_M_next = __next->_M_next; + size_type __next_bkt = + this->_M_bucket_index(__n->_M_next, _M_bucket_count); + if (__next_bkt != __bkt) + _M_buckets[__next_bkt] = __prev_n; } - _M_deallocate_node(__it._M_cur_node); + if (__prev_n) + __prev_n->_M_next = __n->_M_next; + iterator __result(__n->_M_next); + _M_deallocate_node(__n); --_M_element_count; return __result; @@ -1084,64 +1394,65 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION erase(const key_type& __k) { typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k); - std::size_t __n = this->_M_bucket_index(__k, __code, _M_bucket_count); - size_type __result = 0; - - _Node** __slot = _M_buckets + __n; - while (*__slot && !this->_M_compare(__k, __code, *__slot)) - __slot = &((*__slot)->_M_next); + std::size_t __bkt = this->_M_bucket_index(__k, __code, _M_bucket_count); + // Look for the first matching node with its previous node at the same + // time + _Node* __n = _M_buckets[__bkt]; + if (!__n) + return 0; + _Node* __prev_n = nullptr; + if (__bkt != _M_begin_bucket_index) + { + __prev_n = __n; + __n = __n->_M_next; + } + bool __is_bucket_begin = true; + for (;; __prev_n = __n, __n = __n->_M_next) + { + if (this->_M_compare(__k, __code, __n)) + break; + if (!(__n->_M_next) + || this->_M_bucket_index(__n->_M_next, _M_bucket_count) != __bkt) + return 0; + __is_bucket_begin = false; + } - _Node** __saved_slot = 0; - while (*__slot && this->_M_compare(__k, __code, *__slot)) + // We found a matching node, start 
deallocation loop from it + std::size_t __next_bkt = __bkt; + _Node* __next_n = __n; + size_type __result = 0; + _Node* __saved_n = nullptr; + do { + _Node* __p = __next_n; + __next_n = __p->_M_next; // _GLIBCXX_RESOLVE_LIB_DEFECTS // 526. Is it undefined if a function in the standard changes // in parameters? - if (std::__addressof(this->_M_extract((*__slot)->_M_v)) + if (std::__addressof(this->_M_extract(__p->_M_v)) != std::__addressof(__k)) - { - _Node* __p = *__slot; - *__slot = __p->_M_next; - _M_deallocate_node(__p); - --_M_element_count; - ++__result; - } + _M_deallocate_node(__p); else - { - __saved_slot = __slot; - __slot = &((*__slot)->_M_next); - } - } - - if (__saved_slot) - { - _Node* __p = *__saved_slot; - *__saved_slot = __p->_M_next; - _M_deallocate_node(__p); + __saved_n = __p; --_M_element_count; ++__result; + if (!__next_n) + break; + __next_bkt = this->_M_bucket_index(__next_n, _M_bucket_count); } - - // If the entire bucket indexed by _M_begin_bucket_index has been - // erased look forward for the first non-empty bucket. - if (!_M_buckets[_M_begin_bucket_index]) - { - if (!_M_element_count) - _M_begin_bucket_index = _M_bucket_count; - else - { - ++_M_begin_bucket_index; - while (!_M_buckets[_M_begin_bucket_index]) - ++_M_begin_bucket_index; - } - } - + while (__next_bkt == __bkt && this->_M_compare(__k, __code, __next_n)); + + if (__saved_n) + _M_deallocate_node(__saved_n); + if (__is_bucket_begin) + _M_remove_bucket_begin(__bkt, __next_n, __next_bkt); + else if (__next_n && __next_bkt != __bkt) + _M_buckets[__next_bkt] = __prev_n; + if (__prev_n) + __prev_n->_M_next = __next_n; return __result; } - // ??? This could be optimized by taking advantage of the bucket - // structure, but it's not clear that it's worth doing. It probably - // wouldn't even be an optimization unless the load factor is large. 
template<typename _Key, typename _Value, typename _Allocator, typename _ExtractKey, typename _Equal, typename _H1, typename _H2, typename _Hash, typename _RehashPolicy, @@ -1153,9 +1464,42 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: erase(const_iterator __first, const_iterator __last) { - while (__first != __last) - __first = this->erase(__first); - return iterator(__last._M_cur_node, __last._M_cur_bucket); + _Node* __n = __first._M_cur; + _Node* __last_n = __last._M_cur; + if (__n == __last_n) + return iterator(__n); + + std::size_t __bkt = this->_M_bucket_index(__n, _M_bucket_count); + + _Node* __prev_n = _M_get_previous_node(__bkt, __n); + bool __is_bucket_begin = __n == _M_bucket_begin(__bkt); + std::size_t __n_bkt = __bkt; + for (;;) + { + do + { + _Node* __tmp = __n; + __n = __n->_M_next; + _M_deallocate_node(__tmp); + --_M_element_count; + if (!__n) + break; + __n_bkt = this->_M_bucket_index(__n, _M_bucket_count); + } + while (__n != __last_n && __n_bkt == __bkt); + if (__is_bucket_begin) + _M_remove_bucket_begin(__bkt, __n, __n_bkt); + if (__n == __last_n) + break; + __is_bucket_begin = true; + __bkt = __n_bkt; + } + + if (__n && __n_bkt != __bkt) + _M_buckets[__n_bkt] = __prev_n; + if (__prev_n) + __prev_n->_M_next = __n; + return iterator(__n); } template<typename _Key, typename _Value, @@ -1167,7 +1511,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: clear() noexcept { - _M_deallocate_nodes(_M_buckets, _M_bucket_count); + _M_deallocate_nodes(_M_buckets[_M_begin_bucket_index]); + __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(_Bucket)); _M_element_count = 0; _M_begin_bucket_index = _M_bucket_count; } @@ -1181,11 +1526,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: rehash(size_type __n) { - const size_type __saved_next_resize = _M_rehash_policy._M_next_resize; + const _RehashPolicyState& __saved_state = 
_M_rehash_policy._M_state(); _M_rehash(std::max(_M_rehash_policy._M_next_bkt(__n), _M_rehash_policy._M_bkt_for_elements(_M_element_count + 1)), - __saved_next_resize); + __saved_state); } template<typename _Key, typename _Value, @@ -1195,46 +1540,75 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION void _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal, _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>:: - _M_rehash(size_type __n, size_type __next_resize) + _M_rehash(size_type __n, const _RehashPolicyState& __state) { - _Node** __new_array = 0; + _Bucket* __new_buckets = nullptr; + _Node* __p = _M_buckets[_M_begin_bucket_index]; __try { - __new_array = _M_allocate_buckets(__n); + __new_buckets = _M_allocate_buckets(__n); + // First loop to store each node in its new bucket + while (__p) + { + _Node* __next = __p->_M_next; + std::size_t __new_index = this->_M_bucket_index(__p, __n); + if (!__new_buckets[__new_index]) + // Store temporarily bucket end node in _M_buckets if possible. + // This will boost second loop where we need to access bucket + // end node quickly. + if (__new_index < _M_bucket_count) + _M_buckets[__new_index] = __p; + __p->_M_next = __new_buckets[__new_index]; + __new_buckets[__new_index] = __p; + __p = __next; + } _M_begin_bucket_index = __n; - for (size_type __i = 0; __i < _M_bucket_count; ++__i) - while (_Node* __p = _M_buckets[__i]) + _Node* __prev_node = nullptr; + // Second loop to link all nodes together and to fix bucket values so + // that they contain the before begin node of the bucket. 
+ for (size_type __i = 0; __i != __n; ++__i) + if (__new_buckets[__i]) { - std::size_t __new_index = this->_M_bucket_index(__p, __n); - _M_buckets[__i] = __p->_M_next; - __p->_M_next = __new_array[__new_index]; - __new_array[__new_index] = __p; - if (__new_index < _M_begin_bucket_index) - _M_begin_bucket_index = __new_index; + if (__prev_node) + { + __prev_node->_M_next = __new_buckets[__i]; + __new_buckets[__i] = __prev_node; + } + else + _M_begin_bucket_index = __i; + if (__i < _M_bucket_count) + __prev_node = _M_buckets[__i]; + else + { + __prev_node = __new_buckets[__i]; + while (__prev_node->_M_next) + __prev_node = __prev_node->_M_next; + } } _M_deallocate_buckets(_M_buckets, _M_bucket_count); _M_bucket_count = __n; - _M_buckets = __new_array; + _M_buckets = __new_buckets; } __catch(...) { - if (__new_array) + if (__new_buckets) { // A failure here means that a hash function threw an exception. // We can't restore the previous state without calling the hash // function again, so the only sensible recovery is to delete // everything. - _M_deallocate_nodes(__new_array, __n); - _M_deallocate_buckets(__new_array, __n); - _M_deallocate_nodes(_M_buckets, _M_bucket_count); + _M_deallocate_nodes(__new_buckets, __n); + _M_deallocate_buckets(__new_buckets, __n); + _M_deallocate_nodes(__p); + __builtin_memset(_M_buckets, 0, sizeof(_Bucket) * _M_bucket_count); _M_element_count = 0; _M_begin_bucket_index = _M_bucket_count; - _M_rehash_policy._M_next_resize = 0; + _M_rehash_policy._M_reset(_RehashPolicyState()); } else // A failure here means that buckets allocation failed. We only // have to restore hash policy previous state. 
- _M_rehash_policy._M_next_resize = __next_resize; + _M_rehash_policy._M_reset(__state); __throw_exception_again; } } diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h index acb7e99a8df..44c749af515 100644 --- a/libstdc++-v3/include/bits/hashtable_policy.h +++ b/libstdc++-v3/include/bits/hashtable_policy.h @@ -59,6 +59,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return __distance_fw(__first, __last, _Tag()); } + // Helper type used to detect when the hash functor is noexcept qualified or + // not + template <typename _Key, typename _Hash> + struct __is_noexcept_hash : std::integral_constant<bool, + noexcept(declval<const _Hash&>()(declval<const _Key&>()))> + {}; + // Auxiliary types used for all instantiations of _Hashtable: nodes // and iterators. @@ -211,155 +218,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION } }; - template<typename _Value, bool __cache> - struct _Hashtable_iterator_base - { - _Hashtable_iterator_base(_Hash_node<_Value, __cache>* __node, - _Hash_node<_Value, __cache>** __bucket) - : _M_cur_node(__node), _M_cur_bucket(__bucket) { } - - void - _M_incr() - { - _M_cur_node = _M_cur_node->_M_next; - if (!_M_cur_node) - _M_incr_bucket(); - } - - void - _M_incr_bucket(); - - _Hash_node<_Value, __cache>* _M_cur_node; - _Hash_node<_Value, __cache>** _M_cur_bucket; - }; - - // Global iterators, used for arbitrary iteration within a hash - // table. Larger and more expensive than local iterators. - template<typename _Value, bool __cache> - void - _Hashtable_iterator_base<_Value, __cache>:: - _M_incr_bucket() - { - ++_M_cur_bucket; - - // This loop requires the bucket array to have a non-null sentinel. 
- while (!*_M_cur_bucket) - ++_M_cur_bucket; - _M_cur_node = *_M_cur_bucket; - } - - template<typename _Value, bool __cache> - inline bool - operator==(const _Hashtable_iterator_base<_Value, __cache>& __x, - const _Hashtable_iterator_base<_Value, __cache>& __y) - { return __x._M_cur_node == __y._M_cur_node; } - - template<typename _Value, bool __cache> - inline bool - operator!=(const _Hashtable_iterator_base<_Value, __cache>& __x, - const _Hashtable_iterator_base<_Value, __cache>& __y) - { return __x._M_cur_node != __y._M_cur_node; } - - template<typename _Value, bool __constant_iterators, bool __cache> - struct _Hashtable_iterator - : public _Hashtable_iterator_base<_Value, __cache> - { - typedef _Value value_type; - typedef typename std::conditional<__constant_iterators, - const _Value*, _Value*>::type - pointer; - typedef typename std::conditional<__constant_iterators, - const _Value&, _Value&>::type - reference; - typedef std::ptrdiff_t difference_type; - typedef std::forward_iterator_tag iterator_category; - - _Hashtable_iterator() - : _Hashtable_iterator_base<_Value, __cache>(0, 0) { } - - _Hashtable_iterator(_Hash_node<_Value, __cache>* __p, - _Hash_node<_Value, __cache>** __b) - : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { } - - explicit - _Hashtable_iterator(_Hash_node<_Value, __cache>** __b) - : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { } - - reference - operator*() const - { return this->_M_cur_node->_M_v; } - - pointer - operator->() const - { return std::__addressof(this->_M_cur_node->_M_v); } - - _Hashtable_iterator& - operator++() - { - this->_M_incr(); - return *this; - } - - _Hashtable_iterator - operator++(int) - { - _Hashtable_iterator __tmp(*this); - this->_M_incr(); - return __tmp; - } - }; - - template<typename _Value, bool __constant_iterators, bool __cache> - struct _Hashtable_const_iterator - : public _Hashtable_iterator_base<_Value, __cache> - { - typedef _Value value_type; - typedef const _Value* pointer; - 
typedef const _Value& reference; - typedef std::ptrdiff_t difference_type; - typedef std::forward_iterator_tag iterator_category; - - _Hashtable_const_iterator() - : _Hashtable_iterator_base<_Value, __cache>(0, 0) { } - - _Hashtable_const_iterator(_Hash_node<_Value, __cache>* __p, - _Hash_node<_Value, __cache>** __b) - : _Hashtable_iterator_base<_Value, __cache>(__p, __b) { } - - explicit - _Hashtable_const_iterator(_Hash_node<_Value, __cache>** __b) - : _Hashtable_iterator_base<_Value, __cache>(*__b, __b) { } - - _Hashtable_const_iterator(const _Hashtable_iterator<_Value, - __constant_iterators, __cache>& __x) - : _Hashtable_iterator_base<_Value, __cache>(__x._M_cur_node, - __x._M_cur_bucket) { } - - reference - operator*() const - { return this->_M_cur_node->_M_v; } - - pointer - operator->() const - { return std::__addressof(this->_M_cur_node->_M_v); } - - _Hashtable_const_iterator& - operator++() - { - this->_M_incr(); - return *this; - } - - _Hashtable_const_iterator - operator++(int) - { - _Hashtable_const_iterator __tmp(*this); - this->_M_incr(); - return __tmp; - } - }; - - // Many of class template _Hashtable's template parameters are policy // classes. These are defaults for the policies. @@ -388,7 +246,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION struct _Prime_rehash_policy { _Prime_rehash_policy(float __z = 1.0) - : _M_max_load_factor(__z), _M_growth_factor(2.f), _M_next_resize(0) { } + : _M_max_load_factor(__z), _M_prev_resize(0), _M_next_resize(0) { } float max_load_factor() const noexcept @@ -410,10 +268,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt, std::size_t __n_ins) const; + typedef std::pair<std::size_t, std::size_t> _State; + + _State + _M_state() const + { return std::make_pair(_M_prev_resize, _M_next_resize); } + + void + _M_reset(const _State& __state) + { + _M_prev_resize = __state.first; + _M_next_resize = __state.second; + } + enum { _S_n_primes = sizeof(unsigned long) != 8 ? 
256 : 256 + 48 }; float _M_max_load_factor; - float _M_growth_factor; + mutable std::size_t _M_prev_resize; mutable std::size_t _M_next_resize; }; @@ -429,15 +300,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION { // Optimize lookups involving the first elements of __prime_list. // (useful to speed-up, eg, constructors) - static const unsigned char __fast_bkt[12] + static const unsigned long __fast_bkt[12] = { 2, 2, 2, 3, 5, 5, 7, 7, 11, 11, 11, 11 }; - const unsigned long __p - = __n <= 11 ? __fast_bkt[__n] - : *std::lower_bound(__prime_list + 5, - __prime_list + _S_n_primes, __n); - _M_next_resize = __builtin_floor(__p * (long double)_M_max_load_factor); - return __p; + const unsigned long* __p + = __n <= 11 ? __fast_bkt + __n + : std::lower_bound(__prime_list + 5, + __prime_list + _S_n_primes, __n); + + _M_prev_resize = __builtin_floor(*__p * (long double)_M_max_load_factor); + if (__p != __fast_bkt) + _M_prev_resize = std::min(_M_prev_resize, + static_cast<std::size_t>(*(__p - 1))); + // Let's guarantee a minimal growth step of 11: + if (*__p - __n < 11) + __p = std::lower_bound(__prime_list + 5, + __prime_list + _S_n_primes, __n + 11); + _M_next_resize = __builtin_floor(*__p * (long double)_M_max_load_factor); + return *__p; } // Return the smallest prime p such that alpha p >= n, where alpha @@ -461,17 +341,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_need_rehash(std::size_t __n_bkt, std::size_t __n_elt, std::size_t __n_ins) const { - if (__n_elt + __n_ins > _M_next_resize) + if (__n_elt + __n_ins >= _M_next_resize) { - long double __min_bkts = ((__n_elt + __n_ins) - / (long double)_M_max_load_factor); - if (__min_bkts > __n_bkt) - { - __min_bkts = std::max(__min_bkts, (long double)_M_growth_factor - * __n_bkt); - return std::make_pair(true, - _M_next_bkt(__builtin_ceil(__min_bkts))); - } + long double __min_bkts = (__n_elt + __n_ins) + / (long double)_M_max_load_factor; + if (__min_bkts >= __n_bkt) + return std::make_pair(true, + _M_next_bkt(__builtin_floor(__min_bkts) + 
1)); else { _M_next_resize @@ -479,6 +355,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION return std::make_pair(false, 0); } } + else if (__n_elt + __n_ins < _M_prev_resize) + { + long double __min_bkts = (__n_elt + __n_ins) + / (long double)_M_max_load_factor; + return std::make_pair(true, + _M_next_bkt(__builtin_floor(__min_bkts) + 1)); + } else return std::make_pair(false, 0); } @@ -538,8 +421,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION std::size_t __n = __h->_M_bucket_index(__k, __code, __h->_M_bucket_count); - typename _Hashtable::_Node* __p = - __h->_M_find_node(__h->_M_buckets[__n], __k, __code); + typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code); if (!__p) return __h->_M_insert_bucket(std::make_pair(__k, mapped_type()), __n, __code)->second; @@ -557,8 +439,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION std::size_t __n = __h->_M_bucket_index(__k, __code, __h->_M_bucket_count); - typename _Hashtable::_Node* __p = - __h->_M_find_node(__h->_M_buckets[__n], __k, __code); + typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code); if (!__p) return __h->_M_insert_bucket(std::make_pair(std::move(__k), mapped_type()), @@ -577,8 +458,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION std::size_t __n = __h->_M_bucket_index(__k, __code, __h->_M_bucket_count); - typename _Hashtable::_Node* __p = - __h->_M_find_node(__h->_M_buckets[__n], __k, __code); + typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code); if (!__p) __throw_out_of_range(__N("_Map_base::at")); return (__p->_M_v).second; @@ -595,8 +475,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION std::size_t __n = __h->_M_bucket_index(__k, __code, __h->_M_bucket_count); - typename _Hashtable::_Node* __p = - __h->_M_find_node(__h->_M_buckets[__n], __k, __code); + typename _Hashtable::_Node* __p = __h->_M_find_node(__n, __k, __code); if (!__p) __throw_out_of_range(__N("_Map_base::at")); return (__p->_M_v).second; diff --git a/libstdc++-v3/include/bits/shared_ptr.h b/libstdc++-v3/include/bits/shared_ptr.h index 
32addf95105..33128dd4ed6 100644 --- a/libstdc++-v3/include/bits/shared_ptr.h +++ b/libstdc++-v3/include/bits/shared_ptr.h @@ -619,7 +619,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public __hash_base<size_t, shared_ptr<_Tp>> { size_t - operator()(const shared_ptr<_Tp>& __s) const + operator()(const shared_ptr<_Tp>& __s) const noexcept { return std::hash<_Tp*>()(__s.get()); } }; diff --git a/libstdc++-v3/include/bits/shared_ptr_base.h b/libstdc++-v3/include/bits/shared_ptr_base.h index fbbadd1aaaa..c0677547553 100644 --- a/libstdc++-v3/include/bits/shared_ptr_base.h +++ b/libstdc++-v3/include/bits/shared_ptr_base.h @@ -1450,7 +1450,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public __hash_base<size_t, __shared_ptr<_Tp, _Lp>> { size_t - operator()(const __shared_ptr<_Tp, _Lp>& __s) const + operator()(const __shared_ptr<_Tp, _Lp>& __s) const noexcept { return std::hash<_Tp*>()(__s.get()); } }; diff --git a/libstdc++-v3/include/bits/stl_bvector.h b/libstdc++-v3/include/bits/stl_bvector.h index 8f286400aec..bec63ff03f9 100644 --- a/libstdc++-v3/include/bits/stl_bvector.h +++ b/libstdc++-v3/include/bits/stl_bvector.h @@ -1075,7 +1075,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public __hash_base<size_t, _GLIBCXX_STD_C::vector<bool, _Alloc>> { size_t - operator()(const _GLIBCXX_STD_C::vector<bool, _Alloc>& __b) const; + operator()(const _GLIBCXX_STD_C::vector<bool, _Alloc>&) const noexcept; }; _GLIBCXX_END_NAMESPACE_VERSION diff --git a/libstdc++-v3/include/bits/stl_map.h b/libstdc++-v3/include/bits/stl_map.h index 45824f04079..f1c4cfefa57 100644 --- a/libstdc++-v3/include/bits/stl_map.h +++ b/libstdc++-v3/include/bits/stl_map.h @@ -617,6 +617,11 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER iterator erase(const_iterator __position) { return _M_t.erase(__position); } + + // LWG 2059. + iterator + erase(iterator __position) + { return _M_t.erase(__position); } #else /** * @brief Erases an element from a %map. 
diff --git a/libstdc++-v3/include/bits/stl_multimap.h b/libstdc++-v3/include/bits/stl_multimap.h index fd5a5a8cffb..ae4389e2709 100644 --- a/libstdc++-v3/include/bits/stl_multimap.h +++ b/libstdc++-v3/include/bits/stl_multimap.h @@ -536,6 +536,11 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER iterator erase(const_iterator __position) { return _M_t.erase(__position); } + + // LWG 2059. + iterator + erase(iterator __position) + { return _M_t.erase(__position); } #else /** * @brief Erases an element from a %multimap. diff --git a/libstdc++-v3/include/bits/stl_tree.h b/libstdc++-v3/include/bits/stl_tree.h index 8c5f0c30a0f..ee56bbc7525 100644 --- a/libstdc++-v3/include/bits/stl_tree.h +++ b/libstdc++-v3/include/bits/stl_tree.h @@ -767,6 +767,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION _M_erase_aux(__position); return __result._M_const_cast(); } + + // LWG 2059. + iterator + erase(iterator __position) + { + iterator __result = __position; + ++__result; + _M_erase_aux(__position); + return __result; + } #else void erase(iterator __position) diff --git a/libstdc++-v3/include/bits/unique_ptr.h b/libstdc++-v3/include/bits/unique_ptr.h index 869d931330c..0a127996e52 100644 --- a/libstdc++-v3/include/bits/unique_ptr.h +++ b/libstdc++-v3/include/bits/unique_ptr.h @@ -545,7 +545,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION : public __hash_base<size_t, unique_ptr<_Tp, _Dp>> { size_t - operator()(const unique_ptr<_Tp, _Dp>& __u) const + operator()(const unique_ptr<_Tp, _Dp>& __u) const noexcept { typedef unique_ptr<_Tp, _Dp> _UP; return std::hash<typename _UP::pointer>()(__u.get()); diff --git a/libstdc++-v3/include/bits/unordered_map.h b/libstdc++-v3/include/bits/unordered_map.h index c77bab12fba..7c810d3801f 100644 --- a/libstdc++-v3/include/bits/unordered_map.h +++ b/libstdc++-v3/include/bits/unordered_map.h @@ -40,7 +40,9 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER class _Hash = hash<_Key>, class _Pred = std::equal_to<_Key>, class _Alloc = std::allocator<std::pair<const _Key, _Tp> >, - bool 
__cache_hash_code = false> + bool __cache_hash_code = + __not_<__and_<is_integral<_Key>, + __detail::__is_noexcept_hash<_Key, _Hash>>>::value> class __unordered_map : public _Hashtable<_Key, std::pair<const _Key, _Tp>, _Alloc, std::_Select1st<std::pair<const _Key, _Tp> >, _Pred, @@ -109,7 +111,9 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER class _Hash = hash<_Key>, class _Pred = std::equal_to<_Key>, class _Alloc = std::allocator<std::pair<const _Key, _Tp> >, - bool __cache_hash_code = false> + bool __cache_hash_code = + __not_<__and_<is_integral<_Key>, + __detail::__is_noexcept_hash<_Key, _Hash>>>::value> class __unordered_multimap : public _Hashtable<_Key, std::pair<const _Key, _Tp>, _Alloc, diff --git a/libstdc++-v3/include/bits/unordered_set.h b/libstdc++-v3/include/bits/unordered_set.h index 12b7bda138f..9aef0829749 100644 --- a/libstdc++-v3/include/bits/unordered_set.h +++ b/libstdc++-v3/include/bits/unordered_set.h @@ -40,7 +40,9 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER class _Hash = hash<_Value>, class _Pred = std::equal_to<_Value>, class _Alloc = std::allocator<_Value>, - bool __cache_hash_code = false> + bool __cache_hash_code = + __not_<__and_<is_integral<_Value>, + __detail::__is_noexcept_hash<_Value, _Hash>>>::value> class __unordered_set : public _Hashtable<_Value, _Value, _Alloc, std::_Identity<_Value>, _Pred, @@ -121,7 +123,9 @@ _GLIBCXX_BEGIN_NAMESPACE_CONTAINER class _Hash = hash<_Value>, class _Pred = std::equal_to<_Value>, class _Alloc = std::allocator<_Value>, - bool __cache_hash_code = false> + bool __cache_hash_code = + __not_<__and_<is_integral<_Value>, + __detail::__is_noexcept_hash<_Value, _Hash>>>::value> class __unordered_multiset : public _Hashtable<_Value, _Value, _Alloc, std::_Identity<_Value>, _Pred, diff --git a/libstdc++-v3/include/bits/vector.tcc b/libstdc++-v3/include/bits/vector.tcc index b74684228c1..d9c3b659e6b 100644 --- a/libstdc++-v3/include/bits/vector.tcc +++ b/libstdc++-v3/include/bits/vector.tcc @@ -818,7 +818,7 @@ 
_GLIBCXX_BEGIN_NAMESPACE_VERSION template<typename _Alloc> size_t hash<_GLIBCXX_STD_C::vector<bool, _Alloc>>:: - operator()(const _GLIBCXX_STD_C::vector<bool, _Alloc>& __b) const + operator()(const _GLIBCXX_STD_C::vector<bool, _Alloc>& __b) const noexcept { size_t __hash = 0; using _GLIBCXX_STD_C::_S_word_bit; |