| author    | fdumont <fdumont@138bc75d-0d04-0410-961f-82ee72b054a4> | 2011-09-14 19:50:20 +0000 |
| committer | fdumont <fdumont@138bc75d-0d04-0410-961f-82ee72b054a4> | 2011-09-14 19:50:20 +0000 |
| commit    | c41cafd5576b6e48806ca9a3af3635cb41206593 (patch)        |                           |
| tree      | e57293aee651dc24fd4a5d2b5324dcce635cc539 /libstdc++-v3  |                           |
| parent    | 5902c6c2cc8006b8bb9352075eaade85beeb2ed5 (diff)         |                           |
| download  | gcc-c41cafd5576b6e48806ca9a3af3635cb41206593.tar.gz     |                           |
2011-09-14  François Dumont  <fdumont@gcc.gnu.org>
            Paolo Carlini  <paolo.carlini@oracle.com>

        * include/bits/hashtable.h (_Hashtable<>::_M_rehash): Take and restore
        hash policy _M_prev_resize on exception.
        (_Hashtable<>::_M_insert_bucket): Capture hash policy next resize
        before using it and use latter method to have it restored on exception.
        (_Hashtable<>::_M_insert(_Arg&& __v, std::false_type): Likewise.
        (_Hashtable<>::insert(_InputIterator, _InputIterator): Likewise.
        (_Hashtable<>::rehash): Likewise.
        * testsuite/23_containers/unordered_set/insert/hash_policy.cc: New.
        * testsuite/23_containers/unordered_multiset/insert/hash_policy.cc:
        Likewise.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@178861 138bc75d-0d04-0410-961f-82ee72b054a4
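Editor's note: the core of this patch is a save-and-restore of the rehash policy's pending-resize state around operations that can throw. The sketch below is a minimal, self-contained illustration of that pattern, not the libstdc++ code itself: ToyPolicy, ToyTable and all of their members are invented stand-ins for the roles played by _Prime_rehash_policy::_M_next_resize, _M_need_rehash and _Hashtable::_M_rehash in the patch.

```cpp
#include <cstddef>
#include <new>

// Invented stand-in for the rehash policy: next_resize mirrors the role of
// _M_next_resize, the element count at which the next rehash is scheduled.
struct ToyPolicy
{
  std::size_t next_resize = 0;

  // Decide whether growing by n_ins elements needs a rehash; like
  // _M_need_rehash, it updates next_resize as a side effect.
  bool need_rehash(std::size_t element_count, std::size_t n_ins)
  {
    if (element_count + n_ins > next_resize)
      {
        next_resize = (element_count + n_ins) * 2;  // simplistic growth rule
        return true;
      }
    return false;
  }
};

// Invented stand-in for the container.
struct ToyTable
{
  ToyPolicy policy;
  std::size_t element_count = 0;

  // Placeholder for bucket/node allocation; may throw std::bad_alloc.
  void rehash_buckets() { /* allocate a bigger bucket array here */ }

  void insert_one()
  {
    // 1. Capture the policy state *before* it is modified.
    const std::size_t saved_next_resize = policy.next_resize;
    try
      {
        if (policy.need_rehash(element_count, 1))
          rehash_buckets();              // may throw
        ++element_count;                 // commit only after success
      }
    catch (...)
      {
        // 2. Nothing was inserted, so roll the policy back; otherwise a
        //    later insert would wrongly assume the rehash already happened.
        policy.next_resize = saved_next_resize;
        throw;
      }
  }
};

int main()
{
  ToyTable t;
  t.insert_one();
  return 0;
}
```

The point mirrored from the patch: the policy is mutated before any allocation happens, so when the allocation throws and no element was actually inserted, the policy has to be rolled back, or later inserts would skip a rehash they still need and the load factor could drift above max_load_factor().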
Diffstat (limited to 'libstdc++-v3')
| -rw-r--r-- | libstdc++-v3/ChangeLog                                                         |  14 |
| -rw-r--r-- | libstdc++-v3/include/bits/hashtable.h                                          |  64 |
| -rw-r--r-- | libstdc++-v3/testsuite/23_containers/unordered_multiset/insert/hash_policy.cc |  69 |
| -rw-r--r-- | libstdc++-v3/testsuite/23_containers/unordered_set/insert/hash_policy.cc      | 113 |
4 files changed, 237 insertions, 23 deletions
diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 4631abc6905..8de9659bf88 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,17 @@
+2011-09-14  François Dumont  <fdumont@gcc.gnu.org>
+            Paolo Carlini  <paolo.carlini@oracle.com>
+
+        * include/bits/hashtable.h (_Hashtable<>::_M_rehash): Take and restore
+        hash policy _M_prev_resize on exception.
+        (_Hashtable<>::_M_insert_bucket): Capture hash policy next resize
+        before using it and use latter method to have it restored on exception.
+        (_Hashtable<>::_M_insert(_Arg&& __v, std::false_type): Likewise.
+        (_Hashtable<>::insert(_InputIterator, _InputIterator): Likewise.
+        (_Hashtable<>::rehash): Likewise.
+        * testsuite/23_containers/unordered_set/insert/hash_policy.cc: New.
+        * testsuite/23_containers/unordered_multiset/insert/hash_policy.cc:
+        Likewise.
+
 2011-09-13  Paul Brook  <paul@codesourcery.com>
 
         * libsupc++/eh_arm.cc (__cxa_end_cleanup): Add C6X implementation.
diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index ed9bdb380bc..b097ee78cb5 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -458,8 +458,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // reserve, if present, comes from _Rehash_base.
 
     private:
-      // Unconditionally change size of bucket array to n.
-      void _M_rehash(size_type __n);
+      // Unconditionally change size of bucket array to n, restore hash policy
+      // resize value to __next_resize on exception.
+      void _M_rehash(size_type __n, size_type __next_resize);
     };
 
 
@@ -743,7 +744,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_rehash_policy = __pol;
       size_type __n_bkt = __pol._M_bkt_for_elements(_M_element_count);
       if (__n_bkt > _M_bucket_count)
-        _M_rehash(__n_bkt);
+        _M_rehash(__n_bkt, __pol._M_next_resize);
     }
 
   template<typename _Key, typename _Value,
@@ -910,6 +911,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _M_insert_bucket(_Arg&& __v, size_type __n,
                      typename _Hashtable::_Hash_code_type __code)
     {
+      const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
       std::pair<bool, std::size_t> __do_rehash
        = _M_rehash_policy._M_need_rehash(_M_bucket_count,
                                          _M_element_count, 1);
@@ -920,14 +922,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
          __n = this->_M_bucket_index(__k, __code, __do_rehash.second);
        }
 
-      // Allocate the new node before doing the rehash so that we don't
-      // do a rehash if the allocation throws.
-      _Node* __new_node = _M_allocate_node(std::forward<_Arg>(__v));
-
+      _Node* __new_node = 0;
       __try
        {
+         // Allocate the new node before doing the rehash so that we
+         // don't do a rehash if the allocation throws.
+         __new_node = _M_allocate_node(std::forward<_Arg>(__v));
          if (__do_rehash.first)
-           _M_rehash(__do_rehash.second);
+           _M_rehash(__do_rehash.second, __saved_next_resize);
 
          __new_node->_M_next = _M_buckets[__n];
          this->_M_store_code(__new_node, __code);
@@ -939,7 +941,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
        }
       __catch(...)
        {
-         _M_deallocate_node(__new_node);
+         if (!__new_node)
+           _M_rehash_policy._M_next_resize = __saved_next_resize;
+         else
+           _M_deallocate_node(__new_node);
          __throw_exception_again;
        }
     }
@@ -981,11 +986,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
     _M_insert(_Arg&& __v, std::false_type)
     {
+      const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
      std::pair<bool, std::size_t> __do_rehash
        = _M_rehash_policy._M_need_rehash(_M_bucket_count,
                                          _M_element_count, 1);
      if (__do_rehash.first)
-       _M_rehash(__do_rehash.second);
+       _M_rehash(__do_rehash.second, __saved_next_resize);
 
      const key_type& __k = this->_M_extract(__v);
      typename _Hashtable::_Hash_code_type __code = this->_M_hash_code(__k);
@@ -1024,11 +1030,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
      insert(_InputIterator __first, _InputIterator __last)
      {
        size_type __n_elt = __detail::__distance_fw(__first, __last);
+       const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
        std::pair<bool, std::size_t> __do_rehash
          = _M_rehash_policy._M_need_rehash(_M_bucket_count,
                                            _M_element_count, __n_elt);
        if (__do_rehash.first)
-         _M_rehash(__do_rehash.second);
+         _M_rehash(__do_rehash.second, __saved_next_resize);
 
        for (; __first != __last; ++__first)
          this->insert(*__first);
@@ -1184,9 +1191,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
     rehash(size_type __n)
     {
+      const size_type __saved_next_resize = _M_rehash_policy._M_next_resize;
       _M_rehash(std::max(_M_rehash_policy._M_next_bkt(__n),
                         _M_rehash_policy._M_bkt_for_elements(_M_element_count
-                                                             + 1)));
+                                                             + 1)),
+               __saved_next_resize);
     }
 
   template<typename _Key, typename _Value,
@@ -1196,11 +1205,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Allocator, _ExtractKey, _Equal,
               _H1, _H2, _Hash, _RehashPolicy, __chc, __cit, __uk>::
-    _M_rehash(size_type __n)
+    _M_rehash(size_type __n, size_type __next_resize)
     {
-      _Node** __new_array = _M_allocate_buckets(__n);
+      _Node** __new_array = 0;
       __try
        {
+         __new_array = _M_allocate_buckets(__n);
          _M_begin_bucket_index = __n;
          for (size_type __i = 0; __i < _M_bucket_count; ++__i)
            while (_Node* __p = _M_buckets[__i])
@@ -1218,15 +1228,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
        }
       __catch(...)
        {
-         // A failure here means that a hash function threw an exception.
-         // We can't restore the previous state without calling the hash
-         // function again, so the only sensible recovery is to delete
-         // everything.
-         _M_deallocate_nodes(__new_array, __n);
-         _M_deallocate_buckets(__new_array, __n);
-         _M_deallocate_nodes(_M_buckets, _M_bucket_count);
-         _M_element_count = 0;
-         _M_begin_bucket_index = _M_bucket_count;
+         if (__new_array)
+           {
+             // A failure here means that a hash function threw an exception.
+             // We can't restore the previous state without calling the hash
+             // function again, so the only sensible recovery is to delete
+             // everything.
+             _M_deallocate_nodes(__new_array, __n);
+             _M_deallocate_buckets(__new_array, __n);
+             _M_deallocate_nodes(_M_buckets, _M_bucket_count);
+             _M_element_count = 0;
+             _M_begin_bucket_index = _M_bucket_count;
+             _M_rehash_policy._M_next_resize = 0;
+           }
+         else
+           // A failure here means that buckets allocation failed.  We only
+           // have to restore hash policy previous state.
+           _M_rehash_policy._M_next_resize = __next_resize;
          __throw_exception_again;
        }
     }
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/insert/hash_policy.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/insert/hash_policy.cc
new file mode 100644
index 00000000000..0950c13c80f
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/insert/hash_policy.cc
@@ -0,0 +1,69 @@
+// { dg-options "-std=gnu++0x" }
+
+// Copyright (C) 2011 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <unordered_set>
+#include <limits>
+#include <ext/throw_allocator.h>
+#include <testsuite_hooks.h>
+
+void test01()
+{
+  bool test __attribute__((unused)) = true;
+
+  typedef std::numeric_limits<size_t> nl_size_t;
+  std::unordered_multiset<int, std::hash<int>, std::equal_to<int>,
+                          __gnu_cxx::throw_allocator_limit<int> > us;
+  const int nb = 100;
+  int scheduled_throw_counter = 0;
+  std::size_t thrown_exceptions = 0;
+  for (int i = 0; i != nb; ++i)
+    {
+      if ((float)(us.size() + 1)
+          / (float)us.bucket_count() >= us.max_load_factor())
+        {
+          // We are going to need a rehash, lets introduce allocation issues:
+          __gnu_cxx::limit_condition::set_limit(scheduled_throw_counter++);
+        }
+      try
+        {
+          us.insert(i / 2);
+          scheduled_throw_counter = 0;
+        }
+      catch (const __gnu_cxx::forced_error&)
+        {
+          ++thrown_exceptions;
+          --i;
+        }
+      VERIFY( us.load_factor() <= us.max_load_factor() );
+      __gnu_cxx::limit_condition::set_limit(nl_size_t::max());
+    }
+
+  VERIFY( thrown_exceptions != 0 );
+  // Check that all values have been inserted:
+  for (int i = 0; i != nb / 2; ++i)
+    {
+      VERIFY( us.count(i) == 2 );
+    }
+}
+
+int main()
+{
+  test01();
+  return 0;
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/insert/hash_policy.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/insert/hash_policy.cc
new file mode 100644
index 00000000000..3d3f55abc78
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/insert/hash_policy.cc
@@ -0,0 +1,113 @@
+// { dg-options "-std=gnu++0x" }
+
+// Copyright (C) 2011 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#include <unordered_set>
+#include <vector>
+#include <limits>
+#include <ext/throw_allocator.h>
+#include <testsuite_hooks.h>
+
+void test01()
+{
+  bool test __attribute__((unused)) = true;
+
+  typedef std::numeric_limits<std::size_t> nl_size_t;
+  std::unordered_set<int, std::hash<int>, std::equal_to<int>,
+                     __gnu_cxx::throw_allocator_limit<int> > us;
+  const int nb = 100;
+  int scheduled_throw_counter = 0;
+  std::size_t thrown_exceptions = 0;
+  for (int i = 0; i != nb; ++i)
+    {
+      if ((float)(us.size() + 1)
+          / (float)us.bucket_count() >= us.max_load_factor())
+        {
+          // We are going to need a rehash, lets introduce allocation issues:
+          __gnu_cxx::limit_condition::set_limit(scheduled_throw_counter++);
+        }
+      try
+        {
+          VERIFY(us.insert(i).second);
+          scheduled_throw_counter = 0;
+        }
+      catch (const __gnu_cxx::forced_error&)
+        {
+          ++thrown_exceptions;
+          --i;
+        }
+      VERIFY( us.load_factor() <= us.max_load_factor() );
+      __gnu_cxx::limit_condition::set_limit(nl_size_t::max());
+    }
+
+  VERIFY( thrown_exceptions != 0 );
+  // Check that all values have been inserted:
+  for (int i = 0; i != nb; ++i)
+    {
+      VERIFY( us.count(i) == 1 );
+    }
+}
+
+void test02()
+{
+  bool test __attribute__((unused)) = true;
+
+  typedef std::numeric_limits<std::size_t> nl_size_t;
+  std::unordered_set<int, std::hash<int>, std::equal_to<int>,
+                     __gnu_cxx::throw_allocator_limit<int> > us;
+  const int nb = 100;
+  int scheduled_throw_counter = 0;
+  std::size_t thrown_exceptions = 0;
+  for (int i = 0; i != nb; ++i)
+    {
+      if ((float)(us.size() + 2)
+          / (float)us.bucket_count() >= us.max_load_factor())
+        {
+          // We are going to need a rehash, lets introduce allocation issues:
+          __gnu_cxx::limit_condition::set_limit(scheduled_throw_counter++);
+        }
+      try
+        {
+          std::vector<int> v = { i, i };
+          // Check the insert range robustness
+          us.insert(v.begin(), v.end());
+          scheduled_throw_counter = 0;
+        }
+      catch (const __gnu_cxx::forced_error&)
+        {
+          ++thrown_exceptions;
+          --i;
+        }
+      VERIFY( us.load_factor() <= us.max_load_factor() );
+      __gnu_cxx::limit_condition::set_limit(nl_size_t::max());
+    }
+
+  VERIFY( thrown_exceptions != 0 );
+  // Check that all values have been inserted:
+  for (int i = 0; i != nb; ++i)
+    {
+      VERIFY( us.count(i) == 1 );
+    }
+}
+
+int main()
+{
+  test01();
+  test02();
+  return 0;
+}
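Editor's note: the new tests rely on the GNU-specific __gnu_cxx::throw_allocator_limit to force allocation failures at rehash points. A roughly equivalent, portable check can be written with a hand-rolled allocator; the snippet below is an illustrative sketch under that assumption (ThrowingAllocator and fail_next are invented names, and its one-shot failure scheme is deliberately simpler than the escalating limit_condition counter used in the testsuite).

```cpp
#include <unordered_set>
#include <functional>
#include <cstddef>
#include <new>
#include <cassert>

// One-shot failure switch: when armed, the next allocation throws bad_alloc.
static bool fail_next = false;

// Hypothetical, minimal stand-in for __gnu_cxx::throw_allocator_limit.
template<typename T>
struct ThrowingAllocator
{
  using value_type = T;
  ThrowingAllocator() = default;
  template<typename U> ThrowingAllocator(const ThrowingAllocator<U>&) {}

  T* allocate(std::size_t n)
  {
    if (fail_next)
      {
        fail_next = false;
        throw std::bad_alloc();
      }
    return static_cast<T*>(::operator new(n * sizeof(T)));
  }
  void deallocate(T* p, std::size_t) { ::operator delete(p); }
};

template<typename T, typename U>
bool operator==(const ThrowingAllocator<T>&, const ThrowingAllocator<U>&) { return true; }
template<typename T, typename U>
bool operator!=(const ThrowingAllocator<T>&, const ThrowingAllocator<U>&) { return false; }

int main()
{
  std::unordered_set<int, std::hash<int>, std::equal_to<int>,
                     ThrowingAllocator<int> > s;
  for (int i = 0; i != 100; ++i)
    {
      // Arm a failure whenever the next insert would trigger a rehash.
      if (s.bucket_count() != 0
          && float(s.size() + 1) / float(s.bucket_count()) >= s.max_load_factor())
        fail_next = true;
      try
        {
          s.insert(i);
        }
      catch (const std::bad_alloc&)
        {
          --i;  // the failed value is simply retried on the next iteration
        }
      fail_next = false;
      // The container must still honour its load-factor invariant.
      assert(s.load_factor() <= s.max_load_factor());
    }
  assert(s.size() == 100);
  return 0;
}
```

As in the testsuite, the two properties checked after each attempted insert are that load_factor() never exceeds max_load_factor() and that a value whose insertion failed can be inserted again afterwards.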