summaryrefslogtreecommitdiff
path: root/libstdc++-v3/include/ext
diff options
context:
space:
mode:
authorbkoz <bkoz@138bc75d-0d04-0410-961f-82ee72b054a4>2003-06-11 15:52:11 +0000
committerbkoz <bkoz@138bc75d-0d04-0410-961f-82ee72b054a4>2003-06-11 15:52:11 +0000
commit93666440745e2eb59c05d7ca9c202f1d962fb2c7 (patch)
tree61989852861e8690ab5a873c759b5218e2280ca7 /libstdc++-v3/include/ext
parentf7874cd76589d7ea2fd729e956469565069cdf76 (diff)
downloadgcc-93666440745e2eb59c05d7ca9c202f1d962fb2c7.tar.gz
2003-06-11 Benjamin Kosnik <bkoz@redhat.com>
* include/bits/stl_alloc.h (__debug_alloc): Move out. (__malloc_alloc): Same. (__pool_alloc): Same. (__new_alloc): Same. Rename to.. * include/bits/allocator.h: ...this. * include/bits/stl_deque.h: Modify comment. * include/bits/stl_tree.h: Modify include. * include/std/std_memory.h: Same. * include/ext/rope: Same. * include/ext/slist: Same. * include/std/std_vector.h: Same. * include/std/std_stack.h: Same. * include/std/std_queue.h: Same. * include/std/std_list.h: Same. * include/std/std_deque.h: Same. * include/backward/alloc.h: Same. * include/ext/debug_allocator.h: New. * include/ext/malloc_allocator.h: New. * include/ext/pool_allocator.h: New. * include/ext/new_allocator.h: New. * include/bits/pthread_allocimpl.h: Remove. * include/bits/stl_pthread_alloc.h: Remove. * include/Makefile.am (ext_headers): Add. * include/Makefile.in: Regenerate. * src/stl-inst.cc: Use __gnu_cxx namespace. * src/stl-inst.cc: Move to... * src/allocator-inst.cc: Here. * src/Makefile.am (sources): Update. * src/Makefile.in: Regenerate. * config/linker-map.gnu: Remove __pool_alloc bits. * testsuite/ext/headers.cc: Add. * testsuite/ext/allocators.cc: Fixup. 2003-06-11 Stefan Olsson <stefan@snon.net> Ola Rönnerup <fnolis@home.se> * include/Makefile.am (ext_headers): Add. * include/Makefile.in: Regenerate. * include/ext/mt_allocator.h: New file. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@67777 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libstdc++-v3/include/ext')
-rw-r--r--libstdc++-v3/include/ext/debug_allocator.h139
-rw-r--r--libstdc++-v3/include/ext/malloc_allocator.h163
-rw-r--r--libstdc++-v3/include/ext/mt_allocator.h861
-rw-r--r--libstdc++-v3/include/ext/new_allocator.h58
-rw-r--r--libstdc++-v3/include/ext/pool_allocator.h383
-rw-r--r--libstdc++-v3/include/ext/rope2
-rw-r--r--libstdc++-v3/include/ext/slist2
7 files changed, 1606 insertions, 2 deletions
diff --git a/libstdc++-v3/include/ext/debug_allocator.h b/libstdc++-v3/include/ext/debug_allocator.h
new file mode 100644
index 00000000000..0f7eb0c653d
--- /dev/null
+++ b/libstdc++-v3/include/ext/debug_allocator.h
@@ -0,0 +1,139 @@
+// Allocators -*- C++ -*-
+
+// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+/*
+ * Copyright (c) 1996-1997
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ */
+
+/** @file ext/debug_allocator.h
+ * This file is a GNU extension to the Standard C++ Library.
+ * You should only include this header if you are using GCC 3 or later.
+ */
+
+#ifndef _DEBUG_ALLOCATOR_H
+#define _DEBUG_ALLOCATOR_H 1
+
+#include <bits/allocator_traits.h>
+
+namespace __gnu_cxx
+{
+ /**
+ * @if maint
+ * An adaptor for an underlying allocator (_Alloc) to check the size
+ * arguments for debugging.
+ *
+ * "There is some evidence that this can confuse Purify." - SGI comment
+ *
+ * This adaptor is "SGI" style. The _Alloc parameter must also be "SGI".
+ * @endif
+ * (See @link Allocators allocators info @endlink for more.)
+ */
+ template<typename _Alloc>
+ class __debug_alloc
+ {
+ private:
+ // Size of space used to store size. Note that this must be
+ // large enough to preserve alignment.
+ enum {_S_extra = 8};
+
+ public:
+ static void*
+ allocate(size_t __n)
+ {
+ char* __result = (char*)_Alloc::allocate(__n + (int) _S_extra);
+ *(size_t*)__result = __n;
+ return __result + (int) _S_extra;
+ }
+
+ static void
+ deallocate(void* __p, size_t __n)
+ {
+ char* __real_p = (char*)__p - (int) _S_extra;
+ if (*(size_t*)__real_p != __n)
+ abort();
+ _Alloc::deallocate(__real_p, __n + (int) _S_extra);
+ }
+ };
+
+ //@{
+ /** Comparison operators for all of the predifined SGI-style allocators.
+ * This ensures that __allocator<malloc_alloc> (for example) will work
+ * correctly. As required, all allocators compare equal.
+ */
+ template<typename _Alloc>
+ inline bool
+ operator==(const __debug_alloc<_Alloc>&, const __debug_alloc<_Alloc>&)
+ { return true; }
+
+ template<typename _Alloc>
+ inline bool
+ operator!=(const __debug_alloc<_Alloc>&, const __debug_alloc<_Alloc>&)
+ { return false; }
+ //@}
+} // namespace __gnu_cxx
+
+namespace std
+{
+ //@{
+ /// Versions for the predefined "SGI" style allocators.
+ template<typename _Tp, typename _Alloc>
+ struct _Alloc_traits<_Tp, __gnu_cxx::__debug_alloc<_Alloc> >
+ {
+ static const bool _S_instanceless = true;
+ typedef __gnu_cxx::__debug_alloc<_Alloc> base_alloc_type;
+ typedef __simple_alloc<_Tp, base_alloc_type> _Alloc_type;
+ typedef __allocator<_Tp, base_alloc_type> allocator_type;
+ };
+ //@}
+
+ //@{
+ /// Versions for the __allocator adaptor used with the predefined
+ /// "SGI" style allocators.
+ template<typename _Tp, typename _Tp1, typename _Alloc>
+ struct _Alloc_traits<_Tp, __allocator<_Tp1,
+ __gnu_cxx::__debug_alloc<_Alloc> > >
+ {
+ static const bool _S_instanceless = true;
+ typedef __gnu_cxx::__debug_alloc<_Alloc> base_alloc_type;
+ typedef __simple_alloc<_Tp, base_alloc_type> _Alloc_type;
+ typedef __allocator<_Tp, base_alloc_type> allocator_type;
+ };
+ //@}
+} // namespace std
+
+#endif
diff --git a/libstdc++-v3/include/ext/malloc_allocator.h b/libstdc++-v3/include/ext/malloc_allocator.h
new file mode 100644
index 00000000000..410b296e09d
--- /dev/null
+++ b/libstdc++-v3/include/ext/malloc_allocator.h
@@ -0,0 +1,163 @@
+// Allocators -*- C++ -*-
+
+// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+/*
+ * Copyright (c) 1996-1997
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ */
+
+/** @file ext/malloc_allocator.h
+ * This file is a GNU extension to the Standard C++ Library.
+ * You should only include this header if you are using GCC 3 or later.
+ */
+
+#ifndef _MALLOC_ALLOCATOR_H
+#define _MALLOC_ALLOCATOR_H 1
+
+#include <bits/allocator_traits.h>
+
+namespace __gnu_cxx
+{
+ /**
+ * @if maint
+ * A malloc-based allocator. Typically slower than the
+ * __pool_alloc (below). Typically thread-safe and more
+ * storage efficient. The template argument is unused and is only present
+ * to permit multiple instantiations (but see __pool_alloc
+ * for caveats). "SGI" style, plus __set_malloc_handler for OOM conditions.
+ * @endif
+ * (See @link Allocators allocators info @endlink for more.)
+ */
+ template<int __inst>
+ class __malloc_alloc
+ {
+ private:
+ static void* _S_oom_malloc(size_t);
+ static void (* __malloc_alloc_oom_handler)();
+
+ public:
+ static void*
+ allocate(size_t __n)
+ {
+ void* __result = malloc(__n);
+ if (__builtin_expect(__result == 0, 0))
+ __result = _S_oom_malloc(__n);
+ return __result;
+ }
+
+ static void
+ deallocate(void* __p, size_t /* __n */)
+ { free(__p); }
+
+ static void (* __set_malloc_handler(void (*__f)()))()
+ {
+ void (* __old)() = __malloc_alloc_oom_handler;
+ __malloc_alloc_oom_handler = __f;
+ return __old;
+ }
+ };
+
+ // malloc_alloc out-of-memory handling
+ template<int __inst>
+ void (* __malloc_alloc<__inst>::__malloc_alloc_oom_handler)() = 0;
+
+ template<int __inst>
+ void*
+ __malloc_alloc<__inst>::
+ _S_oom_malloc(size_t __n)
+ {
+ void (* __my_malloc_handler)();
+ void* __result;
+
+ for (;;)
+ {
+ __my_malloc_handler = __malloc_alloc_oom_handler;
+ if (__builtin_expect(__my_malloc_handler == 0, 0))
+ __throw_bad_alloc();
+ (*__my_malloc_handler)();
+ __result = malloc(__n);
+ if (__result)
+ return __result;
+ }
+ }
+ //@{
+ /** Comparison operators for all of the predifined SGI-style allocators.
+ * This ensures that __allocator<malloc_alloc> (for example) will work
+ * correctly. As required, all allocators compare equal.
+ */
+ template<int inst>
+ inline bool
+ operator==(const __malloc_alloc<inst>&, const __malloc_alloc<inst>&)
+ { return true; }
+
+ template<int __inst>
+ inline bool
+ operator!=(const __malloc_alloc<__inst>&, const __malloc_alloc<__inst>&)
+ { return false; }
+ //@}
+} // namespace __gnu_cxx
+
+namespace std
+{
+ //@{
+ /// Versions for the predefined "SGI" style allocators.
+ template<typename _Tp, int __inst>
+ struct _Alloc_traits<_Tp, __gnu_cxx::__malloc_alloc<__inst> >
+ {
+ static const bool _S_instanceless = true;
+ typedef __gnu_cxx:: __malloc_alloc<__inst> base_alloc_type;
+ typedef __simple_alloc<_Tp, base_alloc_type> _Alloc_type;
+ typedef __allocator<_Tp, base_alloc_type> allocator_type;
+ };
+ //@}
+
+ //@{
+ /// Versions for the __allocator adaptor used with the predefined
+ /// "SGI" style allocators.
+ template<typename _Tp, typename _Tp1, int __inst>
+ struct _Alloc_traits<_Tp, __allocator<_Tp1,
+ __gnu_cxx::__malloc_alloc<__inst> > >
+ {
+ static const bool _S_instanceless = true;
+ typedef __gnu_cxx:: __malloc_alloc<__inst> base_alloc_type;
+ typedef __simple_alloc<_Tp, base_alloc_type> _Alloc_type;
+ typedef __allocator<_Tp, base_alloc_type> allocator_type;
+ };
+ //@}
+} // namespace std
+
+#endif
diff --git a/libstdc++-v3/include/ext/mt_allocator.h b/libstdc++-v3/include/ext/mt_allocator.h
new file mode 100644
index 00000000000..1e95b55f467
--- /dev/null
+++ b/libstdc++-v3/include/ext/mt_allocator.h
@@ -0,0 +1,861 @@
+// MT-optimized allocator -*- C++ -*-
+
+// Copyright (C) 2003 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+/** @file ext/mt_allocator.h
+ * This file is a GNU extension to the Standard C++ Library.
+ * You should only include this header if you are using GCC 3 or later.
+ */
+
+#ifndef _MT_ALLOCATOR_H
+#define _MT_ALLOCATOR_H 1
+
+#include <cstdlib>
+#include <bits/functexcept.h>
+#include <bits/stl_threads.h>
+#include <bits/atomicity.h>
+#include <bits/allocator_traits.h>
+
+/**
+ * This is a fixed size (power of 2) allocator which - when compiled
+ * with thread support - will maintain one freelist per size per thread
+ * plus a "global" one. Steps are taken to limit the per thread freelist
+ * sizes (by returning excess back to "global").
+ *
+ * Usage examples:
+ * vector<int, __gnu_cxx::__mt_alloc<0> > v1;
+ *
+ * typedef std::__allocator<char, __gnu_cxx::__mt_alloc<0> > string_alloc;
+ * std::basic_string<char, std::char_traits<char>, string_alloc> s1;
+ */
+
+namespace __gnu_cxx
+{
+ template<int __inst>
+ class __mt_alloc
+ {
+ private:
+ /*
+ * We need to create the initial lists and set up some variables
+ * before we can answer to the first request for memory.
+ * The initialization of these variables is done at file scope
+ * below class declaration.
+ */
+#ifdef __GTHREADS
+ static __gthread_once_t _S_once_mt;
+#endif
+ static bool _S_initialized;
+
+ /*
+ * Using short int as type for the binmap implies we are never caching
+ * blocks larger than 65535 with this allocator
+ */
+ typedef unsigned short int binmap_type;
+ static binmap_type* _S_binmap;
+
+ static void _S_init();
+
+ /*
+ * Variables used to "tune" the behavior of the allocator, assigned
+ * and explained in detail below.
+ */
+ static size_t _S_max_bytes;
+ static size_t _S_chunk_size;
+ static size_t _S_max_threads;
+ static size_t _S_no_of_bins;
+ static size_t _S_freelist_headroom;
+
+ /*
+ * Each requesting thread is assigned an id ranging from 1 to
+ * _S_max_threads. Thread id 0 is used as a global memory pool.
+ * In order to get constant performance on the thread assignment
+ * routine, we keep a list of free ids. When a thread first requests
+ * memory we remove the first record in this list and stores the address
+ * in a __gthread_key. When initializing the __gthread_key
+ * we specify a destructor. When this destructor (i.e. the thread dies)
+ * is called, we return the thread id to the back of this list.
+ */
+#ifdef __GTHREADS
+ struct thread_record
+ {
+ /*
+ * Points to next free thread id record. NULL if last record in list.
+ */
+ thread_record* next;
+
+ /*
+ * Thread id ranging from 1 to _S_max_threads.
+ */
+ size_t id;
+ };
+
+ static thread_record* _S_thread_freelist_first;
+ static thread_record* _S_thread_freelist_last;
+ static __gthread_mutex_t _S_thread_freelist_mutex;
+ static void _S_thread_key_destr(void* freelist_pos);
+ static __gthread_key_t _S_thread_key;
+ static size_t _S_get_thread_id();
+#endif
+
+ struct block_record
+ {
+ /*
+ * Points to the next block_record for its thread_id.
+ */
+ block_record* next;
+
+ /*
+ * The thread id of the thread which has requested this block.
+ * All blocks are initially "owned" by global pool thread id 0.
+ */
+ size_t thread_id;
+ };
+
+ struct bin_record
+ {
+ /*
+ * An "array" of pointers to the first/last free block for each
+ * thread id. Memory to these "arrays" is allocated in _S_init()
+ * for _S_max_threads + global pool 0.
+ */
+ block_record** first;
+ block_record** last;
+
+ /*
+ * An "array" of counters used to keep track of the amount of blocks
+ * that are on the freelist/used for each thread id.
+ * Memory to these "arrays" is allocated in _S_init()
+ * for _S_max_threads + global pool 0.
+ */
+ size_t* free;
+ size_t* used;
+
+ /*
+ * Each bin has its own mutex which is used to ensure data integrity
+ * while changing "ownership" on a block.
+ * The mutex is initialized in _S_init().
+ */
+#ifdef __GTHREADS
+ __gthread_mutex_t* mutex;
+#endif
+ };
+
+ /*
+ * An "array" of bin_records each of which represents a specific
+ * power of 2 size. Memory to this "array" is allocated in _S_init().
+ */
+ static bin_record* _S_bin;
+
+ public:
+ static void*
+ allocate(size_t __n)
+ {
+
+ /*
+ * Requests larger than _S_max_bytes are handled by
+ * malloc/free directly
+ */
+ if (__n > _S_max_bytes)
+ {
+ void* __ret = malloc(__n);
+ if (!__ret)
+ __throw_bad_alloc();
+
+ return __ret;
+ }
+
+ /*
+ * Although the test in __gthread_once() would suffice, we
+ * wrap test of the once condition in our own unlocked
+ * check. This saves one function call to pthread_once()
+ * (which itself only tests for the once value unlocked anyway
+ * and immediately returns if set)
+ */
+ if (!_S_initialized)
+ {
+#ifdef __GTHREADS
+ if (__gthread_active_p())
+ __gthread_once(&_S_once_mt, _S_init);
+ else
+#endif
+ {
+ _S_max_threads = 0;
+ _S_init();
+ }
+ }
+
+ /*
+ * Round up to power of 2 and figure out which bin to use
+ */
+ size_t bin = _S_binmap[__n];
+
+#ifdef __GTHREADS
+ size_t thread_id = _S_get_thread_id();
+#else
+ size_t thread_id = 0;
+#endif
+
+ block_record* block;
+
+ /*
+ * Find out if we have blocks on our freelist.
+ * If so, go ahead and use them directly without
+ * having to lock anything.
+ */
+ if (_S_bin[bin].first[thread_id] == NULL)
+ {
+ /*
+ * Are we using threads?
+ * - Yes, lock and check if there are free blocks on the global
+ * list (and if not add new ones), get the first one
+ * and change owner.
+ * - No, all operations are made directly to global pool 0
+ * no need to lock or change ownership but check for free
+ * blocks on global list (and if not add new ones) and
+ * get the first one.
+ */
+#ifdef __GTHREADS
+ if (__gthread_active_p())
+ {
+ __gthread_mutex_lock(_S_bin[bin].mutex);
+
+ if (_S_bin[bin].first[0] == NULL)
+ {
+ _S_bin[bin].first[0] =
+ (block_record*)malloc(_S_chunk_size);
+
+ if (!_S_bin[bin].first[0])
+ {
+ __gthread_mutex_unlock(_S_bin[bin].mutex);
+ __throw_bad_alloc();
+ }
+
+ size_t bin_t = 1 << bin;
+ size_t block_count =
+ _S_chunk_size /(bin_t + sizeof(block_record));
+
+ _S_bin[bin].free[0] = block_count;
+
+ block_count--;
+ block = _S_bin[bin].first[0];
+
+ while (block_count > 0)
+ {
+ block->next = (block_record*)((char*)block +
+ (bin_t + sizeof(block_record)));
+ block = block->next;
+ block_count--;
+ }
+
+ block->next = NULL;
+ _S_bin[bin].last[0] = block;
+ }
+
+ block = _S_bin[bin].first[0];
+
+ /*
+ * Remove from list and count down the available counter on
+ * global pool 0.
+ */
+ _S_bin[bin].first[0] = _S_bin[bin].first[0]->next;
+ _S_bin[bin].free[0]--;
+
+ __gthread_mutex_unlock(_S_bin[bin].mutex);
+
+ /*
+ * Now that we have removed the block from the global
+ * freelist we can change owner and update the used
+ * counter for this thread without locking.
+ */
+ block->thread_id = thread_id;
+ _S_bin[bin].used[thread_id]++;
+ }
+ else
+#endif
+ {
+ _S_bin[bin].first[0] = (block_record*)malloc(_S_chunk_size);
+
+ if (!_S_bin[bin].first[0])
+ __throw_bad_alloc();
+
+ size_t bin_t = 1 << bin;
+ size_t block_count =
+ _S_chunk_size / (bin_t + sizeof(block_record));
+
+ _S_bin[bin].free[0] = block_count;
+
+ block_count--;
+ block = _S_bin[bin].first[0];
+
+ while (block_count > 0)
+ {
+ block->next = (block_record*)((char*)block +
+ (bin_t + sizeof(block_record)));
+ block = block->next;
+ block_count--;
+ }
+
+ block->next = NULL;
+ _S_bin[bin].last[0] = block;
+
+ block = _S_bin[bin].first[0];
+
+ /*
+ * Remove from list and count down the available counter on
+ * global pool 0 and increase it's used counter.
+ */
+ _S_bin[bin].first[0] = _S_bin[bin].first[0]->next;
+ _S_bin[bin].free[0]--;
+ _S_bin[bin].used[0]++;
+ }
+ }
+ else
+ {
+ /*
+ * "Default" operation - we have blocks on our own freelist
+ * grab the first record and update the counters.
+ */
+ block = _S_bin[bin].first[thread_id];
+
+ _S_bin[bin].first[thread_id] = _S_bin[bin].first[thread_id]->next;
+ _S_bin[bin].free[thread_id]--;
+ _S_bin[bin].used[thread_id]++;
+ }
+
+ return (void*)((char*)block + sizeof(block_record));
+ }
+
+ static void
+ deallocate(void* __p, size_t __n)
+ {
+ /*
+ * Requests larger than _S_max_bytes are handled by
+ * malloc/free directly
+ */
+ if (__n > _S_max_bytes)
+ {
+ free(__p);
+ return;
+ }
+
+ /*
+ * Round up to power of 2 and figure out which bin to use
+ */
+ size_t bin = _S_binmap[__n];
+
+#ifdef __GTHREADS
+ size_t thread_id = _S_get_thread_id();
+#else
+ size_t thread_id = 0;
+#endif
+
+ block_record* block = (block_record*)((char*)__p
+ - sizeof(block_record));
+
+ /*
+ * This block will always be at the back of a list and thus
+ * we set its next pointer to NULL.
+ */
+ block->next = NULL;
+
+#ifdef __GTHREADS
+ if (__gthread_active_p())
+ {
+ /*
+ * Calculate the number of records to remove from our freelist
+ */
+ int remove = _S_bin[bin].free[thread_id] -
+ (_S_bin[bin].used[thread_id] / _S_freelist_headroom);
+
+ /*
+ * The calculation above will almost always tell us to
+ * remove one or two records at a time, but this creates
+ * too much contention when locking and therefore we
+ * wait until the number of records is "high enough".
+ */
+ if (remove > (int)(100 * (_S_no_of_bins - bin)) &&
+ remove > (int)(_S_bin[bin].free[thread_id] /
+ _S_freelist_headroom))
+ {
+ __gthread_mutex_lock(_S_bin[bin].mutex);
+
+ while (remove > 0)
+ {
+ if (_S_bin[bin].first[0] == NULL)
+ _S_bin[bin].first[0] = _S_bin[bin].first[thread_id];
+ else
+ _S_bin[bin].last[0]->next = _S_bin[bin].first[thread_id];
+
+ _S_bin[bin].last[0] = _S_bin[bin].first[thread_id];
+
+ _S_bin[bin].first[thread_id] =
+ _S_bin[bin].first[thread_id]->next;
+
+ _S_bin[bin].free[0]++;
+ _S_bin[bin].free[thread_id]--;
+
+ remove--;
+ }
+
+ _S_bin[bin].last[0]->next = NULL;
+
+ __gthread_mutex_unlock(_S_bin[bin].mutex);
+ }
+
+ /*
+ * Did we allocate this block?
+ * - Yes, return it to our freelist
+ * - No, return it to global pool
+ */
+ if (thread_id == block->thread_id)
+ {
+ if (_S_bin[bin].first[thread_id] == NULL)
+ _S_bin[bin].first[thread_id] = block;
+ else
+ _S_bin[bin].last[thread_id]->next = block;
+
+ _S_bin[bin].last[thread_id] = block;
+
+ _S_bin[bin].free[thread_id]++;
+ _S_bin[bin].used[thread_id]--;
+ }
+ else
+ {
+ __gthread_mutex_lock(_S_bin[bin].mutex);
+
+ if (_S_bin[bin].first[0] == NULL)
+ _S_bin[bin].first[0] = block;
+ else
+ _S_bin[bin].last[0]->next = block;
+
+ _S_bin[bin].last[0] = block;
+
+ _S_bin[bin].free[0]++;
+ _S_bin[bin].used[block->thread_id]--;
+
+ __gthread_mutex_unlock(_S_bin[bin].mutex);
+ }
+ }
+ else
+#endif
+ {
+ /*
+ * Single threaded application - return to global pool
+ */
+ if (_S_bin[bin].first[0] == NULL)
+ _S_bin[bin].first[0] = block;
+ else
+ _S_bin[bin].last[0]->next = block;
+
+ _S_bin[bin].last[0] = block;
+
+ _S_bin[bin].free[0]++;
+ _S_bin[bin].used[0]--;
+ }
+ }
+ };
+
+ template<int __inst>
+ void
+ __mt_alloc<__inst>::
+ _S_init()
+ {
+ /*
+ * Calculate the number of bins required based on _S_max_bytes,
+ * _S_no_of_bins is initialized to 1 below.
+ */
+ {
+ size_t bin_t = 1;
+ while (_S_max_bytes > bin_t)
+ {
+ bin_t = bin_t << 1;
+ _S_no_of_bins++;
+ }
+ }
+
+ /*
+ * Setup the bin map for quick lookup of the relevant bin
+ */
+ _S_binmap = (binmap_type*)
+ malloc ((_S_max_bytes + 1) * sizeof(binmap_type));
+
+ if (!_S_binmap)
+ __throw_bad_alloc();
+
+ binmap_type* bp_t = _S_binmap;
+ binmap_type bin_max_t = 1;
+ binmap_type bin_t = 0;
+ for (binmap_type ct = 0; ct <= _S_max_bytes; ct++)
+ {
+ if (ct > bin_max_t)
+ {
+ bin_max_t <<= 1;
+ bin_t++;
+ }
+ *bp_t++ = bin_t;
+ }
+
+ /*
+ * If __gthread_active_p() create and initialize the list of
+ * free thread ids. Single threaded applications use thread id 0
+ * directly and have no need for this.
+ */
+#ifdef __GTHREADS
+ if (__gthread_active_p())
+ {
+ _S_thread_freelist_first =
+ (thread_record*)malloc(sizeof(thread_record) * _S_max_threads);
+
+ if (!_S_thread_freelist_first)
+ __throw_bad_alloc();
+
+ /*
+ * NOTE! The first assignable thread id is 1 since the global
+ * pool uses id 0
+ */
+ size_t i;
+ for (i = 1; i < _S_max_threads; i++)
+ {
+ _S_thread_freelist_first[i - 1].next =
+ &_S_thread_freelist_first[i];
+
+ _S_thread_freelist_first[i - 1].id = i;
+ }
+
+ /*
+ * Set last record and pointer to this
+ */
+ _S_thread_freelist_first[i - 1].next = NULL;
+ _S_thread_freelist_first[i - 1].id = i;
+ _S_thread_freelist_last = &_S_thread_freelist_first[i - 1];
+
+ /*
+ * Initialize per thread key to hold pointer to
+ * _S_thread_freelist NOTE! Here's an ugly workaround - if
+ * _S_thread_key_destr is not explicitly called at least
+ * once it won't be linked into the application. This is the
+ * behavior of template methods and __gthread_key_create()
+ * takes only a pointer to the function and does not cause
+ * the compiler to create an instance.
+ */
+ _S_thread_key_destr(NULL);
+ __gthread_key_create(&_S_thread_key, _S_thread_key_destr);
+ }
+#endif
+
+ /*
+ * Initialize _S_bin and its members
+ */
+ _S_bin = (bin_record*)malloc(sizeof(bin_record) * _S_no_of_bins);
+
+ if (!_S_bin)
+ __throw_bad_alloc();
+
+ for (size_t bin = 0; bin < _S_no_of_bins; bin++)
+ {
+ _S_bin[bin].first = (block_record**)
+ malloc(sizeof(block_record*) * (_S_max_threads + 1));
+
+ if (!_S_bin[bin].first)
+ __throw_bad_alloc();
+
+ _S_bin[bin].last = (block_record**)
+ malloc(sizeof(block_record*) * (_S_max_threads + 1));
+
+ if (!_S_bin[bin].last)
+ __throw_bad_alloc();
+
+ _S_bin[bin].free = (size_t*)
+ malloc(sizeof(size_t) * (_S_max_threads + 1));
+
+ if (!_S_bin[bin].free)
+ __throw_bad_alloc();
+
+ _S_bin[bin].used = (size_t*)
+ malloc(sizeof(size_t) * (_S_max_threads + 1));
+
+ if (!_S_bin[bin].used)
+ __throw_bad_alloc();
+
+ /*
+ * Ugly workaround of what at the time of writing seems to be
+ * a parser problem - see PR c++/9779 for more info.
+ */
+#ifdef __GTHREADS
+ size_t s = sizeof(__gthread_mutex_t);
+ _S_bin[bin].mutex = (__gthread_mutex_t*)malloc(s);
+
+ if (!_S_bin[bin].mutex)
+ __throw_bad_alloc();
+
+ /*
+ * This is not only ugly - it's extremly non-portable!
+ * However gthr.h does not currently provide a
+ * __gthread_mutex_init() call. The correct solution to
+ * this problem needs to be discussed.
+ */
+ pthread_mutex_init(_S_bin[bin].mutex, NULL);
+#endif
+
+ for (size_t thread = 0; thread <= _S_max_threads; thread++)
+ {
+ _S_bin[bin].first[thread] = NULL;
+ _S_bin[bin].last[thread] = NULL;
+ _S_bin[bin].free[thread] = 0;
+ _S_bin[bin].used[thread] = 0;
+ }
+ }
+
+ _S_initialized = true;
+ }
+
+#ifdef __GTHREADS
+ template<int __inst>
+ void
+ __mt_alloc<__inst>::
+ _S_thread_key_destr(void* freelist_pos)
+ {
+ /*
+ * This is due to the ugly workaround mentioned in _S_init()
+ */
+ if (freelist_pos == NULL)
+ return;
+
+ /*
+ * If the thread - when it dies - still have records on its
+ * freelist we return them to the global pool here.
+ */
+ for (size_t bin = 0; bin < _S_no_of_bins; bin++)
+ {
+ block_record* block =
+ _S_bin[bin].first[((thread_record*)freelist_pos)->id];
+
+ if (block != NULL)
+ {
+ __gthread_mutex_lock(_S_bin[bin].mutex);
+
+ while (block != NULL)
+ {
+ if (_S_bin[bin].first[0] == NULL)
+ _S_bin[bin].first[0] = block;
+ else
+ _S_bin[bin].last[0]->next = block;
+
+ _S_bin[bin].last[0] = block;
+
+ block = block->next;
+
+ _S_bin[bin].free[0]++;
+ }
+
+ _S_bin[bin].last[0]->next = NULL;
+
+ __gthread_mutex_unlock(_S_bin[bin].mutex);
+ }
+ }
+
+ /*
+ * Return this thread id record to thread_freelist
+ */
+ __gthread_mutex_lock(&_S_thread_freelist_mutex);
+
+ _S_thread_freelist_last->next = (thread_record*)freelist_pos;
+ _S_thread_freelist_last = (thread_record*)freelist_pos;
+ _S_thread_freelist_last->next = NULL;
+
+ __gthread_mutex_unlock(&_S_thread_freelist_mutex);
+
+ }
+
+ template<int __inst>
+ size_t
+ __mt_alloc<__inst>::
+ _S_get_thread_id()
+ {
+ /*
+ * If we have thread support and it's active we check the thread
+ * key value and return it's id or if it's not set we take the
+ * first record from _S_thread_freelist and sets the key and
+ * returns it's id.
+ */
+ if (__gthread_active_p())
+ {
+ thread_record* freelist_pos;
+
+ if ((freelist_pos =
+ (thread_record*)__gthread_getspecific(_S_thread_key)) == NULL)
+ {
+ __gthread_mutex_lock(&_S_thread_freelist_mutex);
+
+ /*
+ * Since _S_max_threads must be larger than the
+ * theoretical max number of threads of the OS the list
+ * can never be empty.
+ */
+ freelist_pos = _S_thread_freelist_first;
+ _S_thread_freelist_first = _S_thread_freelist_first->next;
+
+ __gthread_mutex_unlock(&_S_thread_freelist_mutex);
+
+ __gthread_setspecific(_S_thread_key, (void*)freelist_pos);
+
+ /*
+ * Since thread_ids may/will be reused (espcially in
+ * producer/consumer applications) we make sure that the
+ * list pointers and free counter is reset BUT as the
+ * "old" thread may still be owner of some memory (which
+ * is referred to by other threads and thus not freed)
+ * we don't reset the used counter.
+ */
+ for (size_t bin = 0; bin < _S_no_of_bins; bin++)
+ {
+ _S_bin[bin].first[freelist_pos->id] = NULL;
+ _S_bin[bin].last[freelist_pos->id] = NULL;
+ _S_bin[bin].free[freelist_pos->id] = 0;
+ }
+ }
+
+ return freelist_pos->id;
+ }
+
+ /*
+ * Otherwise (no thread support or inactive) all requests are
+ * served from the global pool 0.
+ */
+ return 0;
+ }
+
+ template<int __inst> __gthread_once_t
+ __mt_alloc<__inst>::_S_once_mt = __GTHREAD_ONCE_INIT;
+#endif
+
+ template<int __inst> bool
+ __mt_alloc<__inst>::_S_initialized = false;
+
+ template<int __inst> typename __mt_alloc<__inst>::binmap_type*
+ __mt_alloc<__inst>::_S_binmap = NULL;
+
+ /*
+ * Allocation requests (after round-up to power of 2) below this
+ * value will be handled by the allocator. A raw malloc/free() call
+ * will be used for requests larger than this value.
+ */
+ template<int __inst> size_t
+ __mt_alloc<__inst>::_S_max_bytes = 128;
+
+ /*
+ * In order to avoid fragmenting and minimize the number of malloc()
+ * calls we always request new memory using this value. Based on
+ * previous discussions on the libstdc++ mailing list we have
+ * choosen the value below. See
+ * http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
+ */
+ template<int __inst> size_t
+ __mt_alloc<__inst>::_S_chunk_size = 4096 - 4 * sizeof(void*);
+
+ /*
+ * The maximum number of supported threads. Our Linux 2.4.18 reports
+ * 4070 in /proc/sys/kernel/threads-max
+ */
+ template<int __inst> size_t
+ __mt_alloc<__inst>::_S_max_threads = 4096;
+
+ /*
+ * Actual value calculated in _S_init()
+ */
+ template<int __inst> size_t
+ __mt_alloc<__inst>::_S_no_of_bins = 1;
+
+ /*
+ * Each time a deallocation occurs in a threaded application we make
+ * sure that there are no more than _S_freelist_headroom % of used
+ * memory on the freelist. If the number of additional records is
+ * more than _S_freelist_headroom % of the freelist, we move these
+ * records back to the global pool.
+ */
+ template<int __inst> size_t
+ __mt_alloc<__inst>::_S_freelist_headroom = 10;
+
+ /*
+ * Actual initialization in _S_init()
+ */
+#ifdef __GTHREADS
+ template<int __inst> typename __mt_alloc<__inst>::thread_record*
+ __mt_alloc<__inst>::_S_thread_freelist_first = NULL;
+
+ template<int __inst> typename __mt_alloc<__inst>::thread_record*
+ __mt_alloc<__inst>::_S_thread_freelist_last = NULL;
+
+ template<int __inst> __gthread_mutex_t
+ __mt_alloc<__inst>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
+
+ /*
+ * Actual initialization in _S_init()
+ */
+ template<int __inst> __gthread_key_t
+ __mt_alloc<__inst>::_S_thread_key;
+#endif
+
+ template<int __inst> typename __mt_alloc<__inst>::bin_record*
+ __mt_alloc<__inst>::_S_bin = NULL;
+
+ template<int __inst>
+ inline bool
+ operator==(const __mt_alloc<__inst>&, const __mt_alloc<__inst>&)
+ { return true; }
+
+ template<int __inst>
+ inline bool
+ operator!=(const __mt_alloc<__inst>&, const __mt_alloc<__inst>&)
+ { return false; }
+} // namespace __gnu_cxx
+
+namespace std
+{
+ template<typename _Tp, int __inst>
+ struct _Alloc_traits<_Tp, __gnu_cxx::__mt_alloc<__inst> >
+ {
+ static const bool _S_instanceless = true;
+ typedef __gnu_cxx:: __mt_alloc<__inst> base_alloc_type;
+ typedef __simple_alloc<_Tp, base_alloc_type> _Alloc_type;
+ typedef __allocator<_Tp, base_alloc_type> allocator_type;
+ };
+
+ template<typename _Tp, typename _Tp1, int __inst>
+ struct _Alloc_traits<_Tp,
+ __allocator<_Tp1, __gnu_cxx::__mt_alloc<__inst> > >
+ {
+ static const bool _S_instanceless = true;
+ typedef __gnu_cxx:: __mt_alloc<__inst> base_alloc_type;
+ typedef __simple_alloc<_Tp, base_alloc_type> _Alloc_type;
+ typedef __allocator<_Tp, base_alloc_type> allocator_type;
+ };
+} // namespace std
+
+#endif
+
diff --git a/libstdc++-v3/include/ext/new_allocator.h b/libstdc++-v3/include/ext/new_allocator.h
new file mode 100644
index 00000000000..794751d3c13
--- /dev/null
+++ b/libstdc++-v3/include/ext/new_allocator.h
@@ -0,0 +1,58 @@
+// Allocators -*- C++ -*-
+
+// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+#ifndef _NEW_ALLOCATOR_H
+#define _NEW_ALLOCATOR_H 1
+
+#include <new>
+
+namespace __gnu_cxx
+{
+ /**
+ * @if maint
+ * A new-based allocator, as required by the standard. Allocation and
+ * deallocation forward to global new and delete. "SGI" style, minus
+ * reallocate().
+ * @endif
+ * (See @link Allocators allocators info @endlink for more.)
+ */
+ class __new_alloc
+ {
+ public:
+ static void*
+ allocate(size_t __n)
+ { return ::operator new(__n); }
+
+ static void
+ deallocate(void* __p, size_t)
+ { ::operator delete(__p); }
+ };
+} // namespace __gnu_cxx
+
+#endif
diff --git a/libstdc++-v3/include/ext/pool_allocator.h b/libstdc++-v3/include/ext/pool_allocator.h
new file mode 100644
index 00000000000..77d354faabb
--- /dev/null
+++ b/libstdc++-v3/include/ext/pool_allocator.h
@@ -0,0 +1,383 @@
+// Allocators -*- C++ -*-
+
+// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 2, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING. If not, write to the Free
+// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+// USA.
+
+// As a special exception, you may use this file as part of a free software
+// library without restriction. Specifically, if other files instantiate
+// templates or use macros or inline functions from this file, or you compile
+// this file and link it with other files to produce an executable, this
+// file does not by itself cause the resulting executable to be covered by
+// the GNU General Public License. This exception does not however
+// invalidate any other reasons why the executable file might be covered by
+// the GNU General Public License.
+
+/*
+ * Copyright (c) 1996-1997
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ */
+
+/** @file ext/pool_allocator.h
+ * This file is a GNU extension to the Standard C++ Library.
+ * You should only include this header if you are using GCC 3 or later.
+ */
+
+#ifndef _POOL_ALLOCATOR_H
+#define _POOL_ALLOCATOR_H 1
+
+#include <bits/functexcept.h>
+#include <bits/stl_threads.h>
+#include <bits/atomicity.h>
+#include <bits/allocator_traits.h>
+#include <ext/new_allocator.h>
+
+namespace __gnu_cxx
+{
+ using std::_STL_mutex_lock;
+ using std::__throw_bad_alloc;
+
+ /**
+ * @if maint
+ * Default node allocator. "SGI" style. Uses various allocators to
+ * fulfill underlying requests (and makes as few requests as possible
+ * when in default high-speed pool mode).
+ *
+ * Important implementation properties:
+ * 0. If globally mandated, then allocate objects from __new_alloc
+ * 1. If the clients request an object of size > _S_max_bytes, the resulting
+ * object will be obtained directly from __new_alloc
+ * 2. In all other cases, we allocate an object of size exactly
+ * _S_round_up(requested_size). Thus the client has enough size
+ * information that we can return the object to the proper free list
+ * without permanently losing part of the object.
+ *
+ * The first template parameter specifies whether more than one thread may
+ * use this allocator. It is safe to allocate an object from one instance
+ * of a default_alloc and deallocate it with another one. This effectively
+ * transfers its ownership to the second one. This may have undesirable
+ * effects on reference locality.
+ *
+ * The second parameter is unused and serves only to allow the creation of
+ * multiple default_alloc instances. Note that containers built on different
+ * allocator instances have different types, limiting the utility of this
+ * approach. If you do not wish to share the free lists with the main
+ * default_alloc instance, instantiate this with a non-zero __inst.
+ *
+ * @endif
+ * (See @link Allocators allocators info @endlink for more.)
+ */
+ template<bool __threads, int __inst>
+ class __pool_alloc
+ {
+ private:
+ enum {_S_align = 8};
+ enum {_S_max_bytes = 128};
+ enum {_S_freelists = _S_max_bytes / _S_align};
+
+ union _Obj
+ {
+ union _Obj* _M_free_list_link;
+ char _M_client_data[1]; // The client sees this.
+ };
+
+ static _Obj* volatile _S_free_list[_S_freelists];
+
+ // Chunk allocation state.
+ static char* _S_start_free;
+ static char* _S_end_free;
+ static size_t _S_heap_size;
+
+ static _STL_mutex_lock _S_lock;
+ static _Atomic_word _S_force_new;
+
+ static size_t
+ _S_round_up(size_t __bytes)
+ { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
+
+ static size_t
+ _S_freelist_index(size_t __bytes)
+ { return ((__bytes + (size_t)_S_align - 1)/(size_t)_S_align - 1); }
+
+      // Returns an object of size __n, and optionally adds to "size
+      // __n"'s free list.
+ static void*
+ _S_refill(size_t __n);
+
+ // Allocates a chunk for nobjs of size size. nobjs may be reduced
+ // if it is inconvenient to allocate the requested number.
+ static char*
+ _S_chunk_alloc(size_t __size, int& __nobjs);
+
+ // It would be nice to use _STL_auto_lock here. But we need a
+ // test whether threads are in use.
+ struct _Lock
+ {
+ _Lock() { if (__threads) _S_lock._M_acquire_lock(); }
+ ~_Lock() { if (__threads) _S_lock._M_release_lock(); }
+ } __attribute__ ((__unused__));
+ friend struct _Lock;
+
+ public:
+ // __n must be > 0
+ static void*
+ allocate(size_t __n)
+ {
+ void* __ret = 0;
+
+ // If there is a race through here, assume answer from getenv
+ // will resolve in same direction. Inspired by techniques
+ // to efficiently support threading found in basic_string.h.
+ if (_S_force_new == 0)
+ {
+ if (getenv("GLIBCPP_FORCE_NEW"))
+ __atomic_add(&_S_force_new, 1);
+ else
+ __atomic_add(&_S_force_new, -1);
+ }
+
+ if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
+ __ret = __new_alloc::allocate(__n);
+ else
+ {
+ _Obj* volatile* __my_free_list = _S_free_list
+ + _S_freelist_index(__n);
+ // Acquire the lock here with a constructor call. This
+ // ensures that it is released in exit or during stack
+ // unwinding.
+ _Lock __lock_instance;
+ _Obj* __restrict__ __result = *__my_free_list;
+ if (__builtin_expect(__result == 0, 0))
+ __ret = _S_refill(_S_round_up(__n));
+ else
+ {
+ *__my_free_list = __result -> _M_free_list_link;
+ __ret = __result;
+ }
+ if (__builtin_expect(__ret == 0, 0))
+ __throw_bad_alloc();
+ }
+ return __ret;
+ }
+
+ // __p may not be 0
+ static void
+ deallocate(void* __p, size_t __n)
+ {
+ if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
+ __new_alloc::deallocate(__p, __n);
+ else
+ {
+ _Obj* volatile* __my_free_list = _S_free_list
+ + _S_freelist_index(__n);
+ _Obj* __q = (_Obj*)__p;
+
+ // Acquire the lock here with a constructor call. This
+ // ensures that it is released in exit or during stack
+ // unwinding.
+ _Lock __lock_instance;
+ __q -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = __q;
+ }
+ }
+ };
+
+ template<bool __threads, int __inst> _Atomic_word
+ __pool_alloc<__threads, __inst>::_S_force_new = 0;
+
+ template<bool __threads, int __inst>
+ inline bool
+ operator==(const __pool_alloc<__threads,__inst>&,
+ const __pool_alloc<__threads,__inst>&)
+ { return true; }
+
+ template<bool __threads, int __inst>
+ inline bool
+ operator!=(const __pool_alloc<__threads,__inst>&,
+ const __pool_alloc<__threads,__inst>&)
+ { return false; }
+
+
+ // We allocate memory in large chunks in order to avoid fragmenting the
+ // heap too much. We assume that __size is properly aligned. We hold
+ // the allocation lock.
+ template<bool __threads, int __inst>
+ char*
+ __pool_alloc<__threads, __inst>::
+ _S_chunk_alloc(size_t __size, int& __nobjs)
+ {
+ char* __result;
+ size_t __total_bytes = __size * __nobjs;
+ size_t __bytes_left = _S_end_free - _S_start_free;
+
+ if (__bytes_left >= __total_bytes)
+ {
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return __result ;
+ }
+ else if (__bytes_left >= __size)
+ {
+ __nobjs = (int)(__bytes_left/__size);
+ __total_bytes = __size * __nobjs;
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return __result;
+ }
+ else
+ {
+ size_t __bytes_to_get =
+ 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
+ // Try to make use of the left-over piece.
+ if (__bytes_left > 0)
+ {
+ _Obj* volatile* __my_free_list =
+ _S_free_list + _S_freelist_index(__bytes_left);
+
+ ((_Obj*)(void*)_S_start_free) -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = (_Obj*)(void*)_S_start_free;
+ }
+ _S_start_free = (char*) __new_alloc::allocate(__bytes_to_get);
+ if (_S_start_free == 0)
+ {
+ size_t __i;
+ _Obj* volatile* __my_free_list;
+ _Obj* __p;
+ // Try to make do with what we have. That can't hurt. We
+ // do not try smaller requests, since that tends to result
+ // in disaster on multi-process machines.
+ __i = __size;
+ for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
+ {
+ __my_free_list = _S_free_list + _S_freelist_index(__i);
+ __p = *__my_free_list;
+ if (__p != 0)
+ {
+ *__my_free_list = __p -> _M_free_list_link;
+ _S_start_free = (char*)__p;
+ _S_end_free = _S_start_free + __i;
+ return _S_chunk_alloc(__size, __nobjs);
+ // Any leftover piece will eventually make it to the
+ // right free list.
+ }
+ }
+ _S_end_free = 0; // In case of exception.
+ _S_start_free = (char*)__new_alloc::allocate(__bytes_to_get);
+ // This should either throw an exception or remedy the situation.
+ // Thus we assume it succeeded.
+ }
+ _S_heap_size += __bytes_to_get;
+ _S_end_free = _S_start_free + __bytes_to_get;
+ return _S_chunk_alloc(__size, __nobjs);
+ }
+ }
+
+
+ // Returns an object of size __n, and optionally adds to "size
+ // __n"'s free list. We assume that __n is properly aligned. We
+ // hold the allocation lock.
+ template<bool __threads, int __inst>
+ void*
+ __pool_alloc<__threads, __inst>::_S_refill(size_t __n)
+ {
+ int __nobjs = 20;
+ char* __chunk = _S_chunk_alloc(__n, __nobjs);
+ _Obj* volatile* __my_free_list;
+ _Obj* __result;
+ _Obj* __current_obj;
+ _Obj* __next_obj;
+ int __i;
+
+ if (1 == __nobjs)
+ return __chunk;
+ __my_free_list = _S_free_list + _S_freelist_index(__n);
+
+ // Build free list in chunk.
+ __result = (_Obj*)(void*)__chunk;
+ *__my_free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
+ for (__i = 1; ; __i++)
+ {
+ __current_obj = __next_obj;
+ __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
+ if (__nobjs - 1 == __i)
+ {
+ __current_obj -> _M_free_list_link = 0;
+ break;
+ }
+ else
+ __current_obj -> _M_free_list_link = __next_obj;
+ }
+ return __result;
+ }
+
+
+ template<bool __threads, int __inst>
+ _STL_mutex_lock
+ __pool_alloc<__threads, __inst>::_S_lock __STL_MUTEX_INITIALIZER;
+
+ template<bool __threads, int __inst>
+ char* __pool_alloc<__threads, __inst>::_S_start_free = 0;
+
+ template<bool __threads, int __inst>
+ char* __pool_alloc<__threads, __inst>::_S_end_free = 0;
+
+ template<bool __threads, int __inst>
+ size_t __pool_alloc<__threads, __inst>::_S_heap_size = 0;
+
+ template<bool __threads, int __inst>
+ typename __pool_alloc<__threads, __inst>::_Obj* volatile
+ __pool_alloc<__threads, __inst>::_S_free_list[_S_freelists];
+} // namespace __gnu_cxx
+
+namespace std
+{
+ //@{
+ /// Versions for the predefined "SGI" style allocators.
+ template<typename _Tp, bool __thr, int __inst>
+ struct _Alloc_traits<_Tp, __gnu_cxx::__pool_alloc<__thr, __inst> >
+ {
+ static const bool _S_instanceless = true;
+ typedef __gnu_cxx::__pool_alloc<__thr, __inst> base_alloc_type;
+ typedef __simple_alloc<_Tp, base_alloc_type> _Alloc_type;
+ typedef __allocator<_Tp, base_alloc_type> allocator_type;
+ };
+ //@}
+
+ //@{
+ /// Versions for the __allocator adaptor used with the predefined
+ /// "SGI" style allocators.
+ template<typename _Tp, typename _Tp1, bool __thr, int __inst>
+ struct _Alloc_traits<_Tp, __allocator<_Tp1,
+ __gnu_cxx::__pool_alloc<__thr, __inst> > >
+ {
+ static const bool _S_instanceless = true;
+ typedef __gnu_cxx::__pool_alloc<__thr, __inst> base_alloc_type;
+ typedef __simple_alloc<_Tp, base_alloc_type> _Alloc_type;
+ typedef __allocator<_Tp, base_alloc_type> allocator_type;
+ };
+ //@}
+} // namespace std
+
+#endif
diff --git a/libstdc++-v3/include/ext/rope b/libstdc++-v3/include/ext/rope
index d621ba84aa0..9ba261845b3 100644
--- a/libstdc++-v3/include/ext/rope
+++ b/libstdc++-v3/include/ext/rope
@@ -53,7 +53,7 @@
#include <bits/stl_algo.h>
#include <bits/stl_function.h>
#include <bits/stl_numeric.h>
-#include <bits/stl_alloc.h>
+#include <bits/allocator.h>
#include <bits/stl_construct.h>
#include <bits/stl_uninitialized.h>
#include <ext/hash_fun.h>
diff --git a/libstdc++-v3/include/ext/slist b/libstdc++-v3/include/ext/slist
index 780e2c2d693..875f7604de5 100644
--- a/libstdc++-v3/include/ext/slist
+++ b/libstdc++-v3/include/ext/slist
@@ -51,7 +51,7 @@
#define _SLIST 1
#include <bits/stl_algobase.h>
-#include <bits/stl_alloc.h>
+#include <bits/allocator.h>
#include <bits/stl_construct.h>
#include <bits/stl_uninitialized.h>
#include <bits/concept_check.h>