author     Tim Janik <timj@imendio.com>              2005-11-01 18:10:31 +0000
committer  Tim Janik <timj@src.gnome.org>            2005-11-01 18:10:31 +0000
commit     0cba1b531d5d28890fa4f48359d4e7adacf2a603 (patch)
tree       fa2a88ef5c43b20004851e2b9f3eb14cf10c71f2 /gobject
parent     3a042a8959501f9e90df41fc31e3167dd7aa6222 (diff)
download   glib-0cba1b531d5d28890fa4f48359d4e7adacf2a603.tar.gz
prepared deprecation of GMemChunk and GAllocator. added g_slice_*() API to allocate and cache small bits of memory.
Tue Nov 1 16:24:20 2005 Tim Janik <timj@imendio.com>
* glib/gmem.[hc]: prepared deprecation of GMemChunk and GAllocator.
added g_slice_*() API to allocate and cache small bits of memory.
an actual allocator implementation for g_slice_*() is still pending.
* glib/gthread.[hc]: changes from a patch by Matthias Clasen.
changed GRealThread list to use in-structure *next; fields instead
of GSList, in order for thread iteration to not depend on g_slice_*()
indirectly.
_g_thread_mem_private_get():
_g_thread_mem_private_set(): added accessors for private memory,
needed because the ordinary GPrivate implementation relies on GArray
and GSList and therefore indirectly on working g_slice_*() allocations.
* glib/gthread.[hc]:
g_thread_foreach(): new public API function to loop over all existing threads.
* glib/gdataset.c:
* glib/gstring.c:
* glib/gcache.c:
* glib/garray.c:
* glib/gqueue.c:
* glib/gslist.c:
* glib/glist.c:
* glib/ghash.c:
* glib/gtree.c:
* glib/ghook.c:
* glib/gmain.c:
* glib/gnode.c:
removed GAllocator and free list usages and accompanying locks.
use g_slice_*() API to allocate and cache small bits of memory.
* glib/ghook.h: removed GMemChunk field from public API.
* glib/gslist.h:
* glib/glist.h: deprecate allocator API, provide _free1() for consistency.
* glib/gnode.h: deprecate allocator API.
* glib/gmain.c: reordered GPollRec fields so g_slice_free_chain() can
be used for poll rec lists.
* glib/grel.c: removed mem chunk usage, and allocated tuples via g_slice_*().
g_relation_destroy(): free all tuples from the all_tuples hash table,
this effectively maintains the lifetime tracking of tuples.
g_relation_delete_tuple(): free tuples which are removed from the
all_tuples hash table. this fixes a temporary leak that was present
in the memchunk code until the destruction of the relation.
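
Note: a minimal sketch of the g_slice_*() usage pattern that the gmem.[hc] entry above introduces, using only calls named in this commit (g_slice_new() and g_slice_free()); the Node type is a made-up example, not GLib code:

#include <glib.h>

/* hypothetical example type, allocated from the slice cache */
typedef struct _Node Node;
struct _Node {
  gint  value;
  Node *next;
};

static Node *
node_new (gint value)
{
  Node *node = g_slice_new (Node);    /* small, cached allocation */
  node->value = value;
  node->next = NULL;
  return node;
}

static void
node_free (Node *node)
{
  g_slice_free (Node, node);          /* returns the memory to the slice cache */
}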
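
The gthread.[hc] change above replaces a GSList of threads with an in-structure *next field, so that iterating threads does not itself allocate. A rough sketch of that pattern with a hypothetical Worker type (not the real GRealThread):

typedef struct _Worker Worker;
struct _Worker {
  Worker     *next;   /* in-structure link replaces a separate GSList node */
  const char *name;
};

static void
worker_foreach (Worker *head, void (*func) (Worker *w))
{
  /* walking the chain needs no list-node allocations at all */
  for (Worker *w = head; w != NULL; w = w->next)
    func (w);
}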
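
A possible use of the new g_thread_foreach() entry point mentioned above; the counting callback is illustrative only, and locking around the counter is omitted for brevity:

#include <glib.h>

static void
count_thread (gpointer thread, gpointer user_data)
{
  guint *n = user_data;   /* 'thread' is the GThread* currently visited */
  (*n)++;
}

static guint
count_threads (void)
{
  guint n = 0;
  g_thread_foreach (count_thread, &n);
  return n;
}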
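
The GPollRec reordering in gmain.c above exists so whole record lists can be handed to g_slice_free_chain() in one call. A sketch of that pattern, assuming the (type, chain, next-field) macro form that g_slice_free_chain() has in current GLib; Rec is a hypothetical type, not GPollRec itself:

#include <glib.h>

typedef struct _Rec Rec;
struct _Rec {
  Rec  *next;   /* link field placed where the chain-free call can reach it */
  gint  fd;
};

static void
rec_list_free (Rec *head)
{
  /* releases every element of the chain back to the slice allocator */
  g_slice_free_chain (Rec, head, next);
}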
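
The grel.c entries above tie tuple lifetime to the all_tuples hash table. A loose sketch of that ownership pattern; the Tuple type and table setup are illustrative only, not the actual grel.c structures:

#include <glib.h>

typedef struct {
  gpointer fields[2];
} Tuple;

static void
tuple_free (gpointer data)
{
  g_slice_free (Tuple, data);
}

static GHashTable *
all_tuples_new (void)
{
  /* the key-destroy callback frees tuples when they leave the table,
   * and g_hash_table_destroy () frees whatever is still tracked */
  return g_hash_table_new_full (g_direct_hash, g_direct_equal,
                                tuple_free, NULL);
}

static void
delete_tuple (GHashTable *all_tuples, Tuple *tuple)
{
  /* removing the entry triggers tuple_free (), so deleted tuples no
   * longer leak until the relation itself is destroyed */
  g_hash_table_remove (all_tuples, tuple);
}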
Diffstat (limited to 'gobject')
-rw-r--r--  gobject/ChangeLog   8
-rw-r--r--  gobject/gsignal.c  55
-rw-r--r--  gobject/gtype.c    51
3 files changed, 25 insertions, 89 deletions
diff --git a/gobject/ChangeLog b/gobject/ChangeLog
index 6fade6f19..a35641a81 100644
--- a/gobject/ChangeLog
+++ b/gobject/ChangeLog
@@ -1,3 +1,11 @@
+Tue Nov 1 17:07:43 2005  Tim Janik  <timj@imendio.com>
+
+	* gsignal.c: allocate signal handlers and handler match structures
+	via the new g_slice_*() API (get's rid of GList allocation hack).
+
+	* gtype.c: got rid of per-type memchunks. if GTypeInfo.n_preallocs
+	is > 0, objects are allocated via g_slice_*() instead of g_malloc().
+
 2005-10-03  Matthias Clasen  <mclasen@redhat.com>
 
 	* glib-mkenums.in: Really fix #314890.
diff --git a/gobject/gsignal.c b/gobject/gsignal.c
index 50eebdd75..1da35bfb4 100644
--- a/gobject/gsignal.c
+++ b/gobject/gsignal.c
@@ -42,7 +42,6 @@
 /* pre allocation configurations */
 #define MAX_STACK_VALUES        (16)
-#define HANDLER_PRE_ALLOC       (48)
 
 #define REPORT_BUG      "please report occurrence circumstances to gtk-devel-list@gnome.org"
 
 #ifdef G_ENABLE_DEBUG
@@ -52,41 +51,6 @@
 static volatile gpointer g_trap_instance_signals = NULL;
 #endif /* G_ENABLE_DEBUG */
 
-/* --- generic allocation --- */
-/* we special case allocations generically by replacing
- * these functions with more speed/memory aware variants
- */
-#ifndef DISABLE_MEM_POOLS
-static inline gpointer
-g_generic_node_alloc (GTrashStack **trash_stack_p,
-                      guint         sizeof_node,
-                      guint         nodes_pre_alloc)
-{
-  gpointer node = g_trash_stack_pop (trash_stack_p);
-
-  if (!node)
-    {
-      guint8 *block;
-
-      nodes_pre_alloc = MAX (nodes_pre_alloc, 1);
-      block = g_malloc (sizeof_node * nodes_pre_alloc);
-      while (--nodes_pre_alloc)
-        {
-          g_trash_stack_push (trash_stack_p, block);
-          block += sizeof_node;
-        }
-      node = block;
-    }
-
-  return node;
-}
-#define g_generic_node_free(trash_stack_p, node) g_trash_stack_push (trash_stack_p, node)
-#else  /* !DISABLE_MEM_POOLS */
-#define g_generic_node_alloc(t,sizeof_node,p)    g_malloc (sizeof_node)
-#define g_generic_node_free(t,node)              g_free (node)
-#endif /* !DISABLE_MEM_POOLS */
-
-
 /* --- typedefs --- */
 typedef struct _SignalNode   SignalNode;
 typedef struct _SignalKey    SignalKey;
@@ -232,10 +196,7 @@ struct _HandlerMatch
 {
   Handler      *handler;
   HandlerMatch *next;
-  union {
-    guint       signal_id;
-    gpointer    dummy;
-  } d;
+  guint         signal_id;
 };
 
 typedef struct
@@ -434,10 +395,10 @@ handler_match_prepend (HandlerMatch *list,
    * instead, we use GList* nodes, since they are exactly the size
    * we need and are already cached. g_signal_init() asserts this.
    */
-  node = (HandlerMatch*) g_list_alloc ();
+  node = g_slice_new (HandlerMatch);
   node->handler = handler;
   node->next = list;
-  node->d.signal_id = signal_id;
+  node->signal_id = signal_id;
   handler_ref (handler);
 
   return node;
@@ -448,8 +409,8 @@ handler_match_free1_R (HandlerMatch *node,
 {
   HandlerMatch *next = node->next;
 
-  handler_unref_R (node->d.signal_id, instance, node->handler);
-  g_list_free_1 ((GList*) node);
+  handler_unref_R (node->signal_id, instance, node->handler);
+  g_slice_free (HandlerMatch, node);
 
   return next;
 }
@@ -541,9 +502,7 @@ handlers_find (gpointer         instance,
 static inline Handler*
 handler_new (gboolean after)
 {
-  Handler *handler = g_generic_node_alloc (&g_handler_ts,
-                                           sizeof (Handler),
-                                           HANDLER_PRE_ALLOC);
+  Handler *handler = g_slice_new (Handler);
 #ifndef G_DISABLE_CHECKS
   if (g_handler_sequential_number < 1)
     g_error (G_STRLOC ": handler id overflow, %s", REPORT_BUG);
@@ -624,7 +583,7 @@ handler_unref_R (guint    signal_id,
       SIGNAL_UNLOCK ();
       g_closure_unref (handler->closure);
       SIGNAL_LOCK ();
-      g_generic_node_free (&g_handler_ts, handler);
+      g_slice_free (Handler, handler);
     }
 }
diff --git a/gobject/gtype.c b/gobject/gtype.c
index c67ed6dfb..3d312b237 100644
--- a/gobject/gtype.c
+++ b/gobject/gtype.c
@@ -278,7 +278,6 @@ struct _InstanceData
   guint16            private_size;
   guint16            n_preallocs;
   GInstanceInitFunc  instance_init;
-  GMemChunk         *mem_chunk;
 };
 union _TypeData
 {
@@ -976,7 +975,6 @@ type_data_make_W (TypeNode              *node,
       data->instance.n_preallocs = MIN (info->n_preallocs, 1024);
 #endif /* !DISABLE_MEM_POOLS */
       data->instance.instance_init = info->instance_init;
-      data->instance.mem_chunk = NULL;
     }
   else if (node->is_classed) /* only classed */
     {
@@ -1526,8 +1524,7 @@ g_type_create_instance (GType type)
   TypeNode *node;
   GTypeInstance *instance;
   GTypeClass *class;
-  guint i;
-  gsize total_instance_size;
+  guint i, total_size;
 
   node = lookup_type_node_I (type);
   if (!node || !node->is_instantiatable)
@@ -1545,35 +1542,12 @@ g_type_create_instance (GType type)
     }
 
   class = g_type_class_ref (type);
+  total_size = type_total_instance_size_I (node);
 
-  total_instance_size = type_total_instance_size_I (node);
-
   if (node->data->instance.n_preallocs)
-    {
-      G_WRITE_LOCK (&type_rw_lock);
-      if (!node->data->instance.mem_chunk)
-        {
-          /* If there isn't private data, the compiler will have already
-           * added the necessary padding, but in the private data case, we
-           * have to pad ourselves to ensure proper alignment of all the
-           * atoms in the slab.
-           */
-          gsize atom_size = total_instance_size;
-          if (node->data->instance.private_size)
-            atom_size = ALIGN_STRUCT (atom_size);
-
-          node->data->instance.mem_chunk = g_mem_chunk_new (NODE_NAME (node),
-                                                            atom_size,
-                                                            (atom_size *
-                                                             node->data->instance.n_preallocs),
-                                                            G_ALLOC_AND_FREE);
-        }
-
-      instance = g_chunk_new0 (GTypeInstance, node->data->instance.mem_chunk);
-      G_WRITE_UNLOCK (&type_rw_lock);
-    }
+    instance = g_slice_alloc0 (total_size);
   else
-    instance = g_malloc0 (total_instance_size); /* fine without read lock */
+    instance = g_malloc0 (total_size);
 
   if (node->data->instance.private_size)
     instance_real_class_set (instance, class);
@@ -1624,17 +1598,13 @@ g_type_free_instance (GTypeInstance *instance)
 
   instance->g_class = NULL;
 #ifdef G_ENABLE_DEBUG
-  memset (instance, 0xaa, type_total_instance_size_I (node)); /* debugging hack */
-#endif
+  memset (instance, 0xaa, type_total_instance_size_I (node));
+#endif
   if (node->data->instance.n_preallocs)
-    {
-      G_WRITE_LOCK (&type_rw_lock);
-      g_chunk_free (instance, node->data->instance.mem_chunk);
-      G_WRITE_UNLOCK (&type_rw_lock);
-    }
+    g_slice_free1 (type_total_instance_size_I (node), instance);
   else
     g_free (instance);
-
+
   g_type_class_unref (class);
 }
@@ -2045,10 +2015,9 @@ type_data_last_unref_Wm (GType type,
 
   node->data->common.ref_count = 0;
 
-  if (node->is_instantiatable && node->data->instance.mem_chunk)
+  if (node->is_instantiatable)
     {
-      g_mem_chunk_destroy (node->data->instance.mem_chunk);
-      node->data->instance.mem_chunk = NULL;
+      /* destroy node->data->instance.mem_chunk */
     }
 
   tdata = node->data;