author     Ryan Lortie <desrt@desrt.ca>   2015-02-12 12:18:22 -0500
committer  Ryan Lortie <desrt@desrt.ca>   2015-02-12 13:51:04 -0500
commit     a91c900c9e9c28f4d0530f962c7ba46353467f6e (patch)
tree       33ce5558742c41d94c85c3aeb4df9a8dd771c77c
parent     b0899fdc9a3e434927630422425ce78f8242fc25 (diff)
download   glib-wip/desrt/mountmonitor.tar.gz
GContextSpecificGroup: fix deadlock  (wip/desrt/mountmonitor)
There was a theoretical deadlock between the worker trying to emit a signal at the same time as we were waiting for it to shut down the notification (while holding the lock).

The deadlock was particularly annoying because we didn't really need to wait for the shutdown and because it wasn't possible for signals to arrive while waiting for a start. Attempting to deal with start and stop in an asymmetric way could have led to other weird situations, however.

Drop the lock while waiting for the worker thread to start. This means that we face the possibility of multiple waiters on the cond at the same time, so we need to make more of a state machine.
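The shape of that state machine is easier to see outside the diff. Below is a minimal, self-contained sketch of the same idea, assuming an ExampleGroup type and an explicit worker_context argument (both illustrative; the real patch stores this state directly in GContextSpecificGroup and dispatches to GLib's private worker context):

#include <glib.h>

typedef struct
{
  GMutex    lock;
  GCond     cond;
  gboolean  requested_state;   /* what callers last asked for            */
  gboolean  effective_state;   /* what the worker has actually performed */
  GCallback requested_func;    /* start or stop callback, run on worker  */
} ExampleGroup;

/* Runs on the worker's context: performs at most one pending transition
 * and wakes every thread waiting on the cond. */
static gboolean
example_group_change_state (gpointer user_data)
{
  ExampleGroup *group = user_data;

  g_mutex_lock (&group->lock);
  if (group->requested_state != group->effective_state)
    {
      (* group->requested_func) ();
      group->effective_state = group->requested_state;
      group->requested_func = NULL;
      g_cond_broadcast (&group->cond);
    }
  g_mutex_unlock (&group->lock);

  return G_SOURCE_REMOVE;
}

/* Called with group->lock held.  worker_context is assumed to be run by a
 * dedicated worker thread (never the calling thread), as in GLib.  Only
 * positive (start) transitions block, so a stop request can never wait on
 * a worker that is itself waiting for our lock. */
static void
example_group_request_state (ExampleGroup *group,
                             GMainContext *worker_context,
                             gboolean      requested_state,
                             GCallback     requested_func)
{
  if (requested_state != group->requested_state)
    {
      if (group->effective_state != group->requested_state)
        {
          /* a transition is already queued: flip the request back and
           * let the queued callback find nothing to do */
          group->requested_state = requested_state;
          group->requested_func = NULL;
        }
      else
        {
          /* start a new transition */
          group->requested_state = requested_state;
          group->requested_func = requested_func;
          g_main_context_invoke (worker_context,
                                 example_group_change_state, group);
        }
    }

  /* only block for positive transitions */
  if (requested_state)
    while (group->requested_state != group->effective_state)
      g_cond_wait (&group->cond, &group->lock);
}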
-rw-r--r--  gio/gcontextspecificgroup.c | 77
-rw-r--r--  gio/gcontextspecificgroup.h |  4
-rw-r--r--  gio/tests/contexts.c        |  4
3 files changed, 52 insertions(+), 33 deletions(-)
diff --git a/gio/gcontextspecificgroup.c b/gio/gcontextspecificgroup.c
index 849b0a40a..47a49c404 100644
--- a/gio/gcontextspecificgroup.c
+++ b/gio/gcontextspecificgroup.c
@@ -90,27 +90,24 @@ g_context_specific_source_new (const gchar *name,
   return css;
 }
-typedef struct
-{
-  GCallback callback;
-  GMutex mutex;
-  GCond cond;
-} Closure;
-
 static gboolean
-g_context_specific_group_invoke_closure (gpointer user_data)
+g_context_specific_group_change_state (gpointer user_data)
 {
-  Closure *closure = user_data;
+  GContextSpecificGroup *group = user_data;
-  (* closure->callback) ();
+  g_mutex_lock (&group->lock);
-  g_mutex_lock (&closure->mutex);
+  if (group->requested_state != group->effective_state)
+    {
+      (* group->requested_func) ();
-  closure->callback = NULL;
+      group->effective_state = group->requested_state;
+      group->requested_func = NULL;
-  g_cond_broadcast (&closure->cond);
+      g_cond_broadcast (&group->cond);
+    }
-  g_mutex_unlock (&closure->mutex);
+  g_mutex_unlock (&group->lock);
   return FALSE;
 }
@@ -128,25 +125,43 @@ g_context_specific_group_invoke_closure (gpointer user_data)
  * example)
  */
 static void
-g_context_specific_group_wait_for_callback (GCallback callback)
+g_context_specific_group_request_state (GContextSpecificGroup *group,
+                                        gboolean               requested_state,
+                                        GCallback              requested_func)
 {
-  Closure closure;
-
-  g_mutex_init (&closure.mutex);
-  g_cond_init (&closure.cond);
-
-  closure.callback = callback;
+  if (requested_state != group->requested_state)
+    {
+      if (group->effective_state != group->requested_state)
+        {
+          /* abort the currently pending state transition */
+          g_assert (group->effective_state == requested_state);
-  g_main_context_invoke (GLIB_PRIVATE_CALL(g_get_worker_context) (),
-                         g_context_specific_group_invoke_closure, &closure);
+          group->requested_state = requested_state;
+          group->requested_func = NULL;
+        }
+      else
+        {
+          /* start a new state transition */
+          group->requested_state = requested_state;
+          group->requested_func = requested_func;
-  g_mutex_lock (&closure.mutex);
-  while (closure.callback)
-    g_cond_wait (&closure.cond, &closure.mutex);
-  g_mutex_unlock (&closure.mutex);
+          g_main_context_invoke (GLIB_PRIVATE_CALL(g_get_worker_context) (),
+                                 g_context_specific_group_change_state, group);
+        }
+    }
-  g_mutex_clear (&closure.mutex);
-  g_cond_clear (&closure.cond);
+  /* we only block for positive transitions */
+  if (requested_state)
+    {
+      while (group->requested_state != group->effective_state)
+        g_cond_wait (&group->cond, &group->lock);
+
+      /* there is no way this could go back to FALSE because the object
+       * that we just created in this thread would have to have been
+       * destroyed again (from this thread) before that could happen.
+       */
+      g_assert (group->effective_state);
+    }
 }
 gpointer
@@ -169,7 +184,7 @@ g_context_specific_group_get (GContextSpecificGroup *group,
   /* start only if there are no others */
   if (start_func && g_hash_table_size (group->table) == 0)
-    g_context_specific_group_wait_for_callback (start_func);
+    g_context_specific_group_request_state (group, TRUE, start_func);
   css = g_hash_table_lookup (group->table, context);
@@ -214,7 +229,7 @@ g_context_specific_group_remove (GContextSpecificGroup *group,
   /* stop only if we were the last one */
   if (stop_func && g_hash_table_size (group->table) == 0)
-    g_context_specific_group_wait_for_callback (stop_func);
+    g_context_specific_group_request_state (group, FALSE, stop_func);
   g_mutex_unlock (&group->lock);
diff --git a/gio/gcontextspecificgroup.h b/gio/gcontextspecificgroup.h
index b77134b98..9c06aa83b 100644
--- a/gio/gcontextspecificgroup.h
+++ b/gio/gcontextspecificgroup.h
@@ -26,6 +26,10 @@ typedef struct
 {
   GHashTable *table;
   GMutex      lock;
+  GCond       cond;
+  gboolean    requested_state;
+  GCallback   requested_func;
+  gboolean    effective_state;
 } GContextSpecificGroup;
 gpointer
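Read as a tiny state machine, the two new booleans in the struct above admit four combinations (a reader's note, not part of the patch):

/*
 *   requested_state  effective_state   meaning
 *   ---------------  ---------------   ------------------------------------
 *   FALSE            FALSE             stopped, nothing pending
 *   TRUE             FALSE             start requested, worker not yet run
 *   TRUE             TRUE              running, nothing pending
 *   FALSE            TRUE              stop requested, worker not yet run
 *
 * g_context_specific_group_request_state() only waits for the two fields
 * to match when requested_state is TRUE, which is what removes the
 * deadlock on the stop path.
 */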
diff --git a/gio/tests/contexts.c b/gio/tests/contexts.c
index 49190d9ad..6d7412a80 100644
--- a/gio/tests/contexts.c
+++ b/gio/tests/contexts.c
@@ -297,7 +297,7 @@ test_identity_thread (gpointer user_data)
   g_main_context_unref (my_context);
   /* at least one thread should see this cleared on exit */
-  return GUINT_TO_POINTER (!g_atomic_int_get (&is_running));
+  return GUINT_TO_POINTER (!group.requested_state);
 }
 static void
@@ -352,7 +352,7 @@ test_emit_thread (gpointer user_data)
   g_main_context_unref (my_context);
   /* at least one thread should see this cleared on exit */
-  return GUINT_TO_POINTER (!g_atomic_int_get (&is_running));
+  return GUINT_TO_POINTER (!group.requested_state);
 }
 static void