| field | value | date |
|---|---|---|
| author | Victor Stinner <vstinner@redhat.com> | 2019-06-03 18:14:24 +0200 |
| committer | GitHub <noreply@github.com> | 2019-06-03 18:14:24 +0200 |
| commit | e225bebc1409bcf68db74a35ed3c31222883bf8f (patch) | |
| tree | 42153d2d48a3f1c784812b4fb54ed3557be713d0 | |
| parent | 49a7e347976c9b39149ac7505b11ad6e9e2bdeec (diff) | |
| download | cpython-git-e225bebc1409bcf68db74a35ed3c31222883bf8f.tar.gz | |
Revert "bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (gh-13714)" (GH-13780)
This reverts commit 6a150bcaeb190d1731b38ab9c7a5d1a352847ddc.
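For context: the public API whose main-interpreter-only behavior this revert restores is Py_AddPendingCall(). The sketch below shows its typical use; the callback shape mirrors pending_threadfunc() in the Modules/_testcapimodule.c hunk further down, while queue_callback() and its error handling are illustrative additions, not part of this patch.

```c
#include <Python.h>

/* Invoked later by the main thread's eval loop, with the GIL held.
   A non-zero return value is treated as an error. */
static int
_pending_callback(void *arg)
{
    PyObject *callable = (PyObject *)arg;
    PyObject *res = PyObject_CallObject(callable, NULL);
    Py_DECREF(callable);
    Py_XDECREF(res);
    return (res == NULL) ? -1 : 0;
}

/* Illustrative wrapper.  The Py_INCREF needs the GIL, but
   Py_AddPendingCall() itself may be called from any thread; it
   returns -1 when the fixed-size queue (NPENDINGCALLS == 32 slots,
   see the Include/internal/pycore_pystate.h hunk below) is full. */
static int
queue_callback(PyObject *callable)
{
    Py_INCREF(callable);
    if (Py_AddPendingCall(&_pending_callback, (void *)callable) < 0) {
        Py_DECREF(callable);
        return -1;
    }
    return 0;
}
```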
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | Include/internal/pycore_ceval.h | 13 |
| -rw-r--r-- | Include/internal/pycore_pystate.h | 12 |
| -rw-r--r-- | Lib/test/test_capi.py | 2 |
| -rw-r--r-- | Misc/NEWS.d/next/Core and Builtins/2018-09-15-12-13-46.bpo-33608.avmvVP.rst | 5 |
| -rw-r--r-- | Modules/_testcapimodule.c | 1 |
| -rw-r--r-- | Modules/signalmodule.c | 12 |
| -rw-r--r-- | Python/ceval.c | 335 |
| -rw-r--r-- | Python/ceval_gil.h | 28 |
| -rw-r--r-- | Python/pylifecycle.c | 29 |
| -rw-r--r-- | Python/pystate.c | 68 |
10 files changed, 185 insertions, 320 deletions
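Most of the diff below is one mechanical signature change: the per-interpreter ceval state (`&interp->ceval`) and the target thread id are dropped from the internal pending-call API, leaving a single runtime-wide queue again. Condensed from the Modules/signalmodule.c hunks that follow (a before/after excerpt, not standalone code):

```c
/* Before the revert: the caller names a target interpreter and thread. */
_PyEval_AddPendingCall(tstate,
                       &runtime->ceval,
                       &interp->ceval,
                       runtime->main_thread,
                       report_wakeup_send_error,
                       (void *)(intptr_t) last_error);

/* After the revert: one runtime-wide queue, serviced only by the
   main thread (see make_pending_calls() in Python/ceval.c). */
_PyEval_AddPendingCall(tstate, &runtime->ceval,
                       report_wakeup_send_error,
                       (void *)(intptr_t) last_error);
```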
```diff
diff --git a/Include/internal/pycore_ceval.h b/Include/internal/pycore_ceval.h
index 2c6df9a9af..4c1c0e2439 100644
--- a/Include/internal/pycore_ceval.h
+++ b/Include/internal/pycore_ceval.h
@@ -12,22 +12,19 @@ extern "C" {
 #include "pycore_pystate.h"
 #include "pythread.h"
 
+PyAPI_FUNC(void) _Py_FinishPendingCalls(_PyRuntimeState *runtime);
 PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *);
 PyAPI_FUNC(void) _PyEval_FiniThreads(
-    struct _ceval_runtime_state *);
+    struct _ceval_runtime_state *ceval);
 PyAPI_FUNC(void) _PyEval_SignalReceived(
-    struct _ceval_runtime_state *);
+    struct _ceval_runtime_state *ceval);
 PyAPI_FUNC(int) _PyEval_AddPendingCall(
     PyThreadState *tstate,
-    struct _ceval_runtime_state *,
-    struct _ceval_interpreter_state *,
-    unsigned long thread_id,
+    struct _ceval_runtime_state *ceval,
     int (*func)(void *),
     void *arg);
-PyAPI_FUNC(void) _PyEval_FinishPendingCalls(PyInterpreterState *);
 PyAPI_FUNC(void) _PyEval_SignalAsyncExc(
-    struct _ceval_runtime_state *,
-    struct _ceval_interpreter_state *);
+    struct _ceval_runtime_state *ceval);
 PyAPI_FUNC(void) _PyEval_ReInitThreads(
     _PyRuntimeState *runtime);

diff --git a/Include/internal/pycore_pystate.h b/Include/internal/pycore_pystate.h
index aca5533022..520a74b8a6 100644
--- a/Include/internal/pycore_pystate.h
+++ b/Include/internal/pycore_pystate.h
@@ -25,7 +25,7 @@ struct pyruntimestate;
 
 /* ceval state */
 
-struct _ceval_pending_calls {
+struct _pending_calls {
     int finishing;
     PyThread_type_lock lock;
     /* Request for running pending calls. */
@@ -36,7 +36,6 @@ struct _ceval_pending_calls {
     int async_exc;
 #define NPENDINGCALLS 32
     struct {
-        unsigned long thread_id;
         int (*func)(void *);
         void *arg;
     } calls[NPENDINGCALLS];
@@ -54,21 +53,15 @@ struct _ceval_runtime_state {
     int tracing_possible;
     /* This single variable consolidates all requests to break out of
        the fast path in the eval loop. */
-    // XXX This can move to _ceval_interpreter_state once all parts
-    // from COMPUTE_EVAL_BREAKER have moved under PyInterpreterState.
     _Py_atomic_int eval_breaker;
     /* Request for dropping the GIL */
     _Py_atomic_int gil_drop_request;
+    struct _pending_calls pending;
     /* Request for checking signals. */
     _Py_atomic_int signals_pending;
     struct _gil_runtime_state gil;
 };
 
-struct _ceval_interpreter_state {
-    struct _ceval_pending_calls pending;
-};
-
-
 /* interpreter state */
 
 typedef PyObject* (*_PyFrameEvalFunction)(struct _frame *, int);
@@ -143,7 +136,6 @@ struct _is {
 
     uint64_t tstate_next_unique_id;
 
-    struct _ceval_interpreter_state ceval;
     struct _warnings_runtime_state warnings;
 
     PyObject *audit_hooks;

diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py
index 88bda057ed..43d7a08da9 100644
--- a/Lib/test/test_capi.py
+++ b/Lib/test/test_capi.py
@@ -431,7 +431,7 @@ class TestPendingCalls(unittest.TestCase):
 
     def test_pendingcalls_threaded(self):
 
         #do every callback on a separate thread
-        n = 32 #total callbacks (see NPENDINGCALLS in pycore_ceval.h)
+        n = 32 #total callbacks
         threads = []
         class foo(object):pass
         context = foo()

diff --git a/Misc/NEWS.d/next/Core and Builtins/2018-09-15-12-13-46.bpo-33608.avmvVP.rst b/Misc/NEWS.d/next/Core and Builtins/2018-09-15-12-13-46.bpo-33608.avmvVP.rst
deleted file mode 100644
index 73a01a1f46..0000000000
--- a/Misc/NEWS.d/next/Core and Builtins/2018-09-15-12-13-46.bpo-33608.avmvVP.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-We added a new internal _Py_AddPendingCall() that operates relative to the
-provided interpreter.  This allows us to use the existing implementation to
-ask another interpreter to do work that cannot be done in the current
-interpreter, like decref an object the other interpreter owns.  The existing
-Py_AddPendingCall() only operates relative to the main interpreter.

diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c
index 40e0826ce1..f059b4df11 100644
--- a/Modules/_testcapimodule.c
+++ b/Modules/_testcapimodule.c
@@ -2677,7 +2677,6 @@ pending_threadfunc(PyObject *self, PyObject *arg)
     Py_INCREF(callable);
 
     Py_BEGIN_ALLOW_THREADS
-    /* XXX Use the internal _Py_AddPendingCall(). */
     r = Py_AddPendingCall(&_pending_callback, callable);
     Py_END_ALLOW_THREADS

diff --git a/Modules/signalmodule.c b/Modules/signalmodule.c
index 1964646da2..7698984ff3 100644
--- a/Modules/signalmodule.c
+++ b/Modules/signalmodule.c
@@ -21,7 +21,6 @@
 #include <process.h>
 #endif
 #endif
-#include "internal/pycore_pystate.h"
 
 #ifdef HAVE_SIGNAL_H
 #include <signal.h>
@@ -260,7 +259,6 @@ trip_signal(int sig_num)
     /* Notify ceval.c */
     _PyRuntimeState *runtime = &_PyRuntime;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    PyInterpreterState *interp = runtime->interpreters.main;
     _PyEval_SignalReceived(&runtime->ceval);
 
     /* And then write to the wakeup fd *after* setting all the globals and
@@ -301,10 +299,7 @@ trip_signal(int sig_num)
                 {
                     /* Py_AddPendingCall() isn't signal-safe, but we
                        still use it for this exceptional case. */
-                    _PyEval_AddPendingCall(tstate,
-                                           &runtime->ceval,
-                                           &interp->ceval,
-                                           runtime->main_thread,
+                    _PyEval_AddPendingCall(tstate, &runtime->ceval,
                                            report_wakeup_send_error,
                                            (void *)(intptr_t) last_error);
                 }
@@ -323,10 +318,7 @@
                 {
                     /* Py_AddPendingCall() isn't signal-safe, but we
                        still use it for this exceptional case. */
-                    _PyEval_AddPendingCall(tstate,
-                                           &runtime->ceval,
-                                           &interp->ceval,
-                                           runtime->main_thread,
+                    _PyEval_AddPendingCall(tstate, &runtime->ceval,
                                            report_wakeup_write_error,
                                            (void *)(intptr_t)errno);
                 }

diff --git a/Python/ceval.c b/Python/ceval.c
index cb0275c418..0a4af915d6 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -115,65 +115,66 @@ static size_t opcache_global_hits = 0;
 static size_t opcache_global_misses = 0;
 #endif
 
+#define GIL_REQUEST _Py_atomic_load_relaxed(&ceval->gil_drop_request)
+
 /* This can set eval_breaker to 0 even though gil_drop_request became
    1.  We believe this is all right because the eval loop will release
    the GIL eventually anyway. */
-#define COMPUTE_EVAL_BREAKER(ceval_r, ceval_i) \
+#define COMPUTE_EVAL_BREAKER(ceval) \
     _Py_atomic_store_relaxed( \
-        &(ceval_r)->eval_breaker, \
-        _Py_atomic_load_relaxed(&(ceval_r)->gil_drop_request) | \
-        _Py_atomic_load_relaxed(&(ceval_r)->signals_pending) | \
-        _Py_atomic_load_relaxed(&(ceval_i)->pending.calls_to_do) | \
-        (ceval_i)->pending.async_exc)
+        &(ceval)->eval_breaker, \
+        GIL_REQUEST | \
+        _Py_atomic_load_relaxed(&(ceval)->signals_pending) | \
+        _Py_atomic_load_relaxed(&(ceval)->pending.calls_to_do) | \
+        (ceval)->pending.async_exc)
 
-#define SET_GIL_DROP_REQUEST(ceval_r) \
+#define SET_GIL_DROP_REQUEST(ceval) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval_r)->gil_drop_request, 1); \
-        _Py_atomic_store_relaxed(&(ceval_r)->eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&(ceval)->gil_drop_request, 1); \
+        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
     } while (0)
 
-#define RESET_GIL_DROP_REQUEST(ceval_r, ceval_i) \
+#define RESET_GIL_DROP_REQUEST(ceval) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval_r)->gil_drop_request, 0); \
-        COMPUTE_EVAL_BREAKER(ceval_r, ceval_i); \
+        _Py_atomic_store_relaxed(&(ceval)->gil_drop_request, 0); \
+        COMPUTE_EVAL_BREAKER(ceval); \
     } while (0)
 
 /* Pending calls are only modified under pending_lock */
-#define SIGNAL_PENDING_CALLS(ceval_r, ceval_i) \
+#define SIGNAL_PENDING_CALLS(ceval) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval_i)->pending.calls_to_do, 1); \
-        _Py_atomic_store_relaxed(&(ceval_r)->eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&(ceval)->pending.calls_to_do, 1); \
+        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
     } while (0)
 
-#define UNSIGNAL_PENDING_CALLS(ceval_r, ceval_i) \
+#define UNSIGNAL_PENDING_CALLS(ceval) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval_i)->pending.calls_to_do, 0); \
-        COMPUTE_EVAL_BREAKER(ceval_r, ceval_i); \
+        _Py_atomic_store_relaxed(&(ceval)->pending.calls_to_do, 0); \
+        COMPUTE_EVAL_BREAKER(ceval); \
     } while (0)
 
-#define SIGNAL_PENDING_SIGNALS(ceval_r) \
+#define SIGNAL_PENDING_SIGNALS(ceval) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval_r)->signals_pending, 1); \
-        _Py_atomic_store_relaxed(&(ceval_r)->eval_breaker, 1); \
+        _Py_atomic_store_relaxed(&(ceval)->signals_pending, 1); \
+        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
     } while (0)
 
-#define UNSIGNAL_PENDING_SIGNALS(ceval_r, ceval_i) \
+#define UNSIGNAL_PENDING_SIGNALS(ceval) \
     do { \
-        _Py_atomic_store_relaxed(&(ceval_r)->signals_pending, 0); \
-        COMPUTE_EVAL_BREAKER(ceval_r, ceval_i); \
+        _Py_atomic_store_relaxed(&(ceval)->signals_pending, 0); \
+        COMPUTE_EVAL_BREAKER(ceval); \
     } while (0)
 
-#define SIGNAL_ASYNC_EXC(ceval_r, ceval_i) \
+#define SIGNAL_ASYNC_EXC(ceval) \
     do { \
-        (ceval_i)->pending.async_exc = 1; \
-        _Py_atomic_store_relaxed(&(ceval_r)->eval_breaker, 1); \
+        (ceval)->pending.async_exc = 1; \
+        _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
     } while (0)
 
-#define UNSIGNAL_ASYNC_EXC(ceval_r, ceval_i) \
+#define UNSIGNAL_ASYNC_EXC(ceval) \
     do { \
-        (ceval_i)->pending.async_exc = 0; \
-        COMPUTE_EVAL_BREAKER(ceval_r, ceval_i); \
+        (ceval)->pending.async_exc = 0; \
+        COMPUTE_EVAL_BREAKER(ceval); \
     } while (0)
@@ -193,8 +194,8 @@ void
 PyEval_InitThreads(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
-    struct _gil_runtime_state *gil = &ceval_r->gil;
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    struct _gil_runtime_state *gil = &ceval->gil;
     if (gil_created(gil)) {
         return;
     }
@@ -202,15 +203,19 @@ PyEval_InitThreads(void)
     PyThread_init_thread();
     create_gil(gil);
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    take_gil(ceval_r, tstate);
+    take_gil(ceval, tstate);
 
-    // The pending calls mutex is initialized in PyInterpreterState_New().
+    struct _pending_calls *pending = &ceval->pending;
+    pending->lock = PyThread_allocate_lock();
+    if (pending->lock == NULL) {
+        Py_FatalError("Can't initialize threads for pending calls");
+    }
 }
 
 void
-_PyEval_FiniThreads(struct _ceval_runtime_state *ceval_r)
+_PyEval_FiniThreads(struct _ceval_runtime_state *ceval)
 {
-    struct _gil_runtime_state *gil = &ceval_r->gil;
+    struct _gil_runtime_state *gil = &ceval->gil;
     if (!gil_created(gil)) {
         return;
     }
@@ -218,24 +223,20 @@ _PyEval_FiniThreads(struct _ceval_runtime_state *ceval_r)
     destroy_gil(gil);
     assert(!gil_created(gil));
 
-    // The pending calls mutex is freed in PyInterpreterState_Delete().
+    struct _pending_calls *pending = &ceval->pending;
+    if (pending->lock != NULL) {
+        PyThread_free_lock(pending->lock);
+        pending->lock = NULL;
+    }
 }
 
 static inline void
 exit_thread_if_finalizing(PyThreadState *tstate)
 {
-    PyInterpreterState *interp = tstate->interp;
-    // Stop if thread/interpreter inalization already stated.
-    if (interp == NULL) {
-        return;
-    }
-    _PyRuntimeState *runtime = interp->runtime;
-    if (runtime == NULL) {
-        return;
-    }
-    // Don't exit if the main thread (i.e. of the main interpreter).
+    _PyRuntimeState *runtime = tstate->interp->runtime;
+    /* _Py_Finalizing is protected by the GIL */
     if (runtime->finalizing != NULL && !_Py_CURRENTLY_FINALIZING(runtime, tstate)) {
-        drop_gil(&runtime->ceval, &interp->ceval, tstate);
+        drop_gil(&runtime->ceval, tstate);
         PyThread_exit_thread();
     }
 }
@@ -273,12 +274,12 @@ void
 PyEval_AcquireLock(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
     if (tstate == NULL) {
         Py_FatalError("PyEval_AcquireLock: current thread state is NULL");
     }
-    take_gil(ceval_r, tstate);
+    take_gil(ceval, tstate);
     exit_thread_if_finalizing(tstate);
 }
 
@@ -286,21 +287,12 @@ void
 PyEval_ReleaseLock(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
+    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
     /* This function must succeed when the current thread state is NULL.
        We therefore avoid PyThreadState_Get() which dumps a fatal error
        in debug mode.
     */
-    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    // Fall back to the main interpreter if there is not active Python
-    // thread.  This only affects the eval_breaker.
-    PyInterpreterState *interp = runtime->interpreters.main;
-    if (tstate != NULL) {
-        interp = tstate->interp;
-        if (interp == NULL) {
-            Py_FatalError("PyEval_ReleaseLock: NULL interpreter state");
-        }
-    }
-    drop_gil(&runtime->ceval, &interp->ceval, tstate);
+    drop_gil(&runtime->ceval, tstate);
 }
 
 void
@@ -309,19 +301,14 @@ PyEval_AcquireThread(PyThreadState *tstate)
     if (tstate == NULL) {
         Py_FatalError("PyEval_AcquireThread: NULL new thread state");
     }
-    PyInterpreterState *interp = tstate->interp;
-    if (interp == NULL) {
-        Py_FatalError("PyEval_AcquireThread: NULL interpreter state");
-    }
-    _PyRuntimeState *runtime = interp->runtime;
-    if (runtime == NULL) {
-        Py_FatalError("PyEval_AcquireThread: NULL runtime state");
-    }
-    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
+    assert(tstate->interp != NULL);
+
+    _PyRuntimeState *runtime = tstate->interp->runtime;
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
 
     /* Check someone has called PyEval_InitThreads() to create the lock */
-    assert(gil_created(&ceval_r->gil));
-    take_gil(ceval_r, tstate);
+    assert(gil_created(&ceval->gil));
+    take_gil(ceval, tstate);
     exit_thread_if_finalizing(tstate);
     if (_PyThreadState_Swap(&runtime->gilstate, tstate) != NULL) {
         Py_FatalError("PyEval_AcquireThread: non-NULL old thread state");
@@ -334,20 +321,14 @@ PyEval_ReleaseThread(PyThreadState *tstate)
     if (tstate == NULL) {
         Py_FatalError("PyEval_ReleaseThread: NULL thread state");
     }
-    PyInterpreterState *interp = tstate->interp;
-    if (interp == NULL) {
-        Py_FatalError("PyEval_ReleaseThread: NULL interpreter state");
-    }
-    _PyRuntimeState *runtime = interp->runtime;
-    if (runtime == NULL) {
-        Py_FatalError("PyEval_ReleaseThread: NULL runtime state");
-    }
+    assert(tstate->interp != NULL);
+    _PyRuntimeState *runtime = tstate->interp->runtime;
     PyThreadState *new_tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
     if (new_tstate != tstate) {
         Py_FatalError("PyEval_ReleaseThread: wrong thread state");
     }
-    drop_gil(&runtime->ceval, &interp->ceval, tstate);
+    drop_gil(&runtime->ceval, tstate);
 }
 
 /* This function is called from PyOS_AfterFork_Child to destroy all threads
@@ -358,17 +339,15 @@ PyEval_ReleaseThread(PyThreadState *tstate)
 void
 _PyEval_ReInitThreads(_PyRuntimeState *runtime)
 {
-    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
-    if (!gil_created(&ceval_r->gil)) {
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    if (!gil_created(&ceval->gil)) {
         return;
     }
-    recreate_gil(&ceval_r->gil);
+    recreate_gil(&ceval->gil);
     PyThreadState *current_tstate = _PyRuntimeState_GetThreadState(runtime);
-    take_gil(ceval_r, current_tstate);
+    take_gil(ceval, current_tstate);
 
-    // Only the main interpreter remains, so ignore the rest.
-    PyInterpreterState *interp = _PyRuntime.interpreters.main;
-    struct _ceval_pending_calls *pending = &interp->ceval.pending;
+    struct _pending_calls *pending = &ceval->pending;
     pending->lock = PyThread_allocate_lock();
     if (pending->lock == NULL) {
         Py_FatalError("Can't initialize threads for pending calls");
@@ -382,28 +361,22 @@ _PyEval_ReInitThreads(_PyRuntimeState *runtime)
    raised. */
 
 void
-_PyEval_SignalAsyncExc(struct _ceval_runtime_state *ceval_r,
-                       struct _ceval_interpreter_state *ceval_i)
+_PyEval_SignalAsyncExc(struct _ceval_runtime_state *ceval)
 {
-    SIGNAL_ASYNC_EXC(ceval_r, ceval_i);
+    SIGNAL_ASYNC_EXC(ceval);
 }
 
 PyThreadState *
 PyEval_SaveThread(void)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
     PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
     if (tstate == NULL) {
         Py_FatalError("PyEval_SaveThread: NULL tstate");
     }
-    PyInterpreterState *interp = tstate->interp;
-    if (interp == NULL) {
-        Py_FatalError("PyEval_SaveThread: NULL interpreter state");
-    }
-
-    assert(gil_created(&ceval_r->gil));
-    drop_gil(ceval_r, &interp->ceval, tstate);
+    assert(gil_created(&ceval->gil));
+    drop_gil(ceval, tstate);
     return tstate;
 }
 
@@ -413,20 +386,14 @@ PyEval_RestoreThread(PyThreadState *tstate)
     if (tstate == NULL) {
         Py_FatalError("PyEval_RestoreThread: NULL tstate");
     }
-    PyInterpreterState *interp = tstate->interp;
-    if (interp == NULL) {
-        Py_FatalError("PyEval_RestoreThread: NULL interpreter state");
-    }
-    _PyRuntimeState *runtime = interp->runtime;
-    if (runtime == NULL) {
-        Py_FatalError("PyEval_RestoreThread: NULL runtime state");
-    }
-    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
+    assert(tstate->interp != NULL);
 
-    assert(gil_created(&ceval_r->gil));
+    _PyRuntimeState *runtime = tstate->interp->runtime;
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    assert(gil_created(&ceval->gil));
 
     int err = errno;
-    take_gil(ceval_r, tstate);
+    take_gil(ceval, tstate);
     exit_thread_if_finalizing(tstate);
     errno = err;
@@ -457,17 +424,17 @@ PyEval_RestoreThread(PyThreadState *tstate)
 */
 
 void
-_PyEval_SignalReceived(struct _ceval_runtime_state *ceval_r)
+_PyEval_SignalReceived(struct _ceval_runtime_state *ceval)
 {
     /* bpo-30703: Function called when the C signal handler of Python gets a
        signal.  We cannot queue a callback using Py_AddPendingCall() since
        that function is not async-signal-safe. */
-    SIGNAL_PENDING_SIGNALS(ceval_r);
+    SIGNAL_PENDING_SIGNALS(ceval);
 }
 
 /* Push one item onto the queue while holding the lock. */
 static int
-_push_pending_call(struct _ceval_pending_calls *pending, unsigned long thread_id,
+_push_pending_call(struct _pending_calls *pending,
                    int (*func)(void *), void *arg)
 {
     int i = pending->last;
@@ -475,7 +442,6 @@ _push_pending_call(struct _ceval_pending_calls *pending, unsigned long thread_id
     if (j == pending->first) {
         return -1; /* Queue full */
     }
-    pending->calls[i].thread_id = thread_id;
     pending->calls[i].func = func;
     pending->calls[i].arg = arg;
     pending->last = j;
@@ -484,7 +450,7 @@ _push_pending_call(struct _ceval_pending_calls *pending, unsigned long thread_id
 
 /* Pop one item off the queue while holding the lock. */
 static void
-_pop_pending_call(struct _ceval_pending_calls *pending, unsigned long *thread_id,
+_pop_pending_call(struct _pending_calls *pending,
                   int (**func)(void *), void **arg)
 {
     int i = pending->first;
@@ -494,7 +460,6 @@ _pop_pending_call(struct _ceval_pending_calls *pending, unsigned long *thread_id
 
     *func = pending->calls[i].func;
     *arg = pending->calls[i].arg;
-    *thread_id = pending->calls[i].thread_id;
     pending->first = (i + 1) % NPENDINGCALLS;
 }
@@ -505,12 +470,10 @@ _pop_pending_call(struct _ceval_pending_calls *pending, unsigned long *thread_id
 
 int
 _PyEval_AddPendingCall(PyThreadState *tstate,
-                       struct _ceval_runtime_state *ceval_r,
-                       struct _ceval_interpreter_state *ceval_i,
-                       unsigned long thread_id,
+                       struct _ceval_runtime_state *ceval,
                        int (*func)(void *), void *arg)
 {
-    struct _ceval_pending_calls *pending = &ceval_i->pending;
+    struct _pending_calls *pending = &ceval->pending;
 
     PyThread_acquire_lock(pending->lock, WAIT_LOCK);
     if (pending->finishing) {
@@ -525,27 +488,20 @@ _PyEval_AddPendingCall(PyThreadState *tstate,
         _PyErr_Restore(tstate, exc, val, tb);
         return -1;
     }
-    int result = _push_pending_call(pending, thread_id, func, arg);
-
-    /* signal loop */
-    SIGNAL_PENDING_CALLS(ceval_r, ceval_i);
+    int result = _push_pending_call(pending, func, arg);
     PyThread_release_lock(pending->lock);
 
+    /* signal main loop */
+    SIGNAL_PENDING_CALLS(ceval);
     return result;
 }
 
-/* Py_AddPendingCall() is a simple wrapper for the sake
-   of backward-compatibility. */
 int
 Py_AddPendingCall(int (*func)(void *), void *arg)
 {
     _PyRuntimeState *runtime = &_PyRuntime;
-    PyInterpreterState *interp = runtime->interpreters.main;
     PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    return _PyEval_AddPendingCall(tstate,
-                                  &runtime->ceval, &interp->ceval,
-                                  runtime->main_thread,
-                                  func, arg);
+    return _PyEval_AddPendingCall(tstate, &runtime->ceval, func, arg);
 }
 
 static int
@@ -566,69 +522,47 @@ handle_signals(_PyRuntimeState *runtime)
         return 0;
     }
 
-    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
-    struct _ceval_interpreter_state *ceval_i = &interp->ceval;
-    UNSIGNAL_PENDING_SIGNALS(ceval_r, ceval_i);
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
+    UNSIGNAL_PENDING_SIGNALS(ceval);
     if (_PyErr_CheckSignals() < 0) {
-        SIGNAL_PENDING_SIGNALS(ceval_r); /* We're not done yet */
+        SIGNAL_PENDING_SIGNALS(ceval); /* We're not done yet */
         return -1;
     }
     return 0;
 }
 
 static int
-make_pending_calls(PyInterpreterState *interp)
+make_pending_calls(_PyRuntimeState *runtime)
 {
-    if (interp == NULL) {
-        Py_FatalError("make_pending_calls: NULL interpreter state");
-    }
-    _PyRuntimeState *runtime = interp->runtime;
-    if (runtime == NULL) {
-        Py_FatalError("make_pending_calls: NULL runtime state");
-    }
-    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    if (tstate == NULL) {
-        Py_FatalError("make_pending_calls: NULL thread state");
-    }
-    if (tstate->interp == NULL || tstate->interp != interp) {
-        Py_FatalError("make_pending_calls: thread state mismatch");
-    }
     static int busy = 0;
 
+    /* only service pending calls on main thread */
+    if (PyThread_get_thread_ident() != runtime->main_thread) {
+        return 0;
+    }
+
     /* don't perform recursive pending calls */
     if (busy) {
         return 0;
     }
     busy = 1;
-    struct _ceval_runtime_state *ceval_r = &runtime->ceval;
-    struct _ceval_interpreter_state *ceval_i = &interp->ceval;
+    struct _ceval_runtime_state *ceval = &runtime->ceval;
     /* unsignal before starting to call callbacks, so that any callback
        added in-between re-signals */
-    UNSIGNAL_PENDING_CALLS(ceval_r, ceval_i);
+    UNSIGNAL_PENDING_CALLS(ceval);
     int res = 0;
 
     /* perform a bounded number of calls, in case of recursion */
-    struct _ceval_pending_calls *pending = &ceval_i->pending;
-    unsigned long thread_id = 0;
+    struct _pending_calls *pending = &ceval->pending;
     for (int i=0; i<NPENDINGCALLS; i++) {
         int (*func)(void *) = NULL;
         void *arg = NULL;
 
         /* pop one item off the queue while holding the lock */
         PyThread_acquire_lock(pending->lock, WAIT_LOCK);
-        _pop_pending_call(pending, &thread_id, &func, &arg);
+        _pop_pending_call(pending, &func, &arg);
         PyThread_release_lock(pending->lock);
 
-        if (thread_id && PyThread_get_thread_ident() != thread_id) {
-            // Thread mismatch, so move it to the end of the list
-            // and start over.
-            _PyEval_AddPendingCall(tstate,
-                                   &runtime->ceval, &interp->ceval,
-                                   thread_id,
-                                   func, arg);
-            goto error;
-        }
-
         /* having released the lock, perform the callback */
         if (func == NULL) {
             break;
@@ -644,16 +578,17 @@ make_pending_calls(PyInterpreterState *interp)
 
 error:
     busy = 0;
-    SIGNAL_PENDING_CALLS(ceval_r, ceval_i);
+    SIGNAL_PENDING_CALLS(ceval);
     return res;
 }
 
 void
-_PyEval_FinishPendingCalls(PyInterpreterState *interp)
+_Py_FinishPendingCalls(_PyRuntimeState *runtime)
 {
     assert(PyGILState_Check());
 
-    struct _ceval_pending_calls *pending = &interp->ceval.pending;
+    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    struct _pending_calls *pending = &runtime->ceval.pending;
 
     PyThread_acquire_lock(pending->lock, WAIT_LOCK);
     pending->finishing = 1;
@@ -663,19 +598,12 @@
         return;
     }
 
-    if (make_pending_calls(interp) < 0) {
-        _PyRuntimeState *runtime = interp->runtime;
-        if (runtime == NULL) {
-            runtime = &_PyRuntime;
-        }
-        PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-        if (tstate != NULL) {
-            PyObject *exc, *val, *tb;
-            _PyErr_Fetch(tstate, &exc, &val, &tb);
-            PyErr_BadInternalCall();
-            _PyErr_ChainExceptions(exc, val, tb);
-            _PyErr_Print(tstate);
-        }
+    if (make_pending_calls(runtime) < 0) {
+        PyObject *exc, *val, *tb;
+        _PyErr_Fetch(tstate, &exc, &val, &tb);
+        PyErr_BadInternalCall();
+        _PyErr_ChainExceptions(exc, val, tb);
+        _PyErr_Print(tstate);
     }
 }
@@ -694,8 +622,7 @@ Py_MakePendingCalls(void)
         return res;
     }
 
-    PyInterpreterState *interp = _PyRuntime.interpreters.main;
-    res = make_pending_calls(interp);
+    res = make_pending_calls(runtime);
     if (res != 0) {
         return res;
     }
@@ -712,11 +639,11 @@ Py_MakePendingCalls(void)
 int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
 
 void
-_PyEval_Initialize(struct _ceval_runtime_state *ceval_r)
+_PyEval_Initialize(struct _ceval_runtime_state *state)
 {
-    ceval_r->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
+    state->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
     _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
-    _gil_initialize(&ceval_r->gil);
+    _gil_initialize(&state->gil);
 }
 
 int
@@ -728,9 +655,9 @@ Py_GetRecursionLimit(void)
 void
 Py_SetRecursionLimit(int new_limit)
 {
-    struct _ceval_runtime_state *ceval_r = &_PyRuntime.ceval;
-    ceval_r->recursion_limit = new_limit;
-    _Py_CheckRecursionLimit = ceval_r->recursion_limit;
+    struct _ceval_runtime_state *ceval = &_PyRuntime.ceval;
+    ceval->recursion_limit = new_limit;
+    _Py_CheckRecursionLimit = ceval->recursion_limit;
 }
 
 /* the macro Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
@@ -779,7 +706,7 @@ _Py_CheckRecursiveCall(const char *where)
 static int do_raise(PyThreadState *tstate, PyObject *exc, PyObject *cause);
 static int unpack_iterable(PyThreadState *, PyObject *, int, int, PyObject **);
 
-#define _Py_TracingPossible(ceval_r) ((ceval_r)->tracing_possible)
+#define _Py_TracingPossible(ceval) ((ceval)->tracing_possible)
 
 
 PyObject *
@@ -825,10 +752,8 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
     PyObject *retval = NULL;            /* Return value */
     _PyRuntimeState * const runtime = &_PyRuntime;
     PyThreadState * const tstate = _PyRuntimeState_GetThreadState(runtime);
-    PyInterpreterState * const interp = tstate->interp;
-    struct _ceval_runtime_state * const ceval_r = &runtime->ceval;
-    struct _ceval_interpreter_state * const ceval_i = &interp->ceval;
-    _Py_atomic_int * const eval_breaker = &ceval_r->eval_breaker;
+    struct _ceval_runtime_state * const ceval = &runtime->ceval;
+    _Py_atomic_int * const eval_breaker = &ceval->eval_breaker;
     PyCodeObject *co;
 
     /* when tracing we set things up so that
@@ -916,7 +841,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
 #ifdef LLTRACE
 #define FAST_DISPATCH() \
     { \
-        if (!lltrace && !_Py_TracingPossible(ceval_r) && !PyDTrace_LINE_ENABLED()) { \
+        if (!lltrace && !_Py_TracingPossible(ceval) && !PyDTrace_LINE_ENABLED()) { \
             f->f_lasti = INSTR_OFFSET(); \
             NEXTOPARG(); \
             goto *opcode_targets[opcode]; \
@@ -926,7 +851,7 @@ _PyEval_EvalFrameDefault(PyFrameObject *f, int throwflag)
 #else
 #define FAST_DISPATCH() \
     { \
-        if (!_Py_TracingPossible(ceval_r) && !PyDTrace_LINE_ENABLED()) { \
+        if (!_Py_TracingPossible(ceval) && !PyDTrace_LINE_ENABLED()) { \
             f->f_lasti = INSTR_OFFSET(); \
             NEXTOPARG(); \
             goto *opcode_targets[opcode]; \
@@ -1295,27 +1220,27 @@ main_loop:
                 goto fast_next_opcode;
             }
 
-            if (_Py_atomic_load_relaxed(&ceval_r->signals_pending)) {
+            if (_Py_atomic_load_relaxed(&ceval->signals_pending)) {
                 if (handle_signals(runtime) != 0) {
                     goto error;
                 }
             }
-            if (_Py_atomic_load_relaxed(&ceval_i->pending.calls_to_do)) {
-                if (make_pending_calls(interp) != 0) {
+            if (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do)) {
+                if (make_pending_calls(runtime) != 0) {
                     goto error;
                 }
             }
 
-            if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request)) {
+            if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
                 /* Give another thread a chance */
                 if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) {
                     Py_FatalError("ceval: tstate mix-up");
                 }
-                drop_gil(ceval_r, ceval_i, tstate);
+                drop_gil(ceval, tstate);
 
                 /* Other threads may run now */
 
-                take_gil(ceval_r, tstate);
+                take_gil(ceval, tstate);
 
                 /* Check if we should make a quick exit. */
                 exit_thread_if_finalizing(tstate);
@@ -1328,7 +1253,7 @@ main_loop:
             if (tstate->async_exc != NULL) {
                 PyObject *exc = tstate->async_exc;
                 tstate->async_exc = NULL;
-                UNSIGNAL_ASYNC_EXC(ceval_r, ceval_i);
+                UNSIGNAL_ASYNC_EXC(ceval);
                 _PyErr_SetNone(tstate, exc);
                 Py_DECREF(exc);
                 goto error;
@@ -1343,7 +1268,7 @@ main_loop:
 
         /* line-by-line tracing support */
 
-        if (_Py_TracingPossible(ceval_r) &&
+        if (_Py_TracingPossible(ceval) &&
             tstate->c_tracefunc != NULL && !tstate->tracing) {
             int err;
             /* see maybe_call_line_trace

diff --git a/Python/ceval_gil.h b/Python/ceval_gil.h
index b44d0abad3..34d48c990c 100644
--- a/Python/ceval_gil.h
+++ b/Python/ceval_gil.h
@@ -141,11 +141,9 @@ static void recreate_gil(struct _gil_runtime_state *gil)
 }
 
 static void
-drop_gil(struct _ceval_runtime_state *ceval_r,
-         struct _ceval_interpreter_state *ceval_i,
-         PyThreadState *tstate)
+drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
 {
-    struct _gil_runtime_state *gil = &ceval_r->gil;
+    struct _gil_runtime_state *gil = &ceval->gil;
     if (!_Py_atomic_load_relaxed(&gil->locked)) {
         Py_FatalError("drop_gil: GIL is not locked");
     }
@@ -165,12 +163,12 @@ drop_gil(struct _ceval_runtime_state *ceval_r,
     MUTEX_UNLOCK(gil->mutex);
 
 #ifdef FORCE_SWITCHING
-    if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request) && tstate != NULL) {
+    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
         MUTEX_LOCK(gil->switch_mutex);
         /* Not switched yet => wait */
         if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
         {
-            RESET_GIL_DROP_REQUEST(ceval_r, ceval_i);
+            RESET_GIL_DROP_REQUEST(ceval);
             /* NOTE: if COND_WAIT does not atomically start waiting when
                releasing the mutex, another thread can run through, take
                the GIL and drop it again, and reset the condition
@@ -183,19 +181,13 @@ drop_gil(struct _ceval_runtime_state *ceval_r,
 }
 
 static void
-take_gil(struct _ceval_runtime_state *ceval_r,
-         PyThreadState *tstate)
+take_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
 {
     if (tstate == NULL) {
         Py_FatalError("take_gil: NULL tstate");
     }
-    PyInterpreterState *interp = tstate->interp;
-    if (interp == NULL) {
-        Py_FatalError("take_gil: NULL interp");
-    }
-    struct _ceval_interpreter_state *ceval_i = &interp->ceval;
-    struct _gil_runtime_state *gil = &ceval_r->gil;
+    struct _gil_runtime_state *gil = &ceval->gil;
 
     int err = errno;
     MUTEX_LOCK(gil->mutex);
@@ -218,7 +210,7 @@ take_gil(struct _ceval_runtime_state *ceval_r,
             _Py_atomic_load_relaxed(&gil->locked) &&
             gil->switch_number == saved_switchnum)
         {
-            SET_GIL_DROP_REQUEST(ceval_r);
+            SET_GIL_DROP_REQUEST(ceval);
         }
     }
 _ready:
@@ -240,11 +232,11 @@ _ready:
     COND_SIGNAL(gil->switch_cond);
     MUTEX_UNLOCK(gil->switch_mutex);
 #endif
-    if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request)) {
-        RESET_GIL_DROP_REQUEST(ceval_r, ceval_i);
+    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
+        RESET_GIL_DROP_REQUEST(ceval);
     }
     if (tstate->async_exc != NULL) {
-        _PyEval_SignalAsyncExc(ceval_r, ceval_i);
+        _PyEval_SignalAsyncExc(ceval);
     }
 
     MUTEX_UNLOCK(gil->mutex);

diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c
index fc7e5510b2..fca2ee6551 100644
--- a/Python/pylifecycle.c
+++ b/Python/pylifecycle.c
@@ -1147,31 +1147,15 @@ Py_FinalizeEx(void)
         return status;
     }
 
-    /* Get current thread state and interpreter pointer */
-    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    PyInterpreterState *interp = tstate->interp;
-
     // Wrap up existing "threading"-module-created, non-daemon threads.
     wait_for_thread_shutdown();
 
     // Make any remaining pending calls.
-    /* XXX For the moment we are going to ignore lingering pending calls.
-     * We've seen sporadic on some of the buildbots during finalization
-     * with the changes for per-interpreter pending calls (see bpo-33608),
-     * meaning the previous _PyEval_FinishPendincCalls() call here is
-     * a trigger, if not responsible.
-     *
-     * Ignoring pending calls at this point in the runtime lifecycle
-     * is okay (for now) for the following reasons:
-     *
-     *  * pending calls are still not a widely-used feature
-     *  * this only affects runtime finalization, where the process is
-     *    likely to end soon anyway (except for some embdding cases)
-     *
-     * See bpo-37127 about resolving the problem.  Ultimately the call
-     * here should be re-enabled.
-     */
-    //_PyEval_FinishPendingCalls(interp);
+    _Py_FinishPendingCalls(runtime);
+
+    /* Get current thread state and interpreter pointer */
+    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
+    PyInterpreterState *interp = tstate->interp;
 
     /* The interpreter is still entirely intact at this point, and the
      * exit funcs may be relying on that.  In particular, if some thread
@@ -1599,9 +1583,6 @@ Py_EndInterpreter(PyThreadState *tstate)
     // Wrap up existing "threading"-module-created, non-daemon threads.
     wait_for_thread_shutdown();
 
-    // Make any remaining pending calls.
-    _PyEval_FinishPendingCalls(interp);
-
     call_py_exitfuncs(interp);
 
     if (tstate != interp->tstate_head || tstate->next != NULL)

diff --git a/Python/pystate.c b/Python/pystate.c
index a9f3389a0d..2b7db0e48d 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -218,13 +218,6 @@ PyInterpreterState_New(void)
         return NULL;
     }
 
-    interp->ceval.pending.lock = PyThread_allocate_lock();
-    if (interp->ceval.pending.lock == NULL) {
-        PyErr_SetString(PyExc_RuntimeError,
-                        "failed to create interpreter ceval pending mutex");
-        return NULL;
-    }
-
     interp->eval_frame = _PyEval_EvalFrameDefault;
 #ifdef HAVE_DLOPEN
 #if HAVE_DECL_RTLD_NOW
@@ -352,10 +345,6 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
     if (interp->id_mutex != NULL) {
         PyThread_free_lock(interp->id_mutex);
     }
-    if (interp->ceval.pending.lock != NULL) {
-        PyThread_free_lock(interp->ceval.pending.lock);
-        interp->ceval.pending.lock = NULL;
-    }
     PyMem_RawFree(interp);
 }
@@ -1025,7 +1014,7 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
             p->async_exc = exc;
             HEAD_UNLOCK(runtime);
             Py_XDECREF(old_exc);
-            _PyEval_SignalAsyncExc(&runtime->ceval, &interp->ceval);
+            _PyEval_SignalAsyncExc(&runtime->ceval);
             return 1;
         }
     }
@@ -1455,7 +1444,7 @@ _PyObject_GetCrossInterpreterData(PyObject *obj, _PyCrossInterpreterData *data)
     return 0;
 }
 
-static int
+static void
 _release_xidata(void *arg)
 {
     _PyCrossInterpreterData *data = (_PyCrossInterpreterData *)arg;
@@ -1463,21 +1452,42 @@ _release_xidata(void *arg)
         data->free(data->data);
     }
     Py_XDECREF(data->obj);
-    PyMem_Free(data);
-    return 0;
+}
+
+static void
+_call_in_interpreter(struct _gilstate_runtime_state *gilstate,
+                     PyInterpreterState *interp,
+                     void (*func)(void *), void *arg)
+{
+    /* We would use Py_AddPendingCall() if it weren't specific to the
+     * main interpreter (see bpo-33608).  In the meantime we take a
+     * naive approach.
+     */
+    PyThreadState *save_tstate = NULL;
+    if (interp != _PyRuntimeGILState_GetThreadState(gilstate)->interp) {
+        // XXX Using the "head" thread isn't strictly correct.
+        PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
+        // XXX Possible GILState issues?
+        save_tstate = _PyThreadState_Swap(gilstate, tstate);
+    }
+
+    func(arg);
+
+    // Switch back.
+    if (save_tstate != NULL) {
+        _PyThreadState_Swap(gilstate, save_tstate);
+    }
 }
 
 void
 _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
 {
-    _PyRuntimeState *runtime = &_PyRuntime;
-
     if (data->data == NULL && data->obj == NULL) {
         // Nothing to release!
         return;
     }
 
-    // Get the original interpreter.
+    // Switch to the original interpreter.
     PyInterpreterState *interp = _PyInterpreterState_LookUpID(data->interp);
     if (interp == NULL) {
         // The intepreter was already destroyed.
@@ -1486,28 +1496,10 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
         }
         return;
     }
-    // XXX There's an ever-so-slight race here...
-    if (interp->finalizing) {
-        // XXX Someone leaked some memory...
-        return;
-    }
 
     // "Release" the data and/or the object.
-    _PyCrossInterpreterData *copied = PyMem_Malloc(sizeof(_PyCrossInterpreterData));
-    if (copied == NULL) {
-        PyErr_SetString(PyExc_MemoryError,
-                        "Not enough memory to preserve cross-interpreter data");
-        PyErr_Print();
-        return;
-    }
-    memcpy(copied, data, sizeof(_PyCrossInterpreterData));
-    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
-    int res = _PyEval_AddPendingCall(tstate,
-                                     &runtime->ceval, &interp->ceval,
-                                     0, _release_xidata, copied);
-    if (res != 0) {
-        // XXX Queue full or couldn't get lock.  Try again somehow?
-    }
+    struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
+    _call_in_interpreter(gilstate, interp, _release_xidata, data);
 }
 
 PyObject *
```
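The data structure all of this hangs off is small: struct _pending_calls (see the Include/internal/pycore_pystate.h hunk above) is a fixed-size ring buffer. The following self-contained sketch mirrors the push/pop logic of _push_pending_call() and _pop_pending_call() from Python/ceval.c; the names here (pending_calls, push_call, pop_call, say_hi) are simplified stand-ins, and the real code additionally serializes access through pending->lock and drains the queue only on the main thread.

```c
#include <stdio.h>

#define NPENDINGCALLS 32

/* Simplified stand-in for struct _pending_calls (locking omitted). */
struct pending_calls {
    int first;                      /* index of the oldest queued item */
    int last;                       /* index one past the newest item */
    struct {
        int (*func)(void *);
        void *arg;
    } calls[NPENDINGCALLS];
};

/* Mirrors _push_pending_call(): one slot is sacrificed so that
   first == last means "empty"; returns -1 when the queue is full. */
static int
push_call(struct pending_calls *p, int (*func)(void *), void *arg)
{
    int i = p->last;
    int j = (i + 1) % NPENDINGCALLS;
    if (j == p->first) {
        return -1;                  /* queue full */
    }
    p->calls[i].func = func;
    p->calls[i].arg = arg;
    p->last = j;
    return 0;
}

/* Mirrors _pop_pending_call(): leaves *func untouched (callers
   pre-set it to NULL) when the queue is empty. */
static void
pop_call(struct pending_calls *p, int (**func)(void *), void **arg)
{
    int i = p->first;
    if (i == p->last) {
        return;                     /* queue empty */
    }
    *func = p->calls[i].func;
    *arg = p->calls[i].arg;
    p->first = (i + 1) % NPENDINGCALLS;
}

static int
say_hi(void *arg)
{
    printf("pending call: %s\n", (const char *)arg);
    return 0;
}

int
main(void)
{
    struct pending_calls queue = {0};
    push_call(&queue, say_hi, "hello");

    /* Bounded drain loop, as in make_pending_calls(). */
    for (int i = 0; i < NPENDINGCALLS; i++) {
        int (*func)(void *) = NULL;
        void *arg = NULL;
        pop_call(&queue, &func, &arg);
        if (func == NULL) {
            break;
        }
        if (func(arg) != 0) {
            return 1;
        }
    }
    return 0;
}
```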
