summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--AUTHORS40
-rw-r--r--ChangeLog338
-rw-r--r--MANIFEST.in18
-rw-r--r--Makefile4
-rw-r--r--README.rst105
-rw-r--r--TODO.rst24
-rw-r--r--asyncio/compat.py17
-rw-r--r--asyncio/events.py610
-rw-r--r--check.py2
-rw-r--r--doc/Makefile153
-rw-r--r--doc/asyncio.rst185
-rw-r--r--doc/changelog.rst687
-rw-r--r--doc/conf.py240
-rw-r--r--doc/dev.rst85
-rw-r--r--doc/index.rst80
-rw-r--r--doc/install.rst113
-rw-r--r--doc/libraries.rst30
-rw-r--r--doc/make.bat190
-rw-r--r--doc/trollius.jpgbin0 -> 30083 bytes
-rw-r--r--doc/using.rst85
-rw-r--r--examples/cacheclt.py56
-rw-r--r--examples/cachesvr.py27
-rw-r--r--examples/child_process.py27
-rw-r--r--examples/crawl.py109
-rw-r--r--examples/echo_client_tulip.py7
-rw-r--r--examples/echo_server_tulip.py7
-rw-r--r--examples/fetch0.py11
-rw-r--r--examples/fetch1.py26
-rw-r--r--examples/fetch2.py52
-rw-r--r--examples/fetch3.py83
-rw-r--r--examples/fuzz_as_completed.py13
-rw-r--r--examples/hello_callback.py4
-rw-r--r--examples/hello_coroutine.py9
-rw-r--r--examples/interop_asyncio.py53
-rw-r--r--examples/shell.py27
-rw-r--r--examples/simple_tcp_server.py36
-rw-r--r--examples/sink.py14
-rw-r--r--examples/source.py13
-rw-r--r--examples/source1.py26
-rw-r--r--examples/stacks.py6
-rw-r--r--examples/subprocess_attach_read_pipe.py16
-rw-r--r--examples/subprocess_attach_write_pipe.py29
-rw-r--r--examples/subprocess_shell.py20
-rwxr-xr-xexamples/tcp_echo.py6
-rw-r--r--examples/timing_tcp_server.py41
-rwxr-xr-xexamples/udp_echo.py8
-rw-r--r--overlapped.c71
-rwxr-xr-xrelease.py517
-rw-r--r--releaser.conf7
-rw-r--r--run_aiotest.py8
-rwxr-xr-x[-rw-r--r--]runtests.py128
-rw-r--r--setup.py75
-rw-r--r--tests/echo3.py10
-rw-r--r--tests/test_asyncio.py141
-rw-r--r--tests/test_base_events.py235
-rw-r--r--tests/test_events.py313
-rw-r--r--tests/test_futures.py99
-rw-r--r--tests/test_locks.py167
-rw-r--r--tests/test_proactor_events.py23
-rw-r--r--tests/test_queues.py109
-rw-r--r--tests/test_selector_events.py89
-rw-r--r--tests/test_selectors.py60
-rw-r--r--tests/test_sslproto.py13
-rw-r--r--tests/test_streams.py51
-rw-r--r--tests/test_subprocess.py136
-rw-r--r--tests/test_tasks.py460
-rw-r--r--tests/test_transports.py16
-rw-r--r--tests/test_unix_events.py127
-rw-r--r--tests/test_windows_events.py41
-rw-r--r--tests/test_windows_utils.py42
-rw-r--r--tox.ini89
-rw-r--r--trollius/__init__.py (renamed from asyncio/__init__.py)11
-rw-r--r--trollius/base_events.py (renamed from asyncio/base_events.py)177
-rw-r--r--trollius/base_subprocess.py (renamed from asyncio/base_subprocess.py)22
-rw-r--r--trollius/compat.py69
-rw-r--r--trollius/constants.py (renamed from asyncio/constants.py)0
-rw-r--r--trollius/coroutines.py (renamed from asyncio/coroutines.py)211
-rw-r--r--trollius/events.py626
-rw-r--r--trollius/executor.py84
-rw-r--r--trollius/futures.py (renamed from asyncio/futures.py)104
-rw-r--r--trollius/locks.py (renamed from asyncio/locks.py)153
-rw-r--r--trollius/log.py (renamed from asyncio/log.py)0
-rw-r--r--trollius/proactor_events.py (renamed from asyncio/proactor_events.py)20
-rw-r--r--trollius/protocols.py (renamed from asyncio/protocols.py)2
-rw-r--r--trollius/py27_weakrefset.py202
-rw-r--r--trollius/py33_exceptions.py144
-rw-r--r--trollius/py33_winapi.py75
-rw-r--r--trollius/py3_ssl.py149
-rw-r--r--trollius/queues.py (renamed from asyncio/queues.py)31
-rw-r--r--trollius/selector_events.py (renamed from asyncio/selector_events.py)132
-rw-r--r--trollius/selectors.py (renamed from asyncio/selectors.py)74
-rw-r--r--trollius/sslproto.py (renamed from asyncio/sslproto.py)22
-rw-r--r--trollius/streams.py (renamed from asyncio/streams.py)99
-rw-r--r--trollius/subprocess.py (renamed from asyncio/subprocess.py)56
-rw-r--r--trollius/tasks.py (renamed from asyncio/tasks.py)240
-rw-r--r--trollius/test_support.py (renamed from asyncio/test_support.py)17
-rw-r--r--trollius/test_utils.py (renamed from asyncio/test_utils.py)209
-rw-r--r--trollius/time_monotonic.py192
-rw-r--r--trollius/transports.py (renamed from asyncio/transports.py)8
-rw-r--r--trollius/unix_events.py (renamed from asyncio/unix_events.py)147
-rw-r--r--trollius/windows_events.py (renamed from asyncio/windows_events.py)93
-rw-r--r--trollius/windows_utils.py (renamed from asyncio/windows_utils.py)33
-rwxr-xr-xupdate-asyncio-step1.sh12
-rwxr-xr-xupdate-asyncio-step2.sh36
-rwxr-xr-xupdate-asyncio-step3.sh10
-rwxr-xr-xupdate_stdlib.sh70
106 files changed, 6605 insertions, 3684 deletions
diff --git a/AUTHORS b/AUTHORS
index 8591376..3c3966b 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,26 +1,14 @@
-A. Jesse Jiryu Davis <jesse AT mongodb.com>
-Aaron Griffith
-Andrew Svetlov <andrew.svetlov AT gmail.com>
-Anthony Baire
-Antoine Pitrou <solipsis AT pitrou.net>
-Arnaud Faure
-Aymeric Augustin
-Brett Cannon
-Charles-François Natali <cf.natali AT gmail.com>
-Christian Heimes
-Donald Stufft
-Eli Bendersky <eliben AT gmail.com>
-Geert Jansen <geertj AT gmail.com>
-Giampaolo Rodola' <g.rodola AT gmail.com>
-Guido van Rossum <guido AT python.org>: creator of the asyncio project and author of the PEP 3156
-Gustavo Carneiro <gjcarneiro AT gmail.com>
-Jeff Quast
-Jonathan Slenders <jonathan.slenders AT gmail.com>
-Nikolay Kim <fafhrd91 AT gmail.com>
-Richard Oudkerk <shibturn AT gmail.com>
-Saúl Ibarra Corretgé <saghul AT gmail.com>
-Serhiy Storchaka
-Vajrasky Kok
-Victor Stinner <victor.stinner AT gmail.com>
-Vladimir Kryachko
-Yury Selivanov <yselivanov AT gmail.com>
+Trollius authors
+================
+
+Ian Wienand <iwienand@redhat.com>
+Marc Schlaich <marc.schlaich AT gmail.com>
+Victor Stinner <victor.stinner AT gmail.com> - creator of the Trollius project
+
+The photo of Trollis flower was taken by Imartin6 and distributed under the CC
+BY-SA 3.0 license. It comes from:
+http://commons.wikimedia.org/wiki/File:Trollius_altaicus.jpg
+
+Trollius is a port of the asyncio project on Python 2, see also authors of the
+asyncio project (AUTHORS file).
+
diff --git a/ChangeLog b/ChangeLog
deleted file mode 100644
index 421704c..0000000
--- a/ChangeLog
+++ /dev/null
@@ -1,338 +0,0 @@
-Tulip 3.4.4
-===========
-
-* Issue #234: Drop asyncio.JoinableQueue on Python 3.5 and newer
-
-
-2015-02-04: Tulip 3.4.3
-=======================
-
-Major changes
--------------
-
-* New SSL implementation using ssl.MemoryBIO. The new implementation requires
- Python 3.5 and newer, otherwise the legacy implementation is used.
-* On Python 3.5 and newer usable, the ProactorEventLoop now supports SSL
- thanks to the new SSL implementation.
-* Fix multiple resource leaks: close sockets on error, explicitly clear
- references, emit ResourceWarning when event loops and transports are not
- closed explicitly, etc.
-* The proactor event loop is now much more reliable (no more known race
- condition).
-* Enhance handling of task cancellation.
-
-Changes of the asyncio API
---------------------------
-
-* Export BaseEventLoop symbol in the asyncio namespace
-* create_task(), call_soon(), call_soon_threadsafe(), call_later(),
- call_at() and run_in_executor() methods of BaseEventLoop now raise an
- exception if the event loop is closed.
-* call_soon(), call_soon_threadsafe(), call_later(), call_at() and
- run_in_executor() methods of BaseEventLoop now raise an exception if the
- callback is a coroutine object.
-* BaseEventLoopPolicy.get_event_loop() now always raises a RuntimeError
- if there is no event loop in the curren thread, instead of using an
- assertion (which can be disabld at runtime) and so raises an AssertionError.
-* selectors: Selector.get_key() now raises an exception if the selector is
- closed.
-* If wait_for() is cancelled, the waited task is also cancelled.
-* _UnixSelectorEventLoop.add_signal_handler() now raises an exception if
- the callback is a coroutine object or a coroutine function. It also raises
- an exception if the event loop is closed.
-
-Performances
-------------
-
-* sock_connect() doesn't check if the address is already resolved anymore.
- The check is only done in debug mode. Moreover, the check uses inet_pton()
- instead of getaddrinfo(), if inet_pton() is available, because getaddrinfo()
- is slow (around 10 us per call).
-
-Debug
------
-
-* Better repr() of _ProactorBasePipeTransport, _SelectorTransport,
- _UnixReadPipeTransport and _UnixWritePipeTransport: add closed/closing
- status and the file descriptor
-* Add repr(PipeHandle)
-* PipeHandle destructor now emits a ResourceWarning is the pipe is not closed
- explicitly.
-* In debug mode, call_at() method of BaseEventLoop now raises an exception
- if called from the wrong thread (not from the thread running the event
- loop). Before, it only raised an exception if current thread had an event
- loop.
-* A ResourceWarning is now emitted when event loops and transports are
- destroyed before being closed.
-* BaseEventLoop.call_exception_handler() now logs the traceback where
- the current handle was created (if no source_traceback was specified).
-* BaseSubprocessTransport.close() now logs a warning when the child process is
- still running and the method kills it.
-
-Bug fixes
----------
-
-* windows_utils.socketpair() now reuses socket.socketpair() if available
- (Python 3.5 or newer).
-* Fix IocpProactor.accept_pipe(): handle ERROR_PIPE_CONNECTED, it means
- that the pipe is connected. _overlapped.Overlapped.ConnectNamedPipe() now
- returns True on ERROR_PIPE_CONNECTED.
-* Rewrite IocpProactor.connect_pipe() using polling to avoid tricky bugs
- if the connection is cancelled, instead of using QueueUserWorkItem() to run
- blocking code.
-* Fix IocpProactor.recv(): handle BrokenPipeError, set the result to an empty
- string.
-* Fix ProactorEventLoop.start_serving_pipe(): if a client connected while the
- server is closing, drop the client connection.
-* Fix a tricky race condition when IocpProactor.wait_for_handle() is
- cancelled: wait until the wait is really cancelled before destroying the
- overlapped object. Unregister also the overlapped operation to not block
- in IocpProactor.close() before the wait will never complete.
-* Fix _UnixSubprocessTransport._start(): make the write end of the stdin pipe
- non-inheritable.
-* Set more attributes in the body of classes to avoid attribute errors in
- destructors if an error occurred in the constructor.
-* Fix SubprocessStreamProtocol.process_exited(): close the transport
- and clear its reference to the transport.
-* Fix SubprocessStreamProtocol.connection_made(): set the transport of
- stdout and stderr streams to respect reader buffer limits (stop reading when
- the buffer is full).
-* Fix FlowControlMixin constructor: if the loop parameter is None, get the
- current event loop.
-* Fix selectors.EpollSelector.select(): don't fail anymore if no file
- descriptor is registered.
-* Fix _SelectorTransport: don't wakeup the waiter if it was cancelled
-* Fix _SelectorTransport._call_connection_lost(): only call connection_lost()
- if connection_made() was already called.
-* Fix BaseSelectorEventLoop._accept_connection(): close the transport on
- error. In debug mode, log errors (ex: SSL handshake failure) on the creation
- of the transport for incoming connection.
-* Fix BaseProactorEventLoop.close(): stop the proactor before closing the
- event loop because stopping the proactor may schedule new callbacks, which
- is now forbidden when the event loop is closed.
-* Fix wrap_future() to not use a free variable and so not keep a frame alive
- too long.
-* Fix formatting of the "Future/Task exception was never retrieved" log: add
- a newline before the traceback.
-* WriteSubprocessPipeProto.connection_lost() now clears its reference to the
- subprocess.Popen object.
-* If the creation of a subprocess transport fails, the child process is killed
- and the event loop waits asynchronously for its completion.
-* BaseEventLoop.run_until_complete() now consumes the exception to not log a
- warning when a BaseException like KeyboardInterrupt is raised and
- run_until_complete() is not a future (but a coroutine object).
-* create_connection(), create_datagram_endpoint(), connect_read_pipe() and
- connect_write_pipe() methods of BaseEventLoop now close the transport on
- error.
-
-Other changes
--------------
-
-* Add tox.ini to run tests using tox.
-* _FlowControlMixin constructor now requires an event loop.
-* Embed asyncio/test_support.py to not depend on test.support of the system
- Python. For example, test.support is not installed by default on Windows.
-* selectors.Selector.close() now clears its reference to the mapping object.
-* _SelectorTransport and _UnixWritePipeTransport now only starts listening for
- read events after protocol.connection_made() has been called
-* _SelectorTransport._fatal_error() now only logs ConnectionAbortedError
- in debug mode.
-* BaseProactorEventLoop._loop_self_reading() now handles correctly
- CancelledError (just exit) and logs an error for other exceptions.
-* _ProactorBasePipeTransport now clears explicitly references to read and
- write future and to the socket
-* BaseSubprocessTransport constructor now calls the internal _connect_pipes()
- method (previously called _post_init()). The constructor now accepts an
- optional waiter parameter to notify when the transport is ready.
-* send_signal(), terminate() and kill() methods of BaseSubprocessTransport now
- raise a ProcessLookupError if the process already exited.
-* Add run_aiotest.py to run the aiotest test suite
-* Add release.py script to build wheel packages on Windows and run unit tests
-
-
-2014-09-30: Tulip 3.4.2
-=======================
-
-New shiny methods like create_task(), better documentation, much better debug
-mode, better tests.
-
-asyncio API
------------
-
-* Add BaseEventLoop.create_task() method: schedule a coroutine object.
- It allows other asyncio implementations to use their own Task class to
- change its behaviour.
-
-* New BaseEventLoop methods:
-
- - create_task(): schedule a coroutine
- - get_debug()
- - is_closed()
- - set_debug()
-
-* Add _FlowControlMixin.get_write_buffer_limits() method
-
-* sock_recv(), sock_sendall(), sock_connect(), sock_accept() methods of
- SelectorEventLoop now raise an exception if the socket is blocking mode
-
-* Include unix_events/windows_events symbols in asyncio.__all__.
- Examples: SelectorEventLoop, ProactorEventLoop, DefaultEventLoopPolicy.
-
-* attach(), detach(), loop, active_count and waiters attributes of the Server
- class are now private
-
-* BaseEventLoop: run_forever(), run_until_complete() now raises an exception if
- the event loop was closed
-
-* close() now raises an exception if the event loop is running, because pending
- callbacks would be lost
-
-* Queue now accepts a float for the maximum size.
-
-* Process.communicate() now ignores BrokenPipeError and ConnectionResetError
- exceptions, as Popen.communicate() of the subprocess module
-
-
-Performances
-------------
-
-* Optimize handling of cancelled timers
-
-
-Debug
------
-
-* Future (and Task), CoroWrapper and Handle now remembers where they were
- created (new _source_traceback object), traceback displayed when errors are
- logged.
-
-* On Python 3.4 and newer, Task destrutor now logs a warning if the task was
- destroyed while it was still pending. It occurs if the last reference
- to the task was removed, while the coroutine didn't finish yet.
-
-* Much more useful events are logged:
-
- - Event loop closed
- - Network connection
- - Creation of a subprocess
- - Pipe lost
- - Log many errors previously silently ignored
- - SSL handshake failure
- - etc.
-
-* BaseEventLoop._debug is now True if the envrionement variable
- PYTHONASYNCIODEBUG is set
-
-* Log the duration of DNS resolution and SSL handshake
-
-* Log a warning if a callback blocks the event loop longer than 100 ms
- (configurable duration)
-
-* repr(CoroWrapper) and repr(Task) now contains the current status of the
- coroutine (running, done), current filename and line number, and filename and
- line number where the object was created
-
-* Enhance representation (repr) of transports: add the file descriptor, status
- (idle, polling, writing, etc.), size of the write buffer, ...
-
-* Add repr(BaseEventLoop)
-
-* run_until_complete() doesn't log a warning anymore when called with a
- coroutine object which raises an exception.
-
-
-Bugfixes
---------
-
-* windows_utils.socketpair() now ensures that sockets are closed in case
- of error.
-
-* Rewrite bricks of the IocpProactor() to make it more reliable
-
-* IocpProactor destructor now closes it.
-
-* _OverlappedFuture.set_exception() now cancels the overlapped operation.
-
-* Rewrite _WaitHandleFuture:
-
- - cancel() is now able to signal the cancellation to the overlapped object
- - _unregister_wait() now catchs and logs exceptions
-
-* PipeServer.close() (class used on Windows) now cancels the accept pipe
- future.
-
-* Rewrite signal handling in the UNIX implementation of SelectorEventLoop:
- use the self-pipe to store pending signals instead of registering a
- signal handler calling directly _handle_signal(). The change fixes a
- race condition.
-
-* create_unix_server(): close the socket on error.
-
-* Fix wait_for()
-
-* Rewrite gather()
-
-* drain() is now a classic coroutine, no more special return value (empty
- tuple)
-
-* Rewrite SelectorEventLoop.sock_connect() to handle correctly timeout
-
-* Process data of the self-pipe faster to accept more pending events,
- especially signals written by signal handlers: the callback reads all pending
- data, not only a single byte
-
-* Don't try to set the result of a Future anymore if it was cancelled
- (explicitly or by a timeout)
-
-* CoroWrapper now works around CPython issue #21209: yield from & custom
- generator classes don't work together, issue with the send() method. It only
- affected asyncio in debug mode on Python older than 3.4.2
-
-
-Misc changes
-------------
-
-* windows_utils.socketpair() now supports IPv6.
-
-* Better documentation (online & docstrings): fill remaining XXX, more examples
-
-* new asyncio.coroutines submodule, to ease maintenance with the trollius
- project: @coroutine, _DEBUG, iscoroutine() and iscoroutinefunction() have
- been moved from asyncio.tasks to asyncio.coroutines
-
-* Cleanup code, ex: remove unused attribute (ex: _rawsock)
-
-* Reuse os.set_blocking() of Python 3.5.
-
-* Close explicitly the event loop in Tulip examples.
-
-* runtests.py now mention if tests are running in release or debug mode.
-
-
-2014-05-19: Tulip 3.4.1
-=======================
-
-2014-02-24: Tulip 0.4.1
-=======================
-
-2014-02-10: Tulip 0.3.1
-=======================
-
-* Add asyncio.subprocess submodule and the Process class.
-
-2013-11-25: Tulip 0.2.1
-=======================
-
-* Add support of subprocesses using transports and protocols.
-
-2013-10-22: Tulip 0.1.1
-=======================
-
-* First release.
-
-Creation of the project
-=======================
-
-* 2013-10-14: The tulip package was renamed to asyncio.
-* 2012-10-16: Creation of the Tulip project, started as mail threads on the
- python-ideas mailing list.
diff --git a/MANIFEST.in b/MANIFEST.in
index d0dbde1..f3b496f 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,11 +1,15 @@
-include AUTHORS COPYING
+include AUTHORS COPYING TODO.rst tox.ini
include Makefile
include overlapped.c pypi.bat
include check.py runtests.py run_aiotest.py release.py
-include update_stdlib.sh
+include update-asyncio-*.sh
+include .travis.yml
+include releaser.conf
-recursive-include examples *.py
-recursive-include tests *.crt
-recursive-include tests *.key
-recursive-include tests *.pem
-recursive-include tests *.py
+include doc/conf.py doc/make.bat doc/Makefile
+include doc/*.rst doc/*.jpg
+
+include examples/*.py
+
+include tests/*.crt tests/*.pem tests/*.key
+include tests/*.py
diff --git a/Makefile b/Makefile
index eda02f2..768298b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
# Some simple testing tasks (sorry, UNIX only).
-PYTHON=python3
+PYTHON=python
VERBOSE=$(V)
V= 0
FLAGS=
@@ -40,6 +40,8 @@ clean:
rm -rf build
rm -rf asyncio.egg-info
rm -f MANIFEST
+ rm -rf trollius.egg-info
+ rm -rf .tox
# For distribution builders only!
diff --git a/README.rst b/README.rst
index 9f03922..a1bf495 100644
--- a/README.rst
+++ b/README.rst
@@ -1,95 +1,42 @@
-The asyncio module provides infrastructure for writing single-threaded
-concurrent code using coroutines, multiplexing I/O access over sockets and
-other resources, running network clients and servers, and other related
-primitives. Here is a more detailed list of the package contents:
+Trollius provides infrastructure for writing single-threaded concurrent
+code using coroutines, multiplexing I/O access over sockets and other
+resources, running network clients and servers, and other related primitives.
+Here is a more detailed list of the package contents:
* a pluggable event loop with various system-specific implementations;
-* transport and protocol abstractions (similar to those in Twisted);
+* transport and protocol abstractions (similar to those in `Twisted
+ <http://twistedmatrix.com/>`_);
* concrete support for TCP, UDP, SSL, subprocess pipes, delayed calls, and
others (some may be system-dependent);
-* a Future class that mimics the one in the concurrent.futures module, but
- adapted for use with the event loop;
+* a ``Future`` class that mimics the one in the ``concurrent.futures`` module,
+ but adapted for use with the event loop;
-* coroutines and tasks based on ``yield from`` (PEP 380), to help write
+* coroutines and tasks based on generators (``yield``), to help write
concurrent code in a sequential fashion;
-* cancellation support for Futures and coroutines;
+* cancellation support for ``Future``\s and coroutines;
* synchronization primitives for use between coroutines in a single thread,
- mimicking those in the threading module;
+ mimicking those in the ``threading`` module;
* an interface for passing work off to a threadpool, for times when you
absolutely, positively have to use a library that makes blocking I/O calls.
-Note: The implementation of asyncio was previously called "Tulip".
-
-
-Installation
-============
-
-To install asyncio, type::
-
- pip install asyncio
-
-asyncio requires Python 3.3 or later! The asyncio module is part of the Python
-standard library since Python 3.4.
-
-asyncio is a free software distributed under the Apache license version 2.0.
-
-
-Websites
-========
-
-* `asyncio project at GitHub <https://github.com/python/asyncio>`_: source
- code, bug tracker
-* `asyncio documentation <https://docs.python.org/dev/library/asyncio.html>`_
-* Mailing list: `python-tulip Google Group
- <https://groups.google.com/forum/?fromgroups#!forum/python-tulip>`_
-* IRC: join the ``#asyncio`` channel on the Freenode network
-
-
-Development
-===========
-
-The actual code lives in the 'asyncio' subdirectory. Tests are in the 'tests'
-subdirectory.
-
-To run tests, run::
-
- tox
-
-Or use the Makefile::
-
- make test
-
-To run coverage (coverage package is required)::
-
- make coverage
-
-On Windows, things are a little more complicated. Assume 'P' is your
-Python binary (for example C:\Python33\python.exe).
-
-You must first build the _overlapped.pyd extension and have it placed
-in the asyncio directory, as follows:
-
- C> P setup.py build_ext --inplace
-
-If this complains about vcvars.bat, you probably don't have the
-required version of Visual Studio installed. Compiling extensions for
-Python 3.3 requires Microsoft Visual C++ 2010 (MSVC 10.0) of any
-edition; you can download Visual Studio Express 2010 for free from
-http://www.visualstudio.com/downloads (scroll down to Visual C++ 2010
-Express).
-
-Once you have built the _overlapped.pyd extension successfully you can
-run the tests as follows:
-
- C> P runtests.py
-
-And coverage as follows:
-
- C> P runtests.py --coverage
-
+Trollius is a portage of the `asyncio project
+<https://github.com/python/asyncio>`_ (`PEP 3156
+<http://legacy.python.org/dev/peps/pep-3156/>`_) on Python 2. Trollius works on
+Python 2.6-3.5. It has been tested on Windows, Linux, Mac OS X, FreeBSD and
+OpenIndiana.
+
+* `Asyncio documentation <http://docs.python.org/dev/library/asyncio.html>`_
+* `Trollius documentation <http://trollius.readthedocs.org/>`_
+* `Trollius project in the Python Cheeseshop (PyPI)
+ <https://pypi.python.org/pypi/trollius>`_
+* `Trollius project at Github <https://github.com/haypo/trollius>`_
+ (bug tracker, source code)
+* Copyright/license: Open source, Apache 2.0. Enjoy!
+
+See also the `asyncio project at Github <https://github.com/python/asyncio>`_.
diff --git a/TODO.rst b/TODO.rst
new file mode 100644
index 0000000..31683a5
--- /dev/null
+++ b/TODO.rst
@@ -0,0 +1,24 @@
+Unsorted "TODO" tasks:
+
+* Drop Python 2.6 and 3.2 support
+* Drop platform without ssl module?
+* streams.py:FIXME: should we support __aiter__ and __anext__ in Trollius?
+* replace selectors.py with selectors34:
+ https://github.com/berkerpeksag/selectors34/pull/2
+* check ssl.SSLxxx in update_xxx.sh
+* document how to port asyncio to trollius
+* use six instead of compat
+* Replace logger with warning in monotonic clock and synchronous executor
+* Windows: use _overlapped in py33_winapi?
+* Fix tests failing with PyPy:
+
+ - sys.getrefcount()
+ - test_queues.test_repr
+ - test_futures.test_tb_logger_exception_unretrieved
+
+* write unit test for create_connection(ssl=True)
+* Fix examples:
+
+ - stacks.py: 'exceptions.ZeroDivisionError' object has no attribute '__traceback__'
+
+* Fix all FIXME in the code
diff --git a/asyncio/compat.py b/asyncio/compat.py
deleted file mode 100644
index 660b7e7..0000000
--- a/asyncio/compat.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""Compatibility helpers for the different Python versions."""
-
-import sys
-
-PY34 = sys.version_info >= (3, 4)
-PY35 = sys.version_info >= (3, 5)
-
-
-def flatten_list_bytes(list_of_data):
- """Concatenate a sequence of bytes-like objects."""
- if not PY34:
- # On Python 3.3 and older, bytes.join() doesn't handle
- # memoryview.
- list_of_data = (
- bytes(data) if isinstance(data, memoryview) else data
- for data in list_of_data)
- return b''.join(list_of_data)
diff --git a/asyncio/events.py b/asyncio/events.py
deleted file mode 100644
index d5f0d45..0000000
--- a/asyncio/events.py
+++ /dev/null
@@ -1,610 +0,0 @@
-"""Event loop and event loop policy."""
-
-__all__ = ['AbstractEventLoopPolicy',
- 'AbstractEventLoop', 'AbstractServer',
- 'Handle', 'TimerHandle',
- 'get_event_loop_policy', 'set_event_loop_policy',
- 'get_event_loop', 'set_event_loop', 'new_event_loop',
- 'get_child_watcher', 'set_child_watcher',
- ]
-
-import functools
-import inspect
-import reprlib
-import socket
-import subprocess
-import sys
-import threading
-import traceback
-
-from asyncio import compat
-
-
-def _get_function_source(func):
- if compat.PY34:
- func = inspect.unwrap(func)
- elif hasattr(func, '__wrapped__'):
- func = func.__wrapped__
- if inspect.isfunction(func):
- code = func.__code__
- return (code.co_filename, code.co_firstlineno)
- if isinstance(func, functools.partial):
- return _get_function_source(func.func)
- if compat.PY34 and isinstance(func, functools.partialmethod):
- return _get_function_source(func.func)
- return None
-
-
-def _format_args(args):
- """Format function arguments.
-
- Special case for a single parameter: ('hello',) is formatted as ('hello').
- """
- # use reprlib to limit the length of the output
- args_repr = reprlib.repr(args)
- if len(args) == 1 and args_repr.endswith(',)'):
- args_repr = args_repr[:-2] + ')'
- return args_repr
-
-
-def _format_callback(func, args, suffix=''):
- if isinstance(func, functools.partial):
- if args is not None:
- suffix = _format_args(args) + suffix
- return _format_callback(func.func, func.args, suffix)
-
- if hasattr(func, '__qualname__'):
- func_repr = getattr(func, '__qualname__')
- elif hasattr(func, '__name__'):
- func_repr = getattr(func, '__name__')
- else:
- func_repr = repr(func)
-
- if args is not None:
- func_repr += _format_args(args)
- if suffix:
- func_repr += suffix
- return func_repr
-
-def _format_callback_source(func, args):
- func_repr = _format_callback(func, args)
- source = _get_function_source(func)
- if source:
- func_repr += ' at %s:%s' % source
- return func_repr
-
-
-class Handle:
- """Object returned by callback registration methods."""
-
- __slots__ = ('_callback', '_args', '_cancelled', '_loop',
- '_source_traceback', '_repr', '__weakref__')
-
- def __init__(self, callback, args, loop):
- assert not isinstance(callback, Handle), 'A Handle is not a callback'
- self._loop = loop
- self._callback = callback
- self._args = args
- self._cancelled = False
- self._repr = None
- if self._loop.get_debug():
- self._source_traceback = traceback.extract_stack(sys._getframe(1))
- else:
- self._source_traceback = None
-
- def _repr_info(self):
- info = [self.__class__.__name__]
- if self._cancelled:
- info.append('cancelled')
- if self._callback is not None:
- info.append(_format_callback_source(self._callback, self._args))
- if self._source_traceback:
- frame = self._source_traceback[-1]
- info.append('created at %s:%s' % (frame[0], frame[1]))
- return info
-
- def __repr__(self):
- if self._repr is not None:
- return self._repr
- info = self._repr_info()
- return '<%s>' % ' '.join(info)
-
- def cancel(self):
- if not self._cancelled:
- self._cancelled = True
- if self._loop.get_debug():
- # Keep a representation in debug mode to keep callback and
- # parameters. For example, to log the warning
- # "Executing <Handle...> took 2.5 second"
- self._repr = repr(self)
- self._callback = None
- self._args = None
-
- def _run(self):
- try:
- self._callback(*self._args)
- except Exception as exc:
- cb = _format_callback_source(self._callback, self._args)
- msg = 'Exception in callback {}'.format(cb)
- context = {
- 'message': msg,
- 'exception': exc,
- 'handle': self,
- }
- if self._source_traceback:
- context['source_traceback'] = self._source_traceback
- self._loop.call_exception_handler(context)
- self = None # Needed to break cycles when an exception occurs.
-
-
-class TimerHandle(Handle):
- """Object returned by timed callback registration methods."""
-
- __slots__ = ['_scheduled', '_when']
-
- def __init__(self, when, callback, args, loop):
- assert when is not None
- super().__init__(callback, args, loop)
- if self._source_traceback:
- del self._source_traceback[-1]
- self._when = when
- self._scheduled = False
-
- def _repr_info(self):
- info = super()._repr_info()
- pos = 2 if self._cancelled else 1
- info.insert(pos, 'when=%s' % self._when)
- return info
-
- def __hash__(self):
- return hash(self._when)
-
- def __lt__(self, other):
- return self._when < other._when
-
- def __le__(self, other):
- if self._when < other._when:
- return True
- return self.__eq__(other)
-
- def __gt__(self, other):
- return self._when > other._when
-
- def __ge__(self, other):
- if self._when > other._when:
- return True
- return self.__eq__(other)
-
- def __eq__(self, other):
- if isinstance(other, TimerHandle):
- return (self._when == other._when and
- self._callback == other._callback and
- self._args == other._args and
- self._cancelled == other._cancelled)
- return NotImplemented
-
- def __ne__(self, other):
- equal = self.__eq__(other)
- return NotImplemented if equal is NotImplemented else not equal
-
- def cancel(self):
- if not self._cancelled:
- self._loop._timer_handle_cancelled(self)
- super().cancel()
-
-
-class AbstractServer:
- """Abstract server returned by create_server()."""
-
- def close(self):
- """Stop serving. This leaves existing connections open."""
- return NotImplemented
-
- def wait_closed(self):
- """Coroutine to wait until service is closed."""
- return NotImplemented
-
-
-class AbstractEventLoop:
- """Abstract event loop."""
-
- # Running and stopping the event loop.
-
- def run_forever(self):
- """Run the event loop until stop() is called."""
- raise NotImplementedError
-
- def run_until_complete(self, future):
- """Run the event loop until a Future is done.
-
- Return the Future's result, or raise its exception.
- """
- raise NotImplementedError
-
- def stop(self):
- """Stop the event loop as soon as reasonable.
-
- Exactly how soon that is may depend on the implementation, but
- no more I/O callbacks should be scheduled.
- """
- raise NotImplementedError
-
- def is_running(self):
- """Return whether the event loop is currently running."""
- raise NotImplementedError
-
- def is_closed(self):
- """Returns True if the event loop was closed."""
- raise NotImplementedError
-
- def close(self):
- """Close the loop.
-
- The loop should not be running.
-
- This is idempotent and irreversible.
-
- No other methods should be called after this one.
- """
- raise NotImplementedError
-
- # Methods scheduling callbacks. All these return Handles.
-
- def _timer_handle_cancelled(self, handle):
- """Notification that a TimerHandle has been cancelled."""
- raise NotImplementedError
-
- def call_soon(self, callback, *args):
- return self.call_later(0, callback, *args)
-
- def call_later(self, delay, callback, *args):
- raise NotImplementedError
-
- def call_at(self, when, callback, *args):
- raise NotImplementedError
-
- def time(self):
- raise NotImplementedError
-
- # Method scheduling a coroutine object: create a task.
-
- def create_task(self, coro):
- raise NotImplementedError
-
- # Methods for interacting with threads.
-
- def call_soon_threadsafe(self, callback, *args):
- raise NotImplementedError
-
- def run_in_executor(self, executor, func, *args):
- raise NotImplementedError
-
- def set_default_executor(self, executor):
- raise NotImplementedError
-
- # Network I/O methods returning Futures.
-
- def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
- raise NotImplementedError
-
- def getnameinfo(self, sockaddr, flags=0):
- raise NotImplementedError
-
- def create_connection(self, protocol_factory, host=None, port=None, *,
- ssl=None, family=0, proto=0, flags=0, sock=None,
- local_addr=None, server_hostname=None):
- raise NotImplementedError
-
- def create_server(self, protocol_factory, host=None, port=None, *,
- family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
- sock=None, backlog=100, ssl=None, reuse_address=None):
- """A coroutine which creates a TCP server bound to host and port.
-
- The return value is a Server object which can be used to stop
- the service.
-
- If host is an empty string or None all interfaces are assumed
- and a list of multiple sockets will be returned (most likely
- one for IPv4 and another one for IPv6).
-
- family can be set to either AF_INET or AF_INET6 to force the
- socket to use IPv4 or IPv6. If not set it will be determined
- from host (defaults to AF_UNSPEC).
-
- flags is a bitmask for getaddrinfo().
-
- sock can optionally be specified in order to use a preexisting
- socket object.
-
- backlog is the maximum number of queued connections passed to
- listen() (defaults to 100).
-
- ssl can be set to an SSLContext to enable SSL over the
- accepted connections.
-
- reuse_address tells the kernel to reuse a local socket in
- TIME_WAIT state, without waiting for its natural timeout to
- expire. If not specified will automatically be set to True on
- UNIX.
- """
- raise NotImplementedError
-
- def create_unix_connection(self, protocol_factory, path, *,
- ssl=None, sock=None,
- server_hostname=None):
- raise NotImplementedError
-
- def create_unix_server(self, protocol_factory, path, *,
- sock=None, backlog=100, ssl=None):
- """A coroutine which creates a UNIX Domain Socket server.
-
- The return value is a Server object, which can be used to stop
- the service.
-
- path is a str, representing a file systsem path to bind the
- server socket to.
-
- sock can optionally be specified in order to use a preexisting
- socket object.
-
- backlog is the maximum number of queued connections passed to
- listen() (defaults to 100).
-
- ssl can be set to an SSLContext to enable SSL over the
- accepted connections.
- """
- raise NotImplementedError
-
- def create_datagram_endpoint(self, protocol_factory,
- local_addr=None, remote_addr=None, *,
- family=0, proto=0, flags=0):
- raise NotImplementedError
-
- # Pipes and subprocesses.
-
- def connect_read_pipe(self, protocol_factory, pipe):
- """Register read pipe in event loop. Set the pipe to non-blocking mode.
-
- protocol_factory should instantiate object with Protocol interface.
- pipe is a file-like object.
- Return pair (transport, protocol), where transport supports the
- ReadTransport interface."""
- # The reason to accept file-like object instead of just file descriptor
- # is: we need to own pipe and close it at transport finishing
- # Can got complicated errors if pass f.fileno(),
- # close fd in pipe transport then close f and vise versa.
- raise NotImplementedError
-
- def connect_write_pipe(self, protocol_factory, pipe):
- """Register write pipe in event loop.
-
- protocol_factory should instantiate object with BaseProtocol interface.
- Pipe is file-like object already switched to nonblocking.
- Return pair (transport, protocol), where transport support
- WriteTransport interface."""
- # The reason to accept file-like object instead of just file descriptor
- # is: we need to own pipe and close it at transport finishing
- # Can got complicated errors if pass f.fileno(),
- # close fd in pipe transport then close f and vise versa.
- raise NotImplementedError
-
- def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- **kwargs):
- raise NotImplementedError
-
- def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- **kwargs):
- raise NotImplementedError
-
- # Ready-based callback registration methods.
- # The add_*() methods return None.
- # The remove_*() methods return True if something was removed,
- # False if there was nothing to delete.
-
- def add_reader(self, fd, callback, *args):
- raise NotImplementedError
-
- def remove_reader(self, fd):
- raise NotImplementedError
-
- def add_writer(self, fd, callback, *args):
- raise NotImplementedError
-
- def remove_writer(self, fd):
- raise NotImplementedError
-
- # Completion based I/O methods returning Futures.
-
- def sock_recv(self, sock, nbytes):
- raise NotImplementedError
-
- def sock_sendall(self, sock, data):
- raise NotImplementedError
-
- def sock_connect(self, sock, address):
- raise NotImplementedError
-
- def sock_accept(self, sock):
- raise NotImplementedError
-
- # Signal handling.
-
- def add_signal_handler(self, sig, callback, *args):
- raise NotImplementedError
-
- def remove_signal_handler(self, sig):
- raise NotImplementedError
-
- # Task factory.
-
- def set_task_factory(self, factory):
- raise NotImplementedError
-
- def get_task_factory(self):
- raise NotImplementedError
-
- # Error handlers.
-
- def set_exception_handler(self, handler):
- raise NotImplementedError
-
- def default_exception_handler(self, context):
- raise NotImplementedError
-
- def call_exception_handler(self, context):
- raise NotImplementedError
-
- # Debug flag management.
-
- def get_debug(self):
- raise NotImplementedError
-
- def set_debug(self, enabled):
- raise NotImplementedError
-
-
-class AbstractEventLoopPolicy:
- """Abstract policy for accessing the event loop."""
-
- def get_event_loop(self):
- """Get the event loop for the current context.
-
- Returns an event loop object implementing the BaseEventLoop interface,
- or raises an exception in case no event loop has been set for the
- current context and the current policy does not specify to create one.
-
- It should never return None."""
- raise NotImplementedError
-
- def set_event_loop(self, loop):
- """Set the event loop for the current context to loop."""
- raise NotImplementedError
-
- def new_event_loop(self):
- """Create and return a new event loop object according to this
- policy's rules. If there's need to set this loop as the event loop for
- the current context, set_event_loop must be called explicitly."""
- raise NotImplementedError
-
- # Child processes handling (Unix only).
-
- def get_child_watcher(self):
- "Get the watcher for child processes."
- raise NotImplementedError
-
- def set_child_watcher(self, watcher):
- """Set the watcher for child processes."""
- raise NotImplementedError
-
-
-class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
- """Default policy implementation for accessing the event loop.
-
- In this policy, each thread has its own event loop. However, we
- only automatically create an event loop by default for the main
- thread; other threads by default have no event loop.
-
- Other policies may have different rules (e.g. a single global
- event loop, or automatically creating an event loop per thread, or
- using some other notion of context to which an event loop is
- associated).
- """
-
- _loop_factory = None
-
- class _Local(threading.local):
- _loop = None
- _set_called = False
-
- def __init__(self):
- self._local = self._Local()
-
- def get_event_loop(self):
- """Get the event loop.
-
- This may be None or an instance of EventLoop.
- """
- if (self._local._loop is None and
- not self._local._set_called and
- isinstance(threading.current_thread(), threading._MainThread)):
- self.set_event_loop(self.new_event_loop())
- if self._local._loop is None:
- raise RuntimeError('There is no current event loop in thread %r.'
- % threading.current_thread().name)
- return self._local._loop
-
- def set_event_loop(self, loop):
- """Set the event loop."""
- self._local._set_called = True
- assert loop is None or isinstance(loop, AbstractEventLoop)
- self._local._loop = loop
-
- def new_event_loop(self):
- """Create a new event loop.
-
- You must call set_event_loop() to make this the current event
- loop.
- """
- return self._loop_factory()
-
-
-# Event loop policy. The policy itself is always global, even if the
-# policy's rules say that there is an event loop per thread (or other
-# notion of context). The default policy is installed by the first
-# call to get_event_loop_policy().
-_event_loop_policy = None
-
-# Lock for protecting the on-the-fly creation of the event loop policy.
-_lock = threading.Lock()
-
-
-def _init_event_loop_policy():
- global _event_loop_policy
- with _lock:
- if _event_loop_policy is None: # pragma: no branch
- from . import DefaultEventLoopPolicy
- _event_loop_policy = DefaultEventLoopPolicy()
-
-
-def get_event_loop_policy():
- """Get the current event loop policy."""
- if _event_loop_policy is None:
- _init_event_loop_policy()
- return _event_loop_policy
-
-
-def set_event_loop_policy(policy):
- """Set the current event loop policy.
-
- If policy is None, the default policy is restored."""
- global _event_loop_policy
- assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
- _event_loop_policy = policy
-
-
-def get_event_loop():
- """Equivalent to calling get_event_loop_policy().get_event_loop()."""
- return get_event_loop_policy().get_event_loop()
-
-
-def set_event_loop(loop):
- """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
- get_event_loop_policy().set_event_loop(loop)
-
-
-def new_event_loop():
- """Equivalent to calling get_event_loop_policy().new_event_loop()."""
- return get_event_loop_policy().new_event_loop()
-
-
-def get_child_watcher():
- """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
- return get_event_loop_policy().get_child_watcher()
-
-
-def set_child_watcher(watcher):
- """Equivalent to calling
- get_event_loop_policy().set_child_watcher(watcher)."""
- return get_event_loop_policy().set_child_watcher(watcher)
diff --git a/check.py b/check.py
index 6db82d6..dcefc18 100644
--- a/check.py
+++ b/check.py
@@ -37,7 +37,7 @@ def process(fn):
line = line.rstrip('\n')
sline = line.rstrip()
if len(line) >= 80 or line != sline or not isascii(line):
- print('{}:{:d}:{}{}'.format(
+ print('{0}:{1:d}:{2}{3}'.format(
fn, i+1, sline, '_' * (len(line) - len(sline))))
finally:
f.close()
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..314751a
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Trollius.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Trollius.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/Trollius"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Trollius"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/asyncio.rst b/doc/asyncio.rst
new file mode 100644
index 0000000..011a9a8
--- /dev/null
+++ b/doc/asyncio.rst
@@ -0,0 +1,185 @@
+++++++++++++++++++++
+Trollius and asyncio
+++++++++++++++++++++
+
+Differences between Trollius and asyncio
+========================================
+
+Syntax of coroutines
+--------------------
+
+The major difference between Trollius and asyncio is the syntax of coroutines:
+
+================== ======================
+asyncio Trollius
+================== ======================
+``yield from ...`` ``yield From(...)``
+``yield from []`` ``yield From(None)``
+``return`` ``raise Return()``
+``return x`` ``raise Return(x)``
+``return x, y`` ``raise Return(x, y)``
+================== ======================
+
+Because of this major difference, it was decided to call the module
+``trollius`` instead of ``asyncio``. This choice also allows using Trollius on
+Python 3.4 and later. Changing imports is not enough to use Trollius code with
+asyncio: the asyncio event loop explicitly rejects coroutines using ``yield``
+(instead of ``yield from``).
+
+OSError and socket.error exceptions
+-----------------------------------
+
+The ``OSError`` exception changed in Python 3.3: there are now subclasses like
+``ConnectionResetError`` or ``BlockingIOError``. The exception hierarchy also
+changed: ``socket.error`` is now an alias to ``OSError``. The ``asyncio``
+module is written for Python 3.3 and newer and so is based on these new
+exceptions.
+
+.. seealso::
+
+ `PEP 3151: Reworking the OS and IO exception hierarchy
+ <https://www.python.org/dev/peps/pep-3151>`_.
+
+On Python 3.2 and older, Trollius wraps ``OSError``, ``IOError``,
+``socket.error`` and ``select.error`` exceptions on operating system and socket
+operations to raise more specific exceptions, subclasses of ``OSError``:
+
+* ``trollius.BlockingIOError``
+* ``trollius.BrokenPipeError``
+* ``trollius.ChildProcessError``
+* ``trollius.ConnectionAbortedError``
+* ``trollius.ConnectionRefusedError``
+* ``trollius.ConnectionResetError``
+* ``trollius.FileNotFoundError``
+* ``trollius.InterruptedError``
+* ``trollius.PermissionError``
+
+On Python 3.3 and newer, these symbols are just aliases to builtin exceptions.
+
+.. note::
+
+ ``ssl.SSLError`` exceptions are not wrapped to ``OSError``, even if
+ ``ssl.SSLError`` is a subclass of ``socket.error``.
+
+
+SSLError
+--------
+
+On Python 3.2 and older, Trollius wraps ``ssl.SSLError`` exceptions to raise
+more specific exceptions, subclasses of ``ssl.SSLError``, to mimic the Python
+3.3:
+
+* ``trollius.SSLEOFError``
+* ``trollius.SSLWantReadError``
+* ``trollius.SSLWantWriteError``
+
+On Python 3.3 and newer, these symbols are just aliases to exceptions of the
+``ssl`` module.
+
+``trollius.BACKPORT_SSL_ERRORS`` constant:
+
+* ``True`` if ``ssl.SSLError`` are wrapped to Trollius exceptions (Python 2
+ older than 2.7.9, or Python 3 older than 3.3),
+* ``False`` if trollius SSL exceptions are just aliases.
+
+
+SSLContext
+----------
+
+Python 3.3 has a new ``ssl.SSLContext`` class: see the `documentation of the
+ssl.SSLContext class
+<https://docs.python.org/3/library/ssl.html#ssl.SSLContext>`_.
+
+On Python 3.2 and older, Trollius has a basic ``trollius.SSLContext`` class to
+mimic Python 3.3 API, but it only has a few features:
+
+* ``protocol``, ``certfile`` and ``keyfile`` attributes
+* read-only ``verify_mode`` attribute: its value is ``CERT_NONE``
+* ``load_cert_chain(certfile, keyfile)`` method
+* ``wrap_socket(sock, **kw)`` method: see the ``ssl.wrap_socket()``
+ documentation of your Python version for the keyword parameters
+
+Example of missing features:
+
+* no ``options`` attribute
+* the ``verify_mode`` attribute cannot be modified
+* no ``set_default_verify_paths()`` method
+* no "Server Name Indication" (SNI) support
+* etc.
+
+On Python 3.2 and older, the trollius SSL transport does not have the
+``'compression'`` extra info.
+
+``trollius.BACKPORT_SSL_CONTEXT`` constant:
+
+* ``True`` if ``trollius.SSLContext`` is the backported class (Python 2 older
+ than 2.7.9, or Python 3 older than 3.3),
+* ``False`` if ``trollius.SSLContext`` is just an alias to ``ssl.SSLContext``.
+
+
+Other differences
+-----------------
+
+* Trollius uses the ``TROLLIUSDEBUG`` environment variable instead of
+  the ``PYTHONASYNCIODEBUG`` environment variable. The ``TROLLIUSDEBUG`` variable
+ is used even if the Python command line option ``-E`` is used.
+* ``asyncio.subprocess`` has no ``DEVNULL`` constant
+* Python 2 does not support keyword-only parameters.
+* If the ``concurrent.futures`` module is missing,
+ ``BaseEventLoop.run_in_executor()`` uses a synchronous executor instead of a
+ pool of threads. It blocks until the function returns. For example, DNS
+ resolutions are blocking in this case.
+* Trollius has more symbols than asyncio for compatibility with Python older
+ than 3.3:
+
+ - ``From``: part of ``yield From(...)`` syntax
+ - ``Return``: part of ``raise Return(...)`` syntax
+
+
+Write code working on Trollius and asyncio
+==========================================
+
+Trollius and asyncio are different, especially for coroutines (``yield
+From(...)`` vs ``yield from ...``).
+
+To use asyncio or Trollius on Python 2 and Python 3, add the following code at
+the top of your file::
+
+ try:
+ # Use builtin asyncio on Python 3.4+, or asyncio on Python 3.3
+ import asyncio
+ except ImportError:
+ # Use Trollius on Python <= 3.2
+ import trollius as asyncio
+
+It is possible to write code working on both projects using only callbacks.
+This option is used by the following projects which work on Trollius and asyncio:
+
+* `AutobahnPython <https://github.com/tavendo/AutobahnPython>`_: WebSocket &
+ WAMP for Python, it works on Trollius (Python 2.6 and 2.7), asyncio (Python
+ 3.3) and Python 3.4 (asyncio), and also on Twisted.
+* `Pulsar <http://pythonhosted.org/pulsar/>`_: Event driven concurrent
+ framework for Python. With pulsar you can write asynchronous servers
+ performing one or several activities in different threads and/or processes.
+ Trollius 0.3 requires Pulsar 0.8.2 or later. Pulsar uses the ``asyncio``
+ module if available, or import ``trollius``.
+* `Tornado <http://www.tornadoweb.org/>`_ supports asyncio and Trollius since
+ Tornado 3.2: `tornado.platform.asyncio — Bridge between asyncio and Tornado
+ <http://tornado.readthedocs.org/en/latest/asyncio.html>`_. It tries to import
+ asyncio or fallback on importing trollius.
+
+Another option is to provide functions returning ``Future`` objects, so the
+caller can decide to use callback using ``fut.add_done_callback(callback)`` or
+to use coroutines (``yield From(fut)`` for Trollius, or ``yield from fut`` for
+asyncio). This option is used by the `aiodns <https://github.com/saghul/aiodns>`_
+project for example.
+
+Since Trollius 0.4, it's possible to use asyncio and Trollius coroutines in the
+same process. The only limit is that the event loop must be a Trollius event
+loop.
+
+.. note::
+
+ The Trollius module was called ``asyncio`` in Trollius version 0.2. The
+ module name changed to ``trollius`` to support Python 3.4.
+
diff --git a/doc/changelog.rst b/doc/changelog.rst
new file mode 100644
index 0000000..684dd77
--- /dev/null
+++ b/doc/changelog.rst
@@ -0,0 +1,687 @@
+++++++++++
+Change log
+++++++++++
+
+Version 2.0 (2015-07-13)
+========================
+
+Summary:
+
+* SSL support on Windows for proactor event loop with Python 3.5 and newer
+* Many race conditions were fixed in the proactor event loop
+* Trollius moved to Github and the fork was recreated on top of the asyncio
+  git repository
+* Many resource leaks (ex: unclosed sockets) were fixed
+* Optimization of socket connections: don't call the slow getaddrinfo()
+ function to ensure that the address is already resolved. The check is now
+ only done in debug mode.
+
+The Trollius project moved from Bitbucket to Github. The project is now a fork
+of the Git repository of the asyncio project (previously called the "tulip"
+project), the trollius source code lives in the trollius branch.
+
+The new Trollius home page is now: https://github.com/haypo/trollius
+
+The asyncio project moved to: https://github.com/python/asyncio
+
+Note: PEP 492 is not supported in trollius yet.
+
+API changes:
+
+* Issue #234: Drop JoinableQueue on Python 3.5+
+* add the asyncio.ensure_future() function, previously called async().
+ The async() function is now deprecated.
+* New event loop methods: set_task_factory() and get_task_factory().
+* Python issue #23347: Make BaseSubprocessTransport.wait() private.
+* Python issue #23347: send_signal(), kill() and terminate() methods of
+ BaseSubprocessTransport now check if the transport was closed and if the
+ process exited.
+* Python issue #23209, #23225: selectors.BaseSelector.get_key() now raises a
+ RuntimeError if the selector is closed. And selectors.BaseSelector.close()
+ now clears its internal reference to the selector mapping to break a
+ reference cycle. Initial patch written by Martin Richard.
+* PipeHandle.fileno() of asyncio.windows_utils now raises an exception if the
+ pipe is closed.
+* Remove Overlapped.WaitNamedPipeAndConnect() of the _overlapped module,
+  it is no longer used and it had issues.
+* Python issue #23537: Remove 2 unused private methods of
+ BaseSubprocessTransport: _make_write_subprocess_pipe_proto,
+ _make_read_subprocess_pipe_proto. Methods only raise NotImplementedError and
+ are never used.
+* Remove unused SSLProtocol._closing attribute
+
+New SSL implementation:
+
+* Python issue #22560: On Python 3.5 and newer, use new SSL implementation
+ based on ssl.MemoryBIO instead of the legacy SSL implementation. Patch
+ written by Antoine Pitrou, based on the work of Geert Jansen.
+* If available, the new SSL implementation can be used by ProactorEventLoop to
+ support SSL.
+
+Enhance, fix and cleanup the IocpProactor:
+
+* Python issue #23293: Rewrite IocpProactor.connect_pipe(). Add
+ _overlapped.ConnectPipe() which tries to connect to the pipe for asynchronous
+ I/O (overlapped): call CreateFile() in a loop until it doesn't fail with
+ ERROR_PIPE_BUSY. Use an increasing delay between 1 ms and 100 ms.
+* Tulip issue #204: Fix IocpProactor.accept_pipe().
+ Overlapped.ConnectNamedPipe() now returns a boolean: True if the pipe is
+ connected (if ConnectNamedPipe() failed with ERROR_PIPE_CONNECTED), False if
+ the connection is in progress.
+* Tulip issue #204: Fix IocpProactor.recv(). If ReadFile() fails with
+ ERROR_BROKEN_PIPE, the operation is not pending: don't register the
+ overlapped.
+* Python issue #23095: Rewrite _WaitHandleFuture.cancel().
+ _WaitHandleFuture.cancel() now waits until the wait is cancelled to clear its
+ reference to the overlapped object. To wait until the cancellation is done,
+ UnregisterWaitEx() is used with an event instead of UnregisterWait().
+* Python issue #23293: Rewrite IocpProactor.connect_pipe() as a coroutine. Use
+ a coroutine with asyncio.sleep() instead of call_later() to ensure that the
+ scheduled call is cancelled.
+* Fix ProactorEventLoop.start_serving_pipe(). If a client was connected before
+ the server was closed: drop the client (close the pipe) and exit
+* Python issue #23293: Cleanup IocpProactor.close(). The special case for
+  connect_pipe() is no longer needed. connect_pipe() doesn't use overlapped
+ operations anymore.
+* IocpProactor.close(): don't cancel futures which are already cancelled
+* Enhance (fix) BaseProactorEventLoop._loop_self_reading(). Handle correctly
+ CancelledError: just exit. On error, log the exception and exit; don't try to
+ close the event loop (it doesn't work).
+
+Bug fixes:
+
+* Fix LifoQueue's and PriorityQueue's put() and task_done().
+* Issue #222: Fix the @coroutine decorator for functions without __name__
+ attribute like functools.partial(). Enhance also the representation of a
+ CoroWrapper if the coroutine function is a functools.partial().
+* Python issue #23879: SelectorEventLoop.sock_connect() must not call connect()
+ again if the first call to connect() raises an InterruptedError. When the C
+ function connect() fails with EINTR, the connection runs in background. We
+ have to wait until the socket becomes writable to be notified when the
+ connection succeed or fails.
+* Fix _SelectorTransport.__repr__() if the event loop is closed
+* Fix repr(BaseSubprocessTransport) if it didn't start yet
+* Workaround CPython bug #23353. Don't use yield/yield-from in an except block
+ of a generator. Store the exception and handle it outside the except block.
+* Fix BaseSelectorEventLoop._accept_connection(). Close the transport on error.
+ In debug mode, log errors using call_exception_handler().
+* Fix _UnixReadPipeTransport and _UnixWritePipeTransport. Only start reading
+ when connection_made() has been called.
+* Fix _SelectorSslTransport.close(). Don't call protocol.connection_lost() if
+ protocol.connection_made() was not called yet: if the SSL handshake failed or
+ is still in progress. The close() method can be called if the creation of the
+ connection is cancelled, by a timeout for example.
+* Fix _SelectorDatagramTransport constructor. Only start reading after
+ connection_made() has been called.
+* Fix _SelectorSocketTransport constructor. Only start reading when
+ connection_made() has been called: protocol.data_received() must not be
+ called before protocol.connection_made().
+* Fix SSLProtocol.eof_received(). Wake-up the waiter if it is not done yet.
+* Close transports on error. Fix create_datagram_endpoint(),
+ connect_read_pipe() and connect_write_pipe(): close the transport if the task
+ is cancelled or on error.
+* Close the transport on subprocess creation failure
+* Fix _ProactorBasePipeTransport.close(). Set the _read_fut attribute to None
+ after cancelling it.
+* Python issue #23243: Fix _UnixWritePipeTransport.close(). Do nothing if the
+ transport is already closed. Before it was not possible to close the
+ transport twice.
+* Python issue #23242: SubprocessStreamProtocol now closes the subprocess
+ transport at subprocess exit. Clear also its reference to the transport.
+* Fix BaseEventLoop._create_connection_transport(). Close the transport if the
+ creation of the transport (if the waiter) gets an exception.
+* Python issue #23197: On SSL handshake failure, check if the waiter is
+ cancelled before setting its exception.
+* Python issue #23173: Fix SubprocessStreamProtocol.connection_made() to handle
+ cancelled waiter.
+* Python issue #23173: If an exception is raised during the creation of a
+ subprocess, kill the subprocess (close pipes, kill and read the return
+ status). Log an error in such case.
+* Python issue #23209: Break some reference cycles in asyncio. Patch written by
+ Martin Richard.
+
+Optimization:
+
+* Only call _check_resolved_address() in debug mode. _check_resolved_address()
+ is implemented with getaddrinfo() which is slow. If available, use
+ socket.inet_pton() instead of socket.getaddrinfo(), because it is much faster
+
+Other changes:
+
+* Python issue #23456: Add missing @coroutine decorators
+* Python issue #23475: Fix test_close_kill_running(). Really kill the child
+  process, don't mock the Popen.kill() method completely. This change fixes
+  memory leaks and reference leaks.
+* BaseSubprocessTransport: repr() mentions when the child process is running
+* BaseSubprocessTransport.close() doesn't try to kill the process if it already
+ finished.
+* Tulip issue #221: Fix docstring of QueueEmpty and QueueFull
+* Fix subprocess_attach_write_pipe example. Close the transport, not directly
+ the pipe.
+* Python issue #23347: send_signal(), terminate(), kill() don't check if the
+  transport was closed. The check broke a Tulip example and this limitation is
+ arbitrary. Check if _proc is None should be enough. Enhance also close(): do
+ nothing when called the second time.
+* Python issue #23347: Refactor creation of subprocess transports.
+* Python issue #23243: On Python 3.4 and newer, emit a ResourceWarning when an
+ event loop or a transport is not explicitly closed
+* tox.ini: enable ResourceWarning warnings
+* Python issue #23243: test_sslproto: Close explicitly transports
+* SSL transports now clear their reference to the waiter.
+* Python issue #23208: Add BaseEventLoop._current_handle. In debug mode,
+ BaseEventLoop._run_once() now sets the BaseEventLoop._current_handle
+ attribute to the handle currently executed.
+* Replace test_selectors.py with the file of Python 3.5 adapted for asyncio and
+ Python 3.3.
+* Tulip issue #184: FlowControlMixin constructor now get the event loop if the
+ loop parameter is not set.
+* _ProactorBasePipeTransport now sets the _sock attribute to None when the
+ transport is closed.
+* Python issue #23219: cancelling wait_for() now cancels the task
+* Python issue #23243: Close explicitly event loops and transports in tests
+* Python issue #23140: Fix cancellation of Process.wait(). Check the state of
+ the waiter future before setting its result.
+* Python issue #23046: Expose the BaseEventLoop class in the asyncio namespace
+* Python issue #22926: In debug mode, call_soon(), call_at() and call_later()
+ methods of BaseEventLoop now use the identifier of the current thread to
+ ensure that they are called from the thread running the event loop. Before,
+ the get_event_loop() method was used to check the thread, and no exception
+ was raised when the thread had no event loop. Now the methods always raise an
+ exception in debug mode when called from the wrong thread. It should help to
+ notice misusage of the API.
+
+2014-12-19: Version 1.0.4
+=========================
+
+Changes:
+
+* Python issue #22922: create_task(), call_at(), call_soon(),
+ call_soon_threadsafe() and run_in_executor() now raise an error if the event
+ loop is closed. Initial patch written by Torsten Landschoff.
+* Python issue #22921: Don't require OpenSSL SNI to pass hostname to ssl
+ functions. Patch by Donald Stufft.
+* Add run_aiotest.py: run the aiotest test suite.
+* tox now also run the aiotest test suite
+* Python issue #23074: get_event_loop() now raises an exception if the thread
+ has no event loop even if assertions are disabled.
+
+Bugfixes:
+
+* Fix a race condition in BaseSubprocessTransport._try_finish(): ensure that
+ connection_made() is called before connection_lost().
+* Python issue #23009: selectors, make sure EpollSelector.select() works when
+ no file descriptor is registered.
+* Python issue #22922: Fix ProactorEventLoop.close(). Call
+  _stop_accept_futures() before setting the _closed attribute, otherwise
+ call_soon() raises an error.
+* Python issue #22429: Fix EventLoop.run_until_complete(), don't stop the event
+ loop if a BaseException is raised, because the event loop is already stopped.
+* Initialize more Future and Task attributes in the class definition to avoid
+ attribute errors in destructors.
+* Python issue #22685: Set the transport of stdout and stderr StreamReader
+  objects in the SubprocessStreamProtocol. It allows pausing the transport to
+ not buffer too much stdout or stderr data.
+* BaseSelectorEventLoop.close() now closes the self-pipe before calling the
+ parent close() method. If the event loop is already closed, the self-pipe is
+ not unregistered from the selector.
+
+
+2014-10-20: Version 1.0.3
+=========================
+
+Changes:
+
+* On Python 2 in debug mode, Future.set_exception() now stores the traceback
+ object of the exception in addition to the exception object. When a task
+ waiting for another task and the other task raises an exception, the
+ traceback object is now copied with the exception. Be careful, storing the
+ traceback object may create reference leaks.
+* Use ssl.create_default_context() if available to create the default SSL
+ context: Python 2.7.9 and newer, or Python 3.4 and newer.
+* On Python 3.5 and newer, reuse socket.socketpair() in the windows_utils
+ submodule.
+* On Python 3.4 and newer, use os.set_inheritable().
+* Enhance protocol representation: add "closed" or "closing" info.
+* run_forever() now consumes BaseException of the temporary task. If the
+ coroutine raised a BaseException, consume the exception to not log a warning.
+ The caller doesn't have access to the local task.
+* Python issue 22448: cleanup _run_once(), only iterate once to remove delayed
+ calls that were cancelled.
+* The destructor of the Return class now shows where the Return object was
+ created.
+* run_tests.py doesn't catch any exceptions anymore when loading tests, only
+ catch SkipTest.
+* Fix (SSL) tests for the future Python 2.7.9 which includes a "new" ssl
+ module: module backported from Python 3.5.
+* BaseEventLoop.add_signal_handler() now raises an exception if the parameter
+ is a coroutine function.
+* Coroutine functions and objects are now rejected with a TypeError by the
+ following functions: add_signal_handler(), call_at(), call_later(),
+ call_soon(), call_soon_threadsafe(), run_in_executor().
+
+
+2014-10-02: Version 1.0.2
+=========================
+
+This release fixes bugs. It also provides more information in debug mode on
+error.
+
+Major changes:
+
+* Tulip issue #203: Add _FlowControlMixin.get_write_buffer_limits() method.
+* Python issue #22063: socket operations (sock_recv, sock_sendall,
+ sock_connect, sock_accept) of SelectorEventLoop now raise an exception in
+ debug mode if sockets are in blocking mode.
+
+Major bugfixes:
+
+* Tulip issue #205: Fix a race condition in BaseSelectorEventLoop.sock_connect().
+* Tulip issue #201: Fix a race condition in wait_for(). Don't raise a
+ TimeoutError if we reached the timeout and the future completed in the same
+ iteration of the event loop. A side effect of the bug is that Queue.get()
+  loses items.
+* PipeServer.close() now cancels the "accept pipe" future which cancels the
+ overlapped operation.
+
+Other changes:
+
+* Python issue #22448: Improve cancelled timer callback handles cleanup. Patch
+ by Joshua Moore-Oliva.
+* Python issue #22369: Change "context manager protocol" to "context management
+ protocol". Patch written by Serhiy Storchaka.
+* Tulip issue #206: In debug mode, keep the callback in the representation of
+ Handle and TimerHandle after cancel().
+* Tulip issue #207: Fix test_tasks.test_env_var_debug() to use correct asyncio
+ module.
+* runtests.py: display a message to mention if tests are run in debug or
+ release mode
+* Tulip issue #200: Log errors in debug mode instead of simply ignoring them.
+* Tulip issue #200: _WaitHandleFuture._unregister_wait() now catches and logs
+ exceptions.
+* _fatal_error() method of _UnixReadPipeTransport and _UnixWritePipeTransport
+ now log all exceptions in debug mode
+* Fix debug log in BaseEventLoop.create_connection(): get the socket object
+ from the transport because SSL transport closes the old socket and creates a
+ new SSL socket object.
+* Remove the _SelectorSslTransport._rawsock attribute: it contained the closed
+ socket (not very useful) and it was not used.
+* Fix _SelectorTransport.__repr__() if the transport was closed
+* Use the new os.set_blocking() function of Python 3.5 if available
+
+
+2014-07-30: Version 1.0.1
+=========================
+
+This release supports PyPy and has a better support of asyncio coroutines,
+especially in debug mode.
+
+Changes:
+
+* Tulip issue #198: asyncio.Condition now accepts an optional lock object.
+* Enhance representation of Future and Future subclasses: add "created at".
+
+Bugfixes:
+
+* Fix Trollius issue #9: @trollius.coroutine now works on callable objects
+ (without ``__name__`` attribute), not only on functions.
+* Fix Trollius issue #13: asyncio futures are now accepted in all functions:
+ as_completed(), async(), @coroutine, gather(), run_until_complete(),
+ wrap_future().
+* Fix support of asyncio coroutines in debug mode. If the last instruction
+ of the coroutine is "yield from", it's an asyncio coroutine and it does not
+ need to use From().
+* Fix and enhance _WaitHandleFuture.cancel():
+
+ - Tulip issue #195: Fix a crash on Windows: don't call UnregisterWait() twice
+ if a _WaitHandleFuture is cancelled twice.
+ - Fix _WaitHandleFuture.cancel(): return the result of the parent cancel()
+ method (True or False).
+ - _WaitHandleFuture.cancel() now notify IocpProactor through the overlapped
+ object that the wait was cancelled.
+
+* Tulip issue #196: _OverlappedFuture now clears its reference to the
+ overlapped object. IocpProactor keeps a reference to the overlapped object
+ until it is notified of its completion. Log also an error in debug mode if it
+ gets unexpected notifications.
+* Fix runtest.py to be able to log at level DEBUG.
+
+Other changes:
+
+* BaseSelectorEventLoop._write_to_self() now logs errors in debug mode.
+* Fix as_completed(): it's not a coroutine, don't use ``yield From(...)`` but
+ ``yield ...``
+* Tulip issue #193: Convert StreamWriter.drain() to a classic coroutine.
+* Tulip issue #194: Don't use sys.getrefcount() in unit tests: the full test
+ suite now pass on PyPy.
+
+
+2014-07-21: Version 1.0
+=======================
+
+Major Changes
+-------------
+
+* Event loops have a new ``create_task()`` method, which is now the recommended
+  way to create a task object. This method can be overridden by third-party
+ event loops to use their own task class.
+* The debug mode has been improved a lot. Set the ``TROLLIUSDEBUG`` environment
+ variable to ``1`` and configure logging to log at level ``logging.DEBUG``
+ (ex: ``logging.basicConfig(level=logging.DEBUG)``). Changes:
+
+ - much better representation of Trollius objects (ex: ``repr(task)``):
+ unified ``<Class arg1 arg2 ...>`` format, use qualified name when available
+ - show the traceback where objects were created
+ - show the current filename and line number for coroutine
+ - show the filename and line number where objects were created
+ - log most important socket events
+ - log most important subprocess events
+
+* ``Handle.cancel()`` now clears references to callback and args
+* Log an error if a Task is destroyed while it is still pending, but only on
+ Python 3.4 and newer.
+* Fix for asyncio coroutines when passing tuple value in debug mode.
+ ``CoroWrapper.send()`` now checks if it is called from a "yield from"
+ generator to decide if the parameter should be unpacked or not.
+* ``Process.communicate()`` now ignores ``BrokenPipeError`` and
+ ``ConnectionResetError`` exceptions.
+* Rewrite signal handling on Python 3.3 and newer to fix a race condition: use
+ the "self-pipe" to get signal numbers.
+
+
+Other Changes
+-------------
+
+* Fix ``ProactorEventLoop()`` in debug mode
+* Fix a race condition when setting the result of a Future with
+ ``call_soon()``. Add an helper, a private method, to set the result only if
+ the future was not cancelled.
+* Fix ``asyncio.__all__``: export also ``unix_events`` and ``windows_events``
+ symbols. For example, on Windows, it was not possible to get
+ ``ProactorEventLoop`` or ``DefaultEventLoopPolicy`` using ``from asyncio
+ import *``.
+* ``Handle.cancel()`` now clears references to callback and args
+* Make Server attributes and methods private, the sockets attribute remains
+ public.
+* BaseEventLoop.create_datagram_endpoint() now waits until
+ protocol.connection_made() has been called. Document also why transport
+ constructors use a waiter.
+* _UnixSubprocessTransport: fix file mode of stdin: open stdin in write mode,
+ not in read mode.
+
+
+2014-06-23: version 0.4
+=======================
+
+Changes between Trollius 0.3 and 0.4:
+
+* Trollius event loop now supports asyncio coroutines:
+
+ - Trollius coroutines can yield asyncio coroutines,
+ - asyncio coroutines can yield Trollius coroutines,
+ - asyncio.set_event_loop() accepts a Trollius event loop,
+ - asyncio.set_event_loop_policy() accepts a Trollius event loop policy.
+
+* The ``PYTHONASYNCIODEBUG`` environment variable has been renamed to
+ ``TROLLIUSDEBUG``. The environment variable is now used even if the Python
+ command line option ``-E`` is used.
+* Synchronize with Tulip.
+* Support PyPy (fix subprocess, fix unit tests).
+
+Tulip changes:
+
+* Tulip issue #171: BaseEventLoop.close() now raises an exception if the event
+ loop is running. You must first stop the event loop and then wait until it
+ stopped, before closing it.
+* Tulip issue #172: only log selector timing in debug mode
+* Enable the debug mode of event loops when the ``TROLLIUSDEBUG`` environment
+ variable is set
+* BaseEventLoop._assert_is_current_event_loop() now only raises an exception if
+ the current loop is set.
+* Tulip issue #105: in debug mode, log callbacks taking more than 100 ms to be
+ executed.
+* Python issue 21595: ``BaseSelectorEventLoop._read_from_self()`` reads all
+ available bytes from the "self pipe", not only a single byte. This change
+ reduces the risk of having the pipe full and so getting the "BlockingIOError:
+ [Errno 11] Resource temporarily unavailable" message.
+* Python issue 21723: asyncio.Queue: support any type of number (ex: float) for
+ the maximum size. Patch written by Vajrasky Kok.
+* Issue #173: Enhance repr(Handle) and repr(Task): add the filename and line
+ number, when available. For task, the current line number of the coroutine
+ is used.
+* Add BaseEventLoop.is_closed() method. run_forever() and run_until_complete()
+ methods now raises an exception if the event loop was closed.
+* Make sure that socketpair() close sockets on error. Close the listening
+ socket if sock.bind() raises an exception.
+* Fix ResourceWarning: close sockets on errors.
+ BaseEventLoop.create_connection(), BaseEventLoop.create_datagram_endpoint()
+ and _UnixSelectorEventLoop.create_unix_server() now close the newly created
+ socket on error.
+* Rephrase and fix docstrings.
+* Fix tests on Windows: wait for the subprocess exit. Before, regrtest failed
+ to remove the temporary test directory because the process was still running
+ in this directory.
+* Refactor unit tests.
+
+On Python 3.5, generators now get their name from the function, no more from
+the code. So the ``@coroutine`` decorator doesn't lose the original name of
+the function anymore.
+
+
+2014-05-26: version 0.3
+=======================
+
+Rename the Python module ``asyncio`` to ``trollius`` to support Python 3.4. On
+Python 3.4, there is already a module called ``asyncio`` in the standard
+library which conflicted with ``asyncio`` module of Trollius 0.2. To write
+asyncio code working on Trollius and Tulip, use ``import trollius as asyncio``.
+
+Changes between Trollius 0.2 and 0.3:
+
+* Synchronize with Tulip 3.4.1.
+* Enhance Trollius documentation.
+* Trollius issue #7: Fix ``asyncio.time_monotonic`` on Windows older than
+ Vista (ex: Windows 2000 and Windows XP).
+* Fedora packages have been accepted.
+
+Changes between Tulip 3.4.0 and 3.4.1:
+
+* Pull in Solaris ``devpoll`` support by Giampaolo Rodola
+ (``trollius.selectors`` module).
+* Add options ``-r`` and ``--randomize`` to runtests.py to randomize test
+ order.
+* Add a simple echo client/server example.
+* Tulip issue #166: Add ``__weakref__`` slots to ``Handle`` and
+ ``CoroWrapper``.
+* ``EventLoop.create_unix_server()`` now raises a ``ValueError`` if path and
+ sock are specified at the same time.
+* Ensure ``call_soon()``, ``call_later()`` and ``call_at()`` are invoked on
+ current loop in debug mode. Raise a ``RuntimeError`` if the event loop of the
+  current thread is different. The check should help to debug thread-safety
+ issue. Patch written by David Foster.
+* Tulip issue #157: Improve test_events.py, avoid ``run_briefly()`` which is
+ not reliable.
+* Reject add/remove reader/writer when event loop is closed.
+
+Bugfixes of Tulip 3.4.1:
+
+* Tulip issue #168: ``StreamReader.read(-1)`` from pipe may hang if
+ data exceeds buffer limit.
+* CPython issue #21447: Fix a race condition in
+ ``BaseEventLoop._write_to_self()``.
+* Different bugfixes in ``CoroWrapper`` of ``trollius.coroutines``, class used
+ when running Trollius in debug mode:
+
+ - Fix ``CoroWrapper`` to workaround yield-from bug in CPython 3.4.0. The
+ CPython bug is now fixed in CPython 3.4.1 and 3.5.
+ - Make sure ``CoroWrapper.send`` proxies one argument correctly.
+ - CPython issue #21340: Be careful accessing instance variables in ``__del__``.
+ - Tulip issue #163: Add ``gi_{frame,running,code}`` properties to
+ ``CoroWrapper``.
+
+* Fix ``ResourceWarning`` warnings
+* Tulip issue #159: Fix ``windows_utils.socketpair()``. Use ``"127.0.0.1"``
+ (IPv4) or ``"::1"`` (IPv6) host instead of ``"localhost"``, because
+ ``"localhost"`` may be a different IP address. Reject also invalid arguments:
+ only ``AF_INET`` and ``AF_INET6`` with ``SOCK_STREAM`` (and ``proto=0``) are
+ supported.
+* Tulip issue #158: ``Task._step()`` now also sets ``self`` to ``None`` if an
+ exception is raised. ``self`` is set to ``None`` to break a reference cycle.
+
+
+2014-03-04: version 0.2
+=======================
+
+Trollius now uses ``yield From(...)`` syntax which looks close to Tulip ``yield
+from ...`` and makes it easier to port Trollius code to Tulip. The usage of
+``From()`` is not mandatory yet, but it may become mandatory in a future
+version. However, if ``yield`` is used without ``From``, an exception is
+raised if the event loop is running in debug mode.
+
+Major changes:
+
+* Replace ``yield ...`` syntax with ``yield From(...)``
+* On Python 2, Future.set_exception() now only saves the traceback if the debug
+ mode of the event loop is enabled for best performances in production mode.
+ Use ``loop.set_debug(True)`` to save the traceback.
+
+Bugfixes:
+
+* Fix ``BaseEventLoop.default_exception_handler()`` on Python 2: get the
+ traceback from ``sys.exc_info()``
+* Fix unit tests on SSL sockets on Python older than 2.6.6. Example:
+ Mac OS 10.6 with Python 2.6.1 or OpenIndiana 148 with Python 2.6.4.
+* Fix error handling in the asyncio.time_monotonic module
+* Fix acquire() method of Lock, Condition and Semaphore: don't return a context
+ manager but True, as Tulip. Task._step() now does the trick.
+
+Other changes:
+
+* tox.ini: set PYTHONASYNCIODEBUG to 1 to run tests
+
+2014-02-25: version 0.1.6
+=========================
+
+Trollius changes:
+
+* Add a new Sphinx documentation:
+ http://trollius.readthedocs.org/
+* tox: pass posargs to nosetests. Patch contributed by Ian Wienand.
+* Fix support of Python 3.2 and add py32 to tox.ini
+* Merge with Tulip 0.4.1
+
+Major changes of Tulip 0.4.1:
+
+* Issue #81: Add support for UNIX Domain Sockets. New APIs:
+
+ - loop.create_unix_connection()
+ - loop.create_unix_server()
+ - streams.open_unix_connection()
+ - streams.start_unix_server()
+
+* Issue #80: Add new event loop exception handling API. New APIs:
+
+ - loop.set_exception_handler()
+ - loop.call_exception_handler()
+ - loop.default_exception_handler()
+
+* Issue #136: Add get_debug() and set_debug() methods to BaseEventLoopTests.
+ Add also a ``PYTHONASYNCIODEBUG`` environment variable to debug coroutines
+ since Python startup, to be able to debug coroutines defined directly in the
+ asyncio module.
+
+Other changes of Tulip 0.4.1:
+
+* asyncio.subprocess: Fix a race condition in communicate()
+* Fix _ProactorWritePipeTransport._pipe_closed()
+* Issue #139: Improve error messages on "fatal errors".
+* Issue #140: WriteTransport.set_write_buffer_size() to call
+ _maybe_pause_protocol()
+* Issue #129: BaseEventLoop.sock_connect() now raises an error if the address
+ is not resolved (hostname instead of an IP address) for AF_INET and
+ AF_INET6 address families.
+* Issue #131: as_completed() and wait() now raises a TypeError if the list of
+ futures is not a list but a Future, Task or coroutine object
+* Python issue #20495: Skip test_read_pty_output() of test_asyncio on FreeBSD
+ older than FreeBSD 8
+* Issue #130: Add more checks on subprocess_exec/subprocess_shell parameters
+* Issue #126: call_soon(), call_soon_threadsafe(), call_later(), call_at()
+ and run_in_executor() now raise a TypeError if the callback is a coroutine
+ function.
+* Python issue #20505: BaseEventLoop uses again the resolution of the clock
+ to decide if scheduled tasks should be executed or not.
+
+
+2014-02-10: version 0.1.5
+=========================
+
+- Merge with Tulip 0.3.1:
+
+ * New asyncio.subprocess module
+ * _UnixWritePipeTransport now also supports character devices, as
+ _UnixReadPipeTransport. Patch written by Jonathan Slenders.
+ * StreamReader.readexactly() now raises an IncompleteReadError if the
+ end of stream is reached before we received enough bytes, instead of
+ returning less bytes than requested.
+ * poll and epoll selectors now round the timeout away from zero (instead of
+ rounding towards zero) to fix a performance issue
+ * asyncio.queue: Empty renamed to QueueEmpty, Full to QueueFull
+ * _fatal_error() of _UnixWritePipeTransport and _ProactorBasePipeTransport
+ don't log BrokenPipeError nor ConnectionResetError
+  * Future.set_exception(exc) now instantiates exc if it is a class
+ * streams.StreamReader: Use bytearray instead of deque of bytes for internal
+ buffer
+
+- Fix test_wait_for() unit test
+
+2014-01-22: version 0.1.4
+=========================
+
+- The project moved to https://bitbucket.org/enovance/trollius
+- Fix CoroWrapper (_DEBUG=True): add missing import
+- Emit a warning when Return is not raised
+- Merge with Tulip to get latest Tulip bugfixes
+- Fix dependencies in tox.ini for the different Python versions
+
+2014-01-13: version 0.1.3
+=========================
+
+- Workaround bugs in the ssl module of Python older than 2.6.6. For example,
+ Mac OS 10.6 (Snow Leopard) uses Python 2.6.1.
+- ``return x, y`` is now written ``raise Return(x, y)`` instead of
+ ``raise Return((x, y))``
+- Support "with (yield lock):" syntax for Lock, Condition and Semaphore
+- SSL support is now optional: don't fail if the ssl module is missing
+- Add tox.ini, tool to run unit tests. For example, "tox -e py27" creates a
+ virtual environment to run tests with Python 2.7.
+
+2014-01-08: version 0.1.2
+=========================
+
+- Trollius now supports CPython 2.6-3.4, PyPy and Windows. All unit tests
+ pass with CPython 2.7 on Linux.
+- Fix Windows support. Fix compilation of the _overlapped module and add a
+ asyncio._winapi module (written in pure Python). Patch written by Marc
+ Schlaich.
+- Support Python 2.6: require an extra dependency,
+ ordereddict (and unittest2 for unit tests)
+- Support Python 3.2, 3.3 and 3.4
+- Support PyPy 2.2
+- Don't modify __builtins__ nor the ssl module to inject backported exceptions
+ like BlockingIOError or SSLWantReadError. Exceptions are available in the
+ asyncio module, ex: asyncio.BlockingIOError.
+
+2014-01-06: version 0.1.1
+=========================
+
+- Fix asyncio.time_monotonic on Mac OS X
+- Fix create_connection(ssl=True)
+- Don't export backported SSLContext in the ssl module anymore to not confuse
+ libraries testing hasattr(ssl, "SSLContext")
+- Relax dependency on the backported concurrent.futures module: use a
+ synchronous executor if the module is missing
+
+2014-01-04: version 0.1
+=======================
+
+- First public release
+
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..0d3b8dd
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+#
+# Trollius documentation build configuration file, created by
+# sphinx-quickstart on Fri Feb 21 11:05:42 2014.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#import sys, os
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Trollius'
+copyright = u'2014, Victor Stinner'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = release = '2.0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Trolliusdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'Trollius.tex', u'Trollius Documentation',
+ u'Victor Stinner', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'trollius', u'Trollius Documentation',
+ [u'Victor Stinner'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'Trollius', u'Trollius Documentation',
+ u'Victor Stinner', 'Trollius', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
diff --git a/doc/dev.rst b/doc/dev.rst
new file mode 100644
index 0000000..1bed7f8
--- /dev/null
+++ b/doc/dev.rst
@@ -0,0 +1,85 @@
+Run tests
+=========
+
+Run tests with tox
+------------------
+
+The `tox project <https://testrun.org/tox/latest/>`_ can be used to build a
+virtual environment with all runtime and test dependencies and run tests
+against different Python versions (2.6, 2.7, 3.2, 3.3).
+
+For example, to run tests with Python 2.7, just type::
+
+ tox -e py27
+
+To run tests against other Python versions:
+
+* ``py26``: Python 2.6
+* ``py27``: Python 2.7
+* ``py32``: Python 3.2
+* ``py33``: Python 3.3
+
+
+Test Dependencies
+-----------------
+
+On Python older than 3.3, unit tests require the `mock
+<https://pypi.python.org/pypi/mock>`_ module. Python 2.6 and 2.7 require also
+`unittest2 <https://pypi.python.org/pypi/unittest2>`_.
+
+To run ``run_aiotest.py``, you need the `aiotest
+<https://pypi.python.org/pypi/aiotest>`_ test suite: ``pip install aiotest``.
+
+
+Run tests on UNIX
+-----------------
+
+Run the following commands from the directory of the Trollius project.
+
+To run tests::
+
+ make test
+
+To run coverage (``coverage`` package is required)::
+
+ make coverage
+
+
+Run tests on Windows
+--------------------
+
+Run the following commands from the directory of the Trollius project.
+
+You can run the tests as follows::
+
+ C:\Python27\python.exe runtests.py
+
+And coverage as follows::
+
+ C:\Python27\python.exe runtests.py --coverage
+
+
+CPython bugs
+============
+
+The development of asyncio and trollius helped to identify different bugs in CPython:
+
+* 2.5.0 <= python <= 3.4.2: `sys.exc_info() bug when yield/yield-from is used
+ in an except block in a generator (#23353)
+ <http://bugs.python.org/issue23353>`_. The fix will be part of Python 3.4.3.
+ _UnixSelectorEventLoop._make_subprocess_transport() and
+ ProactorEventLoop._make_subprocess_transport() work around the bug.
+* python == 3.4.0: `Segfault in gc with cyclic trash (#21435)
+ <http://bugs.python.org/issue21435>`_.
+ Regression introduced in Python 3.4.0, fixed in Python 3.4.1.
+ Status in Ubuntu as of February 3rd, 2015: only Ubuntu Trusty (14.04 LTS) is
+ impacted (`bug #1367907: Segfault in gc with cyclic trash
+ <https://bugs.launchpad.net/ubuntu/+source/python3.4/+bug/1367907>`_, see
+ also `update Python3 for trusty #1348954
+ <https://bugs.launchpad.net/ubuntu/+source/python3.4/+bug/1348954>`_)
+* 3.3.0 <= python <= 3.4.0: `gen.send(tuple) unpacks the tuple instead of
+ passing 1 argument (the tuple) when gen is an object with a send() method,
+ not a classic generator (#21209) <http://bugs.python.org/issue21209>`_.
+ Regression introduced in Python 3.4.0, fixed in Python 3.4.1.
+ trollius.CoroWrapper.send() works around the issue, the bug is checked at
+ runtime once, when the module is imported.
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..5135da1
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,80 @@
+Trollius
+========
+
+.. image:: trollius.jpg
+ :alt: Trollius altaicus from Khangai Mountains (Mongòlia)
+ :align: right
+ :target: http://commons.wikimedia.org/wiki/File:Trollius_altaicus.jpg
+
+Trollius provides infrastructure for writing single-threaded concurrent
+code using coroutines, multiplexing I/O access over sockets and other
+resources, running network clients and servers, and other related primitives.
+Here is a more detailed list of the package contents:
+
+* a pluggable event loop with various system-specific implementations;
+
+* transport and protocol abstractions (similar to those in `Twisted
+ <http://twistedmatrix.com/>`_);
+
+* concrete support for TCP, UDP, SSL, subprocess pipes, delayed calls, and
+ others (some may be system-dependent);
+
+* a ``Future`` class that mimics the one in the ``concurrent.futures`` module,
+ but adapted for use with the event loop;
+
+* coroutines and tasks based on generators (``yield``), to help write
+ concurrent code in a sequential fashion;
+
+* cancellation support for ``Future``\s and coroutines;
+
+* synchronization primitives for use between coroutines in a single thread,
+ mimicking those in the ``threading`` module;
+
+* an interface for passing work off to a threadpool, for times when you
+ absolutely, positively have to use a library that makes blocking I/O calls.
+
+Trollius is a port of the `asyncio project <https://github.com/python/asyncio>`_
+(``asyncio`` module, `PEP 3156 <http://legacy.python.org/dev/peps/pep-3156/>`_)
+on Python 2. Trollius works on Python 2.6-3.5. It has been tested on Windows,
+Linux, Mac OS X, FreeBSD and OpenIndiana.
+
+* `Asyncio documentation <http://docs.python.org/dev/library/asyncio.html>`_
+* `Trollius documentation <http://trollius.readthedocs.org/>`_ (this document)
+* `Trollius project in the Python Cheeseshop (PyPI)
+ <https://pypi.python.org/pypi/trollius>`_ (download wheel packages and
+ tarballs)
+* `Trollius project at Github <https://github.com/haypo/trollius>`_
+ (bug tracker, source code)
+* Mailing list: `python-tulip Google Group
+ <https://groups.google.com/forum/?fromgroups#!forum/python-tulip>`_
+* IRC: ``#asyncio`` channel on the `Freenode network <https://freenode.net/>`_
+* Copyright/license: Open source, Apache 2.0. Enjoy!
+
+See also the `asyncio project at Github <https://github.com/python/asyncio>`_.
+
+
+Table Of Contents
+=================
+
+.. toctree::
+
+ using
+ install
+ libraries
+ asyncio
+ dev
+ changelog
+
+
+Trollius name
+=============
+
+Extract of `Trollius Wikipedia article
+<http://en.wikipedia.org/wiki/Trollius>`_:
+
+Trollius is a genus of about 30 species of plants in the family Ranunculaceae,
+closely related to Ranunculus. The common name of some species is globeflower
+or globe flower. Native to the cool temperate regions of the Northern
+Hemisphere, with the greatest diversity of species in Asia, trollius usually
+grow in heavy, wet clay soils.
+
diff --git a/doc/install.rst b/doc/install.rst
new file mode 100644
index 0000000..db4a8b1
--- /dev/null
+++ b/doc/install.rst
@@ -0,0 +1,113 @@
+++++++++++++++++
+Install Trollius
+++++++++++++++++
+
+Packages for Linux
+==================
+
+* `Debian package
+ <https://packages.debian.org/fr/sid/python-trollius>`_
+* `ArchLinux package
+ <https://aur.archlinux.org/packages/python2-trollius/>`_
+* `Fedora and CentOS package: python-trollius
+ <http://pkgs.org/download/python-trollius>`_
+
+
+Install Trollius on Windows using pip
+=====================================
+
+Since Trollius 0.2, `precompiled wheel packages <http://pythonwheels.com/>`_
+are now distributed on the Python Cheeseshop (PyPI). Procedure to install
+Trollius on Windows:
+
+* `Install pip
+ <http://www.pip-installer.org/en/latest/installing.html>`_, download
+ ``get-pip.py`` and type::
+
+ \Python27\python.exe get-pip.py
+
+* If you already have pip, ensure that you have at least pip 1.4. If you need
+ to upgrade::
+
+ \Python27\python.exe -m pip install -U pip
+
+* Install Trollius::
+
+ \Python27\python.exe -m pip install trollius
+
+* pip also installs the ``futures`` dependency
+
+.. note::
+
+ Only wheel packages for Python 2.7, 3.3 and 3.4 are currently distributed on
+ the Cheeseshop (PyPI). If you need wheel packages for other Python versions,
+ please ask.
+
+Download source code
+====================
+
+Command to download the development version of the source code (``trollius``
+branch)::
+
+ git clone https://github.com/haypo/trollius.git -b trollius
+
+The actual code lives in the ``trollius`` subdirectory. Tests are in the
+``tests`` subdirectory.
+
+See the `trollius project at Github
+<https://github.com/haypo/trollius>`_.
+
+The source code of the Trollius project is in the ``trollius`` branch of the
+Git repository, not in the default branch. The default branch is the
+Tulip project; the Trollius repository is a fork of the Tulip repository.
+
+
+Dependencies
+============
+
+Trollius requires the `six <https://pypi.python.org/pypi/six>`_ module.
+
+On Python older than 3.2, the `futures <https://pypi.python.org/pypi/futures>`_
+project is needed to get a backport of ``concurrent.futures``.
+
+Python 2.6 requires also `ordereddict
+<https://pypi.python.org/pypi/ordereddict>`_.
+
+
+Build manually Trollius on Windows
+==================================
+
+On Windows, if you cannot use precompiled wheel packages, an extension module
+must be compiled: the ``_overlapped`` module (source code: ``overlapped.c``).
+Read `Compile Python extensions on Windows
+<http://haypo-notes.readthedocs.org/python.html#compile-python-extensions-on-windows>`_
+to prepare your environment to build the Python extension. Then build the
+extension using::
+
+ C:\Python27\python.exe setup.py build_ext
+
+
+Backports
+=========
+
+To support Python 2.6-3.4, many Python modules of the standard library have
+been backported:
+
+======================== ========= =======================
+Name Python Backport
+======================== ========= =======================
+OSError 3.3 asyncio.py33_exceptions
+_overlapped 3.4 asyncio._overlapped
+_winapi 3.3 asyncio.py33_winapi
+collections.OrderedDict 2.7, 3.1 ordereddict (PyPI)
+concurrent.futures 3.2 futures (PyPI)
+selectors 3.4 asyncio.selectors
+ssl 3.2, 3.3 asyncio.py3_ssl
+time.monotonic 3.3 asyncio.time_monotonic
+unittest 2.7, 3.1 unittest2 (PyPI)
+unittest.mock 3.3 mock (PyPI)
+weakref.WeakSet 2.7, 3.0 asyncio.py27_weakrefset
+======================== ========= =======================
+
+
+
diff --git a/doc/libraries.rst b/doc/libraries.rst
new file mode 100644
index 0000000..424fd28
--- /dev/null
+++ b/doc/libraries.rst
@@ -0,0 +1,30 @@
+++++++++++++++++++
+Trollius Libraries
+++++++++++++++++++
+
+Libraries compatible with asyncio and trollius
+==============================================
+
+* `aioeventlet <https://aioeventlet.readthedocs.org/>`_: asyncio API
+ implemented on top of eventlet
+* `aiogevent <https://pypi.python.org/pypi/aiogevent>`_: asyncio API
+ implemented on top of gevent
+* `AutobahnPython <https://github.com/tavendo/AutobahnPython>`_: WebSocket &
+ WAMP for Python, it works on Trollius (Python 2.6 and 2.7), asyncio (Python
+ 3.3) and Python 3.4 (asyncio), and also on Twisted.
+* `Pulsar <http://pythonhosted.org/pulsar/>`_: Event driven concurrent
+ framework for Python. With pulsar you can write asynchronous servers
+ performing one or several activities in different threads and/or processes.
+ Trollius 0.3 requires Pulsar 0.8.2 or later. Pulsar uses the ``asyncio``
+ module if available, or imports ``trollius``.
+* `Tornado <http://www.tornadoweb.org/>`_ supports asyncio and Trollius since
+ Tornado 3.2: `tornado.platform.asyncio — Bridge between asyncio and Tornado
+ <http://tornado.readthedocs.org/en/latest/asyncio.html>`_. It tries to import
+ asyncio or fallback on importing trollius.
+
+Specific Ports
+==============
+
+* `trollius-redis <https://github.com/benjolitz/trollius-redis>`_:
+ A port of `asyncio-redis <http://asyncio-redis.readthedocs.org/>`_ to
+ trollius
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 0000000..5789d41
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Trollius.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Trollius.ghc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/doc/trollius.jpg b/doc/trollius.jpg
new file mode 100644
index 0000000..f4976c7
--- /dev/null
+++ b/doc/trollius.jpg
Binary files differ
diff --git a/doc/using.rst b/doc/using.rst
new file mode 100644
index 0000000..c730f86
--- /dev/null
+++ b/doc/using.rst
@@ -0,0 +1,85 @@
+++++++++++++++
+Using Trollius
+++++++++++++++
+
+Documentation of the asyncio module
+===================================
+
+The documentation of the asyncio module is part of the Python project. It can be read
+online: `asyncio - Asynchronous I/O, event loop, coroutines and tasks
+<http://docs.python.org/dev/library/asyncio.html>`_.
+
+To adapt asyncio examples for Trollius, "just":
+
+* replace ``asyncio`` with ``trollius``
+ (or use ``import trollius as asyncio``)
+* replace ``yield from ...`` with ``yield From(...)``
+* replace ``yield from []`` with ``yield From(None)``
+* in coroutines, replace ``return res`` with ``raise Return(res)``
+
+
+Trollius Hello World
+====================
+
+Print ``Hello World`` every two seconds, using a coroutine::
+
+ import trollius
+ from trollius import From
+
+ @trollius.coroutine
+ def greet_every_two_seconds():
+ while True:
+ print('Hello World')
+ yield From(trollius.sleep(2))
+
+ loop = trollius.get_event_loop()
+ loop.run_until_complete(greet_every_two_seconds())
+
+
+Debug mode
+==========
+
+To enable the debug mode:
+
+* Set ``TROLLIUSDEBUG`` environment variable to ``1``
+* Configure logging to log at level ``logging.DEBUG``,
+ ``logging.basicConfig(level=logging.DEBUG)`` for example
+
+The ``BaseEventLoop.set_debug()`` method can be used to set the debug mode on a
+specific event loop. The environment variable enables also the debug mode for
+coroutines.
+
+Effect of the debug mode:
+
+* On Python 2, :meth:`Future.set_exception` stores the traceback, so
+ ``loop.run_until_complete()`` raises the exception with the original
+ traceback.
+* Log coroutines defined but never "yielded"
+* BaseEventLoop.call_soon() and BaseEventLoop.call_at() methods raise an
+ exception if they are called from the wrong thread.
+* Log the execution time of the selector
+* Log callbacks taking more than 100 ms to be executed. The
+ BaseEventLoop.slow_callback_duration attribute is the minimum duration in
+ seconds of "slow" callbacks.
+* Log most important subprocess events:
+
+ - Log stdin, stdout and stderr transports and protocols
+ - Log process identifier (pid)
+ - Log connection of pipes
+ - Log process exit
+ - Log Process.communicate() tasks: feed stdin, read stdout and stderr
+
+* Log most important socket events:
+
+ - Socket connected
+ - New client (socket.accept())
+ - Connection reset or closed by peer (EOF)
+ - Log time elapsed in DNS resolution (getaddrinfo)
+ - Log pause/resume reading
+ - Log time of SSL handshake
+ - Log SSL handshake errors
+
+See `Debug mode of asyncio
+<https://docs.python.org/dev/library/asyncio-dev.html#debug-mode-of-asyncio>`_
+for more information.
+
diff --git a/examples/cacheclt.py b/examples/cacheclt.py
index b11a4d1..1f8ece4 100644
--- a/examples/cacheclt.py
+++ b/examples/cacheclt.py
@@ -4,8 +4,9 @@ See cachesvr.py for protocol description.
"""
import argparse
-import asyncio
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import From, Return
+from trollius import test_utils
import json
import logging
@@ -62,24 +63,24 @@ class CacheClient:
@asyncio.coroutine
def get(self, key):
- resp = yield from self.request('get', key)
+ resp = yield From(self.request('get', key))
if resp is None:
- return None
- return resp.get('value')
+ raise Return()
+ raise Return(resp.get('value'))
@asyncio.coroutine
def set(self, key, value):
- resp = yield from self.request('set', key, value)
+ resp = yield From(self.request('set', key, value))
if resp is None:
- return False
- return resp.get('status') == 'ok'
+ raise Return(False)
+ raise Return(resp.get('status') == 'ok')
@asyncio.coroutine
def delete(self, key):
- resp = yield from self.request('delete', key)
+ resp = yield From(self.request('delete', key))
if resp is None:
- return False
- return resp.get('status') == 'ok'
+ raise Return(False)
+ raise Return(resp.get('status') == 'ok')
@asyncio.coroutine
def request(self, type, key, value=None):
@@ -91,24 +92,25 @@ class CacheClient:
waiter = asyncio.Future(loop=self.loop)
if self.initialized:
try:
- yield from self.send(payload, waiter)
+ yield From(self.send(payload, waiter))
except IOError:
self.todo.add((payload, waiter))
else:
self.todo.add((payload, waiter))
- return (yield from waiter)
+ result = (yield From(waiter))
+ raise Return(result)
@asyncio.coroutine
def activity(self):
backoff = 0
while True:
try:
- self.reader, self.writer = yield from asyncio.open_connection(
- self.host, self.port, ssl=self.sslctx, loop=self.loop)
+ self.reader, self.writer = yield From(asyncio.open_connection(
+ self.host, self.port, ssl=self.sslctx, loop=self.loop))
except Exception as exc:
backoff = min(args.max_backoff, backoff + (backoff//2) + 1)
logging.info('Error connecting: %r; sleep %s', exc, backoff)
- yield from asyncio.sleep(backoff, loop=self.loop)
+ yield From(asyncio.sleep(backoff, loop=self.loop))
continue
backoff = 0
self.next_id = 0
@@ -118,9 +120,9 @@ class CacheClient:
while self.todo:
payload, waiter = self.todo.pop()
if not waiter.done():
- yield from self.send(payload, waiter)
+ yield From(self.send(payload, waiter))
while True:
- resp_id, resp = yield from self.process()
+ resp_id, resp = yield From(self.process())
if resp_id in self.pending:
payload, waiter = self.pending.pop(resp_id)
if not waiter.done():
@@ -143,11 +145,11 @@ class CacheClient:
self.writer.write(frame.encode('ascii'))
self.writer.write(payload)
self.pending[req_id] = payload, waiter
- yield from self.writer.drain()
+ yield From(self.writer.drain())
@asyncio.coroutine
def process(self):
- frame = yield from self.reader.readline()
+ frame = yield From(self.reader.readline())
if not frame:
raise EOFError()
head, tail = frame.split(None, 1)
@@ -156,17 +158,17 @@ class CacheClient:
if head != b'response':
raise IOError('Bad frame: %r' % frame)
resp_id, resp_size = map(int, tail.split())
- data = yield from self.reader.readexactly(resp_size)
+ data = yield From(self.reader.readexactly(resp_size))
if len(data) != resp_size:
raise EOFError()
resp = json.loads(data.decode('utf8'))
- return resp_id, resp
+ raise Return(resp_id, resp)
def main():
asyncio.set_event_loop(None)
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
@@ -193,13 +195,13 @@ def testing(label, cache, loop):
while True:
logging.info('%s %s', label, '-'*20)
try:
- ret = yield from w(cache.set(key, 'hello-%s-world' % label))
+ ret = yield From(w(cache.set(key, 'hello-%s-world' % label)))
logging.info('%s set %s', label, ret)
- ret = yield from w(cache.get(key))
+ ret = yield From(w(cache.get(key)))
logging.info('%s get %s', label, ret)
- ret = yield from w(cache.delete(key))
+ ret = yield From(w(cache.delete(key)))
logging.info('%s del %s', label, ret)
- ret = yield from w(cache.get(key))
+ ret = yield From(w(cache.get(key)))
logging.info('%s get2 %s', label, ret)
except asyncio.TimeoutError:
logging.warn('%s Timeout', label)
diff --git a/examples/cachesvr.py b/examples/cachesvr.py
index 053f9c2..20a54e4 100644
--- a/examples/cachesvr.py
+++ b/examples/cachesvr.py
@@ -57,7 +57,8 @@ form is returned, but the connection is not closed:
"""
import argparse
-import asyncio
+import trollius as asyncio
+from trollius import From
import json
import logging
import os
@@ -104,7 +105,7 @@ class Cache:
peer = writer.get_extra_info('socket').getpeername()
logging.info('got a connection from %s', peer)
try:
- yield from self.frame_parser(reader, writer)
+ yield From(self.frame_parser(reader, writer))
except Exception as exc:
logging.error('error %r from %s', exc, peer)
else:
@@ -122,13 +123,13 @@ class Cache:
# if the client doesn't send enough data but doesn't
# disconnect either. We add a timeout to each. (But the
# timeout should really be implemented by StreamReader.)
- framing_b = yield from asyncio.wait_for(
+ framing_b = yield From(asyncio.wait_for(
reader.readline(),
- timeout=args.timeout, loop=self.loop)
+ timeout=args.timeout, loop=self.loop))
if random.random()*100 < args.fail_percent:
logging.warn('Inserting random failure')
- yield from asyncio.sleep(args.fail_sleep*random.random(),
- loop=self.loop)
+ yield From(asyncio.sleep(args.fail_sleep*random.random(),
+ loop=self.loop))
writer.write(b'error random failure\r\n')
break
logging.debug('framing_b = %r', framing_b)
@@ -151,9 +152,9 @@ class Cache:
writer.write(b'error invalid frame parameters\r\n')
break
last_request_id = request_id
- request_b = yield from asyncio.wait_for(
+ request_b = yield From(asyncio.wait_for(
reader.readexactly(byte_count),
- timeout=args.timeout, loop=self.loop)
+ timeout=args.timeout, loop=self.loop))
try:
request = json.loads(request_b.decode('utf8'))
except ValueError:
@@ -165,10 +166,10 @@ class Cache:
break
response_b = json.dumps(response).encode('utf8') + b'\r\n'
byte_count = len(response_b)
- framing_s = 'response {} {}\r\n'.format(request_id, byte_count)
+ framing_s = 'response {0} {1}\r\n'.format(request_id, byte_count)
writer.write(framing_s.encode('ascii'))
- yield from asyncio.sleep(args.resp_sleep*random.random(),
- loop=self.loop)
+ yield From(asyncio.sleep(args.resp_sleep*random.random(),
+ loop=self.loop))
writer.write(response_b)
def handle_request(self, request):
@@ -217,7 +218,7 @@ class Cache:
def main():
asyncio.set_event_loop(None)
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
@@ -226,7 +227,7 @@ def main():
import ssl
# TODO: take cert/key from args as well.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
- sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslctx = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.options |= ssl.OP_NO_SSLv2
sslctx.load_cert_chain(
certfile=os.path.join(here, 'ssl_cert.pem'),
diff --git a/examples/child_process.py b/examples/child_process.py
index 3fac175..9e403a4 100644
--- a/examples/child_process.py
+++ b/examples/child_process.py
@@ -10,15 +10,16 @@ import os
import sys
try:
- import asyncio
+ import trollius as asyncio
except ImportError:
# asyncio is not installed
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
- import asyncio
+ import trollius as asyncio
+from trollius import From, Return
if sys.platform == 'win32':
- from asyncio.windows_utils import Popen, PIPE
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_utils import Popen, PIPE
+ from trollius.windows_events import ProactorEventLoop
else:
from subprocess import Popen, PIPE
@@ -29,8 +30,8 @@ else:
@asyncio.coroutine
def connect_write_pipe(file):
loop = asyncio.get_event_loop()
- transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol, file)
- return transport
+ transport, _ = yield From(loop.connect_write_pipe(asyncio.Protocol, file))
+ raise Return(transport)
#
# Wrap a readable pipe in a stream
@@ -42,8 +43,8 @@ def connect_read_pipe(file):
stream_reader = asyncio.StreamReader(loop=loop)
def factory():
return asyncio.StreamReaderProtocol(stream_reader)
- transport, _ = yield from loop.connect_read_pipe(factory, file)
- return stream_reader, transport
+ transport, _ = yield From(loop.connect_read_pipe(factory, file))
+ raise Return(stream_reader, transport)
#
@@ -80,9 +81,9 @@ def main(loop):
p = Popen([sys.executable, '-c', code],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
- stdin = yield from connect_write_pipe(p.stdin)
- stdout, stdout_transport = yield from connect_read_pipe(p.stdout)
- stderr, stderr_transport = yield from connect_read_pipe(p.stderr)
+ stdin = yield From(connect_write_pipe(p.stdin))
+ stdout, stdout_transport = yield From(connect_read_pipe(p.stdout))
+ stderr, stderr_transport = yield From(connect_read_pipe(p.stderr))
# interact with subprocess
name = {stdout:'OUT', stderr:'ERR'}
@@ -100,9 +101,9 @@ def main(loop):
# get and print lines from stdout, stderr
timeout = None
while registered:
- done, pending = yield from asyncio.wait(
+ done, pending = yield From(asyncio.wait(
registered, timeout=timeout,
- return_when=asyncio.FIRST_COMPLETED)
+ return_when=asyncio.FIRST_COMPLETED))
if not done:
break
for f in done:
diff --git a/examples/crawl.py b/examples/crawl.py
index 4bb0b4e..7f54059 100644
--- a/examples/crawl.py
+++ b/examples/crawl.py
@@ -1,7 +1,9 @@
-#!/usr/bin/env python3.4
+#!/usr/bin/env python
"""A simple web crawler."""
+from __future__ import print_function
+
# TODO:
# - More organized logging (with task ID or URL?).
# - Use logging module for Logger.
@@ -15,15 +17,23 @@
# - Handle out of file descriptors directly? (How?)
import argparse
-import asyncio
+import trollius as asyncio
+from trollius import From, Return
-import asyncio.locks
+import trollius.locks
import cgi
-from http.client import BadStatusLine
import logging
import re
import sys
import time
-import urllib.parse
+try:
+ from httplib import BadStatusLine
+ import urlparse
+ from urllib import splitport as urllib_splitport
+except ImportError:
+ # Python 3
+ from http.client import BadStatusLine
+ from urllib import parse as urlparse
+ from urllib.parse import splitport as urllib_splitport
ARGS = argparse.ArgumentParser(description="Web crawler")
@@ -96,7 +106,8 @@ class Logger:
def _log(self, n, args):
if self.level >= n:
- print(*args, file=sys.stderr, flush=True)
+ print(*args, file=sys.stderr)
+ sys.stderr.flush()
def log(self, n, *args):
self._log(n, args)
@@ -133,14 +144,14 @@ class ConnectionPool:
for conn in conns:
conn.close()
self.connections.clear()
- self.queue.clear()
+ del self.queue[:]
@asyncio.coroutine
def get_connection(self, host, port, ssl):
"""Create or reuse a connection."""
port = port or (443 if ssl else 80)
try:
- ipaddrs = yield from self.loop.getaddrinfo(host, port)
+ ipaddrs = yield From(self.loop.getaddrinfo(host, port))
except Exception as exc:
self.log(0, 'Exception %r for (%r, %r)' % (exc, host, port))
raise
@@ -148,7 +159,8 @@ class ConnectionPool:
(host, ', '.join(ip[4][0] for ip in ipaddrs)))
# Look for a reusable connection.
- for _, _, _, _, (h, p, *_) in ipaddrs:
+ for _, _, _, _, addr in ipaddrs:
+ h, p = addr[:2]
key = h, p, ssl
conn = None
conns = self.connections.get(key)
@@ -163,13 +175,13 @@ class ConnectionPool:
else:
self.log(1, '* Reusing pooled connection', key,
'FD =', conn.fileno())
- return conn
+ raise Return(conn)
# Create a new connection.
conn = Connection(self.log, self, host, port, ssl)
- yield from conn.connect()
+ yield From(conn.connect())
self.log(1, '* New connection', conn.key, 'FD =', conn.fileno())
- return conn
+ raise Return(conn)
def recycle_connection(self, conn):
"""Make a connection available for reuse.
@@ -258,8 +270,8 @@ class Connection:
@asyncio.coroutine
def connect(self):
- self.reader, self.writer = yield from asyncio.open_connection(
- self.host, self.port, ssl=self.ssl)
+ self.reader, self.writer = yield From(asyncio.open_connection(
+ self.host, self.port, ssl=self.ssl))
peername = self.writer.get_extra_info('peername')
if peername:
self.host, self.port = peername[:2]
@@ -286,7 +298,7 @@ class Request:
self.log = log
self.url = url
self.pool = pool
- self.parts = urllib.parse.urlparse(self.url)
+ self.parts = urlparse.urlparse(self.url)
self.scheme = self.parts.scheme
assert self.scheme in ('http', 'https'), repr(url)
self.ssl = self.parts.scheme == 'https'
@@ -311,8 +323,8 @@ class Request:
(self.hostname, self.port,
'ssl' if self.ssl else 'tcp',
self.url))
- self.conn = yield from self.pool.get_connection(self.hostname,
- self.port, self.ssl)
+ self.conn = yield From(self.pool.get_connection(self.hostname,
+ self.port, self.ssl))
def close(self, recycle=False):
"""Close the connection, recycle if requested."""
@@ -336,7 +348,7 @@ class Request:
"""Send the request."""
request_line = '%s %s %s' % (self.method, self.full_path,
self.http_version)
- yield from self.putline(request_line)
+ yield From(self.putline(request_line))
# TODO: What if a header is already set?
self.headers.append(('User-Agent', 'asyncio-example-crawl/0.0'))
self.headers.append(('Host', self.netloc))
@@ -344,15 +356,15 @@ class Request:
##self.headers.append(('Accept-Encoding', 'gzip'))
for key, value in self.headers:
line = '%s: %s' % (key, value)
- yield from self.putline(line)
- yield from self.putline('')
+ yield From(self.putline(line))
+ yield From(self.putline(''))
@asyncio.coroutine
def get_response(self):
"""Receive the response."""
response = Response(self.log, self.conn.reader)
- yield from response.read_headers()
- return response
+ yield From(response.read_headers())
+ raise Return(response)
class Response:
@@ -374,14 +386,15 @@ class Response:
@asyncio.coroutine
def getline(self):
"""Read one line from the connection."""
- line = (yield from self.reader.readline()).decode('latin-1').rstrip()
+ line = (yield From(self.reader.readline()))
+ line = line.decode('latin-1').rstrip()
self.log(2, '<', line)
- return line
+ raise Return(line)
@asyncio.coroutine
def read_headers(self):
"""Read the response status and the request headers."""
- status_line = yield from self.getline()
+ status_line = yield From(self.getline())
status_parts = status_line.split(None, 2)
if len(status_parts) != 3:
self.log(0, 'bad status_line', repr(status_line))
@@ -389,7 +402,7 @@ class Response:
self.http_version, status, self.reason = status_parts
self.status = int(status)
while True:
- header_line = yield from self.getline()
+ header_line = yield From(self.getline())
if not header_line:
break
# TODO: Continuation lines.
@@ -426,7 +439,7 @@ class Response:
self.log(2, 'parsing chunked response')
blocks = []
while True:
- size_header = yield from self.reader.readline()
+ size_header = yield From(self.reader.readline())
if not size_header:
self.log(0, 'premature end of chunked response')
break
@@ -435,10 +448,10 @@ class Response:
size = int(parts[0], 16)
if size:
self.log(3, 'reading chunk of', size, 'bytes')
- block = yield from self.reader.readexactly(size)
+ block = yield From(self.reader.readexactly(size))
assert len(block) == size, (len(block), size)
blocks.append(block)
- crlf = yield from self.reader.readline()
+ crlf = yield From(self.reader.readline())
assert crlf == b'\r\n', repr(crlf)
if not size:
break
@@ -447,12 +460,12 @@ class Response:
'bytes in', len(blocks), 'blocks')
else:
self.log(3, 'reading until EOF')
- body = yield from self.reader.read()
+ body = yield From(self.reader.read())
# TODO: Should make sure not to recycle the connection
# in this case.
else:
- body = yield from self.reader.readexactly(nbytes)
- return body
+ body = yield From(self.reader.readexactly(nbytes))
+ raise Return(body)
class Fetcher:
@@ -504,10 +517,10 @@ class Fetcher:
self.request = None
try:
self.request = Request(self.log, self.url, self.crawler.pool)
- yield from self.request.connect()
- yield from self.request.send_request()
- self.response = yield from self.request.get_response()
- self.body = yield from self.response.read()
+ yield From(self.request.connect())
+ yield From(self.request.send_request())
+ self.response = yield From(self.request.get_response())
+ self.body = yield From(self.response.read())
h_conn = self.response.get_header('connection').lower()
if h_conn != 'close':
self.request.close(recycle=True)
@@ -531,7 +544,7 @@ class Fetcher:
return
next_url = self.response.get_redirect_url()
if next_url:
- self.next_url = urllib.parse.urljoin(self.url, next_url)
+ self.next_url = urlparse.urljoin(self.url, next_url)
if self.max_redirect > 0:
self.log(1, 'redirect to', self.next_url, 'from', self.url)
self.crawler.add_url(self.next_url, self.max_redirect-1)
@@ -556,8 +569,8 @@ class Fetcher:
self.new_urls = set()
for url in self.urls:
url = unescape(url)
- url = urllib.parse.urljoin(self.url, url)
- url, frag = urllib.parse.urldefrag(url)
+ url = urlparse.urljoin(self.url, url)
+ url, frag = urlparse.urldefrag(url)
if self.crawler.add_url(url):
self.new_urls.add(url)
@@ -657,8 +670,8 @@ class Crawler:
self.pool = ConnectionPool(self.log, max_pool, max_tasks)
self.root_domains = set()
for root in roots:
- parts = urllib.parse.urlparse(root)
- host, port = urllib.parse.splitport(parts.netloc)
+ parts = urlparse.urlparse(root)
+ host, port = urllib_splitport(parts.netloc)
if not host:
continue
if re.match(r'\A[\d\.]*\Z', host):
@@ -731,11 +744,11 @@ class Crawler:
"""Add a URL to the todo list if not seen before."""
if self.exclude and re.search(self.exclude, url):
return False
- parts = urllib.parse.urlparse(url)
+ parts = urlparse.urlparse(url)
if parts.scheme not in ('http', 'https'):
self.log(2, 'skipping non-http scheme in', url)
return False
- host, port = urllib.parse.splitport(parts.netloc)
+ host, port = urllib_splitport(parts.netloc)
if not self.host_okay(host):
self.log(2, 'skipping non-root host in', url)
return False
@@ -750,7 +763,7 @@ class Crawler:
@asyncio.coroutine
def crawl(self):
"""Run the crawler until all finished."""
- with (yield from self.termination):
+ with (yield From(self.termination)):
while self.todo or self.busy:
if self.todo:
url, max_redirect = self.todo.popitem()
@@ -762,7 +775,7 @@ class Crawler:
self.busy[url] = fetcher
fetcher.task = asyncio.Task(self.fetch(fetcher))
else:
- yield from self.termination.wait()
+ yield From(self.termination.wait())
self.t1 = time.time()
@asyncio.coroutine
@@ -772,13 +785,13 @@ class Crawler:
Once this returns, move the fetcher from busy to done.
"""
url = fetcher.url
- with (yield from self.governor):
+ with (yield From(self.governor)):
try:
- yield from fetcher.fetch() # Fetcher gonna fetch.
+ yield From(fetcher.fetch()) # Fetcher gonna fetch.
finally:
# Force GC of the task, so the error is logged.
fetcher.task = None
- with (yield from self.termination):
+ with (yield From(self.termination)):
self.done[url] = fetcher
del self.busy[url]
self.termination.notify()
@@ -828,7 +841,7 @@ def main():
log = Logger(args.level)
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
asyncio.set_event_loop(loop)
elif args.select:
diff --git a/examples/echo_client_tulip.py b/examples/echo_client_tulip.py
index 88124ef..0a60926 100644
--- a/examples/echo_client_tulip.py
+++ b/examples/echo_client_tulip.py
@@ -1,15 +1,16 @@
-import asyncio
+import trollius as asyncio
+from trollius import From
END = b'Bye-bye!\n'
@asyncio.coroutine
def echo_client():
- reader, writer = yield from asyncio.open_connection('localhost', 8000)
+ reader, writer = yield From(asyncio.open_connection('localhost', 8000))
writer.write(b'Hello, world\n')
writer.write(b'What a fine day it is.\n')
writer.write(END)
while True:
- line = yield from reader.readline()
+ line = yield From(reader.readline())
print('received:', line)
if line == END or not line:
break
diff --git a/examples/echo_server_tulip.py b/examples/echo_server_tulip.py
index 8167e54..d7e6e29 100644
--- a/examples/echo_server_tulip.py
+++ b/examples/echo_server_tulip.py
@@ -1,13 +1,14 @@
-import asyncio
+import trollius as asyncio
+from trollius import From
@asyncio.coroutine
def echo_server():
- yield from asyncio.start_server(handle_connection, 'localhost', 8000)
+ yield From(asyncio.start_server(handle_connection, 'localhost', 8000))
@asyncio.coroutine
def handle_connection(reader, writer):
while True:
- data = yield from reader.read(8192)
+ data = yield From(reader.read(8192))
if not data:
break
writer.write(data)
diff --git a/examples/fetch0.py b/examples/fetch0.py
index 180fcf2..f98feeb 100644
--- a/examples/fetch0.py
+++ b/examples/fetch0.py
@@ -1,25 +1,26 @@
"""Simplest possible HTTP client."""
+from __future__ import print_function
import sys
-from asyncio import *
+from trollius import *
@coroutine
def fetch():
- r, w = yield from open_connection('python.org', 80)
+ r, w = yield From(open_connection('python.org', 80))
request = 'GET / HTTP/1.0\r\n\r\n'
print('>', request, file=sys.stderr)
w.write(request.encode('latin-1'))
while True:
- line = yield from r.readline()
+ line = yield From(r.readline())
line = line.decode('latin-1').rstrip()
if not line:
break
print('<', line, file=sys.stderr)
print(file=sys.stderr)
- body = yield from r.read()
- return body
+ body = yield From(r.read())
+ raise Return(body)
def main():
diff --git a/examples/fetch1.py b/examples/fetch1.py
index 8dbb6e4..9e9a1ca 100644
--- a/examples/fetch1.py
+++ b/examples/fetch1.py
@@ -3,10 +3,14 @@
This version adds URL parsing (including SSL) and a Response object.
"""
+from __future__ import print_function
import sys
-import urllib.parse
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
-from asyncio import *
+from trollius import *
class Response:
@@ -22,13 +26,15 @@ class Response:
def read(self, reader):
@coroutine
def getline():
- return (yield from reader.readline()).decode('latin-1').rstrip()
- status_line = yield from getline()
+ line = (yield From(reader.readline()))
+ line = line.decode('latin-1').rstrip()
+ raise Return(line)
+ status_line = yield From(getline())
if self.verbose: print('<', status_line, file=sys.stderr)
self.http_version, status, self.reason = status_line.split(None, 2)
self.status = int(status)
while True:
- header_line = yield from getline()
+ header_line = yield From(getline())
if not header_line:
break
if self.verbose: print('<', header_line, file=sys.stderr)
@@ -40,7 +46,7 @@ class Response:
@coroutine
def fetch(url, verbose=True):
- parts = urllib.parse.urlparse(url)
+ parts = urlparse(url)
if parts.scheme == 'http':
ssl = False
elif parts.scheme == 'https':
@@ -57,12 +63,12 @@ def fetch(url, verbose=True):
request = 'GET %s HTTP/1.0\r\n\r\n' % path
if verbose:
print('>', request, file=sys.stderr, end='')
- r, w = yield from open_connection(parts.hostname, port, ssl=ssl)
+ r, w = yield From(open_connection(parts.hostname, port, ssl=ssl))
w.write(request.encode('latin-1'))
response = Response(verbose)
- yield from response.read(r)
- body = yield from r.read()
- return body
+ yield From(response.read(r))
+ body = yield From(r.read())
+ raise Return(body)
def main():
diff --git a/examples/fetch2.py b/examples/fetch2.py
index 7617b59..5a321a8 100644
--- a/examples/fetch2.py
+++ b/examples/fetch2.py
@@ -3,11 +3,17 @@
This version adds a Request object.
"""
+from __future__ import print_function
import sys
-import urllib.parse
-from http.client import BadStatusLine
+try:
+ from urllib.parse import urlparse
+ from http.client import BadStatusLine
+except ImportError:
+ # Python 2
+ from urlparse import urlparse
+ from httplib import BadStatusLine
-from asyncio import *
+from trollius import *
class Request:
@@ -15,7 +21,7 @@ class Request:
def __init__(self, url, verbose=True):
self.url = url
self.verbose = verbose
- self.parts = urllib.parse.urlparse(self.url)
+ self.parts = urlparse(self.url)
self.scheme = self.parts.scheme
assert self.scheme in ('http', 'https'), repr(url)
self.ssl = self.parts.scheme == 'https'
@@ -40,9 +46,9 @@ class Request:
print('* Connecting to %s:%s using %s' %
(self.hostname, self.port, 'ssl' if self.ssl else 'tcp'),
file=sys.stderr)
- self.reader, self.writer = yield from open_connection(self.hostname,
+ self.reader, self.writer = yield From(open_connection(self.hostname,
self.port,
- ssl=self.ssl)
+ ssl=self.ssl))
if self.verbose:
print('* Connected to %s' %
(self.writer.get_extra_info('peername'),),
@@ -67,8 +73,8 @@ class Request:
@coroutine
def get_response(self):
response = Response(self.reader, self.verbose)
- yield from response.read_headers()
- return response
+ yield From(response.read_headers())
+ raise Return(response)
class Response:
@@ -83,11 +89,13 @@ class Response:
@coroutine
def getline(self):
- return (yield from self.reader.readline()).decode('latin-1').rstrip()
+ line = (yield From(self.reader.readline()))
+ line = line.decode('latin-1').rstrip()
+ raise Return(line)
@coroutine
def read_headers(self):
- status_line = yield from self.getline()
+ status_line = yield From(self.getline())
if self.verbose: print('<', status_line, file=sys.stderr)
status_parts = status_line.split(None, 2)
if len(status_parts) != 3:
@@ -95,7 +103,7 @@ class Response:
self.http_version, status, self.reason = status_parts
self.status = int(status)
while True:
- header_line = yield from self.getline()
+ header_line = yield From(self.getline())
if not header_line:
break
if self.verbose: print('<', header_line, file=sys.stderr)
@@ -112,20 +120,20 @@ class Response:
nbytes = int(value)
break
if nbytes is None:
- body = yield from self.reader.read()
+ body = yield From(self.reader.read())
else:
- body = yield from self.reader.readexactly(nbytes)
- return body
+ body = yield From(self.reader.readexactly(nbytes))
+ raise Return(body)
@coroutine
def fetch(url, verbose=True):
request = Request(url, verbose)
- yield from request.connect()
- yield from request.send_request()
- response = yield from request.get_response()
- body = yield from response.read()
- return body
+ yield From(request.connect())
+ yield From(request.send_request())
+ response = yield From(request.get_response())
+ body = yield From(response.read())
+ raise Return(body)
def main():
@@ -134,7 +142,11 @@ def main():
body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv))
finally:
loop.close()
- sys.stdout.buffer.write(body)
+ if hasattr(sys.stdout, 'buffer'):
+ sys.stdout.buffer.write(body)
+ else:
+ # Python 2
+ sys.stdout.write(body)
if __name__ == '__main__':
diff --git a/examples/fetch3.py b/examples/fetch3.py
index 9419afd..0fc56d1 100644
--- a/examples/fetch3.py
+++ b/examples/fetch3.py
@@ -4,11 +4,17 @@ This version adds a primitive connection pool, redirect following and
chunked transfer-encoding. It also supports a --iocp flag.
"""
+from __future__ import print_function
import sys
-import urllib.parse
-from http.client import BadStatusLine
+try:
+    from urllib.parse import urlparse, urljoin
+    from http.client import BadStatusLine
+except ImportError:
+    # Python 2
+    from urlparse import urlparse, urljoin
+    from httplib import BadStatusLine
-from asyncio import *
+from trollius import *
class ConnectionPool:
@@ -25,12 +31,13 @@ class ConnectionPool:
@coroutine
def open_connection(self, host, port, ssl):
port = port or (443 if ssl else 80)
- ipaddrs = yield from get_event_loop().getaddrinfo(host, port)
+ ipaddrs = yield From(get_event_loop().getaddrinfo(host, port))
if self.verbose:
print('* %s resolves to %s' %
(host, ', '.join(ip[4][0] for ip in ipaddrs)),
file=sys.stderr)
- for _, _, _, _, (h, p, *_) in ipaddrs:
+ for _, _, _, _, addr in ipaddrs:
+ h, p = addr[:2]
key = h, p, ssl
conn = self.connections.get(key)
if conn:
@@ -40,14 +47,15 @@ class ConnectionPool:
continue
if self.verbose:
print('* Reusing pooled connection', key, file=sys.stderr)
- return conn
- reader, writer = yield from open_connection(host, port, ssl=ssl)
- host, port, *_ = writer.get_extra_info('peername')
+ raise Return(conn)
+ reader, writer = yield From(open_connection(host, port, ssl=ssl))
+ addr = writer.get_extra_info('peername')
+ host, port = addr[:2]
key = host, port, ssl
self.connections[key] = reader, writer
if self.verbose:
print('* New connection', key, file=sys.stderr)
- return reader, writer
+ raise Return(reader, writer)
class Request:
@@ -55,7 +63,7 @@ class Request:
def __init__(self, url, verbose=True):
self.url = url
self.verbose = verbose
- self.parts = urllib.parse.urlparse(self.url)
+ self.parts = urlparse(self.url)
self.scheme = self.parts.scheme
assert self.scheme in ('http', 'https'), repr(url)
self.ssl = self.parts.scheme == 'https'
@@ -83,9 +91,9 @@ class Request:
self.vprint('* Connecting to %s:%s using %s' %
(self.hostname, self.port, 'ssl' if self.ssl else 'tcp'))
self.reader, self.writer = \
- yield from pool.open_connection(self.hostname,
+ yield From(pool.open_connection(self.hostname,
self.port,
- ssl=self.ssl)
+ ssl=self.ssl))
self.vprint('* Connected to %s' %
(self.writer.get_extra_info('peername'),))
@@ -93,24 +101,24 @@ class Request:
def putline(self, line):
self.vprint('>', line)
self.writer.write(line.encode('latin-1') + b'\r\n')
- ##yield from self.writer.drain()
+ ##yield From(self.writer.drain())
@coroutine
def send_request(self):
request = '%s %s %s' % (self.method, self.full_path, self.http_version)
- yield from self.putline(request)
+ yield From(self.putline(request))
if 'host' not in {key.lower() for key, _ in self.headers}:
self.headers.insert(0, ('Host', self.netloc))
for key, value in self.headers:
line = '%s: %s' % (key, value)
- yield from self.putline(line)
- yield from self.putline('')
+ yield From(self.putline(line))
+ yield From(self.putline(''))
@coroutine
def get_response(self):
response = Response(self.reader, self.verbose)
- yield from response.read_headers()
- return response
+ yield From(response.read_headers())
+ raise Return(response)
class Response:
@@ -129,20 +137,21 @@ class Response:
@coroutine
def getline(self):
- line = (yield from self.reader.readline()).decode('latin-1').rstrip()
+ line = (yield From(self.reader.readline()))
+ line = line.decode('latin-1').rstrip()
self.vprint('<', line)
- return line
+ raise Return(line)
@coroutine
def read_headers(self):
- status_line = yield from self.getline()
+ status_line = yield From(self.getline())
status_parts = status_line.split(None, 2)
if len(status_parts) != 3:
raise BadStatusLine(status_line)
self.http_version, status, self.reason = status_parts
self.status = int(status)
while True:
- header_line = yield from self.getline()
+ header_line = yield From(self.getline())
if not header_line:
break
# TODO: Continuation lines.
@@ -173,23 +182,23 @@ class Response:
blocks = []
size = -1
while size:
- size_header = yield from self.reader.readline()
+ size_header = yield From(self.reader.readline())
if not size_header:
break
parts = size_header.split(b';')
size = int(parts[0], 16)
if size:
- block = yield from self.reader.readexactly(size)
+ block = yield From(self.reader.readexactly(size))
assert len(block) == size, (len(block), size)
blocks.append(block)
- crlf = yield from self.reader.readline()
+ crlf = yield From(self.reader.readline())
assert crlf == b'\r\n', repr(crlf)
body = b''.join(blocks)
else:
- body = yield from self.reader.read()
+ body = yield From(self.reader.read())
else:
- body = yield from self.reader.readexactly(nbytes)
- return body
+ body = yield From(self.reader.readexactly(nbytes))
+ raise Return(body)
@coroutine
@@ -198,23 +207,23 @@ def fetch(url, verbose=True, max_redirect=10):
try:
for _ in range(max_redirect):
request = Request(url, verbose)
- yield from request.connect(pool)
- yield from request.send_request()
- response = yield from request.get_response()
- body = yield from response.read()
+ yield From(request.connect(pool))
+ yield From(request.send_request())
+ response = yield From(request.get_response())
+ body = yield From(response.read())
next_url = response.get_redirect_url()
if not next_url:
break
-            url = urllib.parse.urljoin(url, next_url)
+            url = urljoin(url, next_url)
print('redirect to', url, file=sys.stderr)
- return body
+ raise Return(body)
finally:
pool.close()
def main():
if '--iocp' in sys.argv:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
@@ -223,7 +232,11 @@ def main():
body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv))
finally:
loop.close()
- sys.stdout.buffer.write(body)
+ if hasattr(sys.stdout, 'buffer'):
+ sys.stdout.buffer.write(body)
+ else:
+ # Python 2
+ sys.stdout.write(body)
if __name__ == '__main__':
diff --git a/examples/fuzz_as_completed.py b/examples/fuzz_as_completed.py
index 123fbf1..7e74fe7 100644
--- a/examples/fuzz_as_completed.py
+++ b/examples/fuzz_as_completed.py
@@ -2,26 +2,29 @@
"""Fuzz tester for as_completed(), by Glenn Langford."""
-import asyncio
+from __future__ import print_function
+
+import trollius as asyncio
+from trollius import From, Return
import itertools
import random
import sys
@asyncio.coroutine
def sleeper(time):
- yield from asyncio.sleep(time)
- return time
+ yield From(asyncio.sleep(time))
+ raise Return(time)
@asyncio.coroutine
def watcher(tasks,delay=False):
res = []
for t in asyncio.as_completed(tasks):
- r = yield from t
+ r = yield From(t)
res.append(r)
if delay:
# simulate processing delay
process_time = random.random() / 10
- yield from asyncio.sleep(process_time)
+ yield From(asyncio.sleep(process_time))
#print(res)
#assert(sorted(res) == res)
if sorted(res) != res:
diff --git a/examples/hello_callback.py b/examples/hello_callback.py
index 7ccbea1..f192c8d 100644
--- a/examples/hello_callback.py
+++ b/examples/hello_callback.py
@@ -1,6 +1,6 @@
"""Print 'Hello World' every two seconds, using a callback."""
-import asyncio
+import trollius
def print_and_repeat(loop):
@@ -9,7 +9,7 @@ def print_and_repeat(loop):
if __name__ == '__main__':
- loop = asyncio.get_event_loop()
+ loop = trollius.get_event_loop()
print_and_repeat(loop)
try:
loop.run_forever()
diff --git a/examples/hello_coroutine.py b/examples/hello_coroutine.py
index b9347aa..e6a4e6c 100644
--- a/examples/hello_coroutine.py
+++ b/examples/hello_coroutine.py
@@ -1,17 +1,18 @@
"""Print 'Hello World' every two seconds, using a coroutine."""
-import asyncio
+import trollius
+from trollius import From
-@asyncio.coroutine
+@trollius.coroutine
def greet_every_two_seconds():
while True:
print('Hello World')
- yield from asyncio.sleep(2)
+ yield From(trollius.sleep(2))
if __name__ == '__main__':
- loop = asyncio.get_event_loop()
+ loop = trollius.get_event_loop()
try:
loop.run_until_complete(greet_every_two_seconds())
finally:
diff --git a/examples/interop_asyncio.py b/examples/interop_asyncio.py
new file mode 100644
index 0000000..b20e3ed
--- /dev/null
+++ b/examples/interop_asyncio.py
@@ -0,0 +1,53 @@
+import asyncio
+import trollius
+
+@asyncio.coroutine
+def asyncio_noop():
+ pass
+
+@asyncio.coroutine
+def asyncio_coroutine(coro):
+ print("asyncio coroutine")
+ res = yield from coro
+ print("asyncio inner coroutine result: %r" % (res,))
+ print("asyncio coroutine done")
+ return "asyncio"
+
+@trollius.coroutine
+def trollius_noop():
+ pass
+
+@trollius.coroutine
+def trollius_coroutine(coro):
+ print("trollius coroutine")
+ res = yield trollius.From(coro)
+ print("trollius inner coroutine result: %r" % (res,))
+ print("trollius coroutine done")
+ raise trollius.Return("trollius")
+
+def main():
+ # use trollius event loop policy in asyncio
+ policy = trollius.get_event_loop_policy()
+ asyncio.set_event_loop_policy(policy)
+
+ # create an event loop for the main thread: use Trollius event loop
+ loop = trollius.get_event_loop()
+ assert asyncio.get_event_loop() is loop
+
+ print("[ asyncio coroutine called from trollius coroutine ]")
+ coro1 = asyncio_noop()
+ coro2 = asyncio_coroutine(coro1)
+ res = loop.run_until_complete(trollius_coroutine(coro2))
+ print("trollius coroutine result: %r" % res)
+ print("")
+
+    print("[ trollius coroutine called from asyncio coroutine ]")
+ coro1 = trollius_noop()
+ coro2 = trollius_coroutine(coro1)
+ res = loop.run_until_complete(asyncio_coroutine(coro2))
+ print("asyncio coroutine result: %r" % res)
+ print("")
+
+ loop.close()
+
+main()
diff --git a/examples/shell.py b/examples/shell.py
index f934325..91ba7fb 100644
--- a/examples/shell.py
+++ b/examples/shell.py
@@ -1,31 +1,33 @@
"""Examples using create_subprocess_exec() and create_subprocess_shell()."""
-import asyncio
+import trollius as asyncio
+from trollius import From
import signal
-from asyncio.subprocess import PIPE
+from trollius.subprocess import PIPE
+from trollius.py33_exceptions import ProcessLookupError
@asyncio.coroutine
def cat(loop):
- proc = yield from asyncio.create_subprocess_shell("cat",
+ proc = yield From(asyncio.create_subprocess_shell("cat",
stdin=PIPE,
- stdout=PIPE)
+ stdout=PIPE))
print("pid: %s" % proc.pid)
message = "Hello World!"
print("cat write: %r" % message)
- stdout, stderr = yield from proc.communicate(message.encode('ascii'))
+ stdout, stderr = yield From(proc.communicate(message.encode('ascii')))
print("cat read: %r" % stdout.decode('ascii'))
- exitcode = yield from proc.wait()
+ exitcode = yield From(proc.wait())
print("(exit code %s)" % exitcode)
@asyncio.coroutine
def ls(loop):
- proc = yield from asyncio.create_subprocess_exec("ls",
- stdout=PIPE)
+ proc = yield From(asyncio.create_subprocess_exec("ls",
+ stdout=PIPE))
while True:
- line = yield from proc.stdout.readline()
+ line = yield From(proc.stdout.readline())
if not line:
break
print("ls>>", line.decode('ascii').rstrip())
@@ -35,10 +37,11 @@ def ls(loop):
pass
@asyncio.coroutine
-def test_call(*args, timeout=None):
- proc = yield from asyncio.create_subprocess_exec(*args)
+def test_call(*args, **kw):
+ timeout = kw.pop('timeout', None)
try:
- exitcode = yield from asyncio.wait_for(proc.wait(), timeout)
+ proc = yield From(asyncio.create_subprocess_exec(*args))
+ exitcode = yield From(asyncio.wait_for(proc.wait(), timeout))
print("%s: exit code %s" % (' '.join(args), exitcode))
except asyncio.TimeoutError:
print("timeout! (%.1f sec)" % timeout)
diff --git a/examples/simple_tcp_server.py b/examples/simple_tcp_server.py
index 5f874ff..247f6e6 100644
--- a/examples/simple_tcp_server.py
+++ b/examples/simple_tcp_server.py
@@ -8,9 +8,11 @@ in the same process. It listens on port 12345 on 127.0.0.1, so it will
fail if this port is currently in use.
"""
+from __future__ import print_function
import sys
-import asyncio
+import trollius as asyncio
import asyncio.streams
+from trollius import From, Return
class MyServer:
@@ -58,28 +60,31 @@ class MyServer:
out one or more lines back to the client with the result.
"""
while True:
- data = (yield from client_reader.readline()).decode("utf-8")
+ data = (yield From(client_reader.readline()))
+ data = data.decode("utf-8")
if not data: # an empty string means the client disconnected
break
- cmd, *args = data.rstrip().split(' ')
+ parts = data.rstrip().split(' ')
+ cmd = parts[0]
+ args = parts[1:]
if cmd == 'add':
arg1 = float(args[0])
arg2 = float(args[1])
retval = arg1 + arg2
- client_writer.write("{!r}\n".format(retval).encode("utf-8"))
+ client_writer.write("{0!r}\n".format(retval).encode("utf-8"))
elif cmd == 'repeat':
times = int(args[0])
msg = args[1]
client_writer.write("begin\n".encode("utf-8"))
for idx in range(times):
- client_writer.write("{}. {}\n".format(idx+1, msg)
+ client_writer.write("{0}. {1}\n".format(idx+1, msg)
.encode("utf-8"))
client_writer.write("end\n".encode("utf-8"))
else:
- print("Bad command {!r}".format(data), file=sys.stderr)
+ print("Bad command {0!r}".format(data), file=sys.stderr)
# This enables us to have flow control in our connection.
- yield from client_writer.drain()
+ yield From(client_writer.drain())
def start(self, loop):
"""
@@ -115,32 +120,33 @@ def main():
@asyncio.coroutine
def client():
- reader, writer = yield from asyncio.streams.open_connection(
- '127.0.0.1', 12345, loop=loop)
+ reader, writer = yield From(asyncio.streams.open_connection(
+ '127.0.0.1', 12345, loop=loop))
def send(msg):
print("> " + msg)
writer.write((msg + '\n').encode("utf-8"))
def recv():
- msgback = (yield from reader.readline()).decode("utf-8").rstrip()
+ msgback = (yield From(reader.readline()))
+ msgback = msgback.decode("utf-8").rstrip()
print("< " + msgback)
- return msgback
+ raise Return(msgback)
# send a line
send("add 1 2")
- msg = yield from recv()
+ msg = yield From(recv())
send("repeat 5 hello")
- msg = yield from recv()
+ msg = yield From(recv())
assert msg == 'begin'
while True:
- msg = yield from recv()
+ msg = yield From(recv())
if msg == 'end':
break
writer.close()
- yield from asyncio.sleep(0.5)
+ yield From(asyncio.sleep(0.5))
# creates a client and connects to our server
try:
diff --git a/examples/sink.py b/examples/sink.py
index d362cbb..fb28ade 100644
--- a/examples/sink.py
+++ b/examples/sink.py
@@ -1,10 +1,11 @@
"""Test service that accepts connections and reads all data off them."""
+from __future__ import print_function
import argparse
import os
import sys
-from asyncio import *
+from trollius import *
ARGS = argparse.ArgumentParser(description="TCP data sink example.")
ARGS.add_argument(
@@ -63,23 +64,24 @@ def start(loop, host, port):
import ssl
# TODO: take cert/key from args as well.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
- sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslctx.options |= ssl.OP_NO_SSLv2
+ sslctx = SSLContext(ssl.PROTOCOL_SSLv23)
+ if not BACKPORT_SSL_CONTEXT:
+ sslctx.options |= ssl.OP_NO_SSLv2
sslctx.load_cert_chain(
certfile=os.path.join(here, 'ssl_cert.pem'),
keyfile=os.path.join(here, 'ssl_key.pem'))
- server = yield from loop.create_server(Service, host, port, ssl=sslctx)
+ server = yield From(loop.create_server(Service, host, port, ssl=sslctx))
dprint('serving TLS' if sslctx else 'serving',
[s.getsockname() for s in server.sockets])
- yield from server.wait_closed()
+ yield From(server.wait_closed())
def main():
global args
args = ARGS.parse_args()
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
diff --git a/examples/source.py b/examples/source.py
index 7fd11fb..c3ebd55 100644
--- a/examples/source.py
+++ b/examples/source.py
@@ -1,10 +1,11 @@
"""Test client that connects and sends infinite data."""
+from __future__ import print_function
import argparse
import sys
-from asyncio import *
-from asyncio import test_utils
+from trollius import *
+from trollius import test_utils
ARGS = argparse.ArgumentParser(description="TCP data sink example.")
@@ -74,18 +75,18 @@ def start(loop, host, port):
sslctx = None
if args.tls:
sslctx = test_utils.dummy_ssl_context()
- tr, pr = yield from loop.create_connection(Client, host, port,
- ssl=sslctx)
+ tr, pr = yield From(loop.create_connection(Client, host, port,
+ ssl=sslctx))
dprint('tr =', tr)
dprint('pr =', pr)
- yield from pr.waiter
+ yield From(pr.waiter)
def main():
global args
args = ARGS.parse_args()
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
diff --git a/examples/source1.py b/examples/source1.py
index 6802e96..48a53af 100644
--- a/examples/source1.py
+++ b/examples/source1.py
@@ -1,10 +1,11 @@
"""Like source.py, but uses streams."""
+from __future__ import print_function
import argparse
import sys
-from asyncio import *
-from asyncio import test_utils
+from trollius import *
+from trollius import test_utils
ARGS = argparse.ArgumentParser(description="TCP data sink example.")
ARGS.add_argument(
@@ -33,7 +34,7 @@ class Debug:
overwriting = False
label = 'stream1:'
- def print(self, *args):
+ def print_(self, *args):
if self.overwriting:
print(file=sys.stderr)
self.overwriting = 0
@@ -46,7 +47,8 @@ class Debug:
if self.overwriting == 3:
print(self.label, '[...]', file=sys.stderr)
end = '\r'
- print(self.label, *args, file=sys.stderr, end=end, flush=True)
+ print(self.label, *args, file=sys.stderr, end=end)
+ sys.stdout.flush()
@coroutine
@@ -55,11 +57,11 @@ def start(loop, args):
total = 0
sslctx = None
if args.tls:
- d.print('using dummy SSLContext')
+ d.print_('using dummy SSLContext')
sslctx = test_utils.dummy_ssl_context()
- r, w = yield from open_connection(args.host, args.port, ssl=sslctx)
- d.print('r =', r)
- d.print('w =', w)
+ r, w = yield From(open_connection(args.host, args.port, ssl=sslctx))
+ d.print_('r =', r)
+ d.print_('w =', w)
if args.stop:
w.write(b'stop')
w.close()
@@ -73,17 +75,17 @@ def start(loop, args):
w.write(data)
f = w.drain()
if f:
- d.print('pausing')
- yield from f
+ d.print_('pausing')
+ yield From(f)
except (ConnectionResetError, BrokenPipeError) as exc:
- d.print('caught', repr(exc))
+ d.print_('caught', repr(exc))
def main():
global args
args = ARGS.parse_args()
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
diff --git a/examples/stacks.py b/examples/stacks.py
index 0b7e0b2..abe24a0 100644
--- a/examples/stacks.py
+++ b/examples/stacks.py
@@ -1,7 +1,7 @@
"""Crude demo for print_stack()."""
-from asyncio import *
+from trollius import *
@coroutine
@@ -10,9 +10,9 @@ def helper(r):
for t in Task.all_tasks():
t.print_stack()
print('--- end helper ---')
- line = yield from r.readline()
+ line = yield From(r.readline())
1/0
- return line
+ raise Return(line)
def doit():
l = get_event_loop()
diff --git a/examples/subprocess_attach_read_pipe.py b/examples/subprocess_attach_read_pipe.py
index d8a6242..a2f9bb5 100644
--- a/examples/subprocess_attach_read_pipe.py
+++ b/examples/subprocess_attach_read_pipe.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python3
"""Example showing how to attach a read pipe to a subprocess."""
-import asyncio
+import trollius as asyncio
import os, sys
+from trollius import From
code = """
import os, sys
@@ -17,16 +18,19 @@ def task():
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(wfd)]
- pipe = open(rfd, 'rb', 0)
+ pipe = os.fdopen(rfd, 'rb', 0)
reader = asyncio.StreamReader(loop=loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
- transport, _ = yield from loop.connect_read_pipe(lambda: protocol, pipe)
+ transport, _ = yield From(loop.connect_read_pipe(lambda: protocol, pipe))
- proc = yield from asyncio.create_subprocess_exec(*args, pass_fds={wfd})
- yield from proc.wait()
+ kwds = {}
+ if sys.version_info >= (3, 2):
+ kwds['pass_fds'] = (wfd,)
+ proc = yield From(asyncio.create_subprocess_exec(*args, **kwds))
+ yield From(proc.wait())
os.close(wfd)
- data = yield from reader.read()
+ data = yield From(reader.read())
print("read = %r" % data.decode())
loop.run_until_complete(task())
diff --git a/examples/subprocess_attach_write_pipe.py b/examples/subprocess_attach_write_pipe.py
index c4e099f..8b9e7ec 100644
--- a/examples/subprocess_attach_write_pipe.py
+++ b/examples/subprocess_attach_write_pipe.py
@@ -1,14 +1,19 @@
#!/usr/bin/env python3
"""Example showing how to attach a write pipe to a subprocess."""
-import asyncio
+import trollius as asyncio
+from trollius import From
import os, sys
-from asyncio import subprocess
+from trollius import subprocess
code = """
import os, sys
fd = int(sys.argv[1])
data = os.read(fd, 1024)
-sys.stdout.buffer.write(data)
+if sys.version_info >= (3,):
+ stdout = sys.stdout.buffer
+else:
+ stdout = sys.stdout
+stdout.write(data)
"""
loop = asyncio.get_event_loop()
@@ -17,19 +22,19 @@ loop = asyncio.get_event_loop()
def task():
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(rfd)]
- proc = yield from asyncio.create_subprocess_exec(
- *args,
- pass_fds={rfd},
- stdout=subprocess.PIPE)
+ kwargs = {'stdout': subprocess.PIPE}
+ if sys.version_info >= (3, 2):
+ kwargs['pass_fds'] = (rfd,)
+ proc = yield From(asyncio.create_subprocess_exec(*args, **kwargs))
- pipe = open(wfd, 'wb', 0)
- transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol,
- pipe)
+ pipe = os.fdopen(wfd, 'wb', 0)
+ transport, _ = yield From(loop.connect_write_pipe(asyncio.Protocol,
+ pipe))
transport.write(b'data')
- stdout, stderr = yield from proc.communicate()
+ stdout, stderr = yield From(proc.communicate())
print("stdout = %r" % stdout.decode())
- transport.close()
+ pipe.close()
loop.run_until_complete(task())
loop.close()
diff --git a/examples/subprocess_shell.py b/examples/subprocess_shell.py
index 745cb64..8941236 100644
--- a/examples/subprocess_shell.py
+++ b/examples/subprocess_shell.py
@@ -1,21 +1,23 @@
"""Example writing to and reading from a subprocess at the same time using
tasks."""
-import asyncio
+import trollius as asyncio
import os
-from asyncio.subprocess import PIPE
+from trollius import From
+from trollius.subprocess import PIPE
+from trollius.py33_exceptions import BrokenPipeError, ConnectionResetError
@asyncio.coroutine
def send_input(writer, input):
try:
for line in input:
- print('sending', len(line), 'bytes')
+ print('sending %s bytes' % len(line))
writer.write(line)
d = writer.drain()
if d:
print('pause writing')
- yield from d
+ yield From(d)
print('resume writing')
writer.close()
except BrokenPipeError:
@@ -26,7 +28,7 @@ def send_input(writer, input):
@asyncio.coroutine
def log_errors(reader):
while True:
- line = yield from reader.readline()
+ line = yield From(reader.readline())
if not line:
break
print('ERROR', repr(line))
@@ -34,7 +36,7 @@ def log_errors(reader):
@asyncio.coroutine
def read_stdout(stdout):
while True:
- line = yield from stdout.readline()
+ line = yield From(stdout.readline())
print('received', repr(line))
if not line:
break
@@ -47,7 +49,7 @@ def start(cmd, input=None, **kwds):
kwds['stdin'] = None
else:
kwds['stdin'] = PIPE
- proc = yield from asyncio.create_subprocess_shell(cmd, **kwds)
+ proc = yield From(asyncio.create_subprocess_shell(cmd, **kwds))
tasks = []
if input is not None:
@@ -66,9 +68,9 @@ def start(cmd, input=None, **kwds):
if tasks:
# feed stdin while consuming stdout to avoid hang
# when stdin pipe is full
- yield from asyncio.wait(tasks)
+ yield From(asyncio.wait(tasks))
- exitcode = yield from proc.wait()
+ exitcode = yield From(proc.wait())
print("exit code: %s" % exitcode)
diff --git a/examples/tcp_echo.py b/examples/tcp_echo.py
index d743242..773327f 100755
--- a/examples/tcp_echo.py
+++ b/examples/tcp_echo.py
@@ -1,7 +1,7 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
"""TCP echo server example."""
import argparse
-import asyncio
+import trollius as asyncio
import sys
try:
import signal
@@ -105,7 +105,7 @@ if __name__ == '__main__':
ARGS.print_help()
else:
if args.iocp:
- from asyncio import windows_events
+ from trollius import windows_events
loop = windows_events.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
diff --git a/examples/timing_tcp_server.py b/examples/timing_tcp_server.py
index 3fcdc97..67e714d 100644
--- a/examples/timing_tcp_server.py
+++ b/examples/timing_tcp_server.py
@@ -8,12 +8,14 @@ in the same process. It listens on port 1234 on 127.0.0.1, so it will
fail if this port is currently in use.
"""
+from __future__ import print_function
import sys
import time
import random
-import asyncio
+import trollius as asyncio
import asyncio.streams
+from trollius import From, Return
class MyServer:
@@ -61,29 +63,32 @@ class MyServer:
out one or more lines back to the client with the result.
"""
while True:
- data = (yield from client_reader.readline()).decode("utf-8")
+ data = (yield From(client_reader.readline()))
+ data = data.decode("utf-8")
if not data: # an empty string means the client disconnected
break
- cmd, *args = data.rstrip().split(' ')
+ parts = data.rstrip().split(' ')
+ cmd = parts[0]
+ args = parts[1:]
if cmd == 'add':
arg1 = float(args[0])
arg2 = float(args[1])
retval = arg1 + arg2
- client_writer.write("{!r}\n".format(retval).encode("utf-8"))
+ client_writer.write("{0!r}\n".format(retval).encode("utf-8"))
elif cmd == 'repeat':
times = int(args[0])
msg = args[1]
client_writer.write("begin\n".encode("utf-8"))
for idx in range(times):
- client_writer.write("{}. {}\n".format(
+ client_writer.write("{0}. {1}\n".format(
idx+1, msg + 'x'*random.randint(10, 50))
.encode("utf-8"))
client_writer.write("end\n".encode("utf-8"))
else:
- print("Bad command {!r}".format(data), file=sys.stderr)
+ print("Bad command {0!r}".format(data), file=sys.stderr)
# This enables us to have flow control in our connection.
- yield from client_writer.drain()
+ yield From(client_writer.drain())
def start(self, loop):
"""
@@ -119,42 +124,44 @@ def main():
@asyncio.coroutine
def client():
- reader, writer = yield from asyncio.streams.open_connection(
- '127.0.0.1', 12345, loop=loop)
+ reader, writer = yield From(asyncio.streams.open_connection(
+ '127.0.0.1', 12345, loop=loop))
def send(msg):
print("> " + msg)
writer.write((msg + '\n').encode("utf-8"))
def recv():
- msgback = (yield from reader.readline()).decode("utf-8").rstrip()
+ msgback = (yield From(reader.readline()))
+ msgback = msgback.decode("utf-8").rstrip()
print("< " + msgback)
- return msgback
+ raise Return(msgback)
# send a line
send("add 1 2")
- msg = yield from recv()
+ msg = yield From(recv())
Ns = list(range(100, 100000, 10000))
times = []
for N in Ns:
t0 = time.time()
- send("repeat {} hello world ".format(N))
- msg = yield from recv()
+ send("repeat {0} hello world ".format(N))
+ msg = yield From(recv())
assert msg == 'begin'
while True:
- msg = (yield from reader.readline()).decode("utf-8").rstrip()
+ msg = (yield From(reader.readline()))
+ msg = msg.decode("utf-8").rstrip()
if msg == 'end':
break
t1 = time.time()
dt = t1 - t0
- print("Time taken: {:.3f} seconds ({:.6f} per repetition)"
+ print("Time taken: {0:.3f} seconds ({1:.6f} per repetition)"
.format(dt, dt/N))
times.append(dt)
writer.close()
- yield from asyncio.sleep(0.5)
+ yield From(asyncio.sleep(0.5))
# creates a client and connects to our server
try:
diff --git a/examples/udp_echo.py b/examples/udp_echo.py
index 93ac7e6..bd64639 100755
--- a/examples/udp_echo.py
+++ b/examples/udp_echo.py
@@ -1,8 +1,8 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
"""UDP echo example."""
import argparse
import sys
-import asyncio
+import trollius as asyncio
try:
import signal
except ImportError:
@@ -32,12 +32,12 @@ class MyClientUdpEchoProtocol:
def connection_made(self, transport):
self.transport = transport
- print('sending "{}"'.format(self.message))
+ print('sending "{0}"'.format(self.message))
self.transport.sendto(self.message.encode())
print('waiting to receive')
def datagram_received(self, data, addr):
- print('received "{}"'.format(data.decode()))
+ print('received "{0}"'.format(data.decode()))
self.transport.close()
def error_received(self, exc):
diff --git a/overlapped.c b/overlapped.c
index ef77c88..52ff9de 100644
--- a/overlapped.c
+++ b/overlapped.c
@@ -31,6 +31,18 @@
#define T_HANDLE T_POINTER
+#if PY_MAJOR_VERSION >= 3
+# define PYTHON3
+#endif
+
+#ifndef Py_MIN
+# define Py_MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
+#endif
+
+#ifndef Py_MAX
+# define Py_MAX(X, Y) (((X) > (Y)) ? (X) : (Y))
+#endif
+
enum {TYPE_NONE, TYPE_NOT_STARTED, TYPE_READ, TYPE_WRITE, TYPE_ACCEPT,
TYPE_CONNECT, TYPE_DISCONNECT, TYPE_CONNECT_NAMED_PIPE,
TYPE_WAIT_NAMED_PIPE_AND_CONNECT};
@@ -63,6 +75,7 @@ SetFromWindowsErr(DWORD err)
if (err == 0)
err = GetLastError();
+#if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 3) || PY_MAJOR_VERSION > 3
switch (err) {
case ERROR_CONNECTION_REFUSED:
exception_type = PyExc_ConnectionRefusedError;
@@ -73,6 +86,9 @@ SetFromWindowsErr(DWORD err)
default:
exception_type = PyExc_OSError;
}
+#else
+ exception_type = PyExc_WindowsError;
+#endif
return PyErr_SetExcFromWindowsErr(exception_type, err);
}
@@ -345,7 +361,11 @@ overlapped_CreateEvent(PyObject *self, PyObject *args)
Py_UNICODE *Name;
HANDLE Event;
+#ifdef PYTHON3
if (!PyArg_ParseTuple(args, "O" F_BOOL F_BOOL "Z",
+#else
+ if (!PyArg_ParseTuple(args, "O" F_BOOL F_BOOL "z",
+#endif
&EventAttributes, &ManualReset,
&InitialState, &Name))
return NULL;
@@ -822,7 +842,11 @@ Overlapped_WriteFile(OverlappedObject *self, PyObject *args)
return NULL;
}
+#ifdef PYTHON3
if (!PyArg_Parse(bufobj, "y*", &self->write_buffer))
+#else
+ if (!PyArg_Parse(bufobj, "s*", &self->write_buffer))
+#endif
return NULL;
#if SIZEOF_SIZE_T > SIZEOF_LONG
@@ -878,7 +902,11 @@ Overlapped_WSASend(OverlappedObject *self, PyObject *args)
return NULL;
}
+#ifdef PYTHON3
if (!PyArg_Parse(bufobj, "y*", &self->write_buffer))
+#else
+ if (!PyArg_Parse(bufobj, "s*", &self->write_buffer))
+#endif
return NULL;
#if SIZEOF_SIZE_T > SIZEOF_LONG
@@ -1136,8 +1164,9 @@ static PyObject *
ConnectPipe(OverlappedObject *self, PyObject *args)
{
PyObject *AddressObj;
- wchar_t *Address;
HANDLE PipeHandle;
+#ifdef PYTHON3
+ wchar_t *Address;
if (!PyArg_ParseTuple(args, "U", &AddressObj))
return NULL;
@@ -1146,14 +1175,26 @@ ConnectPipe(OverlappedObject *self, PyObject *args)
if (Address == NULL)
return NULL;
+# define CREATE_FILE CreateFileW
+#else
+ char *Address;
+
+ if (!PyArg_ParseTuple(args, "s", &Address))
+ return NULL;
+
+# define CREATE_FILE CreateFileA
+#endif
+
Py_BEGIN_ALLOW_THREADS
- PipeHandle = CreateFileW(Address,
+ PipeHandle = CREATE_FILE(Address,
GENERIC_READ | GENERIC_WRITE,
0, NULL, OPEN_EXISTING,
FILE_FLAG_OVERLAPPED, NULL);
Py_END_ALLOW_THREADS
+#ifdef PYTHON3
PyMem_Free(Address);
+#endif
if (PipeHandle == INVALID_HANDLE_VALUE)
return SetFromWindowsErr(0);
return Py_BuildValue(F_HANDLE, PipeHandle);
@@ -1284,6 +1325,7 @@ static PyMethodDef overlapped_functions[] = {
{NULL}
};
+#ifdef PYTHON3
static struct PyModuleDef overlapped_module = {
PyModuleDef_HEAD_INIT,
"_overlapped",
@@ -1295,12 +1337,13 @@ static struct PyModuleDef overlapped_module = {
NULL,
NULL
};
+#endif
#define WINAPI_CONSTANT(fmt, con) \
PyDict_SetItemString(d, #con, Py_BuildValue(fmt, con))
-PyMODINIT_FUNC
-PyInit__overlapped(void)
+PyObject*
+_init_overlapped(void)
{
PyObject *m, *d;
@@ -1316,7 +1359,11 @@ PyInit__overlapped(void)
if (PyType_Ready(&OverlappedType) < 0)
return NULL;
+#ifdef PYTHON3
m = PyModule_Create(&overlapped_module);
+#else
+ m = Py_InitModule("_overlapped", overlapped_functions);
+#endif
if (PyModule_AddObject(m, "Overlapped", (PyObject *)&OverlappedType) < 0)
return NULL;
@@ -1332,6 +1379,22 @@ PyInit__overlapped(void)
WINAPI_CONSTANT(F_DWORD, SO_UPDATE_ACCEPT_CONTEXT);
WINAPI_CONSTANT(F_DWORD, SO_UPDATE_CONNECT_CONTEXT);
WINAPI_CONSTANT(F_DWORD, TF_REUSE_SOCKET);
+ WINAPI_CONSTANT(F_DWORD, ERROR_CONNECTION_REFUSED);
+ WINAPI_CONSTANT(F_DWORD, ERROR_CONNECTION_ABORTED);
return m;
}
+
+#ifdef PYTHON3
+PyMODINIT_FUNC
+PyInit__overlapped(void)
+{
+ return _init_overlapped();
+}
+#else
+PyMODINIT_FUNC
+init_overlapped(void)
+{
+ _init_overlapped();
+}
+#endif
diff --git a/release.py b/release.py
deleted file mode 100755
index a5acbc8..0000000
--- a/release.py
+++ /dev/null
@@ -1,517 +0,0 @@
-#!/usr/bin/env python3
-"""
-Script to upload 32 bits and 64 bits wheel packages for Python 3.3 on Windows.
-
-Usage: "python release.py HG_TAG" where HG_TAG is a Mercurial tag, usually
-a version number like "3.4.2".
-
-Requirements:
-
-- Python 3.3 and newer requires the Windows SDK 7.1 to build wheel packages
-- Python 2.7 requires the Windows SDK 7.0
-- the aiotest module is required to run aiotest tests
-"""
-import contextlib
-import optparse
-import os
-import platform
-import re
-import shutil
-import subprocess
-import sys
-import tempfile
-import textwrap
-
-PROJECT = 'asyncio'
-DEBUG_ENV_VAR = 'PYTHONASYNCIODEBUG'
-PYTHON_VERSIONS = (
- (3, 3),
-)
-PY3 = (sys.version_info >= (3,))
-HG = 'hg'
-SDK_ROOT = r"C:\Program Files\Microsoft SDKs\Windows"
-BATCH_FAIL_ON_ERROR = "@IF %errorlevel% neq 0 exit /b %errorlevel%"
-WINDOWS = (sys.platform == 'win32')
-
-
-def get_architecture_bits():
- arch = platform.architecture()[0]
- return int(arch[:2])
-
-
-class PythonVersion:
- def __init__(self, major, minor, bits):
- self.major = major
- self.minor = minor
- self.bits = bits
- self._executable = None
-
- @staticmethod
- def running():
- bits = get_architecture_bits()
- pyver = PythonVersion(sys.version_info.major,
- sys.version_info.minor,
- bits)
- pyver._executable = sys.executable
- return pyver
-
- def _get_executable_windows(self, app):
- if self.bits == 32:
- executable = 'c:\\Python%s%s_32bit\\python.exe'
- else:
- executable = 'c:\\Python%s%s\\python.exe'
- executable = executable % (self.major, self.minor)
- if not os.path.exists(executable):
- print("Unable to find python %s" % self)
- print("%s does not exists" % executable)
- sys.exit(1)
- return executable
-
- def _get_executable_unix(self, app):
- return 'python%s.%s' % (self.major, self.minor)
-
- def get_executable(self, app):
- if self._executable:
- return self._executable
-
- if WINDOWS:
- executable = self._get_executable_windows(app)
- else:
- executable = self._get_executable_unix(app)
-
- code = (
- 'import platform, sys; '
- 'print("{ver.major}.{ver.minor} {bits}".format('
- 'ver=sys.version_info, '
- 'bits=platform.architecture()[0]))'
- )
- try:
- exitcode, stdout = app.get_output(executable, '-c', code,
- ignore_stderr=True)
- except OSError as exc:
- print("Error while checking %s:" % self)
- print(str(exc))
- print("Executable: %s" % executable)
- sys.exit(1)
- else:
- stdout = stdout.rstrip()
- expected = "%s.%s %sbit" % (self.major, self.minor, self.bits)
- if stdout != expected:
- print("Python version or architecture doesn't match")
- print("got %r, expected %r" % (stdout, expected))
- print("Executable: %s" % executable)
- sys.exit(1)
-
- self._executable = executable
- return executable
-
- def __str__(self):
- return 'Python %s.%s (%s bits)' % (self.major, self.minor, self.bits)
-
-
-class Release(object):
- def __init__(self):
- root = os.path.dirname(__file__)
- self.root = os.path.realpath(root)
- # Set these attributes to True to run also register sdist upload
- self.wheel = False
- self.test = False
- self.register = False
- self.sdist = False
- self.aiotest = False
- self.verbose = False
- self.upload = False
- # Release mode: enable more tests
- self.release = False
- self.python_versions = []
- if WINDOWS:
- supported_archs = (32, 64)
- else:
- bits = get_architecture_bits()
- supported_archs = (bits,)
- for major, minor in PYTHON_VERSIONS:
- for bits in supported_archs:
- pyver = PythonVersion(major, minor, bits)
- self.python_versions.append(pyver)
-
- @contextlib.contextmanager
- def _popen(self, args, **kw):
- verbose = kw.pop('verbose', True)
- if self.verbose and verbose:
- print('+ ' + ' '.join(args))
- if PY3:
- kw['universal_newlines'] = True
- proc = subprocess.Popen(args, **kw)
- try:
- yield proc
- except:
- proc.kill()
- proc.wait()
- raise
-
- def get_output(self, *args, **kw):
- kw['stdout'] = subprocess.PIPE
- ignore_stderr = kw.pop('ignore_stderr', False)
- if ignore_stderr:
- devnull = open(os.path.devnull, 'wb')
- kw['stderr'] = devnull
- else:
- kw['stderr'] = subprocess.STDOUT
- try:
- with self._popen(args, **kw) as proc:
- stdout, stderr = proc.communicate()
- return proc.returncode, stdout
- finally:
- if ignore_stderr:
- devnull.close()
-
- def check_output(self, *args, **kw):
- exitcode, output = self.get_output(*args, **kw)
- if exitcode:
- sys.stdout.write(output)
- sys.stdout.flush()
- sys.exit(1)
- return output
-
- def run_command(self, *args, **kw):
- with self._popen(args, **kw) as proc:
- exitcode = proc.wait()
- if exitcode:
- sys.exit(exitcode)
-
- def get_local_changes(self):
- status = self.check_output(HG, 'status')
- return [line for line in status.splitlines()
- if not line.startswith("?")]
-
- def remove_directory(self, name):
- path = os.path.join(self.root, name)
- if os.path.exists(path):
- if self.verbose:
- print("Remove directory: %s" % name)
- shutil.rmtree(path)
-
- def remove_file(self, name):
- path = os.path.join(self.root, name)
- if os.path.exists(path):
- if self.verbose:
- print("Remove file: %s" % name)
- os.unlink(path)
-
- def windows_sdk_setenv(self, pyver):
- if (pyver.major, pyver.minor) >= (3, 3):
- path = "v7.1"
- sdkver = (7, 1)
- else:
- path = "v7.0"
- sdkver = (7, 0)
- setenv = os.path.join(SDK_ROOT, path, 'Bin', 'SetEnv.cmd')
- if not os.path.exists(setenv):
- print("Unable to find Windows SDK %s.%s for %s"
- % (sdkver[0], sdkver[1], pyver))
- print("Please download and install it")
- print("%s does not exists" % setenv)
- sys.exit(1)
- if pyver.bits == 64:
- arch = '/x64'
- else:
- arch = '/x86'
- cmd = ["CALL", setenv, "/release", arch]
- return (cmd, sdkver)
-
- def quote(self, arg):
- if not re.search("[ '\"]", arg):
- return arg
- # FIXME: should we escape "?
- return '"%s"' % arg
-
- def quote_args(self, args):
- return ' '.join(self.quote(arg) for arg in args)
-
- def cleanup(self):
- if self.verbose:
- print("Cleanup")
- self.remove_directory('build')
- self.remove_directory('dist')
- self.remove_file('_overlapped.pyd')
- self.remove_file(os.path.join(PROJECT, '_overlapped.pyd'))
-
- def sdist_upload(self):
- self.cleanup()
- self.run_command(sys.executable, 'setup.py', 'sdist', 'upload')
-
- def build_inplace(self, pyver):
- print("Build for %s" % pyver)
- self.build(pyver, 'build')
-
- if WINDOWS:
- if pyver.bits == 64:
- arch = 'win-amd64'
- else:
- arch = 'win32'
- build_dir = 'lib.%s-%s.%s' % (arch, pyver.major, pyver.minor)
- src = os.path.join(self.root, 'build', build_dir,
- PROJECT, '_overlapped.pyd')
- dst = os.path.join(self.root, PROJECT, '_overlapped.pyd')
- shutil.copyfile(src, dst)
-
- def runtests(self, pyver):
- print("Run tests on %s" % pyver)
-
- if WINDOWS and not self.options.no_compile:
- self.build_inplace(pyver)
-
- release_env = dict(os.environ)
- release_env.pop(DEBUG_ENV_VAR, None)
-
- dbg_env = dict(os.environ)
- dbg_env[DEBUG_ENV_VAR] = '1'
-
- python = pyver.get_executable(self)
- args = (python, 'runtests.py', '-r')
-
- if self.release:
- print("Run runtests.py in release mode on %s" % pyver)
- self.run_command(*args, env=release_env)
-
- print("Run runtests.py in debug mode on %s" % pyver)
- self.run_command(*args, env=dbg_env)
-
- if self.aiotest:
- args = (python, 'run_aiotest.py')
-
- if self.release:
- print("Run aiotest in release mode on %s" % pyver)
- self.run_command(*args, env=release_env)
-
- print("Run aiotest in debug mode on %s" % pyver)
- self.run_command(*args, env=dbg_env)
- print("")
-
- def _build_windows(self, pyver, cmd):
- setenv, sdkver = self.windows_sdk_setenv(pyver)
-
- temp = tempfile.NamedTemporaryFile(mode="w", suffix=".bat",
- delete=False)
- with temp:
- temp.write("SETLOCAL EnableDelayedExpansion\n")
- temp.write(self.quote_args(setenv) + "\n")
- temp.write(BATCH_FAIL_ON_ERROR + "\n")
- # Restore console colors: lightgrey on black
- temp.write("COLOR 07\n")
- temp.write("\n")
- temp.write("SET DISTUTILS_USE_SDK=1\n")
- temp.write("SET MSSDK=1\n")
- temp.write("CD %s\n" % self.quote(self.root))
- temp.write(self.quote_args(cmd) + "\n")
- temp.write(BATCH_FAIL_ON_ERROR + "\n")
-
- try:
- if self.verbose:
- print("Setup Windows SDK %s.%s" % sdkver)
- print("+ " + ' '.join(cmd))
- # SDK 7.1 uses the COLOR command which makes SetEnv.cmd failing
- # if the stdout is not a TTY (if we redirect stdout into a file)
- if self.verbose or sdkver >= (7, 1):
- self.run_command(temp.name, verbose=False)
- else:
- self.check_output(temp.name, verbose=False)
- finally:
- os.unlink(temp.name)
-
- def _build_unix(self, pyver, cmd):
- self.check_output(*cmd)
-
- def build(self, pyver, *cmds):
- self.cleanup()
-
- python = pyver.get_executable(self)
- cmd = [python, 'setup.py'] + list(cmds)
-
- if WINDOWS:
- self._build_windows(pyver, cmd)
- else:
- self._build_unix(pyver, cmd)
-
- def test_wheel(self, pyver):
- print("Test building wheel package for %s" % pyver)
- self.build(pyver, 'bdist_wheel')
-
- def publish_wheel(self, pyver):
- print("Build and publish wheel package for %s" % pyver)
- self.build(pyver, 'bdist_wheel', 'upload')
-
- def parse_options(self):
- parser = optparse.OptionParser(
- description="Run all unittests.",
- usage="%prog [options] command")
- parser.add_option(
- '-v', '--verbose', action="store_true", dest='verbose',
- default=0, help='verbose')
- parser.add_option(
- '-t', '--tag', type="str",
- help='Mercurial tag or revision, required to release')
- parser.add_option(
- '-p', '--python', type="str",
- help='Only build/test one specific Python version, ex: "2.7:32"')
- parser.add_option(
- '-C', "--no-compile", action="store_true",
- help="Don't compile the module, this options implies --running",
- default=False)
- parser.add_option(
- '-r', "--running", action="store_true",
- help='Only use the running Python version',
- default=False)
- parser.add_option(
- '--ignore', action="store_true",
- help='Ignore local changes',
- default=False)
- self.options, args = parser.parse_args()
- if len(args) == 1:
- command = args[0]
- else:
- command = None
-
- if self.options.no_compile:
- self.options.running = True
-
- if command == 'clean':
- self.options.verbose = True
- elif command == 'build':
- self.options.running = True
- elif command == 'test_wheel':
- self.wheel = True
- elif command == 'test':
- self.test = True
- elif command == 'release':
- if not self.options.tag:
- print("The release command requires the --tag option")
- sys.exit(1)
-
- self.release = True
- self.wheel = True
- self.test = True
- self.upload = True
- else:
- if command:
- print("Invalid command: %s" % command)
- else:
- parser.print_help()
- print("")
-
- print("Available commands:")
- print("- build: build asyncio in place, imply --running")
- print("- test: run tests")
- print("- test_wheel: test building wheel packages")
- print("- release: run tests and publish wheel packages,")
- print(" require the --tag option")
- print("- clean: cleanup the project")
- sys.exit(1)
-
- if self.options.python and self.options.running:
- print("--python and --running options are exclusive")
- sys.exit(1)
-
- python = self.options.python
- if python:
- match = re.match("^([23])\.([0-9])/(32|64)$", python)
- if not match:
- print("Invalid Python version: %s" % python)
- print('Format of a Python version: "x.y/bits"')
- print("Example: 2.7/32")
- sys.exit(1)
- major = int(match.group(1))
- minor = int(match.group(2))
- bits = int(match.group(3))
- self.python_versions = [PythonVersion(major, minor, bits)]
-
- if self.options.running:
- self.python_versions = [PythonVersion.running()]
-
- self.verbose = self.options.verbose
- self.command = command
-
- def main(self):
- self.parse_options()
-
- print("Directory: %s" % self.root)
- os.chdir(self.root)
-
- if self.command == "clean":
- self.cleanup()
- sys.exit(1)
-
- if self.command == "build":
- if len(self.python_versions) != 1:
- print("build command requires one specific Python version")
- print("Use the --python command line option")
- sys.exit(1)
- pyver = self.python_versions[0]
- self.build_inplace(pyver)
-
- if (self.register or self.upload) and (not self.options.ignore):
- lines = self.get_local_changes()
- else:
- lines = ()
- if lines:
- print("ERROR: Found local changes")
- for line in lines:
- print(line)
- print("")
- print("Revert local changes")
- print("or use the --ignore command line option")
- sys.exit(1)
-
- hg_tag = self.options.tag
- if hg_tag:
- print("Update repository to revision %s" % hg_tag)
- self.check_output(HG, 'update', hg_tag)
-
- hg_rev = self.check_output(HG, 'id').rstrip()
-
- if self.wheel:
- for pyver in self.python_versions:
- self.test_wheel(pyver)
-
- if self.test:
- for pyver in self.python_versions:
- self.runtests(pyver)
-
- if self.register:
- self.run_command(sys.executable, 'setup.py', 'register')
-
- if self.sdist:
- self.sdist_upload()
-
- if self.upload:
- for pyver in self.python_versions:
- self.publish_wheel(pyver)
-
- hg_rev2 = self.check_output(HG, 'id').rstrip()
- if hg_rev != hg_rev2:
- print("ERROR: The Mercurial revision changed")
- print("Before: %s" % hg_rev)
- print("After: %s" % hg_rev2)
- sys.exit(1)
-
- print("")
- print("Mercurial revision: %s" % hg_rev)
- if self.command == 'build':
- print("Inplace compilation done")
- if self.wheel:
- print("Compilation of wheel packages succeeded")
- if self.test:
- print("Tests succeeded")
- if self.register:
- print("Project registered on the Python cheeseshop (PyPI)")
- if self.sdist:
- print("Project source code uploaded to the Python "
- "cheeseshop (PyPI)")
- if self.upload:
- print("Wheel packages uploaded to the Python cheeseshop (PyPI)")
- for pyver in self.python_versions:
- print("- %s" % pyver)
-
-
-if __name__ == "__main__":
- Release().main()
diff --git a/releaser.conf b/releaser.conf
new file mode 100644
index 0000000..3728139
--- /dev/null
+++ b/releaser.conf
@@ -0,0 +1,7 @@
+# Configuration file for the tool "releaser"
+# https://bitbucket.org/haypo/misc/src/tip/bin/releaser.py
+
+[project]
+name = trollius
+debug_env_var = TROLLIUSDEBUG
+python_versions = 2.7, 3.3, 3.4
diff --git a/run_aiotest.py b/run_aiotest.py
index 8d6fa29..da13328 100644
--- a/run_aiotest.py
+++ b/run_aiotest.py
@@ -1,14 +1,14 @@
import aiotest.run
-import asyncio
import sys
+import trollius
if sys.platform == 'win32':
- from asyncio.windows_utils import socketpair
+ from trollius.windows_utils import socketpair
else:
from socket import socketpair
config = aiotest.TestConfig()
-config.asyncio = asyncio
+config.asyncio = trollius
config.socketpair = socketpair
-config.new_event_pool_policy = asyncio.DefaultEventLoopPolicy
+config.new_event_pool_policy = trollius.DefaultEventLoopPolicy
config.call_soon_check_closed = True
aiotest.run.main(config)
diff --git a/runtests.py b/runtests.py
index b6ed71e..541a47e 100644..100755
--- a/runtests.py
+++ b/runtests.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python3
-"""Run asyncio unittests.
+#!/usr/bin/env python
+"""Run trollius unittests.
Usage:
python3 runtests.py [flags] [pattern] ...
@@ -20,86 +20,107 @@ runtests.py --coverage is equivalent of:
# Originally written by Beech Horn (for NDB).
-import argparse
+from __future__ import print_function
+import optparse
import gc
import logging
import os
import random
import re
import sys
-import unittest
import textwrap
-import importlib.machinery
+PY2 = (sys.version_info < (3,))
+PY33 = (sys.version_info >= (3, 3))
+if PY33:
+ import importlib.machinery
+else:
+ import imp
try:
import coverage
except ImportError:
coverage = None
+if PY2:
+ sys.exc_clear()
-from unittest.signals import installHandler
-
-assert sys.version >= '3.3', 'Please use Python 3.3 or higher.'
-
-ARGS = argparse.ArgumentParser(description="Run all unittests.")
-ARGS.add_argument(
- '-v', action="store", dest='verbose',
- nargs='?', const=1, type=int, default=0, help='verbose')
-ARGS.add_argument(
+try:
+ import unittest2 as unittest
+ from unittest2.signals import installHandler
+except ImportError:
+ import unittest
+ from unittest.signals import installHandler
+
+ARGS = optparse.OptionParser(description="Run all unittests.", usage="%prog [options] [pattern] [pattern2 ...]")
+ARGS.add_option(
+ '-v', '--verbose', type=int, dest='verbose',
+ default=0, help='verbose')
+ARGS.add_option(
'-x', action="store_true", dest='exclude', help='exclude tests')
-ARGS.add_argument(
+ARGS.add_option(
'-f', '--failfast', action="store_true", default=False,
dest='failfast', help='Stop on first fail or error')
-ARGS.add_argument(
+ARGS.add_option(
+ '--no-ssl', action="store_true", default=False,
+ help='Disable the SSL module')
+ARGS.add_option(
+ '--no-concurrent', action="store_true", default=False,
+ help='Disable the concurrent module')
+ARGS.add_option(
'-c', '--catch', action="store_true", default=False,
dest='catchbreak', help='Catch control-C and display results')
-ARGS.add_argument(
+ARGS.add_option(
'--forever', action="store_true", dest='forever', default=False,
help='run tests forever to catch sporadic errors')
-ARGS.add_argument(
+ARGS.add_option(
'--findleaks', action='store_true', dest='findleaks',
help='detect tests that leak memory')
-ARGS.add_argument('-r', '--randomize', action='store_true',
- help='randomize test execution order.')
-ARGS.add_argument('--seed', type=int,
- help='random seed to reproduce a previous random run')
-ARGS.add_argument(
+ARGS.add_option(
+ '-r', '--randomize', action='store_true',
+ help='randomize test execution order.')
+ARGS.add_option(
+ '--seed', type=int,
+ help='random seed to reproduce a previous random run')
+ARGS.add_option(
'-q', action="store_true", dest='quiet', help='quiet')
-ARGS.add_argument(
+ARGS.add_option(
'--tests', action="store", dest='testsdir', default='tests',
help='tests directory')
-ARGS.add_argument(
+ARGS.add_option(
'--coverage', action="store_true", dest='coverage',
help='enable html coverage report')
-ARGS.add_argument(
- 'pattern', action="store", nargs="*",
- help='optional regex patterns to match test ids (default all tests)')
-COV_ARGS = argparse.ArgumentParser(description="Run all unittests.")
-COV_ARGS.add_argument(
- '--coverage', action="store", dest='coverage', nargs='?', const='',
- help='enable coverage report and provide python files directory')
+
+if PY33:
+ def load_module(modname, sourcefile):
+ loader = importlib.machinery.SourceFileLoader(modname, sourcefile)
+ return loader.load_module()
+else:
+ def load_module(modname, sourcefile):
+ return imp.load_source(modname, sourcefile)
def load_modules(basedir, suffix='.py'):
+ import trollius.test_utils
+
def list_dir(prefix, dir):
files = []
modpath = os.path.join(dir, '__init__.py')
if os.path.isfile(modpath):
mod = os.path.split(dir)[-1]
- files.append(('{}{}'.format(prefix, mod), modpath))
+ files.append(('{0}{1}'.format(prefix, mod), modpath))
- prefix = '{}{}.'.format(prefix, mod)
+ prefix = '{0}{1}.'.format(prefix, mod)
for name in os.listdir(dir):
path = os.path.join(dir, name)
if os.path.isdir(path):
- files.extend(list_dir('{}{}.'.format(prefix, name), path))
+ files.extend(list_dir('{0}{1}.'.format(prefix, name), path))
else:
if (name != '__init__.py' and
name.endswith(suffix) and
not name.startswith(('.', '_'))):
- files.append(('{}{}'.format(prefix, name[:-3]), path))
+ files.append(('{0}{1}'.format(prefix, name[:-3]), path))
return files
@@ -107,13 +128,17 @@ def load_modules(basedir, suffix='.py'):
for modname, sourcefile in list_dir('', basedir):
if modname == 'runtests':
continue
+ if modname == 'test_asyncio' and not PY33:
+ print("Skipping '{0}': need at least Python 3.3".format(modname),
+ file=sys.stderr)
+ continue
try:
- loader = importlib.machinery.SourceFileLoader(modname, sourcefile)
- mods.append((loader.load_module(), sourcefile))
+ mod = load_module(modname, sourcefile)
+ mods.append((mod, sourcefile))
except SyntaxError:
raise
- except unittest.SkipTest as err:
- print("Skipping '{}': {}".format(modname, err), file=sys.stderr)
+ except trollius.test_utils.SkipTest as err:
+ print("Skipping '{0}': {1}".format(modname, err), file=sys.stderr)
return mods
@@ -198,7 +223,7 @@ class TestRunner(unittest.TextTestRunner):
def run(self, test):
result = super().run(test)
if result.leaks:
- self.stream.writeln("{} tests leaks:".format(len(result.leaks)))
+ self.stream.writeln("{0} tests leaks:".format(len(result.leaks)))
for name, leaks in result.leaks:
self.stream.writeln(' '*4 + name + ':')
for leak in leaks:
@@ -218,7 +243,13 @@ def _runtests(args, tests):
def runtests():
- args = ARGS.parse_args()
+ args, pattern = ARGS.parse_args()
+
+ if args.no_ssl:
+ sys.modules['ssl'] = None
+
+ if args.no_concurrent:
+ sys.modules['concurrent'] = None
if args.coverage and coverage is None:
URL = "bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py"
@@ -238,15 +269,15 @@ def runtests():
testsdir = os.path.abspath(args.testsdir)
if not os.path.isdir(testsdir):
- print("Tests directory is not found: {}\n".format(testsdir))
+ print("Tests directory is not found: {0}\n".format(testsdir))
ARGS.print_help()
return
excludes = includes = []
if args.exclude:
- excludes = args.pattern
+ excludes = pattern
else:
- includes = args.pattern
+ includes = pattern
v = 0 if args.quiet else args.verbose + 1
failfast = args.failfast
@@ -257,7 +288,6 @@ def runtests():
)
cov.start()
- logger = logging.getLogger()
if v == 0:
level = logging.CRITICAL
elif v == 1:
@@ -273,8 +303,8 @@ def runtests():
finder = TestsFinder(args.testsdir, includes, excludes)
if args.catchbreak:
installHandler()
- import asyncio.coroutines
- if asyncio.coroutines._DEBUG:
+ import trollius.coroutines
+ if trollius.coroutines._DEBUG:
print("Run tests in debug mode")
else:
print("Run tests in release mode")
@@ -297,7 +327,7 @@ def runtests():
cov.report(show_missing=False)
here = os.path.dirname(os.path.abspath(__file__))
print("\nFor html report:")
- print("open file://{}/htmlcov/index.html".format(here))
+ print("open file://{0}/htmlcov/index.html".format(here))
if __name__ == '__main__':
diff --git a/setup.py b/setup.py
index 93cacdd..19d9670 100644
--- a/setup.py
+++ b/setup.py
@@ -1,49 +1,74 @@
# Release procedure:
-# - run tox (to run runtests.py and run_aiotest.py)
-# - maybe test examples
-# - update version in setup.py
-# - hg ci
-# - hg tag VERSION
-# - hg push
-# - run on Linux: python setup.py register sdist upload
-# - run on Windows: python release.py VERSION
-# - increment version in setup.py
-# - hg ci && hg push
+# - fill trollius changelog
+# - run maybe ./update-asyncio-step1.sh
+# - run all tests: tox
+# - test examples
+# - check that "python setup.py sdist" contains all files tracked by
+# the SCM (Mercurial): update MANIFEST.in if needed
+# - run test on Windows: releaser.py test
+# - update version in setup.py (version) and doc/conf.py (version, release)
+# - set release date in doc/changelog.rst
+# - git commit
+# - git tag trollius-VERSION
+# - git push --tags
+# - git push
+# - On Linux: python setup.py register sdist upload
+# FIXME: don't use bdist_wheel because of
+# FIXME: https://github.com/haypo/trollius/issues/1
+# - On Windows: python releaser.py release
+# - increment version in setup.py (version) and doc/conf.py (version, release)
+# - git commit -a && git push
import os
+import sys
try:
from setuptools import setup, Extension
+ SETUPTOOLS = True
except ImportError:
+ SETUPTOOLS = False
# Use distutils.core as a fallback.
# We won't be able to build the Wheel file on Windows.
from distutils.core import setup, Extension
+with open("README.rst") as fp:
+ long_description = fp.read()
+
extensions = []
if os.name == 'nt':
ext = Extension(
- 'asyncio._overlapped', ['overlapped.c'], libraries=['ws2_32'],
+ 'trollius._overlapped', ['overlapped.c'], libraries=['ws2_32'],
)
extensions.append(ext)
-with open("README.rst") as fp:
- long_description = fp.read()
+requirements = ['six']
+if sys.version_info < (2, 7):
+ requirements.append('ordereddict')
+if sys.version_info < (3,):
+ requirements.append('futures')
-setup(
- name="asyncio",
- version="3.4.4",
+install_options = {
+ "name": "trollius",
+ "version": "2.0.1",
+ "license": "Apache License 2.0",
+ "author": 'Victor Stinner',
+ "author_email": 'victor.stinner@gmail.com',
- description="reference implementation of PEP 3156",
- long_description=long_description,
- url="http://www.python.org/dev/peps/pep-3156/",
+ "description": "Port of the Tulip project (asyncio module, PEP 3156) on Python 2",
+ "long_description": long_description,
+ "url": "https://github.com/haypo/trollius",
- classifiers=[
+ "classifiers": [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.3",
+ "License :: OSI Approved :: Apache Software License",
],
- packages=["asyncio"],
- test_suite="runtests.runtests",
+ "packages": ["trollius"],
+ "test_suite": "runtests.runtests",
+
+ "ext_modules": extensions,
+}
+if SETUPTOOLS:
+ install_options['install_requires'] = requirements
- ext_modules=extensions,
-)
+setup(**install_options)
diff --git a/tests/echo3.py b/tests/echo3.py
index 0644967..a009ea3 100644
--- a/tests/echo3.py
+++ b/tests/echo3.py
@@ -1,4 +1,12 @@
import os
+import sys
+
+asyncio_path = os.path.join(os.path.dirname(__file__), '..')
+asyncio_path = os.path.abspath(asyncio_path)
+
+sys.path.insert(0, asyncio_path)
+from trollius.py33_exceptions import wrap_error
+sys.path.remove(asyncio_path)
if __name__ == '__main__':
while True:
@@ -6,6 +14,6 @@ if __name__ == '__main__':
if not buf:
break
try:
- os.write(1, b'OUT:'+buf)
+ wrap_error(os.write, 1, b'OUT:'+buf)
except OSError as ex:
os.write(2, b'ERR:' + ex.__class__.__name__.encode('ascii'))
diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py
new file mode 100644
index 0000000..0421db0
--- /dev/null
+++ b/tests/test_asyncio.py
@@ -0,0 +1,141 @@
+from trollius import test_utils
+from trollius import From, Return
+import trollius
+import trollius.coroutines
+from trollius.test_utils import unittest
+
+try:
+ import asyncio
+except ImportError:
+ from trollius.test_utils import SkipTest
+ raise SkipTest('need asyncio')
+
+
+@asyncio.coroutine
+def asyncio_noop(value):
+ yield from []
+ return (value,)
+
+@asyncio.coroutine
+def asyncio_coroutine(coro, value):
+ res = yield from coro
+ return res + (value,)
+
+@trollius.coroutine
+def trollius_noop(value):
+ yield From(None)
+ raise Return((value,))
+
+@trollius.coroutine
+def trollius_coroutine(coro, value):
+ res = yield trollius.From(coro)
+ raise trollius.Return(res + (value,))
+
+
+class AsyncioTests(test_utils.TestCase):
+ def setUp(self):
+ policy = trollius.get_event_loop_policy()
+
+ asyncio.set_event_loop_policy(policy)
+ self.addCleanup(asyncio.set_event_loop_policy, None)
+
+ self.loop = policy.new_event_loop()
+ self.addCleanup(self.loop.close)
+ policy.set_event_loop(self.loop)
+
+ def test_policy(self):
+ self.assertIs(asyncio.get_event_loop(), self.loop)
+
+ def test_asyncio(self):
+ coro = asyncio_noop("asyncio")
+ res = self.loop.run_until_complete(coro)
+ self.assertEqual(res, ("asyncio",))
+
+ def test_asyncio_in_trollius(self):
+ coro1 = asyncio_noop(1)
+ coro2 = asyncio_coroutine(coro1, 2)
+ res = self.loop.run_until_complete(trollius_coroutine(coro2, 3))
+ self.assertEqual(res, (1, 2, 3))
+
+ def test_trollius_in_asyncio(self):
+ coro1 = trollius_noop(4)
+ coro2 = trollius_coroutine(coro1, 5)
+ res = self.loop.run_until_complete(asyncio_coroutine(coro2, 6))
+ self.assertEqual(res, (4, 5, 6))
+
+ def test_step_future(self):
+ old_debug = trollius.coroutines._DEBUG
+ try:
+ def step_future():
+ future = asyncio.Future()
+ self.loop.call_soon(future.set_result, "asyncio.Future")
+ return (yield from future)
+
+ # test in release mode
+ trollius.coroutines._DEBUG = False
+ result = self.loop.run_until_complete(step_future())
+ self.assertEqual(result, "asyncio.Future")
+
+ # test in debug mode
+ trollius.coroutines._DEBUG = True
+ result = self.loop.run_until_complete(step_future())
+ self.assertEqual(result, "asyncio.Future")
+ finally:
+ trollius.coroutines._DEBUG = old_debug
+
+ def test_async(self):
+ fut = asyncio.Future()
+ self.assertIs(fut._loop, self.loop)
+
+ fut2 = trollius.ensure_future(fut)
+ self.assertIs(fut2, fut)
+ self.assertIs(fut._loop, self.loop)
+
+ def test_wrap_future(self):
+ fut = asyncio.Future()
+ self.assertIs(trollius.wrap_future(fut), fut)
+
+ def test_run_until_complete(self):
+ fut = asyncio.Future()
+ fut.set_result("ok")
+ self.assertEqual(self.loop.run_until_complete(fut),
+ "ok")
+
+ def test_coroutine_decorator(self):
+ @trollius.coroutine
+ def asyncio_future(fut):
+ return fut
+
+ fut = asyncio.Future()
+ self.loop.call_soon(fut.set_result, 'ok')
+ res = self.loop.run_until_complete(asyncio_future(fut))
+ self.assertEqual(res, "ok")
+
+ def test_as_completed(self):
+ fut = asyncio.Future()
+ fut.set_result("ok")
+
+ with self.assertRaises(TypeError):
+ for f in trollius.as_completed(fut):
+ pass
+
+ @trollius.coroutine
+ def get_results(fut):
+ results = []
+ for f in trollius.as_completed([fut]):
+ res = yield trollius.From(f)
+ results.append(res)
+ raise trollius.Return(results)
+
+ results = self.loop.run_until_complete(get_results(fut))
+ self.assertEqual(results, ["ok"])
+
+ def test_gather(self):
+ fut = asyncio.Future()
+ fut.set_result("ok")
+ results = self.loop.run_until_complete(trollius.gather(fut))
+ self.assertEqual(results, ["ok"])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/test_base_events.py b/tests/test_base_events.py
index b1f1e56..02ecfeb 100644
--- a/tests/test_base_events.py
+++ b/tests/test_base_events.py
@@ -7,24 +7,17 @@ import socket
import sys
import threading
import time
-import unittest
-from unittest import mock
-
-import asyncio
-from asyncio import base_events
-from asyncio import constants
-from asyncio import test_utils
-try:
- from test import support
-except ImportError:
- from asyncio import test_support as support
-try:
- from test.support.script_helper import assert_python_ok
-except ImportError:
- try:
- from test.script_helper import assert_python_ok
- except ImportError:
- from asyncio.test_support import assert_python_ok
+
+import trollius as asyncio
+from trollius import Return, From
+from trollius import base_events
+from trollius import constants
+from trollius import test_utils
+from trollius.py33_exceptions import BlockingIOError
+from trollius.test_utils import mock
+from trollius.time_monotonic import time_monotonic
+from trollius.test_utils import unittest
+from trollius import test_support as support
MOCK_ANY = mock.ANY
@@ -61,6 +54,7 @@ class BaseEventLoopTests(test_utils.TestCase):
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
+ # self.assertRaises(NotImplementedError, next, iter(gen))
with self.assertRaises(NotImplementedError):
gen.send(None)
@@ -265,9 +259,9 @@ class BaseEventLoopTests(test_utils.TestCase):
f.cancel() # Don't complain about abandoned Future.
def test__run_once(self):
- h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
+ h1 = asyncio.TimerHandle(time_monotonic() + 5.0, lambda: True, (),
self.loop)
- h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
+ h2 = asyncio.TimerHandle(time_monotonic() + 10.0, lambda: True, (),
self.loop)
h1.cancel()
@@ -288,7 +282,7 @@ class BaseEventLoopTests(test_utils.TestCase):
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test__run_once_logging(self, m_logger):
def slow_select(timeout):
# Sleep a bit longer than a second to avoid timer resolution
@@ -314,23 +308,21 @@ class BaseEventLoopTests(test_utils.TestCase):
self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0])
def test__run_once_schedule_handle(self):
- handle = None
- processed = False
+ non_local = {'handle': None, 'processed': False}
def cb(loop):
- nonlocal processed, handle
- processed = True
- handle = loop.call_soon(lambda: True)
+ non_local['processed'] = True
+ non_local['handle'] = loop.call_soon(lambda: True)
- h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
+ h = asyncio.TimerHandle(time_monotonic() - 1, cb, (self.loop,),
self.loop)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
- self.assertTrue(processed)
- self.assertEqual([handle], list(self.loop._ready))
+ self.assertTrue(non_local['processed'])
+ self.assertEqual([non_local['handle']], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
@@ -476,7 +468,7 @@ class BaseEventLoopTests(test_utils.TestCase):
1/0
# Test call_soon (events.Handle)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
@@ -486,7 +478,7 @@ class BaseEventLoopTests(test_utils.TestCase):
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
@@ -497,18 +489,21 @@ class BaseEventLoopTests(test_utils.TestCase):
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
+ self.loop.set_debug(True)
+ asyncio.set_event_loop(self.loop)
@asyncio.coroutine
def zero_error_coro():
- yield from asyncio.sleep(0.01, loop=self.loop)
+ yield From(asyncio.sleep(0.01, loop=self.loop))
1/0
# Test Future.__del__
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
+ support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
@@ -551,7 +546,7 @@ class BaseEventLoopTests(test_utils.TestCase):
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
@@ -574,7 +569,7 @@ class BaseEventLoopTests(test_utils.TestCase):
self.loop.set_exception_handler(handler)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
@@ -582,7 +577,7 @@ class BaseEventLoopTests(test_utils.TestCase):
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
- _context = None
+ contexts = []
class Loop(base_events.BaseEventLoop):
@@ -590,8 +585,7 @@ class BaseEventLoopTests(test_utils.TestCase):
_process_events = mock.Mock()
def default_exception_handler(self, context):
- nonlocal _context
- _context = context
+ contexts.append(context)
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
@@ -605,7 +599,7 @@ class BaseEventLoopTests(test_utils.TestCase):
loop.call_soon(zero_error)
loop._run_once()
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
@@ -614,9 +608,9 @@ class BaseEventLoopTests(test_utils.TestCase):
def custom_handler(loop, context):
raise ValueError('ham')
- _context = None
+ del contexts[:]
loop.set_exception_handler(custom_handler)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
@@ -625,8 +619,9 @@ class BaseEventLoopTests(test_utils.TestCase):
# Check that original context was passed to default
# exception handler.
- self.assertIn('context', _context)
- self.assertIs(type(_context['context']['exception']),
+ context = contexts[0]
+ self.assertIn('context', context)
+ self.assertIs(type(context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
@@ -667,27 +662,18 @@ class BaseEventLoopTests(test_utils.TestCase):
def test_env_var_debug(self):
code = '\n'.join((
- 'import asyncio',
- 'loop = asyncio.get_event_loop()',
+ 'import trollius',
+ 'loop = trollius.get_event_loop()',
'print(loop.get_debug())'))
- # Test with -E to not fail if the unit test was run with
- # PYTHONASYNCIODEBUG set to a non-empty string
- sts, stdout, stderr = assert_python_ok('-E', '-c', code)
- self.assertEqual(stdout.rstrip(), b'False')
-
- sts, stdout, stderr = assert_python_ok('-c', code,
- PYTHONASYNCIODEBUG='')
+ sts, stdout, stderr = support.assert_python_ok('-c', code,
+ TROLLIUSDEBUG='')
self.assertEqual(stdout.rstrip(), b'False')
- sts, stdout, stderr = assert_python_ok('-c', code,
- PYTHONASYNCIODEBUG='1')
+ sts, stdout, stderr = support.assert_python_ok('-c', code,
+ TROLLIUSDEBUG='1')
self.assertEqual(stdout.rstrip(), b'True')
- sts, stdout, stderr = assert_python_ok('-E', '-c', code,
- PYTHONASYNCIODEBUG='1')
- self.assertEqual(stdout.rstrip(), b'False')
-
def test_create_task(self):
class MyTask(asyncio.Task):
pass
@@ -821,7 +807,7 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
@@ -829,36 +815,39 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
- yield from []
- return [(2, 1, 6, '', ('107.6.106.82', 80)),
- (2, 1, 6, '', ('107.6.106.82', 80))]
+ yield From(None)
+ raise Return([(2, 1, 6, '', ('107.6.106.82', 80)),
+ (2, 1, 6, '', ('107.6.106.82', 80))])
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
- idx = -1
- errors = ['err1', 'err2']
+ non_local = {
+ 'idx': -1,
+ 'errors': ['err1', 'err2'],
+ }
def _socket(*args, **kw):
- nonlocal idx, errors
- idx += 1
- raise OSError(errors[idx])
+ non_local['idx'] += 1
+ raise socket.error(non_local['errors'][non_local['idx']])
+ m_socket.error = socket.error
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
- with self.assertRaises(OSError) as cm:
+ with self.assertRaises(socket.error) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
+ m_socket.error = socket.error
def getaddrinfo(*args, **kw):
fut = asyncio.Future(loop=self.loop)
@@ -887,7 +876,7 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def test_create_connection_no_getaddrinfo(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
- yield from []
+ yield From(None)
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
@@ -895,24 +884,24 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
- yield from []
- return [(2, 1, 6, '', ('107.6.106.82', 80))]
+ yield From(None)
+ raise Return([(2, 1, 6, '', ('107.6.106.82', 80))])
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.side_effect = OSError
+ self.loop.sock_connect.side_effect = socket.error
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
@asyncio.coroutine
@@ -925,22 +914,23 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.side_effect = OSError
+ self.loop.sock_connect.side_effect = socket.error
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
- with self.assertRaises(OSError):
+ with self.assertRaises(socket.error):
self.loop.run_until_complete(coro)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
- err = OSError('Err')
+ err = socket.error('Err')
err.strerror = 'Err'
raise err
+ m_socket.error = socket.error
m_socket.socket.return_value.bind = bind
@asyncio.coroutine
@@ -953,12 +943,12 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.side_effect = OSError('Err2')
+ self.loop.sock_connect.side_effect = socket.error('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
- with self.assertRaises(OSError) as cm:
+ with self.assertRaises(socket.error) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
@@ -981,7 +971,7 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
@@ -994,7 +984,9 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.return_value = ()
+ f = asyncio.Future(loop=self.loop)
+ f.set_result(())
+ self.loop.sock_connect.return_value = f
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
@@ -1067,21 +1059,20 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def test_create_server_empty_host(self):
# if host is empty string use None instead
- host = object()
+ non_local = {'host': object()}
@asyncio.coroutine
def getaddrinfo(*args, **kw):
- nonlocal host
- host = args[0]
- yield from []
+ non_local['host'] = args[0]
+ yield From(None)
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
- self.assertRaises(OSError, self.loop.run_until_complete, fut)
- self.assertIsNone(host)
+ self.assertRaises(socket.error, self.loop.run_until_complete, fut)
+ self.assertIsNone(non_local['host'])
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
@@ -1093,18 +1084,25 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
- getaddrinfo = self.loop.getaddrinfo = mock.Mock()
- getaddrinfo.return_value = []
+ @asyncio.coroutine
+ def getaddrinfo(*args, **kw):
+ raise Return([])
+
+ def getaddrinfo_task(*args, **kwds):
+ return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
+
+ self.loop.getaddrinfo = getaddrinfo_task
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
- self.assertRaises(OSError, self.loop.run_until_complete, f)
+ self.assertRaises(socket.error, self.loop.run_until_complete, f)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_server_cant_bind(self, m_socket):
- class Err(OSError):
+ class Err(socket.error):
strerror = 'error'
+ m_socket.error = socket.error
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
@@ -1112,18 +1110,19 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
- self.assertRaises(OSError, self.loop.run_until_complete, fut)
+ self.assertRaises(socket.error, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
+ m_socket.error = socket.error
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
@@ -1137,29 +1136,31 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.side_effect = OSError
+ self.loop.sock_connect.side_effect = socket.error
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_datagram_endpoint_socket_err(self, m_socket):
+ m_socket.error = socket.error
m_socket.getaddrinfo = socket.getaddrinfo
- m_socket.socket.side_effect = OSError
+ m_socket.socket.side_effect = socket.error
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
- @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
+ @unittest.skipUnless(support.IPV6_ENABLED,
+ 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
@@ -1167,14 +1168,15 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_datagram_endpoint_setblk_err(self, m_socket):
- m_socket.socket.return_value.setblocking.side_effect = OSError
+ m_socket.error = socket.error
+ m_socket.socket.return_value.setblocking.side_effect = socket.error
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
@@ -1183,12 +1185,13 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_datagram_endpoint_cant_bind(self, m_socket):
- class Err(OSError):
+ class Err(socket.error):
pass
m_socket.AF_INET6 = socket.AF_INET6
+ m_socket.error = socket.error
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
@@ -1206,11 +1209,11 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
- sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
+ sock.accept.side_effect = socket.error(errno.EMFILE, 'Too many open files')
self.loop.remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
@@ -1243,14 +1246,14 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
with self.assertRaises(TypeError):
self.loop.run_in_executor(None, func)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
@asyncio.coroutine
def stop_loop_coro(loop):
- yield from ()
+ yield From(None)
loop.stop()
asyncio.set_event_loop(self.loop)
@@ -1260,7 +1263,8 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
- fmt, *args = m_logger.warning.call_args[0]
+ fmt = m_logger.warning.call_args[0][0]
+ args = m_logger.warning.call_args[0][1:]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
@@ -1268,7 +1272,8 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
- fmt, *args = m_logger.warning.call_args[0]
+ fmt = m_logger.warning.call_args[0][0]
+ args = m_logger.warning.call_args[0][1:]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
diff --git a/tests/test_events.py b/tests/test_events.py
index 8fbba8f..59c25d4 100644
--- a/tests/test_events.py
+++ b/tests/test_events.py
@@ -1,5 +1,6 @@
"""Tests for events.py."""
+import contextlib
import functools
import gc
import io
@@ -7,30 +8,41 @@ import os
import platform
import re
import signal
+import six
import socket
-try:
- import ssl
-except ImportError:
- ssl = None
import subprocess
import sys
import threading
-import time
import errno
-import unittest
-from unittest import mock
import weakref
+try:
+ import ssl
+except ImportError:
+ ssl = None
-import asyncio
-from asyncio import proactor_events
-from asyncio import selector_events
-from asyncio import sslproto
-from asyncio import test_utils
try:
- from test import support
+ import concurrent
except ImportError:
- from asyncio import test_support as support
+ concurrent = None
+
+from trollius import Return, From
+from trollius import futures
+
+import trollius as asyncio
+from trollius import compat
+from trollius import events
+from trollius import proactor_events
+from trollius import selector_events
+from trollius import sslproto
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import unittest
+from trollius.py33_exceptions import (wrap_error,
+ BlockingIOError, ConnectionRefusedError,
+ FileNotFoundError)
+from trollius.test_utils import mock
+from trollius.time_monotonic import time_monotonic
def data_file(filename):
@@ -53,6 +65,11 @@ def osx_tiger():
return version < (10, 5)
+def skip_if_backported_sslcontext():
+ backported = getattr(asyncio, 'BACKPORT_SSL_CONTEXT', False)
+ return unittest.skipIf(backported, 'need ssl.SSLContext')
+
+
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
@@ -95,7 +112,7 @@ class MyBaseProto(asyncio.Protocol):
class MyProto(MyBaseProto):
def connection_made(self, transport):
- super().connection_made(transport)
+ super(MyProto, self).connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
@@ -187,7 +204,7 @@ class MySubprocessProtocol(asyncio.SubprocessProtocol):
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
- self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
+ self.disconnects = dict((fd, futures.Future(loop=loop)) for fd in range(3))
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
@@ -221,10 +238,10 @@ class MySubprocessProtocol(asyncio.SubprocessProtocol):
self.returncode = self.transport.get_returncode()
-class EventLoopTestsMixin:
+class EventLoopTestsMixin(object):
def setUp(self):
- super().setUp()
+ super(EventLoopTestsMixin, self).setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
@@ -235,12 +252,12 @@ class EventLoopTestsMixin:
self.loop.close()
gc.collect()
- super().tearDown()
+ super(EventLoopTestsMixin, self).tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
- yield
+ yield From(None)
@asyncio.coroutine
def coro2():
@@ -263,10 +280,13 @@ class EventLoopTestsMixin:
@asyncio.coroutine
def cb():
self.loop.stop()
- yield from asyncio.sleep(0.1, loop=self.loop)
+ yield From(asyncio.sleep(0.1, loop=self.loop))
+
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
+ for task in asyncio.Task.all_tasks(loop=self.loop):
+ task._log_destroy_pending = False
def test_call_later(self):
results = []
@@ -276,9 +296,9 @@ class EventLoopTestsMixin:
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
- t0 = time.monotonic()
+ t0 = time_monotonic()
self.loop.run_forever()
- t1 = time.monotonic()
+ t1 = time_monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
@@ -329,13 +349,14 @@ class EventLoopTestsMixin:
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
+ @unittest.skipIf(concurrent is None, 'need concurrent.futures')
def test_run_in_executor(self):
def run(arg):
- return (arg, threading.get_ident())
+ return (arg, threading.current_thread().ident)
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
- self.assertNotEqual(thread_id, threading.get_ident())
+ self.assertNotEqual(thread_id, threading.current_thread().ident)
def test_reader_callback(self):
r, w = test_utils.socketpair()
@@ -465,11 +486,10 @@ class EventLoopTestsMixin:
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
- caught = 0
+ non_local = {'caught': 0}
def my_handler():
- nonlocal caught
- caught += 1
+ non_local['caught'] += 1
# Check error behavior first.
self.assertRaises(
@@ -498,7 +518,7 @@ class EventLoopTestsMixin:
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
- test_utils.run_until(self.loop, lambda: caught)
+ test_utils.run_until(self.loop, lambda: non_local['caught'])
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
@@ -510,27 +530,25 @@ class EventLoopTestsMixin:
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
- caught = 0
+ non_local = {'caught': 0}
def my_handler():
- nonlocal caught
- caught += 1
+ non_local['caught'] += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
- self.assertEqual(caught, 1)
+ self.assertEqual(non_local['caught'], 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
- caught = 0
+ non_local = {'caught': 0}
def my_handler(*args):
- nonlocal caught
- caught += 1
+ non_local['caught'] += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
@@ -538,7 +556,7 @@ class EventLoopTestsMixin:
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
- self.assertEqual(caught, 1)
+ self.assertEqual(non_local['caught'], 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
@@ -615,7 +633,7 @@ class EventLoopTestsMixin:
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
- def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
+ def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=None, capath=None,
cadata=None):
"""
@@ -632,15 +650,18 @@ class EventLoopTestsMixin:
self._basetest_create_ssl_connection(conn_fut, check_sockname)
self.assertEqual(m.call_count, 1)
- # With the real ssl.create_default_context(), certificate
- # validation will fail
- with self.assertRaises(ssl.SSLError) as cm:
- conn_fut = create_connection(ssl=True)
- # Ignore the "SSL handshake failed" log in debug mode
- with test_utils.disable_logger():
- self._basetest_create_ssl_connection(conn_fut, check_sockname)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ # With the real ssl.create_default_context(), certificate
+ # validation will fail
+ with self.assertRaises(ssl.SSLError) as cm:
+ conn_fut = create_connection(ssl=True)
+ # Ignore the "SSL handshake failed" log in debug mode
+ with test_utils.disable_logger():
+ self._basetest_create_ssl_connection(conn_fut, check_sockname)
- self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
+ # Test for Python 3.2
+ if hasattr(ssl.SSLError, 'reason'):
+ self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
@@ -691,10 +712,11 @@ class EventLoopTestsMixin:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
- with self.assertRaises(OSError) as cm:
+ with self.assertRaises(socket.error) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
- self.assertIn(str(httpd.address), cm.exception.strerror)
+ # FIXME: address missing from the message?
+ #self.assertIn(str(httpd.address), cm.exception.strerror)
def test_create_server(self):
proto = MyProto(self.loop)
@@ -773,16 +795,19 @@ class EventLoopTestsMixin:
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
- with sock:
+ try:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
+ finally:
+ sock.close()
def _create_ssl_context(self, certfile, keyfile=None):
- sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext.options |= ssl.OP_NO_SSLv2
+ sslcontext = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
@@ -875,25 +900,25 @@ class EventLoopTestsMixin:
self.test_create_unix_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
+ @skip_if_backported_sslcontext()
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
-
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
- 'certificate verify failed '):
+ 'certificate verify failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
@@ -909,12 +934,13 @@ class EventLoopTestsMixin:
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @skip_if_backported_sslcontext()
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
@@ -927,7 +953,7 @@ class EventLoopTestsMixin:
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
- 'certificate verify failed '):
+ 'certificate verify failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
@@ -937,7 +963,6 @@ class EventLoopTestsMixin:
self.assertIsNone(proto.transport)
server.close()
-
def test_legacy_create_unix_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verify_failed()
@@ -948,26 +973,35 @@ class EventLoopTestsMixin:
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext_client.options |= ssl.OP_NO_SSLv2
- sslcontext_client.verify_mode = ssl.CERT_REQUIRED
- sslcontext_client.load_verify_locations(
- cafile=SIGNING_CA)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ sslcontext_client.load_verify_locations(
+ cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
+ if six.PY3:
+ err_msg = "hostname '127.0.0.1' doesn't match 'localhost'"
+ else:
+ # http://bugs.python.org/issue22861
+ err_msg = "hostname '127.0.0.1' doesn't match u'localhost'"
+
# incorrect server_hostname
- f_c = self.loop.create_connection(MyProto, host, port,
- ssl=sslcontext_client)
- with mock.patch.object(self.loop, 'call_exception_handler'):
- with test_utils.disable_logger():
- with self.assertRaisesRegex(
- ssl.CertificateError,
- "hostname '127.0.0.1' doesn't match 'localhost'"):
- self.loop.run_until_complete(f_c)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ f_c = self.loop.create_connection(MyProto, host, port,
+ ssl=sslcontext_client)
+ with mock.patch.object(self.loop, 'call_exception_handler'):
+ with test_utils.disable_logger():
+ with self.assertRaisesRegex(
+ ssl.CertificateError,
+ err_msg):
+ self.loop.run_until_complete(f_c)
+
+ # close connection
+ proto.transport.close()
- # close connection
- proto.transport.close()
server.close()
def test_legacy_create_server_ssl_match_failed(self):
@@ -981,10 +1015,11 @@ class EventLoopTestsMixin:
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext_client.options |= ssl.OP_NO_SSLv2
- sslcontext_client.verify_mode = ssl.CERT_REQUIRED
- sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
@@ -1010,10 +1045,11 @@ class EventLoopTestsMixin:
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext_client.options |= ssl.OP_NO_SSLv2
- sslcontext_client.verify_mode = ssl.CERT_REQUIRED
- sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
@@ -1026,6 +1062,7 @@ class EventLoopTestsMixin:
# close connection
proto.transport.close()
client.close()
+
server.close()
self.loop.run_until_complete(proto.done)
@@ -1034,12 +1071,12 @@ class EventLoopTestsMixin:
self.test_create_server_ssl_verified()
def test_create_server_sock(self):
- proto = asyncio.Future(loop=self.loop)
+ non_local = {'proto': asyncio.Future(loop=self.loop)}
class TestMyProto(MyProto):
def connection_made(self, transport):
- super().connection_made(transport)
- proto.set_result(self)
+ super(TestMyProto, self).connection_made(transport)
+ non_local['proto'].set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -1069,7 +1106,7 @@ class EventLoopTestsMixin:
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
- with self.assertRaises(OSError) as cm:
+ with self.assertRaises(socket.error) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
@@ -1081,7 +1118,7 @@ class EventLoopTestsMixin:
class TestMyProto(MyProto):
def connection_made(self, transport):
- super().connection_made(transport)
+ super(TestMyProto, self).connection_made(transport)
f_proto.set_result(self)
try_count = 0
@@ -1090,7 +1127,7 @@ class EventLoopTestsMixin:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
- except OSError as ex:
+ except socket.error as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
@@ -1131,16 +1168,17 @@ class EventLoopTestsMixin:
client = socket.socket()
self.assertRaises(
- ConnectionRefusedError, client.connect, ('127.0.0.1', port))
+ ConnectionRefusedError, wrap_error, client.connect,
+ ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
- super().__init__(loop=self.loop)
+ super(TestMyDatagramProto, inner_self).__init__(loop=self.loop)
def datagram_received(self, data, addr):
- super().datagram_received(data, addr)
+ super(TestMyDatagramProto, self).datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
@@ -1202,8 +1240,8 @@ class EventLoopTestsMixin:
@asyncio.coroutine
def connect():
- t, p = yield from self.loop.connect_read_pipe(
- lambda: proto, pipeobj)
+ t, p = yield From(self.loop.connect_read_pipe(
+ lambda: proto, pipeobj))
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
@@ -1242,8 +1280,8 @@ class EventLoopTestsMixin:
@asyncio.coroutine
def connect():
- t, p = yield from self.loop.connect_read_pipe(lambda: proto,
- master_read_obj)
+ t, p = yield From(self.loop.connect_read_pipe(lambda: proto,
+ master_read_obj))
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
@@ -1311,7 +1349,12 @@ class EventLoopTestsMixin:
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = test_utils.socketpair()
rsock.setblocking(False)
- pipeobj = io.open(wsock.detach(), 'wb', 1024)
+ if hasattr(wsock, 'detach'):
+ wsock_fd = wsock.detach()
+ else:
+ # Python 2
+ wsock_fd = wsock.fileno()
+ pipeobj = io.open(wsock_fd, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
@@ -1385,19 +1428,19 @@ class EventLoopTestsMixin:
def main():
try:
self.loop.call_soon(f.cancel)
- yield from f
+ yield From(f)
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
- return res
+ raise Return(res)
- start = time.monotonic()
+ start = time_monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
- elapsed = time.monotonic() - start
+ elapsed = time_monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
@@ -1421,19 +1464,20 @@ class EventLoopTestsMixin:
@asyncio.coroutine
def wait():
loop = self.loop
- yield from asyncio.sleep(1e-2, loop=loop)
- yield from asyncio.sleep(1e-4, loop=loop)
- yield from asyncio.sleep(1e-6, loop=loop)
- yield from asyncio.sleep(1e-8, loop=loop)
- yield from asyncio.sleep(1e-10, loop=loop)
+ yield From(asyncio.sleep(1e-2, loop=loop))
+ yield From(asyncio.sleep(1e-4, loop=loop))
+ yield From(asyncio.sleep(1e-6, loop=loop))
+ yield From(asyncio.sleep(1e-8, loop=loop))
+ yield From(asyncio.sleep(1e-10, loop=loop))
self.loop.run_until_complete(wait())
- # The ideal number of call is 12, but on some platforms, the selector
+ # The ideal number of call is 22, but on some platforms, the selector
# may sleep at little bit less than timeout depending on the resolution
# of the clock used by the kernel. Tolerate a few useless calls on
# these platforms.
- self.assertLessEqual(self.loop._run_once_counter, 20,
- {'clock_resolution': self.loop._clock_resolution,
+ self.assertLessEqual(self.loop._run_once_counter, 30,
+ {'calls': self.loop._run_once_counter,
+ 'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_sock_connect_address(self):
@@ -1451,7 +1495,7 @@ class EventLoopTestsMixin:
for family, address in addresses:
for sock_type in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
sock = socket.socket(family, sock_type)
- with sock:
+ with contextlib.closing(sock):
sock.setblocking(False)
connect = self.loop.sock_connect(sock, address)
with self.assertRaises(ValueError) as cm:
@@ -1525,7 +1569,7 @@ class EventLoopTestsMixin:
self.loop.add_signal_handler(signal.SIGTERM, func)
-class SubprocessTestsMixin:
+class SubprocessTestsMixin(object):
def check_terminated(self, returncode):
if sys.platform == 'win32':
@@ -1748,6 +1792,7 @@ class SubprocessTestsMixin:
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
+ @unittest.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
@@ -1762,9 +1807,9 @@ class SubprocessTestsMixin:
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
- yield from self.loop.subprocess_exec(
+ yield From(self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
- 'pwd', **kwds)
+ 'pwd', **kwds))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
@@ -1778,9 +1823,9 @@ class SubprocessTestsMixin:
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
- yield from self.loop.subprocess_shell(
+ yield From(self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
- cmd, **kwds)
+ cmd, **kwds))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
@@ -1856,18 +1901,18 @@ if sys.platform == 'win32':
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
- from asyncio import selectors
+ from trollius import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
- super().setUp()
+ super(UnixEventLoopTestsMixin, self).setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
- super().tearDown()
+ super(UnixEventLoopTestsMixin, self).tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
@@ -1886,13 +1931,13 @@ else:
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
- super().test_read_pty_output()
+ super(KqueueEventLoopTests, self).test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
- super().test_write_pty()
+ super(KqueueEventLoopTests, self).test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
@@ -2017,7 +2062,7 @@ class HandleTests(test_utils.TestCase):
self.loop.get_debug.return_value = True
# simple function
- create_filename = __file__
+ create_filename = sys._getframe().f_code.co_filename
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
@@ -2047,8 +2092,9 @@ class HandleTests(test_utils.TestCase):
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
+ filename = sys._getframe().f_code.co_filename
self.assertEqual(h._source_traceback[-1][:3],
- (__file__,
+ (filename,
lineno,
'test_handle_source_traceback'))
@@ -2069,13 +2115,13 @@ class HandleTests(test_utils.TestCase):
check_source_traceback(h)
-class TimerTests(unittest.TestCase):
+class TimerTests(test_utils.TestCase):
def setUp(self):
self.loop = mock.Mock()
def test_hash(self):
- when = time.monotonic()
+ when = time_monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
@@ -2085,7 +2131,7 @@ class TimerTests(unittest.TestCase):
return args
args = (1, 2, 3)
- when = time.monotonic()
+ when = time_monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
@@ -2120,7 +2166,7 @@ class TimerTests(unittest.TestCase):
self.loop.get_debug.return_value = True
# simple function
- create_filename = __file__
+ create_filename = sys._getframe().f_code.co_filename
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
@@ -2141,7 +2187,7 @@ class TimerTests(unittest.TestCase):
def callback(*args):
return args
- when = time.monotonic()
+ when = time_monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
@@ -2178,7 +2224,7 @@ class TimerTests(unittest.TestCase):
self.assertIs(NotImplemented, h1.__ne__(h3))
-class AbstractEventLoopTests(unittest.TestCase):
+class AbstractEventLoopTests(test_utils.TestCase):
def test_not_implemented(self):
f = mock.Mock()
@@ -2191,13 +2237,16 @@ class AbstractEventLoopTests(unittest.TestCase):
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
- self.assertRaises(
- NotImplementedError, loop.is_closed)
+ # skip some tests if the AbstractEventLoop class comes from asyncio
+ # and the asyncio version (python version in fact) is older than 3.4.2
+ if events.asyncio is None or sys.version_info >= (3, 4, 2):
+ self.assertRaises(
+ NotImplementedError, loop.is_closed)
+ self.assertRaises(
+ NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
- NotImplementedError, loop.create_task, None)
- self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
@@ -2266,7 +2315,7 @@ class AbstractEventLoopTests(unittest.TestCase):
NotImplementedError, loop.set_debug, f)
-class ProtocolsAbsTests(unittest.TestCase):
+class ProtocolsAbsTests(test_utils.TestCase):
def test_empty(self):
f = mock.Mock()
@@ -2290,7 +2339,7 @@ class ProtocolsAbsTests(unittest.TestCase):
self.assertIsNone(sp.process_exited())
-class PolicyTests(unittest.TestCase):
+class PolicyTests(test_utils.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
@@ -2333,7 +2382,7 @@ class PolicyTests(unittest.TestCase):
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
- @mock.patch('asyncio.events.threading.current_thread')
+ @mock.patch('trollius.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
diff --git a/tests/test_futures.py b/tests/test_futures.py
index c8b6829..78a097b 100644
--- a/tests/test_futures.py
+++ b/tests/test_futures.py
@@ -1,20 +1,26 @@
"""Tests for futures.py."""
-import concurrent.futures
+try:
+ import concurrent.futures
+except ImportError:
+ concurrent = None
import re
+import six
import sys
import threading
-import unittest
-from unittest import mock
-import asyncio
-from asyncio import test_utils
-try:
- from test import support
-except ImportError:
- from asyncio import test_support as support
+import trollius as asyncio
+from trollius import From
+from trollius import compat
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
+def get_thread_ident():
+ return threading.current_thread().ident
+
def _fakefunc(f):
return f
@@ -43,10 +49,6 @@ class FutureTests(test_utils.TestCase):
f = asyncio.Future()
self.assertIs(f._loop, self.loop)
- def test_constructor_positional(self):
- # Make sure Future doesn't accept a positional argument
- self.assertRaises(TypeError, asyncio.Future, 42)
-
def test_cancel(self):
f = asyncio.Future(loop=self.loop)
self.assertTrue(f.cancel())
@@ -90,24 +92,6 @@ class FutureTests(test_utils.TestCase):
f.set_exception(RuntimeError)
self.assertIsInstance(f.exception(), RuntimeError)
- def test_yield_from_twice(self):
- f = asyncio.Future(loop=self.loop)
-
- def fixture():
- yield 'A'
- x = yield from f
- yield 'B', x
- y = yield from f
- yield 'C', y
-
- g = fixture()
- self.assertEqual(next(g), 'A') # yield 'A'.
- self.assertEqual(next(g), f) # First yield from f.
- f.set_result(42)
- self.assertEqual(next(g), ('B', 42)) # yield 'B', x.
- # The second "yield from f" does not yield f.
- self.assertEqual(next(g), ('C', 42)) # yield 'C', y.
-
def test_future_repr(self):
self.loop.set_debug(True)
f_pending_debug = asyncio.Future(loop=self.loop)
@@ -140,7 +124,8 @@ class FutureTests(test_utils.TestCase):
def func_repr(func):
filename, lineno = test_utils.get_function_source(func)
- text = '%s() at %s:%s' % (func.__qualname__, filename, lineno)
+ func_name = getattr(func, '__qualname__', func.__name__)
+ text = '%s() at %s:%s' % (func_name, filename, lineno)
return re.escape(text)
f_one_callbacks = asyncio.Future(loop=self.loop)
@@ -199,32 +184,20 @@ class FutureTests(test_utils.TestCase):
newf_cancelled._copy_state(f_cancelled)
self.assertTrue(newf_cancelled.cancelled())
- def test_iter(self):
- fut = asyncio.Future(loop=self.loop)
-
- def coro():
- yield from fut
-
- def test():
- arg1, arg2 = coro()
-
- self.assertRaises(AssertionError, test)
- fut.cancel()
-
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_abandoned(self, m_log):
fut = asyncio.Future(loop=self.loop)
del fut
self.assertFalse(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_result_unretrieved(self, m_log):
fut = asyncio.Future(loop=self.loop)
fut.set_result(42)
del fut
self.assertFalse(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_result_retrieved(self, m_log):
fut = asyncio.Future(loop=self.loop)
fut.set_result(42)
@@ -232,15 +205,18 @@ class FutureTests(test_utils.TestCase):
del fut
self.assertFalse(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_exception_unretrieved(self, m_log):
+ self.loop.set_debug(True)
+ asyncio.set_event_loop(self.loop)
fut = asyncio.Future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
del fut
test_utils.run_briefly(self.loop)
+ support.gc_collect()
self.assertTrue(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_exception_retrieved(self, m_log):
fut = asyncio.Future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
@@ -248,7 +224,7 @@ class FutureTests(test_utils.TestCase):
del fut
self.assertFalse(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_exception_result_retrieved(self, m_log):
fut = asyncio.Future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
@@ -256,32 +232,35 @@ class FutureTests(test_utils.TestCase):
del fut
self.assertFalse(m_log.error.called)
+ @unittest.skipIf(concurrent is None, 'need concurrent.futures')
def test_wrap_future(self):
def run(arg):
- return (arg, threading.get_ident())
+ return (arg, get_thread_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1, loop=self.loop)
res, ident = self.loop.run_until_complete(f2)
self.assertIsInstance(f2, asyncio.Future)
self.assertEqual(res, 'oi')
- self.assertNotEqual(ident, threading.get_ident())
+ self.assertNotEqual(ident, get_thread_ident())
def test_wrap_future_future(self):
f1 = asyncio.Future(loop=self.loop)
f2 = asyncio.wrap_future(f1)
self.assertIs(f1, f2)
- @mock.patch('asyncio.futures.events')
+ @unittest.skipIf(concurrent is None, 'need concurrent.futures')
+ @mock.patch('trollius.futures.events')
def test_wrap_future_use_global_loop(self, m_events):
def run(arg):
- return (arg, threading.get_ident())
+ return (arg, get_thread_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1)
self.assertIs(m_events.get_event_loop.return_value, f2._loop)
+ @unittest.skipIf(concurrent is None, 'need concurrent.futures')
def test_wrap_future_cancel(self):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=self.loop)
@@ -290,6 +269,7 @@ class FutureTests(test_utils.TestCase):
self.assertTrue(f1.cancelled())
self.assertTrue(f2.cancelled())
+ @unittest.skipIf(concurrent is None, 'need concurrent.futures')
def test_wrap_future_cancel2(self):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=self.loop)
@@ -306,12 +286,13 @@ class FutureTests(test_utils.TestCase):
future = asyncio.Future(loop=self.loop)
lineno = sys._getframe().f_lineno - 1
self.assertIsInstance(future._source_traceback, list)
+ filename = sys._getframe().f_code.co_filename
self.assertEqual(future._source_traceback[-1][:3],
- (__file__,
+ (filename,
lineno,
'test_future_source_traceback'))
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def check_future_exception_never_retrieved(self, debug, m_log):
self.loop.set_debug(debug)
@@ -367,12 +348,16 @@ class FutureTests(test_utils.TestCase):
r'MemoryError$'
).format(filename=re.escape(frame[0]),
lineno=frame[1])
- else:
+ elif six.PY3:
regex = (r'^Future/Task exception was never retrieved\n'
r'Traceback \(most recent call last\):\n'
r'.*\n'
r'MemoryError$'
)
+ else:
+ regex = (r'^Future/Task exception was never retrieved\n'
+ r'MemoryError$'
+ )
m_log.error.assert_called_once_with(mock.ANY, exc_info=False)
message = m_log.error.call_args[0][0]
self.assertRegex(message, re.compile(regex, re.DOTALL))
diff --git a/tests/test_locks.py b/tests/test_locks.py
index dda4577..71a6cb3 100644
--- a/tests/test_locks.py
+++ b/tests/test_locks.py
@@ -1,11 +1,12 @@
"""Tests for lock.py"""
-import unittest
-from unittest import mock
import re
-import asyncio
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import From, Return
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
STR_RGX_REPR = (
@@ -42,7 +43,7 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- yield from lock
+ yield From(lock.acquire())
self.loop.run_until_complete(acquire_lock())
self.assertTrue(repr(lock).endswith('[locked]>'))
@@ -53,7 +54,8 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from lock)
+ yield From(lock.acquire())
+ raise Return(lock)
res = self.loop.run_until_complete(acquire_lock())
@@ -71,21 +73,21 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- if (yield from lock.acquire()):
+ if (yield From(lock.acquire())):
result.append(1)
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- if (yield from lock.acquire()):
+ if (yield From(lock.acquire())):
result.append(2)
- return True
+ raise Return(True)
@asyncio.coroutine
def c3(result):
- if (yield from lock.acquire()):
+ if (yield From(lock.acquire())):
result.append(3)
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
@@ -147,22 +149,22 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def lockit(name, blocker):
- yield from lock.acquire()
+ yield From(lock.acquire())
try:
if blocker is not None:
- yield from blocker
+ yield From(blocker)
finally:
lock.release()
fa = asyncio.Future(loop=self.loop)
ta = asyncio.Task(lockit('A', fa), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertTrue(lock.locked())
tb = asyncio.Task(lockit('B', None), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual(len(lock._waiters), 1)
tc = asyncio.Task(lockit('C', None), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual(len(lock._waiters), 2)
# Create the race and check.
@@ -170,7 +172,7 @@ class LockTests(test_utils.TestCase):
fa.set_result(None)
tb.cancel()
self.assertTrue(lock._waiters[0].cancelled())
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertFalse(lock.locked())
self.assertTrue(ta.done())
self.assertTrue(tb.cancelled())
@@ -194,7 +196,7 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from lock)
+ raise Return((yield From(lock)))
with self.loop.run_until_complete(acquire_lock()):
self.assertTrue(lock.locked())
@@ -206,9 +208,9 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from lock)
+ raise Return((yield From(lock)))
- # This spells "yield from lock" outside a generator.
+ # This spells "yield From(lock)" outside a generator.
cm = self.loop.run_until_complete(acquire_lock())
with cm:
self.assertTrue(lock.locked())
@@ -228,7 +230,7 @@ class LockTests(test_utils.TestCase):
except RuntimeError as err:
self.assertEqual(
str(err),
- '"yield from" should be used as context manager expression')
+ '"yield From" should be used as context manager expression')
self.assertFalse(lock.locked())
@@ -273,30 +275,30 @@ class EventTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- if (yield from ev.wait()):
+ if (yield From(ev.wait())):
result.append(1)
@asyncio.coroutine
def c2(result):
- if (yield from ev.wait()):
+ if (yield From(ev.wait())):
result.append(2)
@asyncio.coroutine
def c3(result):
- if (yield from ev.wait()):
+ if (yield From(ev.wait())):
result.append(3)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([], result)
t3 = asyncio.Task(c3(result), loop=self.loop)
ev.set()
- test_utils.run_briefly(self.loop)
- self.assertEqual([3, 1, 2], result)
+ test_utils.run_briefly(self.loop, 2)
+ self.assertEqual([1, 2, 3], result)
self.assertTrue(t1.done())
self.assertIsNone(t1.result())
@@ -338,9 +340,9 @@ class EventTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- if (yield from ev.wait()):
+ if (yield From(ev.wait())):
result.append(1)
- return True
+ raise Return(True)
t = asyncio.Task(c1(result), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -386,56 +388,56 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(1)
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(2)
- return True
+ raise Return(True)
@asyncio.coroutine
def c3(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(3)
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([], result)
self.assertFalse(cond.locked())
self.assertTrue(self.loop.run_until_complete(cond.acquire()))
cond.notify()
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([], result)
self.assertTrue(cond.locked())
cond.release()
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.assertTrue(cond.locked())
cond.notify(2)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.assertTrue(cond.locked())
cond.release()
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1, 2], result)
self.assertTrue(cond.locked())
cond.release()
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1, 2, 3], result)
self.assertTrue(cond.locked())
@@ -475,11 +477,11 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from cond.acquire()
- if (yield from cond.wait_for(predicate)):
+ yield From(cond.acquire())
+ if (yield From(cond.wait_for(predicate))):
result.append(1)
cond.release()
- return True
+ raise Return(True)
t = asyncio.Task(c1(result), loop=self.loop)
@@ -520,27 +522,27 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(1)
cond.release()
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(2)
cond.release()
- return True
+ raise Return(True)
@asyncio.coroutine
def c3(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(3)
cond.release()
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
@@ -552,14 +554,16 @@ class ConditionTests(test_utils.TestCase):
self.loop.run_until_complete(cond.acquire())
cond.notify(1)
cond.release()
- test_utils.run_briefly(self.loop)
+ # each coroutine requires 2 runs of the event loop
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.loop.run_until_complete(cond.acquire())
cond.notify(1)
cond.notify(2048)
cond.release()
- test_utils.run_briefly(self.loop)
+ # each coroutine requires 2 runs of the event loop
+ test_utils.run_briefly(self.loop, 4)
self.assertEqual([1, 2, 3], result)
self.assertTrue(t1.done())
@@ -576,19 +580,19 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(1)
cond.release()
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(2)
cond.release()
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
@@ -599,7 +603,8 @@ class ConditionTests(test_utils.TestCase):
self.loop.run_until_complete(cond.acquire())
cond.notify_all()
cond.release()
- test_utils.run_briefly(self.loop)
+ # each coroutine requires 2 runs of the event loop
+ test_utils.run_briefly(self.loop, 4)
self.assertEqual([1, 2], result)
self.assertTrue(t1.done())
@@ -636,7 +641,7 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_cond():
- return (yield from cond)
+ raise Return((yield From(cond)))
with self.loop.run_until_complete(acquire_cond()):
self.assertTrue(cond.locked())
@@ -652,7 +657,7 @@ class ConditionTests(test_utils.TestCase):
except RuntimeError as err:
self.assertEqual(
str(err),
- '"yield from" should be used as context manager expression')
+ '"yield From" should be used as context manager expression')
self.assertFalse(cond.locked())
@@ -718,7 +723,8 @@ class SemaphoreTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from sem)
+ yield From(sem.acquire())
+ raise Return(sem)
res = self.loop.run_until_complete(acquire_lock())
@@ -743,33 +749,34 @@ class SemaphoreTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from sem.acquire()
+ yield From(sem.acquire())
result.append(1)
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- yield from sem.acquire()
+ yield From(sem.acquire())
result.append(2)
- return True
+ raise Return(True)
@asyncio.coroutine
def c3(result):
- yield from sem.acquire()
+ yield From(sem.acquire())
result.append(3)
- return True
+ raise Return(True)
@asyncio.coroutine
def c4(result):
- yield from sem.acquire()
+ yield From(sem.acquire())
result.append(4)
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ # each coroutine requires 2 runs of the event loop
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.assertTrue(sem.locked())
self.assertEqual(2, len(sem._waiters))
@@ -829,7 +836,7 @@ class SemaphoreTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from sem)
+ raise Return((yield From(sem)))
with self.loop.run_until_complete(acquire_lock()):
self.assertFalse(sem.locked())
@@ -849,7 +856,7 @@ class SemaphoreTests(test_utils.TestCase):
except RuntimeError as err:
self.assertEqual(
str(err),
- '"yield from" should be used as context manager expression')
+ '"yield From" should be used as context manager expression')
self.assertEqual(2, sem._value)
diff --git a/tests/test_proactor_events.py b/tests/test_proactor_events.py
index fcd9ab1..ceb28e2 100644
--- a/tests/test_proactor_events.py
+++ b/tests/test_proactor_events.py
@@ -1,15 +1,16 @@
"""Tests for proactor_events.py"""
import socket
-import unittest
-from unittest import mock
-import asyncio
-from asyncio.proactor_events import BaseProactorEventLoop
-from asyncio.proactor_events import _ProactorSocketTransport
-from asyncio.proactor_events import _ProactorWritePipeTransport
-from asyncio.proactor_events import _ProactorDuplexPipeTransport
-from asyncio import test_utils
+from trollius import test_utils
+from trollius.proactor_events import BaseProactorEventLoop
+from trollius.proactor_events import _ProactorDuplexPipeTransport
+from trollius.proactor_events import _ProactorSocketTransport
+from trollius.proactor_events import _ProactorWritePipeTransport
+from trollius.py33_exceptions import ConnectionAbortedError, ConnectionResetError
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
+import trollius as asyncio
def close_transport(transport):
@@ -152,7 +153,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
self.loop._proactor.send.return_value.add_done_callback.\
assert_called_with(tr._loop_writing)
- @mock.patch('asyncio.proactor_events.logger')
+ @mock.patch('trollius.proactor_events.logger')
def test_loop_writing_err(self, m_log):
err = self.loop._proactor.send.side_effect = OSError()
tr = self.socket_transport()
@@ -226,7 +227,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_fatal_error(self, m_logging):
tr = self.socket_transport()
tr._force_close = mock.Mock()
@@ -539,7 +540,7 @@ class BaseProactorEventLoopTests(test_utils.TestCase):
def test_process_events(self):
self.loop._process_events([])
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_create_server(self, m_log):
pf = mock.Mock()
call_soon = self.loop.call_soon = mock.Mock()
diff --git a/tests/test_queues.py b/tests/test_queues.py
index 8e38175..75ef988 100644
--- a/tests/test_queues.py
+++ b/tests/test_queues.py
@@ -1,10 +1,10 @@
"""Tests for queues.py"""
-import unittest
-from unittest import mock
-
-import asyncio
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import Return, From
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
class _QueueTestBase(test_utils.TestCase):
@@ -32,7 +32,7 @@ class QueueBasicTests(_QueueTestBase):
q = asyncio.Queue(loop=loop)
self.assertTrue(fn(q).startswith('<Queue'), fn(q))
- id_is_present = hex(id(q)) in fn(q)
+ id_is_present = ("%x" % id(q)) in fn(q)
self.assertEqual(expect_id, id_is_present)
@asyncio.coroutine
@@ -41,7 +41,7 @@ class QueueBasicTests(_QueueTestBase):
# Start a task that waits to get.
asyncio.Task(q.get(), loop=loop)
# Let it start waiting.
- yield from asyncio.sleep(0.1, loop=loop)
+ yield From(asyncio.sleep(0.1, loop=loop))
self.assertTrue('_getters[1]' in fn(q))
# resume q.get coroutine to finish generator
q.put_nowait(0)
@@ -55,7 +55,7 @@ class QueueBasicTests(_QueueTestBase):
# Start a task that waits to put.
asyncio.Task(q.put(2), loop=loop)
# Let it start waiting.
- yield from asyncio.sleep(0.1, loop=loop)
+ yield From(asyncio.sleep(0.1, loop=loop))
self.assertTrue('_putters[1]' in fn(q))
# resume q.put coroutine to finish generator
q.get_nowait()
@@ -127,21 +127,22 @@ class QueueBasicTests(_QueueTestBase):
@asyncio.coroutine
def putter():
for i in range(3):
- yield from q.put(i)
+ yield From(q.put(i))
have_been_put.append(i)
- return True
+ raise Return(True)
@asyncio.coroutine
def test():
t = asyncio.Task(putter(), loop=loop)
- yield from asyncio.sleep(0.01, loop=loop)
+ yield From(None) # one extra iteration for the putter coroutine
+ yield From(asyncio.sleep(0.01, loop=loop))
# The putter is blocked after putting two items.
self.assertEqual([0, 1], have_been_put)
self.assertEqual(0, q.get_nowait())
# Let the putter resume and put last item.
- yield from asyncio.sleep(0.01, loop=loop)
+ yield From(asyncio.sleep(0.01, loop=loop))
self.assertEqual([0, 1, 2], have_been_put)
self.assertEqual(1, q.get_nowait())
self.assertEqual(2, q.get_nowait())
@@ -161,7 +162,8 @@ class QueueGetTests(_QueueTestBase):
@asyncio.coroutine
def queue_get():
- return (yield from q.get())
+ result = (yield From(q.get()))
+ raise Return(result)
res = self.loop.run_until_complete(queue_get())
self.assertEqual(1, res)
@@ -189,25 +191,24 @@ class QueueGetTests(_QueueTestBase):
q = asyncio.Queue(loop=loop)
started = asyncio.Event(loop=loop)
- finished = False
+ non_local = {'finished': False}
@asyncio.coroutine
def queue_get():
- nonlocal finished
started.set()
- res = yield from q.get()
- finished = True
- return res
+ res = yield From(q.get())
+ non_local['finished'] = True
+ raise Return(res)
@asyncio.coroutine
def queue_put():
loop.call_later(0.01, q.put_nowait, 1)
queue_get_task = asyncio.Task(queue_get(), loop=loop)
- yield from started.wait()
- self.assertFalse(finished)
- res = yield from queue_get_task
- self.assertTrue(finished)
- return res
+ yield From(started.wait())
+ self.assertFalse(non_local['finished'])
+ res = yield From(queue_get_task)
+ self.assertTrue(non_local['finished'])
+ raise Return(res)
res = loop.run_until_complete(queue_put())
self.assertEqual(1, res)
@@ -237,14 +238,16 @@ class QueueGetTests(_QueueTestBase):
@asyncio.coroutine
def queue_get():
- return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
+ result = (yield From(asyncio.wait_for(q.get(), 0.051, loop=loop)))
+ raise Return(result)
@asyncio.coroutine
def test():
get_task = asyncio.Task(queue_get(), loop=loop)
- yield from asyncio.sleep(0.01, loop=loop) # let the task start
+ yield From(asyncio.sleep(0.01, loop=loop)) # let the task start
q.put_nowait(1)
- return (yield from get_task)
+ result = (yield From(get_task))
+ raise Return(result)
self.assertEqual(1, loop.run_until_complete(test()))
self.assertAlmostEqual(0.06, loop.time())
@@ -280,12 +283,13 @@ class QueuePutTests(_QueueTestBase):
@asyncio.coroutine
def queue_put():
# No maxsize, won't block.
- yield from q.put(1)
+ yield From(q.put(1))
self.loop.run_until_complete(queue_put())
def test_blocking_put_wait(self):
+ @asyncio.coroutine
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
@@ -295,24 +299,24 @@ class QueuePutTests(_QueueTestBase):
q = asyncio.Queue(maxsize=1, loop=loop)
started = asyncio.Event(loop=loop)
- finished = False
+ non_local = {'finished': False}
@asyncio.coroutine
def queue_put():
- nonlocal finished
started.set()
- yield from q.put(1)
- yield from q.put(2)
- finished = True
+ yield From(q.put(1))
+ yield From(q.put(2))
+ non_local['finished'] = True
@asyncio.coroutine
def queue_get():
- loop.call_later(0.01, q.get_nowait)
queue_put_task = asyncio.Task(queue_put(), loop=loop)
- yield from started.wait()
- self.assertFalse(finished)
- yield from queue_put_task
- self.assertTrue(finished)
+ yield From(None)
+ loop.call_later(0.01, q.get_nowait)
+ yield From(started.wait())
+ self.assertFalse(non_local['finished'])
+ yield From(queue_put_task)
+ self.assertTrue(non_local['finished'])
loop.run_until_complete(queue_get())
self.assertAlmostEqual(0.01, loop.time())
@@ -430,8 +434,8 @@ class QueuePutTests(_QueueTestBase):
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
@asyncio.coroutine
def queue_put():
- yield from q.put(1)
- yield from q.put(2)
+ yield From(q.put(1))
+ yield From(q.put(2))
self.assertTrue(q.full())
self.loop.run_until_complete(queue_put())
@@ -440,12 +444,13 @@ class QueuePutTests(_QueueTestBase):
@asyncio.coroutine
def queue_put():
- yield from q.put(1)
- return True
+ yield From(q.put(1))
+ raise Return(True)
@asyncio.coroutine
def test():
- return (yield from q.get())
+ result = (yield From(q.get()))
+ raise Return(result)
t = asyncio.Task(queue_put(), loop=self.loop)
self.assertEqual(1, self.loop.run_until_complete(test()))
@@ -502,7 +507,7 @@ class PriorityQueueTests(_QueueTestBase):
self.assertEqual([1, 2, 3], items)
-class _QueueJoinTestMixin:
+class _QueueJoinTestMixin(object):
q_class = None
@@ -515,7 +520,7 @@ class _QueueJoinTestMixin:
for i in range(100):
q.put_nowait(i)
- accumulator = 0
+ non_local = {'accumulator': 0}
# Two workers get items from the queue and call task_done after each.
# Join the queue and assert all items have been processed.
@@ -523,11 +528,9 @@ class _QueueJoinTestMixin:
@asyncio.coroutine
def worker():
- nonlocal accumulator
-
while running:
- item = yield from q.get()
- accumulator += item
+ item = yield From(q.get())
+ non_local['accumulator'] += item
q.task_done()
@asyncio.coroutine
@@ -535,11 +538,11 @@ class _QueueJoinTestMixin:
tasks = [asyncio.Task(worker(), loop=self.loop)
for index in range(2)]
- yield from q.join()
- return tasks
+ yield From(q.join())
+ raise Return(tasks)
tasks = self.loop.run_until_complete(test())
- self.assertEqual(sum(range(100)), accumulator)
+ self.assertEqual(sum(range(100)), non_local['accumulator'])
# close running generators
running = False
@@ -555,8 +558,8 @@ class _QueueJoinTestMixin:
@asyncio.coroutine
def join():
- yield from q.join()
- yield from q.join()
+ yield From(q.join())
+ yield From(q.join())
self.loop.run_until_complete(join())
diff --git a/tests/test_selector_events.py b/tests/test_selector_events.py
index f0fcdd2..2f3d5ce 100644
--- a/tests/test_selector_events.py
+++ b/tests/test_selector_events.py
@@ -2,22 +2,39 @@
import errno
import socket
-import unittest
-from unittest import mock
+import sys
try:
import ssl
except ImportError:
ssl = None
-
-import asyncio
-from asyncio import selectors
-from asyncio import test_utils
-from asyncio.selector_events import BaseSelectorEventLoop
-from asyncio.selector_events import _SelectorTransport
-from asyncio.selector_events import _SelectorSslTransport
-from asyncio.selector_events import _SelectorSocketTransport
-from asyncio.selector_events import _SelectorDatagramTransport
-
+else:
+ from trollius.py3_ssl import SSLWantReadError, SSLWantWriteError
+
+import trollius as asyncio
+from trollius.py33_exceptions import (
+ BlockingIOError, InterruptedError,
+ ConnectionResetError, ConnectionRefusedError)
+from trollius import selectors
+from trollius import test_utils
+from trollius.selector_events import BaseSelectorEventLoop
+from trollius.selector_events import _SelectorDatagramTransport
+from trollius.selector_events import _SelectorSocketTransport
+from trollius.selector_events import _SelectorSslTransport
+from trollius.selector_events import _SelectorTransport
+from trollius.selector_events import _SSL_REQUIRES_SELECT
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
+
+
+if sys.version_info >= (3,):
+ UNICODE_STR = 'unicode'
+else:
+ UNICODE_STR = unicode('unicode')
+ try:
+ memoryview
+ except NameError:
+ # Python 2.6
+ memoryview = buffer
MOCK_ANY = mock.ANY
@@ -94,8 +111,8 @@ class BaseSelectorEventLoopTests(test_utils.TestCase):
# execute pending callbacks to close the socket transport
test_utils.run_briefly(self.loop)
- @mock.patch('asyncio.selector_events.ssl', None)
- @mock.patch('asyncio.sslproto.ssl', None)
+ @mock.patch('trollius.selector_events.ssl', None)
+ @mock.patch('trollius.sslproto.ssl', None)
def test_make_ssl_transport_without_ssl_error(self):
m = mock.Mock()
self.loop.add_reader = mock.Mock()
@@ -733,7 +750,7 @@ class SelectorTransportTests(test_utils.TestCase):
self.assertFalse(self.loop.readers)
self.assertEqual(1, self.loop.remove_reader_count[7])
- @mock.patch('asyncio.log.logger.error')
+ @mock.patch('trollius.log.logger.error')
def test_fatal_error(self, m_exc):
exc = OSError()
tr = self.create_transport()
@@ -900,7 +917,7 @@ class SelectorSocketTransportTests(test_utils.TestCase):
transport = self.socket_transport()
transport.write(data)
- self.sock.send.assert_called_with(data)
+ self.sock.send.assert_called_with(b'data')
def test_write_no_data(self):
transport = self.socket_transport()
@@ -969,7 +986,7 @@ class SelectorSocketTransportTests(test_utils.TestCase):
self.loop.assert_writer(7, transport._write_ready)
self.assertEqual(list_to_buffer([b'data']), transport._buffer)
- @mock.patch('asyncio.selector_events.logger')
+ @mock.patch('trollius.selector_events.logger')
def test_write_exception(self, m_log):
err = self.sock.send.side_effect = OSError()
@@ -1077,7 +1094,7 @@ class SelectorSocketTransportTests(test_utils.TestCase):
err,
'Fatal write error on socket transport')
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_write_ready_exception_and_close(self, m_log):
self.sock.send.side_effect = OSError()
remove_writer = self.loop.remove_writer = mock.Mock()
@@ -1150,13 +1167,13 @@ class SelectorSslTransportTests(test_utils.TestCase):
def test_on_handshake_reader_retry(self):
self.loop.set_debug(False)
- self.sslsock.do_handshake.side_effect = ssl.SSLWantReadError
+ self.sslsock.do_handshake.side_effect = SSLWantReadError
transport = self.ssl_transport()
self.loop.assert_reader(1, transport._on_handshake, None)
def test_on_handshake_writer_retry(self):
self.loop.set_debug(False)
- self.sslsock.do_handshake.side_effect = ssl.SSLWantWriteError
+ self.sslsock.do_handshake.side_effect = SSLWantWriteError
transport = self.ssl_transport()
self.loop.assert_writer(1, transport._on_handshake, None)
@@ -1234,7 +1251,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
def test_write_str(self):
transport = self._make_one()
- self.assertRaises(TypeError, transport.write, 'str')
+ self.assertRaises(TypeError, transport.write, UNICODE_STR)
def test_write_closing(self):
transport = self._make_one()
@@ -1243,7 +1260,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport.write(b'data')
self.assertEqual(transport._conn_lost, 2)
- @mock.patch('asyncio.selector_events.logger')
+ @mock.patch('trollius.selector_events.logger')
def test_write_exception(self, m_log):
transport = self._make_one()
transport._conn_lost = 1
@@ -1255,6 +1272,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport.write(b'data')
m_log.warning.assert_called_with('socket.send() raised exception.')
+ @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv(self):
self.sslsock.recv.return_value = b'data'
transport = self._make_one()
@@ -1276,6 +1294,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
self.loop.add_writer.assert_called_with(
transport._sock_fd, transport._write_ready)
+ @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_eof(self):
self.sslsock.recv.return_value = b''
transport = self._make_one()
@@ -1284,6 +1303,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport.close.assert_called_with()
self.protocol.eof_received.assert_called_with()
+ @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_conn_reset(self):
err = self.sslsock.recv.side_effect = ConnectionResetError()
transport = self._make_one()
@@ -1292,8 +1312,9 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport._read_ready()
transport._force_close.assert_called_with(err)
+ @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_retry(self):
- self.sslsock.recv.side_effect = ssl.SSLWantReadError
+ self.sslsock.recv.side_effect = SSLWantReadError
transport = self._make_one()
transport._read_ready()
self.assertTrue(self.sslsock.recv.called)
@@ -1307,10 +1328,11 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport._read_ready()
self.assertFalse(self.protocol.data_received.called)
+ @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_write(self):
self.loop.remove_reader = mock.Mock()
self.loop.add_writer = mock.Mock()
- self.sslsock.recv.side_effect = ssl.SSLWantWriteError
+ self.sslsock.recv.side_effect = SSLWantWriteError
transport = self._make_one()
transport._read_ready()
self.assertFalse(self.protocol.data_received.called)
@@ -1320,6 +1342,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
self.loop.add_writer.assert_called_with(
transport._sock_fd, transport._write_ready)
+ @unittest.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_exc(self):
err = self.sslsock.recv.side_effect = OSError()
transport = self._make_one()
@@ -1383,7 +1406,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport = self._make_one()
transport._buffer = list_to_buffer([b'data'])
- self.sslsock.send.side_effect = ssl.SSLWantWriteError
+ self.sslsock.send.side_effect = SSLWantWriteError
transport._write_ready()
self.assertEqual(list_to_buffer([b'data']), transport._buffer)
@@ -1396,7 +1419,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport._buffer = list_to_buffer([b'data'])
self.loop.remove_writer = mock.Mock()
- self.sslsock.send.side_effect = ssl.SSLWantReadError
+ self.sslsock.send.side_effect = SSLWantReadError
transport._write_ready()
self.assertFalse(self.protocol.data_received.called)
self.assertTrue(transport._write_wants_read)
@@ -1452,7 +1475,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
self.assertTrue(self.protocol.connection_lost.called)
def test_close_not_connected(self):
- self.sslsock.do_handshake.side_effect = ssl.SSLWantReadError
+ self.sslsock.do_handshake.side_effect = SSLWantReadError
self.check_close()
self.assertFalse(self.protocol.connection_made.called)
self.assertFalse(self.protocol.connection_lost.called)
@@ -1465,9 +1488,9 @@ class SelectorSslTransportTests(test_utils.TestCase):
server_hostname='localhost')
-class SelectorSslWithoutSslTransportTests(unittest.TestCase):
+class SelectorSslWithoutSslTransportTests(test_utils.TestCase):
- @mock.patch('asyncio.selector_events.ssl', None)
+ @mock.patch('trollius.selector_events.ssl', None)
def test_ssl_transport_requires_ssl_module(self):
Mock = mock.Mock
with self.assertRaises(RuntimeError):
@@ -1550,7 +1573,7 @@ class SelectorDatagramTransportTests(test_utils.TestCase):
transport.sendto(data, ('0.0.0.0', 1234))
self.assertTrue(self.sock.sendto.called)
self.assertEqual(
- self.sock.sendto.call_args[0], (data, ('0.0.0.0', 1234)))
+ self.sock.sendto.call_args[0], (b'data', ('0.0.0.0', 1234)))
def test_sendto_no_data(self):
transport = self.datagram_transport()
@@ -1606,7 +1629,7 @@ class SelectorDatagramTransportTests(test_utils.TestCase):
self.assertEqual(
[(b'data', ('0.0.0.0', 12345))], list(transport._buffer))
- @mock.patch('asyncio.selector_events.logger')
+ @mock.patch('trollius.selector_events.logger')
def test_sendto_exception(self, m_log):
data = b'data'
err = self.sock.sendto.side_effect = RuntimeError()
@@ -1655,7 +1678,7 @@ class SelectorDatagramTransportTests(test_utils.TestCase):
def test_sendto_str(self):
transport = self.datagram_transport()
- self.assertRaises(TypeError, transport.sendto, 'str', ())
+ self.assertRaises(TypeError, transport.sendto, UNICODE_STR, ())
def test_sendto_connected_addr(self):
transport = self.datagram_transport(address=('0.0.0.0', 1))
@@ -1749,7 +1772,7 @@ class SelectorDatagramTransportTests(test_utils.TestCase):
self.assertFalse(transport._fatal_error.called)
self.assertTrue(self.protocol.error_received.called)
- @mock.patch('asyncio.base_events.logger.error')
+ @mock.patch('trollius.base_events.logger.error')
def test_fatal_error_connected(self, m_exc):
transport = self.datagram_transport(address=('0.0.0.0', 1))
err = ConnectionRefusedError()
diff --git a/tests/test_selectors.py b/tests/test_selectors.py
index a33f0fa..a06596e 100644
--- a/tests/test_selectors.py
+++ b/tests/test_selectors.py
@@ -4,22 +4,18 @@ import random
import signal
import sys
from time import sleep
-import unittest
-import unittest.mock
-try:
- from test import support
-except ImportError:
- from asyncio import test_support as support
-try:
- from time import monotonic as time
-except ImportError:
- from time import time as time
try:
import resource
except ImportError:
resource = None
-from asyncio import selectors
-from asyncio.test_utils import socketpair
+
+from trollius import selectors
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import socketpair
+from trollius.test_utils import unittest
+from trollius.time_monotonic import time_monotonic as time
def find_ready_matching(ready, flag):
@@ -30,7 +26,7 @@ def find_ready_matching(ready, flag):
return match
-class BaseSelectorTestCase(unittest.TestCase):
+class BaseSelectorTestCase(object):
def make_socketpair(self):
rd, wr = socketpair()
@@ -151,8 +147,8 @@ class BaseSelectorTestCase(unittest.TestCase):
# modify use a shortcut
d3 = object()
- s.register = unittest.mock.Mock()
- s.unregister = unittest.mock.Mock()
+ s.register = mock.Mock()
+ s.unregister = mock.Mock()
s.modify(rd, selectors.EVENT_READ, d3)
self.assertFalse(s.register.called)
@@ -357,7 +353,7 @@ class BaseSelectorTestCase(unittest.TestCase):
self.assertLess(time() - t, 2.5)
-class ScalableSelectorMixIn:
+class ScalableSelectorMixIn(object):
# see issue #18963 for why it's skipped on older OS X versions
@support.requires_mac_ver(10, 5)
@@ -403,52 +399,48 @@ class ScalableSelectorMixIn:
self.assertEqual(NUM_FDS // 2, len(s.select()))
-class DefaultSelectorTestCase(BaseSelectorTestCase):
+class DefaultSelectorTestCase(BaseSelectorTestCase, test_utils.TestCase):
SELECTOR = selectors.DefaultSelector
-class SelectSelectorTestCase(BaseSelectorTestCase):
+class SelectSelectorTestCase(BaseSelectorTestCase, test_utils.TestCase):
SELECTOR = selectors.SelectSelector
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
-class PollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn):
+class PollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn,
+ test_utils.TestCase):
SELECTOR = getattr(selectors, 'PollSelector', None)
@unittest.skipUnless(hasattr(selectors, 'EpollSelector'),
"Test needs selectors.EpollSelector")
-class EpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn):
+class EpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn,
+ test_utils.TestCase):
SELECTOR = getattr(selectors, 'EpollSelector', None)
@unittest.skipUnless(hasattr(selectors, 'KqueueSelector'),
- "Test needs selectors.KqueueSelector)")
-class KqueueSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn):
+ "Test needs selectors.KqueueSelector)")
+class KqueueSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn,
+ test_utils.TestCase):
SELECTOR = getattr(selectors, 'KqueueSelector', None)
@unittest.skipUnless(hasattr(selectors, 'DevpollSelector'),
- "Test needs selectors.DevpollSelector")
-class DevpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn):
+ "Test needs selectors.DevpollSelector")
+class DevpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn,
+ test_utils.TestCase):
SELECTOR = getattr(selectors, 'DevpollSelector', None)
-def test_main():
- tests = [DefaultSelectorTestCase, SelectSelectorTestCase,
- PollSelectorTestCase, EpollSelectorTestCase,
- KqueueSelectorTestCase, DevpollSelectorTestCase]
- support.run_unittest(*tests)
- support.reap_children()
-
-
-if __name__ == "__main__":
- test_main()
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/test_sslproto.py b/tests/test_sslproto.py
index a72967e..aa2af32 100644
--- a/tests/test_sslproto.py
+++ b/tests/test_sslproto.py
@@ -1,15 +1,16 @@
"""Tests for asyncio/sslproto.py."""
-import unittest
-from unittest import mock
try:
import ssl
except ImportError:
ssl = None
-import asyncio
-from asyncio import sslproto
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import ConnectionResetError
+from trollius import sslproto
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
@unittest.skipIf(ssl is None, 'No ssl module')
@@ -36,7 +37,7 @@ class SslProtoHandshakeTests(test_utils.TestCase):
def mock_handshake(callback):
return []
sslpipe.do_handshake.side_effect = mock_handshake
- with mock.patch('asyncio.sslproto._SSLPipe', return_value=sslpipe):
+ with mock.patch('trollius.sslproto._SSLPipe', return_value=sslpipe):
ssl_proto.connection_made(transport)
def test_cancel_handshake(self):
diff --git a/tests/test_streams.py b/tests/test_streams.py
index ef6f603..390174c 100644
--- a/tests/test_streams.py
+++ b/tests/test_streams.py
@@ -1,18 +1,22 @@
"""Tests for streams.py."""
import gc
+import io
import os
import socket
+import six
import sys
-import unittest
-from unittest import mock
try:
import ssl
except ImportError:
ssl = None
-import asyncio
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import Return, From
+from trollius import compat
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
class StreamReaderTests(test_utils.TestCase):
@@ -29,9 +33,9 @@ class StreamReaderTests(test_utils.TestCase):
self.loop.close()
gc.collect()
- super().tearDown()
+ super(StreamReaderTests, self).tearDown()
- @mock.patch('asyncio.streams.events')
+ @mock.patch('trollius.streams.events')
def test_ctor_global_loop(self, m_events):
stream = asyncio.StreamReader()
self.assertIs(stream._loop, m_events.get_event_loop.return_value)
@@ -413,7 +417,7 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def set_err():
- stream.set_exception(ValueError())
+ self.loop.call_soon(stream.set_exception, ValueError())
t1 = asyncio.Task(stream.readline(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
@@ -444,9 +448,9 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def handle_client(self, client_reader, client_writer):
- data = yield from client_reader.readline()
+ data = yield From(client_reader.readline())
client_writer.write(data)
- yield from client_writer.drain()
+ yield From(client_writer.drain())
client_writer.close()
def start(self):
@@ -481,14 +485,14 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def client(addr):
- reader, writer = yield from asyncio.open_connection(
- *addr, loop=self.loop)
+ reader, writer = yield From(asyncio.open_connection(
+ *addr, loop=self.loop))
# send a line
writer.write(b"hello world!\n")
# read it back
- msgback = yield from reader.readline()
+ msgback = yield From(reader.readline())
writer.close()
- return msgback
+ raise Return(msgback)
# test the server variant with a coroutine as client handler
server = MyServer(self.loop)
@@ -518,9 +522,9 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def handle_client(self, client_reader, client_writer):
- data = yield from client_reader.readline()
+ data = yield From(client_reader.readline())
client_writer.write(data)
- yield from client_writer.drain()
+ yield From(client_writer.drain())
client_writer.close()
def start(self):
@@ -547,14 +551,14 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def client(path):
- reader, writer = yield from asyncio.open_unix_connection(
- path, loop=self.loop)
+ reader, writer = yield From(asyncio.open_unix_connection(
+ path, loop=self.loop))
# send a line
writer.write(b"hello world!\n")
# read it back
- msgback = yield from reader.readline()
+ msgback = yield From(reader.readline())
writer.close()
- return msgback
+ raise Return(msgback)
# test the server variant with a coroutine as client handler
with test_utils.unix_socket_path() as path:
@@ -591,7 +595,7 @@ os.close(fd)
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(wfd)]
- pipe = open(rfd, 'rb', 0)
+ pipe = io.open(rfd, 'rb', 0)
reader = asyncio.StreamReader(loop=self.loop, limit=1)
protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
transport, _ = self.loop.run_until_complete(
@@ -601,9 +605,10 @@ os.close(fd)
watcher.attach_loop(self.loop)
try:
asyncio.set_child_watcher(watcher)
- create = asyncio.create_subprocess_exec(*args,
- pass_fds={wfd},
- loop=self.loop)
+ kw = {'loop': self.loop}
+ if six.PY3:
+ kw['pass_fds'] = set((wfd,))
+ create = asyncio.create_subprocess_exec(*args, **kw)
proc = self.loop.run_until_complete(create)
self.loop.run_until_complete(proc.wait())
finally:
diff --git a/tests/test_subprocess.py b/tests/test_subprocess.py
index 38f0cee..21e003a 100644
--- a/tests/test_subprocess.py
+++ b/tests/test_subprocess.py
@@ -1,29 +1,34 @@
+from trollius import subprocess
+from trollius import test_utils
+import trollius as asyncio
+import os
import signal
import sys
-import unittest
import warnings
-from unittest import mock
-
-import asyncio
-from asyncio import base_subprocess
-from asyncio import subprocess
-from asyncio import test_utils
-try:
- from test import support
-except ImportError:
- from asyncio import test_support as support
+from trollius import BrokenPipeError, ConnectionResetError, ProcessLookupError
+from trollius import From, Return
+from trollius import base_subprocess
+from trollius import test_support as support
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
+
if sys.platform != 'win32':
- from asyncio import unix_events
+ from trollius import unix_events
+
# Program blocking
PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)']
# Program copying input to output
-PROGRAM_CAT = [
- sys.executable, '-c',
- ';'.join(('import sys',
- 'data = sys.stdin.buffer.read()',
- 'sys.stdout.buffer.write(data)'))]
+if sys.version_info >= (3,):
+ PROGRAM_CAT = ';'.join(('import sys',
+ 'data = sys.stdin.buffer.read()',
+ 'sys.stdout.buffer.write(data)'))
+else:
+ PROGRAM_CAT = ';'.join(('import sys',
+ 'data = sys.stdin.read()',
+ 'sys.stdout.write(data)'))
+PROGRAM_CAT = [sys.executable, '-c', PROGRAM_CAT]
class TestSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, *args, **kwargs):
@@ -82,21 +87,21 @@ class SubprocessMixin:
@asyncio.coroutine
def run(data):
- proc = yield from asyncio.create_subprocess_exec(
- *args,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- loop=self.loop)
+ proc = yield From(asyncio.create_subprocess_exec(
+ *args,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ loop=self.loop))
# feed data
proc.stdin.write(data)
- yield from proc.stdin.drain()
+ yield From(proc.stdin.drain())
proc.stdin.close()
# get output and exitcode
- data = yield from proc.stdout.read()
- exitcode = yield from proc.wait()
- return (exitcode, data)
+ data = yield From(proc.stdout.read())
+ exitcode = yield From(proc.wait())
+ raise Return(exitcode, data)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
@@ -109,13 +114,13 @@ class SubprocessMixin:
@asyncio.coroutine
def run(data):
- proc = yield from asyncio.create_subprocess_exec(
+ proc = yield From(asyncio.create_subprocess_exec(
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
- loop=self.loop)
- stdout, stderr = yield from proc.communicate(data)
- return proc.returncode, stdout
+ loop=self.loop))
+ stdout, stderr = yield From(proc.communicate(data))
+ raise Return(proc.returncode, stdout)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
@@ -130,10 +135,14 @@ class SubprocessMixin:
exitcode = self.loop.run_until_complete(proc.wait())
self.assertEqual(exitcode, 7)
+ @unittest.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
def test_start_new_session(self):
+ def start_new_session():
+ os.setsid()
+
# start the new process in a new session
create = asyncio.create_subprocess_shell('exit 8',
- start_new_session=True,
+ preexec_fn=start_new_session,
loop=self.loop)
proc = self.loop.run_until_complete(create)
exitcode = self.loop.run_until_complete(proc.wait())
@@ -165,7 +174,11 @@ class SubprocessMixin:
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_send_signal(self):
- code = 'import time; print("sleeping", flush=True); time.sleep(3600)'
+ code = '; '.join((
+ 'import sys, time',
+ 'print("sleeping")',
+ 'sys.stdout.flush()',
+ 'time.sleep(3600)'))
args = [sys.executable, '-c', code]
create = asyncio.create_subprocess_exec(*args,
stdout=subprocess.PIPE,
@@ -175,12 +188,12 @@ class SubprocessMixin:
@asyncio.coroutine
def send_signal(proc):
# basic synchronization to wait until the program is sleeping
- line = yield from proc.stdout.readline()
+ line = yield From(proc.stdout.readline())
self.assertEqual(line, b'sleeping\n')
proc.send_signal(signal.SIGHUP)
- returncode = (yield from proc.wait())
- return returncode
+ returncode = yield From(proc.wait())
+ raise Return(returncode)
returncode = self.loop.run_until_complete(send_signal(proc))
self.assertEqual(-signal.SIGHUP, returncode)
@@ -203,7 +216,7 @@ class SubprocessMixin:
@asyncio.coroutine
def write_stdin(proc, data):
proc.stdin.write(data)
- yield from proc.stdin.drain()
+ yield From(proc.stdin.drain())
coro = write_stdin(proc, large_data)
# drain() must raise BrokenPipeError or ConnectionResetError
@@ -236,27 +249,28 @@ class SubprocessMixin:
@asyncio.coroutine
def connect_read_pipe_mock(*args, **kw):
- transport, protocol = yield from connect_read_pipe(*args, **kw)
+ connect = connect_read_pipe(*args, **kw)
+ transport, protocol = yield From(connect)
transport.pause_reading = mock.Mock()
transport.resume_reading = mock.Mock()
- return (transport, protocol)
+ raise Return(transport, protocol)
self.loop.connect_read_pipe = connect_read_pipe_mock
- proc = yield from asyncio.create_subprocess_exec(
+ proc = yield From(asyncio.create_subprocess_exec(
sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
limit=limit,
- loop=self.loop)
+ loop=self.loop))
stdout_transport = proc._transport.get_pipe_transport(1)
- stdout, stderr = yield from proc.communicate()
+ stdout, stderr = yield From(proc.communicate())
# The child process produced more than limit bytes of output,
# the stream reader transport should pause the protocol to not
# allocate too much memory.
- return (stdout, stdout_transport)
+ raise Return(stdout, stdout_transport)
# Issue #22685: Ensure that the stream reader pauses the protocol
# when the child process produces too much data
@@ -272,16 +286,16 @@ class SubprocessMixin:
@asyncio.coroutine
def len_message(message):
code = 'import sys; data = sys.stdin.read(); print(len(data))'
- proc = yield from asyncio.create_subprocess_exec(
+ proc = yield From(asyncio.create_subprocess_exec(
sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
close_fds=False,
- loop=self.loop)
- stdout, stderr = yield from proc.communicate(message)
- exitcode = yield from proc.wait()
- return (stdout, exitcode)
+ loop=self.loop))
+ stdout, stderr = yield From(proc.communicate(message))
+ exitcode = yield From(proc.wait())
+ raise Return(stdout, exitcode)
output, exitcode = self.loop.run_until_complete(len_message(b'abc'))
self.assertEqual(output.rstrip(), b'3')
@@ -292,15 +306,15 @@ class SubprocessMixin:
@asyncio.coroutine
def cancel_wait():
- proc = yield from asyncio.create_subprocess_exec(
+ proc = yield From(asyncio.create_subprocess_exec(
*PROGRAM_BLOCKED,
- loop=self.loop)
+ loop=self.loop))
# Create an internal future waiting on the process exit
task = self.loop.create_task(proc.wait())
self.loop.call_soon(task.cancel)
try:
- yield from task
+ yield From(task)
except asyncio.CancelledError:
pass
@@ -309,7 +323,7 @@ class SubprocessMixin:
# Kill the process and wait until it is done
proc.kill()
- yield from proc.wait()
+ yield From(proc.wait())
self.loop.run_until_complete(cancel_wait())
@@ -322,7 +336,7 @@ class SubprocessMixin:
self.loop.call_soon(task.cancel)
try:
- yield from task
+ yield From(task)
except asyncio.CancelledError:
pass
@@ -340,7 +354,7 @@ class SubprocessMixin:
self.loop.call_soon(task.cancel)
try:
- yield from task
+ yield From(task)
except asyncio.CancelledError:
pass
@@ -355,12 +369,11 @@ class SubprocessMixin:
def kill_running():
create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED)
- transport, protocol = yield from create
+ transport, protocol = yield From(create)
- kill_called = False
+ non_local = {'kill_called': False}
def kill():
- nonlocal kill_called
- kill_called = True
+ non_local['kill_called'] = True
orig_kill()
proc = transport.get_extra_info('subprocess')
@@ -368,8 +381,8 @@ class SubprocessMixin:
proc.kill = kill
returncode = transport.get_returncode()
transport.close()
- yield from transport._wait()
- return (returncode, kill_called)
+ yield From(transport._wait())
+ raise Return(returncode, non_local['kill_called'])
# Ignore "Close running child process: kill ..." log
with test_utils.disable_logger():
@@ -385,7 +398,7 @@ class SubprocessMixin:
def kill_running():
create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED)
- transport, protocol = yield from create
+ transport, protocol = yield From(create)
proc = transport.get_extra_info('subprocess')
# kill the process (but asyncio is not notified immediatly)
@@ -396,7 +409,8 @@ class SubprocessMixin:
proc_returncode = proc.poll()
transport_returncode = transport.get_returncode()
transport.close()
- return (proc_returncode, transport_returncode, proc.kill.called)
+ raise Return(proc_returncode, transport_returncode,
+ proc.kill.called)
# Ignore "Unknown child process pid ..." log of SafeChildWatcher,
# emitted because the test already consumes the exit status:
diff --git a/tests/test_tasks.py b/tests/test_tasks.py
index 0426787..6576ddb 100644
--- a/tests/test_tasks.py
+++ b/tests/test_tasks.py
@@ -5,28 +5,21 @@ import functools
import io
import os
import re
+import six
import sys
import types
-import unittest
import weakref
-from unittest import mock
-
-import asyncio
-from asyncio import coroutines
-from asyncio import test_utils
-try:
- from test import support
-except ImportError:
- from asyncio import test_support as support
-try:
- from test.support.script_helper import assert_python_ok
-except ImportError:
- try:
- from test.script_helper import assert_python_ok
- except ImportError:
- from asyncio.test_support import assert_python_ok
+
+import trollius as asyncio
+from trollius import From, Return
+from trollius import coroutines
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
+PY33 = (sys.version_info >= (3, 3))
PY34 = (sys.version_info >= (3, 4))
PY35 = (sys.version_info >= (3, 5))
@@ -35,6 +28,9 @@ PY35 = (sys.version_info >= (3, 5))
def coroutine_function():
pass
+@asyncio.coroutine
+def coroutine_function2(x, y):
+ yield From(asyncio.sleep(0))
@contextlib.contextmanager
def set_coroutine_debug(enabled):
@@ -164,14 +160,15 @@ class TaskTests(test_utils.TestCase):
self.assertIs(f, asyncio.async(f))
def test_get_stack(self):
- T = None
+ non_local = {'T': None}
@asyncio.coroutine
def foo():
- yield from bar()
+ yield From(bar())
@asyncio.coroutine
def bar():
+ T = non_local['T']
# test get_stack()
f = T.get_stack(limit=1)
try:
@@ -180,7 +177,7 @@ class TaskTests(test_utils.TestCase):
f = None
# test print_stack()
- file = io.StringIO()
+ file = six.StringIO()
T.print_stack(limit=1, file=file)
file.seek(0)
tb = file.read()
@@ -188,9 +185,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def runner():
- nonlocal T
- T = asyncio.ensure_future(foo(), loop=self.loop)
- yield from T
+ non_local['T'] = asyncio.ensure_future(foo(), loop=self.loop)
+ yield From(non_local['T'])
self.loop.run_until_complete(runner())
@@ -199,8 +195,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def notmuch():
- yield from []
- return 'abc'
+ yield From(None)
+ raise Return('abc')
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
@@ -214,7 +210,7 @@ class TaskTests(test_utils.TestCase):
# test coroutine object
gen = notmuch()
- if coroutines._DEBUG or PY35:
+ if PY35 or (coroutines._DEBUG and PY33):
coro_qualname = 'TaskTests.test_task_repr.<locals>.notmuch'
else:
coro_qualname = 'notmuch'
@@ -258,7 +254,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def notmuch():
- # notmuch() function doesn't use yield from: it will be wrapped by
+ # notmuch() function doesn't use yield: it will be wrapped by
# @coroutine decorator
return 123
@@ -272,13 +268,16 @@ class TaskTests(test_utils.TestCase):
# test coroutine object
gen = notmuch()
- if coroutines._DEBUG or PY35:
+ if PY35 or coroutines._DEBUG:
# On Python >= 3.5, generators now inherit the name of the
# function, as expected, and have a qualified name (__qualname__
# attribute).
coro_name = 'notmuch'
- coro_qualname = ('TaskTests.test_task_repr_coro_decorator'
- '.<locals>.notmuch')
+ if PY35 or (coroutines._DEBUG and PY33):
+ coro_qualname = ('TaskTests.test_task_repr_coro_decorator'
+ '.<locals>.notmuch')
+ else:
+ coro_qualname = 'notmuch'
else:
# On Python < 3.5, generators inherit the name of the code, not of
# the function. See: http://bugs.python.org/issue21205
@@ -326,7 +325,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def wait_for(fut):
- return (yield from fut)
+ res = yield From(fut)
+ raise Return(res)
fut = asyncio.Future(loop=self.loop)
task = asyncio.Task(wait_for(fut), loop=self.loop)
@@ -343,11 +343,8 @@ class TaskTests(test_utils.TestCase):
with set_coroutine_debug(True):
self.loop.set_debug(True)
- @asyncio.coroutine
- def func(x, y):
- yield from asyncio.sleep(0)
-
- partial_func = asyncio.coroutine(functools.partial(func, 1))
+ cb = functools.partial(coroutine_function2, 1)
+ partial_func = asyncio.coroutine(cb)
task = self.loop.create_task(partial_func(2))
# make warnings quiet
@@ -355,17 +352,16 @@ class TaskTests(test_utils.TestCase):
self.addCleanup(task._coro.close)
coro_repr = repr(task._coro)
- expected = ('<CoroWrapper TaskTests.test_task_repr_partial_corowrapper'
- '.<locals>.func(1)() running, ')
+ expected = ('<CoroWrapper coroutine_function2(1)() running, ')
self.assertTrue(coro_repr.startswith(expected),
coro_repr)
def test_task_basics(self):
@asyncio.coroutine
def outer():
- a = yield from inner1()
- b = yield from inner2()
- return a+b
+ a = yield From(inner1())
+ b = yield From(inner2())
+ raise Return(a+b)
@asyncio.coroutine
def inner1():
@@ -389,10 +385,11 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from asyncio.sleep(10.0, loop=loop)
- return 12
+ yield From(asyncio.sleep(10.0, loop=loop))
+ raise Return(12)
t = asyncio.Task(task(), loop=loop)
+ test_utils.run_briefly(loop)
loop.call_soon(t.cancel)
with self.assertRaises(asyncio.CancelledError):
loop.run_until_complete(t)
@@ -403,9 +400,9 @@ class TaskTests(test_utils.TestCase):
def test_cancel_yield(self):
@asyncio.coroutine
def task():
- yield
- yield
- return 12
+ yield From(None)
+ yield From(None)
+ raise Return(12)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop) # start coro
@@ -421,8 +418,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from f
- return 12
+ yield From(f)
+ raise Return(12)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop) # start task
@@ -437,8 +434,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from f
- return 12
+ yield From(f)
+ raise Return(12)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -459,11 +456,11 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from fut1
+ yield From(fut1)
try:
- yield from fut2
+ yield From(fut2)
except asyncio.CancelledError:
- return 42
+ raise Return(42)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -484,13 +481,13 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from fut1
+ yield From(fut1)
try:
- yield from fut2
+ yield From(fut2)
except asyncio.CancelledError:
pass
- res = yield from fut3
- return res
+ res = yield From(fut3)
+ raise Return(res)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -517,8 +514,8 @@ class TaskTests(test_utils.TestCase):
t.cancel()
self.assertTrue(t._must_cancel) # White-box test.
# The sleep should be cancelled immediately.
- yield from asyncio.sleep(100, loop=loop)
- return 12
+ yield From(asyncio.sleep(100, loop=loop))
+ raise Return(12)
t = asyncio.Task(task(), loop=loop)
self.assertRaises(
@@ -540,17 +537,16 @@ class TaskTests(test_utils.TestCase):
loop = self.new_test_loop(gen)
- x = 0
+ non_local = {'x': 0}
waiters = []
@asyncio.coroutine
def task():
- nonlocal x
- while x < 10:
+ while non_local['x'] < 10:
waiters.append(asyncio.sleep(0.1, loop=loop))
- yield from waiters[-1]
- x += 1
- if x == 2:
+ yield From(waiters[-1])
+ non_local['x'] += 1
+ if non_local['x'] == 3:
loop.stop()
t = asyncio.Task(task(), loop=loop)
@@ -559,7 +555,7 @@ class TaskTests(test_utils.TestCase):
self.assertEqual(str(cm.exception),
'Event loop stopped before Future completed.')
self.assertFalse(t.done())
- self.assertEqual(x, 2)
+ self.assertEqual(non_local['x'], 3)
self.assertAlmostEqual(0.3, loop.time())
# close generators
@@ -570,6 +566,7 @@ class TaskTests(test_utils.TestCase):
def test_wait_for(self):
+ @asyncio.coroutine
def gen():
when = yield
self.assertAlmostEqual(0.2, when)
@@ -579,27 +576,34 @@ class TaskTests(test_utils.TestCase):
loop = self.new_test_loop(gen)
- foo_running = None
+ non_local = {'foo_running': None}
@asyncio.coroutine
def foo():
- nonlocal foo_running
- foo_running = True
+ non_local['foo_running'] = True
try:
- yield from asyncio.sleep(0.2, loop=loop)
+ yield From(asyncio.sleep(0.2, loop=loop))
finally:
- foo_running = False
- return 'done'
+ non_local['foo_running'] = False
+ raise Return('done')
fut = asyncio.Task(foo(), loop=loop)
+ test_utils.run_briefly(loop)
with self.assertRaises(asyncio.TimeoutError):
loop.run_until_complete(asyncio.wait_for(fut, 0.1, loop=loop))
+
+ # Trollius issue #2: need to run the loop briefly to ensure that the
+ # cancellation is propagated to all tasks
+ waiter = asyncio.Future(loop=loop)
+ fut.add_done_callback(lambda f: waiter.set_result(True))
+ loop.run_until_complete(waiter)
+
self.assertTrue(fut.done())
# it should have been cancelled due to the timeout
self.assertTrue(fut.cancelled())
self.assertAlmostEqual(0.1, loop.time())
- self.assertEqual(foo_running, False)
+ self.assertEqual(non_local['foo_running'], False)
def test_wait_for_blocking(self):
loop = self.new_test_loop()
@@ -626,17 +630,24 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
- yield from asyncio.sleep(0.2, loop=loop)
- return 'done'
+ yield From(asyncio.sleep(0.2, loop=loop))
+ raise Return('done')
asyncio.set_event_loop(loop)
try:
fut = asyncio.Task(foo(), loop=loop)
+ test_utils.run_briefly(loop)
with self.assertRaises(asyncio.TimeoutError):
loop.run_until_complete(asyncio.wait_for(fut, 0.01))
finally:
asyncio.set_event_loop(None)
+ # Trollius issue #2: need to run the loop briefly to ensure that the
+ # cancellation is propagated to all tasks
+ waiter = asyncio.Future(loop=loop)
+ fut.add_done_callback(lambda f: waiter.set_result(True))
+ loop.run_until_complete(waiter)
+
self.assertAlmostEqual(0.01, loop.time())
self.assertTrue(fut.done())
self.assertTrue(fut.cancelled())
@@ -672,10 +683,10 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
- done, pending = yield from asyncio.wait([b, a], loop=loop)
+ done, pending = yield From(asyncio.wait([b, a], loop=loop))
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
- return 42
+ raise Return(42)
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertEqual(res, 42)
@@ -702,10 +713,10 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
- done, pending = yield from asyncio.wait([b, a])
+ done, pending = yield From(asyncio.wait([b, a]))
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
- return 42
+ raise Return(42)
asyncio.set_event_loop(loop)
res = loop.run_until_complete(
@@ -726,7 +737,7 @@ class TaskTests(test_utils.TestCase):
done, pending = self.loop.run_until_complete(task)
self.assertFalse(pending)
- self.assertEqual(set(f.result() for f in done), {'test', 'spam'})
+ self.assertEqual(set(f.result() for f in done), set(('test', 'spam')))
def test_wait_errors(self):
self.assertRaises(
@@ -760,8 +771,8 @@ class TaskTests(test_utils.TestCase):
loop=loop)
done, pending = loop.run_until_complete(task)
- self.assertEqual({b}, done)
- self.assertEqual({a}, pending)
+ self.assertEqual(set((b,)), done)
+ self.assertEqual(set((a,)), pending)
self.assertFalse(a.done())
self.assertTrue(b.done())
self.assertIsNone(b.result())
@@ -777,12 +788,12 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def coro1():
- yield
+ yield From(None)
@asyncio.coroutine
def coro2():
- yield
- yield
+ yield From(None)
+ yield From(None)
a = asyncio.Task(coro1(), loop=self.loop)
b = asyncio.Task(coro2(), loop=self.loop)
@@ -792,7 +803,7 @@ class TaskTests(test_utils.TestCase):
loop=self.loop)
done, pending = self.loop.run_until_complete(task)
- self.assertEqual({a, b}, done)
+ self.assertEqual(set((a, b)), done)
self.assertTrue(a.done())
self.assertIsNone(a.result())
self.assertTrue(b.done())
@@ -821,8 +832,8 @@ class TaskTests(test_utils.TestCase):
loop=loop)
done, pending = loop.run_until_complete(task)
- self.assertEqual({b}, done)
- self.assertEqual({a}, pending)
+ self.assertEqual(set((b,)), done)
+ self.assertEqual(set((a,)), pending)
self.assertAlmostEqual(0, loop.time())
# move forward to close generator
@@ -845,7 +856,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def exc():
- yield from asyncio.sleep(0.01, loop=loop)
+ yield From(asyncio.sleep(0.01, loop=loop))
raise ZeroDivisionError('err')
b = asyncio.Task(exc(), loop=loop)
@@ -853,8 +864,8 @@ class TaskTests(test_utils.TestCase):
loop=loop)
done, pending = loop.run_until_complete(task)
- self.assertEqual({b}, done)
- self.assertEqual({a}, pending)
+ self.assertEqual(set((b,)), done)
+ self.assertEqual(set((a,)), pending)
self.assertAlmostEqual(0.01, loop.time())
# move forward to close generator
@@ -876,14 +887,14 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def sleeper():
- yield from asyncio.sleep(0.15, loop=loop)
+ yield From(asyncio.sleep(0.15, loop=loop))
raise ZeroDivisionError('really')
b = asyncio.Task(sleeper(), loop=loop)
@asyncio.coroutine
def foo():
- done, pending = yield from asyncio.wait([b, a], loop=loop)
+ done, pending = yield From(asyncio.wait([b, a], loop=loop))
self.assertEqual(len(done), 2)
self.assertEqual(pending, set())
errors = set(f for f in done if f.exception() is not None)
@@ -913,8 +924,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
- done, pending = yield from asyncio.wait([b, a], timeout=0.11,
- loop=loop)
+ done, pending = yield From(asyncio.wait([b, a], timeout=0.11,
+ loop=loop))
self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b]))
@@ -964,17 +975,16 @@ class TaskTests(test_utils.TestCase):
# disable "slow callback" warning
loop.slow_callback_duration = 1.0
completed = set()
- time_shifted = False
+ non_local = {'time_shifted': False}
@asyncio.coroutine
def sleeper(dt, x):
- nonlocal time_shifted
- yield from asyncio.sleep(dt, loop=loop)
+ yield From(asyncio.sleep(dt, loop=loop))
completed.add(x)
- if not time_shifted and 'a' in completed and 'b' in completed:
- time_shifted = True
+ if not non_local['time_shifted'] and 'a' in completed and 'b' in completed:
+ non_local['time_shifted'] = True
loop.advance_time(0.14)
- return x
+ raise Return(x)
a = sleeper(0.01, 'a')
b = sleeper(0.01, 'b')
@@ -984,8 +994,8 @@ class TaskTests(test_utils.TestCase):
def foo():
values = []
for f in asyncio.as_completed([b, c, a], loop=loop):
- values.append((yield from f))
- return values
+ values.append((yield From(f)))
+ raise Return(values)
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
@@ -1017,11 +1027,11 @@ class TaskTests(test_utils.TestCase):
if values:
loop.advance_time(0.02)
try:
- v = yield from f
+ v = yield From(f)
values.append((1, v))
except asyncio.TimeoutError as exc:
values.append((2, exc))
- return values
+ raise Return(values)
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertEqual(len(res), 2, res)
@@ -1048,7 +1058,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
for f in asyncio.as_completed([a], timeout=1, loop=loop):
- v = yield from f
+ v = yield From(f)
self.assertEqual(v, 'a')
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
@@ -1064,7 +1074,7 @@ class TaskTests(test_utils.TestCase):
a = asyncio.sleep(0.05, 'a', loop=loop)
b = asyncio.sleep(0.10, 'b', loop=loop)
- fs = {a, b}
+ fs = set((a, b))
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
@@ -1089,12 +1099,12 @@ class TaskTests(test_utils.TestCase):
a = asyncio.sleep(0.05, 'a', loop=loop)
b = asyncio.sleep(0.05, 'b', loop=loop)
- fs = {a, b}
+ fs = set((a, b))
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
waiter = asyncio.wait(futs, loop=loop)
done, pending = loop.run_until_complete(waiter)
- self.assertEqual(set(f.result() for f in done), {'a', 'b'})
+ self.assertEqual(set(f.result() for f in done), set(('a', 'b')))
def test_as_completed_duplicate_coroutines(self):
@@ -1108,13 +1118,13 @@ class TaskTests(test_utils.TestCase):
c = coro('ham')
for f in asyncio.as_completed([c, c, coro('spam')],
loop=self.loop):
- result.append((yield from f))
- return result
+ result.append((yield From(f)))
+ raise Return(result)
fut = asyncio.Task(runner(), loop=self.loop)
self.loop.run_until_complete(fut)
result = fut.result()
- self.assertEqual(set(result), {'ham', 'spam'})
+ self.assertEqual(set(result), set(('ham', 'spam')))
self.assertEqual(len(result), 2)
def test_sleep(self):
@@ -1130,9 +1140,9 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def sleeper(dt, arg):
- yield from asyncio.sleep(dt/2, loop=loop)
- res = yield from asyncio.sleep(dt/2, arg, loop=loop)
- return res
+ yield From(asyncio.sleep(dt/2, loop=loop))
+ res = yield From(asyncio.sleep(dt/2, arg, loop=loop))
+ raise Return(res)
t = asyncio.Task(sleeper(0.1, 'yeah'), loop=loop)
loop.run_until_complete(t)
@@ -1152,22 +1162,21 @@ class TaskTests(test_utils.TestCase):
t = asyncio.Task(asyncio.sleep(10.0, 'yeah', loop=loop),
loop=loop)
- handle = None
+ non_local = {'handle': None}
orig_call_later = loop.call_later
def call_later(delay, callback, *args):
- nonlocal handle
- handle = orig_call_later(delay, callback, *args)
- return handle
+ non_local['handle'] = orig_call_later(delay, callback, *args)
+ return non_local['handle']
loop.call_later = call_later
test_utils.run_briefly(loop)
- self.assertFalse(handle._cancelled)
+ self.assertFalse(non_local['handle']._cancelled)
t.cancel()
test_utils.run_briefly(loop)
- self.assertTrue(handle._cancelled)
+ self.assertTrue(non_local['handle']._cancelled)
def test_task_cancel_sleeping_task(self):
@@ -1182,18 +1191,18 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def sleep(dt):
- yield from asyncio.sleep(dt, loop=loop)
+ yield From(asyncio.sleep(dt, loop=loop))
@asyncio.coroutine
def doit():
sleeper = asyncio.Task(sleep(5000), loop=loop)
loop.call_later(0.1, sleeper.cancel)
try:
- yield from sleeper
+ yield From(sleeper)
except asyncio.CancelledError:
- return 'cancelled'
+ raise Return('cancelled')
else:
- return 'slept in'
+ raise Return('slept in')
doer = doit()
self.assertEqual(loop.run_until_complete(doer), 'cancelled')
@@ -1204,7 +1213,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def coro():
- yield from fut
+ yield From(fut)
task = asyncio.Task(coro(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -1232,9 +1241,9 @@ class TaskTests(test_utils.TestCase):
def test_step_result(self):
@asyncio.coroutine
def notmuch():
- yield None
- yield 1
- return 'ko'
+ yield From(None)
+ yield From(1)
+ raise Return('ko')
self.assertRaises(
RuntimeError, self.loop.run_until_complete, notmuch())
@@ -1245,19 +1254,18 @@ class TaskTests(test_utils.TestCase):
class Fut(asyncio.Future):
def __init__(self, *args, **kwds):
self.cb_added = False
- super().__init__(*args, **kwds)
+ super(Fut, self).__init__(*args, **kwds)
def add_done_callback(self, fn):
self.cb_added = True
- super().add_done_callback(fn)
+ super(Fut, self).add_done_callback(fn)
fut = Fut(loop=self.loop)
- result = None
+ non_local = {'result': None}
@asyncio.coroutine
def wait_for_future():
- nonlocal result
- result = yield from fut
+ non_local['result'] = yield From(fut)
t = asyncio.Task(wait_for_future(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -1266,7 +1274,7 @@ class TaskTests(test_utils.TestCase):
res = object()
fut.set_result(res)
test_utils.run_briefly(self.loop)
- self.assertIs(res, result)
+ self.assertIs(res, non_local['result'])
self.assertTrue(t.done())
self.assertIsNone(t.result())
@@ -1292,24 +1300,24 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def sleeper():
- yield from asyncio.sleep(10, loop=loop)
+ yield From(asyncio.sleep(10, loop=loop))
base_exc = BaseException()
@asyncio.coroutine
def notmutch():
try:
- yield from sleeper()
+ yield From(sleeper())
except asyncio.CancelledError:
raise base_exc
task = asyncio.Task(notmutch(), loop=loop)
- test_utils.run_briefly(loop)
+ test_utils.run_briefly(loop, 2)
task.cancel()
self.assertFalse(task.done())
- self.assertRaises(BaseException, test_utils.run_briefly, loop)
+ self.assertRaises(BaseException, test_utils.run_briefly, loop, 2)
self.assertTrue(task.done())
self.assertFalse(task.cancelled())
@@ -1330,37 +1338,6 @@ class TaskTests(test_utils.TestCase):
yield
self.assertTrue(asyncio.iscoroutinefunction(fn2))
- def test_yield_vs_yield_from(self):
- fut = asyncio.Future(loop=self.loop)
-
- @asyncio.coroutine
- def wait_for_future():
- yield fut
-
- task = wait_for_future()
- with self.assertRaises(RuntimeError):
- self.loop.run_until_complete(task)
-
- self.assertFalse(fut.done())
-
- def test_yield_vs_yield_from_generator(self):
- @asyncio.coroutine
- def coro():
- yield
-
- @asyncio.coroutine
- def wait_for_future():
- gen = coro()
- try:
- yield gen
- finally:
- gen.close()
-
- task = wait_for_future()
- self.assertRaises(
- RuntimeError,
- self.loop.run_until_complete, task)
-
def test_coroutine_non_gen_function(self):
@asyncio.coroutine
def func():
@@ -1411,7 +1388,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def coro1(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
- yield from fut1
+ yield From(fut1)
self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
fut2.set_result(True)
@@ -1419,7 +1396,7 @@ class TaskTests(test_utils.TestCase):
def coro2(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
fut1.set_result(True)
- yield from fut2
+ yield From(fut2)
self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
task1 = asyncio.Task(coro1(self.loop), loop=self.loop)
@@ -1434,54 +1411,50 @@ class TaskTests(test_utils.TestCase):
def test_yield_future_passes_cancel(self):
# Cancelling outer() cancels inner() cancels waiter.
- proof = 0
+ non_local = {'proof': 0}
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
- nonlocal proof
try:
- yield from waiter
+ yield From(waiter)
except asyncio.CancelledError:
- proof += 1
+ non_local['proof'] += 1
raise
else:
self.fail('got past sleep() in inner()')
@asyncio.coroutine
def outer():
- nonlocal proof
try:
- yield from inner()
+ yield From(inner())
except asyncio.CancelledError:
- proof += 100 # Expect this path.
+ non_local['proof'] += 100 # Expect this path.
else:
- proof += 10
+ non_local['proof'] += 10
f = asyncio.ensure_future(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
self.loop.run_until_complete(f)
- self.assertEqual(proof, 101)
+ self.assertEqual(non_local['proof'], 101)
self.assertTrue(waiter.cancelled())
def test_yield_wait_does_not_shield_cancel(self):
# Cancelling outer() makes wait() return early, leaves inner()
# running.
- proof = 0
+ non_local = {'proof': 0}
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
- nonlocal proof
- yield from waiter
- proof += 1
+ yield From(waiter)
+ non_local['proof'] += 1
@asyncio.coroutine
def outer():
- nonlocal proof
- d, p = yield from asyncio.wait([inner()], loop=self.loop)
- proof += 100
+ d, p = yield From(asyncio.wait([inner()], loop=self.loop))
+ non_local['proof'] += 100
f = asyncio.ensure_future(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -1490,7 +1463,7 @@ class TaskTests(test_utils.TestCase):
asyncio.CancelledError, self.loop.run_until_complete, f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
- self.assertEqual(proof, 1)
+ self.assertEqual(non_local['proof'], 1)
def test_shield_result(self):
inner = asyncio.Future(loop=self.loop)
@@ -1524,20 +1497,18 @@ class TaskTests(test_utils.TestCase):
def test_shield_effect(self):
# Cancelling outer() does not affect inner().
- proof = 0
+ non_local = {'proof': 0}
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
- nonlocal proof
- yield from waiter
- proof += 1
+ yield From(waiter)
+ non_local['proof'] += 1
@asyncio.coroutine
def outer():
- nonlocal proof
- yield from asyncio.shield(inner(), loop=self.loop)
- proof += 100
+ yield From(asyncio.shield(inner(), loop=self.loop))
+ non_local['proof'] += 100
f = asyncio.ensure_future(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -1546,7 +1517,7 @@ class TaskTests(test_utils.TestCase):
self.loop.run_until_complete(f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
- self.assertEqual(proof, 1)
+ self.assertEqual(non_local['proof'], 1)
def test_shield_gather(self):
child1 = asyncio.Future(loop=self.loop)
@@ -1615,7 +1586,7 @@ class TaskTests(test_utils.TestCase):
def coro():
# The actual coroutine.
self.assertTrue(gen.gi_running)
- yield from fut
+ yield From(fut)
# A completed Future used to run the coroutine.
fut = asyncio.Future(loop=self.loop)
@@ -1648,13 +1619,15 @@ class TaskTests(test_utils.TestCase):
with set_coroutine_debug(True):
@asyncio.coroutine
def t1():
- return (yield from t2())
+ res = yield From(t2())
+ raise Return(res)
@asyncio.coroutine
def t2():
f = asyncio.Future(loop=self.loop)
asyncio.Task(t3(f), loop=self.loop)
- return (yield from f)
+ res = yield From(f)
+ raise Return(res)
@asyncio.coroutine
def t3(f):
@@ -1667,15 +1640,16 @@ class TaskTests(test_utils.TestCase):
def test_yield_from_corowrapper_send(self):
def foo():
a = yield
- return a
+ raise Return(a)
def call(arg):
- cw = asyncio.coroutines.CoroWrapper(foo())
+ cw = asyncio.coroutines.CoroWrapper(foo(), foo)
cw.send(None)
try:
cw.send(arg)
except StopIteration as ex:
- return ex.args[0]
+ ex.raised = True
+ return ex.value
else:
raise AssertionError('StopIteration was expected')
@@ -1684,8 +1658,9 @@ class TaskTests(test_utils.TestCase):
def test_corowrapper_weakref(self):
wd = weakref.WeakValueDictionary()
- def foo(): yield from []
- cw = asyncio.coroutines.CoroWrapper(foo())
+ def foo():
+ yield From(None)
+ cw = asyncio.coroutines.CoroWrapper(foo(), foo)
wd['cw'] = cw # Would fail without __weakref__ slot.
cw.gen = None # Suppress warning from __del__.
@@ -1695,7 +1670,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def kill_me(loop):
future = asyncio.Future(loop=loop)
- yield from future
+ yield From(future)
# at this point, the only reference to kill_me() task is
# the Task._wakeup() method in future._callbacks
raise Exception("code never reached")
@@ -1707,7 +1682,7 @@ class TaskTests(test_utils.TestCase):
# schedule the task
coro = kill_me(self.loop)
task = asyncio.ensure_future(coro, loop=self.loop)
- self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), {task})
+ self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), set((task,)))
# execute the task so it waits for future
self.loop._run_once()
@@ -1731,14 +1706,14 @@ class TaskTests(test_utils.TestCase):
})
mock_handler.reset_mock()
- @mock.patch('asyncio.coroutines.logger')
+ @mock.patch('trollius.coroutines.logger')
def test_coroutine_never_yielded(self, m_log):
with set_coroutine_debug(True):
@asyncio.coroutine
def coro_noop():
pass
- tb_filename = __file__
+ tb_filename = sys._getframe().f_code.co_filename
tb_lineno = sys._getframe().f_lineno + 2
# create a coroutine object but don't use it
coro_noop()
@@ -1747,14 +1722,14 @@ class TaskTests(test_utils.TestCase):
self.assertTrue(m_log.error.called)
message = m_log.error.call_args[0][0]
func_filename, func_lineno = test_utils.get_function_source(coro_noop)
-
- regex = (r'^<CoroWrapper %s\(?\)? .* at %s:%s, .*> '
+ coro_name = getattr(coro_noop, '__qualname__', coro_noop.__name__)
+ regex = (r'^<CoroWrapper %s\(\) .* at %s:%s, .*> '
r'was never yielded from\n'
r'Coroutine object created at \(most recent call last\):\n'
r'.*\n'
r' File "%s", line %s, in test_coroutine_never_yielded\n'
r' coro_noop\(\)$'
- % (re.escape(coro_noop.__qualname__),
+ % (re.escape(coro_name),
re.escape(func_filename), func_lineno,
re.escape(tb_filename), tb_lineno))
@@ -1766,8 +1741,9 @@ class TaskTests(test_utils.TestCase):
task = asyncio.Task(coroutine_function(), loop=self.loop)
lineno = sys._getframe().f_lineno - 1
self.assertIsInstance(task._source_traceback, list)
+ filename = sys._getframe().f_code.co_filename
self.assertEqual(task._source_traceback[-1][:3],
- (__file__,
+ (filename,
lineno,
'test_task_source_traceback'))
self.loop.run_until_complete(task)
@@ -1780,7 +1756,7 @@ class TaskTests(test_utils.TestCase):
def blocking_coroutine():
fut = asyncio.Future(loop=loop)
# Block: fut result is never set
- yield from fut
+ yield From(fut)
task = loop.create_task(blocking_coroutine())
@@ -1874,30 +1850,19 @@ class GatherTestsBase:
aio_path = os.path.dirname(os.path.dirname(asyncio.__file__))
code = '\n'.join((
- 'import asyncio.coroutines',
- 'print(asyncio.coroutines._DEBUG)'))
-
- # Test with -E to not fail if the unit test was run with
- # PYTHONASYNCIODEBUG set to a non-empty string
- sts, stdout, stderr = assert_python_ok('-E', '-c', code,
- PYTHONPATH=aio_path)
- self.assertEqual(stdout.rstrip(), b'False')
+ 'import trollius.coroutines',
+ 'print(trollius.coroutines._DEBUG)'))
- sts, stdout, stderr = assert_python_ok('-c', code,
- PYTHONASYNCIODEBUG='',
- PYTHONPATH=aio_path)
+ sts, stdout, stderr = support.assert_python_ok('-c', code,
+ TROLLIUSDEBUG='',
+ PYTHONPATH=aio_path)
self.assertEqual(stdout.rstrip(), b'False')
- sts, stdout, stderr = assert_python_ok('-c', code,
- PYTHONASYNCIODEBUG='1',
- PYTHONPATH=aio_path)
+ sts, stdout, stderr = support.assert_python_ok('-c', code,
+ TROLLIUSDEBUG='1',
+ PYTHONPATH=aio_path)
self.assertEqual(stdout.rstrip(), b'True')
- sts, stdout, stderr = assert_python_ok('-E', '-c', code,
- PYTHONASYNCIODEBUG='1',
- PYTHONPATH=aio_path)
- self.assertEqual(stdout.rstrip(), b'False')
-
class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
@@ -1986,7 +1951,7 @@ class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
def setUp(self):
- super().setUp()
+ super(CoroutineGatherTests, self).setUp()
asyncio.set_event_loop(self.one_loop)
def wrap_futures(self, *futures):
@@ -1994,7 +1959,8 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
for fut in futures:
@asyncio.coroutine
def coro(fut=fut):
- return (yield from fut)
+ result = (yield From(fut))
+ raise Return(result)
coros.append(coro())
return coros
@@ -2026,44 +1992,42 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
def test_cancellation_broadcast(self):
# Cancelling outer() cancels all children.
- proof = 0
+ non_local = {'proof': 0}
waiter = asyncio.Future(loop=self.one_loop)
@asyncio.coroutine
def inner():
- nonlocal proof
- yield from waiter
- proof += 1
+ yield From(waiter)
+ non_local['proof'] += 1
child1 = asyncio.ensure_future(inner(), loop=self.one_loop)
child2 = asyncio.ensure_future(inner(), loop=self.one_loop)
- gatherer = None
+ non_local['gatherer'] = None
@asyncio.coroutine
def outer():
- nonlocal proof, gatherer
- gatherer = asyncio.gather(child1, child2, loop=self.one_loop)
- yield from gatherer
- proof += 100
+ non_local['gatherer'] = asyncio.gather(child1, child2, loop=self.one_loop)
+ yield From(non_local['gatherer'])
+ non_local['proof'] += 100
f = asyncio.ensure_future(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
self.assertTrue(f.cancel())
with self.assertRaises(asyncio.CancelledError):
self.one_loop.run_until_complete(f)
- self.assertFalse(gatherer.cancel())
+ self.assertFalse(non_local['gatherer'].cancel())
self.assertTrue(waiter.cancelled())
self.assertTrue(child1.cancelled())
self.assertTrue(child2.cancelled())
test_utils.run_briefly(self.one_loop)
- self.assertEqual(proof, 0)
+ self.assertEqual(non_local['proof'], 0)
def test_exception_marking(self):
# Test for the first line marked "Mark exception retrieved."
@asyncio.coroutine
def inner(f):
- yield from f
+ yield From(f)
raise RuntimeError('should not be ignored')
a = asyncio.Future(loop=self.one_loop)
@@ -2071,7 +2035,7 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
@asyncio.coroutine
def outer():
- yield from asyncio.gather(inner(a), inner(b), loop=self.one_loop)
+ yield From(asyncio.gather(inner(a), inner(b), loop=self.one_loop))
f = asyncio.ensure_future(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
diff --git a/tests/test_transports.py b/tests/test_transports.py
index 3b6e3d6..d4c5780 100644
--- a/tests/test_transports.py
+++ b/tests/test_transports.py
@@ -1,13 +1,19 @@
"""Tests for transports.py."""
-import unittest
-from unittest import mock
+import trollius as asyncio
+from trollius import test_utils
+from trollius import transports
+from trollius.test_utils import mock
+from trollius.test_utils import unittest
-import asyncio
-from asyncio import transports
+try:
+ memoryview
+except NameError:
+ # Python 2.6
+ memoryview = buffer
-class TransportTests(unittest.TestCase):
+class TransportTests(test_utils.TestCase):
def test_ctor_extra_is_none(self):
transport = asyncio.Transport()
diff --git a/tests/test_unix_events.py b/tests/test_unix_events.py
index dc0835c..b6b5bdc 100644
--- a/tests/test_unix_events.py
+++ b/tests/test_unix_events.py
@@ -1,6 +1,7 @@
"""Tests for unix_events.py."""
import collections
+import contextlib
import errno
import io
import os
@@ -10,17 +11,17 @@ import stat
import sys
import tempfile
import threading
-import unittest
-from unittest import mock
+from trollius.test_utils import unittest
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
-
-import asyncio
-from asyncio import log
-from asyncio import test_utils
-from asyncio import unix_events
+import trollius as asyncio
+from trollius import log
+from trollius import test_utils
+from trollius import unix_events
+from trollius.py33_exceptions import BlockingIOError, ChildProcessError
+from trollius.test_utils import mock
MOCK_ANY = mock.ANY
@@ -60,7 +61,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop._handle_signal(signal.NSIG + 1)
self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_setup_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.set_wakeup_fd.side_effect = ValueError
@@ -70,13 +71,13 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_coroutine_error(self, m_signal):
m_signal.NSIG = signal.NSIG
@asyncio.coroutine
def simple_coroutine():
- yield from []
+ yield None
# callback must not be a coroutine function
coro_func = simple_coroutine
@@ -88,7 +89,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop.add_signal_handler,
signal.SIGINT, func)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_add_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
@@ -98,7 +99,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertIsInstance(h, asyncio.Handle)
self.assertEqual(h._callback, cb)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_install_error(self, m_signal):
m_signal.NSIG = signal.NSIG
@@ -116,8 +117,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
- @mock.patch('asyncio.unix_events.signal')
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.unix_events.signal')
+ @mock.patch('trollius.base_events.logger')
def test_add_signal_handler_install_error2(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
@@ -133,8 +134,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertFalse(m_logging.info.called)
self.assertEqual(1, m_signal.set_wakeup_fd.call_count)
- @mock.patch('asyncio.unix_events.signal')
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.unix_events.signal')
+ @mock.patch('trollius.base_events.logger')
def test_add_signal_handler_install_error3(self, m_logging, m_signal):
class Err(OSError):
errno = errno.EINVAL
@@ -148,7 +149,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertFalse(m_logging.info.called)
self.assertEqual(2, m_signal.set_wakeup_fd.call_count)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
@@ -161,7 +162,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertEqual(
(signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.SIGINT = signal.SIGINT
@@ -178,8 +179,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
(signal.SIGINT, m_signal.default_int_handler),
m_signal.signal.call_args[0])
- @mock.patch('asyncio.unix_events.signal')
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.unix_events.signal')
+ @mock.patch('trollius.base_events.logger')
def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
@@ -189,7 +190,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop.remove_signal_handler(signal.SIGHUP)
self.assertTrue(m_logging.info)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_error(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
@@ -199,7 +200,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertRaises(
OSError, self.loop.remove_signal_handler, signal.SIGHUP)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_error2(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
@@ -211,7 +212,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertRaises(
RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_close(self, m_signal):
m_signal.NSIG = signal.NSIG
@@ -240,7 +241,7 @@ class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
- with sock:
+ with contextlib.closing(sock):
coro = self.loop.create_unix_server(lambda: None, path)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
@@ -268,18 +269,19 @@ class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
def test_create_unix_server_path_inetsock(self):
sock = socket.socket()
- with sock:
+ with contextlib.closing(sock):
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Socket was expected'):
self.loop.run_until_complete(coro)
- @mock.patch('asyncio.unix_events.socket')
+ @mock.patch('trollius.unix_events.socket')
def test_create_unix_server_bind_error(self, m_socket):
# Ensure that the socket is closed on any bind error
sock = mock.Mock()
m_socket.socket.return_value = sock
+ m_socket.error = socket.error
sock.bind.side_effect = OSError
coro = self.loop.create_unix_server(lambda: None, path="/test")
@@ -331,7 +333,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
- blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
+ blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
@@ -389,7 +391,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.data_received.called)
- @mock.patch('asyncio.log.logger.error')
+ @mock.patch('trollius.log.logger.error')
@mock.patch('os.read')
def test__read_ready_error(self, m_read, m_logexc):
tr = self.read_pipe_transport()
@@ -480,7 +482,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
- blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
+ blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
@@ -556,7 +558,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
- @mock.patch('asyncio.unix_events.logger')
+ @mock.patch('trollius.unix_events.logger')
@mock.patch('os.write')
def test_write_err(self, m_write, m_log):
tr = self.write_pipe_transport()
@@ -646,7 +648,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
- @mock.patch('asyncio.log.logger.error')
+ @mock.patch('trollius.log.logger.error')
@mock.patch('os.write')
def test__write_ready_err(self, m_write, m_logexc):
tr = self.write_pipe_transport()
@@ -756,7 +758,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
self.assertFalse(self.protocol.connection_lost.called)
-class AbstractChildWatcherTests(unittest.TestCase):
+class AbstractChildWatcherTests(test_utils.TestCase):
def test_not_implemented(self):
f = mock.Mock()
@@ -775,7 +777,7 @@ class AbstractChildWatcherTests(unittest.TestCase):
NotImplementedError, watcher.__exit__, f, f, f)
-class BaseChildWatcherTests(unittest.TestCase):
+class BaseChildWatcherTests(test_utils.TestCase):
def test_not_implemented(self):
f = mock.Mock()
@@ -845,19 +847,27 @@ class ChildWatcherTestsMixin:
def waitpid_mocks(func):
def wrapped_func(self):
+ exit_stack = []
+
def patch(target, wrapper):
- return mock.patch(target, wraps=wrapper,
- new_callable=mock.Mock)
-
- with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
- patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
- patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
- patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
- patch('os.waitpid', self.waitpid) as m_waitpid:
+ m = mock.patch(target, wraps=wrapper)
+ exit_stack.append(m)
+ return m.__enter__()
+
+ m_waitpid = patch('os.waitpid', self.waitpid)
+ m_WIFEXITED = patch('os.WIFEXITED', self.WIFEXITED)
+ m_WIFSIGNALED = patch('os.WIFSIGNALED', self.WIFSIGNALED)
+ m_WEXITSTATUS = patch('os.WEXITSTATUS', self.WEXITSTATUS)
+ m_WTERMSIG = patch('os.WTERMSIG', self.WTERMSIG)
+ try:
func(self, WaitPidMocks(m_waitpid,
m_WIFEXITED, m_WIFSIGNALED,
m_WEXITSTATUS, m_WTERMSIG,
))
+ finally:
+ for obj in reversed(exit_stack):
+ obj.__exit__(None, None, None)
+
return wrapped_func
@waitpid_mocks
@@ -1330,17 +1340,18 @@ class ChildWatcherTestsMixin:
callback1 = mock.Mock()
callback2 = mock.Mock()
- with self.ignore_warnings, self.watcher:
- self.running = True
- # child 1 terminates
- self.add_zombie(591, 7)
- # an unknown child terminates
- self.add_zombie(593, 17)
+ with self.ignore_warnings:
+ with self.watcher:
+ self.running = True
+ # child 1 terminates
+ self.add_zombie(591, 7)
+ # an unknown child terminates
+ self.add_zombie(593, 17)
- self.watcher._sig_chld()
+ self.watcher._sig_chld()
- self.watcher.add_child_handler(591, callback1)
- self.watcher.add_child_handler(592, callback2)
+ self.watcher.add_child_handler(591, callback1)
+ self.watcher.add_child_handler(592, callback2)
callback1.assert_called_once_with(591, 7)
self.assertFalse(callback2.called)
@@ -1359,15 +1370,15 @@ class ChildWatcherTestsMixin:
self.loop = self.new_test_loop()
patch = mock.patch.object
- with patch(old_loop, "remove_signal_handler") as m_old_remove, \
- patch(self.loop, "add_signal_handler") as m_new_add:
+ with patch(old_loop, "remove_signal_handler") as m_old_remove:
+ with patch(self.loop, "add_signal_handler") as m_new_add:
- self.watcher.attach_loop(self.loop)
+ self.watcher.attach_loop(self.loop)
- m_old_remove.assert_called_once_with(
- signal.SIGCHLD)
- m_new_add.assert_called_once_with(
- signal.SIGCHLD, self.watcher._sig_chld)
+ m_old_remove.assert_called_once_with(
+ signal.SIGCHLD)
+ m_new_add.assert_called_once_with(
+ signal.SIGCHLD, self.watcher._sig_chld)
# child terminates
self.running = False
@@ -1479,7 +1490,7 @@ class FastChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase):
return asyncio.FastChildWatcher()
-class PolicyTests(unittest.TestCase):
+class PolicyTests(test_utils.TestCase):
def create_policy(self):
return asyncio.DefaultEventLoopPolicy()
diff --git a/tests/test_windows_events.py b/tests/test_windows_events.py
index 7fcf402..ef0ab92 100644
--- a/tests/test_windows_events.py
+++ b/tests/test_windows_events.py
@@ -1,17 +1,18 @@
import os
import sys
-import unittest
-from unittest import mock
+from trollius.test_utils import unittest
if sys.platform != 'win32':
raise unittest.SkipTest('Windows only')
-import _winapi
-
-import asyncio
-from asyncio import _overlapped
-from asyncio import test_utils
-from asyncio import windows_events
+import trollius as asyncio
+from trollius import Return, From
+from trollius import _overlapped
+from trollius import py33_winapi as _winapi
+from trollius import windows_events
+from trollius.py33_exceptions import PermissionError, FileNotFoundError
+from trollius import test_utils
+from trollius.test_utils import mock
class UpperProto(asyncio.Protocol):
@@ -58,11 +59,11 @@ class ProactorTests(test_utils.TestCase):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError):
- yield from self.loop.create_pipe_connection(
- asyncio.Protocol, ADDRESS)
+ yield From(self.loop.create_pipe_connection(
+ asyncio.Protocol, ADDRESS))
- [server] = yield from self.loop.start_serving_pipe(
- UpperProto, ADDRESS)
+ [server] = yield From(self.loop.start_serving_pipe(
+ UpperProto, ADDRESS))
self.assertIsInstance(server, windows_events.PipeServer)
clients = []
@@ -70,27 +71,27 @@ class ProactorTests(test_utils.TestCase):
stream_reader = asyncio.StreamReader(loop=self.loop)
protocol = asyncio.StreamReaderProtocol(stream_reader,
loop=self.loop)
- trans, proto = yield from self.loop.create_pipe_connection(
- lambda: protocol, ADDRESS)
+ trans, proto = yield From(self.loop.create_pipe_connection(
+ lambda: protocol, ADDRESS))
self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto)
clients.append((stream_reader, trans))
for i, (r, w) in enumerate(clients):
- w.write('lower-{}\n'.format(i).encode())
+ w.write('lower-{0}\n'.format(i).encode())
for i, (r, w) in enumerate(clients):
- response = yield from r.readline()
- self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
+ response = yield From(r.readline())
+ self.assertEqual(response, 'LOWER-{0}\n'.format(i).encode())
w.close()
server.close()
with self.assertRaises(FileNotFoundError):
- yield from self.loop.create_pipe_connection(
- asyncio.Protocol, ADDRESS)
+ yield From(self.loop.create_pipe_connection(
+ asyncio.Protocol, ADDRESS))
- return 'done'
+ raise Return('done')
def test_connect_pipe_cancel(self):
exc = OSError()
diff --git a/tests/test_windows_utils.py b/tests/test_windows_utils.py
index d48b8bc..f73e263 100644
--- a/tests/test_windows_utils.py
+++ b/tests/test_windows_utils.py
@@ -2,21 +2,18 @@
import socket
import sys
-import unittest
import warnings
-from unittest import mock
+from trollius.test_utils import unittest
if sys.platform != 'win32':
raise unittest.SkipTest('Windows only')
-import _winapi
-
-from asyncio import _overlapped
-from asyncio import windows_utils
-try:
- from test import support
-except ImportError:
- from asyncio import test_support as support
+from trollius import _overlapped
+from trollius import py33_winapi as _winapi
+from trollius import test_support as support
+from trollius import test_utils
+from trollius import windows_utils
+from trollius.test_utils import mock
class WinsocketpairTests(unittest.TestCase):
@@ -31,14 +28,15 @@ class WinsocketpairTests(unittest.TestCase):
ssock, csock = windows_utils.socketpair()
self.check_winsocketpair(ssock, csock)
- @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
+ @unittest.skipUnless(support.IPV6_ENABLED,
+ 'IPv6 not supported or enabled')
def test_winsocketpair_ipv6(self):
ssock, csock = windows_utils.socketpair(family=socket.AF_INET6)
self.check_winsocketpair(ssock, csock)
@unittest.skipIf(hasattr(socket, 'socketpair'),
'socket.socketpair is available')
- @mock.patch('asyncio.windows_utils.socket')
+ @mock.patch('trollius.windows_utils.socket')
def test_winsocketpair_exc(self, m_socket):
m_socket.AF_INET = socket.AF_INET
m_socket.SOCK_STREAM = socket.SOCK_STREAM
@@ -58,7 +56,7 @@ class WinsocketpairTests(unittest.TestCase):
@unittest.skipIf(hasattr(socket, 'socketpair'),
'socket.socketpair is available')
- @mock.patch('asyncio.windows_utils.socket')
+ @mock.patch('trollius.windows_utils.socket')
def test_winsocketpair_close(self, m_socket):
m_socket.AF_INET = socket.AF_INET
m_socket.SOCK_STREAM = socket.SOCK_STREAM
@@ -84,7 +82,7 @@ class PipeTests(unittest.TestCase):
ERROR_IO_INCOMPLETE = 996
try:
ov1.getresult()
- except OSError as e:
+ except WindowsError as e:
self.assertEqual(e.winerror, ERROR_IO_INCOMPLETE)
else:
raise RuntimeError('expected ERROR_IO_INCOMPLETE')
@@ -94,15 +92,15 @@ class PipeTests(unittest.TestCase):
self.assertEqual(ov2.error, 0)
ov2.WriteFile(h2, b"hello")
- self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})
+ self.assertIn(ov2.error, set((0, _winapi.ERROR_IO_PENDING)))
- res = _winapi.WaitForMultipleObjects([ov2.event], False, 100)
+ res = _winapi.WaitForSingleObject(ov2.event, 100)
self.assertEqual(res, _winapi.WAIT_OBJECT_0)
self.assertFalse(ov1.pending)
self.assertEqual(ov1.error, ERROR_IO_INCOMPLETE)
self.assertFalse(ov2.pending)
- self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})
+ self.assertIn(ov2.error, set((0, _winapi.ERROR_IO_PENDING)))
self.assertEqual(ov1.getresult(), b"hello")
finally:
_winapi.CloseHandle(h1)
@@ -117,7 +115,8 @@ class PipeTests(unittest.TestCase):
# check garbage collection of p closes handle
with warnings.catch_warnings():
- warnings.filterwarnings("ignore", "", ResourceWarning)
+ if sys.version_info >= (3, 4):
+ warnings.filterwarnings("ignore", "", ResourceWarning)
del p
support.gc_collect()
try:
@@ -173,9 +172,10 @@ class PopenTests(unittest.TestCase):
self.assertTrue(msg.upper().rstrip().startswith(out))
self.assertTrue(b"stderr".startswith(err))
- # The context manager calls wait() and closes resources
- with p:
- pass
+ p.stdin.close()
+ p.stdout.close()
+ p.stderr.close()
+ p.wait()
if __name__ == '__main__':
diff --git a/tox.ini b/tox.ini
index 3030441..2dde943 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,21 +1,98 @@
[tox]
-envlist = py33,py34,py3_release
+envlist = py26,py27,py2_release,py2_no_ssl,py2_no_concurrent,py32,py33,py34,py3_release,py3_no_ssl
+# and: pyflakes2,pyflakes3
[testenv]
deps=
aiotest
-# Run tests in debug mode
+ six
setenv =
- PYTHONASYNCIODEBUG = 1
+ TROLLIUSDEBUG = 1
commands=
python -Wd runtests.py -r {posargs}
python -Wd run_aiotest.py -r {posargs}
-[testenv:py3_release]
+[testenv:pyflakes2]
+basepython = python2
+deps=
+ pyflakes
+commands=
+ pyflakes trollius tests runtests.py check.py run_aiotest.py setup.py
+
+[testenv:pyflakes3]
+basepython = python3
+deps=
+ pyflakes
+commands=
+ pyflakes trollius tests runtests.py check.py run_aiotest.py setup.py
+
+[testenv:py26]
+deps=
+ aiotest
+ futures
+ mock==1.0.1
+ ordereddict
+ six
+ unittest2
+
+[testenv:py27]
+deps=
+ aiotest
+ futures
+ mock
+ six
+ unittest2
+
+[testenv:py2_release]
# Run tests in release mode
+basepython = python2
+deps=
+ aiotest
+ futures
+ mock
+ six
+ unittest2
setenv =
- PYTHONASYNCIODEBUG =
-basepython = python3
+ TROLLIUSDEBUG =
+
+[testenv:py2_no_ssl]
+basepython = python2
+deps=
+ aiotest
+ futures
+ mock
+ six
+ unittest2
+commands=
+ python -Wd runtests.py --no-ssl -r {posargs}
+
+[testenv:py2_no_concurrent]
+basepython = python2
+deps=
+ aiotest
+ futures
+ mock
+ six
+ unittest2
+commands=
+ python -Wd runtests.py --no-concurrent -r {posargs}
+
+[testenv:py32]
+deps=
+ aiotest
+ mock
+ six
[testenv:py35]
basepython = python3.5
+
+[testenv:py3_release]
+# Run tests in release mode
+basepython = python3
+setenv =
+ TROLLIUSDEBUG =
+
+[testenv:py3_no_ssl]
+basepython = python3
+commands=
+ python -Wd runtests.py --no-ssl -r {posargs}
diff --git a/asyncio/__init__.py b/trollius/__init__.py
index 011466b..a1379fb 100644
--- a/asyncio/__init__.py
+++ b/trollius/__init__.py
@@ -1,4 +1,4 @@
-"""The asyncio package, tracking PEP 3156."""
+"""The trollius package, tracking PEP 3156."""
import sys
@@ -24,6 +24,7 @@ from .events import *
from .futures import *
from .locks import *
from .protocols import *
+from .py33_exceptions import *
from .queues import *
from .streams import *
from .subprocess import *
@@ -33,6 +34,7 @@ from .transports import *
__all__ = (base_events.__all__ +
coroutines.__all__ +
events.__all__ +
+ py33_exceptions.__all__ +
futures.__all__ +
locks.__all__ +
protocols.__all__ +
@@ -48,3 +50,10 @@ if sys.platform == 'win32': # pragma: no cover
else:
from .unix_events import * # pragma: no cover
__all__ += unix_events.__all__
+
+try:
+ from .py3_ssl import *
+ __all__ += py3_ssl.__all__
+except ImportError:
+ # SSL support is optionnal
+ pass
diff --git a/asyncio/base_events.py b/trollius/base_events.py
index c205445..c5e6eff 100644
--- a/asyncio/base_events.py
+++ b/trollius/base_events.py
@@ -15,26 +15,35 @@ to modify the meaning of the API call itself.
import collections
-import concurrent.futures
import heapq
import inspect
import logging
import os
import socket
import subprocess
-import threading
-import time
-import traceback
import sys
+import traceback
import warnings
+try:
+ from collections import OrderedDict
+except ImportError:
+ # Python 2.6: use ordereddict backport
+ from ordereddict import OrderedDict
+try:
+ from threading import get_ident as _get_thread_ident
+except ImportError:
+ # Python 2
+ from threading import _get_ident as _get_thread_ident
from . import compat
from . import coroutines
from . import events
from . import futures
from . import tasks
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
+from .executor import get_default_executor
from .log import logger
+from .time_monotonic import time_monotonic, time_monotonic_resolution
__all__ = ['BaseEventLoop']
@@ -94,7 +103,7 @@ def _check_resolved_address(sock, address):
# if available
try:
socket.inet_pton(family, host)
- except OSError as exc:
+ except socket.error as exc:
raise ValueError("address must be resolved (IP address), "
"got host %r: %s"
% (host, exc))
@@ -108,10 +117,10 @@ def _check_resolved_address(sock, address):
type_mask |= socket.SOCK_CLOEXEC
try:
socket.getaddrinfo(host, port,
- family=family,
- type=(sock.type & ~type_mask),
- proto=sock.proto,
- flags=socket.AI_NUMERICHOST)
+ family,
+ (sock.type & ~type_mask),
+ sock.proto,
+ socket.AI_NUMERICHOST)
except socket.gaierror as err:
raise ValueError("address must be resolved (IP address), "
"got host %r: %s"
@@ -172,10 +181,10 @@ class Server(events.AbstractServer):
@coroutine
def wait_closed(self):
if self.sockets is None or self._waiters is None:
- return
+ raise Return()
waiter = futures.Future(loop=self._loop)
self._waiters.append(waiter)
- yield from waiter
+ yield From(waiter)
class BaseEventLoop(events.AbstractEventLoop):
@@ -190,10 +199,9 @@ class BaseEventLoop(events.AbstractEventLoop):
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
- self._clock_resolution = time.get_clock_info('monotonic').resolution
+ self._clock_resolution = time_monotonic_resolution
self._exception_handler = None
- self.set_debug((not sys.flags.ignore_environment
- and bool(os.environ.get('PYTHONASYNCIODEBUG'))))
+ self.set_debug(bool(os.environ.get('TROLLIUSDEBUG')))
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
@@ -238,13 +246,13 @@ class BaseEventLoop(events.AbstractEventLoop):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
- def _make_socket_transport(self, sock, protocol, waiter=None, *,
+ def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
- *, server_side=False, server_hostname=None,
+ server_side=False, server_hostname=None,
extra=None, server=None):
"""Create SSL transport."""
raise NotImplementedError
@@ -294,7 +302,7 @@ class BaseEventLoop(events.AbstractEventLoop):
if self.is_running():
raise RuntimeError('Event loop is running.')
self._set_coroutine_wrapper(self._debug)
- self._thread_id = threading.get_ident()
+ self._thread_id = _get_thread_ident()
try:
while True:
try:
@@ -318,7 +326,7 @@ class BaseEventLoop(events.AbstractEventLoop):
"""
self._check_closed()
- new_task = not isinstance(future, futures.Future)
+ new_task = not isinstance(future, futures._FUTURE_CLASSES)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
@@ -366,7 +374,7 @@ class BaseEventLoop(events.AbstractEventLoop):
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
- self._scheduled.clear()
+ del self._scheduled[:]
executor = self._default_executor
if executor is not None:
self._default_executor = None
@@ -397,7 +405,7 @@ class BaseEventLoop(events.AbstractEventLoop):
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
- return time.monotonic()
+ return time_monotonic()
def call_later(self, delay, callback, *args):
"""Arrange for a callback to be called at a given time.
@@ -477,7 +485,7 @@ class BaseEventLoop(events.AbstractEventLoop):
"""
if self._thread_id is None:
return
- thread_id = threading.get_ident()
+ thread_id = _get_thread_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
@@ -507,7 +515,7 @@ class BaseEventLoop(events.AbstractEventLoop):
if executor is None:
executor = self._default_executor
if executor is None:
- executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
+ executor = get_default_executor()
self._default_executor = executor
return futures.wrap_future(executor.submit(func, *args), loop=self)
@@ -539,7 +547,7 @@ class BaseEventLoop(events.AbstractEventLoop):
logger.debug(msg)
return addrinfo
- def getaddrinfo(self, host, port, *,
+ def getaddrinfo(self, host, port,
family=0, type=0, proto=0, flags=0):
if self._debug:
return self.run_in_executor(None, self._getaddrinfo_debug,
@@ -552,7 +560,7 @@ class BaseEventLoop(events.AbstractEventLoop):
return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
@coroutine
- def create_connection(self, protocol_factory, host=None, port=None, *,
+ def create_connection(self, protocol_factory, host=None, port=None,
ssl=None, family=0, proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None):
"""Connect to a TCP server.
@@ -602,15 +610,15 @@ class BaseEventLoop(events.AbstractEventLoop):
else:
f2 = None
- yield from tasks.wait(fs, loop=self)
+ yield From(tasks.wait(fs, loop=self))
infos = f1.result()
if not infos:
- raise OSError('getaddrinfo() returned empty list')
+ raise socket.error('getaddrinfo() returned empty list')
if f2 is not None:
laddr_infos = f2.result()
if not laddr_infos:
- raise OSError('getaddrinfo() returned empty list')
+ raise socket.error('getaddrinfo() returned empty list')
exceptions = []
for family, type, proto, cname, address in infos:
@@ -622,11 +630,11 @@ class BaseEventLoop(events.AbstractEventLoop):
try:
sock.bind(laddr)
break
- except OSError as exc:
- exc = OSError(
+ except socket.error as exc:
+ exc = socket.error(
exc.errno, 'error while '
'attempting to bind on address '
- '{!r}: {}'.format(
+ '{0!r}: {1}'.format(
laddr, exc.strerror.lower()))
exceptions.append(exc)
else:
@@ -635,8 +643,8 @@ class BaseEventLoop(events.AbstractEventLoop):
continue
if self._debug:
logger.debug("connect %r to %r", sock, address)
- yield from self.sock_connect(sock, address)
- except OSError as exc:
+ yield From(self.sock_connect(sock, address))
+ except socket.error as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
@@ -656,7 +664,7 @@ class BaseEventLoop(events.AbstractEventLoop):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
- raise OSError('Multiple exceptions: {}'.format(
+ raise socket.error('Multiple exceptions: {0}'.format(
', '.join(str(exc) for exc in exceptions)))
elif sock is None:
@@ -665,15 +673,15 @@ class BaseEventLoop(events.AbstractEventLoop):
sock.setblocking(False)
- transport, protocol = yield from self._create_connection_transport(
- sock, protocol_factory, ssl, server_hostname)
+ transport, protocol = yield From(self._create_connection_transport(
+ sock, protocol_factory, ssl, server_hostname))
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
- return transport, protocol
+ raise Return(transport, protocol)
@coroutine
def _create_connection_transport(self, sock, protocol_factory, ssl,
@@ -689,16 +697,16 @@ class BaseEventLoop(events.AbstractEventLoop):
transport = self._make_socket_transport(sock, protocol, waiter)
try:
- yield from waiter
+ yield From(waiter)
except:
transport.close()
raise
- return transport, protocol
+ raise Return(transport, protocol)
@coroutine
def create_datagram_endpoint(self, protocol_factory,
- local_addr=None, remote_addr=None, *,
+ local_addr=None, remote_addr=None,
family=0, proto=0, flags=0):
"""Create datagram connection."""
if not (local_addr or remote_addr):
@@ -707,17 +715,17 @@ class BaseEventLoop(events.AbstractEventLoop):
addr_pairs_info = (((family, proto), (None, None)),)
else:
# join address by (family, protocol)
- addr_infos = collections.OrderedDict()
+ addr_infos = OrderedDict()
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
- infos = yield from self.getaddrinfo(
+ infos = yield From(self.getaddrinfo(
*addr, family=family, type=socket.SOCK_DGRAM,
- proto=proto, flags=flags)
+ proto=proto, flags=flags))
if not infos:
- raise OSError('getaddrinfo() returned empty list')
+ raise socket.error('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
@@ -749,9 +757,9 @@ class BaseEventLoop(events.AbstractEventLoop):
if local_addr:
sock.bind(local_address)
if remote_addr:
- yield from self.sock_connect(sock, remote_address)
+ yield From(self.sock_connect(sock, remote_address))
r_addr = remote_address
- except OSError as exc:
+ except socket.error as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
@@ -779,16 +787,15 @@ class BaseEventLoop(events.AbstractEventLoop):
remote_addr, transport, protocol)
try:
- yield from waiter
+ yield From(waiter)
except:
transport.close()
raise
- return transport, protocol
+ raise Return(transport, protocol)
@coroutine
def create_server(self, protocol_factory, host=None, port=None,
- *,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
@@ -815,11 +822,11 @@ class BaseEventLoop(events.AbstractEventLoop):
if host == '':
host = None
- infos = yield from self.getaddrinfo(
+ infos = yield From(self.getaddrinfo(
host, port, family=family,
- type=socket.SOCK_STREAM, proto=0, flags=flags)
+ type=socket.SOCK_STREAM, proto=0, flags=flags))
if not infos:
- raise OSError('getaddrinfo() returned empty list')
+ raise socket.error('getaddrinfo() returned empty list')
completed = False
try:
@@ -847,10 +854,11 @@ class BaseEventLoop(events.AbstractEventLoop):
True)
try:
sock.bind(sa)
- except OSError as err:
- raise OSError(err.errno, 'error while attempting '
- 'to bind on address %r: %s'
- % (sa, err.strerror.lower()))
+ except socket.error as err:
+ raise socket.error(err.errno,
+ 'error while attempting '
+ 'to bind on address %r: %s'
+ % (sa, err.strerror.lower()))
completed = True
finally:
if not completed:
@@ -868,7 +876,7 @@ class BaseEventLoop(events.AbstractEventLoop):
self._start_serving(protocol_factory, sock, ssl, server)
if self._debug:
logger.info("%r is serving", server)
- return server
+ raise Return(server)
@coroutine
def connect_read_pipe(self, protocol_factory, pipe):
@@ -877,7 +885,7 @@ class BaseEventLoop(events.AbstractEventLoop):
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
- yield from waiter
+ yield From(waiter)
except:
transport.close()
raise
@@ -885,7 +893,7 @@ class BaseEventLoop(events.AbstractEventLoop):
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
- return transport, protocol
+ raise Return(transport, protocol)
@coroutine
def connect_write_pipe(self, protocol_factory, pipe):
@@ -894,7 +902,7 @@ class BaseEventLoop(events.AbstractEventLoop):
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
- yield from waiter
+ yield From(waiter)
except:
transport.close()
raise
@@ -902,7 +910,7 @@ class BaseEventLoop(events.AbstractEventLoop):
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
- return transport, protocol
+ raise Return(transport, protocol)
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
@@ -918,11 +926,11 @@ class BaseEventLoop(events.AbstractEventLoop):
logger.debug(' '.join(info))
@coroutine
- def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
+ def subprocess_shell(self, protocol_factory, cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=False, shell=True, bufsize=0,
**kwargs):
- if not isinstance(cmd, (bytes, str)):
+ if not isinstance(cmd, compat.string_types):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
@@ -936,17 +944,20 @@ class BaseEventLoop(events.AbstractEventLoop):
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
- transport = yield from self._make_subprocess_transport(
- protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
+ transport = yield From(self._make_subprocess_transport(
+ protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs))
if self._debug:
logger.info('%s: %r' % (debug_log, transport))
- return transport, protocol
+ raise Return(transport, protocol)
@coroutine
- def subprocess_exec(self, protocol_factory, program, *args,
- stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, universal_newlines=False,
- shell=False, bufsize=0, **kwargs):
+ def subprocess_exec(self, protocol_factory, program, *args, **kwargs):
+ stdin = kwargs.pop('stdin', subprocess.PIPE)
+ stdout = kwargs.pop('stdout', subprocess.PIPE)
+ stderr = kwargs.pop('stderr', subprocess.PIPE)
+ universal_newlines = kwargs.pop('universal_newlines', False)
+ shell = kwargs.pop('shell', False)
+ bufsize = kwargs.pop('bufsize', 0)
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
@@ -955,7 +966,7 @@ class BaseEventLoop(events.AbstractEventLoop):
raise ValueError("bufsize must be 0")
popen_args = (program,) + args
for arg in popen_args:
- if not isinstance(arg, (str, bytes)):
+ if not isinstance(arg, compat.string_types ):
raise TypeError("program arguments must be "
"a bytes or text string, not %s"
% type(arg).__name__)
@@ -965,12 +976,12 @@ class BaseEventLoop(events.AbstractEventLoop):
# (password) and may be too long
debug_log = 'execute program %r' % program
self._log_subprocess(debug_log, stdin, stdout, stderr)
- transport = yield from self._make_subprocess_transport(
+ transport = yield From(self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
- bufsize, **kwargs)
+ bufsize, **kwargs))
if self._debug:
logger.info('%s: %r' % (debug_log, transport))
- return transport, protocol
+ raise Return(transport, protocol)
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
@@ -986,7 +997,7 @@ class BaseEventLoop(events.AbstractEventLoop):
"""
if handler is not None and not callable(handler):
raise TypeError('A callable object or None is expected, '
- 'got {!r}'.format(handler))
+ 'got {0!r}'.format(handler))
self._exception_handler = handler
def default_exception_handler(self, context):
@@ -1005,7 +1016,15 @@ class BaseEventLoop(events.AbstractEventLoop):
exception = context.get('exception')
if exception is not None:
- exc_info = (type(exception), exception, exception.__traceback__)
+ if hasattr(exception, '__traceback__'):
+ # Python 3
+ tb = exception.__traceback__
+ else:
+ # call_exception_handler() is usually called indirectly
+ # from an except block. If it's not the case, the traceback
+ # is undefined...
+ tb = sys.exc_info()[2]
+ exc_info = (type(exception), exception, tb)
else:
exc_info = False
@@ -1016,7 +1035,7 @@ class BaseEventLoop(events.AbstractEventLoop):
log_lines = [message]
for key in sorted(context):
- if key in {'message', 'exception'}:
+ if key in ('message', 'exception'):
continue
value = context[key]
if key == 'source_traceback':
@@ -1029,7 +1048,7 @@ class BaseEventLoop(events.AbstractEventLoop):
value += tb.rstrip()
else:
value = repr(value)
- log_lines.append('{}: {}'.format(key, value))
+ log_lines.append('{0}: {1}'.format(key, value))
logger.error('\n'.join(log_lines), exc_info=exc_info)
@@ -1109,7 +1128,7 @@ class BaseEventLoop(events.AbstractEventLoop):
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
- self._timer_cancelled_count / sched_count >
+ float(self._timer_cancelled_count) / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
diff --git a/asyncio/base_subprocess.py b/trollius/base_subprocess.py
index 6851cd2..ffd6e76 100644
--- a/asyncio/base_subprocess.py
+++ b/trollius/base_subprocess.py
@@ -6,8 +6,9 @@ from . import compat
from . import futures
from . import protocols
from . import transports
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
from .log import logger
+from .py33_exceptions import ProcessLookupError
class BaseSubprocessTransport(transports.SubprocessTransport):
@@ -15,7 +16,7 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
def __init__(self, loop, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=None, extra=None, **kwargs):
- super().__init__(extra)
+ super(BaseSubprocessTransport, self).__init__(extra)
self._closed = False
self._protocol = protocol
self._loop = loop
@@ -157,21 +158,21 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
loop = self._loop
if proc.stdin is not None:
- _, pipe = yield from loop.connect_write_pipe(
+ _, pipe = yield From(loop.connect_write_pipe(
lambda: WriteSubprocessPipeProto(self, 0),
- proc.stdin)
+ proc.stdin))
self._pipes[0] = pipe
if proc.stdout is not None:
- _, pipe = yield from loop.connect_read_pipe(
+ _, pipe = yield From(loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 1),
- proc.stdout)
+ proc.stdout))
self._pipes[1] = pipe
if proc.stderr is not None:
- _, pipe = yield from loop.connect_read_pipe(
+ _, pipe = yield From(loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 2),
- proc.stderr)
+ proc.stderr))
self._pipes[2] = pipe
assert self._pending_calls is not None
@@ -222,11 +223,12 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
This method is a coroutine."""
if self._returncode is not None:
- return self._returncode
+ raise Return(self._returncode)
waiter = futures.Future(loop=self._loop)
self._exit_waiters.append(waiter)
- return (yield from waiter)
+ returncode = yield From(waiter)
+ raise Return(returncode)
def _try_finish(self):
assert not self._finished
diff --git a/trollius/compat.py b/trollius/compat.py
new file mode 100644
index 0000000..df64aba
--- /dev/null
+++ b/trollius/compat.py
@@ -0,0 +1,69 @@
+"""Compatibility helpers for the different Python versions."""
+
+import six
+import sys
+
+# Python 2.6 or older?
+PY26 = (sys.version_info < (2, 7))
+
+# Python 3.3 or newer?
+PY33 = (sys.version_info >= (3, 3))
+
+# Python 3.4 or newer?
+PY34 = sys.version_info >= (3, 4)
+
+# Python 3.5 or newer?
+PY35 = sys.version_info >= (3, 5)
+
+if six.PY3:
+ integer_types = (int,)
+ bytes_type = bytes
+ text_type = str
+ string_types = (bytes, str)
+ BYTES_TYPES = (bytes, bytearray, memoryview)
+else:
+ integer_types = (int, long,)
+ bytes_type = str
+ text_type = unicode
+ string_types = basestring
+ if PY26:
+ BYTES_TYPES = (str, bytearray, buffer)
+ else: # Python 2.7
+ BYTES_TYPES = (str, bytearray, memoryview, buffer)
+
+
+if six.PY3:
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+else:
+ exec("""def reraise(tp, value, tb=None): raise tp, value, tb""")
+
+
+def flatten_bytes(data):
+ """
+ Convert bytes-like objects (bytes, bytearray, memoryview, buffer) to
+ a bytes string.
+ """
+ if not isinstance(data, BYTES_TYPES):
+ raise TypeError('data argument must be byte-ish (%r)'
+ % type(data))
+ if PY34:
+ # In Python 3.4, socket.send() and bytes.join() accept memoryview
+ # and bytearray
+ return data
+ if not data:
+ return b''
+ if six.PY2 and isinstance(data, (buffer, bytearray)):
+ return str(data)
+ elif not PY26 and isinstance(data, memoryview):
+ return data.tobytes()
+ else:
+ return data
+
+
+def flatten_list_bytes(data):
+ """Concatenate a sequence of bytes-like objects."""
+ data = map(flatten_bytes, data)
+ return b''.join(data)
diff --git a/asyncio/constants.py b/trollius/constants.py
index f9e1232..f9e1232 100644
--- a/asyncio/constants.py
+++ b/trollius/constants.py
diff --git a/asyncio/coroutines.py b/trollius/coroutines.py
index e11b21b..eea8c60 100644
--- a/asyncio/coroutines.py
+++ b/trollius/coroutines.py
@@ -1,5 +1,6 @@
__all__ = ['coroutine',
- 'iscoroutinefunction', 'iscoroutine']
+ 'iscoroutinefunction', 'iscoroutine',
+ 'From', 'Return']
import functools
import inspect
@@ -16,7 +17,7 @@ from .log import logger
# Opcode of "yield from" instruction
-_YIELD_FROM = opcode.opmap['YIELD_FROM']
+_YIELD_FROM = opcode.opmap.get('YIELD_FROM', None)
# If you set _DEBUG to true, @coroutine will wrap the resulting
# generator objects in a CoroWrapper instance (defined below). That
@@ -27,8 +28,7 @@ _YIELD_FROM = opcode.opmap['YIELD_FROM']
# before you define your coroutines. A downside of using this feature
# is that tracebacks show entries for the CoroWrapper.__next__ method
# when _DEBUG is true.
-_DEBUG = (not sys.flags.ignore_environment
- and bool(os.environ.get('PYTHONASYNCIODEBUG')))
+_DEBUG = bool(os.environ.get('TROLLIUSDEBUG'))
try:
@@ -48,28 +48,80 @@ except ImportError:
_CoroutineABC = _AwaitableABC = None
-# Check for CPython issue #21209
-def has_yield_from_bug():
- class MyGen:
- def __init__(self):
- self.send_args = None
- def __iter__(self):
- return self
- def __next__(self):
- return 42
- def send(self, *what):
- self.send_args = what
- return None
- def yield_from_gen(gen):
- yield from gen
- value = (1, 2, 3)
- gen = MyGen()
- coro = yield_from_gen(gen)
- next(coro)
- coro.send(value)
- return gen.send_args != (value,)
-_YIELD_FROM_BUG = has_yield_from_bug()
-del has_yield_from_bug
+if _YIELD_FROM is not None:
+ # Check for CPython issue #21209
+ exec('''if 1:
+ def has_yield_from_bug():
+ class MyGen:
+ def __init__(self):
+ self.send_args = None
+ def __iter__(self):
+ return self
+ def __next__(self):
+ return 42
+ def send(self, *what):
+ self.send_args = what
+ return None
+ def yield_from_gen(gen):
+ yield from gen
+ value = (1, 2, 3)
+ gen = MyGen()
+ coro = yield_from_gen(gen)
+ next(coro)
+ coro.send(value)
+ return gen.send_args != (value,)
+''')
+ _YIELD_FROM_BUG = has_yield_from_bug()
+ del has_yield_from_bug
+else:
+ _YIELD_FROM_BUG = False
+
+
+if compat.PY33:
+ # Don't use the Return class on Python 3.3 and later to support asyncio
+ # coroutines (to avoid the warning emitted in Return destructor).
+ #
+ # The problem is that Return inherits from StopIteration. "yield from
+ # trollius_coroutine". Task._step() does not receive the Return exception,
+ # because "yield from" handles it internally. So it's not possible to set
+ # the raised attribute to True to avoid the warning in Return destructor.
+ def Return(*args):
+ if not args:
+ value = None
+ elif len(args) == 1:
+ value = args[0]
+ else:
+ value = args
+ return StopIteration(value)
+else:
+ class Return(StopIteration):
+ def __init__(self, *args):
+ StopIteration.__init__(self)
+ if not args:
+ self.value = None
+ elif len(args) == 1:
+ self.value = args[0]
+ else:
+ self.value = args
+ self.raised = False
+ if _DEBUG:
+ frame = sys._getframe(1)
+ self._source_traceback = traceback.extract_stack(frame)
+ # explicitly clear the reference to avoid reference cycles
+ frame = None
+ else:
+ self._source_traceback = None
+
+ def __del__(self):
+ if self.raised:
+ return
+
+ fmt = 'Return(%r) used without raise'
+ if self._source_traceback:
+ fmt += '\nReturn created at (most recent call last):\n'
+ tb = ''.join(traceback.format_list(self._source_traceback))
+ fmt += tb.rstrip()
+ logger.error(fmt, self.value)
def debug_wrapper(gen):
@@ -80,6 +132,21 @@ def debug_wrapper(gen):
return CoroWrapper(gen, None)
+def _coroutine_at_yield_from(coro):
+ """Test if the last instruction of a coroutine is "yield from".
+
+ Return False if the coroutine completed.
+ """
+ frame = coro.gi_frame
+ if frame is None:
+ return False
+ code = coro.gi_code
+ assert frame.f_lasti >= 0
+ offset = frame.f_lasti + 1
+ instr = code.co_code[offset]
+ return (instr == _YIELD_FROM)
+
+
class CoroWrapper:
# Wrapper for coroutine object in _DEBUG mode.
@@ -102,7 +169,8 @@ class CoroWrapper:
return self
def __next__(self):
- return self.gen.send(None)
+ return next(self.gen)
+ next = __next__
if _YIELD_FROM_BUG:
# For for CPython issue #21209: using "yield from" and a custom
@@ -178,6 +246,56 @@ class CoroWrapper:
msg += tb.rstrip()
logger.error(msg)
+if not compat.PY34:
+ # Backport functools.update_wrapper() from Python 3.4:
+ # - Python 2.7 fails if assigned attributes don't exist
+ # - Python 2.7 and 3.1 don't set the __wrapped__ attribute
+ # - Python 3.2 and 3.3 set __wrapped__ before updating __dict__
+ def _update_wrapper(wrapper,
+ wrapped,
+ assigned = functools.WRAPPER_ASSIGNMENTS,
+ updated = functools.WRAPPER_UPDATES):
+ """Update a wrapper function to look like the wrapped function
+
+ wrapper is the function to be updated
+ wrapped is the original function
+ assigned is a tuple naming the attributes assigned directly
+ from the wrapped function to the wrapper function (defaults to
+ functools.WRAPPER_ASSIGNMENTS)
+ updated is a tuple naming the attributes of the wrapper that
+ are updated with the corresponding attribute from the wrapped
+ function (defaults to functools.WRAPPER_UPDATES)
+ """
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ pass
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
+ # from the wrapped function when updating __dict__
+ wrapper.__wrapped__ = wrapped
+ # Return the wrapper so this can be used as a decorator via partial()
+ return wrapper
+
+ def _wraps(wrapped,
+ assigned = functools.WRAPPER_ASSIGNMENTS,
+ updated = functools.WRAPPER_UPDATES):
+ """Decorator factory to apply update_wrapper() to a wrapper function
+
+ Returns a decorator that invokes update_wrapper() with the decorated
+ function as the wrapper argument and the arguments to wraps() as the
+ remaining arguments. Default arguments are as for update_wrapper().
+ This is a convenience function to simplify applying partial() to
+ update_wrapper().
+ """
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+else:
+ _wraps = functools.wraps
def coroutine(func):
"""Decorator to mark coroutines.
@@ -195,11 +313,12 @@ def coroutine(func):
if inspect.isgeneratorfunction(func):
coro = func
else:
- @functools.wraps(func)
+ @_wraps(func)
def coro(*args, **kw):
res = func(*args, **kw)
- if isinstance(res, futures.Future) or inspect.isgenerator(res):
- res = yield from res
+ if (isinstance(res, futures._FUTURE_CLASSES)
+ or inspect.isgenerator(res)):
+ res = yield From(res)
elif _AwaitableABC is not None:
# If 'func' returns an Awaitable (new in 3.5) we
# want to run it.
@@ -209,8 +328,8 @@ def coroutine(func):
pass
else:
if isinstance(res, _AwaitableABC):
- res = yield from await_meth()
- return res
+ res = yield From(await_meth())
+ raise Return(res)
if not _DEBUG:
if _types_coroutine is None:
@@ -218,7 +337,7 @@ def coroutine(func):
else:
wrapper = _types_coroutine(coro)
else:
- @functools.wraps(func)
+ @_wraps(func)
def wrapper(*args, **kwds):
w = CoroWrapper(coro(*args, **kwds), func=func)
if w._source_traceback:
@@ -244,7 +363,13 @@ def iscoroutinefunction(func):
_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
if _CoroutineABC is not None:
_COROUTINE_TYPES += (_CoroutineABC,)
-
+if events.asyncio is not None:
+ # Accept also asyncio CoroWrapper for interoperability
+ if hasattr(events.asyncio, 'coroutines'):
+ _COROUTINE_TYPES += (events.asyncio.coroutines.CoroWrapper,)
+ else:
+ # old asyncio/Python versions
+ _COROUTINE_TYPES += (events.asyncio.tasks.CoroWrapper,)
def iscoroutine(obj):
"""Return True if obj is a coroutine object."""
@@ -259,7 +384,7 @@ def _format_coroutine(coro):
func = coro.func
coro_name = coro.__qualname__
if coro_name is not None:
- coro_name = '{}()'.format(coro_name)
+ coro_name = '{0}()'.format(coro_name)
else:
func = coro
@@ -297,3 +422,19 @@ def _format_coroutine(coro):
% (coro_name, filename, lineno))
return coro_repr
+
+
+class FromWrapper(object):
+ __slots__ = ('obj',)
+
+ def __init__(self, obj):
+ if isinstance(obj, FromWrapper):
+ obj = obj.obj
+ assert not isinstance(obj, FromWrapper)
+ self.obj = obj
+
+def From(obj):
+ if not _DEBUG:
+ return obj
+ else:
+ return FromWrapper(obj)
diff --git a/trollius/events.py b/trollius/events.py
new file mode 100644
index 0000000..5261161
--- /dev/null
+++ b/trollius/events.py
@@ -0,0 +1,626 @@
+"""Event loop and event loop policy."""
+from __future__ import absolute_import
+
+__all__ = ['AbstractEventLoopPolicy',
+ 'AbstractEventLoop', 'AbstractServer',
+ 'Handle', 'TimerHandle',
+ 'get_event_loop_policy', 'set_event_loop_policy',
+ 'get_event_loop', 'set_event_loop', 'new_event_loop',
+ 'get_child_watcher', 'set_child_watcher',
+ ]
+
+import functools
+import inspect
+import socket
+import subprocess
+import sys
+import threading
+import traceback
+try:
+ import reprlib # Python 3
+except ImportError:
+ import repr as reprlib # Python 2
+
+try:
+ import asyncio
+except (ImportError, SyntaxError):
+ # ignore SyntaxError for convenience: it can be raised by "yield
+ # from" if the asyncio module is in the Python path
+ asyncio = None
+
+from trollius import compat
+
+
+def _get_function_source(func):
+ if compat.PY34:
+ func = inspect.unwrap(func)
+ elif hasattr(func, '__wrapped__'):
+ func = func.__wrapped__
+ if inspect.isfunction(func):
+ code = func.__code__
+ return (code.co_filename, code.co_firstlineno)
+ if isinstance(func, functools.partial):
+ return _get_function_source(func.func)
+ if compat.PY34 and isinstance(func, functools.partialmethod):
+ return _get_function_source(func.func)
+ return None
+
+
+def _format_args(args):
+ """Format function arguments.
+
+ Special case for a single parameter: ('hello',) is formatted as ('hello').
+ """
+ # use reprlib to limit the length of the output
+ args_repr = reprlib.repr(args)
+ if len(args) == 1 and args_repr.endswith(',)'):
+ args_repr = args_repr[:-2] + ')'
+ return args_repr
+
+
+def _format_callback(func, args, suffix=''):
+ if isinstance(func, functools.partial):
+ if args is not None:
+ suffix = _format_args(args) + suffix
+ return _format_callback(func.func, func.args, suffix)
+
+ if hasattr(func, '__qualname__'):
+ func_repr = getattr(func, '__qualname__')
+ elif hasattr(func, '__name__'):
+ func_repr = getattr(func, '__name__')
+ else:
+ func_repr = repr(func)
+
+ if args is not None:
+ func_repr += _format_args(args)
+ if suffix:
+ func_repr += suffix
+ return func_repr
+
+def _format_callback_source(func, args):
+ func_repr = _format_callback(func, args)
+ source = _get_function_source(func)
+ if source:
+ func_repr += ' at %s:%s' % source
+ return func_repr
+
+
+class Handle(object):
+ """Object returned by callback registration methods."""
+
+ __slots__ = ('_callback', '_args', '_cancelled', '_loop',
+ '_source_traceback', '_repr', '__weakref__')
+
+ def __init__(self, callback, args, loop):
+ assert not isinstance(callback, Handle), 'A Handle is not a callback'
+ self._loop = loop
+ self._callback = callback
+ self._args = args
+ self._cancelled = False
+ self._repr = None
+ if self._loop.get_debug():
+ self._source_traceback = traceback.extract_stack(sys._getframe(1))
+ else:
+ self._source_traceback = None
+
+ def _repr_info(self):
+ info = [self.__class__.__name__]
+ if self._cancelled:
+ info.append('cancelled')
+ if self._callback is not None:
+ info.append(_format_callback_source(self._callback, self._args))
+ if self._source_traceback:
+ frame = self._source_traceback[-1]
+ info.append('created at %s:%s' % (frame[0], frame[1]))
+ return info
+
+ def __repr__(self):
+ if self._repr is not None:
+ return self._repr
+ info = self._repr_info()
+ return '<%s>' % ' '.join(info)
+
+ def cancel(self):
+ if not self._cancelled:
+ self._cancelled = True
+ if self._loop.get_debug():
+ # Keep a representation in debug mode to keep callback and
+ # parameters. For example, to log the warning
+ # "Executing <Handle...> took 2.5 second"
+ self._repr = repr(self)
+ self._callback = None
+ self._args = None
+
+ def _run(self):
+ try:
+ self._callback(*self._args)
+ except Exception as exc:
+ cb = _format_callback_source(self._callback, self._args)
+ msg = 'Exception in callback {0}'.format(cb)
+ context = {
+ 'message': msg,
+ 'exception': exc,
+ 'handle': self,
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+ self = None # Needed to break cycles when an exception occurs.
+
+
+class TimerHandle(Handle):
+ """Object returned by timed callback registration methods."""
+
+ __slots__ = ['_scheduled', '_when']
+
+ def __init__(self, when, callback, args, loop):
+ assert when is not None
+ super(TimerHandle, self).__init__(callback, args, loop)
+ if self._source_traceback:
+ del self._source_traceback[-1]
+ self._when = when
+ self._scheduled = False
+
+ def _repr_info(self):
+ info = super(TimerHandle, self)._repr_info()
+ pos = 2 if self._cancelled else 1
+ info.insert(pos, 'when=%s' % self._when)
+ return info
+
+ def __hash__(self):
+ return hash(self._when)
+
+ def __lt__(self, other):
+ return self._when < other._when
+
+ def __le__(self, other):
+ if self._when < other._when:
+ return True
+ return self.__eq__(other)
+
+ def __gt__(self, other):
+ return self._when > other._when
+
+ def __ge__(self, other):
+ if self._when > other._when:
+ return True
+ return self.__eq__(other)
+
+ def __eq__(self, other):
+ if isinstance(other, TimerHandle):
+ return (self._when == other._when and
+ self._callback == other._callback and
+ self._args == other._args and
+ self._cancelled == other._cancelled)
+ return NotImplemented
+
+ def __ne__(self, other):
+ equal = self.__eq__(other)
+ return NotImplemented if equal is NotImplemented else not equal
+
+ def cancel(self):
+ if not self._cancelled:
+ self._loop._timer_handle_cancelled(self)
+ super(TimerHandle, self).cancel()
+
+
+class AbstractServer(object):
+ """Abstract server returned by create_server()."""
+
+ def close(self):
+ """Stop serving. This leaves existing connections open."""
+ return NotImplemented
+
+ def wait_closed(self):
+ """Coroutine to wait until service is closed."""
+ return NotImplemented
+
+
+if asyncio is not None:
+ # Reuse asyncio classes so asyncio.set_event_loop() and
+ # asyncio.set_event_loop_policy() accept Trollius event loop and trollius
+ # event loop policy
+ AbstractEventLoop = asyncio.AbstractEventLoop
+ AbstractEventLoopPolicy = asyncio.AbstractEventLoopPolicy
+else:
+ class AbstractEventLoop(object):
+ """Abstract event loop."""
+
+ # Running and stopping the event loop.
+
+ def run_forever(self):
+ """Run the event loop until stop() is called."""
+ raise NotImplementedError
+
+ def run_until_complete(self, future):
+ """Run the event loop until a Future is done.
+
+ Return the Future's result, or raise its exception.
+ """
+ raise NotImplementedError
+
+ def stop(self):
+ """Stop the event loop as soon as reasonable.
+
+ Exactly how soon that is may depend on the implementation, but
+ no more I/O callbacks should be scheduled.
+ """
+ raise NotImplementedError
+
+ def is_running(self):
+ """Return whether the event loop is currently running."""
+ raise NotImplementedError
+
+ def is_closed(self):
+ """Returns True if the event loop was closed."""
+ raise NotImplementedError
+
+ def close(self):
+ """Close the loop.
+
+ The loop should not be running.
+
+ This is idempotent and irreversible.
+
+ No other methods should be called after this one.
+ """
+ raise NotImplementedError
+
+ # Methods scheduling callbacks. All these return Handles.
+
+ def _timer_handle_cancelled(self, handle):
+ """Notification that a TimerHandle has been cancelled."""
+ raise NotImplementedError
+
+ def call_soon(self, callback, *args):
+ return self.call_later(0, callback, *args)
+
+ def call_later(self, delay, callback, *args):
+ raise NotImplementedError
+
+ def call_at(self, when, callback, *args):
+ raise NotImplementedError
+
+ def time(self):
+ raise NotImplementedError
+
+ # Method scheduling a coroutine object: create a task.
+
+ def create_task(self, coro):
+ raise NotImplementedError
+
+ # Methods for interacting with threads.
+
+ def call_soon_threadsafe(self, callback, *args):
+ raise NotImplementedError
+
+ def run_in_executor(self, executor, func, *args):
+ raise NotImplementedError
+
+ def set_default_executor(self, executor):
+ raise NotImplementedError
+
+ # Network I/O methods returning Futures.
+
+ def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0):
+ raise NotImplementedError
+
+ def getnameinfo(self, sockaddr, flags=0):
+ raise NotImplementedError
+
+ def create_connection(self, protocol_factory, host=None, port=None,
+ ssl=None, family=0, proto=0, flags=0, sock=None,
+ local_addr=None, server_hostname=None):
+ raise NotImplementedError
+
+ def create_server(self, protocol_factory, host=None, port=None,
+ family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
+ sock=None, backlog=100, ssl=None, reuse_address=None):
+ """A coroutine which creates a TCP server bound to host and port.
+
+ The return value is a Server object which can be used to stop
+ the service.
+
+ If host is an empty string or None all interfaces are assumed
+ and a list of multiple sockets will be returned (most likely
+ one for IPv4 and another one for IPv6).
+
+ family can be set to either AF_INET or AF_INET6 to force the
+ socket to use IPv4 or IPv6. If not set it will be determined
+ from host (defaults to AF_UNSPEC).
+
+ flags is a bitmask for getaddrinfo().
+
+ sock can optionally be specified in order to use a preexisting
+ socket object.
+
+ backlog is the maximum number of queued connections passed to
+ listen() (defaults to 100).
+
+ ssl can be set to an SSLContext to enable SSL over the
+ accepted connections.
+
+ reuse_address tells the kernel to reuse a local socket in
+ TIME_WAIT state, without waiting for its natural timeout to
+ expire. If not specified will automatically be set to True on
+ UNIX.
+ """
+ raise NotImplementedError
+
+ def create_unix_connection(self, protocol_factory, path,
+ ssl=None, sock=None,
+ server_hostname=None):
+ raise NotImplementedError
+
+ def create_unix_server(self, protocol_factory, path,
+ sock=None, backlog=100, ssl=None):
+ """A coroutine which creates a UNIX Domain Socket server.
+
+ The return value is a Server object, which can be used to stop
+ the service.
+
+ path is a str, representing a file system path to bind the
+ server socket to.
+
+ sock can optionally be specified in order to use a preexisting
+ socket object.
+
+ backlog is the maximum number of queued connections passed to
+ listen() (defaults to 100).
+
+ ssl can be set to an SSLContext to enable SSL over the
+ accepted connections.
+ """
+ raise NotImplementedError
+
+ def create_datagram_endpoint(self, protocol_factory,
+ local_addr=None, remote_addr=None,
+ family=0, proto=0, flags=0):
+ raise NotImplementedError
+
+ # Pipes and subprocesses.
+
+ def connect_read_pipe(self, protocol_factory, pipe):
+ """Register read pipe in event loop. Set the pipe to non-blocking mode.
+
+ protocol_factory should instantiate object with Protocol interface.
+ pipe is a file-like object.
+ Return pair (transport, protocol), where transport supports the
+ ReadTransport interface."""
+ # The reason to accept file-like object instead of just file descriptor
+ # is: we need to own pipe and close it at transport finishing
+ # Can get complicated errors if you pass f.fileno(),
+ # close the fd in the pipe transport, then close f, and vice versa.
+ raise NotImplementedError
+
+ def connect_write_pipe(self, protocol_factory, pipe):
+ """Register write pipe in event loop.
+
+ protocol_factory should instantiate object with BaseProtocol interface.
+ Pipe is file-like object already switched to nonblocking.
+ Return pair (transport, protocol), where transport support
+ WriteTransport interface."""
+ # The reason to accept file-like object instead of just file descriptor
+ # is: we need to own pipe and close it at transport finishing
+ # Can get complicated errors if you pass f.fileno(),
+ # close the fd in the pipe transport, then close f, and vice versa.
+ raise NotImplementedError
+
+ def subprocess_shell(self, protocol_factory, cmd, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ **kwargs):
+ raise NotImplementedError
+
+ def subprocess_exec(self, protocol_factory, *args, **kwargs):
+ raise NotImplementedError
+
+ # Ready-based callback registration methods.
+ # The add_*() methods return None.
+ # The remove_*() methods return True if something was removed,
+ # False if there was nothing to delete.
+
+ def add_reader(self, fd, callback, *args):
+ raise NotImplementedError
+
+ def remove_reader(self, fd):
+ raise NotImplementedError
+
+ def add_writer(self, fd, callback, *args):
+ raise NotImplementedError
+
+ def remove_writer(self, fd):
+ raise NotImplementedError
+
+ # Completion based I/O methods returning Futures.
+
+ def sock_recv(self, sock, nbytes):
+ raise NotImplementedError
+
+ def sock_sendall(self, sock, data):
+ raise NotImplementedError
+
+ def sock_connect(self, sock, address):
+ raise NotImplementedError
+
+ def sock_accept(self, sock):
+ raise NotImplementedError
+
+ # Signal handling.
+
+ def add_signal_handler(self, sig, callback, *args):
+ raise NotImplementedError
+
+ def remove_signal_handler(self, sig):
+ raise NotImplementedError
+
+ # Task factory.
+
+ def set_task_factory(self, factory):
+ raise NotImplementedError
+
+ def get_task_factory(self):
+ raise NotImplementedError
+
+ # Error handlers.
+
+ def set_exception_handler(self, handler):
+ raise NotImplementedError
+
+ def default_exception_handler(self, context):
+ raise NotImplementedError
+
+ def call_exception_handler(self, context):
+ raise NotImplementedError
+
+ # Debug flag management.
+
+ def get_debug(self):
+ raise NotImplementedError
+
+ def set_debug(self, enabled):
+ raise NotImplementedError
+
+
+ class AbstractEventLoopPolicy(object):
+ """Abstract policy for accessing the event loop."""
+
+ def get_event_loop(self):
+ """Get the event loop for the current context.
+
+ Returns an event loop object implementing the BaseEventLoop interface,
+ or raises an exception in case no event loop has been set for the
+ current context and the current policy does not specify to create one.
+
+ It should never return None."""
+ raise NotImplementedError
+
+ def set_event_loop(self, loop):
+ """Set the event loop for the current context to loop."""
+ raise NotImplementedError
+
+ def new_event_loop(self):
+ """Create and return a new event loop object according to this
+ policy's rules. If there's need to set this loop as the event loop for
+ the current context, set_event_loop must be called explicitly."""
+ raise NotImplementedError
+
+ # Child processes handling (Unix only).
+
+ def get_child_watcher(self):
+ "Get the watcher for child processes."
+ raise NotImplementedError
+
+ def set_child_watcher(self, watcher):
+ """Set the watcher for child processes."""
+ raise NotImplementedError
+
+
+class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
+ """Default policy implementation for accessing the event loop.
+
+ In this policy, each thread has its own event loop. However, we
+ only automatically create an event loop by default for the main
+ thread; other threads by default have no event loop.
+
+ Other policies may have different rules (e.g. a single global
+ event loop, or automatically creating an event loop per thread, or
+ using some other notion of context to which an event loop is
+ associated).
+ """
+
+ _loop_factory = None
+
+ class _Local(threading.local):
+ _loop = None
+ _set_called = False
+
+ def __init__(self):
+ self._local = self._Local()
+
+ def get_event_loop(self):
+ """Get the event loop.
+
+ This may be None or an instance of EventLoop.
+ """
+ if (self._local._loop is None and
+ not self._local._set_called and
+ isinstance(threading.current_thread(), threading._MainThread)):
+ self.set_event_loop(self.new_event_loop())
+ if self._local._loop is None:
+ raise RuntimeError('There is no current event loop in thread %r.'
+ % threading.current_thread().name)
+ return self._local._loop
+
+ def set_event_loop(self, loop):
+ """Set the event loop."""
+ self._local._set_called = True
+ assert loop is None or isinstance(loop, AbstractEventLoop)
+ self._local._loop = loop
+
+ def new_event_loop(self):
+ """Create a new event loop.
+
+ You must call set_event_loop() to make this the current event
+ loop.
+ """
+ return self._loop_factory()
+
+
+# Event loop policy. The policy itself is always global, even if the
+# policy's rules say that there is an event loop per thread (or other
+# notion of context). The default policy is installed by the first
+# call to get_event_loop_policy().
+_event_loop_policy = None
+
+# Lock for protecting the on-the-fly creation of the event loop policy.
+_lock = threading.Lock()
+
+
+def _init_event_loop_policy():
+ global _event_loop_policy
+ with _lock:
+ if _event_loop_policy is None: # pragma: no branch
+ from . import DefaultEventLoopPolicy
+ _event_loop_policy = DefaultEventLoopPolicy()
+
+
+def get_event_loop_policy():
+ """Get the current event loop policy."""
+ if _event_loop_policy is None:
+ _init_event_loop_policy()
+ return _event_loop_policy
+
+
+def set_event_loop_policy(policy):
+ """Set the current event loop policy.
+
+ If policy is None, the default policy is restored."""
+ global _event_loop_policy
+ assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
+ _event_loop_policy = policy
+
+
+def get_event_loop():
+ """Equivalent to calling get_event_loop_policy().get_event_loop()."""
+ return get_event_loop_policy().get_event_loop()
+
+
+def set_event_loop(loop):
+ """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
+ get_event_loop_policy().set_event_loop(loop)
+
+
+def new_event_loop():
+ """Equivalent to calling get_event_loop_policy().new_event_loop()."""
+ return get_event_loop_policy().new_event_loop()
+
+
+def get_child_watcher():
+ """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
+ return get_event_loop_policy().get_child_watcher()
+
+
+def set_child_watcher(watcher):
+ """Equivalent to calling
+ get_event_loop_policy().set_child_watcher(watcher)."""
+ return get_event_loop_policy().set_child_watcher(watcher)
diff --git a/trollius/executor.py b/trollius/executor.py
new file mode 100644
index 0000000..9e7fdd7
--- /dev/null
+++ b/trollius/executor.py
@@ -0,0 +1,84 @@
+from .log import logger
+
+__all__ = (
+ 'CancelledError', 'TimeoutError',
+ 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
+ )
+
+# Argument for default thread pool executor creation.
+_MAX_WORKERS = 5
+
+try:
+ import concurrent.futures
+ import concurrent.futures._base
+except ImportError:
+ FIRST_COMPLETED = 'FIRST_COMPLETED'
+ FIRST_EXCEPTION = 'FIRST_EXCEPTION'
+ ALL_COMPLETED = 'ALL_COMPLETED'
+
+ class Future(object):
+ def __init__(self, callback, args):
+ try:
+ self._result = callback(*args)
+ self._exception = None
+ except Exception as err:
+ self._result = None
+ self._exception = err
+ self.callbacks = []
+
+ def cancelled(self):
+ return False
+
+ def done(self):
+ return True
+
+ def exception(self):
+ return self._exception
+
+ def result(self):
+ if self._exception is not None:
+ raise self._exception
+ else:
+ return self._result
+
+ def add_done_callback(self, callback):
+ callback(self)
+
+ class Error(Exception):
+ """Base class for all future-related exceptions."""
+ pass
+
+ class CancelledError(Error):
+ """The Future was cancelled."""
+ pass
+
+ class TimeoutError(Error):
+ """The operation exceeded the given deadline."""
+ pass
+
+ class SynchronousExecutor:
+ """
+ Synchronous executor: submit() blocks until it gets the result.
+ """
+ def submit(self, callback, *args):
+ return Future(callback, args)
+
+ def shutdown(self, wait):
+ pass
+
+ def get_default_executor():
+ logger.error("concurrent.futures module is missing: "
+ "use a synchronous executor as fallback!")
+ return SynchronousExecutor()
+else:
+ FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
+ FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
+ ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
+
+ Future = concurrent.futures.Future
+ Error = concurrent.futures._base.Error
+ CancelledError = concurrent.futures.CancelledError
+ TimeoutError = concurrent.futures.TimeoutError
+
+ def get_default_executor():
+ return concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
diff --git a/asyncio/futures.py b/trollius/futures.py
index dbe06c4..4d4e20f 100644
--- a/asyncio/futures.py
+++ b/trollius/futures.py
@@ -5,23 +5,27 @@ __all__ = ['CancelledError', 'TimeoutError',
'Future', 'wrap_future',
]
-import concurrent.futures._base
import logging
-import reprlib
+import six
import sys
import traceback
+try:
+ import reprlib # Python 3
+except ImportError:
+ import repr as reprlib # Python 2
from . import compat
from . import events
+from . import executor
# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'
-Error = concurrent.futures._base.Error
-CancelledError = concurrent.futures.CancelledError
-TimeoutError = concurrent.futures.TimeoutError
+Error = executor.Error
+CancelledError = executor.CancelledError
+TimeoutError = executor.TimeoutError
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
@@ -30,7 +34,7 @@ class InvalidStateError(Error):
"""The operation is not allowed in this state."""
-class _TracebackLogger:
+class _TracebackLogger(object):
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
@@ -110,7 +114,7 @@ class _TracebackLogger:
self.loop.call_exception_handler({'message': msg})
-class Future:
+class Future(object):
"""This class is *almost* compatible with concurrent.futures.Future.
Differences:
@@ -136,10 +140,14 @@ class Future:
_blocking = False # proper use of future (yield vs yield from)
+ # Used by Python 2 to raise the exception with the original traceback
+ # in the exception() method in debug mode
+ _exception_tb = None
+
_log_traceback = False # Used for Python 3.4 and later
_tb_logger = None # Used for Python 3.3 only
- def __init__(self, *, loop=None):
+ def __init__(self, loop=None):
"""Initialize the future.
The optional event_loop argument allows to explicitly set the event
@@ -166,23 +174,23 @@ class Future:
if size == 1:
cb = format_cb(cb[0])
elif size == 2:
- cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1]))
+ cb = '{0}, {1}'.format(format_cb(cb[0]), format_cb(cb[1]))
elif size > 2:
- cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
- size-2,
- format_cb(cb[-1]))
+ cb = '{0}, <{1} more>, {2}'.format(format_cb(cb[0]),
+ size-2,
+ format_cb(cb[-1]))
return 'cb=[%s]' % cb
def _repr_info(self):
info = [self._state.lower()]
if self._state == _FINISHED:
if self._exception is not None:
- info.append('exception={!r}'.format(self._exception))
+ info.append('exception={0!r}'.format(self._exception))
else:
# use reprlib to limit the length of the output, especially
# for very long strings
result = reprlib.repr(self._result)
- info.append('result={}'.format(result))
+ info.append('result={0}'.format(result))
if self._callbacks:
info.append(self._format_callbacks())
if self._source_traceback:
@@ -270,8 +278,13 @@ class Future:
if self._tb_logger is not None:
self._tb_logger.clear()
self._tb_logger = None
+ exc_tb = self._exception_tb
+ self._exception_tb = None
if self._exception is not None:
- raise self._exception
+ if exc_tb is not None:
+ compat.reraise(type(self._exception), self._exception, exc_tb)
+ else:
+ raise self._exception
return self._result
def exception(self):
@@ -290,6 +303,7 @@ class Future:
if self._tb_logger is not None:
self._tb_logger.clear()
self._tb_logger = None
+ self._exception_tb = None
return self._exception
def add_done_callback(self, fn):
@@ -332,31 +346,61 @@ class Future:
InvalidStateError.
"""
if self._state != _PENDING:
- raise InvalidStateError('{}: {!r}'.format(self._state, self))
+ raise InvalidStateError('{0}: {1!r}'.format(self._state, self))
self._result = result
self._state = _FINISHED
self._schedule_callbacks()
+ def _get_exception_tb(self):
+ return self._exception_tb
+
def set_exception(self, exception):
+ self._set_exception_with_tb(exception, None)
+
+ def _set_exception_with_tb(self, exception, exc_tb):
"""Mark the future done and set an exception.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
- raise InvalidStateError('{}: {!r}'.format(self._state, self))
+ raise InvalidStateError('{0}: {1!r}'.format(self._state, self))
if isinstance(exception, type):
exception = exception()
self._exception = exception
+ if exc_tb is not None:
+ self._exception_tb = exc_tb
+ exc_tb = None
+ elif self._loop.get_debug() and not six.PY3:
+ self._exception_tb = sys.exc_info()[2]
self._state = _FINISHED
self._schedule_callbacks()
if compat.PY34:
self._log_traceback = True
else:
self._tb_logger = _TracebackLogger(self, exception)
- # Arrange for the logger to be activated after all callbacks
- # have had a chance to call result() or exception().
- self._loop.call_soon(self._tb_logger.activate)
+ if hasattr(exception, '__traceback__'):
+ # Python 3: exception contains a link to the traceback
+
+ # Arrange for the logger to be activated after all callbacks
+ # have had a chance to call result() or exception().
+ self._loop.call_soon(self._tb_logger.activate)
+ else:
+ if self._loop.get_debug():
+ frame = sys._getframe(1)
+ tb = ['Traceback (most recent call last):\n']
+ if self._exception_tb is not None:
+ tb += traceback.format_tb(self._exception_tb)
+ else:
+ tb += traceback.format_stack(frame)
+ tb += traceback.format_exception_only(type(exception), exception)
+ self._tb_logger.tb = tb
+ else:
+ self._tb_logger.tb = traceback.format_exception_only(
+ type(exception),
+ exception)
+
+ self._tb_logger.exc = None
# Truly internal methods.
@@ -379,23 +423,21 @@ class Future:
result = other.result()
self.set_result(result)
- def __iter__(self):
- if not self.done():
- self._blocking = True
- yield self # This tells Task to wait for completion.
- assert self.done(), "yield from wasn't used with future"
- return self.result() # May raise too.
-
if compat.PY35:
__await__ = __iter__ # make compatible with 'await' expression
+if events.asyncio is not None:
+ # Accept also asyncio Future objects for interoperability
+ _FUTURE_CLASSES = (Future, events.asyncio.Future)
+else:
+ _FUTURE_CLASSES = Future
-def wrap_future(fut, *, loop=None):
+def wrap_future(fut, loop=None):
"""Wrap concurrent.futures.Future object."""
- if isinstance(fut, Future):
+ if isinstance(fut, _FUTURE_CLASSES):
return fut
- assert isinstance(fut, concurrent.futures.Future), \
- 'concurrent.futures.Future is expected, got {!r}'.format(fut)
+ assert isinstance(fut, executor.Future), \
+ 'concurrent.futures.Future is expected, got {0!r}'.format(fut)
if loop is None:
loop = events.get_event_loop()
new_future = Future(loop=loop)
diff --git a/asyncio/locks.py b/trollius/locks.py
index 7a13279..03b4daa 100644
--- a/asyncio/locks.py
+++ b/trollius/locks.py
@@ -7,7 +7,7 @@ import collections
from . import compat
from . import events
from . import futures
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
class _ContextManager:
@@ -16,7 +16,7 @@ class _ContextManager:
This enables the following idiom for acquiring and releasing a
lock around a block:
- with (yield from lock):
+ with (yield From(lock)):
<block>
while failing loudly when accidentally using:
@@ -40,50 +40,34 @@ class _ContextManager:
self._lock = None # Crudely prevent reuse.
-class _ContextManagerMixin:
+class _ContextManagerMixin(object):
def __enter__(self):
raise RuntimeError(
- '"yield from" should be used as context manager expression')
+ '"yield From" should be used as context manager expression')
def __exit__(self, *args):
# This must exist because __enter__ exists, even though that
# always raises; that's how the with-statement works.
pass
- @coroutine
- def __iter__(self):
- # This is not a coroutine. It is meant to enable the idiom:
- #
- # with (yield from lock):
- # <block>
- #
- # as an alternative to:
- #
- # yield from lock.acquire()
- # try:
- # <block>
- # finally:
- # lock.release()
- yield from self.acquire()
- return _ContextManager(self)
-
- if compat.PY35:
-
- def __await__(self):
- # To make "with await lock" work.
- yield from self.acquire()
- return _ContextManager(self)
-
- @coroutine
- def __aenter__(self):
- yield from self.acquire()
- # We have no use for the "as ..." clause in the with
- # statement for locks.
- return None
-
- @coroutine
- def __aexit__(self, exc_type, exc, tb):
- self.release()
+ # FIXME: support PEP 492?
+ # if compat.PY35:
+
+ # def __await__(self):
+ # # To make "with await lock" work.
+ # yield from self.acquire()
+ # return _ContextManager(self)
+
+ # @coroutine
+ # def __aenter__(self):
+ # yield from self.acquire()
+ # # We have no use for the "as ..." clause in the with
+ # # statement for locks.
+ # return None
+
+ # @coroutine
+ # def __aexit__(self, exc_type, exc, tb):
+ # self.release()
class Lock(_ContextManagerMixin):
@@ -108,16 +92,16 @@ class Lock(_ContextManagerMixin):
release() call resets the state to unlocked; first coroutine which
is blocked in acquire() is being processed.
- acquire() is a coroutine and should be called with 'yield from'.
+ acquire() is a coroutine and should be called with 'yield From'.
- Locks also support the context management protocol. '(yield from lock)'
+ Locks also support the context management protocol. '(yield From(lock))'
should be used as context manager expression.
Usage:
lock = Lock()
...
- yield from lock
+ yield From(lock)
try:
...
finally:
@@ -127,20 +111,20 @@ class Lock(_ContextManagerMixin):
lock = Lock()
...
- with (yield from lock):
+ with (yield From(lock)):
...
Lock objects can be tested for locking state:
if not lock.locked():
- yield from lock
+ yield From(lock)
else:
# lock is acquired
...
"""
- def __init__(self, *, loop=None):
+ def __init__(self, loop=None):
self._waiters = collections.deque()
self._locked = False
if loop is not None:
@@ -149,11 +133,11 @@ class Lock(_ContextManagerMixin):
self._loop = events.get_event_loop()
def __repr__(self):
- res = super().__repr__()
+ res = super(Lock, self).__repr__()
extra = 'locked' if self._locked else 'unlocked'
if self._waiters:
- extra = '{},waiters:{}'.format(extra, len(self._waiters))
- return '<{} [{}]>'.format(res[1:-1], extra)
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
def locked(self):
"""Return True if lock is acquired."""
@@ -168,14 +152,14 @@ class Lock(_ContextManagerMixin):
"""
if not self._waiters and not self._locked:
self._locked = True
- return True
+ raise Return(True)
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
- yield from fut
+ yield From(fut)
self._locked = True
- return True
+ raise Return(True)
finally:
self._waiters.remove(fut)
@@ -201,7 +185,7 @@ class Lock(_ContextManagerMixin):
raise RuntimeError('Lock is not acquired.')
-class Event:
+class Event(object):
"""Asynchronous equivalent to threading.Event.
Class implementing event objects. An event manages a flag that can be set
@@ -210,7 +194,7 @@ class Event:
false.
"""
- def __init__(self, *, loop=None):
+ def __init__(self, loop=None):
self._waiters = collections.deque()
self._value = False
if loop is not None:
@@ -219,11 +203,11 @@ class Event:
self._loop = events.get_event_loop()
def __repr__(self):
- res = super().__repr__()
+ res = super(Event, self).__repr__()
extra = 'set' if self._value else 'unset'
if self._waiters:
- extra = '{},waiters:{}'.format(extra, len(self._waiters))
- return '<{} [{}]>'.format(res[1:-1], extra)
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
def is_set(self):
"""Return True if and only if the internal flag is true."""
@@ -256,13 +240,13 @@ class Event:
set() to set the flag to true, then return True.
"""
if self._value:
- return True
+ raise Return(True)
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
- yield from fut
- return True
+ yield From(fut)
+ raise Return(True)
finally:
self._waiters.remove(fut)
@@ -277,7 +261,7 @@ class Condition(_ContextManagerMixin):
A new Lock object is created and used as the underlying lock.
"""
- def __init__(self, lock=None, *, loop=None):
+ def __init__(self, lock=None, loop=None):
if loop is not None:
self._loop = loop
else:
@@ -297,11 +281,11 @@ class Condition(_ContextManagerMixin):
self._waiters = collections.deque()
def __repr__(self):
- res = super().__repr__()
+ res = super(Condition, self).__repr__()
extra = 'locked' if self.locked() else 'unlocked'
if self._waiters:
- extra = '{},waiters:{}'.format(extra, len(self._waiters))
- return '<{} [{}]>'.format(res[1:-1], extra)
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
@coroutine
def wait(self):
@@ -323,13 +307,24 @@ class Condition(_ContextManagerMixin):
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
- yield from fut
- return True
+ yield From(fut)
+ raise Return(True)
finally:
self._waiters.remove(fut)
- finally:
- yield from self.acquire()
+ except Exception as exc:
+ # Workaround CPython bug #23353: using yield/yield-from in an
+ # except block of a generator doesn't clear properly
+ # sys.exc_info()
+ err = exc
+ else:
+ err = None
+
+ if err is not None:
+ yield From(self.acquire())
+ raise err
+
+ yield From(self.acquire())
@coroutine
def wait_for(self, predicate):
@@ -341,9 +336,9 @@ class Condition(_ContextManagerMixin):
"""
result = predicate()
while not result:
- yield from self.wait()
+ yield From(self.wait())
result = predicate()
- return result
+ raise Return(result)
def notify(self, n=1):
"""By default, wake up one coroutine waiting on this condition, if any.
@@ -393,7 +388,7 @@ class Semaphore(_ContextManagerMixin):
ValueError is raised.
"""
- def __init__(self, value=1, *, loop=None):
+ def __init__(self, value=1, loop=None):
if value < 0:
raise ValueError("Semaphore initial value must be >= 0")
self._value = value
@@ -404,12 +399,12 @@ class Semaphore(_ContextManagerMixin):
self._loop = events.get_event_loop()
def __repr__(self):
- res = super().__repr__()
- extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
+ res = super(Semaphore, self).__repr__()
+ extra = 'locked' if self.locked() else 'unlocked,value:{0}'.format(
self._value)
if self._waiters:
- extra = '{},waiters:{}'.format(extra, len(self._waiters))
- return '<{} [{}]>'.format(res[1:-1], extra)
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
def locked(self):
"""Returns True if semaphore can not be acquired immediately."""
@@ -427,14 +422,14 @@ class Semaphore(_ContextManagerMixin):
"""
if not self._waiters and self._value > 0:
self._value -= 1
- return True
+ raise Return(True)
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
- yield from fut
+ yield From(fut)
self._value -= 1
- return True
+ raise Return(True)
finally:
self._waiters.remove(fut)
@@ -457,11 +452,11 @@ class BoundedSemaphore(Semaphore):
above the initial value.
"""
- def __init__(self, value=1, *, loop=None):
+ def __init__(self, value=1, loop=None):
self._bound_value = value
- super().__init__(value, loop=loop)
+ super(BoundedSemaphore, self).__init__(value, loop=loop)
def release(self):
if self._value >= self._bound_value:
raise ValueError('BoundedSemaphore released too many times')
- super().release()
+ super(BoundedSemaphore, self).release()
diff --git a/asyncio/log.py b/trollius/log.py
index 23a7074..23a7074 100644
--- a/asyncio/log.py
+++ b/trollius/log.py
diff --git a/asyncio/proactor_events.py b/trollius/proactor_events.py
index abe4c12..66b4caf 100644
--- a/asyncio/proactor_events.py
+++ b/trollius/proactor_events.py
@@ -16,6 +16,9 @@ from . import futures
from . import sslproto
from . import transports
from .log import logger
+from .compat import flatten_bytes
+from .py33_exceptions import (BrokenPipeError,
+ ConnectionAbortedError, ConnectionResetError)
class _ProactorBasePipeTransport(transports._FlowControlMixin,
@@ -24,7 +27,7 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
- super().__init__(extra, loop)
+ super(_ProactorBasePipeTransport, self).__init__(extra, loop)
self._set_extra(sock)
self._sock = sock
self._protocol = protocol
@@ -143,7 +146,8 @@ class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
- super().__init__(loop, sock, protocol, waiter, extra, server)
+ super(_ProactorReadPipeTransport, self).__init__(loop, sock, protocol,
+ waiter, extra, server)
self._paused = False
self._loop.call_soon(self._loop_reading)
@@ -220,9 +224,7 @@ class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
"""Transport for write pipes."""
def write(self, data):
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ data = flatten_bytes(data)
if self._eof_written:
raise RuntimeError('write_eof() already called')
@@ -301,7 +303,7 @@ class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
def __init__(self, *args, **kw):
- super().__init__(*args, **kw)
+ super(_ProactorWritePipeTransport, self).__init__(*args, **kw)
self._read_fut = self._loop._proactor.recv(self._sock, 16)
self._read_fut.add_done_callback(self._pipe_closed)
@@ -368,7 +370,7 @@ class _ProactorSocketTransport(_ProactorReadPipeTransport,
class BaseProactorEventLoop(base_events.BaseEventLoop):
def __init__(self, proactor):
- super().__init__()
+ super(BaseProactorEventLoop, self).__init__()
logger.debug('Using proactor: %s', proactor.__class__.__name__)
self._proactor = proactor
self._selector = proactor # convenient alias
@@ -383,7 +385,7 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
extra, server)
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
- *, server_side=False, server_hostname=None,
+ server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
raise NotImplementedError("Proactor event loop requires Python 3.5"
@@ -427,7 +429,7 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
self._selector = None
# Close the event loop
- super().close()
+ super(BaseProactorEventLoop, self).close()
def sock_recv(self, sock, n):
return self._proactor.recv(sock, n)
diff --git a/asyncio/protocols.py b/trollius/protocols.py
index 80fcac9..2c18287 100644
--- a/asyncio/protocols.py
+++ b/trollius/protocols.py
@@ -4,7 +4,7 @@ __all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
'SubprocessProtocol']
-class BaseProtocol:
+class BaseProtocol(object):
"""Common base class for protocol interfaces.
Usually user implements protocols that derived from BaseProtocol
diff --git a/trollius/py27_weakrefset.py b/trollius/py27_weakrefset.py
new file mode 100644
index 0000000..990c3a6
--- /dev/null
+++ b/trollius/py27_weakrefset.py
@@ -0,0 +1,202 @@
+# Access WeakSet through the weakref module.
+# This code is separated-out because it is needed
+# by abc.py to load everything else at startup.
+
+from _weakref import ref
+
+__all__ = ['WeakSet']
+
+
+class _IterationGuard(object):
+ # This context manager registers itself in the current iterators of the
+ # weak container, such as to delay all removals until the context manager
+ # exits.
+ # This technique should be relatively thread-safe (since sets are).
+
+ def __init__(self, weakcontainer):
+ # Don't create cycles
+ self.weakcontainer = ref(weakcontainer)
+
+ def __enter__(self):
+ w = self.weakcontainer()
+ if w is not None:
+ w._iterating.add(self)
+ return self
+
+ def __exit__(self, e, t, b):
+ w = self.weakcontainer()
+ if w is not None:
+ s = w._iterating
+ s.remove(self)
+ if not s:
+ w._commit_removals()
+
+
+class WeakSet(object):
+ def __init__(self, data=None):
+ self.data = set()
+ def _remove(item, selfref=ref(self)):
+ self = selfref()
+ if self is not None:
+ if self._iterating:
+ self._pending_removals.append(item)
+ else:
+ self.data.discard(item)
+ self._remove = _remove
+ # A list of keys to be removed
+ self._pending_removals = []
+ self._iterating = set()
+ if data is not None:
+ self.update(data)
+
+ def _commit_removals(self):
+ l = self._pending_removals
+ discard = self.data.discard
+ while l:
+ discard(l.pop())
+
+ def __iter__(self):
+ with _IterationGuard(self):
+ for itemref in self.data:
+ item = itemref()
+ if item is not None:
+ yield item
+
+ def __len__(self):
+ return len(self.data) - len(self._pending_removals)
+
+ def __contains__(self, item):
+ try:
+ wr = ref(item)
+ except TypeError:
+ return False
+ return wr in self.data
+
+ def __reduce__(self):
+ return (self.__class__, (list(self),),
+ getattr(self, '__dict__', None))
+
+ __hash__ = None
+
+ def add(self, item):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.add(ref(item, self._remove))
+
+ def clear(self):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.clear()
+
+ def copy(self):
+ return self.__class__(self)
+
+ def pop(self):
+ if self._pending_removals:
+ self._commit_removals()
+ while True:
+ try:
+ itemref = self.data.pop()
+ except KeyError:
+ raise KeyError('pop from empty WeakSet')
+ item = itemref()
+ if item is not None:
+ return item
+
+ def remove(self, item):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.remove(ref(item))
+
+ def discard(self, item):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.discard(ref(item))
+
+ def update(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ for element in other:
+ self.add(element)
+
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ def difference(self, other):
+ newset = self.copy()
+ newset.difference_update(other)
+ return newset
+ __sub__ = difference
+
+ def difference_update(self, other):
+ self.__isub__(other)
+ def __isub__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ if self is other:
+ self.data.clear()
+ else:
+ self.data.difference_update(ref(item) for item in other)
+ return self
+
+ def intersection(self, other):
+ return self.__class__(item for item in other if item in self)
+ __and__ = intersection
+
+ def intersection_update(self, other):
+ self.__iand__(other)
+ def __iand__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.intersection_update(ref(item) for item in other)
+ return self
+
+ def issubset(self, other):
+ return self.data.issubset(ref(item) for item in other)
+ __le__ = issubset
+
+ def __lt__(self, other):
+ return self.data < set(ref(item) for item in other)
+
+ def issuperset(self, other):
+ return self.data.issuperset(ref(item) for item in other)
+ __ge__ = issuperset
+
+ def __gt__(self, other):
+ return self.data > set(ref(item) for item in other)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self.data == set(ref(item) for item in other)
+
+ def __ne__(self, other):
+ opposite = self.__eq__(other)
+ if opposite is NotImplemented:
+ return NotImplemented
+ return not opposite
+
+ def symmetric_difference(self, other):
+ newset = self.copy()
+ newset.symmetric_difference_update(other)
+ return newset
+ __xor__ = symmetric_difference
+
+ def symmetric_difference_update(self, other):
+ self.__ixor__(other)
+ def __ixor__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ if self is other:
+ self.data.clear()
+ else:
+ self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
+ return self
+
+ def union(self, other):
+ return self.__class__(e for s in (self, other) for e in s)
+ __or__ = union
+
+ def isdisjoint(self, other):
+ return len(self.intersection(other)) == 0
diff --git a/trollius/py33_exceptions.py b/trollius/py33_exceptions.py
new file mode 100644
index 0000000..f10dfe9
--- /dev/null
+++ b/trollius/py33_exceptions.py
@@ -0,0 +1,144 @@
+__all__ = ['BlockingIOError', 'BrokenPipeError', 'ChildProcessError',
+ 'ConnectionRefusedError', 'ConnectionResetError',
+ 'InterruptedError', 'ConnectionAbortedError', 'PermissionError',
+ 'FileNotFoundError', 'ProcessLookupError',
+ ]
+
+import errno
+import select
+import socket
+import sys
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+from .compat import PY33
+
+if PY33:
+ import builtins
+ BlockingIOError = builtins.BlockingIOError
+ BrokenPipeError = builtins.BrokenPipeError
+ ChildProcessError = builtins.ChildProcessError
+ ConnectionRefusedError = builtins.ConnectionRefusedError
+ ConnectionResetError = builtins.ConnectionResetError
+ InterruptedError = builtins.InterruptedError
+ ConnectionAbortedError = builtins.ConnectionAbortedError
+ PermissionError = builtins.PermissionError
+ FileNotFoundError = builtins.FileNotFoundError
+ ProcessLookupError = builtins.ProcessLookupError
+
+else:
+ # Python < 3.3
+ class BlockingIOError(OSError):
+ pass
+
+ class BrokenPipeError(OSError):
+ pass
+
+ class ChildProcessError(OSError):
+ pass
+
+ class ConnectionRefusedError(OSError):
+ pass
+
+ class InterruptedError(OSError):
+ pass
+
+ class ConnectionResetError(OSError):
+ pass
+
+ class ConnectionAbortedError(OSError):
+ pass
+
+ class PermissionError(OSError):
+ pass
+
+ class FileNotFoundError(OSError):
+ pass
+
+ class ProcessLookupError(OSError):
+ pass
+
+
+_MAP_ERRNO = {
+ errno.EACCES: PermissionError,
+ errno.EAGAIN: BlockingIOError,
+ errno.EALREADY: BlockingIOError,
+ errno.ECHILD: ChildProcessError,
+ errno.ECONNABORTED: ConnectionAbortedError,
+ errno.ECONNREFUSED: ConnectionRefusedError,
+ errno.ECONNRESET: ConnectionResetError,
+ errno.EINPROGRESS: BlockingIOError,
+ errno.EINTR: InterruptedError,
+ errno.ENOENT: FileNotFoundError,
+ errno.EPERM: PermissionError,
+ errno.EPIPE: BrokenPipeError,
+ errno.ESHUTDOWN: BrokenPipeError,
+ errno.EWOULDBLOCK: BlockingIOError,
+ errno.ESRCH: ProcessLookupError,
+}
+
+if sys.platform == 'win32':
+ from trollius import _overlapped
+ _MAP_ERRNO.update({
+ _overlapped.ERROR_CONNECTION_REFUSED: ConnectionRefusedError,
+ _overlapped.ERROR_CONNECTION_ABORTED: ConnectionAbortedError,
+ _overlapped.ERROR_NETNAME_DELETED: ConnectionResetError,
+ })
+
+
+def get_error_class(key, default):
+ return _MAP_ERRNO.get(key, default)
+
+
+if sys.version_info >= (3,):
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+else:
+ exec("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+def _wrap_error(exc, mapping, key):
+ if key not in mapping:
+ return
+ new_err_cls = mapping[key]
+ new_err = new_err_cls(*exc.args)
+
+ # raise a new exception with the original traceback
+ if hasattr(exc, '__traceback__'):
+ traceback = exc.__traceback__
+ else:
+ traceback = sys.exc_info()[2]
+ reraise(new_err_cls, new_err, traceback)
+
+
+if not PY33:
+ def wrap_error(func, *args, **kw):
+ """
+ Wrap socket.error, IOError, OSError, select.error to raise new specialized
+ exceptions of Python 3.3 like InterruptedError (PEP 3151).
+ """
+ try:
+ return func(*args, **kw)
+ except (socket.error, IOError, OSError) as exc:
+ if ssl is not None and isinstance(exc, ssl.SSLError):
+ raise
+ if hasattr(exc, 'winerror'):
+ _wrap_error(exc, _MAP_ERRNO, exc.winerror)
+ # _MAP_ERRNO does not contain all Windows errors.
+ # For some errors like "file not found", exc.errno should
+ # be used (ex: ENOENT).
+ _wrap_error(exc, _MAP_ERRNO, exc.errno)
+ raise
+ except select.error as exc:
+ if exc.args:
+ _wrap_error(exc, _MAP_ERRNO, exc.args[0])
+ raise
+else:
+ def wrap_error(func, *args, **kw):
+ return func(*args, **kw)
diff --git a/trollius/py33_winapi.py b/trollius/py33_winapi.py
new file mode 100644
index 0000000..792bc45
--- /dev/null
+++ b/trollius/py33_winapi.py
@@ -0,0 +1,75 @@
+
+__all__ = [
+ 'CloseHandle', 'CreateNamedPipe', 'CreateFile', 'ConnectNamedPipe',
+ 'NULL',
+ 'GENERIC_READ', 'GENERIC_WRITE', 'OPEN_EXISTING', 'INFINITE',
+ 'PIPE_ACCESS_INBOUND',
+ 'PIPE_ACCESS_DUPLEX', 'PIPE_TYPE_MESSAGE', 'PIPE_READMODE_MESSAGE',
+ 'PIPE_WAIT', 'PIPE_UNLIMITED_INSTANCES', 'NMPWAIT_WAIT_FOREVER',
+ 'FILE_FLAG_OVERLAPPED', 'FILE_FLAG_FIRST_PIPE_INSTANCE',
+ 'WaitForMultipleObjects', 'WaitForSingleObject',
+ 'WAIT_OBJECT_0', 'ERROR_IO_PENDING',
+ ]
+
+try:
+ # FIXME: use _overlapped on Python 3.3? see windows_utils.pipe()
+ from _winapi import (
+ CloseHandle, CreateNamedPipe, CreateFile, ConnectNamedPipe,
+ NULL,
+ GENERIC_READ, GENERIC_WRITE, OPEN_EXISTING, INFINITE,
+ PIPE_ACCESS_INBOUND,
+ PIPE_ACCESS_DUPLEX, PIPE_TYPE_MESSAGE, PIPE_READMODE_MESSAGE,
+ PIPE_WAIT, PIPE_UNLIMITED_INSTANCES, NMPWAIT_WAIT_FOREVER,
+ FILE_FLAG_OVERLAPPED, FILE_FLAG_FIRST_PIPE_INSTANCE,
+ WaitForMultipleObjects, WaitForSingleObject,
+ WAIT_OBJECT_0, ERROR_IO_PENDING,
+ )
+except ImportError:
+ # Python < 3.3
+ from _multiprocessing import win32
+ import _subprocess
+
+ from trollius import _overlapped
+
+ CloseHandle = win32.CloseHandle
+ CreateNamedPipe = win32.CreateNamedPipe
+ CreateFile = win32.CreateFile
+ NULL = win32.NULL
+
+ GENERIC_READ = win32.GENERIC_READ
+ GENERIC_WRITE = win32.GENERIC_WRITE
+ OPEN_EXISTING = win32.OPEN_EXISTING
+ INFINITE = win32.INFINITE
+
+ PIPE_ACCESS_INBOUND = win32.PIPE_ACCESS_INBOUND
+ PIPE_ACCESS_DUPLEX = win32.PIPE_ACCESS_DUPLEX
+ PIPE_READMODE_MESSAGE = win32.PIPE_READMODE_MESSAGE
+ PIPE_TYPE_MESSAGE = win32.PIPE_TYPE_MESSAGE
+ PIPE_WAIT = win32.PIPE_WAIT
+ PIPE_UNLIMITED_INSTANCES = win32.PIPE_UNLIMITED_INSTANCES
+ NMPWAIT_WAIT_FOREVER = win32.NMPWAIT_WAIT_FOREVER
+
+ FILE_FLAG_OVERLAPPED = 0x40000000
+ FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000
+
+ WAIT_OBJECT_0 = _subprocess.WAIT_OBJECT_0
+ WaitForSingleObject = _subprocess.WaitForSingleObject
+ ERROR_IO_PENDING = _overlapped.ERROR_IO_PENDING
+
+ def ConnectNamedPipe(handle, overlapped):
+ ov = _overlapped.Overlapped()
+ ov.ConnectNamedPipe(handle)
+ return ov
+
+ def WaitForMultipleObjects(events, wait_all, timeout):
+ if not wait_all:
+ raise NotImplementedError()
+
+ for ev in events:
+ res = WaitForSingleObject(ev, timeout)
+ if res != WAIT_OBJECT_0:
+ err = win32.GetLastError()
+ msg = _overlapped.FormatMessage(err)
+ raise WindowsError(err, msg)
+
+ return WAIT_OBJECT_0
diff --git a/trollius/py3_ssl.py b/trollius/py3_ssl.py
new file mode 100644
index 0000000..c592ee6
--- /dev/null
+++ b/trollius/py3_ssl.py
@@ -0,0 +1,149 @@
+"""
+Backport SSL functions and exceptions:
+- BACKPORT_SSL_ERRORS (bool)
+- SSLWantReadError, SSLWantWriteError, SSLEOFError
+- BACKPORT_SSL_CONTEXT (bool)
+- SSLContext
+- wrap_socket()
+- wrap_ssl_error()
+"""
+import errno
+import ssl
+import sys
+from trollius.py33_exceptions import _wrap_error
+
+__all__ = ["SSLContext", "BACKPORT_SSL_ERRORS", "BACKPORT_SSL_CONTEXT",
+ "SSLWantReadError", "SSLWantWriteError", "SSLEOFError",
+ ]
+
+try:
+ SSLWantReadError = ssl.SSLWantReadError
+ SSLWantWriteError = ssl.SSLWantWriteError
+ SSLEOFError = ssl.SSLEOFError
+ BACKPORT_SSL_ERRORS = False
+except AttributeError:
+ # Python < 3.3
+ BACKPORT_SSL_ERRORS = True
+
+ class SSLWantReadError(ssl.SSLError):
+ pass
+
+ class SSLWantWriteError(ssl.SSLError):
+ pass
+
+ class SSLEOFError(ssl.SSLError):
+ pass
+
+
+try:
+ SSLContext = ssl.SSLContext
+ BACKPORT_SSL_CONTEXT = False
+ wrap_socket = ssl.wrap_socket
+except AttributeError:
+ # Python < 3.2
+ BACKPORT_SSL_CONTEXT = True
+
+ if (sys.version_info < (2, 6, 6)):
+ # SSLSocket constructor has bugs in Python older than 2.6.6:
+ # http://bugs.python.org/issue5103
+ # http://bugs.python.org/issue7943
+ from socket import socket, error as socket_error, _delegate_methods
+ import _ssl
+
+ class BackportSSLSocket(ssl.SSLSocket):
+ # Override SSLSocket.__init__()
+ def __init__(self, sock, keyfile=None, certfile=None,
+ server_side=False, cert_reqs=ssl.CERT_NONE,
+ ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=None,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True):
+ socket.__init__(self, _sock=sock._sock)
+ # The initializer for socket overrides the methods send(), recv(), etc.
+ # in the instance, which we don't need -- but we want to provide the
+ # methods defined in SSLSocket.
+ for attr in _delegate_methods:
+ try:
+ delattr(self, attr)
+ except AttributeError:
+ pass
+
+ if certfile and not keyfile:
+ keyfile = certfile
+ # see if it's connected
+ try:
+ socket.getpeername(self)
+ except socket_error as e:
+ if e.errno != errno.ENOTCONN:
+ raise
+ # no, no connection yet
+ self._connected = False
+ self._sslobj = None
+ else:
+ # yes, create the SSL object
+ self._connected = True
+ self._sslobj = _ssl.sslwrap(self._sock, server_side,
+ keyfile, certfile,
+ cert_reqs, ssl_version, ca_certs)
+ if do_handshake_on_connect:
+ self.do_handshake()
+ self.keyfile = keyfile
+ self.certfile = certfile
+ self.cert_reqs = cert_reqs
+ self.ssl_version = ssl_version
+ self.ca_certs = ca_certs
+ self.do_handshake_on_connect = do_handshake_on_connect
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+
+ def wrap_socket(sock, server_hostname=None, **kwargs):
+ # ignore server_hostname parameter, not supported
+ kwargs.pop('server_hostname', None)
+ return BackportSSLSocket(sock, **kwargs)
+ else:
+ _wrap_socket = ssl.wrap_socket
+
+ def wrap_socket(sock, **kwargs):
+ # ignore server_hostname parameter, not supported
+ kwargs.pop('server_hostname', None)
+ return _wrap_socket(sock, **kwargs)
+
+
+ class SSLContext(object):
+ def __init__(self, protocol=ssl.PROTOCOL_SSLv23):
+ self.protocol = protocol
+ self.certfile = None
+ self.keyfile = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def wrap_socket(self, sock, **kwargs):
+ return wrap_socket(sock,
+ ssl_version=self.protocol,
+ certfile=self.certfile,
+ keyfile=self.keyfile,
+ **kwargs)
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_NONE
+
+
+if BACKPORT_SSL_ERRORS:
+ _MAP_ERRORS = {
+ ssl.SSL_ERROR_WANT_READ: SSLWantReadError,
+ ssl.SSL_ERROR_WANT_WRITE: SSLWantWriteError,
+ ssl.SSL_ERROR_EOF: SSLEOFError,
+ }
+
+ def wrap_ssl_error(func, *args, **kw):
+ try:
+ return func(*args, **kw)
+ except ssl.SSLError as exc:
+ if exc.args:
+ _wrap_error(exc, _MAP_ERRORS, exc.args[0])
+ raise
+else:
+ def wrap_ssl_error(func, *args, **kw):
+ return func(*args, **kw)
diff --git a/asyncio/queues.py b/trollius/queues.py
index 021043d..18167ab 100644
--- a/asyncio/queues.py
+++ b/trollius/queues.py
@@ -9,7 +9,7 @@ from . import compat
from . import events
from . import futures
from . import locks
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
class QueueEmpty(Exception):
@@ -26,7 +26,7 @@ class QueueFull(Exception):
pass
-class Queue:
+class Queue(object):
"""A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
@@ -38,7 +38,7 @@ class Queue:
interrupted between calling qsize() and doing an operation on the Queue.
"""
- def __init__(self, maxsize=0, *, loop=None):
+ def __init__(self, maxsize=0, loop=None):
if loop is None:
self._loop = events.get_event_loop()
else:
@@ -73,22 +73,22 @@ class Queue:
self._finished.clear()
def __repr__(self):
- return '<{} at {:#x} {}>'.format(
+ return '<{0} at {1:#x} {2}>'.format(
type(self).__name__, id(self), self._format())
def __str__(self):
- return '<{} {}>'.format(type(self).__name__, self._format())
+ return '<{0} {1}>'.format(type(self).__name__, self._format())
def _format(self):
- result = 'maxsize={!r}'.format(self._maxsize)
+ result = 'maxsize={0!r}'.format(self._maxsize)
if getattr(self, '_queue', None):
- result += ' _queue={!r}'.format(list(self._queue))
+ result += ' _queue={0!r}'.format(list(self._queue))
if self._getters:
- result += ' _getters[{}]'.format(len(self._getters))
+ result += ' _getters[{0}]'.format(len(self._getters))
if self._putters:
- result += ' _putters[{}]'.format(len(self._putters))
+ result += ' _putters[{0}]'.format(len(self._putters))
if self._unfinished_tasks:
- result += ' tasks={}'.format(self._unfinished_tasks)
+ result += ' tasks={0}'.format(self._unfinished_tasks)
return result
def _consume_done_getters(self):
@@ -149,7 +149,7 @@ class Queue:
waiter = futures.Future(loop=self._loop)
self._putters.append(waiter)
- yield from waiter
+ yield From(waiter)
self._put(item)
else:
@@ -195,15 +195,16 @@ class Queue:
# ChannelTest.test_wait.
self._loop.call_soon(putter._set_result_unless_cancelled, None)
- return self._get()
+ raise Return(self._get())
elif self.qsize():
- return self._get()
+ raise Return(self._get())
else:
waiter = futures.Future(loop=self._loop)
self._getters.append(waiter)
try:
- return (yield from waiter)
+ value = (yield From(waiter))
+ raise Return(value)
except futures.CancelledError:
# if we get CancelledError, it means someone cancelled this
# get() coroutine. But there is a chance that the waiter
@@ -286,7 +287,7 @@ class Queue:
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
- yield from self._finished.wait()
+ yield From(self._finished.wait())
class PriorityQueue(Queue):
diff --git a/asyncio/selector_events.py b/trollius/selector_events.py
index 4a99658..67ef26e 100644
--- a/asyncio/selector_events.py
+++ b/trollius/selector_events.py
@@ -10,9 +10,11 @@ import collections
import errno
import functools
import socket
+import sys
import warnings
try:
import ssl
+ from .py3_ssl import wrap_ssl_error, SSLWantReadError, SSLWantWriteError
except ImportError: # pragma: no cover
ssl = None
@@ -22,10 +24,28 @@ from . import constants
from . import events
from . import futures
from . import selectors
-from . import transports
from . import sslproto
-from .coroutines import coroutine
+from . import transports
+from .compat import flatten_bytes
+from .coroutines import coroutine, From
from .log import logger
+from .py33_exceptions import (wrap_error,
+ BlockingIOError, InterruptedError, ConnectionAbortedError, BrokenPipeError,
+ ConnectionResetError)
+
+# On Mac OS 10.6 with Python 2.6.1 or OpenIndiana 148 with Python 2.6.4,
+# _SelectorSslTransport._read_ready() hangs if the socket has no data.
+# Example: test_events.test_create_server_ssl()
+_SSL_REQUIRES_SELECT = (sys.version_info < (2, 6, 6))
+if _SSL_REQUIRES_SELECT:
+ import select
+
+
+def _get_socket_error(sock, address):
+ err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err != 0:
+ # Jump to the except clause below.
+ raise OSError(err, 'Connect call failed %s' % (address,))
def _test_selector_event(selector, fd, event):
@@ -46,7 +66,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""
def __init__(self, selector=None):
- super().__init__()
+ super(BaseSelectorEventLoop, self).__init__()
if selector is None:
selector = selectors.DefaultSelector()
@@ -54,13 +74,13 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
self._selector = selector
self._make_self_pipe()
- def _make_socket_transport(self, sock, protocol, waiter=None, *,
+ def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
return _SelectorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
- *, server_side=False, server_hostname=None,
+ server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
return self._make_legacy_ssl_transport(
@@ -75,7 +95,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
return ssl_protocol._app_transport
def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext,
- waiter, *,
+ waiter,
server_side=False, server_hostname=None,
extra=None, server=None):
# Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used
@@ -95,7 +115,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if self.is_closed():
return
self._close_self_pipe()
- super().close()
+ super(BaseSelectorEventLoop, self).close()
if self._selector is not None:
self._selector.close()
self._selector = None
@@ -125,7 +145,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
def _read_from_self(self):
while True:
try:
- data = self._ssock.recv(4096)
+ data = wrap_error(self._ssock.recv, 4096)
if not data:
break
self._process_self_data(data)
@@ -143,7 +163,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
csock = self._csock
if csock is not None:
try:
- csock.send(b'\0')
+ wrap_error(csock.send, b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
@@ -158,14 +178,14 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
def _accept_connection(self, protocol_factory, sock,
sslcontext=None, server=None):
try:
- conn, addr = sock.accept()
+ conn, addr = wrap_error(sock.accept)
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
conn.setblocking(False)
except (BlockingIOError, InterruptedError, ConnectionAbortedError):
pass # False alarm.
- except OSError as exc:
+ except socket.error as exc:
# There's nowhere to send the error, so just log it.
if exc.errno in (errno.EMFILE, errno.ENFILE,
errno.ENOBUFS, errno.ENOMEM):
@@ -207,7 +227,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
server=server)
try:
- yield from waiter
+ yield From(waiter)
except:
transport.close()
raise
@@ -331,7 +351,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if fut.cancelled():
return
try:
- data = sock.recv(n)
+ data = wrap_error(sock.recv, n)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_recv, fut, True, sock, n)
except Exception as exc:
@@ -368,7 +388,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
return
try:
- n = sock.send(data)
+ n = wrap_error(sock.send, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
@@ -408,7 +428,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
def _sock_connect(self, fut, sock, address):
fd = sock.fileno()
try:
- sock.connect(address)
+ wrap_error(sock.connect, address)
except (BlockingIOError, InterruptedError):
# Issue #23618: When the C function connect() fails with EINTR, the
# connection runs in background. We have to wait until the socket
@@ -430,10 +450,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
return
try:
- err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- if err != 0:
- # Jump to any except clause below.
- raise OSError(err, 'Connect call failed %s' % (address,))
+ wrap_error(_get_socket_error, sock, address)
except (BlockingIOError, InterruptedError):
# socket is still registered, the callback will be retried later
pass
@@ -465,7 +482,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if fut.cancelled():
return
try:
- conn, address = sock.accept()
+ conn, address = wrap_error(sock.accept)
conn.setblocking(False)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_accept, fut, True, sock)
@@ -506,7 +523,7 @@ class _SelectorTransport(transports._FlowControlMixin,
_sock = None
def __init__(self, loop, sock, protocol, extra=None, server=None):
- super().__init__(extra, loop)
+ super(_SelectorTransport, self).__init__(extra, loop)
self._extra['socket'] = sock
self._extra['sockname'] = sock.getsockname()
if 'peername' not in self._extra:
@@ -593,7 +610,7 @@ class _SelectorTransport(transports._FlowControlMixin,
if self._conn_lost:
return
if self._buffer:
- self._buffer.clear()
+ del self._buffer[:]
self._loop.remove_writer(self._sock_fd)
if not self._closing:
self._closing = True
@@ -623,7 +640,7 @@ class _SelectorSocketTransport(_SelectorTransport):
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
- super().__init__(loop, sock, protocol, extra, server)
+ super(_SelectorSocketTransport, self).__init__(loop, sock, protocol, extra, server)
self._eof = False
self._paused = False
@@ -657,7 +674,7 @@ class _SelectorSocketTransport(_SelectorTransport):
def _read_ready(self):
try:
- data = self._sock.recv(self.max_size)
+ data = wrap_error(self._sock.recv, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
@@ -678,9 +695,7 @@ class _SelectorSocketTransport(_SelectorTransport):
self.close()
def write(self, data):
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ data = flatten_bytes(data)
if self._eof:
raise RuntimeError('Cannot call write() after write_eof()')
if not data:
@@ -695,7 +710,7 @@ class _SelectorSocketTransport(_SelectorTransport):
if not self._buffer:
# Optimization: try to send now.
try:
- n = self._sock.send(data)
+ n = wrap_error(self._sock.send, data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
@@ -715,13 +730,14 @@ class _SelectorSocketTransport(_SelectorTransport):
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
+ data = flatten_bytes(self._buffer)
try:
- n = self._sock.send(self._buffer)
+ n = wrap_error(self._sock.send, data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
- self._buffer.clear()
+ del self._buffer[:]
self._fatal_error(exc, 'Fatal write error on socket transport')
else:
if n:
@@ -766,7 +782,7 @@ class _SelectorSslTransport(_SelectorTransport):
wrap_kwargs['server_hostname'] = server_hostname
sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)
- super().__init__(loop, sslsock, protocol, extra, server)
+ super(_SelectorSslTransport, self).__init__(loop, sslsock, protocol, extra, server)
# the protocol connection is only made after the SSL handshake
self._protocol_connected = False
@@ -797,12 +813,12 @@ class _SelectorSslTransport(_SelectorTransport):
def _on_handshake(self, start_time):
try:
- self._sock.do_handshake()
- except ssl.SSLWantReadError:
+ wrap_ssl_error(self._sock.do_handshake)
+ except SSLWantReadError:
self._loop.add_reader(self._sock_fd,
self._on_handshake, start_time)
return
- except ssl.SSLWantWriteError:
+ except SSLWantWriteError:
self._loop.add_writer(self._sock_fd,
self._on_handshake, start_time)
return
@@ -842,8 +858,9 @@ class _SelectorSslTransport(_SelectorTransport):
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=self._sock.cipher(),
- compression=self._sock.compression(),
)
+ if hasattr(self._sock, 'compression'):
+ self._extra['compression'] = self._sock.compression()
self._read_wants_write = False
self._write_wants_read = False
@@ -883,6 +900,9 @@ class _SelectorSslTransport(_SelectorTransport):
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
+ def _sock_recv(self):
+ return wrap_ssl_error(self._sock.recv, self.max_size)
+
def _read_ready(self):
if self._write_wants_read:
self._write_wants_read = False
@@ -892,10 +912,16 @@ class _SelectorSslTransport(_SelectorTransport):
self._loop.add_writer(self._sock_fd, self._write_ready)
try:
- data = self._sock.recv(self.max_size)
- except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):
+ if _SSL_REQUIRES_SELECT:
+ rfds = (self._sock.fileno(),)
+ rfds = select.select(rfds, (), (), 0.0)[0]
+ if not rfds:
+ # False alarm.
+ return
+ data = wrap_error(self._sock_recv)
+ except (BlockingIOError, InterruptedError, SSLWantReadError):
pass
- except ssl.SSLWantWriteError:
+ except SSLWantWriteError:
self._read_wants_write = True
self._loop.remove_reader(self._sock_fd)
self._loop.add_writer(self._sock_fd, self._write_ready)
@@ -924,17 +950,18 @@ class _SelectorSslTransport(_SelectorTransport):
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._buffer:
+ data = flatten_bytes(self._buffer)
try:
- n = self._sock.send(self._buffer)
- except (BlockingIOError, InterruptedError, ssl.SSLWantWriteError):
+ n = wrap_error(self._sock.send, data)
+ except (BlockingIOError, InterruptedError, SSLWantWriteError):
n = 0
- except ssl.SSLWantReadError:
+ except SSLWantReadError:
n = 0
self._loop.remove_writer(self._sock_fd)
self._write_wants_read = True
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
- self._buffer.clear()
+ del self._buffer[:]
self._fatal_error(exc, 'Fatal write error on SSL transport')
return
@@ -949,9 +976,7 @@ class _SelectorSslTransport(_SelectorTransport):
self._call_connection_lost(None)
def write(self, data):
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ data = flatten_bytes(data)
if not data:
return
@@ -978,7 +1003,8 @@ class _SelectorDatagramTransport(_SelectorTransport):
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
- super().__init__(loop, sock, protocol, extra)
+ super(_SelectorDatagramTransport, self).__init__(loop, sock,
+ protocol, extra)
self._address = address
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
@@ -993,7 +1019,7 @@ class _SelectorDatagramTransport(_SelectorTransport):
def _read_ready(self):
try:
- data, addr = self._sock.recvfrom(self.max_size)
+ data, addr = wrap_error(self._sock.recvfrom, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
@@ -1004,9 +1030,7 @@ class _SelectorDatagramTransport(_SelectorTransport):
self._protocol.datagram_received(data, addr)
def sendto(self, data, addr=None):
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ data = flatten_bytes(data)
if not data:
return
@@ -1024,9 +1048,9 @@ class _SelectorDatagramTransport(_SelectorTransport):
# Attempt to send it right away first.
try:
if self._address:
- self._sock.send(data)
+ wrap_error(self._sock.send, data)
else:
- self._sock.sendto(data, addr)
+ wrap_error(self._sock.sendto, data, addr)
return
except (BlockingIOError, InterruptedError):
self._loop.add_writer(self._sock_fd, self._sendto_ready)
@@ -1047,9 +1071,9 @@ class _SelectorDatagramTransport(_SelectorTransport):
data, addr = self._buffer.popleft()
try:
if self._address:
- self._sock.send(data)
+ wrap_error(self._sock.send, data)
else:
- self._sock.sendto(data, addr)
+ wrap_error(self._sock.sendto, data, addr)
except (BlockingIOError, InterruptedError):
self._buffer.appendleft((data, addr)) # Try again later.
break
diff --git a/asyncio/selectors.py b/trollius/selectors.py
index 6d569c3..cf0475d 100644
--- a/asyncio/selectors.py
+++ b/trollius/selectors.py
@@ -11,6 +11,9 @@ import math
import select
import sys
+from .py33_exceptions import wrap_error, InterruptedError
+from .compat import integer_types
+
# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)
@@ -29,16 +32,16 @@ def _fileobj_to_fd(fileobj):
Raises:
ValueError if the object is invalid
"""
- if isinstance(fileobj, int):
+ if isinstance(fileobj, integer_types):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: "
- "{!r}".format(fileobj)) from None
+ "{0!r}".format(fileobj))
if fd < 0:
- raise ValueError("Invalid file descriptor: {}".format(fd))
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
@@ -61,13 +64,13 @@ class _SelectorMapping(Mapping):
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ raise KeyError("{0!r} is not registered".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
-class BaseSelector(metaclass=ABCMeta):
+class BaseSelector(object):
"""Selector abstract base class.
A selector supports registering file objects to be monitored for specific
@@ -81,6 +84,7 @@ class BaseSelector(metaclass=ABCMeta):
depending on the platform. The default `Selector` class uses the most
efficient implementation on the current platform.
"""
+ __metaclass__ = ABCMeta
@abstractmethod
def register(self, fileobj, events, data=None):
@@ -179,7 +183,7 @@ class BaseSelector(metaclass=ABCMeta):
try:
return mapping[fileobj]
except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ raise KeyError("{0!r} is not registered".format(fileobj))
@abstractmethod
def get_map(self):
@@ -223,12 +227,12 @@ class _BaseSelectorImpl(BaseSelector):
def register(self, fileobj, events, data=None):
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
- raise ValueError("Invalid events: {!r}".format(events))
+ raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
- raise KeyError("{!r} (FD {}) is already registered"
+ raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
@@ -238,7 +242,7 @@ class _BaseSelectorImpl(BaseSelector):
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
@@ -246,7 +250,7 @@ class _BaseSelectorImpl(BaseSelector):
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
@@ -282,12 +286,12 @@ class SelectSelector(_BaseSelectorImpl):
"""Select-based selector."""
def __init__(self):
- super().__init__()
+ super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
@@ -295,7 +299,7 @@ class SelectSelector(_BaseSelectorImpl):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
@@ -311,7 +315,8 @@ class SelectSelector(_BaseSelectorImpl):
timeout = None if timeout is None else max(timeout, 0)
ready = []
try:
- r, w, _ = self._select(self._readers, self._writers, [], timeout)
+ r, w, _ = wrap_error(self._select,
+ self._readers, self._writers, [], timeout)
except InterruptedError:
return ready
r = set(r)
@@ -335,11 +340,11 @@ if hasattr(select, 'poll'):
"""Poll-based selector."""
def __init__(self):
- super().__init__()
+ super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(PollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
@@ -349,7 +354,7 @@ if hasattr(select, 'poll'):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
@@ -361,10 +366,10 @@ if hasattr(select, 'poll'):
else:
# poll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
- timeout = math.ceil(timeout * 1e3)
+ timeout = int(math.ceil(timeout * 1e3))
ready = []
try:
- fd_event_list = self._poll.poll(timeout)
+ fd_event_list = wrap_error(self._poll.poll, timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
@@ -386,14 +391,14 @@ if hasattr(select, 'epoll'):
"""Epoll-based selector."""
def __init__(self):
- super().__init__()
+ super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(EpollSelector, self).register(fileobj, events, data)
epoll_events = 0
if events & EVENT_READ:
epoll_events |= select.EPOLLIN
@@ -403,10 +408,10 @@ if hasattr(select, 'epoll'):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(EpollSelector, self).unregister(fileobj)
try:
self._epoll.unregister(key.fd)
- except OSError:
+ except IOError:
# This can happen if the FD was closed since it
# was registered.
pass
@@ -429,7 +434,7 @@ if hasattr(select, 'epoll'):
ready = []
try:
- fd_event_list = self._epoll.poll(timeout, max_ev)
+ fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev)
except InterruptedError:
return ready
for fd, event in fd_event_list:
@@ -446,7 +451,7 @@ if hasattr(select, 'epoll'):
def close(self):
self._epoll.close()
- super().close()
+ super(EpollSelector, self).close()
if hasattr(select, 'devpoll'):
@@ -455,14 +460,14 @@ if hasattr(select, 'devpoll'):
"""Solaris /dev/poll selector."""
def __init__(self):
- super().__init__()
+ super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
@@ -472,7 +477,7 @@ if hasattr(select, 'devpoll'):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
@@ -504,7 +509,7 @@ if hasattr(select, 'devpoll'):
def close(self):
self._devpoll.close()
- super().close()
+ super(DevpollSelector, self).close()
if hasattr(select, 'kqueue'):
@@ -513,14 +518,14 @@ if hasattr(select, 'kqueue'):
"""Kqueue-based selector."""
def __init__(self):
- super().__init__()
+ super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_ADD)
@@ -532,7 +537,7 @@ if hasattr(select, 'kqueue'):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
@@ -557,7 +562,8 @@ if hasattr(select, 'kqueue'):
max_ev = len(self._fd_to_key)
ready = []
try:
- kev_list = self._kqueue.control(None, max_ev, timeout)
+ kev_list = wrap_error(self._kqueue.control,
+ None, max_ev, timeout)
except InterruptedError:
return ready
for kev in kev_list:
@@ -576,7 +582,7 @@ if hasattr(select, 'kqueue'):
def close(self):
self._kqueue.close()
- super().close()
+ super(KqueueSelector, self).close()
# Choose the best implementation, roughly:
diff --git a/asyncio/sslproto.py b/trollius/sslproto.py
index e5ae49a..1404fd7 100644
--- a/asyncio/sslproto.py
+++ b/trollius/sslproto.py
@@ -2,6 +2,7 @@ import collections
import warnings
try:
import ssl
+ from .py3_ssl import BACKPORT_SSL_CONTEXT
except ImportError: # pragma: no cover
ssl = None
@@ -9,6 +10,7 @@ from . import compat
from . import protocols
from . import transports
from .log import logger
+from .py33_exceptions import BrokenPipeError, ConnectionResetError
def _create_transport_context(server_side, server_hostname):
@@ -26,10 +28,11 @@ def _create_transport_context(server_side, server_hostname):
else:
# Fallback for Python 3.3.
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext.options |= ssl.OP_NO_SSLv2
- sslcontext.options |= ssl.OP_NO_SSLv3
- sslcontext.set_default_verify_paths()
- sslcontext.verify_mode = ssl.CERT_REQUIRED
+ if not BACKPORT_SSL_CONTEXT:
+ sslcontext.options |= ssl.OP_NO_SSLv2
+ sslcontext.options |= ssl.OP_NO_SSLv3
+ sslcontext.set_default_verify_paths()
+ sslcontext.verify_mode = ssl.CERT_REQUIRED
return sslcontext
@@ -43,6 +46,12 @@ _DO_HANDSHAKE = "DO_HANDSHAKE"
_WRAPPED = "WRAPPED"
_SHUTDOWN = "SHUTDOWN"
+if ssl is not None:
+ if hasattr(ssl, 'CertificateError'):
+ _SSL_ERRORS = (ssl.SSLError, ssl.CertificateError)
+ else:
+ _SSL_ERRORS = ssl.SSLError
+
class _SSLPipe(object):
"""An SSL "Pipe".
@@ -224,7 +233,7 @@ class _SSLPipe(object):
elif self._state == _UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
- except (ssl.SSLError, ssl.CertificateError) as exc:
+ except _SSL_ERRORS as exc:
if getattr(exc, 'errno', None) not in (
ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
@@ -569,7 +578,8 @@ class SSLProtocol(protocols.Protocol):
ssl.match_hostname(peercert, self._server_hostname)
except BaseException as exc:
if self._loop.get_debug():
- if isinstance(exc, ssl.CertificateError):
+ if (hasattr(ssl, 'CertificateError')
+ and isinstance(exc, ssl.CertificateError)):
logger.warning("%r: SSL handshake failed "
"on verifying the certificate",
self, exc_info=True)
diff --git a/asyncio/streams.py b/trollius/streams.py
index 6484c43..cde58fb 100644
--- a/asyncio/streams.py
+++ b/trollius/streams.py
@@ -15,7 +15,8 @@ from . import compat
from . import events
from . import futures
from . import protocols
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
+from .py33_exceptions import ConnectionResetError
from .log import logger
@@ -37,7 +38,7 @@ class IncompleteReadError(EOFError):
@coroutine
-def open_connection(host=None, port=None, *,
+def open_connection(host=None, port=None,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""A wrapper for create_connection() returning a (reader, writer) pair.
@@ -60,14 +61,14 @@ def open_connection(host=None, port=None, *,
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
- transport, _ = yield from loop.create_connection(
- lambda: protocol, host, port, **kwds)
+ transport, _ = yield From(loop.create_connection(
+ lambda: protocol, host, port, **kwds))
writer = StreamWriter(transport, protocol, reader, loop)
- return reader, writer
+ raise Return(reader, writer)
@coroutine
-def start_server(client_connected_cb, host=None, port=None, *,
+def start_server(client_connected_cb, host=None, port=None,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Start a socket server, call back for each client connected.
@@ -99,28 +100,29 @@ def start_server(client_connected_cb, host=None, port=None, *,
loop=loop)
return protocol
- return (yield from loop.create_server(factory, host, port, **kwds))
+ server = yield From(loop.create_server(factory, host, port, **kwds))
+ raise Return(server)
if hasattr(socket, 'AF_UNIX'):
# UNIX Domain Sockets are supported on this platform
@coroutine
- def open_unix_connection(path=None, *,
+ def open_unix_connection(path=None,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `open_connection` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
- transport, _ = yield from loop.create_unix_connection(
- lambda: protocol, path, **kwds)
+ transport, _ = yield From(loop.create_unix_connection(
+ lambda: protocol, path, **kwds))
writer = StreamWriter(transport, protocol, reader, loop)
- return reader, writer
+ raise Return(reader, writer)
@coroutine
- def start_unix_server(client_connected_cb, path=None, *,
+ def start_unix_server(client_connected_cb, path=None,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `start_server` but works with UNIX Domain Sockets."""
if loop is None:
@@ -132,7 +134,8 @@ if hasattr(socket, 'AF_UNIX'):
loop=loop)
return protocol
- return (yield from loop.create_unix_server(factory, path, **kwds))
+ server = (yield From(loop.create_unix_server(factory, path, **kwds)))
+ raise Return(server)
class FlowControlMixin(protocols.Protocol):
@@ -198,7 +201,7 @@ class FlowControlMixin(protocols.Protocol):
assert waiter is None or waiter.cancelled()
waiter = futures.Future(loop=self._loop)
self._drain_waiter = waiter
- yield from waiter
+ yield From(waiter)
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
@@ -211,7 +214,7 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
"""
def __init__(self, stream_reader, client_connected_cb=None, loop=None):
- super().__init__(loop=loop)
+ super(StreamReaderProtocol, self).__init__(loop=loop)
self._stream_reader = stream_reader
self._stream_writer = None
self._client_connected_cb = client_connected_cb
@@ -232,7 +235,7 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
self._stream_reader.feed_eof()
else:
self._stream_reader.set_exception(exc)
- super().connection_lost(exc)
+ super(StreamReaderProtocol, self).connection_lost(exc)
def data_received(self, data):
self._stream_reader.feed_data(data)
@@ -242,7 +245,7 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
return True
-class StreamWriter:
+class StreamWriter(object):
"""Wraps a Transport.
This exposes write(), writelines(), [can_]write_eof(),
@@ -295,16 +298,16 @@ class StreamWriter:
The intended use is to write
w.write(data)
- yield from w.drain()
+ yield From(w.drain())
"""
if self._reader is not None:
exc = self._reader.exception()
if exc is not None:
raise exc
- yield from self._protocol._drain_helper()
+ yield From(self._protocol._drain_helper())
-class StreamReader:
+class StreamReader(object):
def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
# The line length limit is a security feature;
@@ -409,9 +412,16 @@ class StreamReader:
raise RuntimeError('%s() called while another coroutine is '
'already waiting for incoming data' % func_name)
+ # In asyncio, there is no need to recheck if we got data or EOF thanks
+ # to "yield from". In trollius, a StreamReader method can be called
+ # after the _wait_for_data() coroutine is scheduled and before it is
+ # really executed.
+ if self._buffer or self._eof:
+ return
+
self._waiter = futures.Future(loop=self._loop)
try:
- yield from self._waiter
+ yield From(self._waiter)
finally:
self._waiter = None
@@ -428,7 +438,7 @@ class StreamReader:
ichar = self._buffer.find(b'\n')
if ichar < 0:
line.extend(self._buffer)
- self._buffer.clear()
+ del self._buffer[:]
else:
ichar += 1
line.extend(self._buffer[:ichar])
@@ -443,10 +453,10 @@ class StreamReader:
break
if not_enough:
- yield from self._wait_for_data('readline')
+ yield From(self._wait_for_data('readline'))
self._maybe_resume_transport()
- return bytes(line)
+ raise Return(bytes(line))
@coroutine
def read(self, n=-1):
@@ -454,7 +464,7 @@ class StreamReader:
raise self._exception
if not n:
- return b''
+ raise Return(b'')
if n < 0:
# This used to just loop creating a new waiter hoping to
@@ -463,25 +473,25 @@ class StreamReader:
# bytes. So just call self.read(self._limit) until EOF.
blocks = []
while True:
- block = yield from self.read(self._limit)
+ block = yield From(self.read(self._limit))
if not block:
break
blocks.append(block)
- return b''.join(blocks)
+ raise Return(b''.join(blocks))
else:
if not self._buffer and not self._eof:
- yield from self._wait_for_data('read')
+ yield From(self._wait_for_data('read'))
if n < 0 or len(self._buffer) <= n:
data = bytes(self._buffer)
- self._buffer.clear()
+ del self._buffer[:]
else:
# n > 0 and len(self._buffer) > n
data = bytes(self._buffer[:n])
del self._buffer[:n]
self._maybe_resume_transport()
- return data
+ raise Return(data)
@coroutine
def readexactly(self, n):
@@ -497,23 +507,24 @@ class StreamReader:
blocks = []
while n > 0:
- block = yield from self.read(n)
+ block = yield From(self.read(n))
if not block:
partial = b''.join(blocks)
raise IncompleteReadError(partial, len(partial) + n)
blocks.append(block)
n -= len(block)
- return b''.join(blocks)
-
- if compat.PY35:
- @coroutine
- def __aiter__(self):
- return self
-
- @coroutine
- def __anext__(self):
- val = yield from self.readline()
- if val == b'':
- raise StopAsyncIteration
- return val
+ raise Return(b''.join(blocks))
+
+ # FIXME: should we support __aiter__ and __anext__ in Trollius?
+ #if compat.PY35:
+ # @coroutine
+ # def __aiter__(self):
+ # return self
+ #
+ # @coroutine
+ # def __anext__(self):
+ # val = yield from self.readline()
+ # if val == b'':
+ # raise StopAsyncIteration
+ # return val
diff --git a/asyncio/subprocess.py b/trollius/subprocess.py
index ead4039..4ed2b5c 100644
--- a/asyncio/subprocess.py
+++ b/trollius/subprocess.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
import subprocess
@@ -6,13 +8,15 @@ from . import events
from . import protocols
from . import streams
from . import tasks
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
+from .py33_exceptions import BrokenPipeError, ConnectionResetError
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
-DEVNULL = subprocess.DEVNULL
+if hasattr(subprocess, 'DEVNULL'):
+ DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
@@ -20,7 +24,7 @@ class SubprocessStreamProtocol(streams.FlowControlMixin,
"""Like StreamReaderProtocol, but for a subprocess."""
def __init__(self, limit, loop):
- super().__init__(loop=loop)
+ super(SubprocessStreamProtocol, self).__init__(loop=loop)
self._limit = limit
self.stdin = self.stdout = self.stderr = None
self._transport = None
@@ -113,7 +117,8 @@ class Process:
"""Wait until the process exit and return the process return code.
This method is a coroutine."""
- return (yield from self._transport._wait())
+ return_code = yield From(self._transport._wait())
+ raise Return(return_code)
def send_signal(self, signal):
self._transport.send_signal(signal)
@@ -132,7 +137,7 @@ class Process:
logger.debug('%r communicate: feed stdin (%s bytes)',
self, len(input))
try:
- yield from self.stdin.drain()
+ yield From(self.stdin.drain())
except (BrokenPipeError, ConnectionResetError) as exc:
# communicate() ignores BrokenPipeError and ConnectionResetError
if debug:
@@ -157,12 +162,12 @@ class Process:
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: read %s', self, name)
- output = yield from stream.read()
+ output = yield From(stream.read())
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: close %s', self, name)
transport.close()
- return output
+ raise Return(output)
@coroutine
def communicate(self, input=None):
@@ -178,36 +183,43 @@ class Process:
stderr = self._read_stream(2)
else:
stderr = self._noop()
- stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
- loop=self._loop)
- yield from self.wait()
- return (stdout, stderr)
+ stdin, stdout, stderr = yield From(tasks.gather(stdin, stdout, stderr,
+ loop=self._loop))
+ yield From(self.wait())
+ raise Return(stdout, stderr)
@coroutine
-def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
- loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
+def create_subprocess_shell(cmd, **kwds):
+ stdin = kwds.pop('stdin', None)
+ stdout = kwds.pop('stdout', None)
+ stderr = kwds.pop('stderr', None)
+ loop = kwds.pop('loop', None)
+ limit = kwds.pop('limit', streams._DEFAULT_LIMIT)
if loop is None:
loop = events.get_event_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
- transport, protocol = yield from loop.subprocess_shell(
+ transport, protocol = yield From(loop.subprocess_shell(
protocol_factory,
cmd, stdin=stdin, stdout=stdout,
- stderr=stderr, **kwds)
- return Process(transport, protocol, loop)
+ stderr=stderr, **kwds))
+ raise Return(Process(transport, protocol, loop))
@coroutine
-def create_subprocess_exec(program, *args, stdin=None, stdout=None,
- stderr=None, loop=None,
- limit=streams._DEFAULT_LIMIT, **kwds):
+def create_subprocess_exec(program, *args, **kwds):
+ stdin = kwds.pop('stdin', None)
+ stdout = kwds.pop('stdout', None)
+ stderr = kwds.pop('stderr', None)
+ loop = kwds.pop('loop', None)
+ limit = kwds.pop('limit', streams._DEFAULT_LIMIT)
if loop is None:
loop = events.get_event_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
- transport, protocol = yield from loop.subprocess_exec(
+ transport, protocol = yield From(loop.subprocess_exec(
protocol_factory,
program, *args,
stdin=stdin, stdout=stdout,
- stderr=stderr, **kwds)
- return Process(transport, protocol, loop)
+ stderr=stderr, **kwds))
+ raise Return(Process(transport, protocol, loop))
diff --git a/asyncio/tasks.py b/trollius/tasks.py
index a235e74..3e0e1b1 100644
--- a/asyncio/tasks.py
+++ b/trollius/tasks.py
@@ -1,4 +1,5 @@
"""Support for tasks, coroutines and the scheduler."""
+from __future__ import print_function
__all__ = ['Task',
'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
@@ -6,19 +7,30 @@ __all__ = ['Task',
'gather', 'shield', 'ensure_future',
]
-import concurrent.futures
import functools
-import inspect
import linecache
import traceback
import warnings
-import weakref
+try:
+ from weakref import WeakSet
+except ImportError:
+ # Python 2.6
+ from .py27_weakrefset import WeakSet
from . import compat
from . import coroutines
from . import events
+from . import executor
from . import futures
-from .coroutines import coroutine
+from .locks import Lock, Condition, Semaphore, _ContextManager
+from .coroutines import coroutine, From, Return
+
+
+
+@coroutine
+def _lock_coroutine(lock):
+ yield From(lock.acquire())
+ raise Return(_ContextManager(lock))
class Task(futures.Future):
@@ -34,7 +46,7 @@ class Task(futures.Future):
# must be _wakeup().
# Weak set containing all tasks alive.
- _all_tasks = weakref.WeakSet()
+ _all_tasks = WeakSet()
# Dictionary containing tasks that are currently active in
# all running event loops. {EventLoop: Task}
@@ -64,11 +76,11 @@ class Task(futures.Future):
"""
if loop is None:
loop = events.get_event_loop()
- return {t for t in cls._all_tasks if t._loop is loop}
+ return set(t for t in cls._all_tasks if t._loop is loop)
- def __init__(self, coro, *, loop=None):
+ def __init__(self, coro, loop=None):
assert coroutines.iscoroutine(coro), repr(coro)
- super().__init__(loop=loop)
+ super(Task, self).__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
self._coro = coro
@@ -93,7 +105,7 @@ class Task(futures.Future):
futures.Future.__del__(self)
def _repr_info(self):
- info = super()._repr_info()
+ info = super(Task, self)._repr_info()
if self._must_cancel:
# replace status
@@ -106,7 +118,7 @@ class Task(futures.Future):
info.insert(2, 'wait_for=%r' % self._fut_waiter)
return info
- def get_stack(self, *, limit=None):
+ def get_stack(self, limit=None):
"""Return the list of stack frames for this task's coroutine.
If the coroutine is not done, this returns the stack where it is
@@ -153,7 +165,7 @@ class Task(futures.Future):
tb = tb.tb_next
return frames
- def print_stack(self, *, limit=None, file=None):
+ def print_stack(self, limit=None, file=None):
"""Print the stack or traceback for this task's coroutine.
This produces output similar to that of the traceback module,
@@ -220,9 +232,9 @@ class Task(futures.Future):
self._must_cancel = True
return True
- def _step(self, value=None, exc=None):
+ def _step(self, value=None, exc=None, exc_tb=None):
assert not self.done(), \
- '_step(): already done: {!r}, {!r}, {!r}'.format(self, value, exc)
+ '_step(): already done: {0!r}, {1!r}, {2!r}'.format(self, value, exc)
if self._must_cancel:
if not isinstance(exc, futures.CancelledError):
exc = futures.CancelledError()
@@ -230,6 +242,10 @@ class Task(futures.Future):
coro = self._coro
self._fut_waiter = None
+ if exc_tb is not None:
+ init_exc = exc
+ else:
+ init_exc = None
self.__class__._current_tasks[self._loop] = self
# Call either coro.throw(exc) or coro.send(value).
try:
@@ -238,71 +254,104 @@ class Task(futures.Future):
else:
result = coro.send(value)
except StopIteration as exc:
- self.set_result(exc.value)
+ if compat.PY33:
+ # asyncio Task object? get the result of the coroutine
+ result = exc.value
+ else:
+ if isinstance(exc, Return):
+ exc.raised = True
+ result = exc.value
+ else:
+ result = None
+ self.set_result(result)
except futures.CancelledError as exc:
- super().cancel() # I.e., Future.cancel(self).
- except Exception as exc:
- self.set_exception(exc)
+ super(Task, self).cancel() # I.e., Future.cancel(self).
except BaseException as exc:
- self.set_exception(exc)
- raise
+ if exc is init_exc:
+ self._set_exception_with_tb(exc, exc_tb)
+ exc_tb = None
+ else:
+ self.set_exception(exc)
+
+ if not isinstance(exc, Exception):
+ # reraise BaseException
+ raise
else:
- if isinstance(result, futures.Future):
- # Yielded Future must come from Future.__iter__().
- if result._blocking:
- result._blocking = False
- result.add_done_callback(self._wakeup)
- self._fut_waiter = result
- if self._must_cancel:
- if self._fut_waiter.cancel():
- self._must_cancel = False
+ if coroutines._DEBUG:
+ if not coroutines._coroutine_at_yield_from(self._coro):
+ # trollius coroutine must "yield From(...)"
+ if not isinstance(result, coroutines.FromWrapper):
+ self._loop.call_soon(
+ self._step, None,
+ RuntimeError("yield used without From"))
+ return
+ result = result.obj
else:
- self._loop.call_soon(
- self._step, None,
- RuntimeError(
- 'yield was used instead of yield from '
- 'in task {!r} with {!r}'.format(self, result)))
+ # asyncio coroutine using "yield from ..."
+ if isinstance(result, coroutines.FromWrapper):
+ result = result.obj
+ elif isinstance(result, coroutines.FromWrapper):
+ result = result.obj
+
+ if coroutines.iscoroutine(result):
+ # "yield coroutine" creates a task, the current task
+ # will wait until the new task is done
+ result = self._loop.create_task(result)
+ # FIXME: faster check. common base class? hasattr?
+ elif isinstance(result, (Lock, Condition, Semaphore)):
+ coro = _lock_coroutine(result)
+ result = self._loop.create_task(coro)
+
+ if isinstance(result, futures._FUTURE_CLASSES):
+ # Yielded Future must come from Future.__iter__().
+ result.add_done_callback(self._wakeup)
+ self._fut_waiter = result
+ if self._must_cancel:
+ if self._fut_waiter.cancel():
+ self._must_cancel = False
elif result is None:
# Bare yield relinquishes control for one event loop iteration.
self._loop.call_soon(self._step)
- elif inspect.isgenerator(result):
- # Yielding a generator is just wrong.
- self._loop.call_soon(
- self._step, None,
- RuntimeError(
- 'yield was used instead of yield from for '
- 'generator in task {!r} with {}'.format(
- self, result)))
else:
# Yielding something else is an error.
self._loop.call_soon(
self._step, None,
RuntimeError(
- 'Task got bad yield: {!r}'.format(result)))
+ 'Task got bad yield: {0!r}'.format(result)))
finally:
self.__class__._current_tasks.pop(self._loop)
self = None # Needed to break cycles when an exception occurs.
def _wakeup(self, future):
- try:
- value = future.result()
- except Exception as exc:
- # This may also be a cancellation.
- self._step(None, exc)
+ if (future._state == futures._FINISHED
+ and future._exception is not None):
+ # Get the traceback before calling exception(), because calling
+ # the exception() method clears the traceback
+ exc_tb = future._get_exception_tb()
+ exc = future.exception()
+ self._step(None, exc, exc_tb)
+ exc_tb = None
else:
- self._step(value, None)
+ try:
+ value = future.result()
+ except Exception as exc:
+ # This may also be a cancellation.
+ self._step(None, exc)
+ else:
+ self._step(value, None)
self = None # Needed to break cycles when an exception occurs.
# wait() and as_completed() similar to those in PEP 3148.
-FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
-FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
-ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
+# Export symbols in trollius.tasks for compatibility with asyncio
+FIRST_COMPLETED = executor.FIRST_COMPLETED
+FIRST_EXCEPTION = executor.FIRST_EXCEPTION
+ALL_COMPLETED = executor.ALL_COMPLETED
@coroutine
-def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
+def wait(fs, loop=None, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the Futures and coroutines given by fs to complete.
The sequence futures must not be empty.
@@ -313,24 +362,25 @@ def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
Usage:
- done, pending = yield from asyncio.wait(fs)
+ done, pending = yield From(asyncio.wait(fs))
Note: This does not raise TimeoutError! Futures that aren't done
when the timeout occurs are returned in the second set.
"""
- if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
+ if isinstance(fs, futures._FUTURE_CLASSES) or coroutines.iscoroutine(fs):
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
if not fs:
raise ValueError('Set of coroutines/Futures is empty.')
if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
- raise ValueError('Invalid return_when value: {}'.format(return_when))
+ raise ValueError('Invalid return_when value: {0}'.format(return_when))
if loop is None:
loop = events.get_event_loop()
- fs = {ensure_future(f, loop=loop) for f in set(fs)}
+ fs = set(ensure_future(f, loop=loop) for f in set(fs))
- return (yield from _wait(fs, timeout, return_when, loop))
+ result = yield From(_wait(fs, timeout, return_when, loop))
+ raise Return(result)
def _release_waiter(waiter, *args):
@@ -339,7 +389,7 @@ def _release_waiter(waiter, *args):
@coroutine
-def wait_for(fut, timeout, *, loop=None):
+def wait_for(fut, timeout, loop=None):
"""Wait for the single Future or coroutine to complete, with timeout.
Coroutine will be wrapped in Task.
@@ -356,7 +406,8 @@ def wait_for(fut, timeout, *, loop=None):
loop = events.get_event_loop()
if timeout is None:
- return (yield from fut)
+ result = yield From(fut)
+ raise Return(result)
waiter = futures.Future(loop=loop)
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
@@ -368,14 +419,14 @@ def wait_for(fut, timeout, *, loop=None):
try:
# wait until the future completes or the timeout
try:
- yield from waiter
+ yield From(waiter)
except futures.CancelledError:
fut.remove_done_callback(cb)
fut.cancel()
raise
if fut.done():
- return fut.result()
+ raise Return(fut.result())
else:
fut.remove_done_callback(cb)
fut.cancel()
@@ -395,12 +446,11 @@ def _wait(fs, timeout, return_when, loop):
timeout_handle = None
if timeout is not None:
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
- counter = len(fs)
+ non_local = {'counter': len(fs)}
def _on_completion(f):
- nonlocal counter
- counter -= 1
- if (counter <= 0 or
+ non_local['counter'] -= 1
+ if (non_local['counter'] <= 0 or
return_when == FIRST_COMPLETED or
return_when == FIRST_EXCEPTION and (not f.cancelled() and
f.exception() is not None)):
@@ -413,7 +463,7 @@ def _wait(fs, timeout, return_when, loop):
f.add_done_callback(_on_completion)
try:
- yield from waiter
+ yield From(waiter)
finally:
if timeout_handle is not None:
timeout_handle.cancel()
@@ -425,11 +475,11 @@ def _wait(fs, timeout, return_when, loop):
done.add(f)
else:
pending.add(f)
- return done, pending
+ raise Return(done, pending)
# This is *not* a @coroutine! It is just an iterator (yielding Futures).
-def as_completed(fs, *, loop=None, timeout=None):
+def as_completed(fs, loop=None, timeout=None):
"""Return an iterator whose values are coroutines.
When waiting for the yielded coroutines you'll get the results (or
@@ -439,18 +489,18 @@ def as_completed(fs, *, loop=None, timeout=None):
This differs from PEP 3148; the proper way to use this is:
for f in as_completed(fs):
- result = yield from f # The 'yield from' may raise.
+ result = yield From(f) # The 'yield' may raise.
# Use result.
- If a timeout is specified, the 'yield from' will raise
+ If a timeout is specified, the 'yield' will raise
TimeoutError when the timeout occurs before all Futures are done.
Note: The futures 'f' are not necessarily members of fs.
"""
- if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
+ if isinstance(fs, futures._FUTURE_CLASSES) or coroutines.iscoroutine(fs):
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
loop = loop if loop is not None else events.get_event_loop()
- todo = {ensure_future(f, loop=loop) for f in set(fs)}
+ todo = set(ensure_future(f, loop=loop) for f in set(fs))
from .queues import Queue # Import here to avoid circular import problem.
done = Queue(loop=loop)
timeout_handle = None
@@ -471,11 +521,11 @@ def as_completed(fs, *, loop=None, timeout=None):
@coroutine
def _wait_for_one():
- f = yield from done.get()
+ f = yield From(done.get())
if f is None:
# Dummy value from _on_timeout().
raise futures.TimeoutError
- return f.result() # May raise f.exception().
+ raise Return(f.result()) # May raise f.exception().
for f in todo:
f.add_done_callback(_on_completion)
@@ -486,18 +536,19 @@ def as_completed(fs, *, loop=None, timeout=None):
@coroutine
-def sleep(delay, result=None, *, loop=None):
+def sleep(delay, result=None, loop=None):
"""Coroutine that completes after a given time (in seconds)."""
future = futures.Future(loop=loop)
h = future._loop.call_later(delay,
future._set_result_unless_cancelled, result)
try:
- return (yield from future)
+ result = yield From(future)
+ raise Return(result)
finally:
h.cancel()
-def async(coro_or_future, *, loop=None):
+def async(coro_or_future, loop=None):
"""Wrap a coroutine in a future.
If the argument is a Future, it is returned directly.
@@ -511,12 +562,15 @@ def async(coro_or_future, *, loop=None):
return ensure_future(coro_or_future, loop=loop)
-def ensure_future(coro_or_future, *, loop=None):
+def ensure_future(coro_or_future, loop=None):
"""Wrap a coroutine in a future.
If the argument is a Future, it is returned directly.
"""
- if isinstance(coro_or_future, futures.Future):
+ # FIXME: only check if coroutines._DEBUG is True?
+ if isinstance(coro_or_future, coroutines.FromWrapper):
+ coro_or_future = coro_or_future.obj
+ if isinstance(coro_or_future, futures._FUTURE_CLASSES):
if loop is not None and loop is not coro_or_future._loop:
raise ValueError('loop argument must agree with Future')
return coro_or_future
@@ -539,8 +593,8 @@ class _GatheringFuture(futures.Future):
cancelled.
"""
- def __init__(self, children, *, loop=None):
- super().__init__(loop=loop)
+ def __init__(self, children, loop=None):
+ super(_GatheringFuture, self).__init__(loop=loop)
self._children = children
def cancel(self):
@@ -551,7 +605,7 @@ class _GatheringFuture(futures.Future):
return True
-def gather(*coros_or_futures, loop=None, return_exceptions=False):
+def gather(*coros_or_futures, **kw):
"""Return a future aggregating results from the given coroutines
or futures.
@@ -571,6 +625,11 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
prevent the cancellation of one child to cause other children to
be cancelled.)
"""
+ loop = kw.pop('loop', None)
+ return_exceptions = kw.pop('return_exceptions', False)
+ if kw:
+ raise TypeError("unexpected keyword")
+
if not coros_or_futures:
outer = futures.Future(loop=loop)
outer.set_result([])
@@ -578,7 +637,7 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
arg_to_fut = {}
for arg in set(coros_or_futures):
- if not isinstance(arg, futures.Future):
+ if not isinstance(arg, futures._FUTURE_CLASSES):
fut = ensure_future(arg, loop=loop)
if loop is None:
loop = fut._loop
@@ -596,11 +655,10 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
children = [arg_to_fut[arg] for arg in coros_or_futures]
nchildren = len(children)
outer = _GatheringFuture(children, loop=loop)
- nfinished = 0
+ non_local = {'nfinished': 0}
results = [None] * nchildren
def _done_callback(i, fut):
- nonlocal nfinished
if outer.done():
if not fut.cancelled():
# Mark exception retrieved.
@@ -620,8 +678,8 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
else:
res = fut._result
results[i] = res
- nfinished += 1
- if nfinished == nchildren:
+ non_local['nfinished'] += 1
+ if non_local['nfinished'] == nchildren:
outer.set_result(results)
for i, fut in enumerate(children):
@@ -629,16 +687,16 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
return outer
-def shield(arg, *, loop=None):
+def shield(arg, loop=None):
"""Wait for a future, shielding it from cancellation.
The statement
- res = yield from shield(something())
+ res = yield From(shield(something()))
is exactly equivalent to the statement
- res = yield from something()
+ res = yield From(something())
*except* that if the coroutine containing it is cancelled, the
task running in something() is not cancelled. From the POV of
@@ -651,7 +709,7 @@ def shield(arg, *, loop=None):
you can combine shield() with a try/except clause, as follows:
try:
- res = yield from shield(something())
+ res = yield From(shield(something()))
except CancelledError:
res = None
"""
diff --git a/asyncio/test_support.py b/trollius/test_support.py
index 0fadfad..b40576a 100644
--- a/asyncio/test_support.py
+++ b/trollius/test_support.py
@@ -4,6 +4,7 @@
# Ignore symbol TEST_HOME_DIR: test_events works without it
+from __future__ import absolute_import
import functools
import gc
import os
@@ -14,6 +15,7 @@ import subprocess
import sys
import time
+from trollius import test_utils
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
@@ -39,7 +41,9 @@ def _assert_python(expected_success, *args, **env_vars):
isolated = env_vars.pop('__isolated')
else:
isolated = not env_vars
- cmd_line = [sys.executable, '-X', 'faulthandler']
+ cmd_line = [sys.executable]
+ if sys.version_info >= (3, 3):
+ cmd_line.extend(('-X', 'faulthandler'))
if isolated and sys.version_info >= (3, 4):
# isolated mode: ignore Python environment variables, ignore user
# site-packages, and don't add the current directory to sys.path
@@ -248,7 +252,7 @@ def requires_mac_ver(*min_version):
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
- raise unittest.SkipTest(
+ raise test_utils.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
@@ -275,7 +279,7 @@ def _requires_unix_version(sysname, min_version):
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
- raise unittest.SkipTest(
+ raise test_utils.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
@@ -300,9 +304,6 @@ except ImportError:
# Use test.script_helper if available
try:
- from test.support.script_helper import assert_python_ok
+ from test.script_helper import assert_python_ok
except ImportError:
- try:
- from test.script_helper import assert_python_ok
- except ImportError:
- pass
+ pass
diff --git a/asyncio/test_utils.py b/trollius/test_utils.py
index 8cee95b..ebebb25 100644
--- a/asyncio/test_utils.py
+++ b/trollius/test_utils.py
@@ -7,23 +7,38 @@ import logging
import os
import re
import socket
-import socketserver
import sys
import tempfile
import threading
import time
-import unittest
-from unittest import mock
-from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
+import six
+
+try:
+ import socketserver
+ from http.server import HTTPServer
+except ImportError:
+ # Python 2
+ import SocketServer as socketserver
+ from BaseHTTPServer import HTTPServer
+
+try:
+ from unittest import mock
+except ImportError:
+ # Python < 3.3
+ import mock
+
try:
import ssl
+ from .py3_ssl import SSLContext, wrap_socket
except ImportError: # pragma: no cover
+ # SSL support disabled in Python
ssl = None
from . import base_events
+from . import compat
from . import events
from . import futures
from . import selectors
@@ -37,27 +52,112 @@ if sys.platform == 'win32': # pragma: no cover
else:
from socket import socketpair # pragma: no cover
+try:
+ # Prefer unittest2 if available (on Python 2)
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+
+skipIf = unittest.skipIf
+skipUnless = unittest.skipUnless
+SkipTest = unittest.SkipTest
+
+
+if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
+ class _BaseTestCaseContext:
+
+ def __init__(self, test_case):
+ self.test_case = test_case
+
+ def _raiseFailure(self, standardMsg):
+ msg = self.test_case._formatMessage(self.msg, standardMsg)
+ raise self.test_case.failureException(msg)
+
+
+ class _AssertRaisesBaseContext(_BaseTestCaseContext):
+
+ def __init__(self, expected, test_case, callable_obj=None,
+ expected_regex=None):
+ _BaseTestCaseContext.__init__(self, test_case)
+ self.expected = expected
+ self.test_case = test_case
+ if callable_obj is not None:
+ try:
+ self.obj_name = callable_obj.__name__
+ except AttributeError:
+ self.obj_name = str(callable_obj)
+ else:
+ self.obj_name = None
+ if isinstance(expected_regex, (bytes, str)):
+ expected_regex = re.compile(expected_regex)
+ self.expected_regex = expected_regex
+ self.msg = None
+
+ def handle(self, name, callable_obj, args, kwargs):
+ """
+ If callable_obj is None, assertRaises/Warns is being used as a
+ context manager, so check for a 'msg' kwarg and return self.
+ If callable_obj is not None, call it passing args and kwargs.
+ """
+ if callable_obj is None:
+ self.msg = kwargs.pop('msg', None)
+ return self
+ with self:
+ callable_obj(*args, **kwargs)
+
+
+ class _AssertRaisesContext(_AssertRaisesBaseContext):
+ """A context manager used to implement TestCase.assertRaises* methods."""
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ if exc_type is None:
+ try:
+ exc_name = self.expected.__name__
+ except AttributeError:
+ exc_name = str(self.expected)
+ if self.obj_name:
+ self._raiseFailure("{0} not raised by {1}".format(exc_name,
+ self.obj_name))
+ else:
+ self._raiseFailure("{0} not raised".format(exc_name))
+ if not issubclass(exc_type, self.expected):
+ # let unexpected exceptions pass through
+ return False
+ self.exception = exc_value
+ if self.expected_regex is None:
+ return True
+
+ expected_regex = self.expected_regex
+ if not expected_regex.search(str(exc_value)):
+ self._raiseFailure('"{0}" does not match "{1}"'.format(
+ expected_regex.pattern, str(exc_value)))
+ return True
+
def dummy_ssl_context():
if ssl is None:
return None
else:
- return ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ return SSLContext(ssl.PROTOCOL_SSLv23)
-def run_briefly(loop):
+def run_briefly(loop, steps=1):
@coroutine
def once():
pass
- gen = once()
- t = loop.create_task(gen)
- # Don't log a warning if the task is not done after run_until_complete().
- # It occurs if the loop is stopped or if a task raises a BaseException.
- t._log_destroy_pending = False
- try:
- loop.run_until_complete(t)
- finally:
- gen.close()
+ for step in range(steps):
+ gen = once()
+ t = loop.create_task(gen)
+ # Don't log a warning if the task is not done after run_until_complete().
+ # It occurs if the loop is stopped or if a task raises a BaseException.
+ t._log_destroy_pending = False
+ try:
+ loop.run_until_complete(t)
+ finally:
+ gen.close()
def run_until(loop, pred, timeout=30):
@@ -89,12 +189,12 @@ class SilentWSGIRequestHandler(WSGIRequestHandler):
pass
-class SilentWSGIServer(WSGIServer):
+class SilentWSGIServer(WSGIServer, object):
request_timeout = 2
def get_request(self):
- request, client_addr = super().get_request()
+ request, client_addr = super(SilentWSGIServer, self).get_request()
request.settimeout(self.request_timeout)
return request, client_addr
@@ -115,10 +215,10 @@ class SSLWSGIServerMixin:
'test', 'test_asyncio')
keyfile = os.path.join(here, 'ssl_key.pem')
certfile = os.path.join(here, 'ssl_cert.pem')
- ssock = ssl.wrap_socket(request,
- keyfile=keyfile,
- certfile=certfile,
- server_side=True)
+ ssock = wrap_socket(request,
+ keyfile=keyfile,
+ certfile=certfile,
+ server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
@@ -131,7 +231,7 @@ class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
-def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
+def _run_test_server(address, use_ssl, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
@@ -158,7 +258,7 @@ def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
if hasattr(socket, 'AF_UNIX'):
- class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
+ class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer, object):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
@@ -166,7 +266,7 @@ if hasattr(socket, 'AF_UNIX'):
self.server_port = 80
- class UnixWSGIServer(UnixHTTPServer, WSGIServer):
+ class UnixWSGIServer(UnixHTTPServer, WSGIServer, object):
request_timeout = 2
@@ -175,7 +275,7 @@ if hasattr(socket, 'AF_UNIX'):
self.setup_environ()
def get_request(self):
- request, client_addr = super().get_request()
+ request, client_addr = super(UnixWSGIServer, self).get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
@@ -214,18 +314,20 @@ if hasattr(socket, 'AF_UNIX'):
@contextlib.contextmanager
- def run_test_unix_server(*, use_ssl=False):
+ def run_test_unix_server(use_ssl=False):
with unix_socket_path() as path:
- yield from _run_test_server(address=path, use_ssl=use_ssl,
- server_cls=SilentUnixWSGIServer,
- server_ssl_cls=UnixSSLWSGIServer)
+ for item in _run_test_server(address=path, use_ssl=use_ssl,
+ server_cls=SilentUnixWSGIServer,
+ server_ssl_cls=UnixSSLWSGIServer):
+ yield item
@contextlib.contextmanager
-def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
- yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
- server_cls=SilentWSGIServer,
- server_ssl_cls=SSLWSGIServer)
+def run_test_server(host='127.0.0.1', port=0, use_ssl=False):
+ for item in _run_test_server(address=(host, port), use_ssl=use_ssl,
+ server_cls=SilentWSGIServer,
+ server_ssl_cls=SSLWSGIServer):
+ yield item
def make_test_protocol(base):
@@ -278,7 +380,7 @@ class TestLoop(base_events.BaseEventLoop):
"""
def __init__(self, gen=None):
- super().__init__()
+ super(TestLoop, self).__init__()
if gen is None:
def gen():
@@ -307,7 +409,7 @@ class TestLoop(base_events.BaseEventLoop):
self._time += advance
def close(self):
- super().close()
+ super(TestLoop, self).close()
if self._check_on_close:
try:
self._gen.send(0)
@@ -328,11 +430,11 @@ class TestLoop(base_events.BaseEventLoop):
return False
def assert_reader(self, fd, callback, *args):
- assert fd in self.readers, 'fd {} is not registered'.format(fd)
+ assert fd in self.readers, 'fd {0} is not registered'.format(fd)
handle = self.readers[fd]
- assert handle._callback == callback, '{!r} != {!r}'.format(
+ assert handle._callback == callback, '{0!r} != {1!r}'.format(
handle._callback, callback)
- assert handle._args == args, '{!r} != {!r}'.format(
+ assert handle._args == args, '{0!r} != {1!r}'.format(
handle._args, args)
def add_writer(self, fd, callback, *args):
@@ -347,11 +449,11 @@ class TestLoop(base_events.BaseEventLoop):
return False
def assert_writer(self, fd, callback, *args):
- assert fd in self.writers, 'fd {} is not registered'.format(fd)
+ assert fd in self.writers, 'fd {0} is not registered'.format(fd)
handle = self.writers[fd]
- assert handle._callback == callback, '{!r} != {!r}'.format(
+ assert handle._callback == callback, '{0!r} != {1!r}'.format(
handle._callback, callback)
- assert handle._args == args, '{!r} != {!r}'.format(
+ assert handle._args == args, '{0!r} != {1!r}'.format(
handle._args, args)
def reset_counters(self):
@@ -359,7 +461,7 @@ class TestLoop(base_events.BaseEventLoop):
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
- super()._run_once()
+ super(TestLoop, self)._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
@@ -367,7 +469,7 @@ class TestLoop(base_events.BaseEventLoop):
def call_at(self, when, callback, *args):
self._timers.append(when)
- return super().call_at(when, callback, *args)
+ return super(TestLoop, self).call_at(when, callback, *args)
def _process_events(self, event_list):
return
@@ -401,7 +503,7 @@ def get_function_source(func):
class TestCase(unittest.TestCase):
- def set_event_loop(self, loop, *, cleanup=True):
+ def set_event_loop(self, loop, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
@@ -418,7 +520,22 @@ class TestCase(unittest.TestCase):
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
- self.assertEqual(sys.exc_info(), (None, None, None))
+ if sys.exc_info()[0] == SkipTest:
+ if six.PY2:
+ sys.exc_clear()
+ else:
+ pass #self.assertEqual(sys.exc_info(), (None, None, None))
+
+ def check_soure_traceback(self, source_traceback, lineno_delta):
+ frame = sys._getframe(1)
+ filename = frame.f_code.co_filename
+ lineno = frame.f_lineno + lineno_delta
+ name = frame.f_code.co_name
+ self.assertIsInstance(source_traceback, list)
+ self.assertEqual(source_traceback[-1][:3],
+ (filename,
+ lineno,
+ name))
@contextlib.contextmanager
@@ -442,5 +559,5 @@ def mock_nonblocking_socket():
def force_legacy_ssl_support():
- return mock.patch('asyncio.sslproto._is_sslproto_available',
+ return mock.patch('trollius.sslproto._is_sslproto_available',
return_value=False)
diff --git a/trollius/time_monotonic.py b/trollius/time_monotonic.py
new file mode 100644
index 0000000..e99364c
--- /dev/null
+++ b/trollius/time_monotonic.py
@@ -0,0 +1,192 @@
+"""
+Backport of time.monotonic() of Python 3.3 (PEP 418) for Python 2.7.
+
+- time_monotonic(). This clock may or may not be monotonic depending on the
+ operating system.
+- time_monotonic_resolution: Resolution of the time_monotonic() clock in seconds
+
+Support Windows, Mac OS X, Linux, FreeBSD, OpenBSD and Solaris, but requires
+the ctypes module.
+"""
+import os
+import sys
+from .log import logger
+from .py33_exceptions import get_error_class
+
+__all__ = ('time_monotonic',)
+
+# default implementation: system clock (non monotonic!)
+from time import time as time_monotonic
+# the worst resolution is 15.6 ms on Windows
+time_monotonic_resolution = 0.050
+
+if os.name == "nt":
+ # Windows: use GetTickCount64() or GetTickCount()
+ try:
+ import ctypes
+ from ctypes import windll
+ from ctypes.wintypes import DWORD
+ except ImportError:
+ logger.error("time_monotonic import error", exc_info=True)
+ else:
+ # GetTickCount64() requires Windows Vista, Server 2008 or later
+ if hasattr(windll.kernel32, 'GetTickCount64'):
+ ULONGLONG = ctypes.c_uint64
+
+ GetTickCount64 = windll.kernel32.GetTickCount64
+ GetTickCount64.restype = ULONGLONG
+ GetTickCount64.argtypes = ()
+
+ def time_monotonic():
+ return GetTickCount64() * 1e-3
+ time_monotonic_resolution = 1e-3
+ else:
+ GetTickCount = windll.kernel32.GetTickCount
+ GetTickCount.restype = DWORD
+ GetTickCount.argtypes = ()
+
+ # Detect GetTickCount() integer overflow (32 bits, roll-over after 49.7
+ # days). It increases an internal epoch (reference time) by 2^32 each
+ # time that an overflow is detected. The epoch is stored in the
+ # process-local state and so the value of time_monotonic() may be
+ # different in two Python processes running for more than 49 days.
+ def time_monotonic():
+ ticks = GetTickCount()
+ if ticks < time_monotonic.last:
+ # Integer overflow detected
+ time_monotonic.delta += 2**32
+ time_monotonic.last = ticks
+ return (ticks + time_monotonic.delta) * 1e-3
+ time_monotonic.last = 0
+ time_monotonic.delta = 0
+ time_monotonic_resolution = 1e-3
+
+elif sys.platform == 'darwin':
+ # Mac OS X: use mach_absolute_time() and mach_timebase_info()
+ try:
+ import ctypes
+ import ctypes.util
+ libc_name = ctypes.util.find_library('c')
+ except ImportError:
+ logger.error("time_monotonic import error", exc_info=True)
+ libc_name = None
+ if libc_name:
+ libc = ctypes.CDLL(libc_name, use_errno=True)
+
+ mach_absolute_time = libc.mach_absolute_time
+ mach_absolute_time.argtypes = ()
+ mach_absolute_time.restype = ctypes.c_uint64
+
+ class mach_timebase_info_data_t(ctypes.Structure):
+ _fields_ = (
+ ('numer', ctypes.c_uint32),
+ ('denom', ctypes.c_uint32),
+ )
+ mach_timebase_info_data_p = ctypes.POINTER(mach_timebase_info_data_t)
+
+ mach_timebase_info = libc.mach_timebase_info
+ mach_timebase_info.argtypes = (mach_timebase_info_data_p,)
+ mach_timebase_info.restype = ctypes.c_int
+
+ def time_monotonic():
+ return mach_absolute_time() * time_monotonic.factor
+
+ timebase = mach_timebase_info_data_t()
+ mach_timebase_info(ctypes.byref(timebase))
+ time_monotonic.factor = float(timebase.numer) / timebase.denom * 1e-9
+ time_monotonic_resolution = time_monotonic.factor
+ del timebase
+
+elif sys.platform.startswith(("linux", "freebsd", "openbsd", "sunos")):
+ # Linux, FreeBSD, OpenBSD: use clock_gettime(CLOCK_MONOTONIC)
+ # Solaris: use clock_gettime(CLOCK_HIGHRES)
+
+ library = None
+ try:
+ import ctypes
+ import ctypes.util
+ except ImportError:
+ logger.error("time_monotonic import error", exc_info=True)
+ libraries = ()
+ else:
+ if sys.platform.startswith(("freebsd", "openbsd")):
+ libraries = ('c',)
+ elif sys.platform.startswith("linux"):
+ # Linux: in glibc 2.17+, clock_gettime() is provided by the libc,
+ # on older versions, it is provided by librt
+ libraries = ('c', 'rt')
+ else:
+ # Solaris
+ libraries = ('rt',)
+
+ for name in libraries:
+ filename = ctypes.util.find_library(name)
+ if not filename:
+ continue
+ library = ctypes.CDLL(filename, use_errno=True)
+ if not hasattr(library, 'clock_gettime'):
+ library = None
+
+ if library is not None:
+ if sys.platform.startswith("openbsd"):
+ import platform
+ release = platform.release()
+ release = tuple(map(int, release.split('.')))
+ if release >= (5, 5):
+ time_t = ctypes.c_int64
+ else:
+ time_t = ctypes.c_int32
+ else:
+ time_t = ctypes.c_long
+ clockid_t = ctypes.c_int
+
+ class timespec(ctypes.Structure):
+ _fields_ = (
+ ('tv_sec', time_t),
+ ('tv_nsec', ctypes.c_long),
+ )
+ timespec_p = ctypes.POINTER(timespec)
+
+ clock_gettime = library.clock_gettime
+ clock_gettime.argtypes = (clockid_t, timespec_p)
+ clock_gettime.restype = ctypes.c_int
+
+ def ctypes_oserror():
+ errno = ctypes.get_errno()
+ message = os.strerror(errno)
+ error_class = get_error_class(errno, OSError)
+ return error_class(errno, message)
+
+ def time_monotonic():
+ ts = timespec()
+ err = clock_gettime(time_monotonic.clk_id, ctypes.byref(ts))
+ if err:
+ raise ctypes_oserror()
+ return ts.tv_sec + ts.tv_nsec * 1e-9
+
+ if sys.platform.startswith("linux"):
+ time_monotonic.clk_id = 1 # CLOCK_MONOTONIC
+ elif sys.platform.startswith("freebsd"):
+ time_monotonic.clk_id = 4 # CLOCK_MONOTONIC
+ elif sys.platform.startswith("openbsd"):
+ time_monotonic.clk_id = 3 # CLOCK_MONOTONIC
+ else:
+ assert sys.platform.startswith("sunos")
+ time_monotonic.clk_id = 4 # CLOCK_HIGHRES
+
+ def get_resolution():
+ _clock_getres = library.clock_getres
+ _clock_getres.argtypes = (clockid_t, timespec_p)
+ _clock_getres.restype = ctypes.c_int
+
+ ts = timespec()
+ err = _clock_getres(time_monotonic.clk_id, ctypes.byref(ts))
+ if err:
+ raise ctypes_oserror()
+ return ts.tv_sec + ts.tv_nsec * 1e-9
+ time_monotonic_resolution = get_resolution()
+ del get_resolution
+
+else:
+ logger.error("time_monotonic: unsupported platform %r", sys.platform)
+
diff --git a/asyncio/transports.py b/trollius/transports.py
index 70b323f..1f086c1 100644
--- a/asyncio/transports.py
+++ b/trollius/transports.py
@@ -1,13 +1,13 @@
"""Abstract Transport class."""
-from asyncio import compat
+from trollius import compat
__all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
'Transport', 'DatagramTransport', 'SubprocessTransport',
]
-class BaseTransport:
+class BaseTransport(object):
"""Base class for transports."""
def __init__(self, extra=None):
@@ -224,7 +224,7 @@ class _FlowControlMixin(Transport):
override set_write_buffer_limits() (e.g. to specify different
defaults).
- The subclass constructor must call super().__init__(extra). This
+ The subclass constructor must call super(Class, self).__init__(extra). This
will call set_write_buffer_limits().
The user may call set_write_buffer_limits() and
@@ -233,7 +233,7 @@ class _FlowControlMixin(Transport):
"""
def __init__(self, extra=None, loop=None):
- super().__init__(extra)
+ super(_FlowControlMixin, self).__init__(extra)
assert loop is not None
self._loop = loop
self._protocol_paused = False
diff --git a/asyncio/unix_events.py b/trollius/unix_events.py
index bf3b084..cdefaca 100644
--- a/asyncio/unix_events.py
+++ b/trollius/unix_events.py
@@ -1,4 +1,5 @@
"""Selector event loop for Unix with signal handling."""
+from __future__ import absolute_import
import errno
import os
@@ -21,8 +22,13 @@ from . import futures
from . import selector_events
from . import selectors
from . import transports
-from .coroutines import coroutine
+from .compat import flatten_bytes
+from .coroutines import coroutine, From, Return
from .log import logger
+from .py33_exceptions import (
+ reraise, wrap_error,
+ BlockingIOError, BrokenPipeError, ConnectionResetError,
+ InterruptedError, ChildProcessError)
__all__ = ['SelectorEventLoop',
@@ -34,9 +40,10 @@ if sys.platform == 'win32': # pragma: no cover
raise ImportError('Signals are not really supported on Windows')
-def _sighandler_noop(signum, frame):
- """Dummy signal handler."""
- pass
+if compat.PY33:
+ def _sighandler_noop(signum, frame):
+ """Dummy signal handler."""
+ pass
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
@@ -46,23 +53,27 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""
def __init__(self, selector=None):
- super().__init__(selector)
+ super(_UnixSelectorEventLoop, self).__init__(selector)
self._signal_handlers = {}
def _socketpair(self):
return socket.socketpair()
def close(self):
- super().close()
+ super(_UnixSelectorEventLoop, self).close()
for sig in list(self._signal_handlers):
self.remove_signal_handler(sig)
- def _process_self_data(self, data):
- for signum in data:
- if not signum:
- # ignore null bytes written by _write_to_self()
- continue
- self._handle_signal(signum)
+ # On Python <= 3.2, the C signal handler of Python writes a null byte into
+ # the wakeup file descriptor. We cannot retrieve the signal numbers from
+ # the file descriptor.
+ if compat.PY33:
+ def _process_self_data(self, data):
+ for signum in data:
+ if not signum:
+ # ignore null bytes written by _write_to_self()
+ continue
+ self._handle_signal(signum)
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
@@ -89,14 +100,30 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
self._signal_handlers[sig] = handle
try:
- # Register a dummy signal handler to ask Python to write the signal
- # number in the wakup file descriptor. _process_self_data() will
- # read signal numbers from this file descriptor to handle signals.
- signal.signal(sig, _sighandler_noop)
+ if compat.PY33:
+ # On Python 3.3 and newer, the C signal handler writes the
+ # signal number into the wakeup file descriptor and then calls
+ # Py_AddPendingCall() to schedule the Python signal handler.
+ #
+ # Register a dummy signal handler to ask Python to write the
+ # signal number into the wakeup file descriptor.
+ # _process_self_data() will read signal numbers from this file
+ # descriptor to handle signals.
+ signal.signal(sig, _sighandler_noop)
+ else:
+ # On Python 3.2 and older, the C signal handler first calls
+ # Py_AddPendingCall() to schedule the Python signal handler,
+ # and then write a null byte into the wakeup file descriptor.
+ signal.signal(sig, self._handle_signal)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(sig, False)
- except OSError as exc:
+ except (RuntimeError, OSError) as exc:
+ # On Python 2, signal.signal(signal.SIGKILL, signal.SIG_IGN) raises
+ # RuntimeError(22, 'Invalid argument'). On Python 3,
+ # OSError(22, 'Invalid argument') is raised instead.
+ exc_type, exc_value, tb = sys.exc_info()
+
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
@@ -104,12 +131,12 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
except (ValueError, OSError) as nexc:
logger.info('set_wakeup_fd(-1) failed: %s', nexc)
- if exc.errno == errno.EINVAL:
- raise RuntimeError('sig {} cannot be caught'.format(sig))
+ if isinstance(exc, RuntimeError) or exc.errno == errno.EINVAL:
+ raise RuntimeError('sig {0} cannot be caught'.format(sig))
else:
- raise
+ reraise(exc_type, exc_value, tb)
- def _handle_signal(self, sig):
+ def _handle_signal(self, sig, frame=None):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
@@ -139,7 +166,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
- raise RuntimeError('sig {} cannot be caught'.format(sig))
+ raise RuntimeError('sig {0} cannot be caught'.format(sig))
else:
raise
@@ -158,11 +185,11 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
- raise TypeError('sig must be an int, not {!r}'.format(sig))
+ raise TypeError('sig must be an int, not {0!r}'.format(sig))
if not (1 <= sig < signal.NSIG):
raise ValueError(
- 'sig {} out of range(1, {})'.format(sig, signal.NSIG))
+ 'sig {0} out of range(1, {1})'.format(sig, signal.NSIG))
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
@@ -186,7 +213,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
watcher.add_child_handler(transp.get_pid(),
self._child_watcher_callback, transp)
try:
- yield from waiter
+ yield From(waiter)
except Exception as exc:
# Workaround CPython bug #23353: using yield/yield-from in an
# except block of a generator doesn't clear properly
@@ -197,16 +224,16 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
if err is not None:
transp.close()
- yield from transp._wait()
+ yield From(transp._wait())
raise err
- return transp
+ raise Return(transp)
def _child_watcher_callback(self, pid, returncode, transp):
self.call_soon_threadsafe(transp._process_exited, returncode)
@coroutine
- def create_unix_connection(self, protocol_factory, path, *,
+ def create_unix_connection(self, protocol_factory, path,
ssl=None, sock=None,
server_hostname=None):
assert server_hostname is None or isinstance(server_hostname, str)
@@ -226,7 +253,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
try:
sock.setblocking(False)
- yield from self.sock_connect(sock, path)
+ yield From(self.sock_connect(sock, path))
except:
sock.close()
raise
@@ -236,12 +263,12 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
raise ValueError('no path and sock were specified')
sock.setblocking(False)
- transport, protocol = yield from self._create_connection_transport(
- sock, protocol_factory, ssl, server_hostname)
- return transport, protocol
+ transport, protocol = yield From(self._create_connection_transport(
+ sock, protocol_factory, ssl, server_hostname))
+ raise Return(transport, protocol)
@coroutine
- def create_unix_server(self, protocol_factory, path=None, *,
+ def create_unix_server(self, protocol_factory, path=None,
sock=None, backlog=100, ssl=None):
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
@@ -255,13 +282,13 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
try:
sock.bind(path)
- except OSError as exc:
+ except socket.error as exc:
sock.close()
if exc.errno == errno.EADDRINUSE:
# Let's improve the error message by adding
# with what exact address it occurs.
- msg = 'Address {!r} is already in use'.format(path)
- raise OSError(errno.EADDRINUSE, msg) from None
+ msg = 'Address {0!r} is already in use'.format(path)
+ raise OSError(errno.EADDRINUSE, msg)
else:
raise
except:
@@ -274,7 +301,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
if sock.family != socket.AF_UNIX:
raise ValueError(
- 'A UNIX Domain Socket was expected, got {!r}'.format(sock))
+ 'A UNIX Domain Socket was expected, got {0!r}'.format(sock))
server = base_events.Server(self, [sock])
sock.listen(backlog)
@@ -284,6 +311,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
if hasattr(os, 'set_blocking'):
+ # Python 3.5 and newer
def _set_nonblocking(fd):
os.set_blocking(fd, False)
else:
@@ -300,7 +328,7 @@ class _UnixReadPipeTransport(transports.ReadTransport):
max_size = 256 * 1024 # max bytes we read in one event loop iteration
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
- super().__init__(extra)
+ super(_UnixReadPipeTransport, self).__init__(extra)
self._extra['pipe'] = pipe
self._loop = loop
self._pipe = pipe
@@ -342,7 +370,7 @@ class _UnixReadPipeTransport(transports.ReadTransport):
def _read_ready(self):
try:
- data = os.read(self._fileno, self.max_size)
+ data = wrap_error(os.read, self._fileno, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
@@ -410,7 +438,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
transports.WriteTransport):
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
- super().__init__(extra, loop)
+ super(_UnixWritePipeTransport, self).__init__(extra, loop)
self._extra['pipe'] = pipe
self._pipe = pipe
self._fileno = pipe.fileno()
@@ -476,9 +504,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
self._close()
def write(self, data):
- assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
- if isinstance(data, bytearray):
- data = memoryview(data)
+ data = flatten_bytes(data)
if not data:
return
@@ -492,7 +518,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
if not self._buffer:
# Attempt to send it right away first.
try:
- n = os.write(self._fileno, data)
+ n = wrap_error(os.write, self._fileno, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
@@ -512,9 +538,9 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
data = b''.join(self._buffer)
assert data, 'Data should not be empty'
- self._buffer.clear()
+ del self._buffer[:]
try:
- n = os.write(self._fileno, data)
+ n = wrap_error(os.write, self._fileno, data)
except (BlockingIOError, InterruptedError):
self._buffer.append(data)
except Exception as exc:
@@ -583,7 +609,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
self._closing = True
if self._buffer:
self._loop.remove_writer(self._fileno)
- self._buffer.clear()
+ del self._buffer[:]
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
@@ -634,11 +660,20 @@ class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
universal_newlines=False, bufsize=bufsize, **kwargs)
if stdin_w is not None:
+ # Retrieve the file descriptor from stdin_w; stdin_w should not
+ # "own" the file descriptor anymore: closing the stdin_fd file
+ # descriptor must immediately close the file
stdin.close()
- self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
+ if hasattr(stdin_w, 'detach'):
+ stdin_fd = stdin_w.detach()
+ self._proc.stdin = os.fdopen(stdin_fd, 'wb', bufsize)
+ else:
+ stdin_dup = os.dup(stdin_w.fileno())
+ stdin_w.close()
+ self._proc.stdin = os.fdopen(stdin_dup, 'wb', bufsize)
-class AbstractChildWatcher:
+class AbstractChildWatcher(object):
"""Abstract base class for monitoring child processes.
Objects derived from this class monitor a collection of subprocesses and
@@ -774,12 +809,12 @@ class SafeChildWatcher(BaseChildWatcher):
"""
def __init__(self):
- super().__init__()
+ super(SafeChildWatcher, self).__init__()
self._callbacks = {}
def close(self):
self._callbacks.clear()
- super().close()
+ super(SafeChildWatcher, self).close()
def __enter__(self):
return self
@@ -851,7 +886,7 @@ class FastChildWatcher(BaseChildWatcher):
(O(1) each time a child terminates).
"""
def __init__(self):
- super().__init__()
+ super(FastChildWatcher, self).__init__()
self._callbacks = {}
self._lock = threading.Lock()
self._zombies = {}
@@ -860,7 +895,7 @@ class FastChildWatcher(BaseChildWatcher):
def close(self):
self._callbacks.clear()
self._zombies.clear()
- super().close()
+ super(FastChildWatcher, self).close()
def __enter__(self):
with self._lock:
@@ -907,7 +942,7 @@ class FastChildWatcher(BaseChildWatcher):
# long as we're able to reap a child.
while True:
try:
- pid, status = os.waitpid(-1, os.WNOHANG)
+ pid, status = wrap_error(os.waitpid, -1, os.WNOHANG)
except ChildProcessError:
# No more child processes exist.
return
@@ -950,7 +985,7 @@ class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
_loop_factory = _UnixSelectorEventLoop
def __init__(self):
- super().__init__()
+ super(_UnixDefaultEventLoopPolicy, self).__init__()
self._watcher = None
def _init_watcher(self):
@@ -969,7 +1004,7 @@ class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
the child watcher.
"""
- super().set_event_loop(loop)
+ super(_UnixDefaultEventLoopPolicy, self).set_event_loop(loop)
if self._watcher is not None and \
isinstance(threading.current_thread(), threading._MainThread):
diff --git a/asyncio/windows_events.py b/trollius/windows_events.py
index 922594f..3102d23 100644
--- a/asyncio/windows_events.py
+++ b/trollius/windows_events.py
@@ -1,6 +1,5 @@
"""Selector and proactor event loops for Windows."""
-import _winapi
import errno
import math
import socket
@@ -11,12 +10,14 @@ from . import events
from . import base_subprocess
from . import futures
from . import proactor_events
+from . import py33_winapi as _winapi
from . import selector_events
from . import tasks
from . import windows_utils
from . import _overlapped
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
from .log import logger
+from .py33_exceptions import wrap_error, BrokenPipeError, ConnectionResetError
__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
@@ -42,14 +43,14 @@ class _OverlappedFuture(futures.Future):
Cancelling it will immediately cancel the overlapped operation.
"""
- def __init__(self, ov, *, loop=None):
- super().__init__(loop=loop)
+ def __init__(self, ov, loop=None):
+ super(_OverlappedFuture, self).__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
self._ov = ov
def _repr_info(self):
- info = super()._repr_info()
+ info = super(_OverlappedFuture, self)._repr_info()
if self._ov is not None:
state = 'pending' if self._ov.pending else 'completed'
info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
@@ -73,22 +74,22 @@ class _OverlappedFuture(futures.Future):
def cancel(self):
self._cancel_overlapped()
- return super().cancel()
+ return super(_OverlappedFuture, self).cancel()
def set_exception(self, exception):
- super().set_exception(exception)
+ super(_OverlappedFuture, self).set_exception(exception)
self._cancel_overlapped()
def set_result(self, result):
- super().set_result(result)
+ super(_OverlappedFuture, self).set_result(result)
self._ov = None
class _BaseWaitHandleFuture(futures.Future):
"""Subclass of Future which represents a wait handle."""
- def __init__(self, ov, handle, wait_handle, *, loop=None):
- super().__init__(loop=loop)
+ def __init__(self, ov, handle, wait_handle, loop=None):
+ super(_BaseWaitHandleFuture, self).__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
# Keep a reference to the Overlapped object to keep it alive until the
@@ -107,7 +108,7 @@ class _BaseWaitHandleFuture(futures.Future):
_winapi.WAIT_OBJECT_0)
def _repr_info(self):
- info = super()._repr_info()
+ info = super(_BaseWaitHandleFuture, self)._repr_info()
info.append('handle=%#x' % self._handle)
if self._handle is not None:
state = 'signaled' if self._poll() else 'waiting'
@@ -147,15 +148,15 @@ class _BaseWaitHandleFuture(futures.Future):
def cancel(self):
self._unregister_wait()
- return super().cancel()
+ return super(_BaseWaitHandleFuture, self).cancel()
def set_exception(self, exception):
self._unregister_wait()
- super().set_exception(exception)
+ super(_BaseWaitHandleFuture, self).set_exception(exception)
def set_result(self, result):
self._unregister_wait()
- super().set_result(result)
+ super(_BaseWaitHandleFuture, self).set_result(result)
class _WaitCancelFuture(_BaseWaitHandleFuture):
@@ -163,8 +164,9 @@ class _WaitCancelFuture(_BaseWaitHandleFuture):
_WaitHandleFuture using an event.
"""
- def __init__(self, ov, event, wait_handle, *, loop=None):
- super().__init__(ov, event, wait_handle, loop=loop)
+ def __init__(self, ov, event, wait_handle, loop=None):
+ super(_WaitCancelFuture, self).__init__(ov, event, wait_handle,
+ loop=loop)
self._done_callback = None
@@ -178,8 +180,9 @@ class _WaitCancelFuture(_BaseWaitHandleFuture):
class _WaitHandleFuture(_BaseWaitHandleFuture):
- def __init__(self, ov, handle, wait_handle, proactor, *, loop=None):
- super().__init__(ov, handle, wait_handle, loop=loop)
+ def __init__(self, ov, handle, wait_handle, proactor, loop=None):
+ super(_WaitHandleFuture, self).__init__(ov, handle, wait_handle,
+ loop=loop)
self._proactor = proactor
self._unregister_proactor = True
self._event = _overlapped.CreateEvent(None, True, False, None)
@@ -201,7 +204,7 @@ class _WaitHandleFuture(_BaseWaitHandleFuture):
self._proactor._unregister(self._ov)
self._proactor = None
- super()._unregister_wait_cb(fut)
+ super(_WaitHandleFuture, self)._unregister_wait_cb(fut)
def _unregister_wait(self):
if not self._registered:
@@ -259,7 +262,7 @@ class PipeServer(object):
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
if first:
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
- h = _winapi.CreateNamedPipe(
+ h = wrap_error(_winapi.CreateNamedPipe,
self._address, flags,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
@@ -301,7 +304,7 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
def __init__(self, proactor=None):
if proactor is None:
proactor = IocpProactor()
- super().__init__(proactor)
+ super(ProactorEventLoop, self).__init__(proactor)
def _socketpair(self):
return windows_utils.socketpair()
@@ -309,11 +312,11 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
@coroutine
def create_pipe_connection(self, protocol_factory, address):
f = self._proactor.connect_pipe(address)
- pipe = yield from f
+ pipe = yield From(f)
protocol = protocol_factory()
trans = self._make_duplex_pipe_transport(pipe, protocol,
extra={'addr': address})
- return trans, protocol
+ raise Return(trans, protocol)
@coroutine
def start_serving_pipe(self, protocol_factory, address):
@@ -372,7 +375,7 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
waiter=waiter, extra=extra,
**kwargs)
try:
- yield from waiter
+ yield From(waiter)
except Exception as exc:
# Workaround CPython bug #23353: using yield/yield-from in an
# except block of a generator doesn't clear properly sys.exc_info()
@@ -382,13 +385,13 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
if err is not None:
transp.close()
- yield from transp._wait()
+ yield From(transp._wait())
raise err
- return transp
+ raise Return(transp)
-class IocpProactor:
+class IocpProactor(object):
"""Proactor implementation using IOCP."""
def __init__(self, concurrency=0xffffffff):
@@ -426,16 +429,16 @@ class IocpProactor:
ov = _overlapped.Overlapped(NULL)
try:
if isinstance(conn, socket.socket):
- ov.WSARecv(conn.fileno(), nbytes, flags)
+ wrap_error(ov.WSARecv, conn.fileno(), nbytes, flags)
else:
- ov.ReadFile(conn.fileno(), nbytes)
+ wrap_error(ov.ReadFile, conn.fileno(), nbytes)
except BrokenPipeError:
return self._result(b'')
def finish_recv(trans, key, ov):
try:
- return ov.getresult()
- except OSError as exc:
+ return wrap_error(ov.getresult)
+ except WindowsError as exc:
if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
raise ConnectionResetError(*exc.args)
else:
@@ -453,8 +456,8 @@ class IocpProactor:
def finish_send(trans, key, ov):
try:
- return ov.getresult()
- except OSError as exc:
+ return wrap_error(ov.getresult)
+ except WindowsError as exc:
if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
raise ConnectionResetError(*exc.args)
else:
@@ -469,7 +472,7 @@ class IocpProactor:
ov.AcceptEx(listener.fileno(), conn.fileno())
def finish_accept(trans, key, ov):
- ov.getresult()
+ wrap_error(ov.getresult)
# Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
buf = struct.pack('@P', listener.fileno())
conn.setsockopt(socket.SOL_SOCKET,
@@ -481,7 +484,7 @@ class IocpProactor:
def accept_coro(future, conn):
# Coroutine closing the accept socket if the future is cancelled
try:
- yield from future
+ yield From(future)
except futures.CancelledError:
conn.close()
raise
@@ -496,7 +499,7 @@ class IocpProactor:
# The socket needs to be locally bound before we call ConnectEx().
try:
_overlapped.BindLocal(conn.fileno(), conn.family)
- except OSError as e:
+ except WindowsError as e:
if e.winerror != errno.WSAEINVAL:
raise
# Probably already locally bound; check using getsockname().
@@ -506,7 +509,7 @@ class IocpProactor:
ov.ConnectEx(conn.fileno(), address)
def finish_connect(trans, key, ov):
- ov.getresult()
+ wrap_error(ov.getresult)
# Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
conn.setsockopt(socket.SOL_SOCKET,
_overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
@@ -526,7 +529,7 @@ class IocpProactor:
return self._result(pipe)
def finish_accept_pipe(trans, key, ov):
- ov.getresult()
+ wrap_error(ov.getresult)
return pipe
return self._register(ov, pipe, finish_accept_pipe)
@@ -539,17 +542,17 @@ class IocpProactor:
# Call CreateFile() in a loop until it doesn't fail with
# ERROR_PIPE_BUSY
try:
- handle = _overlapped.ConnectPipe(address)
+ handle = wrap_error(_overlapped.ConnectPipe, address)
break
- except OSError as exc:
+ except WindowsError as exc:
if exc.winerror != _overlapped.ERROR_PIPE_BUSY:
raise
# ConnectPipe() failed with ERROR_PIPE_BUSY: retry later
delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY)
- yield from tasks.sleep(delay, loop=self._loop)
+ yield From(tasks.sleep(delay, loop=self._loop))
- return windows_utils.PipeHandle(handle)
+ raise Return(windows_utils.PipeHandle(handle))
def wait_for_handle(self, handle, timeout=None):
"""Wait for a handle.
@@ -572,7 +575,7 @@ class IocpProactor:
else:
# RegisterWaitForSingleObject() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
- ms = math.ceil(timeout * 1e3)
+ ms = int(math.ceil(timeout * 1e3))
# We only create ov so we can use ov.address as a key for the cache.
ov = _overlapped.Overlapped(NULL)
@@ -660,7 +663,7 @@ class IocpProactor:
else:
# GetQueuedCompletionStatus() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
- ms = math.ceil(timeout * 1e3)
+ ms = int(math.ceil(timeout * 1e3))
if ms >= INFINITE:
raise ValueError("timeout too big")
@@ -705,7 +708,7 @@ class IocpProactor:
# Remove unregisted futures
for ov in self._unregistered:
self._cache.pop(ov.address, None)
- self._unregistered.clear()
+ del self._unregistered[:]
def _stop_serving(self, obj):
# obj is a socket or pipe handle. It will be closed in
diff --git a/asyncio/windows_utils.py b/trollius/windows_utils.py
index 870cd13..288d547 100644
--- a/asyncio/windows_utils.py
+++ b/trollius/windows_utils.py
@@ -1,13 +1,13 @@
"""
Various Windows specific bits and pieces
"""
+from __future__ import absolute_import
import sys
if sys.platform != 'win32': # pragma: no cover
raise ImportError('win32 only')
-import _winapi
import itertools
import msvcrt
import os
@@ -16,6 +16,12 @@ import subprocess
import tempfile
import warnings
+import six
+
+from . import py33_winapi as _winapi
+from . import compat
+from .py33_exceptions import wrap_error, BlockingIOError, InterruptedError
+
__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle']
@@ -64,7 +70,7 @@ else:
try:
csock.setblocking(False)
try:
- csock.connect((addr, port))
+ wrap_error(csock.connect, (addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
@@ -80,7 +86,7 @@ else:
# Replacement for os.pipe() using handles instead of fds
-def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
+def pipe(duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
"""Like os.pipe() but with overlapped support and using handles not fds."""
address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
(os.getpid(), next(_mmap_counter)))
@@ -115,7 +121,12 @@ def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
flags_and_attribs, _winapi.NULL)
ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
- ov.GetOverlappedResult(True)
+ if hasattr(ov, 'GetOverlappedResult'):
+ # _winapi module of Python 3.3
+ ov.GetOverlappedResult(True)
+ else:
+ # _overlapped module
+ wrap_error(ov.getresult, True)
return h1, h2
except:
if h1 is not None:
@@ -128,7 +139,7 @@ def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
# Wrapper for a pipe handle
-class PipeHandle:
+class PipeHandle(object):
"""Wrapper for an overlapped pipe handle which is vaguely file-object like.
The IOCP event loop can use these instead of socket objects.
@@ -152,14 +163,15 @@ class PipeHandle:
raise ValueError("I/O operatioon on closed pipe")
return self._handle
- def close(self, *, CloseHandle=_winapi.CloseHandle):
+ def close(self, CloseHandle=_winapi.CloseHandle):
if self._handle is not None:
CloseHandle(self._handle)
self._handle = None
def __del__(self):
if self._handle is not None:
- warnings.warn("unclosed %r" % self, ResourceWarning)
+ if six.PY3:
+ warnings.warn("unclosed %r" % self, ResourceWarning)
self.close()
def __enter__(self):
@@ -200,8 +212,11 @@ class Popen(subprocess.Popen):
else:
stderr_wfd = stderr
try:
- super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
- stderr=stderr_wfd, **kwds)
+ super(Popen, self).__init__(args,
+ stdin=stdin_rfd,
+ stdout=stdout_wfd,
+ stderr=stderr_wfd,
+ **kwds)
except:
for h in (stdin_wh, stdout_rh, stderr_rh):
if h is not None:
diff --git a/update-asyncio-step1.sh b/update-asyncio-step1.sh
new file mode 100755
index 0000000..e2ac4f6
--- /dev/null
+++ b/update-asyncio-step1.sh
@@ -0,0 +1,12 @@
+set -e -x
+git checkout trollius
+git pull -u
+git checkout master
+git pull https://github.com/python/asyncio.git
+
+git checkout trollius
+# rename-threshold=25: a similarity of 25% is enough to consider two files
+# rename candidates
+git merge -X rename-threshold=25 master
+
+echo "Now run ./update-tulip-step2.sh"
diff --git a/update-asyncio-step2.sh b/update-asyncio-step2.sh
new file mode 100755
index 0000000..f813b6d
--- /dev/null
+++ b/update-asyncio-step2.sh
@@ -0,0 +1,36 @@
+set -e
+
+# Check for merge conflicts
+if $(git status --porcelain|grep -q '^.U '); then
+ echo "Fix the following conflicts:"
+ git status
+ exit 1
+fi
+
+# Ensure that yield from is not used
+if $(git diff|grep -q 'yield from'); then
+ echo "yield from present in changed code!"
+ git diff | grep 'yield from' -B5 -A3
+ exit 1
+fi
+
+# Ensure that mock patchs trollius module, not asyncio
+if $(grep -q 'patch.*asyncio' tests/*.py); then
+ echo "Fix following patch lines in tests/"
+ grep 'patch.*asyncio' tests/*.py
+ exit 1
+fi
+
+# Python 2.6 compatibility
+if $(grep -q -E '\{[^0-9].*format' */*.py); then
+ echo "Issues with Python 2.6 compatibility:"
+ grep -E '\{[^0-9].*format' */*.py
+ exit 1
+fi
+if $(grep -q -F 'super()' */*.py); then
+ echo "Issues with Python 2.6 compatibility:"
+ grep -F 'super()' */*.py
+ exit 1
+fi
+
+echo "Now run ./update-tulip-step3.sh"
diff --git a/update-asyncio-step3.sh b/update-asyncio-step3.sh
new file mode 100755
index 0000000..cc13503
--- /dev/null
+++ b/update-asyncio-step3.sh
@@ -0,0 +1,10 @@
+set -e -x
+./update-asyncio-step2.sh
+tox -e py27,py34
+
+git status
+echo
+echo "Now type:"
+echo "git commit -m 'Merge asyncio into trollius'"
+echo
+echo "You may have to add unstaged files"
diff --git a/update_stdlib.sh b/update_stdlib.sh
deleted file mode 100755
index 0cdbb1b..0000000
--- a/update_stdlib.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-# Script to copy asyncio files to the standard library tree.
-# Optional argument is the root of the Python 3.4 tree.
-# Assumes you have already created Lib/asyncio and
-# Lib/test/test_asyncio in the destination tree.
-
-CPYTHON=${1-$HOME/cpython}
-
-if [ ! -d $CPYTHON ]
-then
- echo Bad destination $CPYTHON
- exit 1
-fi
-
-if [ ! -f asyncio/__init__.py ]
-then
- echo Bad current directory
- exit 1
-fi
-
-maybe_copy()
-{
- SRC=$1
- DST=$CPYTHON/$2
- if cmp $DST $SRC
- then
- return
- fi
- echo ======== $SRC === $DST ========
- diff -u $DST $SRC
- echo -n "Copy $SRC? [y/N/back] "
- read X
- case $X in
- [yY]*) echo Copying $SRC; cp $SRC $DST;;
- back) echo Copying TO $SRC; cp $DST $SRC;;
- *) echo Not copying $SRC;;
- esac
-}
-
-for i in `(cd asyncio && ls *.py)`
-do
- if [ $i == test_support.py ]
- then
- continue
- fi
-
- if [ $i == selectors.py ]
- then
- if [ "`(cd $CPYTHON; hg branch)`" == "3.4" ]
- then
- echo "Destination is 3.4 branch -- ignoring selectors.py"
- else
- maybe_copy asyncio/$i Lib/$i
- fi
- else
- maybe_copy asyncio/$i Lib/asyncio/$i
- fi
-done
-
-for i in `(cd tests && ls *.py *.pem)`
-do
- if [ $i == test_selectors.py ]
- then
- continue
- fi
- maybe_copy tests/$i Lib/test/test_asyncio/$i
-done
-
-maybe_copy overlapped.c Modules/overlapped.c