summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVictor Stinner <victor.stinner@gmail.com>2014-12-19 23:03:31 +0100
committerVictor Stinner <victor.stinner@gmail.com>2014-12-19 23:03:31 +0100
commitc4753e8a5aaa40069a98f5da2c0eb91e4b2b7f8e (patch)
treec1d6b195cbc699fcfef038bcf1591a5dba34efde
parent8d8b24205834d6151ad806a73b3b10f7cc0c5baa (diff)
parentf1a0f0aa574bbe317b37e245c9d1ddcc2c0f8e36 (diff)
downloadtrollius-c4753e8a5aaa40069a98f5da2c0eb91e4b2b7f8e.tar.gz
Merge Tulip into Trollius
-rw-r--r--.hgignore2
-rw-r--r--AUTHORS40
-rw-r--r--ChangeLog185
-rw-r--r--MANIFEST.in18
-rw-r--r--Makefile4
-rw-r--r--README62
-rw-r--r--TODO16
-rw-r--r--asyncio/coroutines.py195
-rw-r--r--asyncio/events.py597
-rw-r--r--check.py2
-rw-r--r--doc/Makefile153
-rw-r--r--doc/asyncio.rst185
-rw-r--r--doc/changelog.rst499
-rw-r--r--doc/conf.py240
-rw-r--r--doc/dev.rst60
-rw-r--r--doc/index.rst75
-rw-r--r--doc/install.rst111
-rw-r--r--doc/make.bat190
-rw-r--r--doc/trollius.jpgbin0 -> 30083 bytes
-rw-r--r--doc/using.rst85
-rw-r--r--examples/cacheclt.py56
-rw-r--r--examples/cachesvr.py27
-rw-r--r--examples/child_process.py27
-rw-r--r--examples/crawl.py109
-rw-r--r--examples/echo_client_tulip.py7
-rw-r--r--examples/echo_server_tulip.py7
-rw-r--r--examples/fetch0.py11
-rw-r--r--examples/fetch1.py26
-rw-r--r--examples/fetch2.py52
-rw-r--r--examples/fetch3.py83
-rw-r--r--examples/fuzz_as_completed.py13
-rw-r--r--examples/hello_callback.py4
-rw-r--r--examples/hello_coroutine.py9
-rw-r--r--examples/interop_asyncio.py53
-rw-r--r--examples/shell.py27
-rw-r--r--examples/simple_tcp_server.py36
-rw-r--r--examples/sink.py14
-rw-r--r--examples/source.py13
-rw-r--r--examples/source1.py26
-rw-r--r--examples/stacks.py6
-rw-r--r--examples/subprocess_attach_read_pipe.py16
-rw-r--r--examples/subprocess_attach_write_pipe.py27
-rw-r--r--examples/subprocess_shell.py20
-rwxr-xr-xexamples/tcp_echo.py6
-rw-r--r--examples/timing_tcp_server.py41
-rwxr-xr-xexamples/udp_echo.py8
-rw-r--r--overlapped.c54
-rw-r--r--release.py6
-rw-r--r--run_aiotest.py8
-rwxr-xr-x[-rw-r--r--]runtests.py112
-rw-r--r--setup.py64
-rw-r--r--tests/echo3.py10
-rw-r--r--tests/test_asyncio.py141
-rw-r--r--tests/test_base_events.py216
-rw-r--r--tests/test_events.py392
-rw-r--r--tests/test_futures.py98
-rw-r--r--tests/test_locks.py165
-rw-r--r--tests/test_proactor_events.py21
-rw-r--r--tests/test_queues.py104
-rw-r--r--tests/test_selector_events.py90
-rw-r--r--tests/test_selectors.py9
-rw-r--r--tests/test_streams.py58
-rw-r--r--tests/test_subprocess.py102
-rw-r--r--tests/test_tasks.py428
-rw-r--r--tests/test_transports.py15
-rw-r--r--tests/test_unix_events.py130
-rw-r--r--tests/test_windows_events.py39
-rw-r--r--tests/test_windows_utils.py38
-rw-r--r--tox.ini23
-rw-r--r--trollius/__init__.py (renamed from asyncio/__init__.py)13
-rw-r--r--trollius/base_events.py (renamed from asyncio/base_events.py)166
-rw-r--r--trollius/base_subprocess.py (renamed from asyncio/base_subprocess.py)16
-rw-r--r--trollius/compat.py61
-rw-r--r--trollius/constants.py (renamed from asyncio/constants.py)0
-rw-r--r--trollius/coroutines.py342
-rw-r--r--trollius/events.py633
-rw-r--r--trollius/executor.py84
-rw-r--r--trollius/futures.py (renamed from asyncio/futures.py)111
-rw-r--r--trollius/locks.py (renamed from asyncio/locks.py)124
-rw-r--r--trollius/log.py (renamed from asyncio/log.py)0
-rw-r--r--trollius/proactor_events.py (renamed from asyncio/proactor_events.py)18
-rw-r--r--trollius/protocols.py (renamed from asyncio/protocols.py)2
-rw-r--r--trollius/py27_weakrefset.py202
-rw-r--r--trollius/py33_exceptions.py144
-rw-r--r--trollius/py33_winapi.py75
-rw-r--r--trollius/py3_ssl.py149
-rw-r--r--trollius/queues.py (renamed from asyncio/queues.py)45
-rw-r--r--trollius/selector_events.py (renamed from asyncio/selector_events.py)137
-rw-r--r--trollius/selectors.py (renamed from asyncio/selectors.py)72
-rw-r--r--trollius/streams.py (renamed from asyncio/streams.py)65
-rw-r--r--trollius/subprocess.py (renamed from asyncio/subprocess.py)76
-rw-r--r--trollius/tasks.py (renamed from asyncio/tasks.py)242
-rw-r--r--trollius/test_support.py (renamed from asyncio/test_support.py)10
-rw-r--r--trollius/test_utils.py (renamed from asyncio/test_utils.py)233
-rw-r--r--trollius/time_monotonic.py192
-rw-r--r--trollius/transports.py (renamed from asyncio/transports.py)15
-rw-r--r--trollius/unix_events.py (renamed from asyncio/unix_events.py)146
-rw-r--r--trollius/windows_events.py (renamed from asyncio/windows_events.py)88
-rw-r--r--trollius/windows_utils.py (renamed from asyncio/windows_utils.py)29
-rwxr-xr-xupdate-tulip-step1.sh9
-rwxr-xr-xupdate-tulip-step2.sh39
-rwxr-xr-xupdate-tulip-step3.sh4
-rwxr-xr-xupdate_stdlib.sh70
103 files changed, 6349 insertions, 3029 deletions
diff --git a/.hgignore b/.hgignore
index 736c7fd..186576f 100644
--- a/.hgignore
+++ b/.hgignore
@@ -12,4 +12,6 @@ distribute-\d+.\d+.\d+.tar.gz$
build$
dist$
.*\.egg-info$
+
+# Directory created by the "tox" command (ex: tox -e py27)
\.tox$
diff --git a/AUTHORS b/AUTHORS
index d25b446..c625633 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,26 +1,14 @@
-A. Jesse Jiryu Davis <jesse AT mongodb.com>
-Aaron Griffith
-Andrew Svetlov <andrew.svetlov AT gmail.com>
-Anthony Baire
-Antoine Pitrou <solipsis AT pitrou.net>
-Arnaud Faure
-Aymeric Augustin
-Brett Cannon
-Charles-François Natali <cf.natali AT gmail.com>
-Christian Heimes
-Donald Stufft
-Eli Bendersky <eliben AT gmail.com>
-Geert Jansen <geertj AT gmail.com>
-Giampaolo Rodola' <g.rodola AT gmail.com>
-Guido van Rossum <guido AT python.org>: creator of the Tulip project and author of the PEP 3156
-Gustavo Carneiro <gjcarneiro AT gmail.com>
-Jeff Quast
-Jonathan Slenders <jonathan.slenders AT gmail.com>
-Nikolay Kim <fafhrd91 AT gmail.com>
-Richard Oudkerk <shibturn AT gmail.com>
-Saúl Ibarra Corretgé <saghul AT gmail.com>
-Serhiy Storchaka
-Vajrasky Kok
-Victor Stinner <victor.stinner AT gmail.com>
-Vladimir Kryachko
-Yury Selivanov <yselivanov AT gmail.com>
+Trollius authors
+================
+
+Ian Wienand <iwienand@redhat.com>
+Marc Schlaich <marc.schlaich AT gmail.com>
+Victor Stinner <victor.stinner AT gmail.com> - creator of the Trollius project
+
+The photo of Trollis flower was taken by Imartin6 and distributed under the CC
+BY-SA 3.0 license. It comes from:
+http://commons.wikimedia.org/wiki/File:Trollius_altaicus.jpg
+
+Trollius is a port of the Tulip project on Python 2, see also authors of the
+Tulip project (AUTHORS file of the Tulip project).
+
diff --git a/ChangeLog b/ChangeLog
deleted file mode 100644
index e483fb2..0000000
--- a/ChangeLog
+++ /dev/null
@@ -1,185 +0,0 @@
-2014-09-30: Tulip 3.4.2
-=======================
-
-New shiny methods like create_task(), better documentation, much better debug
-mode, better tests.
-
-asyncio API
------------
-
-* Add BaseEventLoop.create_task() method: schedule a coroutine object.
- It allows other asyncio implementations to use their own Task class to
- change its behaviour.
-
-* New BaseEventLoop methods:
-
- - create_task(): schedule a coroutine
- - get_debug()
- - is_closed()
- - set_debug()
-
-* Add _FlowControlMixin.get_write_buffer_limits() method
-
-* sock_recv(), sock_sendall(), sock_connect(), sock_accept() methods of
- SelectorEventLoop now raise an exception if the socket is blocking mode
-
-* Include unix_events/windows_events symbols in asyncio.__all__.
- Examples: SelectorEventLoop, ProactorEventLoop, DefaultEventLoopPolicy.
-
-* attach(), detach(), loop, active_count and waiters attributes of the Server
- class are now private
-
-* BaseEventLoop: run_forever(), run_until_complete() now raises an exception if
- the event loop was closed
-
-* close() now raises an exception if the event loop is running, because pending
- callbacks would be lost
-
-* Queue now accepts a float for the maximum size.
-
-* Process.communicate() now ignores BrokenPipeError and ConnectionResetError
- exceptions, as Popen.communicate() of the subprocess module
-
-
-Performances
-------------
-
-* Optimize handling of cancelled timers
-
-
-Debug
------
-
-* Future (and Task), CoroWrapper and Handle now remembers where they were
- created (new _source_traceback object), traceback displayed when errors are
- logged.
-
-* On Python 3.4 and newer, Task destrutor now logs a warning if the task was
- destroyed while it was still pending. It occurs if the last reference
- to the task was removed, while the coroutine didn't finish yet.
-
-* Much more useful events are logged:
-
- - Event loop closed
- - Network connection
- - Creation of a subprocess
- - Pipe lost
- - Log many errors previously silently ignored
- - SSL handshake failure
- - etc.
-
-* BaseEventLoop._debug is now True if the envrionement variable
- PYTHONASYNCIODEBUG is set
-
-* Log the duration of DNS resolution and SSL handshake
-
-* Log a warning if a callback blocks the event loop longer than 100 ms
- (configurable duration)
-
-* repr(CoroWrapper) and repr(Task) now contains the current status of the
- coroutine (running, done), current filename and line number, and filename and
- line number where the object was created
-
-* Enhance representation (repr) of transports: add the file descriptor, status
- (idle, polling, writing, etc.), size of the write buffer, ...
-
-* Add repr(BaseEventLoop)
-
-* run_until_complete() doesn't log a warning anymore when called with a
- coroutine object which raises an exception.
-
-
-Bugfixes
---------
-
-* windows_utils.socketpair() now ensures that sockets are closed in case
- of error.
-
-* Rewrite bricks of the IocpProactor() to make it more reliable
-
-* IocpProactor destructor now closes it.
-
-* _OverlappedFuture.set_exception() now cancels the overlapped operation.
-
-* Rewrite _WaitHandleFuture:
-
- - cancel() is now able to signal the cancellation to the overlapped object
- - _unregister_wait() now catchs and logs exceptions
-
-* PipeServer.close() (class used on Windows) now cancels the accept pipe
- future.
-
-* Rewrite signal handling in the UNIX implementation of SelectorEventLoop:
- use the self-pipe to store pending signals instead of registering a
- signal handler calling directly _handle_signal(). The change fixes a
- race condition.
-
-* create_unix_server(): close the socket on error.
-
-* Fix wait_for()
-
-* Rewrite gather()
-
-* drain() is now a classic coroutine, no more special return value (empty
- tuple)
-
-* Rewrite SelectorEventLoop.sock_connect() to handle correctly timeout
-
-* Process data of the self-pipe faster to accept more pending events,
- especially signals written by signal handlers: the callback reads all pending
- data, not only a single byte
-
-* Don't try to set the result of a Future anymore if it was cancelled
- (explicitly or by a timeout)
-
-* CoroWrapper now works around CPython issue #21209: yield from & custom
- generator classes don't work together, issue with the send() method. It only
- affected asyncio in debug mode on Python older than 3.4.2
-
-
-Misc changes
-------------
-
-* windows_utils.socketpair() now supports IPv6.
-
-* Better documentation (online & docstrings): fill remaining XXX, more examples
-
-* new asyncio.coroutines submodule, to ease maintenance with the trollius
- project: @coroutine, _DEBUG, iscoroutine() and iscoroutinefunction() have
- been moved from asyncio.tasks to asyncio.coroutines
-
-* Cleanup code, ex: remove unused attribute (ex: _rawsock)
-
-* Reuse os.set_blocking() of Python 3.5.
-
-* Close explicitly the event loop in Tulip examples.
-
-* runtests.py now mention if tests are running in release or debug mode.
-
-2014-05-19: Tulip 3.4.1
-=======================
-
-2014-02-24: Tulip 0.4.1
-=======================
-
-2014-02-10: Tulip 0.3.1
-=======================
-
-* Add asyncio.subprocess submodule and the Process class.
-
-2013-11-25: Tulip 0.2.1
-=======================
-
-* Add support of subprocesses using transports and protocols.
-
-2013-10-22: Tulip 0.1.1
-=======================
-
-* First release.
-
-Creation of the project
-=======================
-
-* 2013-10-14: The tulip package was renamed to asyncio.
-* 2012-10-16: Creation of the Tulip project, started as mail threads on the
- python-ideas mailing list.
diff --git a/MANIFEST.in b/MANIFEST.in
index 314325c..405b309 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,11 +1,13 @@
-include AUTHORS COPYING
+include AUTHORS COPYING TODO tox.ini
include Makefile
include overlapped.c pypi.bat
-include check.py runtests.py
-include update_stdlib.sh
+include check.py runtests.py run_aiotest.py
+include update-tulip*.sh
-recursive-include examples *.py
-recursive-include tests *.crt
-recursive-include tests *.key
-recursive-include tests *.pem
-recursive-include tests *.py
+include doc/conf.py doc/make.bat doc/Makefile
+include doc/*.rst doc/*.jpg
+
+include examples/*.py
+
+include tests/*.crt tests/*.pem tests/*.key
+include tests/*.py
diff --git a/Makefile b/Makefile
index eda02f2..768298b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
# Some simple testing tasks (sorry, UNIX only).
-PYTHON=python3
+PYTHON=python
VERBOSE=$(V)
V= 0
FLAGS=
@@ -40,6 +40,8 @@ clean:
rm -rf build
rm -rf asyncio.egg-info
rm -f MANIFEST
+ rm -rf trollius.egg-info
+ rm -rf .tox
# For distribution builders only!
diff --git a/README b/README
index 2f3150a..ea5d7ca 100644
--- a/README
+++ b/README
@@ -1,44 +1,40 @@
-Tulip is the codename for my reference implementation of PEP 3156.
+Trollius provides infrastructure for writing single-threaded concurrent
+code using coroutines, multiplexing I/O access over sockets and other
+resources, running network clients and servers, and other related primitives.
+Here is a more detailed list of the package contents:
-PEP 3156: http://www.python.org/dev/peps/pep-3156/
+* a pluggable event loop with various system-specific implementations;
-*** This requires Python 3.3 or later! ***
+* transport and protocol abstractions (similar to those in `Twisted
+ <http://twistedmatrix.com/>`_);
-Copyright/license: Open source, Apache 2.0. Enjoy.
+* concrete support for TCP, UDP, SSL, subprocess pipes, delayed calls, and
+ others (some may be system-dependent);
-Master Mercurial repo: http://code.google.com/p/tulip/
+* a ``Future`` class that mimics the one in the ``concurrent.futures`` module,
+ but adapted for use with the event loop;
-The actual code lives in the 'asyncio' subdirectory.
-Tests are in the 'tests' subdirectory.
+* coroutines and tasks based on generators (``yield``), to help write
+ concurrent code in a sequential fashion;
-To run tests:
- - make test
+* cancellation support for ``Future``\s and coroutines;
-To run coverage (coverage package is required):
- - make coverage
+* synchronization primitives for use between coroutines in a single thread,
+ mimicking those in the ``threading`` module;
-On Windows, things are a little more complicated. Assume 'P' is your
-Python binary (for example C:\Python33\python.exe).
+* an interface for passing work off to a threadpool, for times when you
+ absolutely, positively have to use a library that makes blocking I/O calls.
-You must first build the _overlapped.pyd extension and have it placed
-in the asyncio directory, as follows:
+Trollius is a portage of the `Tulip project <http://code.google.com/p/tulip/>`_
+(``asyncio`` module, `PEP 3156 <http://legacy.python.org/dev/peps/pep-3156/>`_)
+on Python 2. Trollius works on Python 2.6-3.5. It has been tested on Windows,
+Linux, Mac OS X, FreeBSD and OpenIndiana.
- C> P setup.py build_ext --inplace
+* `Asyncio documentation <http://docs.python.org/dev/library/asyncio.html>`_
+* `Trollius documentation <http://trollius.readthedocs.org/>`_
+* `Trollius project in the Python Cheeseshop (PyPI)
+ <https://pypi.python.org/pypi/trollius>`_
+* `Trollius project at Bitbucket <https://bitbucket.org/enovance/trollius>`_
+* Copyright/license: Open source, Apache 2.0. Enjoy!
-If this complains about vcvars.bat, you probably don't have the
-required version of Visual Studio installed. Compiling extensions for
-Python 3.3 requires Microsoft Visual C++ 2010 (MSVC 10.0) of any
-edition; you can download Visual Studio Express 2010 for free from
-http://www.visualstudio.com/downloads (scroll down to Visual C++ 2010
-Express).
-
-Once you have built the _overlapped.pyd extension successfully you can
-run the tests as follows:
-
- C> P runtests.py
-
-And coverage as follows:
-
- C> P runtests.py --coverage
-
---Guido van Rossum <guido@python.org>
+See also the `Tulip project <http://code.google.com/p/tulip/>`_.
diff --git a/TODO b/TODO
new file mode 100644
index 0000000..a7ae0c9
--- /dev/null
+++ b/TODO
@@ -0,0 +1,16 @@
+Unsorted "TODO" tasks:
+
+* Replace logger with warning in monotonic clock and synchronous executor
+* Windows: use _overlapped in py33_winapi?
+* Fix tests failing with PyPy:
+
+ - sys.getrefcount()
+ - test_queues.test_repr
+ - test_futures.test_tb_logger_exception_unretrieved
+
+* write unit test for create_connection(ssl=True)
+* Fix examples:
+
+ - stacks.py: 'exceptions.ZeroDivisionError' object has no attribute '__traceback__'
+
+* Fix all FIXME in the code
diff --git a/asyncio/coroutines.py b/asyncio/coroutines.py
deleted file mode 100644
index c28de95..0000000
--- a/asyncio/coroutines.py
+++ /dev/null
@@ -1,195 +0,0 @@
-__all__ = ['coroutine',
- 'iscoroutinefunction', 'iscoroutine']
-
-import functools
-import inspect
-import opcode
-import os
-import sys
-import traceback
-import types
-
-from . import events
-from . import futures
-from .log import logger
-
-
-# Opcode of "yield from" instruction
-_YIELD_FROM = opcode.opmap['YIELD_FROM']
-
-# If you set _DEBUG to true, @coroutine will wrap the resulting
-# generator objects in a CoroWrapper instance (defined below). That
-# instance will log a message when the generator is never iterated
-# over, which may happen when you forget to use "yield from" with a
-# coroutine call. Note that the value of the _DEBUG flag is taken
-# when the decorator is used, so to be of any use it must be set
-# before you define your coroutines. A downside of using this feature
-# is that tracebacks show entries for the CoroWrapper.__next__ method
-# when _DEBUG is true.
-_DEBUG = (not sys.flags.ignore_environment
- and bool(os.environ.get('PYTHONASYNCIODEBUG')))
-
-
-# Check for CPython issue #21209
-def has_yield_from_bug():
- class MyGen:
- def __init__(self):
- self.send_args = None
- def __iter__(self):
- return self
- def __next__(self):
- return 42
- def send(self, *what):
- self.send_args = what
- return None
- def yield_from_gen(gen):
- yield from gen
- value = (1, 2, 3)
- gen = MyGen()
- coro = yield_from_gen(gen)
- next(coro)
- coro.send(value)
- return gen.send_args != (value,)
-_YIELD_FROM_BUG = has_yield_from_bug()
-del has_yield_from_bug
-
-
-class CoroWrapper:
- # Wrapper for coroutine object in _DEBUG mode.
-
- def __init__(self, gen, func):
- assert inspect.isgenerator(gen), gen
- self.gen = gen
- self.func = func
- self._source_traceback = traceback.extract_stack(sys._getframe(1))
- # __name__, __qualname__, __doc__ attributes are set by the coroutine()
- # decorator
-
- def __repr__(self):
- coro_repr = _format_coroutine(self)
- if self._source_traceback:
- frame = self._source_traceback[-1]
- coro_repr += ', created at %s:%s' % (frame[0], frame[1])
- return '<%s %s>' % (self.__class__.__name__, coro_repr)
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return next(self.gen)
-
- if _YIELD_FROM_BUG:
- # For for CPython issue #21209: using "yield from" and a custom
- # generator, generator.send(tuple) unpacks the tuple instead of passing
- # the tuple unchanged. Check if the caller is a generator using "yield
- # from" to decide if the parameter should be unpacked or not.
- def send(self, *value):
- frame = sys._getframe()
- caller = frame.f_back
- assert caller.f_lasti >= 0
- if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
- value = value[0]
- return self.gen.send(value)
- else:
- def send(self, value):
- return self.gen.send(value)
-
- def throw(self, exc):
- return self.gen.throw(exc)
-
- def close(self):
- return self.gen.close()
-
- @property
- def gi_frame(self):
- return self.gen.gi_frame
-
- @property
- def gi_running(self):
- return self.gen.gi_running
-
- @property
- def gi_code(self):
- return self.gen.gi_code
-
- def __del__(self):
- # Be careful accessing self.gen.frame -- self.gen might not exist.
- gen = getattr(self, 'gen', None)
- frame = getattr(gen, 'gi_frame', None)
- if frame is not None and frame.f_lasti == -1:
- msg = '%r was never yielded from' % self
- tb = getattr(self, '_source_traceback', ())
- if tb:
- tb = ''.join(traceback.format_list(tb))
- msg += ('\nCoroutine object created at '
- '(most recent call last):\n')
- msg += tb.rstrip()
- logger.error(msg)
-
-
-def coroutine(func):
- """Decorator to mark coroutines.
-
- If the coroutine is not yielded from before it is destroyed,
- an error message is logged.
- """
- if inspect.isgeneratorfunction(func):
- coro = func
- else:
- @functools.wraps(func)
- def coro(*args, **kw):
- res = func(*args, **kw)
- if isinstance(res, futures.Future) or inspect.isgenerator(res):
- res = yield from res
- return res
-
- if not _DEBUG:
- wrapper = coro
- else:
- @functools.wraps(func)
- def wrapper(*args, **kwds):
- w = CoroWrapper(coro(*args, **kwds), func)
- if w._source_traceback:
- del w._source_traceback[-1]
- w.__name__ = func.__name__
- if hasattr(func, '__qualname__'):
- w.__qualname__ = func.__qualname__
- w.__doc__ = func.__doc__
- return w
-
- wrapper._is_coroutine = True # For iscoroutinefunction().
- return wrapper
-
-
-def iscoroutinefunction(func):
- """Return True if func is a decorated coroutine function."""
- return getattr(func, '_is_coroutine', False)
-
-
-_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
-
-def iscoroutine(obj):
- """Return True if obj is a coroutine object."""
- return isinstance(obj, _COROUTINE_TYPES)
-
-
-def _format_coroutine(coro):
- assert iscoroutine(coro)
- coro_name = getattr(coro, '__qualname__', coro.__name__)
-
- filename = coro.gi_code.co_filename
- if (isinstance(coro, CoroWrapper)
- and not inspect.isgeneratorfunction(coro.func)):
- filename, lineno = events._get_function_source(coro.func)
- if coro.gi_frame is None:
- coro_repr = '%s() done, defined at %s:%s' % (coro_name, filename, lineno)
- else:
- coro_repr = '%s() running, defined at %s:%s' % (coro_name, filename, lineno)
- elif coro.gi_frame is not None:
- lineno = coro.gi_frame.f_lineno
- coro_repr = '%s() running at %s:%s' % (coro_name, filename, lineno)
- else:
- lineno = coro.gi_code.co_firstlineno
- coro_repr = '%s() done, defined at %s:%s' % (coro_name, filename, lineno)
-
- return coro_repr
diff --git a/asyncio/events.py b/asyncio/events.py
deleted file mode 100644
index 8a7bb81..0000000
--- a/asyncio/events.py
+++ /dev/null
@@ -1,597 +0,0 @@
-"""Event loop and event loop policy."""
-
-__all__ = ['AbstractEventLoopPolicy',
- 'AbstractEventLoop', 'AbstractServer',
- 'Handle', 'TimerHandle',
- 'get_event_loop_policy', 'set_event_loop_policy',
- 'get_event_loop', 'set_event_loop', 'new_event_loop',
- 'get_child_watcher', 'set_child_watcher',
- ]
-
-import functools
-import inspect
-import reprlib
-import socket
-import subprocess
-import sys
-import threading
-import traceback
-
-
-_PY34 = sys.version_info >= (3, 4)
-
-
-def _get_function_source(func):
- if _PY34:
- func = inspect.unwrap(func)
- elif hasattr(func, '__wrapped__'):
- func = func.__wrapped__
- if inspect.isfunction(func):
- code = func.__code__
- return (code.co_filename, code.co_firstlineno)
- if isinstance(func, functools.partial):
- return _get_function_source(func.func)
- if _PY34 and isinstance(func, functools.partialmethod):
- return _get_function_source(func.func)
- return None
-
-
-def _format_args(args):
- """Format function arguments.
-
- Special case for a single parameter: ('hello',) is formatted as ('hello').
- """
- # use reprlib to limit the length of the output
- args_repr = reprlib.repr(args)
- if len(args) == 1 and args_repr.endswith(',)'):
- args_repr = args_repr[:-2] + ')'
- return args_repr
-
-
-def _format_callback(func, args, suffix=''):
- if isinstance(func, functools.partial):
- if args is not None:
- suffix = _format_args(args) + suffix
- return _format_callback(func.func, func.args, suffix)
-
- func_repr = getattr(func, '__qualname__', None)
- if not func_repr:
- func_repr = repr(func)
-
- if args is not None:
- func_repr += _format_args(args)
- if suffix:
- func_repr += suffix
-
- source = _get_function_source(func)
- if source:
- func_repr += ' at %s:%s' % source
- return func_repr
-
-
-class Handle:
- """Object returned by callback registration methods."""
-
- __slots__ = ('_callback', '_args', '_cancelled', '_loop',
- '_source_traceback', '_repr', '__weakref__')
-
- def __init__(self, callback, args, loop):
- assert not isinstance(callback, Handle), 'A Handle is not a callback'
- self._loop = loop
- self._callback = callback
- self._args = args
- self._cancelled = False
- self._repr = None
- if self._loop.get_debug():
- self._source_traceback = traceback.extract_stack(sys._getframe(1))
- else:
- self._source_traceback = None
-
- def _repr_info(self):
- info = [self.__class__.__name__]
- if self._cancelled:
- info.append('cancelled')
- if self._callback is not None:
- info.append(_format_callback(self._callback, self._args))
- if self._source_traceback:
- frame = self._source_traceback[-1]
- info.append('created at %s:%s' % (frame[0], frame[1]))
- return info
-
- def __repr__(self):
- if self._repr is not None:
- return self._repr
- info = self._repr_info()
- return '<%s>' % ' '.join(info)
-
- def cancel(self):
- if not self._cancelled:
- self._cancelled = True
- if self._loop.get_debug():
- # Keep a representation in debug mode to keep callback and
- # parameters. For example, to log the warning
- # "Executing <Handle...> took 2.5 second"
- self._repr = repr(self)
- self._callback = None
- self._args = None
-
- def _run(self):
- try:
- self._callback(*self._args)
- except Exception as exc:
- cb = _format_callback(self._callback, self._args)
- msg = 'Exception in callback {}'.format(cb)
- context = {
- 'message': msg,
- 'exception': exc,
- 'handle': self,
- }
- if self._source_traceback:
- context['source_traceback'] = self._source_traceback
- self._loop.call_exception_handler(context)
- self = None # Needed to break cycles when an exception occurs.
-
-
-class TimerHandle(Handle):
- """Object returned by timed callback registration methods."""
-
- __slots__ = ['_scheduled', '_when']
-
- def __init__(self, when, callback, args, loop):
- assert when is not None
- super().__init__(callback, args, loop)
- if self._source_traceback:
- del self._source_traceback[-1]
- self._when = when
- self._scheduled = False
-
- def _repr_info(self):
- info = super()._repr_info()
- pos = 2 if self._cancelled else 1
- info.insert(pos, 'when=%s' % self._when)
- return info
-
- def __hash__(self):
- return hash(self._when)
-
- def __lt__(self, other):
- return self._when < other._when
-
- def __le__(self, other):
- if self._when < other._when:
- return True
- return self.__eq__(other)
-
- def __gt__(self, other):
- return self._when > other._when
-
- def __ge__(self, other):
- if self._when > other._when:
- return True
- return self.__eq__(other)
-
- def __eq__(self, other):
- if isinstance(other, TimerHandle):
- return (self._when == other._when and
- self._callback == other._callback and
- self._args == other._args and
- self._cancelled == other._cancelled)
- return NotImplemented
-
- def __ne__(self, other):
- equal = self.__eq__(other)
- return NotImplemented if equal is NotImplemented else not equal
-
- def cancel(self):
- if not self._cancelled:
- self._loop._timer_handle_cancelled(self)
- super().cancel()
-
-
-class AbstractServer:
- """Abstract server returned by create_server()."""
-
- def close(self):
- """Stop serving. This leaves existing connections open."""
- return NotImplemented
-
- def wait_closed(self):
- """Coroutine to wait until service is closed."""
- return NotImplemented
-
-
-class AbstractEventLoop:
- """Abstract event loop."""
-
- # Running and stopping the event loop.
-
- def run_forever(self):
- """Run the event loop until stop() is called."""
- raise NotImplementedError
-
- def run_until_complete(self, future):
- """Run the event loop until a Future is done.
-
- Return the Future's result, or raise its exception.
- """
- raise NotImplementedError
-
- def stop(self):
- """Stop the event loop as soon as reasonable.
-
- Exactly how soon that is may depend on the implementation, but
- no more I/O callbacks should be scheduled.
- """
- raise NotImplementedError
-
- def is_running(self):
- """Return whether the event loop is currently running."""
- raise NotImplementedError
-
- def is_closed(self):
- """Returns True if the event loop was closed."""
- raise NotImplementedError
-
- def close(self):
- """Close the loop.
-
- The loop should not be running.
-
- This is idempotent and irreversible.
-
- No other methods should be called after this one.
- """
- raise NotImplementedError
-
- # Methods scheduling callbacks. All these return Handles.
-
- def _timer_handle_cancelled(self, handle):
- """Notification that a TimerHandle has been cancelled."""
- raise NotImplementedError
-
- def call_soon(self, callback, *args):
- return self.call_later(0, callback, *args)
-
- def call_later(self, delay, callback, *args):
- raise NotImplementedError
-
- def call_at(self, when, callback, *args):
- raise NotImplementedError
-
- def time(self):
- raise NotImplementedError
-
- # Method scheduling a coroutine object: create a task.
-
- def create_task(self, coro):
- raise NotImplementedError
-
- # Methods for interacting with threads.
-
- def call_soon_threadsafe(self, callback, *args):
- raise NotImplementedError
-
- def run_in_executor(self, executor, callback, *args):
- raise NotImplementedError
-
- def set_default_executor(self, executor):
- raise NotImplementedError
-
- # Network I/O methods returning Futures.
-
- def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
- raise NotImplementedError
-
- def getnameinfo(self, sockaddr, flags=0):
- raise NotImplementedError
-
- def create_connection(self, protocol_factory, host=None, port=None, *,
- ssl=None, family=0, proto=0, flags=0, sock=None,
- local_addr=None, server_hostname=None):
- raise NotImplementedError
-
- def create_server(self, protocol_factory, host=None, port=None, *,
- family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
- sock=None, backlog=100, ssl=None, reuse_address=None):
- """A coroutine which creates a TCP server bound to host and port.
-
- The return value is a Server object which can be used to stop
- the service.
-
- If host is an empty string or None all interfaces are assumed
- and a list of multiple sockets will be returned (most likely
- one for IPv4 and another one for IPv6).
-
- family can be set to either AF_INET or AF_INET6 to force the
- socket to use IPv4 or IPv6. If not set it will be determined
- from host (defaults to AF_UNSPEC).
-
- flags is a bitmask for getaddrinfo().
-
- sock can optionally be specified in order to use a preexisting
- socket object.
-
- backlog is the maximum number of queued connections passed to
- listen() (defaults to 100).
-
- ssl can be set to an SSLContext to enable SSL over the
- accepted connections.
-
- reuse_address tells the kernel to reuse a local socket in
- TIME_WAIT state, without waiting for its natural timeout to
- expire. If not specified will automatically be set to True on
- UNIX.
- """
- raise NotImplementedError
-
- def create_unix_connection(self, protocol_factory, path, *,
- ssl=None, sock=None,
- server_hostname=None):
- raise NotImplementedError
-
- def create_unix_server(self, protocol_factory, path, *,
- sock=None, backlog=100, ssl=None):
- """A coroutine which creates a UNIX Domain Socket server.
-
- The return value is a Server object, which can be used to stop
- the service.
-
- path is a str, representing a file systsem path to bind the
- server socket to.
-
- sock can optionally be specified in order to use a preexisting
- socket object.
-
- backlog is the maximum number of queued connections passed to
- listen() (defaults to 100).
-
- ssl can be set to an SSLContext to enable SSL over the
- accepted connections.
- """
- raise NotImplementedError
-
- def create_datagram_endpoint(self, protocol_factory,
- local_addr=None, remote_addr=None, *,
- family=0, proto=0, flags=0):
- raise NotImplementedError
-
- # Pipes and subprocesses.
-
- def connect_read_pipe(self, protocol_factory, pipe):
- """Register read pipe in event loop. Set the pipe to non-blocking mode.
-
- protocol_factory should instantiate object with Protocol interface.
- pipe is a file-like object.
- Return pair (transport, protocol), where transport supports the
- ReadTransport interface."""
- # The reason to accept file-like object instead of just file descriptor
- # is: we need to own pipe and close it at transport finishing
- # Can got complicated errors if pass f.fileno(),
- # close fd in pipe transport then close f and vise versa.
- raise NotImplementedError
-
- def connect_write_pipe(self, protocol_factory, pipe):
- """Register write pipe in event loop.
-
- protocol_factory should instantiate object with BaseProtocol interface.
- Pipe is file-like object already switched to nonblocking.
- Return pair (transport, protocol), where transport support
- WriteTransport interface."""
- # The reason to accept file-like object instead of just file descriptor
- # is: we need to own pipe and close it at transport finishing
- # Can got complicated errors if pass f.fileno(),
- # close fd in pipe transport then close f and vise versa.
- raise NotImplementedError
-
- def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- **kwargs):
- raise NotImplementedError
-
- def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- **kwargs):
- raise NotImplementedError
-
- # Ready-based callback registration methods.
- # The add_*() methods return None.
- # The remove_*() methods return True if something was removed,
- # False if there was nothing to delete.
-
- def add_reader(self, fd, callback, *args):
- raise NotImplementedError
-
- def remove_reader(self, fd):
- raise NotImplementedError
-
- def add_writer(self, fd, callback, *args):
- raise NotImplementedError
-
- def remove_writer(self, fd):
- raise NotImplementedError
-
- # Completion based I/O methods returning Futures.
-
- def sock_recv(self, sock, nbytes):
- raise NotImplementedError
-
- def sock_sendall(self, sock, data):
- raise NotImplementedError
-
- def sock_connect(self, sock, address):
- raise NotImplementedError
-
- def sock_accept(self, sock):
- raise NotImplementedError
-
- # Signal handling.
-
- def add_signal_handler(self, sig, callback, *args):
- raise NotImplementedError
-
- def remove_signal_handler(self, sig):
- raise NotImplementedError
-
- # Error handlers.
-
- def set_exception_handler(self, handler):
- raise NotImplementedError
-
- def default_exception_handler(self, context):
- raise NotImplementedError
-
- def call_exception_handler(self, context):
- raise NotImplementedError
-
- # Debug flag management.
-
- def get_debug(self):
- raise NotImplementedError
-
- def set_debug(self, enabled):
- raise NotImplementedError
-
-
-class AbstractEventLoopPolicy:
- """Abstract policy for accessing the event loop."""
-
- def get_event_loop(self):
- """Get the event loop for the current context.
-
- Returns an event loop object implementing the BaseEventLoop interface,
- or raises an exception in case no event loop has been set for the
- current context and the current policy does not specify to create one.
-
- It should never return None."""
- raise NotImplementedError
-
- def set_event_loop(self, loop):
- """Set the event loop for the current context to loop."""
- raise NotImplementedError
-
- def new_event_loop(self):
- """Create and return a new event loop object according to this
- policy's rules. If there's need to set this loop as the event loop for
- the current context, set_event_loop must be called explicitly."""
- raise NotImplementedError
-
- # Child processes handling (Unix only).
-
- def get_child_watcher(self):
- "Get the watcher for child processes."
- raise NotImplementedError
-
- def set_child_watcher(self, watcher):
- """Set the watcher for child processes."""
- raise NotImplementedError
-
-
-class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
- """Default policy implementation for accessing the event loop.
-
- In this policy, each thread has its own event loop. However, we
- only automatically create an event loop by default for the main
- thread; other threads by default have no event loop.
-
- Other policies may have different rules (e.g. a single global
- event loop, or automatically creating an event loop per thread, or
- using some other notion of context to which an event loop is
- associated).
- """
-
- _loop_factory = None
-
- class _Local(threading.local):
- _loop = None
- _set_called = False
-
- def __init__(self):
- self._local = self._Local()
-
- def get_event_loop(self):
- """Get the event loop.
-
- This may be None or an instance of EventLoop.
- """
- if (self._local._loop is None and
- not self._local._set_called and
- isinstance(threading.current_thread(), threading._MainThread)):
- self.set_event_loop(self.new_event_loop())
- if self._local._loop is None:
- raise RuntimeError('There is no current event loop in thread %r.'
- % threading.current_thread().name)
- return self._local._loop
-
- def set_event_loop(self, loop):
- """Set the event loop."""
- self._local._set_called = True
- assert loop is None or isinstance(loop, AbstractEventLoop)
- self._local._loop = loop
-
- def new_event_loop(self):
- """Create a new event loop.
-
- You must call set_event_loop() to make this the current event
- loop.
- """
- return self._loop_factory()
-
-
-# Event loop policy. The policy itself is always global, even if the
-# policy's rules say that there is an event loop per thread (or other
-# notion of context). The default policy is installed by the first
-# call to get_event_loop_policy().
-_event_loop_policy = None
-
-# Lock for protecting the on-the-fly creation of the event loop policy.
-_lock = threading.Lock()
-
-
-def _init_event_loop_policy():
- global _event_loop_policy
- with _lock:
- if _event_loop_policy is None: # pragma: no branch
- from . import DefaultEventLoopPolicy
- _event_loop_policy = DefaultEventLoopPolicy()
-
-
-def get_event_loop_policy():
- """Get the current event loop policy."""
- if _event_loop_policy is None:
- _init_event_loop_policy()
- return _event_loop_policy
-
-
-def set_event_loop_policy(policy):
- """Set the current event loop policy.
-
- If policy is None, the default policy is restored."""
- global _event_loop_policy
- assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
- _event_loop_policy = policy
-
-
-def get_event_loop():
- """Equivalent to calling get_event_loop_policy().get_event_loop()."""
- return get_event_loop_policy().get_event_loop()
-
-
-def set_event_loop(loop):
- """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
- get_event_loop_policy().set_event_loop(loop)
-
-
-def new_event_loop():
- """Equivalent to calling get_event_loop_policy().new_event_loop()."""
- return get_event_loop_policy().new_event_loop()
-
-
-def get_child_watcher():
- """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
- return get_event_loop_policy().get_child_watcher()
-
-
-def set_child_watcher(watcher):
- """Equivalent to calling
- get_event_loop_policy().set_child_watcher(watcher)."""
- return get_event_loop_policy().set_child_watcher(watcher)
diff --git a/check.py b/check.py
index 6db82d6..dcefc18 100644
--- a/check.py
+++ b/check.py
@@ -37,7 +37,7 @@ def process(fn):
line = line.rstrip('\n')
sline = line.rstrip()
if len(line) >= 80 or line != sline or not isascii(line):
- print('{}:{:d}:{}{}'.format(
+ print('{0}:{1:d}:{2}{3}'.format(
fn, i+1, sline, '_' * (len(line) - len(sline))))
finally:
f.close()
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000..314751a
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Trollius.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Trollius.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/Trollius"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Trollius"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/doc/asyncio.rst b/doc/asyncio.rst
new file mode 100644
index 0000000..5866d62
--- /dev/null
+++ b/doc/asyncio.rst
@@ -0,0 +1,185 @@
+++++++++++++++++++
+Trollius and Tulip
+++++++++++++++++++
+
+Differences between Trollius and Tulip
+======================================
+
+Syntax of coroutines
+--------------------
+
+The major difference between Trollius and Tulip is the syntax of coroutines:
+
+================== ======================
+Tulip Trollius
+================== ======================
+``yield from ...`` ``yield From(...)``
+``yield from []`` ``yield From(None)``
+``return`` ``raise Return()``
+``return x`` ``raise Return(x)``
+``return x, y`` ``raise Return(x, y)``
+================== ======================
+
+Because of this major difference, it was decided to call the module
+``trollius`` instead of ``asyncio``. This choice also allows to use Trollius on
+Python 3.4 and later. Changing imports is not enough to use Trollius code with
+asyncio: the asyncio event loop explicitly rejects coroutines using ``yield``
+(instead of ``yield from``).
+
+OSError and socket.error exceptions
+-----------------------------------
+
+The ``OSError`` exception changed in Python 3.3: there are now subclasses like
+``ConnectionResetError`` or ``BlockingIOError``. The exception hierarchy also
+changed: ``socket.error`` is now an alias to ``OSError``. The ``asyncio``
+module is written for Python 3.3 and newer and so is based on these new
+exceptions.
+
+.. seealso::
+
+ `PEP 3151: Reworking the OS and IO exception hierarchy
+ <https://www.python.org/dev/peps/pep-3151>`_.
+
+On Python 3.2 and older, Trollius wraps ``OSError``, ``IOError``,
+``socket.error`` and ``select.error`` exceptions on operating system and socket
+operations to raise more specific exceptions, subclasses of ``OSError``:
+
+* ``trollius.BlockingIOError``
+* ``trollius.BrokenPipeError``
+* ``trollius.ChildProcessError``
+* ``trollius.ConnectionAbortedError``
+* ``trollius.ConnectionRefusedError``
+* ``trollius.ConnectionResetError``
+* ``trollius.FileNotFoundError``
+* ``trollius.InterruptedError``
+* ``trollius.PermissionError``
+
+On Python 3.3 and newer, these symbols are just aliases to builtin exceptions.
+
+.. note::
+
+ ``ssl.SSLError`` exceptions are not wrapped to ``OSError``, even if
+ ``ssl.SSLError`` is a subclass of ``socket.error``.
+
+
+SSLError
+--------
+
+On Python 3.2 and older, Trollius wraps ``ssl.SSLError`` exceptions to raise
+more specific exceptions, subclasses of ``ssl.SSLError``, to mimic the Python
+3.3:
+
+* ``trollius.SSLEOFError``
+* ``trollius.SSLWantReadError``
+* ``trollius.SSLWantWriteError``
+
+On Python 3.3 and newer, these symbols are just aliases to exceptions of the
+``ssl`` module.
+
+``trollius.BACKPORT_SSL_ERRORS`` constant:
+
+* ``True`` if ``ssl.SSLError`` are wrapped to Trollius exceptions (Python 2
+ older than 2.7.9, or Python 3 older than 3.3),
+* ``False`` if trollius SSL exceptions are just aliases.
+
+
+SSLContext
+----------
+
+Python 3.3 has a new ``ssl.SSLContext`` class: see the `documentation of the
+ssl.SSLContext class
+<https://docs.python.org/3/library/ssl.html#ssl.SSLContext>`_.
+
+On Python 3.2 and older, Trollius has a basic ``trollius.SSLContext`` class to
+mimic Python 3.3 API, but it only has a few features:
+
+* ``protocol``, ``certfile`` and ``keyfile`` attributes
+* read-only ``verify_mode`` attribute: its value is ``CERT_NONE``
+* ``load_cert_chain(certfile, keyfile)`` method
+* ``wrap_socket(sock, **kw)`` method: see the ``ssl.wrap_socket()``
+ documentation of your Python version for the keyword parameters
+
+Example of missing features:
+
+* no ``options`` attribute
+* the ``verify_mode`` attribute cannot be modified
+* no ``set_default_verify_paths()`` method
+* no "Server Name Indication" (SNI) support
+* etc.
+
+On Python 3.2 and older, the trollius SSL transport does not have the
+``'compression'`` extra info.
+
+``trollius.BACKPORT_SSL_CONTEXT`` constant:
+
+* ``True`` if ``trollius.SSLContext`` is the backported class (Python 2 older
+ than 2.7.9, or Python 3 older than 3.3),
+* ``False`` if ``trollius.SSLContext`` is just an alias to ``ssl.SSLContext``.
+
+
+Other differences
+-----------------
+
+* Trollius uses the ``TROLLIUSDEBUG`` environment variable instead of
+  the ``PYTHONASYNCIODEBUG`` environment variable. ``TROLLIUSDEBUG`` variable
+ is used even if the Python command line option ``-E`` is used.
+* ``asyncio.subprocess`` has no ``DEVNULL`` constant
+* Python 2 does not support keyword-only parameters.
+* If the ``concurrent.futures`` module is missing,
+ ``BaseEventLoop.run_in_executor()`` uses a synchronous executor instead of a
+ pool of threads. It blocks until the function returns. For example, DNS
+ resolutions are blocking in this case.
+* Trollius has more symbols than Tulip for compatibility with Python older than
+ 3.3:
+
+ - ``From``: part of ``yield From(...)`` syntax
+ - ``Return``: part of ``raise Return(...)`` syntax
+
+
+Write code working on Trollius and Tulip
+========================================
+
+Trollius and Tulip are different, especially for coroutines (``yield
+From(...)`` vs ``yield from ...``).
+
+To use asyncio or Trollius on Python 2 and Python 3, add the following code at
+the top of your file::
+
+ try:
+ # Use builtin asyncio on Python 3.4+, or Tulip on Python 3.3
+ import asyncio
+ except ImportError:
+ # Use Trollius on Python <= 3.2
+ import trollius as asyncio
+
+It is possible to write code working on both projects using only callbacks.
+This option is used by the following projects which work on Trollius and Tulip:
+
+* `AutobahnPython <https://github.com/tavendo/AutobahnPython>`_: WebSocket &
+ WAMP for Python, it works on Trollius (Python 2.6 and 2.7), Tulip (Python
+ 3.3) and Python 3.4 (asyncio), and also on Twisted.
+* `Pulsar <http://pythonhosted.org/pulsar/>`_: Event driven concurrent
+ framework for Python. With pulsar you can write asynchronous servers
+ performing one or several activities in different threads and/or processes.
+ Trollius 0.3 requires Pulsar 0.8.2 or later. Pulsar uses the ``asyncio``
+ module if available, or import ``trollius``.
+* `Tornado <http://www.tornadoweb.org/>`_ supports Tulip and Trollius since
+ Tornado 3.2: `tornado.platform.asyncio — Bridge between asyncio and Tornado
+ <http://tornado.readthedocs.org/en/latest/asyncio.html>`_. It tries to import
+ asyncio or fallback on importing trollius.
+
+Another option is to provide functions returning ``Future`` objects, so the
+caller can decide to use callback using ``fut.add_done_callback(callback)`` or
+to use coroutines (``yield From(fut)`` for Trollius, or ``yield from fut`` for
+Tulip). This option is used by the `aiodns <https://github.com/saghul/aiodns>`_
+project for example.
+
+Since Trollius 0.4, it's possible to use Tulip and Trollius coroutines in the
+same process. The only limit is that the event loop must be a Trollius event
+loop.
+
+.. note::
+
+ The Trollius module was called ``asyncio`` in Trollius version 0.2. The
+ module name changed to ``trollius`` to support Python 3.4.
+
diff --git a/doc/changelog.rst b/doc/changelog.rst
new file mode 100644
index 0000000..884bd22
--- /dev/null
+++ b/doc/changelog.rst
@@ -0,0 +1,499 @@
+++++++++++
+Change log
+++++++++++
+
+Version 1.0.4 (development version)
+===================================
+
+Changes:
+
+* Python issue #22922: create_task(), call_at(), call_soon(),
+ call_soon_threadsafe() and run_in_executor() now raise an error if the event
+ loop is closed. Initial patch written by Torsten Landschoff.
+* Python issue #22921: Don't require OpenSSL SNI to pass hostname to ssl
+ functions. Patch by Donald Stufft.
+* Add run_aiotest.py: run the aiotest test suite.
+* tox now also run the aiotest test suite
+* Python issue #23074: get_event_loop() now raises an exception if the thread
+ has no event loop even if assertions are disabled.
+
+Bugfixes:
+
+* Fix a race condition in BaseSubprocessTransport._try_finish(): ensure that
+ connection_made() is called before connection_lost().
+* Python issue #23009: selectors, make sure EpollSelector.select() works when
+ no file descriptor is registered.
+* Python issue #22922: Fix ProactorEventLoop.close(). Call
+  _stop_accept_futures() before setting the _closed attribute, otherwise
+ call_soon() raises an error.
+* Python issue #22429: Fix EventLoop.run_until_complete(), don't stop the event
+ loop if a BaseException is raised, because the event loop is already stopped.
+* Initialize more Future and Task attributes in the class definition to avoid
+ attribute errors in destructors.
+* Python issue #22685: Set the transport of stdout and stderr StreamReader
+ objects in the SubprocessStreamProtocol. It allows to pause the transport to
+ not buffer too much stdout or stderr data.
+* BaseSelectorEventLoop.close() now closes the self-pipe before calling the
+ parent close() method. If the event loop is already closed, the self-pipe is
+ not unregistered from the selector.
+
+
+2014-10-20: Version 1.0.3
+=========================
+
+Changes:
+
+* On Python 2 in debug mode, Future.set_exception() now stores the traceback
+ object of the exception in addition to the exception object. When a task
+ waiting for another task and the other task raises an exception, the
+ traceback object is now copied with the exception. Be careful, storing the
+ traceback object may create reference leaks.
+* Use ssl.create_default_context() if available to create the default SSL
+ context: Python 2.7.9 and newer, or Python 3.4 and newer.
+* On Python 3.5 and newer, reuse socket.socketpair() in the windows_utils
+ submodule.
+* On Python 3.4 and newer, use os.set_inheritable().
+* Enhance protocol representation: add "closed" or "closing" info.
+* run_forever() now consumes BaseException of the temporary task. If the
+ coroutine raised a BaseException, consume the exception to not log a warning.
+ The caller doesn't have access to the local task.
+* Python issue 22448: cleanup _run_once(), only iterate once to remove delayed
+ calls that were cancelled.
+* The destructor of the Return class now shows where the Return object was
+ created.
+* run_tests.py doesn't catch any exceptions anymore when loading tests, only
+ catch SkipTest.
+* Fix (SSL) tests for the future Python 2.7.9 which includes a "new" ssl
+ module: module backported from Python 3.5.
+* BaseEventLoop.add_signal_handler() now raises an exception if the parameter
+ is a coroutine function.
+* Coroutine functions and objects are now rejected with a TypeError by the
+ following functions: add_signal_handler(), call_at(), call_later(),
+ call_soon(), call_soon_threadsafe(), run_in_executor().
+
+
+2014-10-02: Version 1.0.2
+=========================
+
+This release fixes bugs. It also provides more information in debug mode on
+error.
+
+Major changes:
+
+* Tulip issue #203: Add _FlowControlMixin.get_write_buffer_limits() method.
+* Python issue #22063: socket operations (socket,recv, sock_sendall,
+ sock_connect, sock_accept) of SelectorEventLoop now raise an exception in
+ debug mode if sockets are in blocking mode.
+
+Major bugfixes:
+
+* Tulip issue #205: Fix a race condition in BaseSelectorEventLoop.sock_connect().
+* Tulip issue #201: Fix a race condition in wait_for(). Don't raise a
+ TimeoutError if we reached the timeout and the future completed in the same
+ iteration of the event loop. A side effect of the bug is that Queue.get()
+  loses items.
+* PipeServer.close() now cancels the "accept pipe" future which cancels the
+ overlapped operation.
+
+Other changes:
+
+* Python issue #22448: Improve cancelled timer callback handles cleanup. Patch
+ by Joshua Moore-Oliva.
+* Python issue #22369: Change "context manager protocol" to "context management
+ protocol". Patch written by Serhiy Storchaka.
+* Tulip issue #206: In debug mode, keep the callback in the representation of
+ Handle and TimerHandle after cancel().
+* Tulip issue #207: Fix test_tasks.test_env_var_debug() to use correct asyncio
+ module.
+* runtests.py: display a message to mention if tests are run in debug or
+ release mode
+* Tulip issue #200: Log errors in debug mode instead of simply ignoring them.
+* Tulip issue #200: _WaitHandleFuture._unregister_wait() now catches and logs
+ exceptions.
+* _fatal_error() method of _UnixReadPipeTransport and _UnixWritePipeTransport
+ now log all exceptions in debug mode
+* Fix debug log in BaseEventLoop.create_connection(): get the socket object
+ from the transport because SSL transport closes the old socket and creates a
+ new SSL socket object.
+* Remove the _SelectorSslTransport._rawsock attribute: it contained the closed
+ socket (not very useful) and it was not used.
+* Fix _SelectorTransport.__repr__() if the transport was closed
+* Use the new os.set_blocking() function of Python 3.5 if available
+
+
+2014-07-30: Version 1.0.1
+=========================
+
+This release supports PyPy and has a better support of asyncio coroutines,
+especially in debug mode.
+
+Changes:
+
+* Tulip issue #198: asyncio.Condition now accepts an optional lock object.
+* Enhance representation of Future and Future subclasses: add "created at".
+
+Bugfixes:
+
+* Fix Trollius issue #9: @trollius.coroutine now works on callable objects
+ (without ``__name__`` attribute), not only on functions.
+* Fix Trollius issue #13: asyncio futures are now accepted in all functions:
+ as_completed(), async(), @coroutine, gather(), run_until_complete(),
+ wrap_future().
+* Fix support of asyncio coroutines in debug mode. If the last instruction
+ of the coroutine is "yield from", it's an asyncio coroutine and it does not
+ need to use From().
+* Fix and enhance _WaitHandleFuture.cancel():
+
+ - Tulip issue #195: Fix a crash on Windows: don't call UnregisterWait() twice
+ if a _WaitHandleFuture is cancelled twice.
+ - Fix _WaitHandleFuture.cancel(): return the result of the parent cancel()
+ method (True or False).
+ - _WaitHandleFuture.cancel() now notify IocpProactor through the overlapped
+ object that the wait was cancelled.
+
+* Tulip issue #196: _OverlappedFuture now clears its reference to the
+ overlapped object. IocpProactor keeps a reference to the overlapped object
+ until it is notified of its completion. Log also an error in debug mode if it
+ gets unexpected notifications.
+* Fix runtest.py to be able to log at level DEBUG.
+
+Other changes:
+
+* BaseSelectorEventLoop._write_to_self() now logs errors in debug mode.
+* Fix as_completed(): it's not a coroutine, don't use ``yield From(...)`` but
+ ``yield ...``
+* Tulip issue #193: Convert StreamWriter.drain() to a classic coroutine.
+* Tulip issue #194: Don't use sys.getrefcount() in unit tests: the full test
+ suite now pass on PyPy.
+
+
+2014-07-21: Version 1.0
+=======================
+
+Major Changes
+-------------
+
+* Event loops have a new ``create_task()`` method, which is now the recommended
+  way to create a task object. This method can be overridden by third-party
+ event loops to use their own task class.
+* The debug mode has been improved a lot. Set ``TROLLIUSDEBUG`` environment
+ variable to ``1`` and configure logging to log at level ``logging.DEBUG``
+ (ex: ``logging.basicConfig(level=logging.DEBUG)``). Changes:
+
+ - much better representation of Trollius objects (ex: ``repr(task)``):
+ unified ``<Class arg1 arg2 ...>`` format, use qualified name when available
+ - show the traceback where objects were created
+ - show the current filename and line number for coroutine
+ - show the filename and line number where objects were created
+ - log most important socket events
+ - log most important subprocess events
+
+* ``Handle.cancel()`` now clears references to callback and args
+* Log an error if a Task is destroyed while it is still pending, but only on
+ Python 3.4 and newer.
+* Fix for asyncio coroutines when passing tuple value in debug mode.
+ ``CoroWrapper.send()`` now checks if it is called from a "yield from"
+ generator to decide if the parameter should be unpacked or not.
+* ``Process.communicate()`` now ignores ``BrokenPipeError`` and
+ ``ConnectionResetError`` exceptions.
+* Rewrite signal handling on Python 3.3 and newer to fix a race condition: use
+ the "self-pipe" to get signal numbers.
+
+
+Other Changes
+-------------
+
+* Fix ``ProactorEventLoop()`` in debug mode
+* Fix a race condition when setting the result of a Future with
+ ``call_soon()``. Add an helper, a private method, to set the result only if
+ the future was not cancelled.
+* Fix ``asyncio.__all__``: export also ``unix_events`` and ``windows_events``
+ symbols. For example, on Windows, it was not possible to get
+ ``ProactorEventLoop`` or ``DefaultEventLoopPolicy`` using ``from asyncio
+ import *``.
+* ``Handle.cancel()`` now clears references to callback and args
+* Make Server attributes and methods private, the sockets attribute remains
+ public.
+* BaseEventLoop.create_datagram_endpoint() now waits until
+ protocol.connection_made() has been called. Document also why transport
+ constructors use a waiter.
+* _UnixSubprocessTransport: fix file mode of stdin: open stdin in write mode,
+ not in read mode.
+
+
+2014-06-23: version 0.4
+=======================
+
+Changes between Trollius 0.3 and 0.4:
+
+* Trollius event loop now supports asyncio coroutines:
+
+ - Trollius coroutines can yield asyncio coroutines,
+ - asyncio coroutines can yield Trollius coroutines,
+ - asyncio.set_event_loop() accepts a Trollius event loop,
+ - asyncio.set_event_loop_policy() accepts a Trollius event loop policy.
+
+* The ``PYTHONASYNCIODEBUG`` environment variable has been renamed to
+ ``TROLLIUSDEBUG``. The environment variable is now used even if the Python
+ command line option ``-E`` is used.
+* Synchronize with Tulip.
+* Support PyPy (fix subprocess, fix unit tests).
+
+Tulip changes:
+
+* Tulip issue #171: BaseEventLoop.close() now raises an exception if the event
+ loop is running. You must first stop the event loop and then wait until it
+ stopped, before closing it.
+* Tulip issue #172: only log selector timing in debug mode
+* Enable the debug mode of event loops when the ``TROLLIUSDEBUG`` environment
+ variable is set
+* BaseEventLoop._assert_is_current_event_loop() now only raises an exception if
+ the current loop is set.
+* Tulip issue #105: in debug mode, log callbacks taking more than 100 ms to be
+ executed.
+* Python issue 21595: ``BaseSelectorEventLoop._read_from_self()`` reads all
+ available bytes from the "self pipe", not only a single byte. This change
+ reduces the risk of having the pipe full and so getting the "BlockingIOError:
+ [Errno 11] Resource temporarily unavailable" message.
+* Python issue 21723: asyncio.Queue: support any type of number (ex: float) for
+ the maximum size. Patch written by Vajrasky Kok.
+* Issue #173: Enhance repr(Handle) and repr(Task): add the filename and line
+ number, when available. For task, the current line number of the coroutine
+ is used.
+* Add BaseEventLoop.is_closed() method. run_forever() and run_until_complete()
+  methods now raise an exception if the event loop was closed.
+* Make sure that socketpair() close sockets on error. Close the listening
+ socket if sock.bind() raises an exception.
+* Fix ResourceWarning: close sockets on errors.
+ BaseEventLoop.create_connection(), BaseEventLoop.create_datagram_endpoint()
+ and _UnixSelectorEventLoop.create_unix_server() now close the newly created
+ socket on error.
+* Rephrase and fix docstrings.
+* Fix tests on Windows: wait for the subprocess exit. Before, regrtest failed
+ to remove the temporary test directory because the process was still running
+ in this directory.
+* Refactor unit tests.
+
+On Python 3.5, generators now get their name from the function, no more from
+the code. So the ``@coroutine`` decorator doesn't lose the original name of
+the function anymore.
+
+
+2014-05-26: version 0.3
+=======================
+
+Rename the Python module ``asyncio`` to ``trollius`` to support Python 3.4. On
+Python 3.4, there is already a module called ``asyncio`` in the standard
+library which conflicted with ``asyncio`` module of Trollius 0.2. To write
+asyncio code working on Trollius and Tulip, use ``import trollius as asyncio``.
+
+Changes between Trollius 0.2 and 0.3:
+
+* Synchronize with Tulip 3.4.1.
+* Enhance Trollius documentation.
+* Trollius issue #7: Fix ``asyncio.time_monotonic`` on Windows older than
+ Vista (ex: Windows 2000 and Windows XP).
+* Fedora packages have been accepted.
+
+Changes between Tulip 3.4.0 and 3.4.1:
+
+* Pull in Solaris ``devpoll`` support by Giampaolo Rodola
+ (``trollius.selectors`` module).
+* Add options ``-r`` and ``--randomize`` to runtests.py to randomize test
+ order.
+* Add a simple echo client/server example.
+* Tulip issue #166: Add ``__weakref__`` slots to ``Handle`` and
+ ``CoroWrapper``.
+* ``EventLoop.create_unix_server()`` now raises a ``ValueError`` if path and
+ sock are specified at the same time.
+* Ensure ``call_soon()``, ``call_later()`` and ``call_at()`` are invoked on
+ current loop in debug mode. Raise a ``RuntimeError`` if the event loop of the
+  current thread is different. The check should help to debug thread-safety
+ issue. Patch written by David Foster.
+* Tulip issue #157: Improve test_events.py, avoid ``run_briefly()`` which is
+ not reliable.
+* Reject add/remove reader/writer when event loop is closed.
+
+Bugfixes of Tulip 3.4.1:
+
+* Tulip issue #168: ``StreamReader.read(-1)`` from pipe may hang if
+ data exceeds buffer limit.
+* CPython issue #21447: Fix a race condition in
+ ``BaseEventLoop._write_to_self()``.
+* Different bugfixes in ``CoroWrapper`` of ``trollius.coroutines``, class used
+ when running Trollius in debug mode:
+
+ - Fix ``CoroWrapper`` to workaround yield-from bug in CPython 3.4.0. The
+ CPython bug is now fixed in CPython 3.4.1 and 3.5.
+ - Make sure ``CoroWrapper.send`` proxies one argument correctly.
+ - CPython issue #21340: Be careful accessing instance variables in ``__del__``.
+ - Tulip issue #163: Add ``gi_{frame,running,code}`` properties to
+ ``CoroWrapper``.
+
+* Fix ``ResourceWarning`` warnings
+* Tulip issue #159: Fix ``windows_utils.socketpair()``. Use ``"127.0.0.1"``
+ (IPv4) or ``"::1"`` (IPv6) host instead of ``"localhost"``, because
+ ``"localhost"`` may be a different IP address. Reject also invalid arguments:
+ only ``AF_INET`` and ``AF_INET6`` with ``SOCK_STREAM`` (and ``proto=0``) are
+ supported.
+* Tulip issue #158: ``Task._step()`` now also sets ``self`` to ``None`` if an
+ exception is raised. ``self`` is set to ``None`` to break a reference cycle.
+
+
+2014-03-04: version 0.2
+=======================
+
+Trollius now uses ``yield From(...)`` syntax which looks close to Tulip ``yield
+from ...`` and makes it easier to port Trollius code to Tulip. The usage of
+``From()`` is not mandatory yet, but it may become mandatory in a future
+version. However, if ``yield`` is used without ``From``, an exception is
+raised if the event loop is running in debug mode.
+
+Major changes:
+
+* Replace ``yield ...`` syntax with ``yield From(...)``
+* On Python 2, Future.set_exception() now only saves the traceback if the debug
+ mode of the event loop is enabled for best performances in production mode.
+ Use ``loop.set_debug(True)`` to save the traceback.
+
+Bugfixes:
+
+* Fix ``BaseEventLoop.default_exception_handler()`` on Python 2: get the
+ traceback from ``sys.exc_info()``
+* Fix unit tests on SSL sockets on Python older than 2.6.6. Example:
+ Mac OS 10.6 with Python 2.6.1 or OpenIndiana 148 with Python 2.6.4.
+* Fix error handling in the asyncio.time_monotonic module
+* Fix acquire() method of Lock, Condition and Semaphore: don't return a context
+ manager but True, as Tulip. Task._step() now does the trick.
+
+Other changes:
+
+* tox.ini: set PYTHONASYNCIODEBUG to 1 to run tests
+
+2014-02-25: version 0.1.6
+=========================
+
+Trollius changes:
+
+* Add a new Sphinx documentation:
+ http://trollius.readthedocs.org/
+* tox: pass posargs to nosetests. Patch contributed by Ian Wienand.
+* Fix support of Python 3.2 and add py32 to tox.ini
+* Merge with Tulip 0.4.1
+
+Major changes of Tulip 0.4.1:
+
+* Issue #81: Add support for UNIX Domain Sockets. New APIs:
+
+ - loop.create_unix_connection()
+ - loop.create_unix_server()
+ - streams.open_unix_connection()
+ - streams.start_unix_server()
+
+* Issue #80: Add new event loop exception handling API. New APIs:
+
+ - loop.set_exception_handler()
+ - loop.call_exception_handler()
+ - loop.default_exception_handler()
+
+* Issue #136: Add get_debug() and set_debug() methods to BaseEventLoopTests.
+ Add also a ``PYTHONASYNCIODEBUG`` environment variable to debug coroutines
+ since Python startup, to be able to debug coroutines defined directly in the
+ asyncio module.
+
+Other changes of Tulip 0.4.1:
+
+* asyncio.subprocess: Fix a race condition in communicate()
+* Fix _ProactorWritePipeTransport._pipe_closed()
+* Issue #139: Improve error messages on "fatal errors".
+* Issue #140: WriteTransport.set_write_buffer_size() to call
+ _maybe_pause_protocol()
+* Issue #129: BaseEventLoop.sock_connect() now raises an error if the address
+ is not resolved (hostname instead of an IP address) for AF_INET and
+ AF_INET6 address families.
+* Issue #131: as_completed() and wait() now raise a TypeError if the list of
+ futures is not a list but a Future, Task or coroutine object
+* Python issue #20495: Skip test_read_pty_output() of test_asyncio on FreeBSD
+ older than FreeBSD 8
+* Issue #130: Add more checks on subprocess_exec/subprocess_shell parameters
+* Issue #126: call_soon(), call_soon_threadsafe(), call_later(), call_at()
+ and run_in_executor() now raise a TypeError if the callback is a coroutine
+ function.
+* Python issue #20505: BaseEventLoop uses again the resolution of the clock
+ to decide if scheduled tasks should be executed or not.
+
+
+2014-02-10: version 0.1.5
+=========================
+
+- Merge with Tulip 0.3.1:
+
+ * New asyncio.subprocess module
+ * _UnixWritePipeTransport now also supports character devices, as
+ _UnixReadPipeTransport. Patch written by Jonathan Slenders.
+ * StreamReader.readexactly() now raises an IncompleteReadError if the
+ end of stream is reached before we received enough bytes, instead of
+ returning less bytes than requested.
+ * poll and epoll selectors now round the timeout away from zero (instead of
+ rounding towards zero) to fix a performance issue
+ * asyncio.queue: Empty renamed to QueueEmpty, Full to QueueFull
+ * _fatal_error() of _UnixWritePipeTransport and _ProactorBasePipeTransport
+ don't log BrokenPipeError nor ConnectionResetError
+  * Future.set_exception(exc) now instantiates exc if it is a class
+ * streams.StreamReader: Use bytearray instead of deque of bytes for internal
+ buffer
+
+- Fix test_wait_for() unit test
+
+2014-01-22: version 0.1.4
+=========================
+
+- The project moved to https://bitbucket.org/enovance/trollius
+- Fix CoroWrapper (_DEBUG=True): add missing import
+- Emit a warning when Return is not raised
+- Merge with Tulip to get latest Tulip bugfixes
+- Fix dependencies in tox.ini for the different Python versions
+
+2014-01-13: version 0.1.3
+=========================
+
+- Workaround bugs in the ssl module of Python older than 2.6.6. For example,
+ Mac OS 10.6 (Snow Leopard) uses Python 2.6.1.
+- ``return x, y`` is now written ``raise Return(x, y)`` instead of
+ ``raise Return((x, y))``
+- Support "with (yield lock):" syntax for Lock, Condition and Semaphore
+- SSL support is now optional: don't fail if the ssl module is missing
+- Add tox.ini, tool to run unit tests. For example, "tox -e py27" creates a
+ virtual environment to run tests with Python 2.7.
+
+2014-01-08: version 0.1.2
+=========================
+
+- Trollius now supports CPython 2.6-3.4, PyPy and Windows. All unit tests
+ pass with CPython 2.7 on Linux.
+- Fix Windows support. Fix compilation of the _overlapped module and add a
+ asyncio._winapi module (written in pure Python). Patch written by Marc
+ Schlaich.
+- Support Python 2.6: require an extra dependency,
+ ordereddict (and unittest2 for unit tests)
+- Support Python 3.2, 3.3 and 3.4
+- Support PyPy 2.2
+- Don't modify __builtins__ nor the ssl module to inject backported exceptions
+ like BlockingIOError or SSLWantReadError. Exceptions are available in the
+ asyncio module, ex: asyncio.BlockingIOError.
+
+2014-01-06: version 0.1.1
+=========================
+
+- Fix asyncio.time_monotonic on Mac OS X
+- Fix create_connection(ssl=True)
+- Don't export backported SSLContext in the ssl module anymore to not confuse
+ libraries testing hasattr(ssl, "SSLContext")
+- Relax dependency on the backported concurrent.futures module: use a
+ synchronous executor if the module is missing
+
+2014-01-04: version 0.1
+=======================
+
+- First public release
+
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000..5535a22
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+#
+# Trollius documentation build configuration file, created by
+# sphinx-quickstart on Fri Feb 21 11:05:42 2014.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#import sys, os
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = []
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Trollius'
+copyright = u'2014, Victor Stinner'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = release = '1.0.4'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Trolliusdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+ ('index', 'Trollius.tex', u'Trollius Documentation',
+ u'Victor Stinner', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ ('index', 'trollius', u'Trollius Documentation',
+ [u'Victor Stinner'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ ('index', 'Trollius', u'Trollius Documentation',
+ u'Victor Stinner', 'Trollius', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
diff --git a/doc/dev.rst b/doc/dev.rst
new file mode 100644
index 0000000..faabb16
--- /dev/null
+++ b/doc/dev.rst
@@ -0,0 +1,60 @@
+Run tests
+=========
+
+Run tests with tox
+------------------
+
+The `tox project <https://testrun.org/tox/latest/>`_ can be used to build a
+virtual environment with all runtime and test dependencies and run tests
+against different Python versions (2.6, 2.7, 3.2, 3.3).
+
+For example, to run tests with Python 2.7, just type::
+
+ tox -e py27
+
+To run tests against other Python versions:
+
+* ``py26``: Python 2.6
+* ``py27``: Python 2.7
+* ``py32``: Python 3.2
+* ``py33``: Python 3.3
+
+
+Test Dependencies
+-----------------
+
+On Python older than 3.3, unit tests require the `mock
+<https://pypi.python.org/pypi/mock>`_ module. Python 2.6 requires also
+`unittest2 <https://pypi.python.org/pypi/unittest2>`_.
+
+To run ``run_aiotest.py``, you need the `aiotest
+<https://pypi.python.org/pypi/aiotest>`_ test suite: ``pip install aiotest``.
+
+
+Run tests on UNIX
+-----------------
+
+Run the following commands from the directory of the Trollius project.
+
+To run tests::
+
+ make test
+
+To run coverage (``coverage`` package is required)::
+
+ make coverage
+
+
+Run tests on Windows
+--------------------
+
+Run the following commands from the directory of the Trollius project.
+
+You can run the tests as follows::
+
+ C:\Python27\python.exe runtests.py
+
+And coverage as follows::
+
+ C:\Python27\python.exe runtests.py --coverage
+
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..ebe4c38
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,75 @@
+Trollius
+========
+
+.. image:: trollius.jpg
+ :alt: Trollius altaicus from Khangai Mountains (Mongòlia)
+ :align: right
+ :target: http://commons.wikimedia.org/wiki/File:Trollius_altaicus.jpg
+
+Trollius provides infrastructure for writing single-threaded concurrent
+code using coroutines, multiplexing I/O access over sockets and other
+resources, running network clients and servers, and other related primitives.
+Here is a more detailed list of the package contents:
+
+* a pluggable event loop with various system-specific implementations;
+
+* transport and protocol abstractions (similar to those in `Twisted
+ <http://twistedmatrix.com/>`_);
+
+* concrete support for TCP, UDP, SSL, subprocess pipes, delayed calls, and
+ others (some may be system-dependent);
+
+* a ``Future`` class that mimics the one in the ``concurrent.futures`` module,
+ but adapted for use with the event loop;
+
+* coroutines and tasks based on generators (``yield``), to help write
+ concurrent code in a sequential fashion;
+
+* cancellation support for ``Future``\s and coroutines;
+
+* synchronization primitives for use between coroutines in a single thread,
+ mimicking those in the ``threading`` module;
+
+* an interface for passing work off to a threadpool, for times when you
+ absolutely, positively have to use a library that makes blocking I/O calls.
+
+Trollius is a port of the `Tulip project <http://code.google.com/p/tulip/>`_
+(``asyncio`` module, `PEP 3156 <http://legacy.python.org/dev/peps/pep-3156/>`_)
+on Python 2. Trollius works on Python 2.6-3.5. It has been tested on Windows,
+Linux, Mac OS X, FreeBSD and OpenIndiana.
+
+* `Asyncio documentation <http://docs.python.org/dev/library/asyncio.html>`_
+* `Trollius documentation <http://trollius.readthedocs.org/>`_ (this document)
+* `Trollius project in the Python Cheeseshop (PyPI)
+ <https://pypi.python.org/pypi/trollius>`_
+* `Trollius project at Bitbucket <https://bitbucket.org/enovance/trollius>`_
+* Copyright/license: Open source, Apache 2.0. Enjoy!
+
+See also the `Tulip project <http://code.google.com/p/tulip/>`_ (asyncio module
+for Python 3.3).
+
+
+Table Of Contents
+=================
+
+.. toctree::
+
+ using
+ install
+ asyncio
+ dev
+ changelog
+
+
+Trollius name
+=============
+
+Extract of `Trollius Wikipedia article
+<http://en.wikipedia.org/wiki/Trollius>`_:
+
+Trollius is a genus of about 30 species of plants in the family Ranunculaceae,
+closely related to Ranunculus. The common name of some species is globeflower
+or globe flower. Native to the cool temperate regions of the Northern
+Hemisphere, with the greatest diversity of species in Asia, trollius usually
+grow in heavy, wet clay soils.
+
diff --git a/doc/install.rst b/doc/install.rst
new file mode 100644
index 0000000..ea2b455
--- /dev/null
+++ b/doc/install.rst
@@ -0,0 +1,111 @@
+++++++++++++++++
+Install Trollius
+++++++++++++++++
+
+Packages for Linux
+==================
+
+* `Debian package
+ <https://packages.debian.org/fr/sid/python-trollius>`_
+* `ArchLinux package
+ <https://aur.archlinux.org/packages/python2-trollius/>`_
+* `Fedora and CentOS package: python-trollius
+ <http://pkgs.org/download/python-trollius>`_
+
+
+Install Trollius on Windows using pip
+=====================================
+
+Since Trollius 0.2, `precompiled wheel packages <http://pythonwheels.com/>`_
+are now distributed on the Python Cheeseshop (PyPI). Procedure to install
+Trollius on Windows:
+
+* `Install pip
+ <http://www.pip-installer.org/en/latest/installing.html>`_, download
+ ``get-pip.py`` and type::
+
+ \Python27\python.exe get-pip.py
+
+* If you already have pip, ensure that you have at least pip 1.4. If you need
+ to upgrade::
+
+ \Python27\python.exe -m pip install -U pip
+
+* Install Trollius::
+
+ \Python27\python.exe -m pip install trollius
+
+* pip also installs the ``futures`` dependency
+
+.. note::
+
+ Only wheel packages for Python 2.7 are currently distributed on the
+ Cheeseshop (PyPI). If you need wheel packages for other Python versions,
+ please ask.
+
+Download source code
+====================
+
+Command to download the development version of the source code (``trollius``
+branch)::
+
+ hg clone 'https://bitbucket.org/enovance/trollius#trollius'
+
+The actual code lives in the ``trollius`` subdirectory. Tests are in the
+``tests`` subdirectory.
+
+See the `trollius project at Bitbucket
+<https://bitbucket.org/enovance/trollius>`_.
+
+The source code of the Trollius project is in the ``trollius`` branch of the
+Mercurial repository, not in the default branch. The default branch is the
+Tulip project, Trollius repository is a fork of the Tulip repository.
+
+
+Dependencies
+============
+
+On Python older than 3.2, the `futures <https://pypi.python.org/pypi/futures>`_
+project is needed to get a backport of ``concurrent.futures``.
+
+Python 2.6 requires also `ordereddict
+<https://pypi.python.org/pypi/ordereddict>`_.
+
+
+Build manually Trollius on Windows
+==================================
+
+On Windows, if you cannot use precompiled wheel packages, an extension module
+must be compiled: the ``_overlapped`` module (source code: ``overlapped.c``).
+Read `Compile Python extensions on Windows
+<http://haypo-notes.readthedocs.org/python.html#compile-python-extensions-on-windows>`_
+to prepare your environment to build the Python extension. Then build the
+extension using::
+
+ C:\Python27\python.exe setup.py build_ext
+
+
+Backports
+=========
+
+To support Python 2.6-3.4, many Python modules of the standard library have
+been backported:
+
+======================== ========= =======================
+Name Python Backport
+======================== ========= =======================
+OSError 3.3 asyncio.py33_exceptions
+_overlapped 3.4 asyncio._overlapped
+_winapi 3.3 asyncio.py33_winapi
+collections.OrderedDict 2.7, 3.1 ordereddict (PyPI)
+concurrent.futures 3.2 futures (PyPI)
+selectors 3.4 asyncio.selectors
+ssl 3.2, 3.3 asyncio.py3_ssl
+time.monotonic 3.3 asyncio.time_monotonic
+unittest 2.7, 3.1 unittest2 (PyPI)
+unittest.mock 3.3 mock (PyPI)
+weakref.WeakSet 2.7, 3.0 asyncio.py27_weakrefset
+======================== ========= =======================
+
+
+
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 0000000..5789d41
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Trollius.qhcp
+ echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Trollius.qhc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/doc/trollius.jpg b/doc/trollius.jpg
new file mode 100644
index 0000000..f4976c7
--- /dev/null
+++ b/doc/trollius.jpg
Binary files differ
diff --git a/doc/using.rst b/doc/using.rst
new file mode 100644
index 0000000..c730f86
--- /dev/null
+++ b/doc/using.rst
@@ -0,0 +1,85 @@
+++++++++++++++
+Using Trollius
+++++++++++++++
+
+Documentation of the asyncio module
+===================================
+
+The documentation of the asyncio module is part of the Python project. It can be read
+online: `asyncio - Asynchronous I/O, event loop, coroutines and tasks
+<http://docs.python.org/dev/library/asyncio.html>`_.
+
+To adapt asyncio examples for Trollius, "just":
+
+* replace ``asyncio`` with ``trollius``
+ (or use ``import trollius as asyncio``)
+* replace ``yield from ...`` with ``yield From(...)``
+* replace ``yield from []`` with ``yield From(None)``
+* in coroutines, replace ``return res`` with ``raise Return(res)``
+
+
+Trollius Hello World
+====================
+
+Print ``Hello World`` every two seconds, using a coroutine::
+
+ import trollius
+ from trollius import From
+
+ @trollius.coroutine
+ def greet_every_two_seconds():
+ while True:
+ print('Hello World')
+ yield From(trollius.sleep(2))
+
+ loop = trollius.get_event_loop()
+ loop.run_until_complete(greet_every_two_seconds())
+
+
+Debug mode
+==========
+
+To enable the debug mode:
+
+* Set ``TROLLIUSDEBUG`` environment variable to ``1``
+* Configure logging to log at level ``logging.DEBUG``,
+ ``logging.basicConfig(level=logging.DEBUG)`` for example
+
+The ``BaseEventLoop.set_debug()`` method can be used to set the debug mode on a
+specific event loop. The environment variable also enables the debug mode for
+coroutines.
+
+Effect of the debug mode:
+
+* On Python 2, :meth:`Future.set_exception` stores the traceback, so
+ ``loop.run_until_complete()`` raises the exception with the original
+ traceback.
+* Log coroutines defined but never "yielded"
+* BaseEventLoop.call_soon() and BaseEventLoop.call_at() methods raise an
+ exception if they are called from the wrong thread.
+* Log the execution time of the selector
+* Log callbacks taking more than 100 ms to be executed. The
+ BaseEventLoop.slow_callback_duration attribute is the minimum duration in
+ seconds of "slow" callbacks.
+* Log most important subprocess events:
+
+ - Log stdin, stdout and stderr transports and protocols
+ - Log process identifier (pid)
+ - Log connection of pipes
+ - Log process exit
+ - Log Process.communicate() tasks: feed stdin, read stdout and stderr
+
+* Log most important socket events:
+
+ - Socket connected
+ - New client (socket.accept())
+ - Connection reset or closed by peer (EOF)
+ - Log time elapsed in DNS resolution (getaddrinfo)
+ - Log pause/resume reading
+ - Log time of SSL handshake
+ - Log SSL handshake errors
+
+See `Debug mode of asyncio
+<https://docs.python.org/dev/library/asyncio-dev.html#debug-mode-of-asyncio>`_
+for more information.
+
diff --git a/examples/cacheclt.py b/examples/cacheclt.py
index b11a4d1..1f8ece4 100644
--- a/examples/cacheclt.py
+++ b/examples/cacheclt.py
@@ -4,8 +4,9 @@ See cachesvr.py for protocol description.
"""
import argparse
-import asyncio
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import From, Return
+from trollius import test_utils
import json
import logging
@@ -62,24 +63,24 @@ class CacheClient:
@asyncio.coroutine
def get(self, key):
- resp = yield from self.request('get', key)
+ resp = yield From(self.request('get', key))
if resp is None:
- return None
- return resp.get('value')
+ raise Return()
+ raise Return(resp.get('value'))
@asyncio.coroutine
def set(self, key, value):
- resp = yield from self.request('set', key, value)
+ resp = yield From(self.request('set', key, value))
if resp is None:
- return False
- return resp.get('status') == 'ok'
+ raise Return(False)
+ raise Return(resp.get('status') == 'ok')
@asyncio.coroutine
def delete(self, key):
- resp = yield from self.request('delete', key)
+ resp = yield From(self.request('delete', key))
if resp is None:
- return False
- return resp.get('status') == 'ok'
+ raise Return(False)
+ raise Return(resp.get('status') == 'ok')
@asyncio.coroutine
def request(self, type, key, value=None):
@@ -91,24 +92,25 @@ class CacheClient:
waiter = asyncio.Future(loop=self.loop)
if self.initialized:
try:
- yield from self.send(payload, waiter)
+ yield From(self.send(payload, waiter))
except IOError:
self.todo.add((payload, waiter))
else:
self.todo.add((payload, waiter))
- return (yield from waiter)
+ result = (yield From(waiter))
+ raise Return(result)
@asyncio.coroutine
def activity(self):
backoff = 0
while True:
try:
- self.reader, self.writer = yield from asyncio.open_connection(
- self.host, self.port, ssl=self.sslctx, loop=self.loop)
+ self.reader, self.writer = yield From(asyncio.open_connection(
+ self.host, self.port, ssl=self.sslctx, loop=self.loop))
except Exception as exc:
backoff = min(args.max_backoff, backoff + (backoff//2) + 1)
logging.info('Error connecting: %r; sleep %s', exc, backoff)
- yield from asyncio.sleep(backoff, loop=self.loop)
+ yield From(asyncio.sleep(backoff, loop=self.loop))
continue
backoff = 0
self.next_id = 0
@@ -118,9 +120,9 @@ class CacheClient:
while self.todo:
payload, waiter = self.todo.pop()
if not waiter.done():
- yield from self.send(payload, waiter)
+ yield From(self.send(payload, waiter))
while True:
- resp_id, resp = yield from self.process()
+ resp_id, resp = yield From(self.process())
if resp_id in self.pending:
payload, waiter = self.pending.pop(resp_id)
if not waiter.done():
@@ -143,11 +145,11 @@ class CacheClient:
self.writer.write(frame.encode('ascii'))
self.writer.write(payload)
self.pending[req_id] = payload, waiter
- yield from self.writer.drain()
+ yield From(self.writer.drain())
@asyncio.coroutine
def process(self):
- frame = yield from self.reader.readline()
+ frame = yield From(self.reader.readline())
if not frame:
raise EOFError()
head, tail = frame.split(None, 1)
@@ -156,17 +158,17 @@ class CacheClient:
if head != b'response':
raise IOError('Bad frame: %r' % frame)
resp_id, resp_size = map(int, tail.split())
- data = yield from self.reader.readexactly(resp_size)
+ data = yield From(self.reader.readexactly(resp_size))
if len(data) != resp_size:
raise EOFError()
resp = json.loads(data.decode('utf8'))
- return resp_id, resp
+ raise Return(resp_id, resp)
def main():
asyncio.set_event_loop(None)
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
@@ -193,13 +195,13 @@ def testing(label, cache, loop):
while True:
logging.info('%s %s', label, '-'*20)
try:
- ret = yield from w(cache.set(key, 'hello-%s-world' % label))
+ ret = yield From(w(cache.set(key, 'hello-%s-world' % label)))
logging.info('%s set %s', label, ret)
- ret = yield from w(cache.get(key))
+ ret = yield From(w(cache.get(key)))
logging.info('%s get %s', label, ret)
- ret = yield from w(cache.delete(key))
+ ret = yield From(w(cache.delete(key)))
logging.info('%s del %s', label, ret)
- ret = yield from w(cache.get(key))
+ ret = yield From(w(cache.get(key)))
logging.info('%s get2 %s', label, ret)
except asyncio.TimeoutError:
logging.warn('%s Timeout', label)
diff --git a/examples/cachesvr.py b/examples/cachesvr.py
index 053f9c2..20a54e4 100644
--- a/examples/cachesvr.py
+++ b/examples/cachesvr.py
@@ -57,7 +57,8 @@ form is returned, but the connection is not closed:
"""
import argparse
-import asyncio
+import trollius as asyncio
+from trollius import From
import json
import logging
import os
@@ -104,7 +105,7 @@ class Cache:
peer = writer.get_extra_info('socket').getpeername()
logging.info('got a connection from %s', peer)
try:
- yield from self.frame_parser(reader, writer)
+ yield From(self.frame_parser(reader, writer))
except Exception as exc:
logging.error('error %r from %s', exc, peer)
else:
@@ -122,13 +123,13 @@ class Cache:
# if the client doesn't send enough data but doesn't
# disconnect either. We add a timeout to each. (But the
# timeout should really be implemented by StreamReader.)
- framing_b = yield from asyncio.wait_for(
+ framing_b = yield From(asyncio.wait_for(
reader.readline(),
- timeout=args.timeout, loop=self.loop)
+ timeout=args.timeout, loop=self.loop))
if random.random()*100 < args.fail_percent:
logging.warn('Inserting random failure')
- yield from asyncio.sleep(args.fail_sleep*random.random(),
- loop=self.loop)
+ yield From(asyncio.sleep(args.fail_sleep*random.random(),
+ loop=self.loop))
writer.write(b'error random failure\r\n')
break
logging.debug('framing_b = %r', framing_b)
@@ -151,9 +152,9 @@ class Cache:
writer.write(b'error invalid frame parameters\r\n')
break
last_request_id = request_id
- request_b = yield from asyncio.wait_for(
+ request_b = yield From(asyncio.wait_for(
reader.readexactly(byte_count),
- timeout=args.timeout, loop=self.loop)
+ timeout=args.timeout, loop=self.loop))
try:
request = json.loads(request_b.decode('utf8'))
except ValueError:
@@ -165,10 +166,10 @@ class Cache:
break
response_b = json.dumps(response).encode('utf8') + b'\r\n'
byte_count = len(response_b)
- framing_s = 'response {} {}\r\n'.format(request_id, byte_count)
+ framing_s = 'response {0} {1}\r\n'.format(request_id, byte_count)
writer.write(framing_s.encode('ascii'))
- yield from asyncio.sleep(args.resp_sleep*random.random(),
- loop=self.loop)
+ yield From(asyncio.sleep(args.resp_sleep*random.random(),
+ loop=self.loop))
writer.write(response_b)
def handle_request(self, request):
@@ -217,7 +218,7 @@ class Cache:
def main():
asyncio.set_event_loop(None)
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
@@ -226,7 +227,7 @@ def main():
import ssl
# TODO: take cert/key from args as well.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
- sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslctx = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.options |= ssl.OP_NO_SSLv2
sslctx.load_cert_chain(
certfile=os.path.join(here, 'ssl_cert.pem'),
diff --git a/examples/child_process.py b/examples/child_process.py
index 3fac175..9e403a4 100644
--- a/examples/child_process.py
+++ b/examples/child_process.py
@@ -10,15 +10,16 @@ import os
import sys
try:
- import asyncio
+ import trollius as asyncio
except ImportError:
# asyncio is not installed
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
- import asyncio
+ import trollius as asyncio
+from trollius import From, Return
if sys.platform == 'win32':
- from asyncio.windows_utils import Popen, PIPE
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_utils import Popen, PIPE
+ from trollius.windows_events import ProactorEventLoop
else:
from subprocess import Popen, PIPE
@@ -29,8 +30,8 @@ else:
@asyncio.coroutine
def connect_write_pipe(file):
loop = asyncio.get_event_loop()
- transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol, file)
- return transport
+ transport, _ = yield From(loop.connect_write_pipe(asyncio.Protocol, file))
+ raise Return(transport)
#
# Wrap a readable pipe in a stream
@@ -42,8 +43,8 @@ def connect_read_pipe(file):
stream_reader = asyncio.StreamReader(loop=loop)
def factory():
return asyncio.StreamReaderProtocol(stream_reader)
- transport, _ = yield from loop.connect_read_pipe(factory, file)
- return stream_reader, transport
+ transport, _ = yield From(loop.connect_read_pipe(factory, file))
+ raise Return(stream_reader, transport)
#
@@ -80,9 +81,9 @@ def main(loop):
p = Popen([sys.executable, '-c', code],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
- stdin = yield from connect_write_pipe(p.stdin)
- stdout, stdout_transport = yield from connect_read_pipe(p.stdout)
- stderr, stderr_transport = yield from connect_read_pipe(p.stderr)
+ stdin = yield From(connect_write_pipe(p.stdin))
+ stdout, stdout_transport = yield From(connect_read_pipe(p.stdout))
+ stderr, stderr_transport = yield From(connect_read_pipe(p.stderr))
# interact with subprocess
name = {stdout:'OUT', stderr:'ERR'}
@@ -100,9 +101,9 @@ def main(loop):
# get and print lines from stdout, stderr
timeout = None
while registered:
- done, pending = yield from asyncio.wait(
+ done, pending = yield From(asyncio.wait(
registered, timeout=timeout,
- return_when=asyncio.FIRST_COMPLETED)
+ return_when=asyncio.FIRST_COMPLETED))
if not done:
break
for f in done:
diff --git a/examples/crawl.py b/examples/crawl.py
index 4bb0b4e..7f54059 100644
--- a/examples/crawl.py
+++ b/examples/crawl.py
@@ -1,7 +1,9 @@
-#!/usr/bin/env python3.4
+#!/usr/bin/env python
"""A simple web crawler."""
+from __future__ import print_function
+
# TODO:
# - More organized logging (with task ID or URL?).
# - Use logging module for Logger.
@@ -15,15 +17,23 @@
# - Handle out of file descriptors directly? (How?)
import argparse
-import asyncio
+import trollius as asyncio
+from trollius import From, Return
import asyncio.locks
import cgi
-from http.client import BadStatusLine
import logging
import re
import sys
import time
-import urllib.parse
+try:
+ from httplib import BadStatusLine
+ import urlparse
+ from urllib import splitport as urllib_splitport
+except ImportError:
+ # Python 3
+ from http.client import BadStatusLine
+ from urllib import parse as urlparse
+ from urllib.parse import splitport as urllib_splitport
ARGS = argparse.ArgumentParser(description="Web crawler")
@@ -96,7 +106,8 @@ class Logger:
def _log(self, n, args):
if self.level >= n:
- print(*args, file=sys.stderr, flush=True)
+ print(*args, file=sys.stderr)
+ sys.stderr.flush()
def log(self, n, *args):
self._log(n, args)
@@ -133,14 +144,14 @@ class ConnectionPool:
for conn in conns:
conn.close()
self.connections.clear()
- self.queue.clear()
+ del self.queue[:]
@asyncio.coroutine
def get_connection(self, host, port, ssl):
"""Create or reuse a connection."""
port = port or (443 if ssl else 80)
try:
- ipaddrs = yield from self.loop.getaddrinfo(host, port)
+ ipaddrs = yield From(self.loop.getaddrinfo(host, port))
except Exception as exc:
self.log(0, 'Exception %r for (%r, %r)' % (exc, host, port))
raise
@@ -148,7 +159,8 @@ class ConnectionPool:
(host, ', '.join(ip[4][0] for ip in ipaddrs)))
# Look for a reusable connection.
- for _, _, _, _, (h, p, *_) in ipaddrs:
+ for _, _, _, _, addr in ipaddrs:
+ h, p = addr[:2]
key = h, p, ssl
conn = None
conns = self.connections.get(key)
@@ -163,13 +175,13 @@ class ConnectionPool:
else:
self.log(1, '* Reusing pooled connection', key,
'FD =', conn.fileno())
- return conn
+ raise Return(conn)
# Create a new connection.
conn = Connection(self.log, self, host, port, ssl)
- yield from conn.connect()
+ yield From(conn.connect())
self.log(1, '* New connection', conn.key, 'FD =', conn.fileno())
- return conn
+ raise Return(conn)
def recycle_connection(self, conn):
"""Make a connection available for reuse.
@@ -258,8 +270,8 @@ class Connection:
@asyncio.coroutine
def connect(self):
- self.reader, self.writer = yield from asyncio.open_connection(
- self.host, self.port, ssl=self.ssl)
+ self.reader, self.writer = yield From(asyncio.open_connection(
+ self.host, self.port, ssl=self.ssl))
peername = self.writer.get_extra_info('peername')
if peername:
self.host, self.port = peername[:2]
@@ -286,7 +298,7 @@ class Request:
self.log = log
self.url = url
self.pool = pool
- self.parts = urllib.parse.urlparse(self.url)
+ self.parts = urlparse.urlparse(self.url)
self.scheme = self.parts.scheme
assert self.scheme in ('http', 'https'), repr(url)
self.ssl = self.parts.scheme == 'https'
@@ -311,8 +323,8 @@ class Request:
(self.hostname, self.port,
'ssl' if self.ssl else 'tcp',
self.url))
- self.conn = yield from self.pool.get_connection(self.hostname,
- self.port, self.ssl)
+ self.conn = yield From(self.pool.get_connection(self.hostname,
+ self.port, self.ssl))
def close(self, recycle=False):
"""Close the connection, recycle if requested."""
@@ -336,7 +348,7 @@ class Request:
"""Send the request."""
request_line = '%s %s %s' % (self.method, self.full_path,
self.http_version)
- yield from self.putline(request_line)
+ yield From(self.putline(request_line))
# TODO: What if a header is already set?
self.headers.append(('User-Agent', 'asyncio-example-crawl/0.0'))
self.headers.append(('Host', self.netloc))
@@ -344,15 +356,15 @@ class Request:
##self.headers.append(('Accept-Encoding', 'gzip'))
for key, value in self.headers:
line = '%s: %s' % (key, value)
- yield from self.putline(line)
- yield from self.putline('')
+ yield From(self.putline(line))
+ yield From(self.putline(''))
@asyncio.coroutine
def get_response(self):
"""Receive the response."""
response = Response(self.log, self.conn.reader)
- yield from response.read_headers()
- return response
+ yield From(response.read_headers())
+ raise Return(response)
class Response:
@@ -374,14 +386,15 @@ class Response:
@asyncio.coroutine
def getline(self):
"""Read one line from the connection."""
- line = (yield from self.reader.readline()).decode('latin-1').rstrip()
+ line = (yield From(self.reader.readline()))
+ line = line.decode('latin-1').rstrip()
self.log(2, '<', line)
- return line
+ raise Return(line)
@asyncio.coroutine
def read_headers(self):
"""Read the response status and the request headers."""
- status_line = yield from self.getline()
+ status_line = yield From(self.getline())
status_parts = status_line.split(None, 2)
if len(status_parts) != 3:
self.log(0, 'bad status_line', repr(status_line))
@@ -389,7 +402,7 @@ class Response:
self.http_version, status, self.reason = status_parts
self.status = int(status)
while True:
- header_line = yield from self.getline()
+ header_line = yield From(self.getline())
if not header_line:
break
# TODO: Continuation lines.
@@ -426,7 +439,7 @@ class Response:
self.log(2, 'parsing chunked response')
blocks = []
while True:
- size_header = yield from self.reader.readline()
+ size_header = yield From(self.reader.readline())
if not size_header:
self.log(0, 'premature end of chunked response')
break
@@ -435,10 +448,10 @@ class Response:
size = int(parts[0], 16)
if size:
self.log(3, 'reading chunk of', size, 'bytes')
- block = yield from self.reader.readexactly(size)
+ block = yield From(self.reader.readexactly(size))
assert len(block) == size, (len(block), size)
blocks.append(block)
- crlf = yield from self.reader.readline()
+ crlf = yield From(self.reader.readline())
assert crlf == b'\r\n', repr(crlf)
if not size:
break
@@ -447,12 +460,12 @@ class Response:
'bytes in', len(blocks), 'blocks')
else:
self.log(3, 'reading until EOF')
- body = yield from self.reader.read()
+ body = yield From(self.reader.read())
# TODO: Should make sure not to recycle the connection
# in this case.
else:
- body = yield from self.reader.readexactly(nbytes)
- return body
+ body = yield From(self.reader.readexactly(nbytes))
+ raise Return(body)
class Fetcher:
@@ -504,10 +517,10 @@ class Fetcher:
self.request = None
try:
self.request = Request(self.log, self.url, self.crawler.pool)
- yield from self.request.connect()
- yield from self.request.send_request()
- self.response = yield from self.request.get_response()
- self.body = yield from self.response.read()
+ yield From(self.request.connect())
+ yield From(self.request.send_request())
+ self.response = yield From(self.request.get_response())
+ self.body = yield From(self.response.read())
h_conn = self.response.get_header('connection').lower()
if h_conn != 'close':
self.request.close(recycle=True)
@@ -531,7 +544,7 @@ class Fetcher:
return
next_url = self.response.get_redirect_url()
if next_url:
- self.next_url = urllib.parse.urljoin(self.url, next_url)
+ self.next_url = urlparse.urljoin(self.url, next_url)
if self.max_redirect > 0:
self.log(1, 'redirect to', self.next_url, 'from', self.url)
self.crawler.add_url(self.next_url, self.max_redirect-1)
@@ -556,8 +569,8 @@ class Fetcher:
self.new_urls = set()
for url in self.urls:
url = unescape(url)
- url = urllib.parse.urljoin(self.url, url)
- url, frag = urllib.parse.urldefrag(url)
+ url = urlparse.urljoin(self.url, url)
+ url, frag = urlparse.urldefrag(url)
if self.crawler.add_url(url):
self.new_urls.add(url)
@@ -657,8 +670,8 @@ class Crawler:
self.pool = ConnectionPool(self.log, max_pool, max_tasks)
self.root_domains = set()
for root in roots:
- parts = urllib.parse.urlparse(root)
- host, port = urllib.parse.splitport(parts.netloc)
+ parts = urlparse.urlparse(root)
+ host, port = urllib_splitport(parts.netloc)
if not host:
continue
if re.match(r'\A[\d\.]*\Z', host):
@@ -731,11 +744,11 @@ class Crawler:
"""Add a URL to the todo list if not seen before."""
if self.exclude and re.search(self.exclude, url):
return False
- parts = urllib.parse.urlparse(url)
+ parts = urlparse.urlparse(url)
if parts.scheme not in ('http', 'https'):
self.log(2, 'skipping non-http scheme in', url)
return False
- host, port = urllib.parse.splitport(parts.netloc)
+ host, port = urllib_splitport(parts.netloc)
if not self.host_okay(host):
self.log(2, 'skipping non-root host in', url)
return False
@@ -750,7 +763,7 @@ class Crawler:
@asyncio.coroutine
def crawl(self):
"""Run the crawler until all finished."""
- with (yield from self.termination):
+ with (yield From(self.termination)):
while self.todo or self.busy:
if self.todo:
url, max_redirect = self.todo.popitem()
@@ -762,7 +775,7 @@ class Crawler:
self.busy[url] = fetcher
fetcher.task = asyncio.Task(self.fetch(fetcher))
else:
- yield from self.termination.wait()
+ yield From(self.termination.wait())
self.t1 = time.time()
@asyncio.coroutine
@@ -772,13 +785,13 @@ class Crawler:
Once this returns, move the fetcher from busy to done.
"""
url = fetcher.url
- with (yield from self.governor):
+ with (yield From(self.governor)):
try:
- yield from fetcher.fetch() # Fetcher gonna fetch.
+ yield From(fetcher.fetch()) # Fetcher gonna fetch.
finally:
# Force GC of the task, so the error is logged.
fetcher.task = None
- with (yield from self.termination):
+ with (yield From(self.termination)):
self.done[url] = fetcher
del self.busy[url]
self.termination.notify()
@@ -828,7 +841,7 @@ def main():
log = Logger(args.level)
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
asyncio.set_event_loop(loop)
elif args.select:
diff --git a/examples/echo_client_tulip.py b/examples/echo_client_tulip.py
index 88124ef..0a60926 100644
--- a/examples/echo_client_tulip.py
+++ b/examples/echo_client_tulip.py
@@ -1,15 +1,16 @@
-import asyncio
+import trollius as asyncio
+from trollius import From
END = b'Bye-bye!\n'
@asyncio.coroutine
def echo_client():
- reader, writer = yield from asyncio.open_connection('localhost', 8000)
+ reader, writer = yield From(asyncio.open_connection('localhost', 8000))
writer.write(b'Hello, world\n')
writer.write(b'What a fine day it is.\n')
writer.write(END)
while True:
- line = yield from reader.readline()
+ line = yield From(reader.readline())
print('received:', line)
if line == END or not line:
break
diff --git a/examples/echo_server_tulip.py b/examples/echo_server_tulip.py
index 8167e54..d7e6e29 100644
--- a/examples/echo_server_tulip.py
+++ b/examples/echo_server_tulip.py
@@ -1,13 +1,14 @@
-import asyncio
+import trollius as asyncio
+from trollius import From
@asyncio.coroutine
def echo_server():
- yield from asyncio.start_server(handle_connection, 'localhost', 8000)
+ yield From(asyncio.start_server(handle_connection, 'localhost', 8000))
@asyncio.coroutine
def handle_connection(reader, writer):
while True:
- data = yield from reader.read(8192)
+ data = yield From(reader.read(8192))
if not data:
break
writer.write(data)
diff --git a/examples/fetch0.py b/examples/fetch0.py
index 180fcf2..f98feeb 100644
--- a/examples/fetch0.py
+++ b/examples/fetch0.py
@@ -1,25 +1,26 @@
"""Simplest possible HTTP client."""
+from __future__ import print_function
import sys
-from asyncio import *
+from trollius import *
@coroutine
def fetch():
- r, w = yield from open_connection('python.org', 80)
+ r, w = yield From(open_connection('python.org', 80))
request = 'GET / HTTP/1.0\r\n\r\n'
print('>', request, file=sys.stderr)
w.write(request.encode('latin-1'))
while True:
- line = yield from r.readline()
+ line = yield From(r.readline())
line = line.decode('latin-1').rstrip()
if not line:
break
print('<', line, file=sys.stderr)
print(file=sys.stderr)
- body = yield from r.read()
- return body
+ body = yield From(r.read())
+ raise Return(body)
def main():
diff --git a/examples/fetch1.py b/examples/fetch1.py
index 8dbb6e4..9e9a1ca 100644
--- a/examples/fetch1.py
+++ b/examples/fetch1.py
@@ -3,10 +3,14 @@
This version adds URL parsing (including SSL) and a Response object.
"""
+from __future__ import print_function
import sys
-import urllib.parse
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
-from asyncio import *
+from trollius import *
class Response:
@@ -22,13 +26,15 @@ class Response:
def read(self, reader):
@coroutine
def getline():
- return (yield from reader.readline()).decode('latin-1').rstrip()
- status_line = yield from getline()
+ line = (yield From(reader.readline()))
+ line = line.decode('latin-1').rstrip()
+ raise Return(line)
+ status_line = yield From(getline())
if self.verbose: print('<', status_line, file=sys.stderr)
self.http_version, status, self.reason = status_line.split(None, 2)
self.status = int(status)
while True:
- header_line = yield from getline()
+ header_line = yield From(getline())
if not header_line:
break
if self.verbose: print('<', header_line, file=sys.stderr)
@@ -40,7 +46,7 @@ class Response:
@coroutine
def fetch(url, verbose=True):
- parts = urllib.parse.urlparse(url)
+ parts = urlparse(url)
if parts.scheme == 'http':
ssl = False
elif parts.scheme == 'https':
@@ -57,12 +63,12 @@ def fetch(url, verbose=True):
request = 'GET %s HTTP/1.0\r\n\r\n' % path
if verbose:
print('>', request, file=sys.stderr, end='')
- r, w = yield from open_connection(parts.hostname, port, ssl=ssl)
+ r, w = yield From(open_connection(parts.hostname, port, ssl=ssl))
w.write(request.encode('latin-1'))
response = Response(verbose)
- yield from response.read(r)
- body = yield from r.read()
- return body
+ yield From(response.read(r))
+ body = yield From(r.read())
+ raise Return(body)
def main():
diff --git a/examples/fetch2.py b/examples/fetch2.py
index 7617b59..5a321a8 100644
--- a/examples/fetch2.py
+++ b/examples/fetch2.py
@@ -3,11 +3,17 @@
This version adds a Request object.
"""
+from __future__ import print_function
import sys
-import urllib.parse
-from http.client import BadStatusLine
+try:
+ from urllib.parse import urlparse
+ from http.client import BadStatusLine
+except ImportError:
+ # Python 2
+ from urlparse import urlparse
+ from httplib import BadStatusLine
-from asyncio import *
+from trollius import *
class Request:
@@ -15,7 +21,7 @@ class Request:
def __init__(self, url, verbose=True):
self.url = url
self.verbose = verbose
- self.parts = urllib.parse.urlparse(self.url)
+ self.parts = urlparse(self.url)
self.scheme = self.parts.scheme
assert self.scheme in ('http', 'https'), repr(url)
self.ssl = self.parts.scheme == 'https'
@@ -40,9 +46,9 @@ class Request:
print('* Connecting to %s:%s using %s' %
(self.hostname, self.port, 'ssl' if self.ssl else 'tcp'),
file=sys.stderr)
- self.reader, self.writer = yield from open_connection(self.hostname,
+ self.reader, self.writer = yield From(open_connection(self.hostname,
self.port,
- ssl=self.ssl)
+ ssl=self.ssl))
if self.verbose:
print('* Connected to %s' %
(self.writer.get_extra_info('peername'),),
@@ -67,8 +73,8 @@ class Request:
@coroutine
def get_response(self):
response = Response(self.reader, self.verbose)
- yield from response.read_headers()
- return response
+ yield From(response.read_headers())
+ raise Return(response)
class Response:
@@ -83,11 +89,13 @@ class Response:
@coroutine
def getline(self):
- return (yield from self.reader.readline()).decode('latin-1').rstrip()
+ line = (yield From(self.reader.readline()))
+ line = line.decode('latin-1').rstrip()
+ raise Return(line)
@coroutine
def read_headers(self):
- status_line = yield from self.getline()
+ status_line = yield From(self.getline())
if self.verbose: print('<', status_line, file=sys.stderr)
status_parts = status_line.split(None, 2)
if len(status_parts) != 3:
@@ -95,7 +103,7 @@ class Response:
self.http_version, status, self.reason = status_parts
self.status = int(status)
while True:
- header_line = yield from self.getline()
+ header_line = yield From(self.getline())
if not header_line:
break
if self.verbose: print('<', header_line, file=sys.stderr)
@@ -112,20 +120,20 @@ class Response:
nbytes = int(value)
break
if nbytes is None:
- body = yield from self.reader.read()
+ body = yield From(self.reader.read())
else:
- body = yield from self.reader.readexactly(nbytes)
- return body
+ body = yield From(self.reader.readexactly(nbytes))
+ raise Return(body)
@coroutine
def fetch(url, verbose=True):
request = Request(url, verbose)
- yield from request.connect()
- yield from request.send_request()
- response = yield from request.get_response()
- body = yield from response.read()
- return body
+ yield From(request.connect())
+ yield From(request.send_request())
+ response = yield From(request.get_response())
+ body = yield From(response.read())
+ raise Return(body)
def main():
@@ -134,7 +142,11 @@ def main():
body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv))
finally:
loop.close()
- sys.stdout.buffer.write(body)
+ if hasattr(sys.stdout, 'buffer'):
+ sys.stdout.buffer.write(body)
+ else:
+ # Python 2
+ sys.stdout.write(body)
if __name__ == '__main__':
diff --git a/examples/fetch3.py b/examples/fetch3.py
index 9419afd..0fc56d1 100644
--- a/examples/fetch3.py
+++ b/examples/fetch3.py
@@ -4,11 +4,17 @@ This version adds a primitive connection pool, redirect following and
chunked transfer-encoding. It also supports a --iocp flag.
"""
+from __future__ import print_function
import sys
-import urllib.parse
-from http.client import BadStatusLine
+try:
+ from urllib.parse import urlparse
+ from http.client import BadStatusLine
+except ImportError:
+ # Python 2
+ from urlparse import urlparse
+ from httplib import BadStatusLine
-from asyncio import *
+from trollius import *
class ConnectionPool:
@@ -25,12 +31,13 @@ class ConnectionPool:
@coroutine
def open_connection(self, host, port, ssl):
port = port or (443 if ssl else 80)
- ipaddrs = yield from get_event_loop().getaddrinfo(host, port)
+ ipaddrs = yield From(get_event_loop().getaddrinfo(host, port))
if self.verbose:
print('* %s resolves to %s' %
(host, ', '.join(ip[4][0] for ip in ipaddrs)),
file=sys.stderr)
- for _, _, _, _, (h, p, *_) in ipaddrs:
+ for _, _, _, _, addr in ipaddrs:
+ h, p = addr[:2]
key = h, p, ssl
conn = self.connections.get(key)
if conn:
@@ -40,14 +47,15 @@ class ConnectionPool:
continue
if self.verbose:
print('* Reusing pooled connection', key, file=sys.stderr)
- return conn
- reader, writer = yield from open_connection(host, port, ssl=ssl)
- host, port, *_ = writer.get_extra_info('peername')
+ raise Return(conn)
+ reader, writer = yield From(open_connection(host, port, ssl=ssl))
+ addr = writer.get_extra_info('peername')
+ host, port = addr[:2]
key = host, port, ssl
self.connections[key] = reader, writer
if self.verbose:
print('* New connection', key, file=sys.stderr)
- return reader, writer
+ raise Return(reader, writer)
class Request:
@@ -55,7 +63,7 @@ class Request:
def __init__(self, url, verbose=True):
self.url = url
self.verbose = verbose
- self.parts = urllib.parse.urlparse(self.url)
+ self.parts = urlparse(self.url)
self.scheme = self.parts.scheme
assert self.scheme in ('http', 'https'), repr(url)
self.ssl = self.parts.scheme == 'https'
@@ -83,9 +91,9 @@ class Request:
self.vprint('* Connecting to %s:%s using %s' %
(self.hostname, self.port, 'ssl' if self.ssl else 'tcp'))
self.reader, self.writer = \
- yield from pool.open_connection(self.hostname,
+ yield From(pool.open_connection(self.hostname,
self.port,
- ssl=self.ssl)
+ ssl=self.ssl))
self.vprint('* Connected to %s' %
(self.writer.get_extra_info('peername'),))
@@ -93,24 +101,24 @@ class Request:
def putline(self, line):
self.vprint('>', line)
self.writer.write(line.encode('latin-1') + b'\r\n')
- ##yield from self.writer.drain()
+ ##yield From(self.writer.drain())
@coroutine
def send_request(self):
request = '%s %s %s' % (self.method, self.full_path, self.http_version)
- yield from self.putline(request)
+ yield From(self.putline(request))
if 'host' not in {key.lower() for key, _ in self.headers}:
self.headers.insert(0, ('Host', self.netloc))
for key, value in self.headers:
line = '%s: %s' % (key, value)
- yield from self.putline(line)
- yield from self.putline('')
+ yield From(self.putline(line))
+ yield From(self.putline(''))
@coroutine
def get_response(self):
response = Response(self.reader, self.verbose)
- yield from response.read_headers()
- return response
+ yield From(response.read_headers())
+ raise Return(response)
class Response:
@@ -129,20 +137,21 @@ class Response:
@coroutine
def getline(self):
- line = (yield from self.reader.readline()).decode('latin-1').rstrip()
+ line = (yield From(self.reader.readline()))
+ line = line.decode('latin-1').rstrip()
self.vprint('<', line)
- return line
+ raise Return(line)
@coroutine
def read_headers(self):
- status_line = yield from self.getline()
+ status_line = yield From(self.getline())
status_parts = status_line.split(None, 2)
if len(status_parts) != 3:
raise BadStatusLine(status_line)
self.http_version, status, self.reason = status_parts
self.status = int(status)
while True:
- header_line = yield from self.getline()
+ header_line = yield From(self.getline())
if not header_line:
break
# TODO: Continuation lines.
@@ -173,23 +182,23 @@ class Response:
blocks = []
size = -1
while size:
- size_header = yield from self.reader.readline()
+ size_header = yield From(self.reader.readline())
if not size_header:
break
parts = size_header.split(b';')
size = int(parts[0], 16)
if size:
- block = yield from self.reader.readexactly(size)
+ block = yield From(self.reader.readexactly(size))
assert len(block) == size, (len(block), size)
blocks.append(block)
- crlf = yield from self.reader.readline()
+ crlf = yield From(self.reader.readline())
assert crlf == b'\r\n', repr(crlf)
body = b''.join(blocks)
else:
- body = yield from self.reader.read()
+ body = yield From(self.reader.read())
else:
- body = yield from self.reader.readexactly(nbytes)
- return body
+ body = yield From(self.reader.readexactly(nbytes))
+ raise Return(body)
@coroutine
@@ -198,23 +207,23 @@ def fetch(url, verbose=True, max_redirect=10):
try:
for _ in range(max_redirect):
request = Request(url, verbose)
- yield from request.connect(pool)
- yield from request.send_request()
- response = yield from request.get_response()
- body = yield from response.read()
+ yield From(request.connect(pool))
+ yield From(request.send_request())
+ response = yield From(request.get_response())
+ body = yield From(response.read())
next_url = response.get_redirect_url()
if not next_url:
break
url = urllib.parse.urljoin(url, next_url)
print('redirect to', url, file=sys.stderr)
- return body
+ raise Return(body)
finally:
pool.close()
def main():
if '--iocp' in sys.argv:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
@@ -223,7 +232,11 @@ def main():
body = loop.run_until_complete(fetch(sys.argv[1], '-v' in sys.argv))
finally:
loop.close()
- sys.stdout.buffer.write(body)
+ if hasattr(sys.stdout, 'buffer'):
+ sys.stdout.buffer.write(body)
+ else:
+ # Python 2
+ sys.stdout.write(body)
if __name__ == '__main__':
diff --git a/examples/fuzz_as_completed.py b/examples/fuzz_as_completed.py
index 123fbf1..7e74fe7 100644
--- a/examples/fuzz_as_completed.py
+++ b/examples/fuzz_as_completed.py
@@ -2,26 +2,29 @@
"""Fuzz tester for as_completed(), by Glenn Langford."""
-import asyncio
+from __future__ import print_function
+
+import trollius as asyncio
+from trollius import From, Return
import itertools
import random
import sys
@asyncio.coroutine
def sleeper(time):
- yield from asyncio.sleep(time)
- return time
+ yield From(asyncio.sleep(time))
+ raise Return(time)
@asyncio.coroutine
def watcher(tasks,delay=False):
res = []
for t in asyncio.as_completed(tasks):
- r = yield from t
+ r = yield From(t)
res.append(r)
if delay:
# simulate processing delay
process_time = random.random() / 10
- yield from asyncio.sleep(process_time)
+ yield From(asyncio.sleep(process_time))
#print(res)
#assert(sorted(res) == res)
if sorted(res) != res:
diff --git a/examples/hello_callback.py b/examples/hello_callback.py
index 7ccbea1..f192c8d 100644
--- a/examples/hello_callback.py
+++ b/examples/hello_callback.py
@@ -1,6 +1,6 @@
"""Print 'Hello World' every two seconds, using a callback."""
-import asyncio
+import trollius
def print_and_repeat(loop):
@@ -9,7 +9,7 @@ def print_and_repeat(loop):
if __name__ == '__main__':
- loop = asyncio.get_event_loop()
+ loop = trollius.get_event_loop()
print_and_repeat(loop)
try:
loop.run_forever()
diff --git a/examples/hello_coroutine.py b/examples/hello_coroutine.py
index b9347aa..e6a4e6c 100644
--- a/examples/hello_coroutine.py
+++ b/examples/hello_coroutine.py
@@ -1,17 +1,18 @@
"""Print 'Hello World' every two seconds, using a coroutine."""
-import asyncio
+import trollius
+from trollius import From
-@asyncio.coroutine
+@trollius.coroutine
def greet_every_two_seconds():
while True:
print('Hello World')
- yield from asyncio.sleep(2)
+ yield From(trollius.sleep(2))
if __name__ == '__main__':
- loop = asyncio.get_event_loop()
+ loop = trollius.get_event_loop()
try:
loop.run_until_complete(greet_every_two_seconds())
finally:
diff --git a/examples/interop_asyncio.py b/examples/interop_asyncio.py
new file mode 100644
index 0000000..b20e3ed
--- /dev/null
+++ b/examples/interop_asyncio.py
@@ -0,0 +1,53 @@
+import asyncio
+import trollius
+
+@asyncio.coroutine
+def asyncio_noop():
+ pass
+
+@asyncio.coroutine
+def asyncio_coroutine(coro):
+ print("asyncio coroutine")
+ res = yield from coro
+ print("asyncio inner coroutine result: %r" % (res,))
+ print("asyncio coroutine done")
+ return "asyncio"
+
+@trollius.coroutine
+def trollius_noop():
+ pass
+
+@trollius.coroutine
+def trollius_coroutine(coro):
+ print("trollius coroutine")
+ res = yield trollius.From(coro)
+ print("trollius inner coroutine result: %r" % (res,))
+ print("trollius coroutine done")
+ raise trollius.Return("trollius")
+
+def main():
+ # use trollius event loop policy in asyncio
+ policy = trollius.get_event_loop_policy()
+ asyncio.set_event_loop_policy(policy)
+
+ # create an event loop for the main thread: use Trollius event loop
+ loop = trollius.get_event_loop()
+ assert asyncio.get_event_loop() is loop
+
+ print("[ asyncio coroutine called from trollius coroutine ]")
+ coro1 = asyncio_noop()
+ coro2 = asyncio_coroutine(coro1)
+ res = loop.run_until_complete(trollius_coroutine(coro2))
+ print("trollius coroutine result: %r" % res)
+ print("")
+
+ print("[ trollius coroutine called from asyncio coroutine ]")
+ coro1 = trollius_noop()
+ coro2 = trollius_coroutine(coro1)
+ res = loop.run_until_complete(asyncio_coroutine(coro2))
+ print("asyncio coroutine result: %r" % res)
+ print("")
+
+ loop.close()
+
+main()
diff --git a/examples/shell.py b/examples/shell.py
index 7dc7caf..5d6158a 100644
--- a/examples/shell.py
+++ b/examples/shell.py
@@ -1,31 +1,33 @@
"""Examples using create_subprocess_exec() and create_subprocess_shell()."""
-import asyncio
+import trollius as asyncio
+from trollius import From
import signal
-from asyncio.subprocess import PIPE
+from trollius.subprocess import PIPE
+from trollius.py33_exceptions import ProcessLookupError
@asyncio.coroutine
def cat(loop):
- proc = yield from asyncio.create_subprocess_shell("cat",
+ proc = yield From(asyncio.create_subprocess_shell("cat",
stdin=PIPE,
- stdout=PIPE)
+ stdout=PIPE))
print("pid: %s" % proc.pid)
message = "Hello World!"
print("cat write: %r" % message)
- stdout, stderr = yield from proc.communicate(message.encode('ascii'))
+ stdout, stderr = yield From(proc.communicate(message.encode('ascii')))
print("cat read: %r" % stdout.decode('ascii'))
- exitcode = yield from proc.wait()
+ exitcode = yield From(proc.wait())
print("(exit code %s)" % exitcode)
@asyncio.coroutine
def ls(loop):
- proc = yield from asyncio.create_subprocess_exec("ls",
- stdout=PIPE)
+ proc = yield From(asyncio.create_subprocess_exec("ls",
+ stdout=PIPE))
while True:
- line = yield from proc.stdout.readline()
+ line = yield From(proc.stdout.readline())
if not line:
break
print("ls>>", line.decode('ascii').rstrip())
@@ -35,10 +37,11 @@ def ls(loop):
pass
@asyncio.coroutine
-def test_call(*args, timeout=None):
+def test_call(*args, **kw):
+ timeout = kw.pop('timeout', None)
try:
- proc = yield from asyncio.create_subprocess_exec(*args)
- exitcode = yield from asyncio.wait_for(proc.wait(), timeout)
+ proc = yield From(asyncio.create_subprocess_exec(*args))
+ exitcode = yield From(asyncio.wait_for(proc.wait(), timeout))
print("%s: exit code %s" % (' '.join(args), exitcode))
except asyncio.TimeoutError:
print("timeout! (%.1f sec)" % timeout)
diff --git a/examples/simple_tcp_server.py b/examples/simple_tcp_server.py
index b796d9b..882938e 100644
--- a/examples/simple_tcp_server.py
+++ b/examples/simple_tcp_server.py
@@ -8,9 +8,11 @@ in the same process. It listens on port 1234 on 127.0.0.1, so it will
fail if this port is currently in use.
"""
+from __future__ import print_function
import sys
-import asyncio
+import trollius as asyncio
import asyncio.streams
+from trollius import From, Return
class MyServer:
@@ -58,28 +60,31 @@ class MyServer:
out one or more lines back to the client with the result.
"""
while True:
- data = (yield from client_reader.readline()).decode("utf-8")
+ data = (yield From(client_reader.readline()))
+ data = data.decode("utf-8")
if not data: # an empty string means the client disconnected
break
- cmd, *args = data.rstrip().split(' ')
+ parts = data.rstrip().split(' ')
+ cmd = parts[0]
+ args = parts[1:]
if cmd == 'add':
arg1 = float(args[0])
arg2 = float(args[1])
retval = arg1 + arg2
- client_writer.write("{!r}\n".format(retval).encode("utf-8"))
+ client_writer.write("{0!r}\n".format(retval).encode("utf-8"))
elif cmd == 'repeat':
times = int(args[0])
msg = args[1]
client_writer.write("begin\n".encode("utf-8"))
for idx in range(times):
- client_writer.write("{}. {}\n".format(idx+1, msg)
+ client_writer.write("{0}. {1}\n".format(idx+1, msg)
.encode("utf-8"))
client_writer.write("end\n".encode("utf-8"))
else:
- print("Bad command {!r}".format(data), file=sys.stderr)
+ print("Bad command {0!r}".format(data), file=sys.stderr)
# This enables us to have flow control in our connection.
- yield from client_writer.drain()
+ yield From(client_writer.drain())
def start(self, loop):
"""
@@ -115,32 +120,33 @@ def main():
@asyncio.coroutine
def client():
- reader, writer = yield from asyncio.streams.open_connection(
- '127.0.0.1', 12345, loop=loop)
+ reader, writer = yield From(asyncio.streams.open_connection(
+ '127.0.0.1', 12345, loop=loop))
def send(msg):
print("> " + msg)
writer.write((msg + '\n').encode("utf-8"))
def recv():
- msgback = (yield from reader.readline()).decode("utf-8").rstrip()
+ msgback = (yield From(reader.readline()))
+ msgback = msgback.decode("utf-8").rstrip()
print("< " + msgback)
- return msgback
+ raise Return(msgback)
# send a line
send("add 1 2")
- msg = yield from recv()
+ msg = yield From(recv())
send("repeat 5 hello")
- msg = yield from recv()
+ msg = yield From(recv())
assert msg == 'begin'
while True:
- msg = yield from recv()
+ msg = yield From(recv())
if msg == 'end':
break
writer.close()
- yield from asyncio.sleep(0.5)
+ yield From(asyncio.sleep(0.5))
# creates a client and connects to our server
try:
diff --git a/examples/sink.py b/examples/sink.py
index d362cbb..fb28ade 100644
--- a/examples/sink.py
+++ b/examples/sink.py
@@ -1,10 +1,11 @@
"""Test service that accepts connections and reads all data off them."""
+from __future__ import print_function
import argparse
import os
import sys
-from asyncio import *
+from trollius import *
ARGS = argparse.ArgumentParser(description="TCP data sink example.")
ARGS.add_argument(
@@ -63,23 +64,24 @@ def start(loop, host, port):
import ssl
# TODO: take cert/key from args as well.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
- sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslctx.options |= ssl.OP_NO_SSLv2
+ sslctx = SSLContext(ssl.PROTOCOL_SSLv23)
+ if not BACKPORT_SSL_CONTEXT:
+ sslctx.options |= ssl.OP_NO_SSLv2
sslctx.load_cert_chain(
certfile=os.path.join(here, 'ssl_cert.pem'),
keyfile=os.path.join(here, 'ssl_key.pem'))
- server = yield from loop.create_server(Service, host, port, ssl=sslctx)
+ server = yield From(loop.create_server(Service, host, port, ssl=sslctx))
dprint('serving TLS' if sslctx else 'serving',
[s.getsockname() for s in server.sockets])
- yield from server.wait_closed()
+ yield From(server.wait_closed())
def main():
global args
args = ARGS.parse_args()
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
diff --git a/examples/source.py b/examples/source.py
index 7fd11fb..c3ebd55 100644
--- a/examples/source.py
+++ b/examples/source.py
@@ -1,10 +1,11 @@
"""Test client that connects and sends infinite data."""
+from __future__ import print_function
import argparse
import sys
-from asyncio import *
-from asyncio import test_utils
+from trollius import *
+from trollius import test_utils
ARGS = argparse.ArgumentParser(description="TCP data sink example.")
@@ -74,18 +75,18 @@ def start(loop, host, port):
sslctx = None
if args.tls:
sslctx = test_utils.dummy_ssl_context()
- tr, pr = yield from loop.create_connection(Client, host, port,
- ssl=sslctx)
+ tr, pr = yield From(loop.create_connection(Client, host, port,
+ ssl=sslctx))
dprint('tr =', tr)
dprint('pr =', pr)
- yield from pr.waiter
+ yield From(pr.waiter)
def main():
global args
args = ARGS.parse_args()
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
diff --git a/examples/source1.py b/examples/source1.py
index 6802e96..48a53af 100644
--- a/examples/source1.py
+++ b/examples/source1.py
@@ -1,10 +1,11 @@
"""Like source.py, but uses streams."""
+from __future__ import print_function
import argparse
import sys
-from asyncio import *
-from asyncio import test_utils
+from trollius import *
+from trollius import test_utils
ARGS = argparse.ArgumentParser(description="TCP data sink example.")
ARGS.add_argument(
@@ -33,7 +34,7 @@ class Debug:
overwriting = False
label = 'stream1:'
- def print(self, *args):
+ def print_(self, *args):
if self.overwriting:
print(file=sys.stderr)
self.overwriting = 0
@@ -46,7 +47,8 @@ class Debug:
if self.overwriting == 3:
print(self.label, '[...]', file=sys.stderr)
end = '\r'
- print(self.label, *args, file=sys.stderr, end=end, flush=True)
+ print(self.label, *args, file=sys.stderr, end=end)
+ sys.stdout.flush()
@coroutine
@@ -55,11 +57,11 @@ def start(loop, args):
total = 0
sslctx = None
if args.tls:
- d.print('using dummy SSLContext')
+ d.print_('using dummy SSLContext')
sslctx = test_utils.dummy_ssl_context()
- r, w = yield from open_connection(args.host, args.port, ssl=sslctx)
- d.print('r =', r)
- d.print('w =', w)
+ r, w = yield From(open_connection(args.host, args.port, ssl=sslctx))
+ d.print_('r =', r)
+ d.print_('w =', w)
if args.stop:
w.write(b'stop')
w.close()
@@ -73,17 +75,17 @@ def start(loop, args):
w.write(data)
f = w.drain()
if f:
- d.print('pausing')
- yield from f
+ d.print_('pausing')
+ yield From(f)
except (ConnectionResetError, BrokenPipeError) as exc:
- d.print('caught', repr(exc))
+ d.print_('caught', repr(exc))
def main():
global args
args = ARGS.parse_args()
if args.iocp:
- from asyncio.windows_events import ProactorEventLoop
+ from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
diff --git a/examples/stacks.py b/examples/stacks.py
index 0b7e0b2..abe24a0 100644
--- a/examples/stacks.py
+++ b/examples/stacks.py
@@ -1,7 +1,7 @@
"""Crude demo for print_stack()."""
-from asyncio import *
+from trollius import *
@coroutine
@@ -10,9 +10,9 @@ def helper(r):
for t in Task.all_tasks():
t.print_stack()
print('--- end helper ---')
- line = yield from r.readline()
+ line = yield From(r.readline())
1/0
- return line
+ raise Return(line)
def doit():
l = get_event_loop()
diff --git a/examples/subprocess_attach_read_pipe.py b/examples/subprocess_attach_read_pipe.py
index d8a6242..a2f9bb5 100644
--- a/examples/subprocess_attach_read_pipe.py
+++ b/examples/subprocess_attach_read_pipe.py
@@ -1,7 +1,8 @@
#!/usr/bin/env python3
"""Example showing how to attach a read pipe to a subprocess."""
-import asyncio
+import trollius as asyncio
import os, sys
+from trollius import From
code = """
import os, sys
@@ -17,16 +18,19 @@ def task():
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(wfd)]
- pipe = open(rfd, 'rb', 0)
+ pipe = os.fdopen(rfd, 'rb', 0)
reader = asyncio.StreamReader(loop=loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=loop)
- transport, _ = yield from loop.connect_read_pipe(lambda: protocol, pipe)
+ transport, _ = yield From(loop.connect_read_pipe(lambda: protocol, pipe))
- proc = yield from asyncio.create_subprocess_exec(*args, pass_fds={wfd})
- yield from proc.wait()
+ kwds = {}
+ if sys.version_info >= (3, 2):
+ kwds['pass_fds'] = (wfd,)
+ proc = yield From(asyncio.create_subprocess_exec(*args, **kwds))
+ yield From(proc.wait())
os.close(wfd)
- data = yield from reader.read()
+ data = yield From(reader.read())
print("read = %r" % data.decode())
loop.run_until_complete(task())
diff --git a/examples/subprocess_attach_write_pipe.py b/examples/subprocess_attach_write_pipe.py
index 8614877..8b9e7ec 100644
--- a/examples/subprocess_attach_write_pipe.py
+++ b/examples/subprocess_attach_write_pipe.py
@@ -1,14 +1,19 @@
#!/usr/bin/env python3
"""Example showing how to attach a write pipe to a subprocess."""
-import asyncio
+import trollius as asyncio
+from trollius import From
import os, sys
-from asyncio import subprocess
+from trollius import subprocess
code = """
import os, sys
fd = int(sys.argv[1])
data = os.read(fd, 1024)
-sys.stdout.buffer.write(data)
+if sys.version_info >= (3,):
+ stdout = sys.stdout.buffer
+else:
+ stdout = sys.stdout
+stdout.write(data)
"""
loop = asyncio.get_event_loop()
@@ -17,17 +22,17 @@ loop = asyncio.get_event_loop()
def task():
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(rfd)]
- proc = yield from asyncio.create_subprocess_exec(
- *args,
- pass_fds={rfd},
- stdout=subprocess.PIPE)
+ kwargs = {'stdout': subprocess.PIPE}
+ if sys.version_info >= (3, 2):
+ kwargs['pass_fds'] = (rfd,)
+ proc = yield From(asyncio.create_subprocess_exec(*args, **kwargs))
- pipe = open(wfd, 'wb', 0)
- transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol,
- pipe)
+ pipe = os.fdopen(wfd, 'wb', 0)
+ transport, _ = yield From(loop.connect_write_pipe(asyncio.Protocol,
+ pipe))
transport.write(b'data')
- stdout, stderr = yield from proc.communicate()
+ stdout, stderr = yield From(proc.communicate())
print("stdout = %r" % stdout.decode())
pipe.close()
diff --git a/examples/subprocess_shell.py b/examples/subprocess_shell.py
index 745cb64..8941236 100644
--- a/examples/subprocess_shell.py
+++ b/examples/subprocess_shell.py
@@ -1,21 +1,23 @@
"""Example writing to and reading from a subprocess at the same time using
tasks."""
-import asyncio
+import trollius as asyncio
import os
-from asyncio.subprocess import PIPE
+from trollius import From
+from trollius.subprocess import PIPE
+from trollius.py33_exceptions import BrokenPipeError, ConnectionResetError
@asyncio.coroutine
def send_input(writer, input):
try:
for line in input:
- print('sending', len(line), 'bytes')
+ print('sending %s bytes' % len(line))
writer.write(line)
d = writer.drain()
if d:
print('pause writing')
- yield from d
+ yield From(d)
print('resume writing')
writer.close()
except BrokenPipeError:
@@ -26,7 +28,7 @@ def send_input(writer, input):
@asyncio.coroutine
def log_errors(reader):
while True:
- line = yield from reader.readline()
+ line = yield From(reader.readline())
if not line:
break
print('ERROR', repr(line))
@@ -34,7 +36,7 @@ def log_errors(reader):
@asyncio.coroutine
def read_stdout(stdout):
while True:
- line = yield from stdout.readline()
+ line = yield From(stdout.readline())
print('received', repr(line))
if not line:
break
@@ -47,7 +49,7 @@ def start(cmd, input=None, **kwds):
kwds['stdin'] = None
else:
kwds['stdin'] = PIPE
- proc = yield from asyncio.create_subprocess_shell(cmd, **kwds)
+ proc = yield From(asyncio.create_subprocess_shell(cmd, **kwds))
tasks = []
if input is not None:
@@ -66,9 +68,9 @@ def start(cmd, input=None, **kwds):
if tasks:
# feed stdin while consuming stdout to avoid hang
# when stdin pipe is full
- yield from asyncio.wait(tasks)
+ yield From(asyncio.wait(tasks))
- exitcode = yield from proc.wait()
+ exitcode = yield From(proc.wait())
print("exit code: %s" % exitcode)
diff --git a/examples/tcp_echo.py b/examples/tcp_echo.py
index d743242..773327f 100755
--- a/examples/tcp_echo.py
+++ b/examples/tcp_echo.py
@@ -1,7 +1,7 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
"""TCP echo server example."""
import argparse
-import asyncio
+import trollius as asyncio
import sys
try:
import signal
@@ -105,7 +105,7 @@ if __name__ == '__main__':
ARGS.print_help()
else:
if args.iocp:
- from asyncio import windows_events
+ from trollius import windows_events
loop = windows_events.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
diff --git a/examples/timing_tcp_server.py b/examples/timing_tcp_server.py
index 883ce6d..c93c407 100644
--- a/examples/timing_tcp_server.py
+++ b/examples/timing_tcp_server.py
@@ -8,12 +8,14 @@ in the same process. It listens on port 1234 on 127.0.0.1, so it will
fail if this port is currently in use.
"""
+from __future__ import print_function
import sys
import time
import random
-import asyncio
+import trollius as asyncio
import asyncio.streams
+from trollius import From, Return
class MyServer:
@@ -61,29 +63,32 @@ class MyServer:
out one or more lines back to the client with the result.
"""
while True:
- data = (yield from client_reader.readline()).decode("utf-8")
+ data = (yield From(client_reader.readline()))
+ data = data.decode("utf-8")
if not data: # an empty string means the client disconnected
break
- cmd, *args = data.rstrip().split(' ')
+ parts = data.rstrip().split(' ')
+ cmd = parts[0]
+ args = parts[1:]
if cmd == 'add':
arg1 = float(args[0])
arg2 = float(args[1])
retval = arg1 + arg2
- client_writer.write("{!r}\n".format(retval).encode("utf-8"))
+ client_writer.write("{0!r}\n".format(retval).encode("utf-8"))
elif cmd == 'repeat':
times = int(args[0])
msg = args[1]
client_writer.write("begin\n".encode("utf-8"))
for idx in range(times):
- client_writer.write("{}. {}\n".format(
+ client_writer.write("{0}. {1}\n".format(
idx+1, msg + 'x'*random.randint(10, 50))
.encode("utf-8"))
client_writer.write("end\n".encode("utf-8"))
else:
- print("Bad command {!r}".format(data), file=sys.stderr)
+ print("Bad command {0!r}".format(data), file=sys.stderr)
# This enables us to have flow control in our connection.
- yield from client_writer.drain()
+ yield From(client_writer.drain())
def start(self, loop):
"""
@@ -119,42 +124,44 @@ def main():
@asyncio.coroutine
def client():
- reader, writer = yield from asyncio.streams.open_connection(
- '127.0.0.1', 12345, loop=loop)
+ reader, writer = yield From(asyncio.streams.open_connection(
+ '127.0.0.1', 12345, loop=loop))
def send(msg):
print("> " + msg)
writer.write((msg + '\n').encode("utf-8"))
def recv():
- msgback = (yield from reader.readline()).decode("utf-8").rstrip()
+ msgback = (yield From(reader.readline()))
+ msgback = msgback.decode("utf-8").rstrip()
print("< " + msgback)
- return msgback
+ raise Return(msgback)
# send a line
send("add 1 2")
- msg = yield from recv()
+ msg = yield From(recv())
Ns = list(range(100, 100000, 10000))
times = []
for N in Ns:
t0 = time.time()
- send("repeat {} hello world ".format(N))
- msg = yield from recv()
+ send("repeat {0} hello world ".format(N))
+ msg = yield From(recv())
assert msg == 'begin'
while True:
- msg = (yield from reader.readline()).decode("utf-8").rstrip()
+ msg = (yield From(reader.readline()))
+ msg = msg.decode("utf-8").rstrip()
if msg == 'end':
break
t1 = time.time()
dt = t1 - t0
- print("Time taken: {:.3f} seconds ({:.6f} per repetition)"
+ print("Time taken: {0:.3f} seconds ({1:.6f} per repetition)"
.format(dt, dt/N))
times.append(dt)
writer.close()
- yield from asyncio.sleep(0.5)
+ yield From(asyncio.sleep(0.5))
# creates a client and connects to our server
try:
diff --git a/examples/udp_echo.py b/examples/udp_echo.py
index 93ac7e6..bd64639 100755
--- a/examples/udp_echo.py
+++ b/examples/udp_echo.py
@@ -1,8 +1,8 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
"""UDP echo example."""
import argparse
import sys
-import asyncio
+import trollius as asyncio
try:
import signal
except ImportError:
@@ -32,12 +32,12 @@ class MyClientUdpEchoProtocol:
def connection_made(self, transport):
self.transport = transport
- print('sending "{}"'.format(self.message))
+ print('sending "{0}"'.format(self.message))
self.transport.sendto(self.message.encode())
print('waiting to receive')
def datagram_received(self, data, addr):
- print('received "{}"'.format(data.decode()))
+ print('received "{0}"'.format(data.decode()))
self.transport.close()
def error_received(self, exc):
diff --git a/overlapped.c b/overlapped.c
index 6842efb..2c85676 100644
--- a/overlapped.c
+++ b/overlapped.c
@@ -31,6 +31,18 @@
#define T_HANDLE T_POINTER
+#if PY_MAJOR_VERSION >= 3
+# define PYTHON3
+#endif
+
+#ifndef Py_MIN
+# define Py_MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
+#endif
+
+#ifndef Py_MAX
+# define Py_MAX(X, Y) (((X) > (Y)) ? (X) : (Y))
+#endif
+
enum {TYPE_NONE, TYPE_NOT_STARTED, TYPE_READ, TYPE_WRITE, TYPE_ACCEPT,
TYPE_CONNECT, TYPE_DISCONNECT, TYPE_CONNECT_NAMED_PIPE,
TYPE_WAIT_NAMED_PIPE_AND_CONNECT};
@@ -69,6 +81,7 @@ SetFromWindowsErr(DWORD err)
if (err == 0)
err = GetLastError();
+#if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 3) || PY_MAJOR_VERSION > 3
switch (err) {
case ERROR_CONNECTION_REFUSED:
exception_type = PyExc_ConnectionRefusedError;
@@ -79,6 +92,9 @@ SetFromWindowsErr(DWORD err)
default:
exception_type = PyExc_OSError;
}
+#else
+ exception_type = PyExc_WindowsError;
+#endif
return PyErr_SetExcFromWindowsErr(exception_type, err);
}
@@ -328,7 +344,11 @@ overlapped_CreateEvent(PyObject *self, PyObject *args)
Py_UNICODE *Name;
HANDLE Event;
+#ifdef PYTHON3
if (!PyArg_ParseTuple(args, "O" F_BOOL F_BOOL "Z",
+#else
+ if (!PyArg_ParseTuple(args, "O" F_BOOL F_BOOL "z",
+#endif
&EventAttributes, &ManualReset,
&InitialState, &Name))
return NULL;
@@ -805,7 +825,11 @@ Overlapped_WriteFile(OverlappedObject *self, PyObject *args)
return NULL;
}
+#ifdef PYTHON3
if (!PyArg_Parse(bufobj, "y*", &self->write_buffer))
+#else
+ if (!PyArg_Parse(bufobj, "s*", &self->write_buffer))
+#endif
return NULL;
#if SIZEOF_SIZE_T > SIZEOF_LONG
@@ -861,7 +885,11 @@ Overlapped_WSASend(OverlappedObject *self, PyObject *args)
return NULL;
}
+#ifdef PYTHON3
if (!PyArg_Parse(bufobj, "y*", &self->write_buffer))
+#else
+ if (!PyArg_Parse(bufobj, "s*", &self->write_buffer))
+#endif
return NULL;
#if SIZEOF_SIZE_T > SIZEOF_LONG
@@ -1328,6 +1356,7 @@ static PyMethodDef overlapped_functions[] = {
{NULL}
};
+#ifdef PYTHON3
static struct PyModuleDef overlapped_module = {
PyModuleDef_HEAD_INIT,
"_overlapped",
@@ -1339,12 +1368,13 @@ static struct PyModuleDef overlapped_module = {
NULL,
NULL
};
+#endif
#define WINAPI_CONSTANT(fmt, con) \
PyDict_SetItemString(d, #con, Py_BuildValue(fmt, con))
-PyMODINIT_FUNC
-PyInit__overlapped(void)
+PyObject*
+_init_overlapped(void)
{
PyObject *m, *d;
@@ -1360,7 +1390,11 @@ PyInit__overlapped(void)
if (PyType_Ready(&OverlappedType) < 0)
return NULL;
+#ifdef PYTHON3
m = PyModule_Create(&overlapped_module);
+#else
+ m = Py_InitModule("_overlapped", overlapped_functions);
+#endif
if (PyModule_AddObject(m, "Overlapped", (PyObject *)&OverlappedType) < 0)
return NULL;
@@ -1375,6 +1409,22 @@ PyInit__overlapped(void)
WINAPI_CONSTANT(F_DWORD, SO_UPDATE_ACCEPT_CONTEXT);
WINAPI_CONSTANT(F_DWORD, SO_UPDATE_CONNECT_CONTEXT);
WINAPI_CONSTANT(F_DWORD, TF_REUSE_SOCKET);
+ WINAPI_CONSTANT(F_DWORD, ERROR_CONNECTION_REFUSED);
+ WINAPI_CONSTANT(F_DWORD, ERROR_CONNECTION_ABORTED);
return m;
}
+
+#ifdef PYTHON3
+PyMODINIT_FUNC
+PyInit__overlapped(void)
+{
+ return _init_overlapped();
+}
+#else
+PyMODINIT_FUNC
+init_overlapped(void)
+{
+ _init_overlapped();
+}
+#endif
diff --git a/release.py b/release.py
index c43822b..5bd9be0 100644
--- a/release.py
+++ b/release.py
@@ -21,10 +21,12 @@ import sys
import tempfile
import textwrap
-PROJECT = 'asyncio'
-DEBUG_ENV_VAR = 'PYTHONASYNCIODEBUG'
+PROJECT = 'trollius'
+DEBUG_ENV_VAR = 'TROLLIUSDEBUG'
PYTHON_VERSIONS = (
+ (2, 7),
(3, 3),
+ (3, 4),
)
PY3 = (sys.version_info >= (3,))
HG = 'hg'
diff --git a/run_aiotest.py b/run_aiotest.py
index 8d6fa29..da13328 100644
--- a/run_aiotest.py
+++ b/run_aiotest.py
@@ -1,14 +1,14 @@
import aiotest.run
-import asyncio
import sys
+import trollius
if sys.platform == 'win32':
- from asyncio.windows_utils import socketpair
+ from trollius.windows_utils import socketpair
else:
from socket import socketpair
config = aiotest.TestConfig()
-config.asyncio = asyncio
+config.asyncio = trollius
config.socketpair = socketpair
-config.new_event_pool_policy = asyncio.DefaultEventLoopPolicy
+config.new_event_pool_policy = trollius.DefaultEventLoopPolicy
config.call_soon_check_closed = True
aiotest.run.main(config)
diff --git a/runtests.py b/runtests.py
index 8cb56fe..1156599 100644..100755
--- a/runtests.py
+++ b/runtests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
"""Run Tulip unittests.
Usage:
@@ -20,86 +20,100 @@ runtests.py --coverage is equivalent of:
# Originally written by Beech Horn (for NDB).
-import argparse
+from __future__ import print_function
+import optparse
import gc
import logging
import os
import random
import re
import sys
-import unittest
import textwrap
-import importlib.machinery
+from trollius.compat import PY33
+if PY33:
+ import importlib.machinery
+else:
+ import imp
try:
import coverage
except ImportError:
coverage = None
+if sys.version_info < (3,):
+ sys.exc_clear()
-from unittest.signals import installHandler
-
-assert sys.version >= '3.3', 'Please use Python 3.3 or higher.'
-
-ARGS = argparse.ArgumentParser(description="Run all unittests.")
-ARGS.add_argument(
- '-v', action="store", dest='verbose',
- nargs='?', const=1, type=int, default=0, help='verbose')
-ARGS.add_argument(
+try:
+ import unittest
+ from unittest.signals import installHandler
+except ImportError:
+ import unittest2 as unittest
+ from unittest2.signals import installHandler
+
+ARGS = optparse.OptionParser(description="Run all unittests.", usage="%prog [options] [pattern] [pattern2 ...]")
+ARGS.add_option(
+ '-v', '--verbose', action="store_true", dest='verbose',
+ default=0, help='verbose')
+ARGS.add_option(
'-x', action="store_true", dest='exclude', help='exclude tests')
-ARGS.add_argument(
+ARGS.add_option(
'-f', '--failfast', action="store_true", default=False,
dest='failfast', help='Stop on first fail or error')
-ARGS.add_argument(
+ARGS.add_option(
'-c', '--catch', action="store_true", default=False,
dest='catchbreak', help='Catch control-C and display results')
-ARGS.add_argument(
+ARGS.add_option(
'--forever', action="store_true", dest='forever', default=False,
help='run tests forever to catch sporadic errors')
-ARGS.add_argument(
+ARGS.add_option(
'--findleaks', action='store_true', dest='findleaks',
help='detect tests that leak memory')
-ARGS.add_argument('-r', '--randomize', action='store_true',
- help='randomize test execution order.')
-ARGS.add_argument('--seed', type=int,
- help='random seed to reproduce a previous random run')
-ARGS.add_argument(
+ARGS.add_option(
+ '-r', '--randomize', action='store_true',
+ help='randomize test execution order.')
+ARGS.add_option(
+ '--seed', type=int,
+ help='random seed to reproduce a previous random run')
+ARGS.add_option(
'-q', action="store_true", dest='quiet', help='quiet')
-ARGS.add_argument(
+ARGS.add_option(
'--tests', action="store", dest='testsdir', default='tests',
help='tests directory')
-ARGS.add_argument(
+ARGS.add_option(
'--coverage', action="store_true", dest='coverage',
help='enable html coverage report')
-ARGS.add_argument(
- 'pattern', action="store", nargs="*",
- help='optional regex patterns to match test ids (default all tests)')
-COV_ARGS = argparse.ArgumentParser(description="Run all unittests.")
-COV_ARGS.add_argument(
- '--coverage', action="store", dest='coverage', nargs='?', const='',
- help='enable coverage report and provide python files directory')
+
+if PY33:
+ def load_module(modname, sourcefile):
+ loader = importlib.machinery.SourceFileLoader(modname, sourcefile)
+ return loader.load_module()
+else:
+ def load_module(modname, sourcefile):
+ return imp.load_source(modname, sourcefile)
def load_modules(basedir, suffix='.py'):
+ import trollius.test_utils
+
def list_dir(prefix, dir):
files = []
modpath = os.path.join(dir, '__init__.py')
if os.path.isfile(modpath):
mod = os.path.split(dir)[-1]
- files.append(('{}{}'.format(prefix, mod), modpath))
+ files.append(('{0}{1}'.format(prefix, mod), modpath))
- prefix = '{}{}.'.format(prefix, mod)
+ prefix = '{0}{1}.'.format(prefix, mod)
for name in os.listdir(dir):
path = os.path.join(dir, name)
if os.path.isdir(path):
- files.extend(list_dir('{}{}.'.format(prefix, name), path))
+ files.extend(list_dir('{0}{1}.'.format(prefix, name), path))
else:
if (name != '__init__.py' and
name.endswith(suffix) and
not name.startswith(('.', '_'))):
- files.append(('{}{}'.format(prefix, name[:-3]), path))
+ files.append(('{0}{1}'.format(prefix, name[:-3]), path))
return files
@@ -107,13 +121,17 @@ def load_modules(basedir, suffix='.py'):
for modname, sourcefile in list_dir('', basedir):
if modname == 'runtests':
continue
+ if modname == 'test_asyncio' and sys.version_info <= (3, 3):
+ print("Skipping '{0}': need at least Python 3.3".format(modname),
+ file=sys.stderr)
+ continue
try:
- loader = importlib.machinery.SourceFileLoader(modname, sourcefile)
- mods.append((loader.load_module(), sourcefile))
+ mod = load_module(modname, sourcefile)
+ mods.append((mod, sourcefile))
except SyntaxError:
raise
- except unittest.SkipTest as err:
- print("Skipping '{}': {}".format(modname, err), file=sys.stderr)
+ except trollius.test_utils.SkipTest as err:
+ print("Skipping '{0}': {1}".format(modname, err), file=sys.stderr)
return mods
@@ -198,7 +216,7 @@ class TestRunner(unittest.TextTestRunner):
def run(self, test):
result = super().run(test)
if result.leaks:
- self.stream.writeln("{} tests leaks:".format(len(result.leaks)))
+ self.stream.writeln("{0} tests leaks:".format(len(result.leaks)))
for name, leaks in result.leaks:
self.stream.writeln(' '*4 + name + ':')
for leak in leaks:
@@ -218,7 +236,7 @@ def _runtests(args, tests):
def runtests():
- args = ARGS.parse_args()
+ args, pattern = ARGS.parse_args()
if args.coverage and coverage is None:
URL = "bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py"
@@ -238,15 +256,15 @@ def runtests():
testsdir = os.path.abspath(args.testsdir)
if not os.path.isdir(testsdir):
- print("Tests directory is not found: {}\n".format(testsdir))
+ print("Tests directory is not found: {0}\n".format(testsdir))
ARGS.print_help()
return
excludes = includes = []
if args.exclude:
- excludes = args.pattern
+ excludes = pattern
else:
- includes = args.pattern
+ includes = pattern
v = 0 if args.quiet else args.verbose + 1
failfast = args.failfast
@@ -273,8 +291,8 @@ def runtests():
finder = TestsFinder(args.testsdir, includes, excludes)
if args.catchbreak:
installHandler()
- import asyncio.coroutines
- if asyncio.coroutines._DEBUG:
+ import trollius.coroutines
+ if trollius.coroutines._DEBUG:
print("Run tests in debug mode")
else:
print("Run tests in release mode")
@@ -297,7 +315,7 @@ def runtests():
cov.report(show_missing=False)
here = os.path.dirname(os.path.abspath(__file__))
print("\nFor html report:")
- print("open file://{}/htmlcov/index.html".format(here))
+ print("open file://{0}/htmlcov/index.html".format(here))
if __name__ == '__main__':
diff --git a/setup.py b/setup.py
index 2581bfd..2072837 100644
--- a/setup.py
+++ b/setup.py
@@ -1,49 +1,69 @@
# Release procedure:
-# - run tox (to run runtests.py and run_aiotest.py)
-# - maybe test examples
-# - update version in setup.py
+# - fill Tulip changelog
+# - run maybe update_tulip.sh
+# - run unit tests with concurrent.futures
+# - run unit tests without concurrent.futures
+# - run unit tests without ssl: set sys.modules['ssl']=None at startup
+# - test examples
+# - update version in setup.py (version) and doc/conf.py (version, release)
+# - set release date in doc/changelog.rst
# - hg ci
-# - hg tag VERSION
+# - hg tag trollius-VERSION
# - hg push
-# - run on Linux: python setup.py register sdist upload
-# - run on Windows: python release.py VERSION
-# - increment version in setup.py
+# - python setup.py register sdist bdist_wheel upload
+# - increment version in setup.py (version) and doc/conf.py (version, release)
# - hg ci && hg push
import os
+import sys
try:
from setuptools import setup, Extension
+ SETUPTOOLS = True
except ImportError:
+ SETUPTOOLS = False
# Use distutils.core as a fallback.
# We won't be able to build the Wheel file on Windows.
from distutils.core import setup, Extension
+with open("README") as fp:
+ long_description = fp.read()
+
extensions = []
if os.name == 'nt':
ext = Extension(
- 'asyncio._overlapped', ['overlapped.c'], libraries=['ws2_32'],
+ 'trollius._overlapped', ['overlapped.c'], libraries=['ws2_32'],
)
extensions.append(ext)
-with open("README") as fp:
- long_description = fp.read()
+requirements = []
+if sys.version_info < (2, 7):
+ requirements.append('ordereddict')
+if sys.version_info < (3,):
+ requirements.append('futures')
-setup(
- name="asyncio",
- version="3.4.3",
+install_options = {
+ "name": "trollius",
+ "version": "1.0.4",
+ "license": "Apache License 2.0",
+ "author": 'Victor Stinner',
+ "author_email": 'victor.stinner@gmail.com',
- description="reference implementation of PEP 3156",
- long_description=long_description,
- url="http://www.python.org/dev/peps/pep-3156/",
+ "description": "Port of the Tulip project (asyncio module, PEP 3156) on Python 2",
+ "long_description": long_description,
+ "url": "https://bitbucket.org/enovance/trollius/",
- classifiers=[
+ "classifiers": [
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.3",
+ "License :: OSI Approved :: Apache Software License",
],
- packages=["asyncio"],
- test_suite="runtests.runtests",
+ "packages": ["trollius"],
+ "test_suite": "runtests.runtests",
+
+ "ext_modules": extensions,
+}
+if SETUPTOOLS:
+ install_options['install_requires'] = requirements
- ext_modules=extensions,
-)
+setup(**install_options)
diff --git a/tests/echo3.py b/tests/echo3.py
index 0644967..a009ea3 100644
--- a/tests/echo3.py
+++ b/tests/echo3.py
@@ -1,4 +1,12 @@
import os
+import sys
+
+asyncio_path = os.path.join(os.path.dirname(__file__), '..')
+asyncio_path = os.path.abspath(asyncio_path)
+
+sys.path.insert(0, asyncio_path)
+from trollius.py33_exceptions import wrap_error
+sys.path.remove(asyncio_path)
if __name__ == '__main__':
while True:
@@ -6,6 +14,6 @@ if __name__ == '__main__':
if not buf:
break
try:
- os.write(1, b'OUT:'+buf)
+ wrap_error(os.write, 1, b'OUT:'+buf)
except OSError as ex:
os.write(2, b'ERR:' + ex.__class__.__name__.encode('ascii'))
diff --git a/tests/test_asyncio.py b/tests/test_asyncio.py
new file mode 100644
index 0000000..39d9e1a
--- /dev/null
+++ b/tests/test_asyncio.py
@@ -0,0 +1,141 @@
+from trollius import test_utils
+from trollius import From, Return
+import trollius
+import trollius.coroutines
+import unittest
+
+try:
+ import asyncio
+except ImportError:
+ from trollius.test_utils import SkipTest
+ raise SkipTest('need asyncio')
+
+
+@asyncio.coroutine
+def asyncio_noop(value):
+ yield from []
+ return (value,)
+
+@asyncio.coroutine
+def asyncio_coroutine(coro, value):
+ res = yield from coro
+ return res + (value,)
+
+@trollius.coroutine
+def trollius_noop(value):
+ yield From(None)
+ raise Return((value,))
+
+@trollius.coroutine
+def trollius_coroutine(coro, value):
+ res = yield trollius.From(coro)
+ raise trollius.Return(res + (value,))
+
+
+class AsyncioTests(test_utils.TestCase):
+ def setUp(self):
+ policy = trollius.get_event_loop_policy()
+
+ asyncio.set_event_loop_policy(policy)
+ self.addCleanup(asyncio.set_event_loop_policy, None)
+
+ self.loop = policy.new_event_loop()
+ self.addCleanup(self.loop.close)
+ policy.set_event_loop(self.loop)
+
+ def test_policy(self):
+ self.assertIs(asyncio.get_event_loop(), self.loop)
+
+ def test_asyncio(self):
+ coro = asyncio_noop("asyncio")
+ res = self.loop.run_until_complete(coro)
+ self.assertEqual(res, ("asyncio",))
+
+ def test_asyncio_in_trollius(self):
+ coro1 = asyncio_noop(1)
+ coro2 = asyncio_coroutine(coro1, 2)
+ res = self.loop.run_until_complete(trollius_coroutine(coro2, 3))
+ self.assertEqual(res, (1, 2, 3))
+
+ def test_trollius_in_asyncio(self):
+ coro1 = trollius_noop(4)
+ coro2 = trollius_coroutine(coro1, 5)
+ res = self.loop.run_until_complete(asyncio_coroutine(coro2, 6))
+ self.assertEqual(res, (4, 5, 6))
+
+ def test_step_future(self):
+ old_debug = trollius.coroutines._DEBUG
+ try:
+ def step_future():
+ future = asyncio.Future()
+ self.loop.call_soon(future.set_result, "asyncio.Future")
+ return (yield from future)
+
+ # test in release mode
+ trollius.coroutines._DEBUG = False
+ result = self.loop.run_until_complete(step_future())
+ self.assertEqual(result, "asyncio.Future")
+
+ # test in debug mode
+ trollius.coroutines._DEBUG = True
+ result = self.loop.run_until_complete(step_future())
+ self.assertEqual(result, "asyncio.Future")
+ finally:
+ trollius.coroutines._DEBUG = old_debug
+
+ def test_async(self):
+ fut = asyncio.Future()
+ self.assertIs(fut._loop, self.loop)
+
+ fut2 = trollius.async(fut)
+ self.assertIs(fut2, fut)
+ self.assertIs(fut._loop, self.loop)
+
+ def test_wrap_future(self):
+ fut = asyncio.Future()
+ self.assertIs(trollius.wrap_future(fut), fut)
+
+ def test_run_until_complete(self):
+ fut = asyncio.Future()
+ fut.set_result("ok")
+ self.assertEqual(self.loop.run_until_complete(fut),
+ "ok")
+
+ def test_coroutine_decorator(self):
+ @trollius.coroutine
+ def asyncio_future(fut):
+ return fut
+
+ fut = asyncio.Future()
+ self.loop.call_soon(fut.set_result, 'ok')
+ res = self.loop.run_until_complete(asyncio_future(fut))
+ self.assertEqual(res, "ok")
+
+ def test_as_completed(self):
+ fut = asyncio.Future()
+ fut.set_result("ok")
+
+ with self.assertRaises(TypeError):
+ for f in trollius.as_completed(fut):
+ pass
+
+ @trollius.coroutine
+ def get_results(fut):
+ results = []
+ for f in trollius.as_completed([fut]):
+ res = yield trollius.From(f)
+ results.append(res)
+ raise trollius.Return(results)
+
+ results = self.loop.run_until_complete(get_results(fut))
+ self.assertEqual(results, ["ok"])
+
+ def test_gather(self):
+ fut = asyncio.Future()
+ fut.set_result("ok")
+ results = self.loop.run_until_complete(trollius.gather(fut))
+ self.assertEqual(results, ["ok"])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/test_base_events.py b/tests/test_base_events.py
index afb65b2..ec86a25 100644
--- a/tests/test_base_events.py
+++ b/tests/test_base_events.py
@@ -7,13 +7,16 @@ import socket
import sys
import time
import unittest
-from unittest import mock
-import asyncio
-from asyncio import base_events
-from asyncio import constants
-from asyncio import test_support as support
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import Return, From
+from trollius import base_events
+from trollius import constants
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.py33_exceptions import BlockingIOError
+from trollius.test_utils import mock
+from trollius.time_monotonic import time_monotonic
MOCK_ANY = mock.ANY
@@ -210,9 +213,9 @@ class BaseEventLoopTests(test_utils.TestCase):
f.cancel() # Don't complain about abandoned Future.
def test__run_once(self):
- h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
+ h1 = asyncio.TimerHandle(time_monotonic() + 5.0, lambda: True, (),
self.loop)
- h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
+ h2 = asyncio.TimerHandle(time_monotonic() + 10.0, lambda: True, (),
self.loop)
h1.cancel()
@@ -233,7 +236,7 @@ class BaseEventLoopTests(test_utils.TestCase):
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test__run_once_logging(self, m_logger):
def slow_select(timeout):
# Sleep a bit longer than a second to avoid timer resolution issues.
@@ -258,23 +261,21 @@ class BaseEventLoopTests(test_utils.TestCase):
self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0])
def test__run_once_schedule_handle(self):
- handle = None
- processed = False
+ non_local = {'handle': None, 'processed': False}
def cb(loop):
- nonlocal processed, handle
- processed = True
- handle = loop.call_soon(lambda: True)
+ non_local['processed'] = True
+ non_local['handle'] = loop.call_soon(lambda: True)
- h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
+ h = asyncio.TimerHandle(time_monotonic() - 1, cb, (self.loop,),
self.loop)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
- self.assertTrue(processed)
- self.assertEqual([handle], list(self.loop._ready))
+ self.assertTrue(non_local['processed'])
+ self.assertEqual([non_local['handle']], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
@@ -419,7 +420,7 @@ class BaseEventLoopTests(test_utils.TestCase):
1/0
# Test call_soon (events.Handle)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
@@ -429,7 +430,7 @@ class BaseEventLoopTests(test_utils.TestCase):
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
@@ -440,18 +441,21 @@ class BaseEventLoopTests(test_utils.TestCase):
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
+ self.loop.set_debug(True)
+ asyncio.set_event_loop(self.loop)
@asyncio.coroutine
def zero_error_coro():
- yield from asyncio.sleep(0.01, loop=self.loop)
+ yield From(asyncio.sleep(0.01, loop=self.loop))
1/0
# Test Future.__del__
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
fut = asyncio.async(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
+ support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
@@ -494,7 +498,7 @@ class BaseEventLoopTests(test_utils.TestCase):
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
@@ -517,7 +521,7 @@ class BaseEventLoopTests(test_utils.TestCase):
self.loop.set_exception_handler(handler)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
@@ -525,7 +529,7 @@ class BaseEventLoopTests(test_utils.TestCase):
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
- _context = None
+ contexts = []
class Loop(base_events.BaseEventLoop):
@@ -533,8 +537,7 @@ class BaseEventLoopTests(test_utils.TestCase):
_process_events = mock.Mock()
def default_exception_handler(self, context):
- nonlocal _context
- _context = context
+ contexts.append(context)
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
@@ -547,7 +550,7 @@ class BaseEventLoopTests(test_utils.TestCase):
loop.call_soon(zero_error)
loop._run_once()
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
@@ -556,9 +559,9 @@ class BaseEventLoopTests(test_utils.TestCase):
def custom_handler(loop, context):
raise ValueError('ham')
- _context = None
+ del contexts[:]
loop.set_exception_handler(custom_handler)
- with mock.patch('asyncio.base_events.logger') as log:
+ with mock.patch('trollius.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
@@ -567,33 +570,25 @@ class BaseEventLoopTests(test_utils.TestCase):
# Check that original context was passed to default
# exception handler.
- self.assertIn('context', _context)
- self.assertIs(type(_context['context']['exception']),
+ context = contexts[0]
+ self.assertIn('context', context)
+ self.assertIs(type(context['context']['exception']),
ZeroDivisionError)
def test_env_var_debug(self):
code = '\n'.join((
- 'import asyncio',
- 'loop = asyncio.get_event_loop()',
+ 'import trollius',
+ 'loop = trollius.get_event_loop()',
'print(loop.get_debug())'))
- # Test with -E to not fail if the unit test was run with
- # PYTHONASYNCIODEBUG set to a non-empty string
- sts, stdout, stderr = support.assert_python_ok('-E', '-c', code)
- self.assertEqual(stdout.rstrip(), b'False')
-
sts, stdout, stderr = support.assert_python_ok('-c', code,
- PYTHONASYNCIODEBUG='')
+ TROLLIUSDEBUG='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = support.assert_python_ok('-c', code,
- PYTHONASYNCIODEBUG='1')
+ TROLLIUSDEBUG='1')
self.assertEqual(stdout.rstrip(), b'True')
- sts, stdout, stderr = support.assert_python_ok('-E', '-c', code,
- PYTHONASYNCIODEBUG='1')
- self.assertEqual(stdout.rstrip(), b'False')
-
def test_create_task(self):
class MyTask(asyncio.Task):
pass
@@ -727,7 +722,7 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
@@ -735,36 +730,39 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
- yield from []
- return [(2, 1, 6, '', ('107.6.106.82', 80)),
- (2, 1, 6, '', ('107.6.106.82', 80))]
+ yield From(None)
+ raise Return([(2, 1, 6, '', ('107.6.106.82', 80)),
+ (2, 1, 6, '', ('107.6.106.82', 80))])
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
- idx = -1
- errors = ['err1', 'err2']
+ non_local = {
+ 'idx': -1,
+ 'errors': ['err1', 'err2'],
+ }
def _socket(*args, **kw):
- nonlocal idx, errors
- idx += 1
- raise OSError(errors[idx])
+ non_local['idx'] += 1
+ raise socket.error(non_local['errors'][non_local['idx']])
+ m_socket.error = socket.error
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
- with self.assertRaises(OSError) as cm:
+ with self.assertRaises(socket.error) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
+ m_socket.error = socket.error
def getaddrinfo(*args, **kw):
fut = asyncio.Future(loop=self.loop)
@@ -793,7 +791,7 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def test_create_connection_no_getaddrinfo(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
- yield from []
+ yield From(None)
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
@@ -801,24 +799,24 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
- yield from []
- return [(2, 1, 6, '', ('107.6.106.82', 80))]
+ yield From(None)
+ raise Return([(2, 1, 6, '', ('107.6.106.82', 80))])
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.side_effect = OSError
+ self.loop.sock_connect.side_effect = socket.error
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
@asyncio.coroutine
@@ -831,22 +829,23 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.side_effect = OSError
+ self.loop.sock_connect.side_effect = socket.error
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
- with self.assertRaises(OSError):
+ with self.assertRaises(socket.error):
self.loop.run_until_complete(coro)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
- err = OSError('Err')
+ err = socket.error('Err')
err.strerror = 'Err'
raise err
+ m_socket.error = socket.error
m_socket.socket.return_value.bind = bind
@asyncio.coroutine
@@ -859,12 +858,12 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.side_effect = OSError('Err2')
+ self.loop.sock_connect.side_effect = socket.error('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
- with self.assertRaises(OSError) as cm:
+ with self.assertRaises(socket.error) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
@@ -887,7 +886,7 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
@@ -900,7 +899,9 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.return_value = ()
+ f = asyncio.Future(loop=self.loop)
+ f.set_result(())
+ self.loop.sock_connect.return_value = f
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
@@ -973,21 +974,20 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def test_create_server_empty_host(self):
# if host is empty string use None instead
- host = object()
+ non_local = {'host': object()}
@asyncio.coroutine
def getaddrinfo(*args, **kw):
- nonlocal host
- host = args[0]
- yield from []
+ non_local['host'] = args[0]
+ yield From(None)
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
- self.assertRaises(OSError, self.loop.run_until_complete, fut)
- self.assertIsNone(host)
+ self.assertRaises(socket.error, self.loop.run_until_complete, fut)
+ self.assertIsNone(non_local['host'])
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
@@ -999,18 +999,25 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
- getaddrinfo = self.loop.getaddrinfo = mock.Mock()
- getaddrinfo.return_value = []
+ @asyncio.coroutine
+ def getaddrinfo(*args, **kw):
+ raise Return([])
+
+ def getaddrinfo_task(*args, **kwds):
+ return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
+
+ self.loop.getaddrinfo = getaddrinfo_task
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
- self.assertRaises(OSError, self.loop.run_until_complete, f)
+ self.assertRaises(socket.error, self.loop.run_until_complete, f)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_server_cant_bind(self, m_socket):
- class Err(OSError):
+ class Err(socket.error):
strerror = 'error'
+ m_socket.error = socket.error
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
@@ -1018,18 +1025,19 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
- self.assertRaises(OSError, self.loop.run_until_complete, fut)
+ self.assertRaises(socket.error, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
+ m_socket.error = socket.error
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
@@ -1043,29 +1051,31 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
- self.loop.sock_connect.side_effect = OSError
+ self.loop.sock_connect.side_effect = socket.error
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_datagram_endpoint_socket_err(self, m_socket):
+ m_socket.error = socket.error
m_socket.getaddrinfo = socket.getaddrinfo
- m_socket.socket.side_effect = OSError
+ m_socket.socket.side_effect = socket.error
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
- @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
+ @test_utils.skipUnless(support.IPV6_ENABLED,
+ 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
@@ -1073,14 +1083,15 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_datagram_endpoint_setblk_err(self, m_socket):
- m_socket.socket.return_value.setblocking.side_effect = OSError
+ m_socket.error = socket.error
+ m_socket.socket.return_value.setblocking.side_effect = socket.error
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
- OSError, self.loop.run_until_complete, coro)
+ socket.error, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
@@ -1089,12 +1100,13 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
- @mock.patch('asyncio.base_events.socket')
+ @mock.patch('trollius.base_events.socket')
def test_create_datagram_endpoint_cant_bind(self, m_socket):
- class Err(OSError):
+ class Err(socket.error):
pass
m_socket.AF_INET6 = socket.AF_INET6
+ m_socket.error = socket.error
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
@@ -1112,11 +1124,11 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
- sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
+ sock.accept.side_effect = socket.error(errno.EMFILE, 'Too many open files')
self.loop.remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
@@ -1149,14 +1161,14 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
with self.assertRaises(TypeError):
self.loop.run_in_executor(None, func)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
@asyncio.coroutine
def stop_loop_coro(loop):
- yield from ()
+ yield From(None)
loop.stop()
asyncio.set_event_loop(self.loop)
@@ -1166,14 +1178,16 @@ class BaseEventLoopWithSelectorTests(test_utils.TestCase):
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
- fmt, *args = m_logger.warning.call_args[0]
+ fmt = m_logger.warning.call_args[0][0]
+ args = m_logger.warning.call_args[0][1:]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> took .* seconds$")
# slow task
asyncio.async(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
- fmt, *args = m_logger.warning.call_args[0]
+ fmt = m_logger.warning.call_args[0][0]
+ args = m_logger.warning.call_args[0][1:]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> took .* seconds$")
diff --git a/tests/test_events.py b/tests/test_events.py
index 04dc880..e0f8207 100644
--- a/tests/test_events.py
+++ b/tests/test_events.py
@@ -1,5 +1,6 @@
"""Tests for events.py."""
+import contextlib
import functools
import gc
import io
@@ -8,25 +9,38 @@ import platform
import re
import signal
import socket
-try:
- import ssl
-except ImportError:
- ssl = None
import subprocess
import sys
import threading
-import time
import errno
import unittest
-from unittest import mock
import weakref
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+try:
+ import concurrent
+except ImportError:
+ concurrent = None
+
+from trollius import Return, From
+from trollius import futures
-import asyncio
-from asyncio import proactor_events
-from asyncio import selector_events
-from asyncio import test_support as support
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import compat
+from trollius import events
+from trollius import proactor_events
+from trollius import selector_events
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.py33_exceptions import (wrap_error,
+ BlockingIOError, ConnectionRefusedError,
+ FileNotFoundError)
+from trollius.test_utils import mock
+from trollius.time_monotonic import time_monotonic
def data_file(filename):
@@ -91,7 +105,7 @@ class MyBaseProto(asyncio.Protocol):
class MyProto(MyBaseProto):
def connection_made(self, transport):
- super().connection_made(transport)
+ super(MyProto, self).connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
@@ -183,7 +197,7 @@ class MySubprocessProtocol(asyncio.SubprocessProtocol):
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
- self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
+ self.disconnects = dict((fd, futures.Future(loop=loop)) for fd in range(3))
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
@@ -217,10 +231,10 @@ class MySubprocessProtocol(asyncio.SubprocessProtocol):
self.returncode = self.transport.get_returncode()
-class EventLoopTestsMixin:
+class EventLoopTestsMixin(object):
def setUp(self):
- super().setUp()
+ super(EventLoopTestsMixin, self).setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
@@ -231,12 +245,12 @@ class EventLoopTestsMixin:
self.loop.close()
gc.collect()
- super().tearDown()
+ super(EventLoopTestsMixin, self).tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
- yield
+ yield From(None)
@asyncio.coroutine
def coro2():
@@ -259,10 +273,13 @@ class EventLoopTestsMixin:
@asyncio.coroutine
def cb():
self.loop.stop()
- yield from asyncio.sleep(0.1, loop=self.loop)
+ yield From(asyncio.sleep(0.1, loop=self.loop))
+
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
+ for task in asyncio.Task.all_tasks(loop=self.loop):
+ task._log_destroy_pending = False
def test_call_later(self):
results = []
@@ -272,9 +289,9 @@ class EventLoopTestsMixin:
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
- t0 = time.monotonic()
+ t0 = time_monotonic()
self.loop.run_forever()
- t1 = time.monotonic()
+ t1 = time_monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
@@ -325,13 +342,14 @@ class EventLoopTestsMixin:
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
+ @test_utils.skipIf(concurrent is None, 'need concurrent.futures')
def test_run_in_executor(self):
def run(arg):
- return (arg, threading.get_ident())
+ return (arg, threading.current_thread().ident)
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
- self.assertNotEqual(thread_id, threading.get_ident())
+ self.assertNotEqual(thread_id, threading.current_thread().ident)
def test_reader_callback(self):
r, w = test_utils.socketpair()
@@ -419,7 +437,7 @@ class EventLoopTestsMixin:
sock = socket.socket()
self._basetest_sock_client_ops(httpd, sock)
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_unix_sock_client_ops(self):
with test_utils.run_test_unix_server() as httpd:
sock = socket.socket(socket.AF_UNIX)
@@ -459,13 +477,12 @@ class EventLoopTestsMixin:
conn.close()
listener.close()
- @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
+ @test_utils.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
- caught = 0
+ non_local = {'caught': 0}
def my_handler():
- nonlocal caught
- caught += 1
+ non_local['caught'] += 1
# Check error behavior first.
self.assertRaises(
@@ -494,7 +511,7 @@ class EventLoopTestsMixin:
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
- test_utils.run_until(self.loop, lambda: caught)
+ test_utils.run_until(self.loop, lambda: non_local['caught'])
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
@@ -503,30 +520,28 @@ class EventLoopTestsMixin:
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
- @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
+ @test_utils.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
- caught = 0
+ non_local = {'caught': 0}
def my_handler():
- nonlocal caught
- caught += 1
+ non_local['caught'] += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
- self.assertEqual(caught, 1)
+ self.assertEqual(non_local['caught'], 1)
- @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
+ @test_utils.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
- caught = 0
+ non_local = {'caught': 0}
def my_handler(*args):
- nonlocal caught
- caught += 1
+ non_local['caught'] += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
@@ -534,7 +549,7 @@ class EventLoopTestsMixin:
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
- self.assertEqual(caught, 1)
+ self.assertEqual(non_local['caught'], 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
@@ -553,7 +568,7 @@ class EventLoopTestsMixin:
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
@@ -610,8 +625,9 @@ class EventLoopTestsMixin:
self._basetest_create_ssl_connection(conn_fut, check_sockname)
# ssl.Purpose was introduced in Python 3.4
+ #if not asyncio.BACKPORT_SSL_CONTEXT:
if hasattr(ssl, 'Purpose'):
- def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
+ def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=None, capath=None,
cadata=None):
"""
@@ -628,17 +644,16 @@ class EventLoopTestsMixin:
self._basetest_create_ssl_connection(conn_fut, check_sockname)
self.assertEqual(m.call_count, 1)
- # With the real ssl.create_default_context(), certificate
- # validation will fail
- with self.assertRaises(ssl.SSLError) as cm:
- conn_fut = create_connection(ssl=True)
- # Ignore the "SSL handshake failed" log in debug mode
- with test_utils.disable_logger():
- self._basetest_create_ssl_connection(conn_fut, check_sockname)
-
- self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ # With the real ssl.create_default_context(), certificate
+ # validation will fail
+ with self.assertRaises(ssl.SSLError) as cm:
+ conn_fut = create_connection(ssl=True)
+ # Ignore the "SSL handshake failed" log in debug mode
+ with test_utils.disable_logger():
+ self._basetest_create_ssl_connection(conn_fut, check_sockname)
- @unittest.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
@@ -647,8 +662,8 @@ class EventLoopTestsMixin:
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection)
- @unittest.skipIf(ssl is None, 'No ssl module')
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
@@ -679,10 +694,11 @@ class EventLoopTestsMixin:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
- with self.assertRaises(OSError) as cm:
+ with self.assertRaises(socket.error) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
- self.assertIn(str(httpd.address), cm.exception.strerror)
+ # FIXME: address missing from the message?
+ #self.assertIn(str(httpd.address), cm.exception.strerror)
def test_create_server(self):
proto = MyProto(self.loop)
@@ -729,7 +745,7 @@ class EventLoopTestsMixin:
return server, path
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
@@ -757,20 +773,23 @@ class EventLoopTestsMixin:
# close server
server.close()
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
- with sock:
+ try:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
+ finally:
+ sock.close()
def _create_ssl_context(self, certfile, keyfile=None):
- sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext.options |= ssl.OP_NO_SSLv2
+ sslcontext = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
@@ -789,7 +808,7 @@ class EventLoopTestsMixin:
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
- @unittest.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
@@ -823,8 +842,8 @@ class EventLoopTestsMixin:
# stop serving
server.close()
- @unittest.skipIf(ssl is None, 'No ssl module')
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
@@ -854,13 +873,14 @@ class EventLoopTestsMixin:
# stop serving
server.close()
- @unittest.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipIf(asyncio.BACKPORT_SSL_CONTEXT, 'need ssl.SSLContext')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
@@ -871,21 +891,22 @@ class EventLoopTestsMixin:
ssl=sslcontext_client)
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
- 'certificate verify failed '):
+ 'certificate verify failed'):
self.loop.run_until_complete(f_c)
# close connection
self.assertIsNone(proto.transport)
server.close()
- @unittest.skipIf(ssl is None, 'No ssl module')
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipIf(asyncio.BACKPORT_SSL_CONTEXT, 'need ssl.SSLContext')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
@@ -897,51 +918,61 @@ class EventLoopTestsMixin:
server_hostname='invalid')
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
- 'certificate verify failed '):
+ 'certificate verify failed'):
self.loop.run_until_complete(f_c)
# close connection
self.assertIsNone(proto.transport)
server.close()
- @unittest.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext_client.options |= ssl.OP_NO_SSLv2
- sslcontext_client.verify_mode = ssl.CERT_REQUIRED
- sslcontext_client.load_verify_locations(
- cafile=SIGNING_CA)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ sslcontext_client.load_verify_locations(
+ cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
+ if compat.PY3:
+ err_msg = "hostname '127.0.0.1' doesn't match 'localhost'"
+ else:
+ # http://bugs.python.org/issue22861
+ err_msg = "hostname '127.0.0.1' doesn't match u'localhost'"
+
# incorrect server_hostname
- f_c = self.loop.create_connection(MyProto, host, port,
- ssl=sslcontext_client)
- with test_utils.disable_logger():
- with self.assertRaisesRegex(
- ssl.CertificateError,
- "hostname '127.0.0.1' doesn't match 'localhost'"):
- self.loop.run_until_complete(f_c)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ f_c = self.loop.create_connection(MyProto, host, port,
+ ssl=sslcontext_client)
+ with test_utils.disable_logger():
+ with self.assertRaisesRegex(
+ ssl.CertificateError,
+ err_msg):
+ self.loop.run_until_complete(f_c)
+
+ # close connection
+ proto.transport.close()
- # close connection
- proto.transport.close()
server.close()
- @unittest.skipIf(ssl is None, 'No ssl module')
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext_client.options |= ssl.OP_NO_SSLv2
- sslcontext_client.verify_mode = ssl.CERT_REQUIRED
- sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
@@ -956,37 +987,40 @@ class EventLoopTestsMixin:
client.close()
server.close()
- @unittest.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
- sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext_client.options |= ssl.OP_NO_SSLv2
- sslcontext_client.verify_mode = ssl.CERT_REQUIRED
- sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
+ sslcontext_client = asyncio.SSLContext(ssl.PROTOCOL_SSLv23)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ sslcontext_client.options |= ssl.OP_NO_SSLv2
+ sslcontext_client.verify_mode = ssl.CERT_REQUIRED
+ sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
- # Connection succeeds with correct CA and server hostname.
- f_c = self.loop.create_connection(MyProto, host, port,
- ssl=sslcontext_client,
- server_hostname='localhost')
- client, pr = self.loop.run_until_complete(f_c)
+ if not asyncio.BACKPORT_SSL_CONTEXT:
+ # Connection succeeds with correct CA and server hostname.
+ f_c = self.loop.create_connection(MyProto, host, port,
+ ssl=sslcontext_client,
+ server_hostname='localhost')
+ client, pr = self.loop.run_until_complete(f_c)
+
+ # close connection
+ proto.transport.close()
+ client.close()
- # close connection
- proto.transport.close()
- client.close()
server.close()
def test_create_server_sock(self):
- proto = asyncio.Future(loop=self.loop)
+ non_local = {'proto': asyncio.Future(loop=self.loop)}
class TestMyProto(MyProto):
def connection_made(self, transport):
- super().connection_made(transport)
- proto.set_result(self)
+ super(TestMyProto, self).connection_made(transport)
+ non_local['proto'].set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -1016,19 +1050,19 @@ class EventLoopTestsMixin:
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
- with self.assertRaises(OSError) as cm:
+ with self.assertRaises(socket.error) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
- @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
+ @test_utils.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
- super().connection_made(transport)
+ super(TestMyProto, self).connection_made(transport)
f_proto.set_result(self)
try_count = 0
@@ -1037,7 +1071,7 @@ class EventLoopTestsMixin:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
- except OSError as ex:
+ except socket.error as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
@@ -1073,21 +1107,21 @@ class EventLoopTestsMixin:
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
-
server.close()
client = socket.socket()
self.assertRaises(
- ConnectionRefusedError, client.connect, ('127.0.0.1', port))
+ ConnectionRefusedError, wrap_error, client.connect,
+ ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
- super().__init__(loop=self.loop)
+ super(TestMyDatagramProto, inner_self).__init__(loop=self.loop)
def datagram_received(self, data, addr):
- super().datagram_received(data, addr)
+ super(TestMyDatagramProto, self).datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
@@ -1139,7 +1173,7 @@ class EventLoopTestsMixin:
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
- @unittest.skipUnless(sys.platform != 'win32',
+ @test_utils.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
@@ -1149,8 +1183,8 @@ class EventLoopTestsMixin:
@asyncio.coroutine
def connect():
- t, p = yield from self.loop.connect_read_pipe(
- lambda: proto, pipeobj)
+ t, p = yield From(self.loop.connect_read_pipe(
+ lambda: proto, pipeobj))
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
@@ -1174,7 +1208,7 @@ class EventLoopTestsMixin:
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
- @unittest.skipUnless(sys.platform != 'win32',
+ @test_utils.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@@ -1189,8 +1223,8 @@ class EventLoopTestsMixin:
@asyncio.coroutine
def connect():
- t, p = yield from self.loop.connect_read_pipe(lambda: proto,
- master_read_obj)
+ t, p = yield From(self.loop.connect_read_pipe(lambda: proto,
+ master_read_obj))
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
@@ -1214,8 +1248,8 @@ class EventLoopTestsMixin:
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
- @unittest.skipUnless(sys.platform != 'win32',
- "Don't support pipes for Windows")
+ @test_utils.skipUnless(sys.platform != 'win32',
+ "Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
@@ -1253,12 +1287,17 @@ class EventLoopTestsMixin:
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
- @unittest.skipUnless(sys.platform != 'win32',
+ @test_utils.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = test_utils.socketpair()
rsock.setblocking(False)
- pipeobj = io.open(wsock.detach(), 'wb', 1024)
+ if hasattr(wsock, 'detach'):
+ wsock_fd = wsock.detach()
+ else:
+ # Python 2
+ wsock_fd = wsock.fileno()
+ pipeobj = io.open(wsock_fd, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
@@ -1276,8 +1315,8 @@ class EventLoopTestsMixin:
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
- @unittest.skipUnless(sys.platform != 'win32',
- "Don't support pipes for Windows")
+ @test_utils.skipUnless(sys.platform != 'win32',
+ "Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
@@ -1332,19 +1371,19 @@ class EventLoopTestsMixin:
def main():
try:
self.loop.call_soon(f.cancel)
- yield from f
+ yield From(f)
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
- return res
+ raise Return(res)
- start = time.monotonic()
+ start = time_monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
- elapsed = time.monotonic() - start
+ elapsed = time_monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
@@ -1368,19 +1407,20 @@ class EventLoopTestsMixin:
@asyncio.coroutine
def wait():
loop = self.loop
- yield from asyncio.sleep(1e-2, loop=loop)
- yield from asyncio.sleep(1e-4, loop=loop)
- yield from asyncio.sleep(1e-6, loop=loop)
- yield from asyncio.sleep(1e-8, loop=loop)
- yield from asyncio.sleep(1e-10, loop=loop)
+ yield From(asyncio.sleep(1e-2, loop=loop))
+ yield From(asyncio.sleep(1e-4, loop=loop))
+ yield From(asyncio.sleep(1e-6, loop=loop))
+ yield From(asyncio.sleep(1e-8, loop=loop))
+ yield From(asyncio.sleep(1e-10, loop=loop))
self.loop.run_until_complete(wait())
- # The ideal number of call is 12, but on some platforms, the selector
+ # The ideal number of call is 22, but on some platforms, the selector
# may sleep at little bit less than timeout depending on the resolution
# of the clock used by the kernel. Tolerate a few useless calls on
# these platforms.
- self.assertLessEqual(self.loop._run_once_counter, 20,
- {'clock_resolution': self.loop._clock_resolution,
+ self.assertLessEqual(self.loop._run_once_counter, 30,
+ {'calls': self.loop._run_once_counter,
+ 'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_sock_connect_address(self):
@@ -1394,7 +1434,7 @@ class EventLoopTestsMixin:
for family, address in addresses:
for sock_type in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
sock = socket.socket(family, sock_type)
- with sock:
+ with contextlib.closing(sock):
sock.setblocking(False)
connect = self.loop.sock_connect(sock, address)
with self.assertRaises(ValueError) as cm:
@@ -1468,7 +1508,7 @@ class EventLoopTestsMixin:
self.loop.add_signal_handler(signal.SIGTERM, func)
-class SubprocessTestsMixin:
+class SubprocessTestsMixin(object):
def check_terminated(self, returncode):
if sys.platform == 'win32':
@@ -1595,7 +1635,7 @@ class SubprocessTestsMixin:
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
- @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
+ @test_utils.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
@@ -1685,6 +1725,7 @@ class SubprocessTestsMixin:
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
+ @test_utils.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
@@ -1699,9 +1740,9 @@ class SubprocessTestsMixin:
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
- yield from self.loop.subprocess_exec(
+ yield From(self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
- 'pwd', **kwds)
+ 'pwd', **kwds))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
@@ -1715,9 +1756,9 @@ class SubprocessTestsMixin:
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
- yield from self.loop.subprocess_shell(
+ yield From(self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
- cmd, **kwds)
+ cmd, **kwds))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
@@ -1777,18 +1818,18 @@ if sys.platform == 'win32':
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
- from asyncio import selectors
+ from trollius import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
- super().setUp()
+ super(UnixEventLoopTestsMixin, self).setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
- super().tearDown()
+ super(UnixEventLoopTestsMixin, self).tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
@@ -1804,16 +1845,16 @@ else:
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
- @unittest.skipIf(sys.platform.startswith('openbsd'),
- 'test hangs on OpenBSD')
+ @test_utils.skipIf(sys.platform.startswith('openbsd'),
+ 'test hangs on OpenBSD')
def test_read_pty_output(self):
- super().test_read_pty_output()
+ super(KqueueEventLoopTests, self).test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Maverick)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
- super().test_write_pty()
+ super(KqueueEventLoopTests, self).test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
@@ -1938,7 +1979,7 @@ class HandleTests(test_utils.TestCase):
self.loop.get_debug.return_value = True
# simple function
- create_filename = __file__
+ create_filename = sys._getframe().f_code.co_filename
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
@@ -1965,38 +2006,30 @@ class HandleTests(test_utils.TestCase):
loop.set_debug(True)
self.set_event_loop(loop)
- def check_source_traceback(h):
- lineno = sys._getframe(1).f_lineno - 1
- self.assertIsInstance(h._source_traceback, list)
- self.assertEqual(h._source_traceback[-1][:3],
- (__file__,
- lineno,
- 'test_handle_source_traceback'))
-
# call_soon
h = loop.call_soon(noop)
- check_source_traceback(h)
+ self.check_soure_traceback(h._source_traceback, -1)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
- check_source_traceback(h)
+ self.check_soure_traceback(h._source_traceback, -1)
# call_later
h = loop.call_later(0, noop)
- check_source_traceback(h)
+ self.check_soure_traceback(h._source_traceback, -1)
# call_at
h = loop.call_later(0, noop)
- check_source_traceback(h)
+ self.check_soure_traceback(h._source_traceback, -1)
-class TimerTests(unittest.TestCase):
+class TimerTests(test_utils.TestCase):
def setUp(self):
self.loop = mock.Mock()
def test_hash(self):
- when = time.monotonic()
+ when = time_monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
@@ -2006,7 +2039,7 @@ class TimerTests(unittest.TestCase):
return args
args = (1, 2, 3)
- when = time.monotonic()
+ when = time_monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
@@ -2041,7 +2074,7 @@ class TimerTests(unittest.TestCase):
self.loop.get_debug.return_value = True
# simple function
- create_filename = __file__
+ create_filename = sys._getframe().f_code.co_filename
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
@@ -2062,7 +2095,7 @@ class TimerTests(unittest.TestCase):
def callback(*args):
return args
- when = time.monotonic()
+ when = time_monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
@@ -2099,7 +2132,7 @@ class TimerTests(unittest.TestCase):
self.assertIs(NotImplemented, h1.__ne__(h3))
-class AbstractEventLoopTests(unittest.TestCase):
+class AbstractEventLoopTests(test_utils.TestCase):
def test_not_implemented(self):
f = mock.Mock()
@@ -2112,13 +2145,16 @@ class AbstractEventLoopTests(unittest.TestCase):
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
- self.assertRaises(
- NotImplementedError, loop.is_closed)
+ # skip some tests if the AbstractEventLoop class comes from asyncio
+ # and the asyncio version (python version in fact) is older than 3.4.2
+ if events.asyncio is None or sys.version_info >= (3, 4, 2):
+ self.assertRaises(
+ NotImplementedError, loop.is_closed)
+ self.assertRaises(
+ NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
- NotImplementedError, loop.create_task, None)
- self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
@@ -2187,7 +2223,7 @@ class AbstractEventLoopTests(unittest.TestCase):
NotImplementedError, loop.set_debug, f)
-class ProtocolsAbsTests(unittest.TestCase):
+class ProtocolsAbsTests(test_utils.TestCase):
def test_empty(self):
f = mock.Mock()
@@ -2211,7 +2247,7 @@ class ProtocolsAbsTests(unittest.TestCase):
self.assertIsNone(sp.process_exited())
-class PolicyTests(unittest.TestCase):
+class PolicyTests(test_utils.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
@@ -2254,7 +2290,7 @@ class PolicyTests(unittest.TestCase):
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
- @mock.patch('asyncio.events.threading.current_thread')
+ @mock.patch('trollius.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
diff --git a/tests/test_futures.py b/tests/test_futures.py
index 7c56462..9aace27 100644
--- a/tests/test_futures.py
+++ b/tests/test_futures.py
@@ -1,17 +1,24 @@
"""Tests for futures.py."""
-import concurrent.futures
+try:
+ import concurrent.futures
+except ImportError:
+ concurrent = None
import re
import sys
import threading
import unittest
-from unittest import mock
-import asyncio
-from asyncio import test_support as support
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import compat
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import mock
+def get_thread_ident():
+ return threading.current_thread().ident
+
def _fakefunc(f):
return f
@@ -39,10 +46,6 @@ class FutureTests(test_utils.TestCase):
f = asyncio.Future()
self.assertIs(f._loop, self.loop)
- def test_constructor_positional(self):
- # Make sure Future doesn't accept a positional argument
- self.assertRaises(TypeError, asyncio.Future, 42)
-
def test_cancel(self):
f = asyncio.Future(loop=self.loop)
self.assertTrue(f.cancel())
@@ -86,24 +89,6 @@ class FutureTests(test_utils.TestCase):
f.set_exception(RuntimeError)
self.assertIsInstance(f.exception(), RuntimeError)
- def test_yield_from_twice(self):
- f = asyncio.Future(loop=self.loop)
-
- def fixture():
- yield 'A'
- x = yield from f
- yield 'B', x
- y = yield from f
- yield 'C', y
-
- g = fixture()
- self.assertEqual(next(g), 'A') # yield 'A'.
- self.assertEqual(next(g), f) # First yield from f.
- f.set_result(42)
- self.assertEqual(next(g), ('B', 42)) # yield 'B', x.
- # The second "yield from f" does not yield f.
- self.assertEqual(next(g), ('C', 42)) # yield 'C', y.
-
def test_future_repr(self):
self.loop.set_debug(True)
f_pending_debug = asyncio.Future(loop=self.loop)
@@ -135,7 +120,8 @@ class FutureTests(test_utils.TestCase):
def func_repr(func):
filename, lineno = test_utils.get_function_source(func)
- text = '%s() at %s:%s' % (func.__qualname__, filename, lineno)
+ func_name = getattr(func, '__qualname__', func.__name__)
+ text = '%s() at %s:%s' % (func_name, filename, lineno)
return re.escape(text)
f_one_callbacks = asyncio.Future(loop=self.loop)
@@ -194,32 +180,20 @@ class FutureTests(test_utils.TestCase):
newf_cancelled._copy_state(f_cancelled)
self.assertTrue(newf_cancelled.cancelled())
- def test_iter(self):
- fut = asyncio.Future(loop=self.loop)
-
- def coro():
- yield from fut
-
- def test():
- arg1, arg2 = coro()
-
- self.assertRaises(AssertionError, test)
- fut.cancel()
-
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_abandoned(self, m_log):
fut = asyncio.Future(loop=self.loop)
del fut
self.assertFalse(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_result_unretrieved(self, m_log):
fut = asyncio.Future(loop=self.loop)
fut.set_result(42)
del fut
self.assertFalse(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_result_retrieved(self, m_log):
fut = asyncio.Future(loop=self.loop)
fut.set_result(42)
@@ -227,15 +201,18 @@ class FutureTests(test_utils.TestCase):
del fut
self.assertFalse(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_exception_unretrieved(self, m_log):
+ self.loop.set_debug(True)
+ asyncio.set_event_loop(self.loop)
fut = asyncio.Future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
del fut
test_utils.run_briefly(self.loop)
+ support.gc_collect()
self.assertTrue(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_exception_retrieved(self, m_log):
fut = asyncio.Future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
@@ -243,7 +220,7 @@ class FutureTests(test_utils.TestCase):
del fut
self.assertFalse(m_log.error.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_tb_logger_exception_result_retrieved(self, m_log):
fut = asyncio.Future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
@@ -251,32 +228,35 @@ class FutureTests(test_utils.TestCase):
del fut
self.assertFalse(m_log.error.called)
+ @test_utils.skipIf(concurrent is None, 'need concurrent.futures')
def test_wrap_future(self):
def run(arg):
- return (arg, threading.get_ident())
+ return (arg, get_thread_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1, loop=self.loop)
res, ident = self.loop.run_until_complete(f2)
self.assertIsInstance(f2, asyncio.Future)
self.assertEqual(res, 'oi')
- self.assertNotEqual(ident, threading.get_ident())
+ self.assertNotEqual(ident, get_thread_ident())
def test_wrap_future_future(self):
f1 = asyncio.Future(loop=self.loop)
f2 = asyncio.wrap_future(f1)
self.assertIs(f1, f2)
- @mock.patch('asyncio.futures.events')
+ @test_utils.skipIf(concurrent is None, 'need concurrent.futures')
+ @mock.patch('trollius.futures.events')
def test_wrap_future_use_global_loop(self, m_events):
def run(arg):
- return (arg, threading.get_ident())
+ return (arg, get_thread_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1)
self.assertIs(m_events.get_event_loop.return_value, f2._loop)
+ @test_utils.skipIf(concurrent is None, 'need concurrent.futures')
def test_wrap_future_cancel(self):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=self.loop)
@@ -285,6 +265,7 @@ class FutureTests(test_utils.TestCase):
self.assertTrue(f1.cancelled())
self.assertTrue(f2.cancelled())
+ @test_utils.skipIf(concurrent is None, 'need concurrent.futures')
def test_wrap_future_cancel2(self):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=self.loop)
@@ -299,14 +280,9 @@ class FutureTests(test_utils.TestCase):
self.loop.set_debug(True)
future = asyncio.Future(loop=self.loop)
- lineno = sys._getframe().f_lineno - 1
- self.assertIsInstance(future._source_traceback, list)
- self.assertEqual(future._source_traceback[-1][:3],
- (__file__,
- lineno,
- 'test_future_source_traceback'))
-
- @mock.patch('asyncio.base_events.logger')
+ self.check_soure_traceback(future._source_traceback, -1)
+
+ @mock.patch('trollius.base_events.logger')
def check_future_exception_never_retrieved(self, debug, m_log):
self.loop.set_debug(debug)
@@ -355,12 +331,16 @@ class FutureTests(test_utils.TestCase):
r'.*\n'
r'MemoryError$'
).format(filename=re.escape(frame[0]), lineno=frame[1])
- else:
+ elif compat.PY3:
regex = (r'^Future/Task exception was never retrieved\n'
r'Traceback \(most recent call last\):\n'
r'.*\n'
r'MemoryError$'
)
+ else:
+ regex = (r'^Future/Task exception was never retrieved\n'
+ r'MemoryError$'
+ )
m_log.error.assert_called_once_with(mock.ANY, exc_info=False)
message = m_log.error.call_args[0][0]
self.assertRegex(message, re.compile(regex, re.DOTALL))
diff --git a/tests/test_locks.py b/tests/test_locks.py
index dda4577..ec7dbba 100644
--- a/tests/test_locks.py
+++ b/tests/test_locks.py
@@ -1,11 +1,12 @@
"""Tests for lock.py"""
import unittest
-from unittest import mock
import re
-import asyncio
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import From, Return
+from trollius import test_utils
+from trollius.test_utils import mock
STR_RGX_REPR = (
@@ -42,7 +43,7 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- yield from lock
+ yield From(lock.acquire())
self.loop.run_until_complete(acquire_lock())
self.assertTrue(repr(lock).endswith('[locked]>'))
@@ -53,7 +54,8 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from lock)
+ yield From(lock.acquire())
+ raise Return(lock)
res = self.loop.run_until_complete(acquire_lock())
@@ -71,21 +73,21 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- if (yield from lock.acquire()):
+ if (yield From(lock.acquire())):
result.append(1)
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- if (yield from lock.acquire()):
+ if (yield From(lock.acquire())):
result.append(2)
- return True
+ raise Return(True)
@asyncio.coroutine
def c3(result):
- if (yield from lock.acquire()):
+ if (yield From(lock.acquire())):
result.append(3)
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
@@ -147,22 +149,22 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def lockit(name, blocker):
- yield from lock.acquire()
+ yield From(lock.acquire())
try:
if blocker is not None:
- yield from blocker
+ yield From(blocker)
finally:
lock.release()
fa = asyncio.Future(loop=self.loop)
ta = asyncio.Task(lockit('A', fa), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertTrue(lock.locked())
tb = asyncio.Task(lockit('B', None), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual(len(lock._waiters), 1)
tc = asyncio.Task(lockit('C', None), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual(len(lock._waiters), 2)
# Create the race and check.
@@ -170,7 +172,7 @@ class LockTests(test_utils.TestCase):
fa.set_result(None)
tb.cancel()
self.assertTrue(lock._waiters[0].cancelled())
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertFalse(lock.locked())
self.assertTrue(ta.done())
self.assertTrue(tb.cancelled())
@@ -194,7 +196,7 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from lock)
+ raise Return((yield From(lock)))
with self.loop.run_until_complete(acquire_lock()):
self.assertTrue(lock.locked())
@@ -206,9 +208,9 @@ class LockTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from lock)
+ raise Return((yield From(lock)))
- # This spells "yield from lock" outside a generator.
+ # This spells "yield From(lock)" outside a generator.
cm = self.loop.run_until_complete(acquire_lock())
with cm:
self.assertTrue(lock.locked())
@@ -228,7 +230,7 @@ class LockTests(test_utils.TestCase):
except RuntimeError as err:
self.assertEqual(
str(err),
- '"yield from" should be used as context manager expression')
+ '"yield" should be used as context manager expression')
self.assertFalse(lock.locked())
@@ -273,30 +275,30 @@ class EventTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- if (yield from ev.wait()):
+ if (yield From(ev.wait())):
result.append(1)
@asyncio.coroutine
def c2(result):
- if (yield from ev.wait()):
+ if (yield From(ev.wait())):
result.append(2)
@asyncio.coroutine
def c3(result):
- if (yield from ev.wait()):
+ if (yield From(ev.wait())):
result.append(3)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([], result)
t3 = asyncio.Task(c3(result), loop=self.loop)
ev.set()
- test_utils.run_briefly(self.loop)
- self.assertEqual([3, 1, 2], result)
+ test_utils.run_briefly(self.loop, 2)
+ self.assertEqual([1, 2, 3], result)
self.assertTrue(t1.done())
self.assertIsNone(t1.result())
@@ -338,9 +340,9 @@ class EventTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- if (yield from ev.wait()):
+ if (yield From(ev.wait())):
result.append(1)
- return True
+ raise Return(True)
t = asyncio.Task(c1(result), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -386,56 +388,56 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(1)
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(2)
- return True
+ raise Return(True)
@asyncio.coroutine
def c3(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(3)
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([], result)
self.assertFalse(cond.locked())
self.assertTrue(self.loop.run_until_complete(cond.acquire()))
cond.notify()
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([], result)
self.assertTrue(cond.locked())
cond.release()
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.assertTrue(cond.locked())
cond.notify(2)
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.assertTrue(cond.locked())
cond.release()
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1, 2], result)
self.assertTrue(cond.locked())
cond.release()
- test_utils.run_briefly(self.loop)
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1, 2, 3], result)
self.assertTrue(cond.locked())
@@ -475,11 +477,11 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from cond.acquire()
- if (yield from cond.wait_for(predicate)):
+ yield From(cond.acquire())
+ if (yield From(cond.wait_for(predicate))):
result.append(1)
cond.release()
- return True
+ raise Return(True)
t = asyncio.Task(c1(result), loop=self.loop)
@@ -520,27 +522,27 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(1)
cond.release()
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(2)
cond.release()
- return True
+ raise Return(True)
@asyncio.coroutine
def c3(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(3)
cond.release()
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
@@ -552,14 +554,16 @@ class ConditionTests(test_utils.TestCase):
self.loop.run_until_complete(cond.acquire())
cond.notify(1)
cond.release()
- test_utils.run_briefly(self.loop)
+ # each coroutine requires 2 runs of the event loop
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.loop.run_until_complete(cond.acquire())
cond.notify(1)
cond.notify(2048)
cond.release()
- test_utils.run_briefly(self.loop)
+ # each coroutine requires 2 runs of the event loop
+ test_utils.run_briefly(self.loop, 4)
self.assertEqual([1, 2, 3], result)
self.assertTrue(t1.done())
@@ -576,19 +580,19 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(1)
cond.release()
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- yield from cond.acquire()
- if (yield from cond.wait()):
+ yield From(cond.acquire())
+ if (yield From(cond.wait())):
result.append(2)
cond.release()
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
@@ -599,7 +603,8 @@ class ConditionTests(test_utils.TestCase):
self.loop.run_until_complete(cond.acquire())
cond.notify_all()
cond.release()
- test_utils.run_briefly(self.loop)
+ # each coroutine requires 2 runs of the event loop
+ test_utils.run_briefly(self.loop, 4)
self.assertEqual([1, 2], result)
self.assertTrue(t1.done())
@@ -636,7 +641,7 @@ class ConditionTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_cond():
- return (yield from cond)
+ raise Return((yield From(cond)))
with self.loop.run_until_complete(acquire_cond()):
self.assertTrue(cond.locked())
@@ -652,7 +657,7 @@ class ConditionTests(test_utils.TestCase):
except RuntimeError as err:
self.assertEqual(
str(err),
- '"yield from" should be used as context manager expression')
+ '"yield From" should be used as context manager expression')
self.assertFalse(cond.locked())
@@ -718,7 +723,8 @@ class SemaphoreTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from sem)
+ yield From(sem.acquire())
+ raise Return(sem)
res = self.loop.run_until_complete(acquire_lock())
@@ -743,33 +749,34 @@ class SemaphoreTests(test_utils.TestCase):
@asyncio.coroutine
def c1(result):
- yield from sem.acquire()
+ yield From(sem.acquire())
result.append(1)
- return True
+ raise Return(True)
@asyncio.coroutine
def c2(result):
- yield from sem.acquire()
+ yield From(sem.acquire())
result.append(2)
- return True
+ raise Return(True)
@asyncio.coroutine
def c3(result):
- yield from sem.acquire()
+ yield From(sem.acquire())
result.append(3)
- return True
+ raise Return(True)
@asyncio.coroutine
def c4(result):
- yield from sem.acquire()
+ yield From(sem.acquire())
result.append(4)
- return True
+ raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
- test_utils.run_briefly(self.loop)
+ # each coroutine requires 2 runs of the event loop
+ test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.assertTrue(sem.locked())
self.assertEqual(2, len(sem._waiters))
@@ -829,7 +836,7 @@ class SemaphoreTests(test_utils.TestCase):
@asyncio.coroutine
def acquire_lock():
- return (yield from sem)
+ raise Return((yield From(sem)))
with self.loop.run_until_complete(acquire_lock()):
self.assertFalse(sem.locked())
@@ -849,7 +856,7 @@ class SemaphoreTests(test_utils.TestCase):
except RuntimeError as err:
self.assertEqual(
str(err),
- '"yield from" should be used as context manager expression')
+ '"yield" should be used as context manager expression')
self.assertEqual(2, sem._value)
diff --git a/tests/test_proactor_events.py b/tests/test_proactor_events.py
index 9e9b41a..9801889 100644
--- a/tests/test_proactor_events.py
+++ b/tests/test_proactor_events.py
@@ -2,14 +2,15 @@
import socket
import unittest
-from unittest import mock
-import asyncio
-from asyncio.proactor_events import BaseProactorEventLoop
-from asyncio.proactor_events import _ProactorSocketTransport
-from asyncio.proactor_events import _ProactorWritePipeTransport
-from asyncio.proactor_events import _ProactorDuplexPipeTransport
-from asyncio import test_utils
+from trollius import test_utils
+from trollius.proactor_events import BaseProactorEventLoop
+from trollius.proactor_events import _ProactorDuplexPipeTransport
+from trollius.proactor_events import _ProactorSocketTransport
+from trollius.proactor_events import _ProactorWritePipeTransport
+from trollius.py33_exceptions import ConnectionAbortedError, ConnectionResetError
+from trollius.test_utils import mock
+import trollius as asyncio
class ProactorSocketTransportTests(test_utils.TestCase):
@@ -139,7 +140,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
self.loop._proactor.send.return_value.add_done_callback.\
assert_called_with(tr._loop_writing)
- @mock.patch('asyncio.proactor_events.logger')
+ @mock.patch('trollius.proactor_events.logger')
def test_loop_writing_err(self, m_log):
err = self.loop._proactor.send.side_effect = OSError()
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
@@ -213,7 +214,7 @@ class ProactorSocketTransportTests(test_utils.TestCase):
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_fatal_error(self, m_logging):
tr = _ProactorSocketTransport(self.loop, self.sock, self.protocol)
tr._force_close = mock.Mock()
@@ -522,7 +523,7 @@ class BaseProactorEventLoopTests(test_utils.TestCase):
def test_process_events(self):
self.loop._process_events([])
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_create_server(self, m_log):
pf = mock.Mock()
call_soon = self.loop.call_soon = mock.Mock()
diff --git a/tests/test_queues.py b/tests/test_queues.py
index 3d4ac51..3492bd5 100644
--- a/tests/test_queues.py
+++ b/tests/test_queues.py
@@ -1,10 +1,11 @@
"""Tests for queues.py"""
import unittest
-from unittest import mock
-import asyncio
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import Return, From
+from trollius import test_utils
+from trollius.test_utils import mock
class _QueueTestBase(test_utils.TestCase):
@@ -32,7 +33,7 @@ class QueueBasicTests(_QueueTestBase):
q = asyncio.Queue(loop=loop)
self.assertTrue(fn(q).startswith('<Queue'), fn(q))
- id_is_present = hex(id(q)) in fn(q)
+ id_is_present = ("%x" % id(q)) in fn(q)
self.assertEqual(expect_id, id_is_present)
@asyncio.coroutine
@@ -41,7 +42,7 @@ class QueueBasicTests(_QueueTestBase):
# Start a task that waits to get.
asyncio.Task(q.get(), loop=loop)
# Let it start waiting.
- yield from asyncio.sleep(0.1, loop=loop)
+ yield From(asyncio.sleep(0.1, loop=loop))
self.assertTrue('_getters[1]' in fn(q))
# resume q.get coroutine to finish generator
q.put_nowait(0)
@@ -55,7 +56,7 @@ class QueueBasicTests(_QueueTestBase):
# Start a task that waits to put.
asyncio.Task(q.put(2), loop=loop)
# Let it start waiting.
- yield from asyncio.sleep(0.1, loop=loop)
+ yield From(asyncio.sleep(0.1, loop=loop))
self.assertTrue('_putters[1]' in fn(q))
# resume q.put coroutine to finish generator
q.get_nowait()
@@ -127,21 +128,22 @@ class QueueBasicTests(_QueueTestBase):
@asyncio.coroutine
def putter():
for i in range(3):
- yield from q.put(i)
+ yield From(q.put(i))
have_been_put.append(i)
- return True
+ raise Return(True)
@asyncio.coroutine
def test():
t = asyncio.Task(putter(), loop=loop)
- yield from asyncio.sleep(0.01, loop=loop)
+ yield From(None) # one extra iteration for the putter coroutine
+ yield From(asyncio.sleep(0.01, loop=loop))
# The putter is blocked after putting two items.
self.assertEqual([0, 1], have_been_put)
self.assertEqual(0, q.get_nowait())
# Let the putter resume and put last item.
- yield from asyncio.sleep(0.01, loop=loop)
+ yield From(asyncio.sleep(0.01, loop=loop))
self.assertEqual([0, 1, 2], have_been_put)
self.assertEqual(1, q.get_nowait())
self.assertEqual(2, q.get_nowait())
@@ -161,7 +163,8 @@ class QueueGetTests(_QueueTestBase):
@asyncio.coroutine
def queue_get():
- return (yield from q.get())
+ result = (yield From(q.get()))
+ raise Return(result)
res = self.loop.run_until_complete(queue_get())
self.assertEqual(1, res)
@@ -189,25 +192,24 @@ class QueueGetTests(_QueueTestBase):
q = asyncio.Queue(loop=loop)
started = asyncio.Event(loop=loop)
- finished = False
+ non_local = {'finished': False}
@asyncio.coroutine
def queue_get():
- nonlocal finished
started.set()
- res = yield from q.get()
- finished = True
- return res
+ res = yield From(q.get())
+ non_local['finished'] = True
+ raise Return(res)
@asyncio.coroutine
def queue_put():
loop.call_later(0.01, q.put_nowait, 1)
queue_get_task = asyncio.Task(queue_get(), loop=loop)
- yield from started.wait()
- self.assertFalse(finished)
- res = yield from queue_get_task
- self.assertTrue(finished)
- return res
+ yield From(started.wait())
+ self.assertFalse(non_local['finished'])
+ res = yield From(queue_get_task)
+ self.assertTrue(non_local['finished'])
+ raise Return(res)
res = loop.run_until_complete(queue_put())
self.assertEqual(1, res)
@@ -237,14 +239,16 @@ class QueueGetTests(_QueueTestBase):
@asyncio.coroutine
def queue_get():
- return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
+ result = (yield From(asyncio.wait_for(q.get(), 0.051, loop=loop)))
+ raise Return(result)
@asyncio.coroutine
def test():
get_task = asyncio.Task(queue_get(), loop=loop)
- yield from asyncio.sleep(0.01, loop=loop) # let the task start
+ yield From(asyncio.sleep(0.01, loop=loop)) # let the task start
q.put_nowait(1)
- return (yield from get_task)
+ result = (yield From(get_task))
+ raise Return(result)
self.assertEqual(1, loop.run_until_complete(test()))
self.assertAlmostEqual(0.06, loop.time())
@@ -280,12 +284,13 @@ class QueuePutTests(_QueueTestBase):
@asyncio.coroutine
def queue_put():
# No maxsize, won't block.
- yield from q.put(1)
+ yield From(q.put(1))
self.loop.run_until_complete(queue_put())
def test_blocking_put_wait(self):
+ @asyncio.coroutine
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
@@ -295,24 +300,24 @@ class QueuePutTests(_QueueTestBase):
q = asyncio.Queue(maxsize=1, loop=loop)
started = asyncio.Event(loop=loop)
- finished = False
+ non_local = {'finished': False}
@asyncio.coroutine
def queue_put():
- nonlocal finished
started.set()
- yield from q.put(1)
- yield from q.put(2)
- finished = True
+ yield From(q.put(1))
+ yield From(q.put(2))
+ non_local['finished'] = True
@asyncio.coroutine
def queue_get():
- loop.call_later(0.01, q.get_nowait)
queue_put_task = asyncio.Task(queue_put(), loop=loop)
- yield from started.wait()
- self.assertFalse(finished)
- yield from queue_put_task
- self.assertTrue(finished)
+ yield From(None)
+ loop.call_later(0.01, q.get_nowait)
+ yield From(started.wait())
+ self.assertFalse(non_local['finished'])
+ yield From(queue_put_task)
+ self.assertTrue(non_local['finished'])
loop.run_until_complete(queue_get())
self.assertAlmostEqual(0.01, loop.time())
@@ -337,8 +342,8 @@ class QueuePutTests(_QueueTestBase):
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
@asyncio.coroutine
def queue_put():
- yield from q.put(1)
- yield from q.put(2)
+ yield From(q.put(1))
+ yield From(q.put(2))
self.assertTrue(q.full())
self.loop.run_until_complete(queue_put())
@@ -347,12 +352,13 @@ class QueuePutTests(_QueueTestBase):
@asyncio.coroutine
def queue_put():
- yield from q.put(1)
- return True
+ yield From(q.put(1))
+ raise Return(True)
@asyncio.coroutine
def test():
- return (yield from q.get())
+ result = (yield From(q.get()))
+ raise Return(result)
t = asyncio.Task(queue_put(), loop=self.loop)
self.assertEqual(1, self.loop.run_until_complete(test()))
@@ -419,7 +425,7 @@ class JoinableQueueTests(_QueueTestBase):
for i in range(100):
q.put_nowait(i)
- accumulator = 0
+ non_local = {'accumulator': 0}
# Two workers get items from the queue and call task_done after each.
# Join the queue and assert all items have been processed.
@@ -427,11 +433,9 @@ class JoinableQueueTests(_QueueTestBase):
@asyncio.coroutine
def worker():
- nonlocal accumulator
-
while running:
- item = yield from q.get()
- accumulator += item
+ item = yield From(q.get())
+ non_local['accumulator'] += item
q.task_done()
@asyncio.coroutine
@@ -439,11 +443,11 @@ class JoinableQueueTests(_QueueTestBase):
tasks = [asyncio.Task(worker(), loop=self.loop)
for index in range(2)]
- yield from q.join()
- return tasks
+ yield From(q.join())
+ raise Return(tasks)
tasks = self.loop.run_until_complete(test())
- self.assertEqual(sum(range(100)), accumulator)
+ self.assertEqual(sum(range(100)), non_local['accumulator'])
# close running generators
running = False
@@ -459,8 +463,8 @@ class JoinableQueueTests(_QueueTestBase):
@asyncio.coroutine
def join():
- yield from q.join()
- yield from q.join()
+ yield From(q.join())
+ yield From(q.join())
self.loop.run_until_complete(join())
diff --git a/tests/test_selector_events.py b/tests/test_selector_events.py
index ff114f8..7e78586 100644
--- a/tests/test_selector_events.py
+++ b/tests/test_selector_events.py
@@ -2,22 +2,40 @@
import errno
import socket
+import sys
import unittest
-from unittest import mock
try:
import ssl
except ImportError:
ssl = None
-
-import asyncio
-from asyncio import selectors
-from asyncio import test_utils
-from asyncio.selector_events import BaseSelectorEventLoop
-from asyncio.selector_events import _SelectorTransport
-from asyncio.selector_events import _SelectorSslTransport
-from asyncio.selector_events import _SelectorSocketTransport
-from asyncio.selector_events import _SelectorDatagramTransport
-
+else:
+ HAS_SNI = getattr(ssl, 'HAS_SNI', False)
+ from trollius.py3_ssl import SSLWantReadError, SSLWantWriteError
+
+import trollius as asyncio
+from trollius.py33_exceptions import (
+ BlockingIOError, InterruptedError,
+ ConnectionResetError, ConnectionRefusedError)
+from trollius import selectors
+from trollius import test_utils
+from trollius.selector_events import BaseSelectorEventLoop
+from trollius.selector_events import _SelectorDatagramTransport
+from trollius.selector_events import _SelectorSocketTransport
+from trollius.selector_events import _SelectorSslTransport
+from trollius.selector_events import _SelectorTransport
+from trollius.selector_events import _SSL_REQUIRES_SELECT
+from trollius.test_utils import mock
+
+
+if sys.version_info >= (3,):
+ UNICODE_STR = 'unicode'
+else:
+ UNICODE_STR = unicode('unicode')
+ try:
+ memoryview
+ except NameError:
+ # Python 2.6
+ memoryview = buffer
MOCK_ANY = mock.ANY
@@ -48,7 +66,7 @@ class BaseSelectorEventLoopTests(test_utils.TestCase):
transport = self.loop._make_socket_transport(m, asyncio.Protocol())
self.assertIsInstance(transport, _SelectorSocketTransport)
- @unittest.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
def test_make_ssl_transport(self):
m = mock.Mock()
self.loop.add_reader = mock.Mock()
@@ -61,7 +79,7 @@ class BaseSelectorEventLoopTests(test_utils.TestCase):
m, asyncio.Protocol(), m, waiter)
self.assertIsInstance(transport, _SelectorSslTransport)
- @mock.patch('asyncio.selector_events.ssl', None)
+ @mock.patch('trollius.selector_events.ssl', None)
def test_make_ssl_transport_without_ssl_error(self):
m = mock.Mock()
self.loop.add_reader = mock.Mock()
@@ -684,7 +702,7 @@ class SelectorTransportTests(test_utils.TestCase):
self.assertFalse(self.loop.readers)
self.assertEqual(1, self.loop.remove_reader_count[7])
- @mock.patch('asyncio.log.logger.error')
+ @mock.patch('trollius.log.logger.error')
def test_fatal_error(self, m_exc):
exc = OSError()
tr = _SelectorTransport(self.loop, self.sock, self.protocol, None)
@@ -854,7 +872,7 @@ class SelectorSocketTransportTests(test_utils.TestCase):
transport = _SelectorSocketTransport(
self.loop, self.sock, self.protocol)
transport.write(data)
- self.sock.send.assert_called_with(data)
+ self.sock.send.assert_called_with(b'data')
def test_write_no_data(self):
transport = _SelectorSocketTransport(
@@ -930,7 +948,7 @@ class SelectorSocketTransportTests(test_utils.TestCase):
self.loop.assert_writer(7, transport._write_ready)
self.assertEqual(list_to_buffer([b'data']), transport._buffer)
- @mock.patch('asyncio.selector_events.logger')
+ @mock.patch('trollius.selector_events.logger')
def test_write_exception(self, m_log):
err = self.sock.send.side_effect = OSError()
@@ -1048,7 +1066,7 @@ class SelectorSocketTransportTests(test_utils.TestCase):
err,
'Fatal write error on socket transport')
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.base_events.logger')
def test_write_ready_exception_and_close(self, m_log):
self.sock.send.side_effect = OSError()
remove_writer = self.loop.remove_writer = mock.Mock()
@@ -1086,7 +1104,7 @@ class SelectorSocketTransportTests(test_utils.TestCase):
tr.close()
-@unittest.skipIf(ssl is None, 'No ssl module')
+@test_utils.skipIf(ssl is None, 'No ssl module')
class SelectorSslTransportTests(test_utils.TestCase):
def setUp(self):
@@ -1120,14 +1138,14 @@ class SelectorSslTransportTests(test_utils.TestCase):
def test_on_handshake_reader_retry(self):
self.loop.set_debug(False)
- self.sslsock.do_handshake.side_effect = ssl.SSLWantReadError
+ self.sslsock.do_handshake.side_effect = SSLWantReadError
transport = _SelectorSslTransport(
self.loop, self.sock, self.protocol, self.sslcontext)
self.loop.assert_reader(1, transport._on_handshake, None)
def test_on_handshake_writer_retry(self):
self.loop.set_debug(False)
- self.sslsock.do_handshake.side_effect = ssl.SSLWantWriteError
+ self.sslsock.do_handshake.side_effect = SSLWantWriteError
transport = _SelectorSslTransport(
self.loop, self.sock, self.protocol, self.sslcontext)
self.loop.assert_writer(1, transport._on_handshake, None)
@@ -1195,7 +1213,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
def test_write_str(self):
transport = self._make_one()
- self.assertRaises(TypeError, transport.write, 'str')
+ self.assertRaises(TypeError, transport.write, UNICODE_STR)
def test_write_closing(self):
transport = self._make_one()
@@ -1204,7 +1222,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport.write(b'data')
self.assertEqual(transport._conn_lost, 2)
- @mock.patch('asyncio.selector_events.logger')
+ @mock.patch('trollius.selector_events.logger')
def test_write_exception(self, m_log):
transport = self._make_one()
transport._conn_lost = 1
@@ -1216,6 +1234,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport.write(b'data')
m_log.warning.assert_called_with('socket.send() raised exception.')
+ @test_utils.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv(self):
self.sslsock.recv.return_value = b'data'
transport = self._make_one()
@@ -1237,6 +1256,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
self.loop.add_writer.assert_called_with(
transport._sock_fd, transport._write_ready)
+ @test_utils.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_eof(self):
self.sslsock.recv.return_value = b''
transport = self._make_one()
@@ -1245,6 +1265,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport.close.assert_called_with()
self.protocol.eof_received.assert_called_with()
+ @test_utils.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_conn_reset(self):
err = self.sslsock.recv.side_effect = ConnectionResetError()
transport = self._make_one()
@@ -1253,8 +1274,9 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport._read_ready()
transport._force_close.assert_called_with(err)
+ @test_utils.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_retry(self):
- self.sslsock.recv.side_effect = ssl.SSLWantReadError
+ self.sslsock.recv.side_effect = SSLWantReadError
transport = self._make_one()
transport._read_ready()
self.assertTrue(self.sslsock.recv.called)
@@ -1268,10 +1290,11 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport._read_ready()
self.assertFalse(self.protocol.data_received.called)
+ @test_utils.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_write(self):
self.loop.remove_reader = mock.Mock()
self.loop.add_writer = mock.Mock()
- self.sslsock.recv.side_effect = ssl.SSLWantWriteError
+ self.sslsock.recv.side_effect = SSLWantWriteError
transport = self._make_one()
transport._read_ready()
self.assertFalse(self.protocol.data_received.called)
@@ -1281,6 +1304,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
self.loop.add_writer.assert_called_with(
transport._sock_fd, transport._write_ready)
+ @test_utils.skipIf(_SSL_REQUIRES_SELECT, 'buggy ssl with the workaround')
def test_read_ready_recv_exc(self):
err = self.sslsock.recv.side_effect = OSError()
transport = self._make_one()
@@ -1344,7 +1368,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport = self._make_one()
transport._buffer = list_to_buffer([b'data'])
- self.sslsock.send.side_effect = ssl.SSLWantWriteError
+ self.sslsock.send.side_effect = SSLWantWriteError
transport._write_ready()
self.assertEqual(list_to_buffer([b'data']), transport._buffer)
@@ -1357,7 +1381,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
transport._buffer = list_to_buffer([b'data'])
self.loop.remove_writer = mock.Mock()
- self.sslsock.send.side_effect = ssl.SSLWantReadError
+ self.sslsock.send.side_effect = SSLWantReadError
transport._write_ready()
self.assertFalse(self.protocol.data_received.called)
self.assertTrue(transport._write_wants_read)
@@ -1405,7 +1429,7 @@ class SelectorSslTransportTests(test_utils.TestCase):
self.assertEqual(tr._conn_lost, 1)
self.assertEqual(1, self.loop.remove_reader_count[1])
- @unittest.skipIf(ssl is None, 'No SSL support')
+ @test_utils.skipIf(ssl is None, 'No SSL support')
def test_server_hostname(self):
_SelectorSslTransport(
self.loop, self.sock, self.protocol, self.sslcontext,
@@ -1415,9 +1439,9 @@ class SelectorSslTransportTests(test_utils.TestCase):
server_hostname='localhost')
-class SelectorSslWithoutSslTransportTests(unittest.TestCase):
+class SelectorSslWithoutSslTransportTests(test_utils.TestCase):
- @mock.patch('asyncio.selector_events.ssl', None)
+ @mock.patch('trollius.selector_events.ssl', None)
def test_ssl_transport_requires_ssl_module(self):
Mock = mock.Mock
with self.assertRaises(RuntimeError):
@@ -1500,7 +1524,7 @@ class SelectorDatagramTransportTests(test_utils.TestCase):
transport.sendto(data, ('0.0.0.0', 1234))
self.assertTrue(self.sock.sendto.called)
self.assertEqual(
- self.sock.sendto.call_args[0], (data, ('0.0.0.0', 1234)))
+ self.sock.sendto.call_args[0], (b'data', ('0.0.0.0', 1234)))
def test_sendto_no_data(self):
transport = _SelectorDatagramTransport(
@@ -1561,7 +1585,7 @@ class SelectorDatagramTransportTests(test_utils.TestCase):
self.assertEqual(
[(b'data', ('0.0.0.0', 12345))], list(transport._buffer))
- @mock.patch('asyncio.selector_events.logger')
+ @mock.patch('trollius.selector_events.logger')
def test_sendto_exception(self, m_log):
data = b'data'
err = self.sock.sendto.side_effect = RuntimeError()
@@ -1614,7 +1638,7 @@ class SelectorDatagramTransportTests(test_utils.TestCase):
def test_sendto_str(self):
transport = _SelectorDatagramTransport(
self.loop, self.sock, self.protocol)
- self.assertRaises(TypeError, transport.sendto, 'str', ())
+ self.assertRaises(TypeError, transport.sendto, UNICODE_STR, ())
def test_sendto_connected_addr(self):
transport = _SelectorDatagramTransport(
@@ -1717,7 +1741,7 @@ class SelectorDatagramTransportTests(test_utils.TestCase):
self.assertFalse(transport._fatal_error.called)
self.assertTrue(self.protocol.error_received.called)
- @mock.patch('asyncio.base_events.logger.error')
+ @mock.patch('trollius.base_events.logger.error')
def test_fatal_error_connected(self, m_exc):
transport = _SelectorDatagramTransport(
self.loop, self.sock, self.protocol, ('0.0.0.0', 1))
diff --git a/tests/test_selectors.py b/tests/test_selectors.py
index d91c78b..c332bf3 100644
--- a/tests/test_selectors.py
+++ b/tests/test_selectors.py
@@ -1,9 +1,10 @@
"""Tests for selectors.py."""
import unittest
-from unittest import mock
-from asyncio import selectors
+from trollius import selectors
+from trollius import test_utils
+from trollius.test_utils import mock
class FakeSelector(selectors._BaseSelectorImpl):
@@ -13,7 +14,7 @@ class FakeSelector(selectors._BaseSelectorImpl):
raise NotImplementedError
-class _SelectorMappingTests(unittest.TestCase):
+class _SelectorMappingTests(test_utils.TestCase):
def test_len(self):
s = FakeSelector()
@@ -60,7 +61,7 @@ class _SelectorMappingTests(unittest.TestCase):
self.assertEqual(1, counter)
-class BaseSelectorTests(unittest.TestCase):
+class BaseSelectorTests(test_utils.TestCase):
def test_fileobj_to_fd(self):
self.assertEqual(10, selectors._fileobj_to_fd(10))
diff --git a/tests/test_streams.py b/tests/test_streams.py
index 73a375a..4f4a684 100644
--- a/tests/test_streams.py
+++ b/tests/test_streams.py
@@ -1,18 +1,21 @@
"""Tests for streams.py."""
import gc
+import io
import os
import socket
import sys
import unittest
-from unittest import mock
try:
import ssl
except ImportError:
ssl = None
-import asyncio
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import Return, From
+from trollius import compat
+from trollius import test_utils
+from trollius.test_utils import mock
class StreamReaderTests(test_utils.TestCase):
@@ -29,9 +32,9 @@ class StreamReaderTests(test_utils.TestCase):
self.loop.close()
gc.collect()
- super().tearDown()
+ super(StreamReaderTests, self).tearDown()
- @mock.patch('asyncio.streams.events')
+ @mock.patch('trollius.streams.events')
def test_ctor_global_loop(self, m_events):
stream = asyncio.StreamReader()
self.assertIs(stream._loop, m_events.get_event_loop.return_value)
@@ -53,7 +56,7 @@ class StreamReaderTests(test_utils.TestCase):
loop=self.loop)
self._basetest_open_connection(conn_fut)
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_open_unix_connection(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
@@ -72,7 +75,7 @@ class StreamReaderTests(test_utils.TestCase):
writer.close()
- @unittest.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
def test_open_connection_no_loop_ssl(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_connection(
@@ -82,8 +85,8 @@ class StreamReaderTests(test_utils.TestCase):
self._basetest_open_connection_no_loop_ssl(conn_fut)
- @unittest.skipIf(ssl is None, 'No ssl module')
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipIf(ssl is None, 'No ssl module')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_open_unix_connection_no_loop_ssl(self):
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_unix_connection(
@@ -109,7 +112,7 @@ class StreamReaderTests(test_utils.TestCase):
loop=self.loop)
self._basetest_open_connection_error(conn_fut)
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_open_unix_connection_error(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address,
@@ -417,7 +420,7 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def readline():
- yield from stream.readline()
+ yield From(stream.readline())
t1 = asyncio.Task(stream.readline(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
@@ -431,7 +434,7 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def read_a_line():
- yield from stream.readline()
+ yield From(stream.readline())
t = asyncio.Task(read_a_line(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -452,7 +455,7 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def handle_client(self, client_reader, client_writer):
- data = yield from client_reader.readline()
+ data = yield From(client_reader.readline())
client_writer.write(data)
def start(self):
@@ -491,14 +494,14 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def client(addr):
- reader, writer = yield from asyncio.open_connection(
- *addr, loop=self.loop)
+ reader, writer = yield From(asyncio.open_connection(
+ *addr, loop=self.loop))
# send a line
writer.write(b"hello world!\n")
# read it back
- msgback = yield from reader.readline()
+ msgback = yield From(reader.readline())
writer.close()
- return msgback
+ raise Return(msgback)
# test the server variant with a coroutine as client handler
server = MyServer(self.loop)
@@ -516,7 +519,7 @@ class StreamReaderTests(test_utils.TestCase):
server.stop()
self.assertEqual(msg, b"hello world!\n")
- @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
+ @test_utils.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_start_unix_server(self):
class MyServer:
@@ -528,7 +531,7 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def handle_client(self, client_reader, client_writer):
- data = yield from client_reader.readline()
+ data = yield From(client_reader.readline())
client_writer.write(data)
def start(self):
@@ -559,14 +562,14 @@ class StreamReaderTests(test_utils.TestCase):
@asyncio.coroutine
def client(path):
- reader, writer = yield from asyncio.open_unix_connection(
- path, loop=self.loop)
+ reader, writer = yield From(asyncio.open_unix_connection(
+ path, loop=self.loop))
# send a line
writer.write(b"hello world!\n")
# read it back
- msgback = yield from reader.readline()
+ msgback = yield From(reader.readline())
writer.close()
- return msgback
+ raise Return(msgback)
# test the server variant with a coroutine as client handler
with test_utils.unix_socket_path() as path:
@@ -586,7 +589,7 @@ class StreamReaderTests(test_utils.TestCase):
server.stop()
self.assertEqual(msg, b"hello world!\n")
- @unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
+ @test_utils.skipIf(sys.platform == 'win32', "Don't have pipes")
def test_read_all_from_pipe_reader(self):
# See Tulip issue 168. This test is derived from the example
# subprocess_attach_read_pipe.py, but we configure the
@@ -603,7 +606,7 @@ os.close(fd)
rfd, wfd = os.pipe()
args = [sys.executable, '-c', code, str(wfd)]
- pipe = open(rfd, 'rb', 0)
+ pipe = io.open(rfd, 'rb', 0)
reader = asyncio.StreamReader(loop=self.loop, limit=1)
protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
transport, _ = self.loop.run_until_complete(
@@ -613,8 +616,11 @@ os.close(fd)
watcher.attach_loop(self.loop)
try:
asyncio.set_child_watcher(watcher)
+ kw = {'loop': self.loop}
+ if compat.PY3:
+ kw['pass_fds'] = set((wfd,))
proc = self.loop.run_until_complete(
- asyncio.create_subprocess_exec(*args, pass_fds={wfd}, loop=self.loop))
+ asyncio.create_subprocess_exec(*args, **kw))
self.loop.run_until_complete(proc.wait())
finally:
asyncio.set_child_watcher(None)
diff --git a/tests/test_subprocess.py b/tests/test_subprocess.py
index 5cb0f03..80ca58b 100644
--- a/tests/test_subprocess.py
+++ b/tests/test_subprocess.py
@@ -1,47 +1,55 @@
+from trollius import subprocess
+from trollius import test_utils
+import trollius as asyncio
+import os
import signal
import sys
import unittest
-from unittest import mock
+from trollius import From, Return
+from trollius import test_support as support
+from trollius.test_utils import mock
+from trollius.py33_exceptions import BrokenPipeError, ConnectionResetError
-import asyncio
-from asyncio import subprocess
-from asyncio import test_support as support
-from asyncio import test_utils
if sys.platform != 'win32':
- from asyncio import unix_events
+ from trollius import unix_events
+
# Program blocking
PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)']
# Program copying input to output
-PROGRAM_CAT = [
- sys.executable, '-c',
- ';'.join(('import sys',
- 'data = sys.stdin.buffer.read()',
- 'sys.stdout.buffer.write(data)'))]
+if sys.version_info >= (3,):
+ PROGRAM_CAT = ';'.join(('import sys',
+ 'data = sys.stdin.buffer.read()',
+ 'sys.stdout.buffer.write(data)'))
+else:
+ PROGRAM_CAT = ';'.join(('import sys',
+ 'data = sys.stdin.read()',
+ 'sys.stdout.write(data)'))
+PROGRAM_CAT = [sys.executable, '-c', PROGRAM_CAT]
-class SubprocessMixin:
+class SubprocessMixin(object):
def test_stdin_stdout(self):
args = PROGRAM_CAT
@asyncio.coroutine
def run(data):
- proc = yield from asyncio.create_subprocess_exec(
- *args,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- loop=self.loop)
+ proc = yield From(asyncio.create_subprocess_exec(
+ *args,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ loop=self.loop))
# feed data
proc.stdin.write(data)
- yield from proc.stdin.drain()
+ yield From(proc.stdin.drain())
proc.stdin.close()
# get output and exitcode
- data = yield from proc.stdout.read()
- exitcode = yield from proc.wait()
- return (exitcode, data)
+ data = yield From(proc.stdout.read())
+ exitcode = yield From(proc.wait())
+ raise Return(exitcode, data)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
@@ -54,13 +62,13 @@ class SubprocessMixin:
@asyncio.coroutine
def run(data):
- proc = yield from asyncio.create_subprocess_exec(
+ proc = yield From(asyncio.create_subprocess_exec(
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
- loop=self.loop)
- stdout, stderr = yield from proc.communicate(data)
- return proc.returncode, stdout
+ loop=self.loop))
+ stdout, stderr = yield From(proc.communicate(data))
+ raise Return(proc.returncode, stdout)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
@@ -75,10 +83,14 @@ class SubprocessMixin:
exitcode = self.loop.run_until_complete(proc.wait())
self.assertEqual(exitcode, 7)
+ @test_utils.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
def test_start_new_session(self):
+ def start_new_session():
+ os.setsid()
+
# start the new process in a new session
create = asyncio.create_subprocess_shell('exit 8',
- start_new_session=True,
+ preexec_fn=start_new_session,
loop=self.loop)
proc = self.loop.run_until_complete(create)
exitcode = self.loop.run_until_complete(proc.wait())
@@ -108,9 +120,13 @@ class SubprocessMixin:
else:
self.assertEqual(-signal.SIGTERM, returncode)
- @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
+ @test_utils.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_send_signal(self):
- code = 'import time; print("sleeping", flush=True); time.sleep(3600)'
+ code = '; '.join((
+ 'import sys, time',
+ 'print("sleeping")',
+ 'sys.stdout.flush()',
+ 'time.sleep(3600)'))
args = [sys.executable, '-c', code]
create = asyncio.create_subprocess_exec(*args, loop=self.loop, stdout=subprocess.PIPE)
proc = self.loop.run_until_complete(create)
@@ -118,12 +134,12 @@ class SubprocessMixin:
@asyncio.coroutine
def send_signal(proc):
# basic synchronization to wait until the program is sleeping
- line = yield from proc.stdout.readline()
+ line = yield From(proc.stdout.readline())
self.assertEqual(line, b'sleeping\n')
proc.send_signal(signal.SIGHUP)
- returncode = (yield from proc.wait())
- return returncode
+ returncode = yield From(proc.wait())
+ raise Return(returncode)
returncode = self.loop.run_until_complete(send_signal(proc))
self.assertEqual(-signal.SIGHUP, returncode)
@@ -146,7 +162,7 @@ class SubprocessMixin:
@asyncio.coroutine
def write_stdin(proc, data):
proc.stdin.write(data)
- yield from proc.stdin.drain()
+ yield From(proc.stdin.drain())
coro = write_stdin(proc, large_data)
# drain() must raise BrokenPipeError or ConnectionResetError
@@ -174,22 +190,22 @@ class SubprocessMixin:
'sys.stdout.write("x" * %s)' % size,
'sys.stdout.flush()',
))
- proc = yield from asyncio.create_subprocess_exec(
+ proc = yield From(asyncio.create_subprocess_exec(
sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
limit=limit,
- loop=self.loop)
+ loop=self.loop))
stdout_transport = proc._transport.get_pipe_transport(1)
stdout_transport.pause_reading = mock.Mock()
stdout_transport.resume_reading = mock.Mock()
- stdout, stderr = yield from proc.communicate()
+ stdout, stderr = yield From(proc.communicate())
# The child process produced more than limit bytes of output,
# the stream reader transport should pause the protocol to not
# allocate too much memory.
- return (stdout, stdout_transport)
+ raise Return(stdout, stdout_transport)
# Issue #22685: Ensure that the stream reader pauses the protocol
# when the child process produces too much data
@@ -205,16 +221,16 @@ class SubprocessMixin:
@asyncio.coroutine
def len_message(message):
code = 'import sys; data = sys.stdin.read(); print(len(data))'
- proc = yield from asyncio.create_subprocess_exec(
+ proc = yield From(asyncio.create_subprocess_exec(
sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
close_fds=False,
- loop=self.loop)
- stdout, stderr = yield from proc.communicate(message)
- exitcode = yield from proc.wait()
- return (stdout, exitcode)
+ loop=self.loop))
+ stdout, stderr = yield From(proc.communicate(message))
+ exitcode = yield From(proc.wait())
+ raise Return(stdout, exitcode)
output, exitcode = self.loop.run_until_complete(len_message(b'abc'))
self.assertEqual(output.rstrip(), b'3')
@@ -242,7 +258,7 @@ if sys.platform != 'win32':
policy = asyncio.get_event_loop_policy()
policy.set_child_watcher(None)
self.loop.close()
- super().tearDown()
+ super(SubprocessWatcherMixin, self).tearDown()
class SubprocessSafeWatcherTests(SubprocessWatcherMixin,
test_utils.TestCase):
@@ -269,7 +285,7 @@ else:
policy = asyncio.get_event_loop_policy()
self.loop.close()
policy.set_event_loop(None)
- super().tearDown()
+ super(SubprocessProactorTests, self).tearDown()
if __name__ == '__main__':
diff --git a/tests/test_tasks.py b/tests/test_tasks.py
index c005366..b23a624 100644
--- a/tests/test_tasks.py
+++ b/tests/test_tasks.py
@@ -6,14 +6,16 @@ import sys
import types
import unittest
import weakref
-from unittest import mock
-import asyncio
-from asyncio import coroutines
-from asyncio import test_support as support
-from asyncio import test_utils
+import trollius as asyncio
+from trollius import From, Return
+from trollius import coroutines
+from trollius import test_support as support
+from trollius import test_utils
+from trollius.test_utils import mock
+PY33 = (sys.version_info >= (3, 3))
PY34 = (sys.version_info >= (3, 4))
PY35 = (sys.version_info >= (3, 5))
@@ -135,9 +137,14 @@ class TaskTests(test_utils.TestCase):
self.loop.set_debug(False)
@asyncio.coroutine
+ def noop():
+ yield From(None)
+ raise Return('abc')
+
+ @asyncio.coroutine
def notmuch():
- yield from []
- return 'abc'
+ yield From(noop())
+ raise Return('abc')
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
@@ -151,7 +158,7 @@ class TaskTests(test_utils.TestCase):
# test coroutine object
gen = notmuch()
- if coroutines._DEBUG or PY35:
+ if PY35 or (coroutines._DEBUG and PY33):
coro_qualname = 'TaskTests.test_task_repr.<locals>.notmuch'
else:
coro_qualname = 'notmuch'
@@ -195,7 +202,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def notmuch():
- # notmuch() function doesn't use yield from: it will be wrapped by
+ # notmuch() function doesn't use yield: it will be wrapped by
# @coroutine decorator
return 123
@@ -208,12 +215,15 @@ class TaskTests(test_utils.TestCase):
# test coroutine object
gen = notmuch()
- if coroutines._DEBUG or PY35:
+ if PY35 or coroutines._DEBUG:
# On Python >= 3.5, generators now inherit the name of the
# function, as expected, and have a qualified name (__qualname__
# attribute).
coro_name = 'notmuch'
- coro_qualname = 'TaskTests.test_task_repr_coro_decorator.<locals>.notmuch'
+ if PY35 or (coroutines._DEBUG and PY33):
+ coro_qualname = 'TaskTests.test_task_repr_coro_decorator.<locals>.notmuch'
+ else:
+ coro_qualname = 'notmuch'
else:
# On Python < 3.5, generators inherit the name of the code, not of
# the function. See: http://bugs.python.org/issue21205
@@ -260,7 +270,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def wait_for(fut):
- return (yield from fut)
+ res = yield From(fut)
+ raise Return(res)
fut = asyncio.Future(loop=self.loop)
task = asyncio.Task(wait_for(fut), loop=self.loop)
@@ -274,9 +285,9 @@ class TaskTests(test_utils.TestCase):
def test_task_basics(self):
@asyncio.coroutine
def outer():
- a = yield from inner1()
- b = yield from inner2()
- return a+b
+ a = yield From(inner1())
+ b = yield From(inner2())
+ raise Return(a+b)
@asyncio.coroutine
def inner1():
@@ -300,10 +311,11 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from asyncio.sleep(10.0, loop=loop)
- return 12
+ yield From(asyncio.sleep(10.0, loop=loop))
+ raise Return(12)
t = asyncio.Task(task(), loop=loop)
+ test_utils.run_briefly(loop)
loop.call_soon(t.cancel)
with self.assertRaises(asyncio.CancelledError):
loop.run_until_complete(t)
@@ -314,9 +326,9 @@ class TaskTests(test_utils.TestCase):
def test_cancel_yield(self):
@asyncio.coroutine
def task():
- yield
- yield
- return 12
+ yield From(None)
+ yield From(None)
+ raise Return(12)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop) # start coro
@@ -332,8 +344,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from f
- return 12
+ yield From(f)
+ raise Return(12)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop) # start task
@@ -348,8 +360,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from f
- return 12
+ yield From(f)
+ raise Return(12)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -370,11 +382,11 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from fut1
+ yield From(fut1)
try:
- yield from fut2
+ yield From(fut2)
except asyncio.CancelledError:
- return 42
+ raise Return(42)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -395,13 +407,13 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def task():
- yield from fut1
+ yield From(fut1)
try:
- yield from fut2
+ yield From(fut2)
except asyncio.CancelledError:
pass
- res = yield from fut3
- return res
+ res = yield From(fut3)
+ raise Return(res)
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -428,8 +440,8 @@ class TaskTests(test_utils.TestCase):
t.cancel()
self.assertTrue(t._must_cancel) # White-box test.
# The sleep should be cancelled immediately.
- yield from asyncio.sleep(100, loop=loop)
- return 12
+ yield From(asyncio.sleep(100, loop=loop))
+ raise Return(12)
t = asyncio.Task(task(), loop=loop)
self.assertRaises(
@@ -451,17 +463,16 @@ class TaskTests(test_utils.TestCase):
loop = self.new_test_loop(gen)
- x = 0
+ non_local = {'x': 0}
waiters = []
@asyncio.coroutine
def task():
- nonlocal x
- while x < 10:
+ while non_local['x'] < 10:
waiters.append(asyncio.sleep(0.1, loop=loop))
- yield from waiters[-1]
- x += 1
- if x == 2:
+ yield From(waiters[-1])
+ non_local['x'] += 1
+ if non_local['x'] == 3:
loop.stop()
t = asyncio.Task(task(), loop=loop)
@@ -470,7 +481,7 @@ class TaskTests(test_utils.TestCase):
self.assertEqual(str(cm.exception),
'Event loop stopped before Future completed.')
self.assertFalse(t.done())
- self.assertEqual(x, 2)
+ self.assertEqual(non_local['x'], 3)
self.assertAlmostEqual(0.3, loop.time())
# close generators
@@ -481,6 +492,7 @@ class TaskTests(test_utils.TestCase):
def test_wait_for(self):
+ @asyncio.coroutine
def gen():
when = yield
self.assertAlmostEqual(0.2, when)
@@ -490,27 +502,34 @@ class TaskTests(test_utils.TestCase):
loop = self.new_test_loop(gen)
- foo_running = None
+ non_local = {'foo_running': None}
@asyncio.coroutine
def foo():
- nonlocal foo_running
- foo_running = True
+ non_local['foo_running'] = True
try:
- yield from asyncio.sleep(0.2, loop=loop)
+ yield From(asyncio.sleep(0.2, loop=loop))
finally:
- foo_running = False
- return 'done'
+ non_local['foo_running'] = False
+ raise Return('done')
fut = asyncio.Task(foo(), loop=loop)
+ test_utils.run_briefly(loop)
with self.assertRaises(asyncio.TimeoutError):
loop.run_until_complete(asyncio.wait_for(fut, 0.1, loop=loop))
+
+ # Trollius issue #2: need to run the loop briefly to ensure that the
+ # cancellation is propagated to all tasks
+ waiter = asyncio.Future(loop=loop)
+ fut.add_done_callback(lambda f: waiter.set_result(True))
+ loop.run_until_complete(waiter)
+
self.assertTrue(fut.done())
# it should have been cancelled due to the timeout
self.assertTrue(fut.cancelled())
self.assertAlmostEqual(0.1, loop.time())
- self.assertEqual(foo_running, False)
+ self.assertEqual(non_local['foo_running'], False)
def test_wait_for_blocking(self):
loop = self.new_test_loop()
@@ -537,17 +556,24 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
- yield from asyncio.sleep(0.2, loop=loop)
- return 'done'
+ yield From(asyncio.sleep(0.2, loop=loop))
+ raise Return('done')
asyncio.set_event_loop(loop)
try:
fut = asyncio.Task(foo(), loop=loop)
+ test_utils.run_briefly(loop)
with self.assertRaises(asyncio.TimeoutError):
loop.run_until_complete(asyncio.wait_for(fut, 0.01))
finally:
asyncio.set_event_loop(None)
+ # Trollius issue #2: need to run the loop briefly to ensure that the
+ # cancellation is propagated to all tasks
+ waiter = asyncio.Future(loop=loop)
+ fut.add_done_callback(lambda f: waiter.set_result(True))
+ loop.run_until_complete(waiter)
+
self.assertAlmostEqual(0.01, loop.time())
self.assertTrue(fut.done())
self.assertTrue(fut.cancelled())
@@ -583,10 +609,10 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
- done, pending = yield from asyncio.wait([b, a], loop=loop)
+ done, pending = yield From(asyncio.wait([b, a], loop=loop))
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
- return 42
+ raise Return(42)
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertEqual(res, 42)
@@ -613,10 +639,10 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
- done, pending = yield from asyncio.wait([b, a])
+ done, pending = yield From(asyncio.wait([b, a]))
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
- return 42
+ raise Return(42)
asyncio.set_event_loop(loop)
res = loop.run_until_complete(
@@ -637,7 +663,7 @@ class TaskTests(test_utils.TestCase):
done, pending = self.loop.run_until_complete(task)
self.assertFalse(pending)
- self.assertEqual(set(f.result() for f in done), {'test', 'spam'})
+ self.assertEqual(set(f.result() for f in done), set(('test', 'spam')))
def test_wait_errors(self):
self.assertRaises(
@@ -671,8 +697,8 @@ class TaskTests(test_utils.TestCase):
loop=loop)
done, pending = loop.run_until_complete(task)
- self.assertEqual({b}, done)
- self.assertEqual({a}, pending)
+ self.assertEqual(set((b,)), done)
+ self.assertEqual(set((a,)), pending)
self.assertFalse(a.done())
self.assertTrue(b.done())
self.assertIsNone(b.result())
@@ -688,12 +714,12 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def coro1():
- yield
+ yield From(None)
@asyncio.coroutine
def coro2():
- yield
- yield
+ yield From(None)
+ yield From(None)
a = asyncio.Task(coro1(), loop=self.loop)
b = asyncio.Task(coro2(), loop=self.loop)
@@ -703,7 +729,7 @@ class TaskTests(test_utils.TestCase):
loop=self.loop)
done, pending = self.loop.run_until_complete(task)
- self.assertEqual({a, b}, done)
+ self.assertEqual(set((a, b)), done)
self.assertTrue(a.done())
self.assertIsNone(a.result())
self.assertTrue(b.done())
@@ -732,8 +758,8 @@ class TaskTests(test_utils.TestCase):
loop=loop)
done, pending = loop.run_until_complete(task)
- self.assertEqual({b}, done)
- self.assertEqual({a}, pending)
+ self.assertEqual(set((b,)), done)
+ self.assertEqual(set((a,)), pending)
self.assertAlmostEqual(0, loop.time())
# move forward to close generator
@@ -756,7 +782,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def exc():
- yield from asyncio.sleep(0.01, loop=loop)
+ yield From(asyncio.sleep(0.01, loop=loop))
raise ZeroDivisionError('err')
b = asyncio.Task(exc(), loop=loop)
@@ -764,8 +790,8 @@ class TaskTests(test_utils.TestCase):
loop=loop)
done, pending = loop.run_until_complete(task)
- self.assertEqual({b}, done)
- self.assertEqual({a}, pending)
+ self.assertEqual(set((b,)), done)
+ self.assertEqual(set((a,)), pending)
self.assertAlmostEqual(0.01, loop.time())
# move forward to close generator
@@ -787,14 +813,14 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def sleeper():
- yield from asyncio.sleep(0.15, loop=loop)
+ yield From(asyncio.sleep(0.15, loop=loop))
raise ZeroDivisionError('really')
b = asyncio.Task(sleeper(), loop=loop)
@asyncio.coroutine
def foo():
- done, pending = yield from asyncio.wait([b, a], loop=loop)
+ done, pending = yield From(asyncio.wait([b, a], loop=loop))
self.assertEqual(len(done), 2)
self.assertEqual(pending, set())
errors = set(f for f in done if f.exception() is not None)
@@ -824,8 +850,8 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
- done, pending = yield from asyncio.wait([b, a], timeout=0.11,
- loop=loop)
+ done, pending = yield From(asyncio.wait([b, a], timeout=0.11,
+ loop=loop))
self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b]))
@@ -875,17 +901,16 @@ class TaskTests(test_utils.TestCase):
# disable "slow callback" warning
loop.slow_callback_duration = 1.0
completed = set()
- time_shifted = False
+ non_local = {'time_shifted': False}
@asyncio.coroutine
def sleeper(dt, x):
- nonlocal time_shifted
- yield from asyncio.sleep(dt, loop=loop)
+ yield From(asyncio.sleep(dt, loop=loop))
completed.add(x)
- if not time_shifted and 'a' in completed and 'b' in completed:
- time_shifted = True
+ if not non_local['time_shifted'] and 'a' in completed and 'b' in completed:
+ non_local['time_shifted'] = True
loop.advance_time(0.14)
- return x
+ raise Return(x)
a = sleeper(0.01, 'a')
b = sleeper(0.01, 'b')
@@ -895,8 +920,8 @@ class TaskTests(test_utils.TestCase):
def foo():
values = []
for f in asyncio.as_completed([b, c, a], loop=loop):
- values.append((yield from f))
- return values
+ values.append((yield From(f)))
+ raise Return(values)
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
@@ -928,11 +953,11 @@ class TaskTests(test_utils.TestCase):
if values:
loop.advance_time(0.02)
try:
- v = yield from f
+ v = yield From(f)
values.append((1, v))
except asyncio.TimeoutError as exc:
values.append((2, exc))
- return values
+ raise Return(values)
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertEqual(len(res), 2, res)
@@ -959,7 +984,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def foo():
for f in asyncio.as_completed([a], timeout=1, loop=loop):
- v = yield from f
+ v = yield From(f)
self.assertEqual(v, 'a')
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
@@ -975,7 +1000,7 @@ class TaskTests(test_utils.TestCase):
a = asyncio.sleep(0.05, 'a', loop=loop)
b = asyncio.sleep(0.10, 'b', loop=loop)
- fs = {a, b}
+ fs = set((a, b))
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
@@ -1000,12 +1025,12 @@ class TaskTests(test_utils.TestCase):
a = asyncio.sleep(0.05, 'a', loop=loop)
b = asyncio.sleep(0.05, 'b', loop=loop)
- fs = {a, b}
+ fs = set((a, b))
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
waiter = asyncio.wait(futs, loop=loop)
done, pending = loop.run_until_complete(waiter)
- self.assertEqual(set(f.result() for f in done), {'a', 'b'})
+ self.assertEqual(set(f.result() for f in done), set(('a', 'b')))
def test_as_completed_duplicate_coroutines(self):
@@ -1019,13 +1044,13 @@ class TaskTests(test_utils.TestCase):
c = coro('ham')
for f in asyncio.as_completed([c, c, coro('spam')],
loop=self.loop):
- result.append((yield from f))
- return result
+ result.append((yield From(f)))
+ raise Return(result)
fut = asyncio.Task(runner(), loop=self.loop)
self.loop.run_until_complete(fut)
result = fut.result()
- self.assertEqual(set(result), {'ham', 'spam'})
+ self.assertEqual(set(result), set(('ham', 'spam')))
self.assertEqual(len(result), 2)
def test_sleep(self):
@@ -1041,9 +1066,9 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def sleeper(dt, arg):
- yield from asyncio.sleep(dt/2, loop=loop)
- res = yield from asyncio.sleep(dt/2, arg, loop=loop)
- return res
+ yield From(asyncio.sleep(dt/2, loop=loop))
+ res = yield From(asyncio.sleep(dt/2, arg, loop=loop))
+ raise Return(res)
t = asyncio.Task(sleeper(0.1, 'yeah'), loop=loop)
loop.run_until_complete(t)
@@ -1063,22 +1088,21 @@ class TaskTests(test_utils.TestCase):
t = asyncio.Task(asyncio.sleep(10.0, 'yeah', loop=loop),
loop=loop)
- handle = None
+ non_local = {'handle': None}
orig_call_later = loop.call_later
def call_later(delay, callback, *args):
- nonlocal handle
- handle = orig_call_later(delay, callback, *args)
- return handle
+ non_local['handle'] = orig_call_later(delay, callback, *args)
+ return non_local['handle']
loop.call_later = call_later
test_utils.run_briefly(loop)
- self.assertFalse(handle._cancelled)
+ self.assertFalse(non_local['handle']._cancelled)
t.cancel()
test_utils.run_briefly(loop)
- self.assertTrue(handle._cancelled)
+ self.assertTrue(non_local['handle']._cancelled)
def test_task_cancel_sleeping_task(self):
@@ -1093,18 +1117,18 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def sleep(dt):
- yield from asyncio.sleep(dt, loop=loop)
+ yield From(asyncio.sleep(dt, loop=loop))
@asyncio.coroutine
def doit():
sleeper = asyncio.Task(sleep(5000), loop=loop)
loop.call_later(0.1, sleeper.cancel)
try:
- yield from sleeper
+ yield From(sleeper)
except asyncio.CancelledError:
- return 'cancelled'
+ raise Return('cancelled')
else:
- return 'slept in'
+ raise Return('slept in')
doer = doit()
self.assertEqual(loop.run_until_complete(doer), 'cancelled')
@@ -1115,7 +1139,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def coro():
- yield from fut
+ yield From(fut)
task = asyncio.Task(coro(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -1143,9 +1167,9 @@ class TaskTests(test_utils.TestCase):
def test_step_result(self):
@asyncio.coroutine
def notmuch():
- yield None
- yield 1
- return 'ko'
+ yield From(None)
+ yield From(1)
+ raise Return('ko')
self.assertRaises(
RuntimeError, self.loop.run_until_complete, notmuch())
@@ -1156,19 +1180,18 @@ class TaskTests(test_utils.TestCase):
class Fut(asyncio.Future):
def __init__(self, *args, **kwds):
self.cb_added = False
- super().__init__(*args, **kwds)
+ super(Fut, self).__init__(*args, **kwds)
def add_done_callback(self, fn):
self.cb_added = True
- super().add_done_callback(fn)
+ super(Fut, self).add_done_callback(fn)
fut = Fut(loop=self.loop)
- result = None
+ non_local = {'result': None}
@asyncio.coroutine
def wait_for_future():
- nonlocal result
- result = yield from fut
+ non_local['result'] = yield From(fut)
t = asyncio.Task(wait_for_future(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -1177,7 +1200,7 @@ class TaskTests(test_utils.TestCase):
res = object()
fut.set_result(res)
test_utils.run_briefly(self.loop)
- self.assertIs(res, result)
+ self.assertIs(res, non_local['result'])
self.assertTrue(t.done())
self.assertIsNone(t.result())
@@ -1203,24 +1226,24 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def sleeper():
- yield from asyncio.sleep(10, loop=loop)
+ yield From(asyncio.sleep(10, loop=loop))
base_exc = BaseException()
@asyncio.coroutine
def notmutch():
try:
- yield from sleeper()
+ yield From(sleeper())
except asyncio.CancelledError:
raise base_exc
task = asyncio.Task(notmutch(), loop=loop)
- test_utils.run_briefly(loop)
+ test_utils.run_briefly(loop, 2)
task.cancel()
self.assertFalse(task.done())
- self.assertRaises(BaseException, test_utils.run_briefly, loop)
+ self.assertRaises(BaseException, test_utils.run_briefly, loop, 2)
self.assertTrue(task.done())
self.assertFalse(task.cancelled())
@@ -1241,37 +1264,6 @@ class TaskTests(test_utils.TestCase):
yield
self.assertTrue(asyncio.iscoroutinefunction(fn2))
- def test_yield_vs_yield_from(self):
- fut = asyncio.Future(loop=self.loop)
-
- @asyncio.coroutine
- def wait_for_future():
- yield fut
-
- task = wait_for_future()
- with self.assertRaises(RuntimeError):
- self.loop.run_until_complete(task)
-
- self.assertFalse(fut.done())
-
- def test_yield_vs_yield_from_generator(self):
- @asyncio.coroutine
- def coro():
- yield
-
- @asyncio.coroutine
- def wait_for_future():
- gen = coro()
- try:
- yield gen
- finally:
- gen.close()
-
- task = wait_for_future()
- self.assertRaises(
- RuntimeError,
- self.loop.run_until_complete, task)
-
def test_coroutine_non_gen_function(self):
@asyncio.coroutine
def func():
@@ -1322,7 +1314,7 @@ class TaskTests(test_utils.TestCase):
@asyncio.coroutine
def coro1(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
- yield from fut1
+ yield From(fut1)
self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
fut2.set_result(True)
@@ -1330,7 +1322,7 @@ class TaskTests(test_utils.TestCase):
def coro2(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
fut1.set_result(True)
- yield from fut2
+ yield From(fut2)
self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
task1 = asyncio.Task(coro1(self.loop), loop=self.loop)
@@ -1345,54 +1337,50 @@ class TaskTests(test_utils.TestCase):
def test_yield_future_passes_cancel(self):
# Cancelling outer() cancels inner() cancels waiter.
- proof = 0
+ non_local = {'proof': 0}
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
- nonlocal proof
try:
- yield from waiter
+ yield From(waiter)
except asyncio.CancelledError:
- proof += 1
+ non_local['proof'] += 1
raise
else:
self.fail('got past sleep() in inner()')
@asyncio.coroutine
def outer():
- nonlocal proof
try:
- yield from inner()
+ yield From(inner())
except asyncio.CancelledError:
- proof += 100 # Expect this path.
+ non_local['proof'] += 100 # Expect this path.
else:
- proof += 10
+ non_local['proof'] += 10
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
self.loop.run_until_complete(f)
- self.assertEqual(proof, 101)
+ self.assertEqual(non_local['proof'], 101)
self.assertTrue(waiter.cancelled())
def test_yield_wait_does_not_shield_cancel(self):
# Cancelling outer() makes wait() return early, leaves inner()
# running.
- proof = 0
+ non_local = {'proof': 0}
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
- nonlocal proof
- yield from waiter
- proof += 1
+ yield From(waiter)
+ non_local['proof'] += 1
@asyncio.coroutine
def outer():
- nonlocal proof
- d, p = yield from asyncio.wait([inner()], loop=self.loop)
- proof += 100
+ d, p = yield From(asyncio.wait([inner()], loop=self.loop))
+ non_local['proof'] += 100
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -1401,7 +1389,7 @@ class TaskTests(test_utils.TestCase):
asyncio.CancelledError, self.loop.run_until_complete, f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
- self.assertEqual(proof, 1)
+ self.assertEqual(non_local['proof'], 1)
def test_shield_result(self):
inner = asyncio.Future(loop=self.loop)
@@ -1435,20 +1423,18 @@ class TaskTests(test_utils.TestCase):
def test_shield_effect(self):
# Cancelling outer() does not affect inner().
- proof = 0
+ non_local = {'proof': 0}
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
- nonlocal proof
- yield from waiter
- proof += 1
+ yield From(waiter)
+ non_local['proof'] += 1
@asyncio.coroutine
def outer():
- nonlocal proof
- yield from asyncio.shield(inner(), loop=self.loop)
- proof += 100
+ yield From(asyncio.shield(inner(), loop=self.loop))
+ non_local['proof'] += 100
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
@@ -1457,7 +1443,7 @@ class TaskTests(test_utils.TestCase):
self.loop.run_until_complete(f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
- self.assertEqual(proof, 1)
+ self.assertEqual(non_local['proof'], 1)
def test_shield_gather(self):
child1 = asyncio.Future(loop=self.loop)
@@ -1526,7 +1512,7 @@ class TaskTests(test_utils.TestCase):
def coro():
# The actual coroutine.
self.assertTrue(gen.gi_running)
- yield from fut
+ yield From(fut)
# A completed Future used to run the coroutine.
fut = asyncio.Future(loop=self.loop)
@@ -1568,13 +1554,15 @@ class TaskTests(test_utils.TestCase):
try:
@asyncio.coroutine
def t1():
- return (yield from t2())
+ res = yield From(t2())
+ raise Return(res)
@asyncio.coroutine
def t2():
f = asyncio.Future(loop=self.loop)
asyncio.Task(t3(f), loop=self.loop)
- return (yield from f)
+ res = yield From(f)
+ raise Return(res)
@asyncio.coroutine
def t3(f):
@@ -1589,7 +1577,7 @@ class TaskTests(test_utils.TestCase):
def test_yield_from_corowrapper_send(self):
def foo():
a = yield
- return a
+ raise Return(a)
def call(arg):
cw = asyncio.coroutines.CoroWrapper(foo(), foo)
@@ -1597,7 +1585,8 @@ class TaskTests(test_utils.TestCase):
try:
cw.send(arg)
except StopIteration as ex:
- return ex.args[0]
+ ex.raised = True
+ return ex.value
else:
raise AssertionError('StopIteration was expected')
@@ -1606,18 +1595,19 @@ class TaskTests(test_utils.TestCase):
def test_corowrapper_weakref(self):
wd = weakref.WeakValueDictionary()
- def foo(): yield from []
+ def foo():
+ yield From(None)
cw = asyncio.coroutines.CoroWrapper(foo(), foo)
wd['cw'] = cw # Would fail without __weakref__ slot.
cw.gen = None # Suppress warning from __del__.
- @unittest.skipUnless(PY34,
- 'need python 3.4 or later')
+ @test_utils.skipUnless(PY34,
+ 'need python 3.4 or later')
def test_log_destroyed_pending_task(self):
@asyncio.coroutine
def kill_me(loop):
future = asyncio.Future(loop=loop)
- yield from future
+ yield From(future)
# at this point, the only reference to kill_me() task is
# the Task._wakeup() method in future._callbacks
raise Exception("code never reached")
@@ -1629,7 +1619,7 @@ class TaskTests(test_utils.TestCase):
# schedule the task
coro = kill_me(self.loop)
task = asyncio.async(coro, loop=self.loop)
- self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), {task})
+ self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), set((task,)))
# execute the task so it waits for future
self.loop._run_once()
@@ -1653,7 +1643,7 @@ class TaskTests(test_utils.TestCase):
})
mock_handler.reset_mock()
- @mock.patch('asyncio.coroutines.logger')
+ @mock.patch('trollius.coroutines.logger')
def test_coroutine_never_yielded(self, m_log):
debug = asyncio.coroutines._DEBUG
try:
@@ -1664,7 +1654,7 @@ class TaskTests(test_utils.TestCase):
finally:
asyncio.coroutines._DEBUG = debug
- tb_filename = __file__
+ tb_filename = sys._getframe().f_code.co_filename
tb_lineno = sys._getframe().f_lineno + 2
# create a coroutine object but don't use it
coro_noop()
@@ -1673,12 +1663,13 @@ class TaskTests(test_utils.TestCase):
self.assertTrue(m_log.error.called)
message = m_log.error.call_args[0][0]
func_filename, func_lineno = test_utils.get_function_source(coro_noop)
+ coro_name = getattr(coro_noop, '__qualname__', coro_noop.__name__)
regex = (r'^<CoroWrapper %s\(\) .* at %s:%s, .*> was never yielded from\n'
r'Coroutine object created at \(most recent call last\):\n'
r'.*\n'
r' File "%s", line %s, in test_coroutine_never_yielded\n'
r' coro_noop\(\)$'
- % (re.escape(coro_noop.__qualname__),
+ % (re.escape(coro_name),
re.escape(func_filename), func_lineno,
re.escape(tb_filename), tb_lineno))
@@ -1688,14 +1679,23 @@ class TaskTests(test_utils.TestCase):
self.loop.set_debug(True)
task = asyncio.Task(coroutine_function(), loop=self.loop)
- lineno = sys._getframe().f_lineno - 1
- self.assertIsInstance(task._source_traceback, list)
- self.assertEqual(task._source_traceback[-1][:3],
- (__file__,
- lineno,
- 'test_task_source_traceback'))
+ self.check_soure_traceback(task._source_traceback, -1)
self.loop.run_until_complete(task)
+ def test_coroutine_class(self):
+ # Trollius issue #9
+ self.loop.set_debug(True)
+
+ class MyClass(object):
+ def __call__(self):
+ return 7
+
+ obj = MyClass()
+ coro_func = asyncio.coroutine(obj)
+ coro_obj = coro_func()
+ res = self.loop.run_until_complete(coro_obj)
+ self.assertEqual(res, 7)
+
class GatherTestsBase:
@@ -1771,30 +1771,19 @@ class GatherTestsBase:
aio_path = os.path.dirname(os.path.dirname(asyncio.__file__))
code = '\n'.join((
- 'import asyncio.coroutines',
- 'print(asyncio.coroutines._DEBUG)'))
-
- # Test with -E to not fail if the unit test was run with
- # PYTHONASYNCIODEBUG set to a non-empty string
- sts, stdout, stderr = support.assert_python_ok('-E', '-c', code,
- PYTHONPATH=aio_path)
- self.assertEqual(stdout.rstrip(), b'False')
+ 'import trollius.coroutines',
+ 'print(trollius.coroutines._DEBUG)'))
sts, stdout, stderr = support.assert_python_ok('-c', code,
- PYTHONASYNCIODEBUG='',
+ TROLLIUSDEBUG='',
PYTHONPATH=aio_path)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = support.assert_python_ok('-c', code,
- PYTHONASYNCIODEBUG='1',
+ TROLLIUSDEBUG='1',
PYTHONPATH=aio_path)
self.assertEqual(stdout.rstrip(), b'True')
- sts, stdout, stderr = support.assert_python_ok('-E', '-c', code,
- PYTHONASYNCIODEBUG='1',
- PYTHONPATH=aio_path)
- self.assertEqual(stdout.rstrip(), b'False')
-
class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
@@ -1883,7 +1872,7 @@ class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
def setUp(self):
- super().setUp()
+ super(CoroutineGatherTests, self).setUp()
asyncio.set_event_loop(self.one_loop)
def wrap_futures(self, *futures):
@@ -1891,7 +1880,8 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
for fut in futures:
@asyncio.coroutine
def coro(fut=fut):
- return (yield from fut)
+ result = (yield From(fut))
+ raise Return(result)
coros.append(coro())
return coros
@@ -1923,44 +1913,42 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
def test_cancellation_broadcast(self):
# Cancelling outer() cancels all children.
- proof = 0
+ non_local = {'proof': 0}
waiter = asyncio.Future(loop=self.one_loop)
@asyncio.coroutine
def inner():
- nonlocal proof
- yield from waiter
- proof += 1
+ yield From(waiter)
+ non_local['proof'] += 1
child1 = asyncio.async(inner(), loop=self.one_loop)
child2 = asyncio.async(inner(), loop=self.one_loop)
- gatherer = None
+ non_local['gatherer'] = None
@asyncio.coroutine
def outer():
- nonlocal proof, gatherer
- gatherer = asyncio.gather(child1, child2, loop=self.one_loop)
- yield from gatherer
- proof += 100
+ non_local['gatherer'] = asyncio.gather(child1, child2, loop=self.one_loop)
+ yield From(non_local['gatherer'])
+ non_local['proof'] += 100
f = asyncio.async(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
self.assertTrue(f.cancel())
with self.assertRaises(asyncio.CancelledError):
self.one_loop.run_until_complete(f)
- self.assertFalse(gatherer.cancel())
+ self.assertFalse(non_local['gatherer'].cancel())
self.assertTrue(waiter.cancelled())
self.assertTrue(child1.cancelled())
self.assertTrue(child2.cancelled())
test_utils.run_briefly(self.one_loop)
- self.assertEqual(proof, 0)
+ self.assertEqual(non_local['proof'], 0)
def test_exception_marking(self):
# Test for the first line marked "Mark exception retrieved."
@asyncio.coroutine
def inner(f):
- yield from f
+ yield From(f)
raise RuntimeError('should not be ignored')
a = asyncio.Future(loop=self.one_loop)
@@ -1968,7 +1956,7 @@ class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
@asyncio.coroutine
def outer():
- yield from asyncio.gather(inner(a), inner(b), loop=self.one_loop)
+ yield From(asyncio.gather(inner(a), inner(b), loop=self.one_loop))
f = asyncio.async(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
diff --git a/tests/test_transports.py b/tests/test_transports.py
index 3b6e3d6..42f7729 100644
--- a/tests/test_transports.py
+++ b/tests/test_transports.py
@@ -1,13 +1,20 @@
"""Tests for transports.py."""
import unittest
-from unittest import mock
-import asyncio
-from asyncio import transports
+import trollius as asyncio
+from trollius import test_utils
+from trollius import transports
+from trollius.test_utils import mock
+try:
+ memoryview
+except NameError:
+ # Python 2.6
+ memoryview = buffer
-class TransportTests(unittest.TestCase):
+
+class TransportTests(test_utils.TestCase):
def test_ctor_extra_is_none(self):
transport = asyncio.Transport()
diff --git a/tests/test_unix_events.py b/tests/test_unix_events.py
index 4b825dc..f6af226 100644
--- a/tests/test_unix_events.py
+++ b/tests/test_unix_events.py
@@ -1,6 +1,7 @@
"""Tests for unix_events.py."""
import collections
+import contextlib
import errno
import io
import os
@@ -11,22 +12,23 @@ import sys
import tempfile
import threading
import unittest
-from unittest import mock
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
-import asyncio
-from asyncio import log
-from asyncio import test_utils
-from asyncio import unix_events
+import trollius as asyncio
+from trollius import log
+from trollius import test_utils
+from trollius import unix_events
+from trollius.py33_exceptions import BlockingIOError, ChildProcessError
+from trollius.test_utils import mock
MOCK_ANY = mock.ANY
-@unittest.skipUnless(signal, 'Signals are not supported')
+@test_utils.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
def setUp(self):
@@ -51,7 +53,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop._handle_signal(signal.NSIG + 1)
self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_setup_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.set_wakeup_fd.side_effect = ValueError
@@ -61,13 +63,13 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_coroutine_error(self, m_signal):
m_signal.NSIG = signal.NSIG
@asyncio.coroutine
def simple_coroutine():
- yield from []
+ yield None
# callback must not be a coroutine function
coro_func = simple_coroutine
@@ -79,7 +81,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop.add_signal_handler,
signal.SIGINT, func)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_add_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
@@ -89,7 +91,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertIsInstance(h, asyncio.Handle)
self.assertEqual(h._callback, cb)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_add_signal_handler_install_error(self, m_signal):
m_signal.NSIG = signal.NSIG
@@ -107,8 +109,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
- @mock.patch('asyncio.unix_events.signal')
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.unix_events.signal')
+ @mock.patch('trollius.base_events.logger')
def test_add_signal_handler_install_error2(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
@@ -124,8 +126,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertFalse(m_logging.info.called)
self.assertEqual(1, m_signal.set_wakeup_fd.call_count)
- @mock.patch('asyncio.unix_events.signal')
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.unix_events.signal')
+ @mock.patch('trollius.base_events.logger')
def test_add_signal_handler_install_error3(self, m_logging, m_signal):
class Err(OSError):
errno = errno.EINVAL
@@ -139,7 +141,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertFalse(m_logging.info.called)
self.assertEqual(2, m_signal.set_wakeup_fd.call_count)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
@@ -152,7 +154,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertEqual(
(signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.SIGINT = signal.SIGINT
@@ -169,8 +171,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
(signal.SIGINT, m_signal.default_int_handler),
m_signal.signal.call_args[0])
- @mock.patch('asyncio.unix_events.signal')
- @mock.patch('asyncio.base_events.logger')
+ @mock.patch('trollius.unix_events.signal')
+ @mock.patch('trollius.base_events.logger')
def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
@@ -180,7 +182,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.loop.remove_signal_handler(signal.SIGHUP)
self.assertTrue(m_logging.info)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_error(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
@@ -190,7 +192,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertRaises(
OSError, self.loop.remove_signal_handler, signal.SIGHUP)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_remove_signal_handler_error2(self, m_signal):
m_signal.NSIG = signal.NSIG
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
@@ -202,7 +204,7 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
self.assertRaises(
RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)
- @mock.patch('asyncio.unix_events.signal')
+ @mock.patch('trollius.unix_events.signal')
def test_close(self, m_signal):
m_signal.NSIG = signal.NSIG
@@ -219,8 +221,8 @@ class SelectorEventLoopSignalTests(test_utils.TestCase):
m_signal.set_wakeup_fd.assert_called_once_with(-1)
-@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
- 'UNIX Sockets are not supported')
+@test_utils.skipUnless(hasattr(socket, 'AF_UNIX'),
+ 'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
def setUp(self):
@@ -231,7 +233,7 @@ class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
- with sock:
+ with contextlib.closing(sock):
coro = self.loop.create_unix_server(lambda: None, path)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
@@ -259,18 +261,19 @@ class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
def test_create_unix_server_path_inetsock(self):
sock = socket.socket()
- with sock:
+ with contextlib.closing(sock):
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Socket was expected'):
self.loop.run_until_complete(coro)
- @mock.patch('asyncio.unix_events.socket')
+ @mock.patch('trollius.unix_events.socket')
def test_create_unix_server_bind_error(self, m_socket):
# Ensure that the socket is closed on any bind error
sock = mock.Mock()
m_socket.socket.return_value = sock
+ m_socket.error = socket.error
sock.bind.side_effect = OSError
coro = self.loop.create_unix_server(lambda: None, path="/test")
@@ -322,7 +325,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
- blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
+ blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
@@ -381,7 +384,7 @@ class UnixReadPipeTransportTests(test_utils.TestCase):
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.data_received.called)
- @mock.patch('asyncio.log.logger.error')
+ @mock.patch('trollius.log.logger.error')
@mock.patch('os.read')
def test__read_ready_error(self, m_read, m_logexc):
tr = unix_events._UnixReadPipeTransport(
@@ -485,7 +488,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
- blocking_patcher = mock.patch('asyncio.unix_events._set_nonblocking')
+ blocking_patcher = mock.patch('trollius.unix_events._set_nonblocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
@@ -571,7 +574,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
- @mock.patch('asyncio.unix_events.logger')
+ @mock.patch('trollius.unix_events.logger')
@mock.patch('os.write')
def test_write_err(self, m_write, m_log):
tr = unix_events._UnixWritePipeTransport(
@@ -671,7 +674,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual([b'data'], tr._buffer)
- @mock.patch('asyncio.log.logger.error')
+ @mock.patch('trollius.log.logger.error')
@mock.patch('os.write')
def test__write_ready_err(self, m_write, m_logexc):
tr = unix_events._UnixWritePipeTransport(
@@ -793,7 +796,7 @@ class UnixWritePipeTransportTests(test_utils.TestCase):
self.assertFalse(self.protocol.connection_lost.called)
-class AbstractChildWatcherTests(unittest.TestCase):
+class AbstractChildWatcherTests(test_utils.TestCase):
def test_not_implemented(self):
f = mock.Mock()
@@ -812,7 +815,7 @@ class AbstractChildWatcherTests(unittest.TestCase):
NotImplementedError, watcher.__exit__, f, f, f)
-class BaseChildWatcherTests(unittest.TestCase):
+class BaseChildWatcherTests(test_utils.TestCase):
def test_not_implemented(self):
f = mock.Mock()
@@ -882,19 +885,27 @@ class ChildWatcherTestsMixin:
def waitpid_mocks(func):
def wrapped_func(self):
+ exit_stack = []
+
def patch(target, wrapper):
- return mock.patch(target, wraps=wrapper,
- new_callable=mock.Mock)
-
- with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
- patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
- patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
- patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
- patch('os.waitpid', self.waitpid) as m_waitpid:
+ m = mock.patch(target, wraps=wrapper)
+ exit_stack.append(m)
+ return m.__enter__()
+
+ m_waitpid = patch('os.waitpid', self.waitpid)
+ m_WIFEXITED = patch('os.WIFEXITED', self.WIFEXITED)
+ m_WIFSIGNALED = patch('os.WIFSIGNALED', self.WIFSIGNALED)
+ m_WEXITSTATUS = patch('os.WEXITSTATUS', self.WEXITSTATUS)
+ m_WTERMSIG = patch('os.WTERMSIG', self.WTERMSIG)
+ try:
func(self, WaitPidMocks(m_waitpid,
m_WIFEXITED, m_WIFSIGNALED,
m_WEXITSTATUS, m_WTERMSIG,
))
+ finally:
+ for obj in reversed(exit_stack):
+ obj.__exit__(None, None, None)
+
return wrapped_func
@waitpid_mocks
@@ -1367,17 +1378,18 @@ class ChildWatcherTestsMixin:
callback1 = mock.Mock()
callback2 = mock.Mock()
- with self.ignore_warnings, self.watcher:
- self.running = True
- # child 1 terminates
- self.add_zombie(591, 7)
- # an unknown child terminates
- self.add_zombie(593, 17)
+ with self.ignore_warnings:
+ with self.watcher:
+ self.running = True
+ # child 1 terminates
+ self.add_zombie(591, 7)
+ # an unknown child terminates
+ self.add_zombie(593, 17)
- self.watcher._sig_chld()
+ self.watcher._sig_chld()
- self.watcher.add_child_handler(591, callback1)
- self.watcher.add_child_handler(592, callback2)
+ self.watcher.add_child_handler(591, callback1)
+ self.watcher.add_child_handler(592, callback2)
callback1.assert_called_once_with(591, 7)
self.assertFalse(callback2.called)
@@ -1396,15 +1408,15 @@ class ChildWatcherTestsMixin:
self.loop = self.new_test_loop()
patch = mock.patch.object
- with patch(old_loop, "remove_signal_handler") as m_old_remove, \
- patch(self.loop, "add_signal_handler") as m_new_add:
+ with patch(old_loop, "remove_signal_handler") as m_old_remove:
+ with patch(self.loop, "add_signal_handler") as m_new_add:
- self.watcher.attach_loop(self.loop)
+ self.watcher.attach_loop(self.loop)
- m_old_remove.assert_called_once_with(
- signal.SIGCHLD)
- m_new_add.assert_called_once_with(
- signal.SIGCHLD, self.watcher._sig_chld)
+ m_old_remove.assert_called_once_with(
+ signal.SIGCHLD)
+ m_new_add.assert_called_once_with(
+ signal.SIGCHLD, self.watcher._sig_chld)
# child terminates
self.running = False
@@ -1516,7 +1528,7 @@ class FastChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase):
return asyncio.FastChildWatcher()
-class PolicyTests(unittest.TestCase):
+class PolicyTests(test_utils.TestCase):
def create_policy(self):
return asyncio.DefaultEventLoopPolicy()
diff --git a/tests/test_windows_events.py b/tests/test_windows_events.py
index 9b264a6..4dedf8e 100644
--- a/tests/test_windows_events.py
+++ b/tests/test_windows_events.py
@@ -1,16 +1,17 @@
+from trollius import test_utils
import os
import sys
import unittest
if sys.platform != 'win32':
- raise unittest.SkipTest('Windows only')
+ raise test_utils.SkipTest('Windows only')
-import _winapi
-
-import asyncio
-from asyncio import _overlapped
-from asyncio import test_utils
-from asyncio import windows_events
+import trollius as asyncio
+from trollius import Return, From
+from trollius import _overlapped
+from trollius import py33_winapi as _winapi
+from trollius import windows_events
+from trollius.py33_exceptions import PermissionError, FileNotFoundError
class UpperProto(asyncio.Protocol):
@@ -57,38 +58,38 @@ class ProactorTests(test_utils.TestCase):
ADDRESS = r'\\.\pipe\_test_pipe-%s' % os.getpid()
with self.assertRaises(FileNotFoundError):
- yield from self.loop.create_pipe_connection(
- asyncio.Protocol, ADDRESS)
+ yield From(self.loop.create_pipe_connection(
+ asyncio.Protocol, ADDRESS))
- [server] = yield from self.loop.start_serving_pipe(
- UpperProto, ADDRESS)
+ [server] = yield From(self.loop.start_serving_pipe(
+ UpperProto, ADDRESS))
self.assertIsInstance(server, windows_events.PipeServer)
clients = []
for i in range(5):
stream_reader = asyncio.StreamReader(loop=self.loop)
protocol = asyncio.StreamReaderProtocol(stream_reader)
- trans, proto = yield from self.loop.create_pipe_connection(
- lambda: protocol, ADDRESS)
+ trans, proto = yield From(self.loop.create_pipe_connection(
+ lambda: protocol, ADDRESS))
self.assertIsInstance(trans, asyncio.Transport)
self.assertEqual(protocol, proto)
clients.append((stream_reader, trans))
for i, (r, w) in enumerate(clients):
- w.write('lower-{}\n'.format(i).encode())
+ w.write('lower-{0}\n'.format(i).encode())
for i, (r, w) in enumerate(clients):
- response = yield from r.readline()
- self.assertEqual(response, 'LOWER-{}\n'.format(i).encode())
+ response = yield From(r.readline())
+ self.assertEqual(response, 'LOWER-{0}\n'.format(i).encode())
w.close()
server.close()
with self.assertRaises(FileNotFoundError):
- yield from self.loop.create_pipe_connection(
- asyncio.Protocol, ADDRESS)
+ yield From(self.loop.create_pipe_connection(
+ asyncio.Protocol, ADDRESS))
- return 'done'
+ raise Return('done')
def test_wait_for_handle(self):
event = _overlapped.CreateEvent(None, True, False, None)
diff --git a/tests/test_windows_utils.py b/tests/test_windows_utils.py
index 92db24e..5188d7f 100644
--- a/tests/test_windows_utils.py
+++ b/tests/test_windows_utils.py
@@ -3,16 +3,17 @@
import socket
import sys
import unittest
-from unittest import mock
if sys.platform != 'win32':
- raise unittest.SkipTest('Windows only')
+ from trollius.test_utils import SkipTest
+ raise SkipTest('Windows only')
-import _winapi
-
-from asyncio import _overlapped
-from asyncio import test_support as support
-from asyncio import windows_utils
+from trollius import _overlapped
+from trollius import py33_winapi as _winapi
+from trollius import test_support as support
+from trollius import test_utils
+from trollius import windows_utils
+from trollius.test_utils import mock
class WinsocketpairTests(unittest.TestCase):
@@ -27,14 +28,15 @@ class WinsocketpairTests(unittest.TestCase):
ssock, csock = windows_utils.socketpair()
self.check_winsocketpair(ssock, csock)
- @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
+ @test_utils.skipUnless(support.IPV6_ENABLED,
+ 'IPv6 not supported or enabled')
def test_winsocketpair_ipv6(self):
ssock, csock = windows_utils.socketpair(family=socket.AF_INET6)
self.check_winsocketpair(ssock, csock)
- @unittest.skipIf(hasattr(socket, 'socketpair'),
- 'socket.socketpair is available')
- @mock.patch('asyncio.windows_utils.socket')
+ @test_utils.skipIf(hasattr(socket, 'socketpair'),
+ 'socket.socketpair is available')
+ @mock.patch('trollius.windows_utils.socket')
def test_winsocketpair_exc(self, m_socket):
m_socket.AF_INET = socket.AF_INET
m_socket.SOCK_STREAM = socket.SOCK_STREAM
@@ -52,9 +54,9 @@ class WinsocketpairTests(unittest.TestCase):
self.assertRaises(ValueError,
windows_utils.socketpair, proto=1)
- @unittest.skipIf(hasattr(socket, 'socketpair'),
- 'socket.socketpair is available')
- @mock.patch('asyncio.windows_utils.socket')
+ @test_utils.skipIf(hasattr(socket, 'socketpair'),
+ 'socket.socketpair is available')
+ @mock.patch('trollius.windows_utils.socket')
def test_winsocketpair_close(self, m_socket):
m_socket.AF_INET = socket.AF_INET
m_socket.SOCK_STREAM = socket.SOCK_STREAM
@@ -80,7 +82,7 @@ class PipeTests(unittest.TestCase):
ERROR_IO_INCOMPLETE = 996
try:
ov1.getresult()
- except OSError as e:
+ except WindowsError as e:
self.assertEqual(e.winerror, ERROR_IO_INCOMPLETE)
else:
raise RuntimeError('expected ERROR_IO_INCOMPLETE')
@@ -90,15 +92,15 @@ class PipeTests(unittest.TestCase):
self.assertEqual(ov2.error, 0)
ov2.WriteFile(h2, b"hello")
- self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})
+ self.assertIn(ov2.error, set((0, _winapi.ERROR_IO_PENDING)))
- res = _winapi.WaitForMultipleObjects([ov2.event], False, 100)
+ res = _winapi.WaitForSingleObject(ov2.event, 100)
self.assertEqual(res, _winapi.WAIT_OBJECT_0)
self.assertFalse(ov1.pending)
self.assertEqual(ov1.error, ERROR_IO_INCOMPLETE)
self.assertFalse(ov2.pending)
- self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})
+ self.assertIn(ov2.error, set((0, _winapi.ERROR_IO_PENDING)))
self.assertEqual(ov1.getresult(), b"hello")
finally:
_winapi.CloseHandle(h1)
diff --git a/tox.ini b/tox.ini
index 040b25a..4fae0c8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,11 +1,30 @@
[tox]
-envlist = py33,py34
+envlist = py26,py27,py32,py33,py34
[testenv]
deps=
aiotest
setenv =
- PYTHONASYNCIODEBUG = 1
+ TROLLIUSDEBUG = 1
commands=
python runtests.py -r {posargs}
python run_aiotest.py -r {posargs}
+
+[testenv:py26]
+deps=
+ aiotest
+ futures
+ mock
+ ordereddict
+ unittest2
+
+[testenv:py27]
+deps=
+ aiotest
+ futures
+ mock
+
+[testenv:py32]
+deps=
+ aiotest
+ mock
diff --git a/asyncio/__init__.py b/trollius/__init__.py
index 3911fb4..20790db 100644
--- a/asyncio/__init__.py
+++ b/trollius/__init__.py
@@ -1,10 +1,10 @@
-"""The asyncio package, tracking PEP 3156."""
+"""The trollius package, tracking PEP 3156."""
import sys
# The selectors module is in the stdlib in Python 3.4 but not in 3.3.
# Do this first, so the other submodules can use "from . import selectors".
-# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer.
+# Prefer trollius/selectors.py over the stdlib one, as ours may be newer.
try:
from . import selectors
except ImportError:
@@ -23,6 +23,7 @@ from .events import *
from .futures import *
from .locks import *
from .protocols import *
+from .py33_exceptions import *
from .queues import *
from .streams import *
from .subprocess import *
@@ -31,6 +32,7 @@ from .transports import *
__all__ = (coroutines.__all__ +
events.__all__ +
+ py33_exceptions.__all__ +
futures.__all__ +
locks.__all__ +
protocols.__all__ +
@@ -46,3 +48,10 @@ if sys.platform == 'win32': # pragma: no cover
else:
from .unix_events import * # pragma: no cover
__all__ += unix_events.__all__
+
+try:
+ from .py3_ssl import *
+ __all__ += py3_ssl.__all__
+except ImportError:
+ # SSL support is optional
+ pass
diff --git a/asyncio/base_events.py b/trollius/base_events.py
index b1a5422..fe5f0a2 100644
--- a/asyncio/base_events.py
+++ b/trollius/base_events.py
@@ -15,23 +15,29 @@ to modify the meaning of the API call itself.
import collections
-import concurrent.futures
import heapq
import inspect
import logging
import os
import socket
import subprocess
-import time
-import traceback
import sys
+import traceback
+try:
+ from collections import OrderedDict
+except ImportError:
+ # Python 2.6: use ordereddict backport
+ from ordereddict import OrderedDict
+from . import compat
from . import coroutines
from . import events
from . import futures
from . import tasks
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
+from .executor import get_default_executor
from .log import logger
+from .time_monotonic import time_monotonic, time_monotonic_resolution
__all__ = ['BaseEventLoop', 'Server']
@@ -90,10 +96,10 @@ def _check_resolved_address(sock, address):
# already resolved.
try:
socket.getaddrinfo(host, port,
- family=family,
- type=(sock.type & ~type_mask),
- proto=sock.proto,
- flags=socket.AI_NUMERICHOST)
+ family,
+ (sock.type & ~type_mask),
+ sock.proto,
+ socket.AI_NUMERICHOST)
except socket.gaierror as err:
raise ValueError("address must be resolved (IP address), got %r: %s"
% (address, err))
@@ -153,10 +159,10 @@ class Server(events.AbstractServer):
@coroutine
def wait_closed(self):
if self.sockets is None or self._waiters is None:
- return
+ raise Return()
waiter = futures.Future(loop=self._loop)
self._waiters.append(waiter)
- yield from waiter
+ yield From(waiter)
class BaseEventLoop(events.AbstractEventLoop):
@@ -169,10 +175,9 @@ class BaseEventLoop(events.AbstractEventLoop):
self._default_executor = None
self._internal_fds = 0
self._running = False
- self._clock_resolution = time.get_clock_info('monotonic').resolution
+ self._clock_resolution = time_monotonic_resolution
self._exception_handler = None
- self._debug = (not sys.flags.ignore_environment
- and bool(os.environ.get('PYTHONASYNCIODEBUG')))
+ self._debug = bool(os.environ.get('TROLLIUSDEBUG'))
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
@@ -193,12 +198,12 @@ class BaseEventLoop(events.AbstractEventLoop):
del task._source_traceback[-1]
return task
- def _make_socket_transport(self, sock, protocol, waiter=None, *,
+ def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
- def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *,
+ def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter,
server_side=False, server_hostname=None,
extra=None, server=None):
"""Create SSL transport."""
@@ -271,7 +276,7 @@ class BaseEventLoop(events.AbstractEventLoop):
"""
self._check_closed()
- new_task = not isinstance(future, futures.Future)
+ new_task = not isinstance(future, futures._FUTURE_CLASSES)
future = tasks.async(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
@@ -319,7 +324,7 @@ class BaseEventLoop(events.AbstractEventLoop):
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
- self._scheduled.clear()
+ del self._scheduled[:]
executor = self._default_executor
if executor is not None:
self._default_executor = None
@@ -340,7 +345,7 @@ class BaseEventLoop(events.AbstractEventLoop):
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
- return time.monotonic()
+ return time_monotonic()
def call_later(self, delay, callback, *args):
"""Arrange for a callback to be called at a given time.
@@ -451,7 +456,7 @@ class BaseEventLoop(events.AbstractEventLoop):
if executor is None:
executor = self._default_executor
if executor is None:
- executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
+ executor = get_default_executor()
self._default_executor = executor
return futures.wrap_future(executor.submit(callback, *args), loop=self)
@@ -483,7 +488,7 @@ class BaseEventLoop(events.AbstractEventLoop):
logger.debug(msg)
return addrinfo
- def getaddrinfo(self, host, port, *,
+ def getaddrinfo(self, host, port,
family=0, type=0, proto=0, flags=0):
if self._debug:
return self.run_in_executor(None, self._getaddrinfo_debug,
@@ -496,7 +501,7 @@ class BaseEventLoop(events.AbstractEventLoop):
return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
@coroutine
- def create_connection(self, protocol_factory, host=None, port=None, *,
+ def create_connection(self, protocol_factory, host=None, port=None,
ssl=None, family=0, proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None):
"""Connect to a TCP server.
@@ -546,15 +551,15 @@ class BaseEventLoop(events.AbstractEventLoop):
else:
f2 = None
- yield from tasks.wait(fs, loop=self)
+ yield From(tasks.wait(fs, loop=self))
infos = f1.result()
if not infos:
- raise OSError('getaddrinfo() returned empty list')
+ raise socket.error('getaddrinfo() returned empty list')
if f2 is not None:
laddr_infos = f2.result()
if not laddr_infos:
- raise OSError('getaddrinfo() returned empty list')
+ raise socket.error('getaddrinfo() returned empty list')
exceptions = []
for family, type, proto, cname, address in infos:
@@ -566,11 +571,11 @@ class BaseEventLoop(events.AbstractEventLoop):
try:
sock.bind(laddr)
break
- except OSError as exc:
- exc = OSError(
+ except socket.error as exc:
+ exc = socket.error(
exc.errno, 'error while '
'attempting to bind on address '
- '{!r}: {}'.format(
+ '{0!r}: {1}'.format(
laddr, exc.strerror.lower()))
exceptions.append(exc)
else:
@@ -579,8 +584,8 @@ class BaseEventLoop(events.AbstractEventLoop):
continue
if self._debug:
logger.debug("connect %r to %r", sock, address)
- yield from self.sock_connect(sock, address)
- except OSError as exc:
+ yield From(self.sock_connect(sock, address))
+ except socket.error as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
@@ -600,7 +605,7 @@ class BaseEventLoop(events.AbstractEventLoop):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
- raise OSError('Multiple exceptions: {}'.format(
+ raise socket.error('Multiple exceptions: {0}'.format(
', '.join(str(exc) for exc in exceptions)))
elif sock is None:
@@ -609,15 +614,15 @@ class BaseEventLoop(events.AbstractEventLoop):
sock.setblocking(False)
- transport, protocol = yield from self._create_connection_transport(
- sock, protocol_factory, ssl, server_hostname)
+ transport, protocol = yield From(self._create_connection_transport(
+ sock, protocol_factory, ssl, server_hostname))
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
- return transport, protocol
+ raise Return(transport, protocol)
@coroutine
def _create_connection_transport(self, sock, protocol_factory, ssl,
@@ -632,12 +637,12 @@ class BaseEventLoop(events.AbstractEventLoop):
else:
transport = self._make_socket_transport(sock, protocol, waiter)
- yield from waiter
- return transport, protocol
+ yield From(waiter)
+ raise Return(transport, protocol)
@coroutine
def create_datagram_endpoint(self, protocol_factory,
- local_addr=None, remote_addr=None, *,
+ local_addr=None, remote_addr=None,
family=0, proto=0, flags=0):
"""Create datagram connection."""
if not (local_addr or remote_addr):
@@ -646,17 +651,17 @@ class BaseEventLoop(events.AbstractEventLoop):
addr_pairs_info = (((family, proto), (None, None)),)
else:
# join address by (family, protocol)
- addr_infos = collections.OrderedDict()
+ addr_infos = OrderedDict()
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
- infos = yield from self.getaddrinfo(
+ infos = yield From(self.getaddrinfo(
*addr, family=family, type=socket.SOCK_DGRAM,
- proto=proto, flags=flags)
+ proto=proto, flags=flags))
if not infos:
- raise OSError('getaddrinfo() returned empty list')
+ raise socket.error('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
@@ -688,9 +693,9 @@ class BaseEventLoop(events.AbstractEventLoop):
if local_addr:
sock.bind(local_address)
if remote_addr:
- yield from self.sock_connect(sock, remote_address)
+ yield From(self.sock_connect(sock, remote_address))
r_addr = remote_address
- except OSError as exc:
+ except socket.error as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
@@ -716,12 +721,11 @@ class BaseEventLoop(events.AbstractEventLoop):
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
- yield from waiter
- return transport, protocol
+ yield From(waiter)
+ raise Return(transport, protocol)
@coroutine
def create_server(self, protocol_factory, host=None, port=None,
- *,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
@@ -748,11 +752,11 @@ class BaseEventLoop(events.AbstractEventLoop):
if host == '':
host = None
- infos = yield from self.getaddrinfo(
+ infos = yield From(self.getaddrinfo(
host, port, family=family,
- type=socket.SOCK_STREAM, proto=0, flags=flags)
+ type=socket.SOCK_STREAM, proto=0, flags=flags))
if not infos:
- raise OSError('getaddrinfo() returned empty list')
+ raise socket.error('getaddrinfo() returned empty list')
completed = False
try:
@@ -780,10 +784,11 @@ class BaseEventLoop(events.AbstractEventLoop):
True)
try:
sock.bind(sa)
- except OSError as err:
- raise OSError(err.errno, 'error while attempting '
- 'to bind on address %r: %s'
- % (sa, err.strerror.lower()))
+ except socket.error as err:
+ raise socket.error(err.errno,
+ 'error while attempting '
+ 'to bind on address %r: %s'
+ % (sa, err.strerror.lower()))
completed = True
finally:
if not completed:
@@ -801,29 +806,29 @@ class BaseEventLoop(events.AbstractEventLoop):
self._start_serving(protocol_factory, sock, ssl, server)
if self._debug:
logger.info("%r is serving", server)
- return server
+ raise Return(server)
@coroutine
def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
- yield from waiter
+ yield From(waiter)
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
- return transport, protocol
+ raise Return(transport, protocol)
@coroutine
def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
- yield from waiter
+ yield From(waiter)
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
- return transport, protocol
+ raise Return(transport, protocol)
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
@@ -839,11 +844,11 @@ class BaseEventLoop(events.AbstractEventLoop):
logger.debug(' '.join(info))
@coroutine
- def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
+ def subprocess_shell(self, protocol_factory, cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=False, shell=True, bufsize=0,
**kwargs):
- if not isinstance(cmd, (bytes, str)):
+ if not isinstance(cmd, compat.string_types):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
@@ -857,17 +862,20 @@ class BaseEventLoop(events.AbstractEventLoop):
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
- transport = yield from self._make_subprocess_transport(
- protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
+ transport = yield From(self._make_subprocess_transport(
+ protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs))
if self._debug:
logger.info('%s: %r' % (debug_log, transport))
- return transport, protocol
+ raise Return(transport, protocol)
@coroutine
- def subprocess_exec(self, protocol_factory, program, *args,
- stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, universal_newlines=False,
- shell=False, bufsize=0, **kwargs):
+ def subprocess_exec(self, protocol_factory, program, *args, **kwargs):
+ stdin = kwargs.pop('stdin', subprocess.PIPE)
+ stdout = kwargs.pop('stdout', subprocess.PIPE)
+ stderr = kwargs.pop('stderr', subprocess.PIPE)
+ universal_newlines = kwargs.pop('universal_newlines', False)
+ shell = kwargs.pop('shell', False)
+ bufsize = kwargs.pop('bufsize', 0)
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
@@ -876,7 +884,7 @@ class BaseEventLoop(events.AbstractEventLoop):
raise ValueError("bufsize must be 0")
popen_args = (program,) + args
for arg in popen_args:
- if not isinstance(arg, (str, bytes)):
+ if not isinstance(arg, compat.string_types ):
raise TypeError("program arguments must be "
"a bytes or text string, not %s"
% type(arg).__name__)
@@ -886,12 +894,12 @@ class BaseEventLoop(events.AbstractEventLoop):
# (password) and may be too long
debug_log = 'execute program %r' % program
self._log_subprocess(debug_log, stdin, stdout, stderr)
- transport = yield from self._make_subprocess_transport(
+ transport = yield From(self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
- bufsize, **kwargs)
+ bufsize, **kwargs))
if self._debug:
logger.info('%s: %r' % (debug_log, transport))
- return transport, protocol
+ raise Return(transport, protocol)
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
@@ -907,7 +915,7 @@ class BaseEventLoop(events.AbstractEventLoop):
"""
if handler is not None and not callable(handler):
raise TypeError('A callable object or None is expected, '
- 'got {!r}'.format(handler))
+ 'got {0!r}'.format(handler))
self._exception_handler = handler
def default_exception_handler(self, context):
@@ -926,13 +934,21 @@ class BaseEventLoop(events.AbstractEventLoop):
exception = context.get('exception')
if exception is not None:
- exc_info = (type(exception), exception, exception.__traceback__)
+ if hasattr(exception, '__traceback__'):
+ # Python 3
+ tb = exception.__traceback__
+ else:
+ # call_exception_handler() is usually called indirectly
+ # from an except block. If it's not the case, the traceback
+ # is undefined...
+ tb = sys.exc_info()[2]
+ exc_info = (type(exception), exception, tb)
else:
exc_info = False
log_lines = [message]
for key in sorted(context):
- if key in {'message', 'exception'}:
+ if key in ('message', 'exception'):
continue
value = context[key]
if key == 'source_traceback':
@@ -941,7 +957,7 @@ class BaseEventLoop(events.AbstractEventLoop):
value += tb.rstrip()
else:
value = repr(value)
- log_lines.append('{}: {}'.format(key, value))
+ log_lines.append('{0}: {1}'.format(key, value))
logger.error('\n'.join(log_lines), exc_info=exc_info)
@@ -1021,7 +1037,7 @@ class BaseEventLoop(events.AbstractEventLoop):
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
- self._timer_cancelled_count / sched_count >
+ float(self._timer_cancelled_count) / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
diff --git a/asyncio/base_subprocess.py b/trollius/base_subprocess.py
index 81698b0..ba137ab 100644
--- a/asyncio/base_subprocess.py
+++ b/trollius/base_subprocess.py
@@ -3,7 +3,7 @@ import subprocess
from . import protocols
from . import transports
-from .coroutines import coroutine
+from .coroutines import coroutine, From
from .log import logger
@@ -12,7 +12,7 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
def __init__(self, loop, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
- super().__init__(extra)
+ super(BaseSubprocessTransport, self).__init__(extra)
self._protocol = protocol
self._loop = loop
self._pid = None
@@ -101,19 +101,19 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
proc = self._proc
loop = self._loop
if proc.stdin is not None:
- _, pipe = yield from loop.connect_write_pipe(
+ _, pipe = yield From(loop.connect_write_pipe(
lambda: WriteSubprocessPipeProto(self, 0),
- proc.stdin)
+ proc.stdin))
self._pipes[0] = pipe
if proc.stdout is not None:
- _, pipe = yield from loop.connect_read_pipe(
+ _, pipe = yield From(loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 1),
- proc.stdout)
+ proc.stdout))
self._pipes[1] = pipe
if proc.stderr is not None:
- _, pipe = yield from loop.connect_read_pipe(
+ _, pipe = yield From(loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 2),
- proc.stderr)
+ proc.stderr))
self._pipes[2] = pipe
assert self._pending_calls is not None
diff --git a/trollius/compat.py b/trollius/compat.py
new file mode 100644
index 0000000..7947842
--- /dev/null
+++ b/trollius/compat.py
@@ -0,0 +1,61 @@
+"""
+Compatibility constants and functions for the different Python versions.
+"""
+import sys
+
+# Python 2.6 or older?
+PY26 = (sys.version_info < (2, 7))
+
+# Python 3.0 or newer?
+PY3 = (sys.version_info >= (3,))
+
+# Python 3.3 or newer?
+PY33 = (sys.version_info >= (3, 3))
+
+# Python 3.4 or newer?
+PY34 = sys.version_info >= (3, 4)
+
+if PY3:
+ integer_types = (int,)
+ bytes_type = bytes
+ text_type = str
+ string_types = (bytes, str)
+ BYTES_TYPES = (bytes, bytearray, memoryview)
+else:
+ integer_types = (int, long,)
+ bytes_type = str
+ text_type = unicode
+ string_types = basestring
+ if PY26:
+ BYTES_TYPES = (str, bytearray, buffer)
+ else: # Python 2.7
+ BYTES_TYPES = (str, bytearray, memoryview, buffer)
+
+def flatten_bytes(data):
+ """
+ Convert bytes-like objects (bytes, bytearray, memoryview, buffer) to
+ a bytes string.
+ """
+ if not isinstance(data, BYTES_TYPES):
+ raise TypeError('data argument must be byte-ish (%r)',
+ type(data))
+ if PY34:
+ # In Python 3.4, socket.send() and bytes.join() accept memoryview
+ # and bytearray
+ return data
+ if not data:
+ return b''
+ if not PY3 and isinstance(data, (buffer, bytearray)):
+ return str(data)
+ elif not PY26 and isinstance(data, memoryview):
+ return data.tobytes()
+ else:
+ return data
+
+if PY3:
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+else:
+ exec("""def reraise(tp, value, tb=None): raise tp, value, tb""")
diff --git a/asyncio/constants.py b/trollius/constants.py
index f9e1232..f9e1232 100644
--- a/asyncio/constants.py
+++ b/trollius/constants.py
diff --git a/trollius/coroutines.py b/trollius/coroutines.py
new file mode 100644
index 0000000..650bc13
--- /dev/null
+++ b/trollius/coroutines.py
@@ -0,0 +1,342 @@
+__all__ = ['coroutine',
+ 'iscoroutinefunction', 'iscoroutine']
+
+import functools
+import inspect
+import opcode
+import os
+import sys
+import traceback
+import types
+
+from . import compat
+from . import events
+from . import futures
+from .log import logger
+
+
+# Opcode of "yield from" instruction
+_YIELD_FROM = opcode.opmap.get('YIELD_FROM', None)
+
+# If you set _DEBUG to true, @coroutine will wrap the resulting
+# generator objects in a CoroWrapper instance (defined below). That
+# instance will log a message when the generator is never iterated
+# over, which may happen when you forget to use "yield" with a
+# coroutine call. Note that the value of the _DEBUG flag is taken
+# when the decorator is used, so to be of any use it must be set
+# before you define your coroutines. A downside of using this feature
+# is that tracebacks show entries for the CoroWrapper.__next__ method
+# when _DEBUG is true.
+_DEBUG = bool(os.environ.get('TROLLIUSDEBUG'))
+
+
+if _YIELD_FROM is not None:
+ # Check for CPython issue #21209
+ exec('''if 1:
+ def has_yield_from_bug():
+ class MyGen:
+ def __init__(self):
+ self.send_args = None
+ def __iter__(self):
+ return self
+ def __next__(self):
+ return 42
+ def send(self, *what):
+ self.send_args = what
+ return None
+ def yield_from_gen(gen):
+ yield from gen
+ value = (1, 2, 3)
+ gen = MyGen()
+ coro = yield_from_gen(gen)
+ next(coro)
+ coro.send(value)
+ return gen.send_args != (value,)
+''')
+ _YIELD_FROM_BUG = has_yield_from_bug()
+ del has_yield_from_bug
+else:
+ _YIELD_FROM_BUG = False
+
+
+if compat.PY33:
+ # Don't use the Return class on Python 3.3 and later to support asyncio
+ # coroutines (to avoid the warning emitted in Return destructor).
+ #
+ # The problem is that Return inherits from StopIteration. "yield from
+ # trollius_coroutine". Task._step() does not receive the Return exception,
+ # because "yield from" handles it internally. So it's not possible to set
+ # the raised attribute to True to avoid the warning in Return destructor.
+ def Return(*args):
+ if not args:
+ value = None
+ elif len(args) == 1:
+ value = args[0]
+ else:
+ value = args
+ return StopIteration(value)
+else:
+ class Return(StopIteration):
+ def __init__(self, *args):
+ StopIteration.__init__(self)
+ if not args:
+ self.value = None
+ elif len(args) == 1:
+ self.value = args[0]
+ else:
+ self.value = args
+ self.raised = False
+ if _DEBUG:
+ frame = sys._getframe(1)
+ self._source_traceback = traceback.extract_stack(frame)
+ # explicitly clear the reference to avoid reference cycles
+ frame = None
+ else:
+ self._source_traceback = None
+
+ def __del__(self):
+ if self.raised:
+ return
+
+ fmt = 'Return(%r) used without raise'
+ if self._source_traceback:
+ fmt += '\nReturn created at (most recent call last):\n'
+ tb = ''.join(traceback.format_list(self._source_traceback))
+ fmt += tb.rstrip()
+ logger.error(fmt, self.value)
+
+
+def _coroutine_at_yield_from(coro):
+ """Test if the last instruction of a coroutine is "yield from".
+
+ Return False if the coroutine completed.
+ """
+ frame = coro.gi_frame
+ if frame is None:
+ return False
+ code = coro.gi_code
+ assert frame.f_lasti >= 0
+ offset = frame.f_lasti + 1
+ instr = code.co_code[offset]
+ return (instr == _YIELD_FROM)
+
+
+class CoroWrapper(object):
+ # Wrapper for coroutine object in _DEBUG mode.
+
+ def __init__(self, gen, func):
+ assert inspect.isgenerator(gen), gen
+ self.gen = gen
+ self.func = func
+ self._source_traceback = traceback.extract_stack(sys._getframe(1))
+ # __name__, __qualname__, __doc__ attributes are set by the coroutine()
+ # decorator
+
+ def __repr__(self):
+ coro_repr = _format_coroutine(self)
+ if self._source_traceback:
+ frame = self._source_traceback[-1]
+ coro_repr += ', created at %s:%s' % (frame[0], frame[1])
+ return '<%s %s>' % (self.__class__.__name__, coro_repr)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self.gen)
+ next = __next__
+
+ if _YIELD_FROM_BUG:
+ # For CPython issue #21209: using "yield from" and a custom
+ # generator, generator.send(tuple) unpacks the tuple instead of passing
+ # the tuple unchanged. Check if the caller is a generator using "yield
+ # from" to decide if the parameter should be unpacked or not.
+ def send(self, *value):
+ frame = sys._getframe()
+ caller = frame.f_back
+ assert caller.f_lasti >= 0
+ if caller.f_code.co_code[caller.f_lasti] != _YIELD_FROM:
+ value = value[0]
+ return self.gen.send(value)
+ else:
+ def send(self, value):
+ return self.gen.send(value)
+
+ def throw(self, exc):
+ return self.gen.throw(exc)
+
+ def close(self):
+ return self.gen.close()
+
+ @property
+ def gi_frame(self):
+ return self.gen.gi_frame
+
+ @property
+ def gi_running(self):
+ return self.gen.gi_running
+
+ @property
+ def gi_code(self):
+ return self.gen.gi_code
+
+ def __del__(self):
+ # Be careful accessing self.gen.frame -- self.gen might not exist.
+ gen = getattr(self, 'gen', None)
+ frame = getattr(gen, 'gi_frame', None)
+ if frame is not None and frame.f_lasti == -1:
+ msg = '%r was never yielded from' % self
+ tb = getattr(self, '_source_traceback', ())
+ if tb:
+ tb = ''.join(traceback.format_list(tb))
+ msg += ('\nCoroutine object created at '
+ '(most recent call last):\n')
+ msg += tb.rstrip()
+ logger.error(msg)
+
+if not compat.PY34:
+ # Backport functools.update_wrapper() from Python 3.4:
+ # - Python 2.7 fails if assigned attributes don't exist
+ # - Python 2.7 and 3.1 don't set the __wrapped__ attribute
+ # - Python 3.2 and 3.3 set __wrapped__ before updating __dict__
+ def _update_wrapper(wrapper,
+ wrapped,
+ assigned = functools.WRAPPER_ASSIGNMENTS,
+ updated = functools.WRAPPER_UPDATES):
+ """Update a wrapper function to look like the wrapped function
+
+ wrapper is the function to be updated
+ wrapped is the original function
+ assigned is a tuple naming the attributes assigned directly
+ from the wrapped function to the wrapper function (defaults to
+ functools.WRAPPER_ASSIGNMENTS)
+ updated is a tuple naming the attributes of the wrapper that
+ are updated with the corresponding attribute from the wrapped
+ function (defaults to functools.WRAPPER_UPDATES)
+ """
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ pass
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
+ # from the wrapped function when updating __dict__
+ wrapper.__wrapped__ = wrapped
+ # Return the wrapper so this can be used as a decorator via partial()
+ return wrapper
+
+ def _wraps(wrapped,
+ assigned = functools.WRAPPER_ASSIGNMENTS,
+ updated = functools.WRAPPER_UPDATES):
+ """Decorator factory to apply update_wrapper() to a wrapper function
+
+ Returns a decorator that invokes update_wrapper() with the decorated
+ function as the wrapper argument and the arguments to wraps() as the
+ remaining arguments. Default arguments are as for update_wrapper().
+ This is a convenience function to simplify applying partial() to
+ update_wrapper().
+ """
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+else:
+ _wraps = functools.wraps
+
+def coroutine(func):
+ """Decorator to mark coroutines.
+
+ If the coroutine is not yielded from before it is destroyed,
+ an error message is logged.
+ """
+ if inspect.isgeneratorfunction(func):
+ coro = func
+ else:
+ @_wraps(func)
+ def coro(*args, **kw):
+ res = func(*args, **kw)
+ if (isinstance(res, futures._FUTURE_CLASSES)
+ or inspect.isgenerator(res)):
+ res = yield From(res)
+ raise Return(res)
+
+ if not _DEBUG:
+ wrapper = coro
+ else:
+ @_wraps(func)
+ def wrapper(*args, **kwds):
+ coro_wrapper = CoroWrapper(coro(*args, **kwds), func)
+ if coro_wrapper._source_traceback:
+ del coro_wrapper._source_traceback[-1]
+ for attr in ('__name__', '__qualname__', '__doc__'):
+ try:
+ value = getattr(func, attr)
+ except AttributeError:
+ pass
+ else:
+ setattr(coro_wrapper, attr, value)
+ return coro_wrapper
+ if not compat.PY3:
+ wrapper.__wrapped__ = func
+
+ wrapper._is_coroutine = True # For iscoroutinefunction().
+ return wrapper
+
+
+def iscoroutinefunction(func):
+ """Return True if func is a decorated coroutine function."""
+ return getattr(func, '_is_coroutine', False)
+
+
+_COROUTINE_TYPES = (types.GeneratorType, CoroWrapper)
+if events.asyncio is not None:
+ # Accept also asyncio CoroWrapper for interoperability
+ if hasattr(events.asyncio, 'coroutines'):
+ _COROUTINE_TYPES += (events.asyncio.coroutines.CoroWrapper,)
+ else:
+ # old Tulip/Python versions
+ _COROUTINE_TYPES += (events.asyncio.tasks.CoroWrapper,)
+
+def iscoroutine(obj):
+ """Return True if obj is a coroutine object."""
+ return isinstance(obj, _COROUTINE_TYPES)
+
+
+def _format_coroutine(coro):
+ assert iscoroutine(coro)
+ coro_name = getattr(coro, '__qualname__', coro.__name__)
+
+ filename = coro.gi_code.co_filename
+ if (isinstance(coro, CoroWrapper)
+ and not inspect.isgeneratorfunction(coro.func)):
+ filename, lineno = events._get_function_source(coro.func)
+ if coro.gi_frame is None:
+ coro_repr = '%s() done, defined at %s:%s' % (coro_name, filename, lineno)
+ else:
+ coro_repr = '%s() running, defined at %s:%s' % (coro_name, filename, lineno)
+ elif coro.gi_frame is not None:
+ lineno = coro.gi_frame.f_lineno
+ coro_repr = '%s() running at %s:%s' % (coro_name, filename, lineno)
+ else:
+ lineno = coro.gi_code.co_firstlineno
+ coro_repr = '%s() done, defined at %s:%s' % (coro_name, filename, lineno)
+
+ return coro_repr
+
+
+class FromWrapper(object):
+ __slots__ = ('obj',)
+
+ def __init__(self, obj):
+ if isinstance(obj, FromWrapper):
+ obj = obj.obj
+ assert not isinstance(obj, FromWrapper)
+ self.obj = obj
+
+def From(obj):
+ if not _DEBUG:
+ return obj
+ else:
+ return FromWrapper(obj)
diff --git a/trollius/events.py b/trollius/events.py
new file mode 100644
index 0000000..fa723e3
--- /dev/null
+++ b/trollius/events.py
@@ -0,0 +1,633 @@
+"""Event loop and event loop policy."""
+from __future__ import absolute_import
+
+__all__ = ['AbstractEventLoopPolicy',
+ 'AbstractEventLoop', 'AbstractServer',
+ 'Handle', 'TimerHandle',
+ 'get_event_loop_policy', 'set_event_loop_policy',
+ 'get_event_loop', 'set_event_loop', 'new_event_loop',
+ 'get_child_watcher', 'set_child_watcher',
+ ]
+
+import functools
+import inspect
+import socket
+import subprocess
+import sys
+import threading
+import traceback
+try:
+ import reprlib # Python 3
+except ImportError:
+ import repr as reprlib # Python 2
+
+from trollius import compat
+try:
+ import asyncio
+except (ImportError, SyntaxError):
+ # ignore SyntaxError for convenience: ignore SyntaxError caused by "yield
+ # from" if asyncio module is in the Python path
+ asyncio = None
+
+
+_PY34 = sys.version_info >= (3, 4)
+
+if not compat.PY34:
+ # Backported functools.unwrap() from Python 3.4, without the stop parameter
+ # (not needed here)
+ #
+ # @trollius.coroutine decorator chains wrapper using @functools.wrap
+ # backported from Python 3.4.
+ def _unwrap(func):
+ f = func # remember the original func for error reporting
+ memo = set((id(f),)) # Memoise by id to tolerate non-hashable objects
+ while hasattr(func, '__wrapped__'):
+ func = func.__wrapped__
+ id_func = id(func)
+ if id_func in memo:
+ raise ValueError('wrapper loop when unwrapping {0!r}'.format(f))
+ memo.add(id_func)
+ return func
+else:
+ _unwrap = inspect.unwrap
+
+
+def _get_function_source(func):
+ func = _unwrap(func)
+ if inspect.isfunction(func):
+ code = func.__code__
+ return (code.co_filename, code.co_firstlineno)
+ if isinstance(func, functools.partial):
+ return _get_function_source(func.func)
+ if _PY34 and isinstance(func, functools.partialmethod):
+ return _get_function_source(func.func)
+ return None
+
+
+def _format_args(args):
+ """Format function arguments.
+
+ Special case for a single parameter: ('hello',) is formatted as ('hello').
+ """
+ # use reprlib to limit the length of the output
+ args_repr = reprlib.repr(args)
+ if len(args) == 1 and args_repr.endswith(',)'):
+ args_repr = args_repr[:-2] + ')'
+ return args_repr
+
+
+def _format_callback(func, args, suffix=''):
+ if isinstance(func, functools.partial):
+ if args is not None:
+ suffix = _format_args(args) + suffix
+ return _format_callback(func.func, func.args, suffix)
+
+ if compat.PY33:
+ func_repr = getattr(func, '__qualname__', None)
+ else:
+ func_repr = getattr(func, '__name__', None)
+ if not func_repr:
+ func_repr = repr(func)
+
+ if args is not None:
+ func_repr += _format_args(args)
+ if suffix:
+ func_repr += suffix
+
+ source = _get_function_source(func)
+ if source:
+ func_repr += ' at %s:%s' % source
+ return func_repr
+
+
+class Handle(object):
+ """Object returned by callback registration methods."""
+
+ __slots__ = ('_callback', '_args', '_cancelled', '_loop',
+ '_source_traceback', '_repr', '__weakref__')
+
+ def __init__(self, callback, args, loop):
+ assert not isinstance(callback, Handle), 'A Handle is not a callback'
+ self._loop = loop
+ self._callback = callback
+ self._args = args
+ self._cancelled = False
+ self._repr = None
+ if self._loop.get_debug():
+ self._source_traceback = traceback.extract_stack(sys._getframe(1))
+ else:
+ self._source_traceback = None
+
+ def _repr_info(self):
+ info = [self.__class__.__name__]
+ if self._cancelled:
+ info.append('cancelled')
+ if self._callback is not None:
+ info.append(_format_callback(self._callback, self._args))
+ if self._source_traceback:
+ frame = self._source_traceback[-1]
+ info.append('created at %s:%s' % (frame[0], frame[1]))
+ return info
+
+ def __repr__(self):
+ if self._repr is not None:
+ return self._repr
+ info = self._repr_info()
+ return '<%s>' % ' '.join(info)
+
+ def cancel(self):
+ if not self._cancelled:
+ self._cancelled = True
+ if self._loop.get_debug():
+ # Keep a representation in debug mode to keep callback and
+ # parameters. For example, to log the warning
+ # "Executing <Handle...> took 2.5 second"
+ self._repr = repr(self)
+ self._callback = None
+ self._args = None
+
+ def _run(self):
+ try:
+ self._callback(*self._args)
+ except Exception as exc:
+ cb = _format_callback(self._callback, self._args)
+ msg = 'Exception in callback {0}'.format(cb)
+ context = {
+ 'message': msg,
+ 'exception': exc,
+ 'handle': self,
+ }
+ if self._source_traceback:
+ context['source_traceback'] = self._source_traceback
+ self._loop.call_exception_handler(context)
+ self = None # Needed to break cycles when an exception occurs.
+
+
+class TimerHandle(Handle):
+ """Object returned by timed callback registration methods."""
+
+ __slots__ = ['_scheduled', '_when']
+
+ def __init__(self, when, callback, args, loop):
+ assert when is not None
+ super(TimerHandle, self).__init__(callback, args, loop)
+ if self._source_traceback:
+ del self._source_traceback[-1]
+ self._when = when
+ self._scheduled = False
+
+ def _repr_info(self):
+ info = super(TimerHandle, self)._repr_info()
+ pos = 2 if self._cancelled else 1
+ info.insert(pos, 'when=%s' % self._when)
+ return info
+
+ def __hash__(self):
+ return hash(self._when)
+
+ def __lt__(self, other):
+ return self._when < other._when
+
+ def __le__(self, other):
+ if self._when < other._when:
+ return True
+ return self.__eq__(other)
+
+ def __gt__(self, other):
+ return self._when > other._when
+
+ def __ge__(self, other):
+ if self._when > other._when:
+ return True
+ return self.__eq__(other)
+
+ def __eq__(self, other):
+ if isinstance(other, TimerHandle):
+ return (self._when == other._when and
+ self._callback == other._callback and
+ self._args == other._args and
+ self._cancelled == other._cancelled)
+ return NotImplemented
+
+ def __ne__(self, other):
+ equal = self.__eq__(other)
+ return NotImplemented if equal is NotImplemented else not equal
+
+ def cancel(self):
+ if not self._cancelled:
+ self._loop._timer_handle_cancelled(self)
+ super(TimerHandle, self).cancel()
+
+
+class AbstractServer(object):
+ """Abstract server returned by create_server()."""
+
+ def close(self):
+ """Stop serving. This leaves existing connections open."""
+ return NotImplemented
+
+ def wait_closed(self):
+ """Coroutine to wait until service is closed."""
+ return NotImplemented
+
+
+if asyncio is not None:
+ # Reuse asyncio classes so asyncio.set_event_loop() and
+ # asyncio.set_event_loop_policy() accept Trollius event loop and trollius
+ # event loop policy
+ AbstractEventLoop = asyncio.AbstractEventLoop
+ AbstractEventLoopPolicy = asyncio.AbstractEventLoopPolicy
+else:
+ class AbstractEventLoop(object):
+ """Abstract event loop."""
+
+ # Running and stopping the event loop.
+
+ def run_forever(self):
+ """Run the event loop until stop() is called."""
+ raise NotImplementedError
+
+ def run_until_complete(self, future):
+ """Run the event loop until a Future is done.
+
+ Return the Future's result, or raise its exception.
+ """
+ raise NotImplementedError
+
+ def stop(self):
+ """Stop the event loop as soon as reasonable.
+
+ Exactly how soon that is may depend on the implementation, but
+ no more I/O callbacks should be scheduled.
+ """
+ raise NotImplementedError
+
+ def is_running(self):
+ """Return whether the event loop is currently running."""
+ raise NotImplementedError
+
+ def is_closed(self):
+ """Returns True if the event loop was closed."""
+ raise NotImplementedError
+
+ def close(self):
+ """Close the loop.
+
+ The loop should not be running.
+
+ This is idempotent and irreversible.
+
+ No other methods should be called after this one.
+ """
+ raise NotImplementedError
+
+ # Methods scheduling callbacks. All these return Handles.
+
+ def _timer_handle_cancelled(self, handle):
+ """Notification that a TimerHandle has been cancelled."""
+ raise NotImplementedError
+
+ def call_soon(self, callback, *args):
+ return self.call_later(0, callback, *args)
+
+ def call_later(self, delay, callback, *args):
+ raise NotImplementedError
+
+ def call_at(self, when, callback, *args):
+ raise NotImplementedError
+
+ def time(self):
+ raise NotImplementedError
+
+ # Method scheduling a coroutine object: create a task.
+
+ def create_task(self, coro):
+ raise NotImplementedError
+
+ # Methods for interacting with threads.
+
+ def call_soon_threadsafe(self, callback, *args):
+ raise NotImplementedError
+
+ def run_in_executor(self, executor, callback, *args):
+ raise NotImplementedError
+
+ def set_default_executor(self, executor):
+ raise NotImplementedError
+
+ # Network I/O methods returning Futures.
+
+ def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0):
+ raise NotImplementedError
+
+ def getnameinfo(self, sockaddr, flags=0):
+ raise NotImplementedError
+
+ def create_connection(self, protocol_factory, host=None, port=None,
+ ssl=None, family=0, proto=0, flags=0, sock=None,
+ local_addr=None, server_hostname=None):
+ raise NotImplementedError
+
+ def create_server(self, protocol_factory, host=None, port=None,
+ family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
+ sock=None, backlog=100, ssl=None, reuse_address=None):
+ """A coroutine which creates a TCP server bound to host and port.
+
+ The return value is a Server object which can be used to stop
+ the service.
+
+ If host is an empty string or None all interfaces are assumed
+ and a list of multiple sockets will be returned (most likely
+ one for IPv4 and another one for IPv6).
+
+ family can be set to either AF_INET or AF_INET6 to force the
+ socket to use IPv4 or IPv6. If not set it will be determined
+ from host (defaults to AF_UNSPEC).
+
+ flags is a bitmask for getaddrinfo().
+
+ sock can optionally be specified in order to use a preexisting
+ socket object.
+
+ backlog is the maximum number of queued connections passed to
+ listen() (defaults to 100).
+
+ ssl can be set to an SSLContext to enable SSL over the
+ accepted connections.
+
+ reuse_address tells the kernel to reuse a local socket in
+ TIME_WAIT state, without waiting for its natural timeout to
+ expire. If not specified will automatically be set to True on
+ UNIX.
+ """
+ raise NotImplementedError
+
+ def create_unix_connection(self, protocol_factory, path,
+ ssl=None, sock=None,
+ server_hostname=None):
+ raise NotImplementedError
+
+ def create_unix_server(self, protocol_factory, path,
+ sock=None, backlog=100, ssl=None):
+ """A coroutine which creates a UNIX Domain Socket server.
+
+ The return value is a Server object, which can be used to stop
+ the service.
+
+ path is a str, representing a file system path to bind the
+ server socket to.
+
+ sock can optionally be specified in order to use a preexisting
+ socket object.
+
+ backlog is the maximum number of queued connections passed to
+ listen() (defaults to 100).
+
+ ssl can be set to an SSLContext to enable SSL over the
+ accepted connections.
+ """
+ raise NotImplementedError
+
+ def create_datagram_endpoint(self, protocol_factory,
+ local_addr=None, remote_addr=None,
+ family=0, proto=0, flags=0):
+ raise NotImplementedError
+
+ # Pipes and subprocesses.
+
+ def connect_read_pipe(self, protocol_factory, pipe):
+ """Register read pipe in event loop. Set the pipe to non-blocking mode.
+
+ protocol_factory should instantiate object with Protocol interface.
+ pipe is a file-like object.
+ Return pair (transport, protocol), where transport supports the
+ ReadTransport interface."""
+ # The reason to accept a file-like object instead of just a file
+ # descriptor is: we need to own the pipe and close it when the
+ # transport is finished. Complicated errors can occur if we pass
+ # f.fileno(), close the fd in the pipe transport, then close f, and vice versa.
+ raise NotImplementedError
+
+ def connect_write_pipe(self, protocol_factory, pipe):
+ """Register write pipe in event loop.
+
+ protocol_factory should instantiate object with BaseProtocol interface.
+ Pipe is file-like object already switched to nonblocking.
+ Return pair (transport, protocol), where transport support
+ WriteTransport interface."""
+ # The reason to accept a file-like object instead of just a file
+ # descriptor is: we need to own the pipe and close it when the
+ # transport is finished. Complicated errors can occur if we pass
+ # f.fileno(), close the fd in the pipe transport, then close f, and vice versa.
+ raise NotImplementedError
+
+ def subprocess_shell(self, protocol_factory, cmd, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ **kwargs):
+ raise NotImplementedError
+
+ def subprocess_exec(self, protocol_factory, *args, **kwargs):
+ raise NotImplementedError
+
+ # Ready-based callback registration methods.
+ # The add_*() methods return None.
+ # The remove_*() methods return True if something was removed,
+ # False if there was nothing to delete.
+
+ def add_reader(self, fd, callback, *args):
+ raise NotImplementedError
+
+ def remove_reader(self, fd):
+ raise NotImplementedError
+
+ def add_writer(self, fd, callback, *args):
+ raise NotImplementedError
+
+ def remove_writer(self, fd):
+ raise NotImplementedError
+
+ # Completion based I/O methods returning Futures.
+
+ def sock_recv(self, sock, nbytes):
+ raise NotImplementedError
+
+ def sock_sendall(self, sock, data):
+ raise NotImplementedError
+
+ def sock_connect(self, sock, address):
+ raise NotImplementedError
+
+ def sock_accept(self, sock):
+ raise NotImplementedError
+
+ # Signal handling.
+
+ def add_signal_handler(self, sig, callback, *args):
+ raise NotImplementedError
+
+ def remove_signal_handler(self, sig):
+ raise NotImplementedError
+
+ # Error handlers.
+
+ def set_exception_handler(self, handler):
+ raise NotImplementedError
+
+ def default_exception_handler(self, context):
+ raise NotImplementedError
+
+ def call_exception_handler(self, context):
+ raise NotImplementedError
+
+ # Debug flag management.
+
+ def get_debug(self):
+ raise NotImplementedError
+
+ def set_debug(self, enabled):
+ raise NotImplementedError
+
+
+ class AbstractEventLoopPolicy(object):
+ """Abstract policy for accessing the event loop."""
+
+ def get_event_loop(self):
+ """Get the event loop for the current context.
+
+ Returns an event loop object implementing the BaseEventLoop interface,
+ or raises an exception in case no event loop has been set for the
+ current context and the current policy does not specify to create one.
+
+ It should never return None."""
+ raise NotImplementedError
+
+ def set_event_loop(self, loop):
+ """Set the event loop for the current context to loop."""
+ raise NotImplementedError
+
+ def new_event_loop(self):
+ """Create and return a new event loop object according to this
+ policy's rules. If there's need to set this loop as the event loop for
+ the current context, set_event_loop must be called explicitly."""
+ raise NotImplementedError
+
+ # Child processes handling (Unix only).
+
+ def get_child_watcher(self):
+ "Get the watcher for child processes."
+ raise NotImplementedError
+
+ def set_child_watcher(self, watcher):
+ """Set the watcher for child processes."""
+ raise NotImplementedError
+
+
+class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
+ """Default policy implementation for accessing the event loop.
+
+ In this policy, each thread has its own event loop. However, we
+ only automatically create an event loop by default for the main
+ thread; other threads by default have no event loop.
+
+ Other policies may have different rules (e.g. a single global
+ event loop, or automatically creating an event loop per thread, or
+ using some other notion of context to which an event loop is
+ associated).
+ """
+
+ _loop_factory = None
+
+ class _Local(threading.local):
+ _loop = None
+ _set_called = False
+
+ def __init__(self):
+ self._local = self._Local()
+
+ def get_event_loop(self):
+ """Get the event loop.
+
+ This may be None or an instance of EventLoop.
+ """
+ if (self._local._loop is None and
+ not self._local._set_called and
+ isinstance(threading.current_thread(), threading._MainThread)):
+ self.set_event_loop(self.new_event_loop())
+ if self._local._loop is None:
+ raise RuntimeError('There is no current event loop in thread %r.'
+ % threading.current_thread().name)
+ return self._local._loop
+
+ def set_event_loop(self, loop):
+ """Set the event loop."""
+ self._local._set_called = True
+ assert loop is None or isinstance(loop, AbstractEventLoop)
+ self._local._loop = loop
+
+ def new_event_loop(self):
+ """Create a new event loop.
+
+ You must call set_event_loop() to make this the current event
+ loop.
+ """
+ return self._loop_factory()
+
+
+# Event loop policy. The policy itself is always global, even if the
+# policy's rules say that there is an event loop per thread (or other
+# notion of context). The default policy is installed by the first
+# call to get_event_loop_policy().
+_event_loop_policy = None
+
+# Lock for protecting the on-the-fly creation of the event loop policy.
+_lock = threading.Lock()
+
+
+def _init_event_loop_policy():
+ global _event_loop_policy
+ with _lock:
+ if _event_loop_policy is None: # pragma: no branch
+ from . import DefaultEventLoopPolicy
+ _event_loop_policy = DefaultEventLoopPolicy()
+
+
+def get_event_loop_policy():
+ """Get the current event loop policy."""
+ if _event_loop_policy is None:
+ _init_event_loop_policy()
+ return _event_loop_policy
+
+
+def set_event_loop_policy(policy):
+ """Set the current event loop policy.
+
+ If policy is None, the default policy is restored."""
+ global _event_loop_policy
+ assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
+ _event_loop_policy = policy
+
+
+def get_event_loop():
+ """Equivalent to calling get_event_loop_policy().get_event_loop()."""
+ return get_event_loop_policy().get_event_loop()
+
+
+def set_event_loop(loop):
+ """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
+ get_event_loop_policy().set_event_loop(loop)
+
+
+def new_event_loop():
+ """Equivalent to calling get_event_loop_policy().new_event_loop()."""
+ return get_event_loop_policy().new_event_loop()
+
+
+def get_child_watcher():
+ """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
+ return get_event_loop_policy().get_child_watcher()
+
+
+def set_child_watcher(watcher):
+ """Equivalent to calling
+ get_event_loop_policy().set_child_watcher(watcher)."""
+ return get_event_loop_policy().set_child_watcher(watcher)
diff --git a/trollius/executor.py b/trollius/executor.py
new file mode 100644
index 0000000..9e7fdd7
--- /dev/null
+++ b/trollius/executor.py
@@ -0,0 +1,84 @@
+from .log import logger
+
+__all__ = (
+ 'CancelledError', 'TimeoutError',
+ 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
+ )
+
+# Argument for default thread pool executor creation.
+_MAX_WORKERS = 5
+
+try:
+ import concurrent.futures
+ import concurrent.futures._base
+except ImportError:
+ FIRST_COMPLETED = 'FIRST_COMPLETED'
+ FIRST_EXCEPTION = 'FIRST_EXCEPTION'
+ ALL_COMPLETED = 'ALL_COMPLETED'
+
+ class Future(object):
+ def __init__(self, callback, args):
+ try:
+ self._result = callback(*args)
+ self._exception = None
+ except Exception as err:
+ self._result = None
+ self._exception = err
+ self.callbacks = []
+
+ def cancelled(self):
+ return False
+
+ def done(self):
+ return True
+
+ def exception(self):
+ return self._exception
+
+ def result(self):
+ if self._exception is not None:
+ raise self._exception
+ else:
+ return self._result
+
+ def add_done_callback(self, callback):
+ callback(self)
+
+ class Error(Exception):
+ """Base class for all future-related exceptions."""
+ pass
+
+ class CancelledError(Error):
+ """The Future was cancelled."""
+ pass
+
+ class TimeoutError(Error):
+ """The operation exceeded the given deadline."""
+ pass
+
+ class SynchronousExecutor:
+ """
+ Synchronous executor: submit() blocks until it gets the result.
+ """
+ def submit(self, callback, *args):
+ return Future(callback, args)
+
+ def shutdown(self, wait):
+ pass
+
+ def get_default_executor():
+ logger.error("concurrent.futures module is missing: "
+ "use a synchrounous executor as fallback!")
+ return SynchronousExecutor()
+else:
+ FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
+ FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
+ ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
+
+ Future = concurrent.futures.Future
+ Error = concurrent.futures._base.Error
+ CancelledError = concurrent.futures.CancelledError
+ TimeoutError = concurrent.futures.TimeoutError
+
+ def get_default_executor():
+ return concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
diff --git a/asyncio/futures.py b/trollius/futures.py
index f46d008..867107c 100644
--- a/asyncio/futures.py
+++ b/trollius/futures.py
@@ -5,13 +5,17 @@ __all__ = ['CancelledError', 'TimeoutError',
'Future', 'wrap_future',
]
-import concurrent.futures._base
import logging
-import reprlib
import sys
import traceback
+try:
+ import reprlib # Python 3
+except ImportError:
+ import repr as reprlib # Python 2
+from . import compat
from . import events
+from . import executor
# States for Future.
_PENDING = 'PENDING'
@@ -20,10 +24,9 @@ _FINISHED = 'FINISHED'
_PY34 = sys.version_info >= (3, 4)
-# TODO: Do we really want to depend on concurrent.futures internals?
-Error = concurrent.futures._base.Error
-CancelledError = concurrent.futures.CancelledError
-TimeoutError = concurrent.futures.TimeoutError
+Error = executor.Error
+CancelledError = executor.CancelledError
+TimeoutError = executor.TimeoutError
STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging
@@ -33,7 +36,7 @@ class InvalidStateError(Error):
# TODO: Show the future, its state, the method, and the required state.
-class _TracebackLogger:
+class _TracebackLogger(object):
"""Helper to log a traceback upon destruction if not cleared.
This solves a nasty problem with Futures and Tasks that have an
@@ -75,7 +78,7 @@ class _TracebackLogger:
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
Future's callbacks have run. Since usually a Future has at least
- one callback (typically set by 'yield from') and usually that
+ one callback (typically set by 'yield From') and usually that
callback extracts the callback, thereby removing the need to
format the exception.
@@ -113,7 +116,7 @@ class _TracebackLogger:
self.loop.call_exception_handler({'message': msg})
-class Future:
+class Future(object):
"""This class is *almost* compatible with concurrent.futures.Future.
Differences:
@@ -137,12 +140,14 @@ class Future:
_loop = None
_source_traceback = None
- _blocking = False # proper use of future (yield vs yield from)
+ # Used by Python 2 to raise the exception with the original traceback
+ # in the exception() method in debug mode
+ _exception_tb = None
- _log_traceback = False # Used for Python 3.4 and later
- _tb_logger = None # Used for Python 3.3 only
+ _log_traceback = False # Used for Python >= 3.4
+ _tb_logger = None # Used for Python <= 3.3
- def __init__(self, *, loop=None):
+ def __init__(self, loop=None):
"""Initialize the future.
The optional event_loop argument allows to explicitly set the event
@@ -169,23 +174,23 @@ class Future:
if size == 1:
cb = format_cb(cb[0])
elif size == 2:
- cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1]))
+ cb = '{0}, {1}'.format(format_cb(cb[0]), format_cb(cb[1]))
elif size > 2:
- cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
- size-2,
- format_cb(cb[-1]))
+ cb = '{0}, <{1} more>, {2}'.format(format_cb(cb[0]),
+ size-2,
+ format_cb(cb[-1]))
return 'cb=[%s]' % cb
def _repr_info(self):
info = [self._state.lower()]
if self._state == _FINISHED:
if self._exception is not None:
- info.append('exception={!r}'.format(self._exception))
+ info.append('exception={0!r}'.format(self._exception))
else:
# use reprlib to limit the length of the output, especially
# for very long strings
result = reprlib.repr(self._result)
- info.append('result={}'.format(result))
+ info.append('result={0}'.format(result))
if self._callbacks:
info.append(self._format_callbacks())
if self._source_traceback:
@@ -273,8 +278,13 @@ class Future:
if self._tb_logger is not None:
self._tb_logger.clear()
self._tb_logger = None
+ exc_tb = self._exception_tb
+ self._exception_tb = None
if self._exception is not None:
- raise self._exception
+ if exc_tb is not None:
+ compat.reraise(type(self._exception), self._exception, exc_tb)
+ else:
+ raise self._exception
return self._result
def exception(self):
@@ -293,6 +303,7 @@ class Future:
if self._tb_logger is not None:
self._tb_logger.clear()
self._tb_logger = None
+ self._exception_tb = None
return self._exception
def add_done_callback(self, fn):
@@ -335,31 +346,61 @@ class Future:
InvalidStateError.
"""
if self._state != _PENDING:
- raise InvalidStateError('{}: {!r}'.format(self._state, self))
+ raise InvalidStateError('{0}: {1!r}'.format(self._state, self))
self._result = result
self._state = _FINISHED
self._schedule_callbacks()
+ def _get_exception_tb(self):
+ return self._exception_tb
+
def set_exception(self, exception):
+ self._set_exception_with_tb(exception, None)
+
+ def _set_exception_with_tb(self, exception, exc_tb):
"""Mark the future done and set an exception.
If the future is already done when this method is called, raises
InvalidStateError.
"""
if self._state != _PENDING:
- raise InvalidStateError('{}: {!r}'.format(self._state, self))
+ raise InvalidStateError('{0}: {1!r}'.format(self._state, self))
if isinstance(exception, type):
exception = exception()
self._exception = exception
+ if exc_tb is not None:
+ self._exception_tb = exc_tb
+ exc_tb = None
+ elif self._loop.get_debug() and not compat.PY3:
+ self._exception_tb = sys.exc_info()[2]
self._state = _FINISHED
self._schedule_callbacks()
if _PY34:
self._log_traceback = True
else:
self._tb_logger = _TracebackLogger(self, exception)
- # Arrange for the logger to be activated after all callbacks
- # have had a chance to call result() or exception().
- self._loop.call_soon(self._tb_logger.activate)
+ if hasattr(exception, '__traceback__'):
+ # Python 3: exception contains a link to the traceback
+
+ # Arrange for the logger to be activated after all callbacks
+ # have had a chance to call result() or exception().
+ self._loop.call_soon(self._tb_logger.activate)
+ else:
+ if self._loop.get_debug():
+ frame = sys._getframe(1)
+ tb = ['Traceback (most recent call last):\n']
+ if self._exception_tb is not None:
+ tb += traceback.format_tb(self._exception_tb)
+ else:
+ tb += traceback.format_stack(frame)
+ tb += traceback.format_exception_only(type(exception), exception)
+ self._tb_logger.tb = tb
+ else:
+ self._tb_logger.tb = traceback.format_exception_only(
+ type(exception),
+ exception)
+
+ self._tb_logger.exc = None
# Truly internal methods.
@@ -382,20 +423,18 @@ class Future:
result = other.result()
self.set_result(result)
- def __iter__(self):
- if not self.done():
- self._blocking = True
- yield self # This tells Task to wait for completion.
- assert self.done(), "yield from wasn't used with future"
- return self.result() # May raise too.
-
+if events.asyncio is not None:
+ # Also accept asyncio Future objects for interoperability
+ _FUTURE_CLASSES = (Future, events.asyncio.Future)
+else:
+ _FUTURE_CLASSES = Future
-def wrap_future(fut, *, loop=None):
+def wrap_future(fut, loop=None):
"""Wrap concurrent.futures.Future object."""
- if isinstance(fut, Future):
+ if isinstance(fut, _FUTURE_CLASSES):
return fut
- assert isinstance(fut, concurrent.futures.Future), \
- 'concurrent.futures.Future is expected, got {!r}'.format(fut)
+ assert isinstance(fut, executor.Future), \
+ 'concurrent.futures.Future is expected, got {0!r}'.format(fut)
if loop is None:
loop = events.get_event_loop()
new_future = Future(loop=loop)
diff --git a/asyncio/locks.py b/trollius/locks.py
index b943e9d..b704813 100644
--- a/asyncio/locks.py
+++ b/trollius/locks.py
@@ -6,7 +6,7 @@ import collections
from . import events
from . import futures
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
class _ContextManager:
@@ -15,7 +15,7 @@ class _ContextManager:
This enables the following idiom for acquiring and releasing a
lock around a block:
- with (yield from lock):
+ with (yield From(lock)):
<block>
while failing loudly when accidentally using:
@@ -39,7 +39,7 @@ class _ContextManager:
self._lock = None # Crudely prevent reuse.
-class Lock:
+class Lock(object):
"""Primitive lock objects.
A primitive lock is a synchronization primitive that is not owned
@@ -61,16 +61,16 @@ class Lock:
release() call resets the state to unlocked; first coroutine which
is blocked in acquire() is being processed.
- acquire() is a coroutine and should be called with 'yield from'.
+ acquire() is a coroutine and should be called with 'yield From'.
- Locks also support the context management protocol. '(yield from lock)'
+ Locks also support the context management protocol. '(yield From(lock))'
should be used as context manager expression.
Usage:
lock = Lock()
...
- yield from lock
+ yield From(lock)
try:
...
finally:
@@ -80,20 +80,20 @@ class Lock:
lock = Lock()
...
- with (yield from lock):
+ with (yield From(lock)):
...
Lock objects can be tested for locking state:
if not lock.locked():
- yield from lock
+ yield From(lock)
else:
# lock is acquired
...
"""
- def __init__(self, *, loop=None):
+ def __init__(self, loop=None):
self._waiters = collections.deque()
self._locked = False
if loop is not None:
@@ -102,11 +102,11 @@ class Lock:
self._loop = events.get_event_loop()
def __repr__(self):
- res = super().__repr__()
+ res = super(Lock, self).__repr__()
extra = 'locked' if self._locked else 'unlocked'
if self._waiters:
- extra = '{},waiters:{}'.format(extra, len(self._waiters))
- return '<{} [{}]>'.format(res[1:-1], extra)
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
def locked(self):
"""Return True if lock is acquired."""
@@ -121,14 +121,14 @@ class Lock:
"""
if not self._waiters and not self._locked:
self._locked = True
- return True
+ raise Return(True)
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
- yield from fut
+ yield From(fut)
self._locked = True
- return True
+ raise Return(True)
finally:
self._waiters.remove(fut)
@@ -155,31 +155,15 @@ class Lock:
def __enter__(self):
raise RuntimeError(
- '"yield from" should be used as context manager expression')
+ '"yield" should be used as context manager expression')
def __exit__(self, *args):
# This must exist because __enter__ exists, even though that
# always raises; that's how the with-statement works.
pass
- def __iter__(self):
- # This is not a coroutine. It is meant to enable the idiom:
- #
- # with (yield from lock):
- # <block>
- #
- # as an alternative to:
- #
- # yield from lock.acquire()
- # try:
- # <block>
- # finally:
- # lock.release()
- yield from self.acquire()
- return _ContextManager(self)
-
-
-class Event:
+
+class Event(object):
"""Asynchronous equivalent to threading.Event.
Class implementing event objects. An event manages a flag that can be set
@@ -188,7 +172,7 @@ class Event:
false.
"""
- def __init__(self, *, loop=None):
+ def __init__(self, loop=None):
self._waiters = collections.deque()
self._value = False
if loop is not None:
@@ -197,11 +181,11 @@ class Event:
self._loop = events.get_event_loop()
def __repr__(self):
- res = super().__repr__()
+ res = super(Event, self).__repr__()
extra = 'set' if self._value else 'unset'
if self._waiters:
- extra = '{},waiters:{}'.format(extra, len(self._waiters))
- return '<{} [{}]>'.format(res[1:-1], extra)
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
def is_set(self):
"""Return True if and only if the internal flag is true."""
@@ -234,18 +218,18 @@ class Event:
set() to set the flag to true, then return True.
"""
if self._value:
- return True
+ raise Return(True)
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
- yield from fut
- return True
+ yield From(fut)
+ raise Return(True)
finally:
self._waiters.remove(fut)
-class Condition:
+class Condition(object):
"""Asynchronous equivalent to threading.Condition.
This class implements condition variable objects. A condition variable
@@ -255,7 +239,7 @@ class Condition:
A new Lock object is created and used as the underlying lock.
"""
- def __init__(self, lock=None, *, loop=None):
+ def __init__(self, lock=None, loop=None):
if loop is not None:
self._loop = loop
else:
@@ -275,11 +259,11 @@ class Condition:
self._waiters = collections.deque()
def __repr__(self):
- res = super().__repr__()
+ res = super(Condition, self).__repr__()
extra = 'locked' if self.locked() else 'unlocked'
if self._waiters:
- extra = '{},waiters:{}'.format(extra, len(self._waiters))
- return '<{} [{}]>'.format(res[1:-1], extra)
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
@coroutine
def wait(self):
@@ -301,13 +285,13 @@ class Condition:
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
- yield from fut
- return True
+ yield From(fut)
+ raise Return(True)
finally:
self._waiters.remove(fut)
finally:
- yield from self.acquire()
+ yield From(self.acquire())
@coroutine
def wait_for(self, predicate):
@@ -319,9 +303,9 @@ class Condition:
"""
result = predicate()
while not result:
- yield from self.wait()
+ yield From(self.wait())
result = predicate()
- return result
+ raise Return(result)
def notify(self, n=1):
"""By default, wake up one coroutine waiting on this condition, if any.
@@ -357,18 +341,13 @@ class Condition:
def __enter__(self):
raise RuntimeError(
- '"yield from" should be used as context manager expression')
+ '"yield From" should be used as context manager expression')
def __exit__(self, *args):
pass
- def __iter__(self):
- # See comment in Lock.__iter__().
- yield from self.acquire()
- return _ContextManager(self)
-
-class Semaphore:
+class Semaphore(object):
"""A Semaphore implementation.
A semaphore manages an internal counter which is decremented by each
@@ -383,7 +362,7 @@ class Semaphore:
ValueError is raised.
"""
- def __init__(self, value=1, *, loop=None):
+ def __init__(self, value=1, loop=None):
if value < 0:
raise ValueError("Semaphore initial value must be >= 0")
self._value = value
@@ -394,12 +373,12 @@ class Semaphore:
self._loop = events.get_event_loop()
def __repr__(self):
- res = super().__repr__()
- extra = 'locked' if self.locked() else 'unlocked,value:{}'.format(
+ res = super(Semaphore, self).__repr__()
+ extra = 'locked' if self.locked() else 'unlocked,value:{0}'.format(
self._value)
if self._waiters:
- extra = '{},waiters:{}'.format(extra, len(self._waiters))
- return '<{} [{}]>'.format(res[1:-1], extra)
+ extra = '{0},waiters:{1}'.format(extra, len(self._waiters))
+ return '<{0} [{1}]>'.format(res[1:-1], extra)
def locked(self):
"""Returns True if semaphore can not be acquired immediately."""
@@ -417,14 +396,14 @@ class Semaphore:
"""
if not self._waiters and self._value > 0:
self._value -= 1
- return True
+ raise Return(True)
fut = futures.Future(loop=self._loop)
self._waiters.append(fut)
try:
- yield from fut
+ yield From(fut)
self._value -= 1
- return True
+ raise Return(True)
finally:
self._waiters.remove(fut)
@@ -441,16 +420,11 @@ class Semaphore:
def __enter__(self):
raise RuntimeError(
- '"yield from" should be used as context manager expression')
+ '"yield" should be used as context manager expression')
def __exit__(self, *args):
pass
- def __iter__(self):
- # See comment in Lock.__iter__().
- yield from self.acquire()
- return _ContextManager(self)
-
class BoundedSemaphore(Semaphore):
"""A bounded semaphore implementation.
@@ -459,11 +433,11 @@ class BoundedSemaphore(Semaphore):
above the initial value.
"""
- def __init__(self, value=1, *, loop=None):
+ def __init__(self, value=1, loop=None):
self._bound_value = value
- super().__init__(value, loop=loop)
+ super(BoundedSemaphore, self).__init__(value, loop=loop)
def release(self):
if self._value >= self._bound_value:
raise ValueError('BoundedSemaphore released too many times')
- super().release()
+ super(BoundedSemaphore, self).release()
diff --git a/asyncio/log.py b/trollius/log.py
index 23a7074..23a7074 100644
--- a/asyncio/log.py
+++ b/trollius/log.py
diff --git a/asyncio/proactor_events.py b/trollius/proactor_events.py
index e67cf65..37c8e57 100644
--- a/asyncio/proactor_events.py
+++ b/trollius/proactor_events.py
@@ -13,6 +13,9 @@ from . import constants
from . import futures
from . import transports
from .log import logger
+from .compat import flatten_bytes
+from .py33_exceptions import (BrokenPipeError,
+ ConnectionAbortedError, ConnectionResetError)
class _ProactorBasePipeTransport(transports._FlowControlMixin,
@@ -21,7 +24,7 @@ class _ProactorBasePipeTransport(transports._FlowControlMixin,
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
- super().__init__(extra, loop)
+ super(_ProactorBasePipeTransport, self).__init__(extra, loop)
self._set_extra(sock)
self._sock = sock
self._protocol = protocol
@@ -128,7 +131,8 @@ class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
- super().__init__(loop, sock, protocol, waiter, extra, server)
+ super(_ProactorReadPipeTransport, self).__init__(loop, sock, protocol,
+ waiter, extra, server)
self._paused = False
self._loop.call_soon(self._loop_reading)
@@ -205,9 +209,7 @@ class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
"""Transport for write pipes."""
def write(self, data):
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ data = flatten_bytes(data)
if self._eof_written:
raise RuntimeError('write_eof() already called')
@@ -286,7 +288,7 @@ class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
def __init__(self, *args, **kw):
- super().__init__(*args, **kw)
+ super(_ProactorWritePipeTransport, self).__init__(*args, **kw)
self._read_fut = self._loop._proactor.recv(self._sock, 16)
self._read_fut.add_done_callback(self._pipe_closed)
@@ -353,7 +355,7 @@ class _ProactorSocketTransport(_ProactorReadPipeTransport,
class BaseProactorEventLoop(base_events.BaseEventLoop):
def __init__(self, proactor):
- super().__init__()
+ super(BaseProactorEventLoop, self).__init__()
logger.debug('Using proactor: %s', proactor.__class__.__name__)
self._proactor = proactor
self._selector = proactor # convenient alias
@@ -389,7 +391,7 @@ class BaseProactorEventLoop(base_events.BaseEventLoop):
return
self._stop_accept_futures()
self._close_self_pipe()
- super().close()
+ super(BaseProactorEventLoop, self).close()
self._proactor.close()
self._proactor = None
self._selector = None
diff --git a/asyncio/protocols.py b/trollius/protocols.py
index 52fc25c..d218f5c 100644
--- a/asyncio/protocols.py
+++ b/trollius/protocols.py
@@ -4,7 +4,7 @@ __all__ = ['BaseProtocol', 'Protocol', 'DatagramProtocol',
'SubprocessProtocol']
-class BaseProtocol:
+class BaseProtocol(object):
"""Common base class for protocol interfaces.
Usually user implements protocols that derived from BaseProtocol
diff --git a/trollius/py27_weakrefset.py b/trollius/py27_weakrefset.py
new file mode 100644
index 0000000..990c3a6
--- /dev/null
+++ b/trollius/py27_weakrefset.py
@@ -0,0 +1,202 @@
+# Access WeakSet through the weakref module.
+# This code is separated-out because it is needed
+# by abc.py to load everything else at startup.
+
+from _weakref import ref
+
+__all__ = ['WeakSet']
+
+
+class _IterationGuard(object):
+ # This context manager registers itself in the current iterators of the
+ # weak container, such as to delay all removals until the context manager
+ # exits.
+ # This technique should be relatively thread-safe (since sets are).
+
+ def __init__(self, weakcontainer):
+ # Don't create cycles
+ self.weakcontainer = ref(weakcontainer)
+
+ def __enter__(self):
+ w = self.weakcontainer()
+ if w is not None:
+ w._iterating.add(self)
+ return self
+
+ def __exit__(self, e, t, b):
+ w = self.weakcontainer()
+ if w is not None:
+ s = w._iterating
+ s.remove(self)
+ if not s:
+ w._commit_removals()
+
+
+class WeakSet(object):
+ def __init__(self, data=None):
+ self.data = set()
+ def _remove(item, selfref=ref(self)):
+ self = selfref()
+ if self is not None:
+ if self._iterating:
+ self._pending_removals.append(item)
+ else:
+ self.data.discard(item)
+ self._remove = _remove
+ # A list of keys to be removed
+ self._pending_removals = []
+ self._iterating = set()
+ if data is not None:
+ self.update(data)
+
+ def _commit_removals(self):
+ l = self._pending_removals
+ discard = self.data.discard
+ while l:
+ discard(l.pop())
+
+ def __iter__(self):
+ with _IterationGuard(self):
+ for itemref in self.data:
+ item = itemref()
+ if item is not None:
+ yield item
+
+ def __len__(self):
+ return len(self.data) - len(self._pending_removals)
+
+ def __contains__(self, item):
+ try:
+ wr = ref(item)
+ except TypeError:
+ return False
+ return wr in self.data
+
+ def __reduce__(self):
+ return (self.__class__, (list(self),),
+ getattr(self, '__dict__', None))
+
+ __hash__ = None
+
+ def add(self, item):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.add(ref(item, self._remove))
+
+ def clear(self):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.clear()
+
+ def copy(self):
+ return self.__class__(self)
+
+ def pop(self):
+ if self._pending_removals:
+ self._commit_removals()
+ while True:
+ try:
+ itemref = self.data.pop()
+ except KeyError:
+ raise KeyError('pop from empty WeakSet')
+ item = itemref()
+ if item is not None:
+ return item
+
+ def remove(self, item):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.remove(ref(item))
+
+ def discard(self, item):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.discard(ref(item))
+
+ def update(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ for element in other:
+ self.add(element)
+
+ def __ior__(self, other):
+ self.update(other)
+ return self
+
+ def difference(self, other):
+ newset = self.copy()
+ newset.difference_update(other)
+ return newset
+ __sub__ = difference
+
+ def difference_update(self, other):
+ self.__isub__(other)
+ def __isub__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ if self is other:
+ self.data.clear()
+ else:
+ self.data.difference_update(ref(item) for item in other)
+ return self
+
+ def intersection(self, other):
+ return self.__class__(item for item in other if item in self)
+ __and__ = intersection
+
+ def intersection_update(self, other):
+ self.__iand__(other)
+ def __iand__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.intersection_update(ref(item) for item in other)
+ return self
+
+ def issubset(self, other):
+ return self.data.issubset(ref(item) for item in other)
+ __le__ = issubset
+
+ def __lt__(self, other):
+ return self.data < set(ref(item) for item in other)
+
+ def issuperset(self, other):
+ return self.data.issuperset(ref(item) for item in other)
+ __ge__ = issuperset
+
+ def __gt__(self, other):
+ return self.data > set(ref(item) for item in other)
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return self.data == set(ref(item) for item in other)
+
+ def __ne__(self, other):
+ opposite = self.__eq__(other)
+ if opposite is NotImplemented:
+ return NotImplemented
+ return not opposite
+
+ def symmetric_difference(self, other):
+ newset = self.copy()
+ newset.symmetric_difference_update(other)
+ return newset
+ __xor__ = symmetric_difference
+
+ def symmetric_difference_update(self, other):
+ self.__ixor__(other)
+ def __ixor__(self, other):
+ if self._pending_removals:
+ self._commit_removals()
+ if self is other:
+ self.data.clear()
+ else:
+ self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
+ return self
+
+ def union(self, other):
+ return self.__class__(e for s in (self, other) for e in s)
+ __or__ = union
+
+ def isdisjoint(self, other):
+ return len(self.intersection(other)) == 0
diff --git a/trollius/py33_exceptions.py b/trollius/py33_exceptions.py
new file mode 100644
index 0000000..94cbfca
--- /dev/null
+++ b/trollius/py33_exceptions.py
@@ -0,0 +1,144 @@
+__all__ = ['BlockingIOError', 'BrokenPipeError', 'ChildProcessError',
+ 'ConnectionRefusedError', 'ConnectionResetError',
+ 'InterruptedError', 'ConnectionAbortedError', 'PermissionError',
+ 'FileNotFoundError',
+ ]
+
+import errno
+import select
+import socket
+import sys
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+from .compat import PY33
+
+if PY33:
+ import builtins
+ BlockingIOError = builtins.BlockingIOError
+ BrokenPipeError = builtins.BrokenPipeError
+ ChildProcessError = builtins.ChildProcessError
+ ConnectionRefusedError = builtins.ConnectionRefusedError
+ ConnectionResetError = builtins.ConnectionResetError
+ InterruptedError = builtins.InterruptedError
+ ConnectionAbortedError = builtins.ConnectionAbortedError
+ PermissionError = builtins.PermissionError
+ FileNotFoundError = builtins.FileNotFoundError
+ ProcessLookupError = builtins.ProcessLookupError
+
+else:
+ # Python < 3.3
+ class BlockingIOError(OSError):
+ pass
+
+ class BrokenPipeError(OSError):
+ pass
+
+ class ChildProcessError(OSError):
+ pass
+
+ class ConnectionRefusedError(OSError):
+ pass
+
+ class InterruptedError(OSError):
+ pass
+
+ class ConnectionResetError(OSError):
+ pass
+
+ class ConnectionAbortedError(OSError):
+ pass
+
+ class PermissionError(OSError):
+ pass
+
+ class FileNotFoundError(OSError):
+ pass
+
+ class ProcessLookupError(OSError):
+ pass
+
+
+_MAP_ERRNO = {
+ errno.EACCES: PermissionError,
+ errno.EAGAIN: BlockingIOError,
+ errno.EALREADY: BlockingIOError,
+ errno.ECHILD: ChildProcessError,
+ errno.ECONNABORTED: ConnectionAbortedError,
+ errno.ECONNREFUSED: ConnectionRefusedError,
+ errno.ECONNRESET: ConnectionResetError,
+ errno.EINPROGRESS: BlockingIOError,
+ errno.EINTR: InterruptedError,
+ errno.ENOENT: FileNotFoundError,
+ errno.EPERM: PermissionError,
+ errno.EPIPE: BrokenPipeError,
+ errno.ESHUTDOWN: BrokenPipeError,
+ errno.EWOULDBLOCK: BlockingIOError,
+ errno.ESRCH: ProcessLookupError,
+}
+
+if sys.platform == 'win32':
+ from trollius import _overlapped
+ _MAP_ERRNO.update({
+ _overlapped.ERROR_CONNECTION_REFUSED: ConnectionRefusedError,
+ _overlapped.ERROR_CONNECTION_ABORTED: ConnectionAbortedError,
+ _overlapped.ERROR_NETNAME_DELETED: ConnectionResetError,
+ })
+
+
+def get_error_class(key, default):
+ return _MAP_ERRNO.get(key, default)
+
+
+if sys.version_info >= (3,):
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+else:
+ exec("""def reraise(tp, value, tb=None):
+ raise tp, value, tb
+""")
+
+
+def _wrap_error(exc, mapping, key):
+ if key not in mapping:
+ return
+ new_err_cls = mapping[key]
+ new_err = new_err_cls(*exc.args)
+
+ # raise a new exception with the original traceback
+ if hasattr(exc, '__traceback__'):
+ traceback = exc.__traceback__
+ else:
+ traceback = sys.exc_info()[2]
+ reraise(new_err_cls, new_err, traceback)
+
+
+if not PY33:
+ def wrap_error(func, *args, **kw):
+ """
+ Wrap socket.error, IOError, OSError, select.error to raise new specialized
+ exceptions of Python 3.3 like InterruptedError (PEP 3151).
+ """
+ try:
+ return func(*args, **kw)
+ except (socket.error, IOError, OSError) as exc:
+ if ssl is not None and isinstance(exc, ssl.SSLError):
+ raise
+ if hasattr(exc, 'winerror'):
+ _wrap_error(exc, _MAP_ERRNO, exc.winerror)
+ # _MAP_ERRNO does not contain all Windows errors.
+ # For some errors like "file not found", exc.errno should
+ # be used (ex: ENOENT).
+ _wrap_error(exc, _MAP_ERRNO, exc.errno)
+ raise
+ except select.error as exc:
+ if exc.args:
+ _wrap_error(exc, _MAP_ERRNO, exc.args[0])
+ raise
+else:
+ def wrap_error(func, *args, **kw):
+ return func(*args, **kw)
diff --git a/trollius/py33_winapi.py b/trollius/py33_winapi.py
new file mode 100644
index 0000000..792bc45
--- /dev/null
+++ b/trollius/py33_winapi.py
@@ -0,0 +1,75 @@
+
+__all__ = [
+ 'CloseHandle', 'CreateNamedPipe', 'CreateFile', 'ConnectNamedPipe',
+ 'NULL',
+ 'GENERIC_READ', 'GENERIC_WRITE', 'OPEN_EXISTING', 'INFINITE',
+ 'PIPE_ACCESS_INBOUND',
+ 'PIPE_ACCESS_DUPLEX', 'PIPE_TYPE_MESSAGE', 'PIPE_READMODE_MESSAGE',
+ 'PIPE_WAIT', 'PIPE_UNLIMITED_INSTANCES', 'NMPWAIT_WAIT_FOREVER',
+ 'FILE_FLAG_OVERLAPPED', 'FILE_FLAG_FIRST_PIPE_INSTANCE',
+ 'WaitForMultipleObjects', 'WaitForSingleObject',
+ 'WAIT_OBJECT_0', 'ERROR_IO_PENDING',
+ ]
+
+try:
+ # FIXME: use _overlapped on Python 3.3? see windows_utils.pipe()
+ from _winapi import (
+ CloseHandle, CreateNamedPipe, CreateFile, ConnectNamedPipe,
+ NULL,
+ GENERIC_READ, GENERIC_WRITE, OPEN_EXISTING, INFINITE,
+ PIPE_ACCESS_INBOUND,
+ PIPE_ACCESS_DUPLEX, PIPE_TYPE_MESSAGE, PIPE_READMODE_MESSAGE,
+ PIPE_WAIT, PIPE_UNLIMITED_INSTANCES, NMPWAIT_WAIT_FOREVER,
+ FILE_FLAG_OVERLAPPED, FILE_FLAG_FIRST_PIPE_INSTANCE,
+ WaitForMultipleObjects, WaitForSingleObject,
+ WAIT_OBJECT_0, ERROR_IO_PENDING,
+ )
+except ImportError:
+ # Python < 3.3
+ from _multiprocessing import win32
+ import _subprocess
+
+ from trollius import _overlapped
+
+ CloseHandle = win32.CloseHandle
+ CreateNamedPipe = win32.CreateNamedPipe
+ CreateFile = win32.CreateFile
+ NULL = win32.NULL
+
+ GENERIC_READ = win32.GENERIC_READ
+ GENERIC_WRITE = win32.GENERIC_WRITE
+ OPEN_EXISTING = win32.OPEN_EXISTING
+ INFINITE = win32.INFINITE
+
+ PIPE_ACCESS_INBOUND = win32.PIPE_ACCESS_INBOUND
+ PIPE_ACCESS_DUPLEX = win32.PIPE_ACCESS_DUPLEX
+ PIPE_READMODE_MESSAGE = win32.PIPE_READMODE_MESSAGE
+ PIPE_TYPE_MESSAGE = win32.PIPE_TYPE_MESSAGE
+ PIPE_WAIT = win32.PIPE_WAIT
+ PIPE_UNLIMITED_INSTANCES = win32.PIPE_UNLIMITED_INSTANCES
+ NMPWAIT_WAIT_FOREVER = win32.NMPWAIT_WAIT_FOREVER
+
+ FILE_FLAG_OVERLAPPED = 0x40000000
+ FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000
+
+ WAIT_OBJECT_0 = _subprocess.WAIT_OBJECT_0
+ WaitForSingleObject = _subprocess.WaitForSingleObject
+ ERROR_IO_PENDING = _overlapped.ERROR_IO_PENDING
+
+ def ConnectNamedPipe(handle, overlapped):
+ ov = _overlapped.Overlapped()
+ ov.ConnectNamedPipe(handle)
+ return ov
+
+ def WaitForMultipleObjects(events, wait_all, timeout):
+ if not wait_all:
+ raise NotImplementedError()
+
+ for ev in events:
+ res = WaitForSingleObject(ev, timeout)
+ if res != WAIT_OBJECT_0:
+ err = win32.GetLastError()
+ msg = _overlapped.FormatMessage(err)
+ raise WindowsError(err, msg)
+
+ return WAIT_OBJECT_0
diff --git a/trollius/py3_ssl.py b/trollius/py3_ssl.py
new file mode 100644
index 0000000..c592ee6
--- /dev/null
+++ b/trollius/py3_ssl.py
@@ -0,0 +1,149 @@
+"""
+Backport SSL functions and exceptions:
+- BACKPORT_SSL_ERRORS (bool)
+- SSLWantReadError, SSLWantWriteError, SSLEOFError
+- BACKPORT_SSL_CONTEXT (bool)
+- SSLContext
+- wrap_socket()
+- wrap_ssl_error()
+"""
+import errno
+import ssl
+import sys
+from trollius.py33_exceptions import _wrap_error
+
+__all__ = ["SSLContext", "BACKPORT_SSL_ERRORS", "BACKPORT_SSL_CONTEXT",
+ "SSLWantReadError", "SSLWantWriteError", "SSLEOFError",
+ ]
+
+try:
+ SSLWantReadError = ssl.SSLWantReadError
+ SSLWantWriteError = ssl.SSLWantWriteError
+ SSLEOFError = ssl.SSLEOFError
+ BACKPORT_SSL_ERRORS = False
+except AttributeError:
+ # Python < 3.3
+ BACKPORT_SSL_ERRORS = True
+
+ class SSLWantReadError(ssl.SSLError):
+ pass
+
+ class SSLWantWriteError(ssl.SSLError):
+ pass
+
+ class SSLEOFError(ssl.SSLError):
+ pass
+
+
+try:
+ SSLContext = ssl.SSLContext
+ BACKPORT_SSL_CONTEXT = False
+ wrap_socket = ssl.wrap_socket
+except AttributeError:
+ # Python < 3.2
+ BACKPORT_SSL_CONTEXT = True
+
+ if (sys.version_info < (2, 6, 6)):
+ # SSLSocket constructor has bugs in Python older than 2.6.6:
+ # http://bugs.python.org/issue5103
+ # http://bugs.python.org/issue7943
+ from socket import socket, error as socket_error, _delegate_methods
+ import _ssl
+
+ class BackportSSLSocket(ssl.SSLSocket):
+ # Override SSLSocket.__init__()
+ def __init__(self, sock, keyfile=None, certfile=None,
+ server_side=False, cert_reqs=ssl.CERT_NONE,
+ ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=None,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True):
+ socket.__init__(self, _sock=sock._sock)
+ # The initializer for socket overrides the methods send(), recv(), etc.
+ # in the instancce, which we don't need -- but we want to provide the
+ # methods defined in SSLSocket.
+ for attr in _delegate_methods:
+ try:
+ delattr(self, attr)
+ except AttributeError:
+ pass
+
+ if certfile and not keyfile:
+ keyfile = certfile
+ # see if it's connected
+ try:
+ socket.getpeername(self)
+ except socket_error as e:
+ if e.errno != errno.ENOTCONN:
+ raise
+ # no, no connection yet
+ self._connected = False
+ self._sslobj = None
+ else:
+ # yes, create the SSL object
+ self._connected = True
+ self._sslobj = _ssl.sslwrap(self._sock, server_side,
+ keyfile, certfile,
+ cert_reqs, ssl_version, ca_certs)
+ if do_handshake_on_connect:
+ self.do_handshake()
+ self.keyfile = keyfile
+ self.certfile = certfile
+ self.cert_reqs = cert_reqs
+ self.ssl_version = ssl_version
+ self.ca_certs = ca_certs
+ self.do_handshake_on_connect = do_handshake_on_connect
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+
+ def wrap_socket(sock, server_hostname=None, **kwargs):
+ # ignore server_hostname parameter, not supported
+ kwargs.pop('server_hostname', None)
+ return BackportSSLSocket(sock, **kwargs)
+ else:
+ _wrap_socket = ssl.wrap_socket
+
+ def wrap_socket(sock, **kwargs):
+ # ignore server_hostname parameter, not supported
+ kwargs.pop('server_hostname', None)
+ return _wrap_socket(sock, **kwargs)
+
+
+ class SSLContext(object):
+ def __init__(self, protocol=ssl.PROTOCOL_SSLv23):
+ self.protocol = protocol
+ self.certfile = None
+ self.keyfile = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def wrap_socket(self, sock, **kwargs):
+ return wrap_socket(sock,
+ ssl_version=self.protocol,
+ certfile=self.certfile,
+ keyfile=self.keyfile,
+ **kwargs)
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_NONE
+
+
+if BACKPORT_SSL_ERRORS:
+ _MAP_ERRORS = {
+ ssl.SSL_ERROR_WANT_READ: SSLWantReadError,
+ ssl.SSL_ERROR_WANT_WRITE: SSLWantWriteError,
+ ssl.SSL_ERROR_EOF: SSLEOFError,
+ }
+
+ def wrap_ssl_error(func, *args, **kw):
+ try:
+ return func(*args, **kw)
+ except ssl.SSLError as exc:
+ if exc.args:
+ _wrap_error(exc, _MAP_ERRORS, exc.args[0])
+ raise
+else:
+ def wrap_ssl_error(func, *args, **kw):
+ return func(*args, **kw)
diff --git a/asyncio/queues.py b/trollius/queues.py
index 41551a9..ca04140 100644
--- a/asyncio/queues.py
+++ b/trollius/queues.py
@@ -9,7 +9,7 @@ import heapq
from . import events
from . import futures
from . import locks
-from .tasks import coroutine
+from .coroutines import coroutine, From, Return
class QueueEmpty(Exception):
@@ -22,19 +22,19 @@ class QueueFull(Exception):
pass
-class Queue:
+class Queue(object):
"""A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
- is an integer greater than 0, then "yield from put()" will block when the
+ is an integer greater than 0, then "yield From(put())" will block when the
queue reaches maxsize, until an item is removed by get().
Unlike the standard library Queue, you can reliably know this Queue's size
- with qsize(), since your single-threaded asyncio application won't be
+ with qsize(), since your single-threaded trollius application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""
- def __init__(self, maxsize=0, *, loop=None):
+ def __init__(self, maxsize=0, loop=None):
if loop is None:
self._loop = events.get_event_loop()
else:
@@ -57,20 +57,20 @@ class Queue:
self._queue.append(item)
def __repr__(self):
- return '<{} at {:#x} {}>'.format(
+ return '<{0} at {1:#x} {2}>'.format(
type(self).__name__, id(self), self._format())
def __str__(self):
- return '<{} {}>'.format(type(self).__name__, self._format())
+ return '<{0} {1}>'.format(type(self).__name__, self._format())
def _format(self):
- result = 'maxsize={!r}'.format(self._maxsize)
+ result = 'maxsize={0!r}'.format(self._maxsize)
if getattr(self, '_queue', None):
- result += ' _queue={!r}'.format(list(self._queue))
+ result += ' _queue={0!r}'.format(list(self._queue))
if self._getters:
- result += ' _getters[{}]'.format(len(self._getters))
+ result += ' _getters[{0}]'.format(len(self._getters))
if self._putters:
- result += ' _putters[{}]'.format(len(self._putters))
+ result += ' _putters[{0}]'.format(len(self._putters))
return result
def _consume_done_getters(self):
@@ -111,7 +111,7 @@ class Queue:
def put(self, item):
"""Put an item into the queue.
- If you yield from put(), wait until a free slot is available
+ If you yield From(put()), wait until a free slot is available
before adding item.
"""
self._consume_done_getters()
@@ -130,7 +130,7 @@ class Queue:
waiter = futures.Future(loop=self._loop)
self._putters.append((item, waiter))
- yield from waiter
+ yield From(waiter)
else:
self._put(item)
@@ -161,7 +161,7 @@ class Queue:
def get(self):
"""Remove and return an item from the queue.
- If you yield from get(), wait until a item is available.
+ If you yield From(get()), wait until a item is available.
"""
self._consume_done_putters()
if self._putters:
@@ -175,15 +175,16 @@ class Queue:
# ChannelTest.test_wait.
self._loop.call_soon(putter._set_result_unless_cancelled, None)
- return self._get()
+ raise Return(self._get())
elif self.qsize():
- return self._get()
+ raise Return(self._get())
else:
waiter = futures.Future(loop=self._loop)
self._getters.append(waiter)
- return (yield from waiter)
+ result = yield From(waiter)
+ raise Return(result)
def get_nowait(self):
"""Remove and return an item from the queue.
@@ -238,8 +239,8 @@ class LifoQueue(Queue):
class JoinableQueue(Queue):
"""A subclass of Queue with task_done() and join() methods."""
- def __init__(self, maxsize=0, *, loop=None):
- super().__init__(maxsize=maxsize, loop=loop)
+ def __init__(self, maxsize=0, loop=None):
+ super(JoinableQueue, self).__init__(maxsize=maxsize, loop=loop)
self._unfinished_tasks = 0
self._finished = locks.Event(loop=self._loop)
self._finished.set()
@@ -247,11 +248,11 @@ class JoinableQueue(Queue):
def _format(self):
result = Queue._format(self)
if self._unfinished_tasks:
- result += ' tasks={}'.format(self._unfinished_tasks)
+ result += ' tasks={0}'.format(self._unfinished_tasks)
return result
def _put(self, item):
- super()._put(item)
+ super(JoinableQueue, self)._put(item)
self._unfinished_tasks += 1
self._finished.clear()
@@ -285,4 +286,4 @@ class JoinableQueue(Queue):
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
- yield from self._finished.wait()
+ yield From(self._finished.wait())
diff --git a/asyncio/selector_events.py b/trollius/selector_events.py
index 7df8b86..2f13427 100644
--- a/asyncio/selector_events.py
+++ b/trollius/selector_events.py
@@ -10,8 +10,12 @@ import collections
import errno
import functools
import socket
+import sys
try:
import ssl
+ from .py3_ssl import (
+ wrap_ssl_error, SSLContext, BACKPORT_SSL_CONTEXT, SSLWantReadError,
+ SSLWantWriteError)
except ImportError: # pragma: no cover
ssl = None
@@ -21,7 +25,25 @@ from . import events
from . import futures
from . import selectors
from . import transports
+from .compat import flatten_bytes
from .log import logger
+from .py33_exceptions import (wrap_error,
+ BlockingIOError, InterruptedError, ConnectionAbortedError, BrokenPipeError,
+ ConnectionResetError)
+
+# On Mac OS 10.6 with Python 2.6.1 or OpenIndiana 148 with Python 2.6.4,
+# _SelectorSslTransport._read_ready() hangs if the socket has no data.
+# Example: test_events.test_create_server_ssl()
+_SSL_REQUIRES_SELECT = (sys.version_info < (2, 6, 6))
+if _SSL_REQUIRES_SELECT:
+ import select
+
+
+def _get_socket_error(sock, address):
+ err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err != 0:
+ # Jump to the except clause below.
+ raise OSError(err, 'Connect call failed %s' % (address,))
def _test_selector_event(selector, fd, event):
@@ -42,7 +64,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""
def __init__(self, selector=None):
- super().__init__()
+ super(BaseSelectorEventLoop, self).__init__()
if selector is None:
selector = selectors.DefaultSelector()
@@ -50,12 +72,12 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
self._selector = selector
self._make_self_pipe()
- def _make_socket_transport(self, sock, protocol, waiter=None, *,
+ def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
return _SelectorSocketTransport(self, sock, protocol, waiter,
extra, server)
- def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *,
+ def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter,
server_side=False, server_hostname=None,
extra=None, server=None):
return _SelectorSslTransport(
@@ -73,7 +95,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if self.is_closed():
return
self._close_self_pipe()
- super().close()
+ super(BaseSelectorEventLoop, self).close()
if self._selector is not None:
self._selector.close()
self._selector = None
@@ -103,7 +125,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
def _read_from_self(self):
while True:
try:
- data = self._ssock.recv(4096)
+ data = wrap_error(self._ssock.recv, 4096)
if not data:
break
self._process_self_data(data)
@@ -121,7 +143,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
csock = self._csock
if csock is not None:
try:
- csock.send(b'\0')
+ wrap_error(csock.send, b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
@@ -136,14 +158,14 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
def _accept_connection(self, protocol_factory, sock,
sslcontext=None, server=None):
try:
- conn, addr = sock.accept()
+ conn, addr = wrap_error(sock.accept)
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
conn.setblocking(False)
except (BlockingIOError, InterruptedError, ConnectionAbortedError):
pass # False alarm.
- except OSError as exc:
+ except socket.error as exc:
# There's nowhere to send the error, so just log it.
# TODO: Someone will want an error handler for this.
if exc.errno in (errno.EMFILE, errno.ENFILE,
@@ -278,7 +300,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if fut.cancelled():
return
try:
- data = sock.recv(n)
+ data = wrap_error(sock.recv, n)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_recv, fut, True, sock, n)
except Exception as exc:
@@ -315,7 +337,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
return
try:
- n = sock.send(data)
+ n = wrap_error(sock.send, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
@@ -356,7 +378,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
try:
while True:
try:
- sock.connect(address)
+ wrap_error(sock.connect, address)
except InterruptedError:
continue
else:
@@ -378,10 +400,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
return
try:
- err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- if err != 0:
- # Jump to any except clause below.
- raise OSError(err, 'Connect call failed %s' % (address,))
+ wrap_error(_get_socket_error, sock, address)
except (BlockingIOError, InterruptedError):
# socket is still registered, the callback will be retried later
pass
@@ -413,7 +432,7 @@ class BaseSelectorEventLoop(base_events.BaseEventLoop):
if fut.cancelled():
return
try:
- conn, address = sock.accept()
+ conn, address = wrap_error(sock.accept)
conn.setblocking(False)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_accept, fut, True, sock)
@@ -449,7 +468,7 @@ class _SelectorTransport(transports._FlowControlMixin,
_buffer_factory = bytearray # Constructs initial value for self._buffer.
def __init__(self, loop, sock, protocol, extra, server=None):
- super().__init__(extra, loop)
+ super(_SelectorTransport, self).__init__(extra, loop)
self._extra['socket'] = sock
self._extra['sockname'] = sock.getsockname()
if 'peername' not in self._extra:
@@ -524,7 +543,7 @@ class _SelectorTransport(transports._FlowControlMixin,
if self._conn_lost:
return
if self._buffer:
- self._buffer.clear()
+ del self._buffer[:]
self._loop.remove_writer(self._sock_fd)
if not self._closing:
self._closing = True
@@ -553,7 +572,7 @@ class _SelectorSocketTransport(_SelectorTransport):
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
- super().__init__(loop, sock, protocol, extra, server)
+ super(_SelectorSocketTransport, self).__init__(loop, sock, protocol, extra, server)
self._eof = False
self._paused = False
@@ -585,7 +604,7 @@ class _SelectorSocketTransport(_SelectorTransport):
def _read_ready(self):
try:
- data = self._sock.recv(self.max_size)
+ data = wrap_error(self._sock.recv, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
@@ -606,9 +625,7 @@ class _SelectorSocketTransport(_SelectorTransport):
self.close()
def write(self, data):
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ data = flatten_bytes(data)
if self._eof:
raise RuntimeError('Cannot call write() after write_eof()')
if not data:
@@ -623,7 +640,7 @@ class _SelectorSocketTransport(_SelectorTransport):
if not self._buffer:
# Optimization: try to send now.
try:
- n = self._sock.send(data)
+ n = wrap_error(self._sock.send, data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
@@ -643,13 +660,14 @@ class _SelectorSocketTransport(_SelectorTransport):
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
+ data = flatten_bytes(self._buffer)
try:
- n = self._sock.send(self._buffer)
+ n = wrap_error(self._sock.send, data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
- self._buffer.clear()
+ del self._buffer[:]
self._fatal_error(exc, 'Fatal write error on socket transport')
else:
if n:
@@ -698,11 +716,12 @@ class _SelectorSslTransport(_SelectorTransport):
sslcontext.check_hostname = False
else:
# Fallback for Python 3.3.
- sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- sslcontext.options |= ssl.OP_NO_SSLv2
- sslcontext.options |= ssl.OP_NO_SSLv3
- sslcontext.set_default_verify_paths()
- sslcontext.verify_mode = ssl.CERT_REQUIRED
+ sslcontext = SSLContext(ssl.PROTOCOL_SSLv23)
+ if not BACKPORT_SSL_CONTEXT:
+ sslcontext.options |= ssl.OP_NO_SSLv2
+ sslcontext.options |= ssl.OP_NO_SSLv3
+ sslcontext.set_default_verify_paths()
+ sslcontext.verify_mode = ssl.CERT_REQUIRED
wrap_kwargs = {
'server_side': server_side,
@@ -712,7 +731,7 @@ class _SelectorSslTransport(_SelectorTransport):
wrap_kwargs['server_hostname'] = server_hostname
sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)
- super().__init__(loop, sslsock, protocol, extra, server)
+ super(_SelectorSslTransport, self).__init__(loop, sslsock, protocol, extra, server)
self._server_hostname = server_hostname
self._waiter = waiter
@@ -731,12 +750,12 @@ class _SelectorSslTransport(_SelectorTransport):
def _on_handshake(self, start_time):
try:
- self._sock.do_handshake()
- except ssl.SSLWantReadError:
+ wrap_ssl_error(self._sock.do_handshake)
+ except SSLWantReadError:
self._loop.add_reader(self._sock_fd,
self._on_handshake, start_time)
return
- except ssl.SSLWantWriteError:
+ except SSLWantWriteError:
self._loop.add_writer(self._sock_fd,
self._on_handshake, start_time)
return
@@ -778,8 +797,9 @@ class _SelectorSslTransport(_SelectorTransport):
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=self._sock.cipher(),
- compression=self._sock.compression(),
)
+ if hasattr(self._sock, 'compression'):
+ self._extra['compression'] = self._sock.compression()
self._read_wants_write = False
self._write_wants_read = False
@@ -820,6 +840,9 @@ class _SelectorSslTransport(_SelectorTransport):
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
+ def _sock_recv(self):
+ return wrap_ssl_error(self._sock.recv, self.max_size)
+
def _read_ready(self):
if self._write_wants_read:
self._write_wants_read = False
@@ -829,10 +852,16 @@ class _SelectorSslTransport(_SelectorTransport):
self._loop.add_writer(self._sock_fd, self._write_ready)
try:
- data = self._sock.recv(self.max_size)
- except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):
+ if _SSL_REQUIRES_SELECT:
+ rfds = (self._sock.fileno(),)
+ rfds = select.select(rfds, (), (), 0.0)[0]
+ if not rfds:
+ # False alarm.
+ return
+ data = wrap_error(self._sock_recv)
+ except (BlockingIOError, InterruptedError, SSLWantReadError):
pass
- except ssl.SSLWantWriteError:
+ except SSLWantWriteError:
self._read_wants_write = True
self._loop.remove_reader(self._sock_fd)
self._loop.add_writer(self._sock_fd, self._write_ready)
@@ -861,17 +890,18 @@ class _SelectorSslTransport(_SelectorTransport):
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._buffer:
+ data = flatten_bytes(self._buffer)
try:
- n = self._sock.send(self._buffer)
- except (BlockingIOError, InterruptedError, ssl.SSLWantWriteError):
+ n = wrap_error(self._sock.send, data)
+ except (BlockingIOError, InterruptedError, SSLWantWriteError):
n = 0
- except ssl.SSLWantReadError:
+ except SSLWantReadError:
n = 0
self._loop.remove_writer(self._sock_fd)
self._write_wants_read = True
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
- self._buffer.clear()
+ del self._buffer[:]
self._fatal_error(exc, 'Fatal write error on SSL transport')
return
@@ -886,9 +916,7 @@ class _SelectorSslTransport(_SelectorTransport):
self._call_connection_lost(None)
def write(self, data):
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ data = flatten_bytes(data)
if not data:
return
@@ -915,7 +943,8 @@ class _SelectorDatagramTransport(_SelectorTransport):
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
- super().__init__(loop, sock, protocol, extra)
+ super(_SelectorDatagramTransport, self).__init__(loop, sock,
+ protocol, extra)
self._address = address
self._loop.add_reader(self._sock_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
@@ -928,7 +957,7 @@ class _SelectorDatagramTransport(_SelectorTransport):
def _read_ready(self):
try:
- data, addr = self._sock.recvfrom(self.max_size)
+ data, addr = wrap_error(self._sock.recvfrom, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
@@ -939,9 +968,7 @@ class _SelectorDatagramTransport(_SelectorTransport):
self._protocol.datagram_received(data, addr)
def sendto(self, data, addr=None):
- if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ data = flatten_bytes(data)
if not data:
return
@@ -959,9 +986,9 @@ class _SelectorDatagramTransport(_SelectorTransport):
# Attempt to send it right away first.
try:
if self._address:
- self._sock.send(data)
+ wrap_error(self._sock.send, data)
else:
- self._sock.sendto(data, addr)
+ wrap_error(self._sock.sendto, data, addr)
return
except (BlockingIOError, InterruptedError):
self._loop.add_writer(self._sock_fd, self._sendto_ready)
@@ -982,9 +1009,9 @@ class _SelectorDatagramTransport(_SelectorTransport):
data, addr = self._buffer.popleft()
try:
if self._address:
- self._sock.send(data)
+ wrap_error(self._sock.send, data)
else:
- self._sock.sendto(data, addr)
+ wrap_error(self._sock.sendto, data, addr)
except (BlockingIOError, InterruptedError):
self._buffer.appendleft((data, addr)) # Try again later.
break
diff --git a/asyncio/selectors.py b/trollius/selectors.py
index faa2d3d..86fe5fc 100644
--- a/asyncio/selectors.py
+++ b/trollius/selectors.py
@@ -11,6 +11,9 @@ import math
import select
import sys
+from .py33_exceptions import wrap_error, InterruptedError
+from .compat import integer_types
+
# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)
@@ -29,16 +32,16 @@ def _fileobj_to_fd(fileobj):
Raises:
ValueError if the object is invalid
"""
- if isinstance(fileobj, int):
+ if isinstance(fileobj, integer_types):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: "
- "{!r}".format(fileobj)) from None
+ "{0!r}".format(fileobj))
if fd < 0:
- raise ValueError("Invalid file descriptor: {}".format(fd))
+ raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
@@ -61,13 +64,13 @@ class _SelectorMapping(Mapping):
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ raise KeyError("{0!r} is not registered".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
-class BaseSelector(metaclass=ABCMeta):
+class BaseSelector(object):
"""Selector abstract base class.
A selector supports registering file objects to be monitored for specific
@@ -81,6 +84,7 @@ class BaseSelector(metaclass=ABCMeta):
depending on the platform. The default `Selector` class uses the most
efficient implementation on the current platform.
"""
+ __metaclass__ = ABCMeta
@abstractmethod
def register(self, fileobj, events, data=None):
@@ -177,7 +181,7 @@ class BaseSelector(metaclass=ABCMeta):
try:
return mapping[fileobj]
except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ raise KeyError("{0!r} is not registered".format(fileobj))
@abstractmethod
def get_map(self):
@@ -221,12 +225,12 @@ class _BaseSelectorImpl(BaseSelector):
def register(self, fileobj, events, data=None):
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
- raise ValueError("Invalid events: {!r}".format(events))
+ raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
- raise KeyError("{!r} (FD {}) is already registered"
+ raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
@@ -236,7 +240,7 @@ class _BaseSelectorImpl(BaseSelector):
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
@@ -244,7 +248,7 @@ class _BaseSelectorImpl(BaseSelector):
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
- raise KeyError("{!r} is not registered".format(fileobj)) from None
+ raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
@@ -279,12 +283,12 @@ class SelectSelector(_BaseSelectorImpl):
"""Select-based selector."""
def __init__(self):
- super().__init__()
+ super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
@@ -292,7 +296,7 @@ class SelectSelector(_BaseSelectorImpl):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
@@ -308,7 +312,8 @@ class SelectSelector(_BaseSelectorImpl):
timeout = None if timeout is None else max(timeout, 0)
ready = []
try:
- r, w, _ = self._select(self._readers, self._writers, [], timeout)
+ r, w, _ = wrap_error(self._select,
+ self._readers, self._writers, [], timeout)
except InterruptedError:
return ready
r = set(r)
@@ -332,11 +337,11 @@ if hasattr(select, 'poll'):
"""Poll-based selector."""
def __init__(self):
- super().__init__()
+ super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(PollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
@@ -346,7 +351,7 @@ if hasattr(select, 'poll'):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
@@ -358,10 +363,10 @@ if hasattr(select, 'poll'):
else:
# poll() has a resolution of 1 millisecond, round away from
# zero to wait *at least* timeout seconds.
- timeout = math.ceil(timeout * 1e3)
+ timeout = int(math.ceil(timeout * 1e3))
ready = []
try:
- fd_event_list = self._poll.poll(timeout)
+ fd_event_list = wrap_error(self._poll.poll, timeout)
except InterruptedError:
return ready
for fd, event in fd_event_list:
@@ -383,14 +388,14 @@ if hasattr(select, 'epoll'):
"""Epoll-based selector."""
def __init__(self):
- super().__init__()
+ super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(EpollSelector, self).register(fileobj, events, data)
epoll_events = 0
if events & EVENT_READ:
epoll_events |= select.EPOLLIN
@@ -400,7 +405,7 @@ if hasattr(select, 'epoll'):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(EpollSelector, self).unregister(fileobj)
try:
self._epoll.unregister(key.fd)
except OSError:
@@ -426,7 +431,7 @@ if hasattr(select, 'epoll'):
ready = []
try:
- fd_event_list = self._epoll.poll(timeout, max_ev)
+ fd_event_list = wrap_error(self._epoll.poll, timeout, max_ev)
except InterruptedError:
return ready
for fd, event in fd_event_list:
@@ -443,7 +448,7 @@ if hasattr(select, 'epoll'):
def close(self):
self._epoll.close()
- super().close()
+ super(EpollSelector, self).close()
if hasattr(select, 'devpoll'):
@@ -452,14 +457,14 @@ if hasattr(select, 'devpoll'):
"""Solaris /dev/poll selector."""
def __init__(self):
- super().__init__()
+ super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
@@ -469,7 +474,7 @@ if hasattr(select, 'devpoll'):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
@@ -501,7 +506,7 @@ if hasattr(select, 'devpoll'):
def close(self):
self._devpoll.close()
- super().close()
+ super(DevpollSelector, self).close()
if hasattr(select, 'kqueue'):
@@ -510,14 +515,14 @@ if hasattr(select, 'kqueue'):
"""Kqueue-based selector."""
def __init__(self):
- super().__init__()
+ super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
- key = super().register(fileobj, events, data)
+ key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_ADD)
@@ -529,7 +534,7 @@ if hasattr(select, 'kqueue'):
return key
def unregister(self, fileobj):
- key = super().unregister(fileobj)
+ key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kev = select.kevent(key.fd, select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
@@ -554,7 +559,8 @@ if hasattr(select, 'kqueue'):
max_ev = len(self._fd_to_key)
ready = []
try:
- kev_list = self._kqueue.control(None, max_ev, timeout)
+ kev_list = wrap_error(self._kqueue.control,
+ None, max_ev, timeout)
except InterruptedError:
return ready
for kev in kev_list:
@@ -573,7 +579,7 @@ if hasattr(select, 'kqueue'):
def close(self):
self._kqueue.close()
- super().close()
+ super(KqueueSelector, self).close()
# Choose the best implementation: roughly, epoll|kqueue|devpoll > poll > select.
diff --git a/asyncio/streams.py b/trollius/streams.py
index c77eb60..8c2a32b 100644
--- a/asyncio/streams.py
+++ b/trollius/streams.py
@@ -14,7 +14,8 @@ from . import coroutines
from . import events
from . import futures
from . import protocols
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
+from .py33_exceptions import ConnectionResetError
from .log import logger
@@ -36,7 +37,7 @@ class IncompleteReadError(EOFError):
@coroutine
-def open_connection(host=None, port=None, *,
+def open_connection(host=None, port=None,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""A wrapper for create_connection() returning a (reader, writer) pair.
@@ -59,14 +60,14 @@ def open_connection(host=None, port=None, *,
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
- transport, _ = yield from loop.create_connection(
- lambda: protocol, host, port, **kwds)
+ transport, _ = yield From(loop.create_connection(
+ lambda: protocol, host, port, **kwds))
writer = StreamWriter(transport, protocol, reader, loop)
- return reader, writer
+ raise Return(reader, writer)
@coroutine
-def start_server(client_connected_cb, host=None, port=None, *,
+def start_server(client_connected_cb, host=None, port=None,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Start a socket server, call back for each client connected.
@@ -98,28 +99,29 @@ def start_server(client_connected_cb, host=None, port=None, *,
loop=loop)
return protocol
- return (yield from loop.create_server(factory, host, port, **kwds))
+ result = yield From(loop.create_server(factory, host, port, **kwds))
+ raise Return(result)
if hasattr(socket, 'AF_UNIX'):
# UNIX Domain Sockets are supported on this platform
@coroutine
- def open_unix_connection(path=None, *,
+ def open_unix_connection(path=None,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `open_connection` but works with UNIX Domain Sockets."""
if loop is None:
loop = events.get_event_loop()
reader = StreamReader(limit=limit, loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
- transport, _ = yield from loop.create_unix_connection(
- lambda: protocol, path, **kwds)
+ transport, _ = yield From(loop.create_unix_connection(
+ lambda: protocol, path, **kwds))
writer = StreamWriter(transport, protocol, reader, loop)
- return reader, writer
+ raise Return(reader, writer)
@coroutine
- def start_unix_server(client_connected_cb, path=None, *,
+ def start_unix_server(client_connected_cb, path=None,
loop=None, limit=_DEFAULT_LIMIT, **kwds):
"""Similar to `start_server` but works with UNIX Domain Sockets."""
if loop is None:
@@ -131,7 +133,8 @@ if hasattr(socket, 'AF_UNIX'):
loop=loop)
return protocol
- return (yield from loop.create_unix_server(factory, path, **kwds))
+ res = (yield From(loop.create_unix_server(factory, path, **kwds)))
+ raise Return(res)
class FlowControlMixin(protocols.Protocol):
@@ -194,7 +197,7 @@ class FlowControlMixin(protocols.Protocol):
assert waiter is None or waiter.cancelled()
waiter = futures.Future(loop=self._loop)
self._drain_waiter = waiter
- yield from waiter
+ yield From(waiter)
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
@@ -207,7 +210,7 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
"""
def __init__(self, stream_reader, client_connected_cb=None, loop=None):
- super().__init__(loop=loop)
+ super(StreamReaderProtocol, self).__init__(loop=loop)
self._stream_reader = stream_reader
self._stream_writer = None
self._client_connected_cb = client_connected_cb
@@ -228,7 +231,7 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
self._stream_reader.feed_eof()
else:
self._stream_reader.set_exception(exc)
- super().connection_lost(exc)
+ super(StreamReaderProtocol, self).connection_lost(exc)
def data_received(self, data):
self._stream_reader.feed_data(data)
@@ -237,7 +240,7 @@ class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
self._stream_reader.feed_eof()
-class StreamWriter:
+class StreamWriter(object):
"""Wraps a Transport.
This exposes write(), writelines(), [can_]write_eof(),
@@ -290,16 +293,16 @@ class StreamWriter:
The intended use is to write
w.write(data)
- yield from w.drain()
+ yield From(w.drain())
"""
if self._reader is not None:
exc = self._reader.exception()
if exc is not None:
raise exc
- yield from self._protocol._drain_helper()
+ yield From(self._protocol._drain_helper())
-class StreamReader:
+class StreamReader(object):
def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
# The line length limit is a security feature;
@@ -398,7 +401,7 @@ class StreamReader:
ichar = self._buffer.find(b'\n')
if ichar < 0:
line.extend(self._buffer)
- self._buffer.clear()
+ del self._buffer[:]
else:
ichar += 1
line.extend(self._buffer[:ichar])
@@ -415,12 +418,12 @@ class StreamReader:
if not_enough:
self._waiter = self._create_waiter('readline')
try:
- yield from self._waiter
+ yield From(self._waiter)
finally:
self._waiter = None
self._maybe_resume_transport()
- return bytes(line)
+ raise Return(bytes(line))
@coroutine
def read(self, n=-1):
@@ -428,7 +431,7 @@ class StreamReader:
raise self._exception
if not n:
- return b''
+ raise Return(b'')
if n < 0:
# This used to just loop creating a new waiter hoping to
@@ -437,29 +440,29 @@ class StreamReader:
# bytes. So just call self.read(self._limit) until EOF.
blocks = []
while True:
- block = yield from self.read(self._limit)
+ block = yield From(self.read(self._limit))
if not block:
break
blocks.append(block)
- return b''.join(blocks)
+ raise Return(b''.join(blocks))
else:
if not self._buffer and not self._eof:
self._waiter = self._create_waiter('read')
try:
- yield from self._waiter
+ yield From(self._waiter)
finally:
self._waiter = None
if n < 0 or len(self._buffer) <= n:
data = bytes(self._buffer)
- self._buffer.clear()
+ del self._buffer[:]
else:
# n > 0 and len(self._buffer) > n
data = bytes(self._buffer[:n])
del self._buffer[:n]
self._maybe_resume_transport()
- return data
+ raise Return(data)
@coroutine
def readexactly(self, n):
@@ -475,11 +478,11 @@ class StreamReader:
blocks = []
while n > 0:
- block = yield from self.read(n)
+ block = yield From(self.read(n))
if not block:
partial = b''.join(blocks)
raise IncompleteReadError(partial, len(partial) + n)
blocks.append(block)
n -= len(block)
- return b''.join(blocks)
+ raise Return(b''.join(blocks))
diff --git a/asyncio/subprocess.py b/trollius/subprocess.py
index f6d6a14..cd2398a 100644
--- a/asyncio/subprocess.py
+++ b/trollius/subprocess.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
import collections
@@ -8,13 +10,14 @@ from . import futures
from . import protocols
from . import streams
from . import tasks
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
+from .py33_exceptions import (BrokenPipeError, ConnectionResetError,
+ ProcessLookupError)
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
-DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
@@ -22,7 +25,7 @@ class SubprocessStreamProtocol(streams.FlowControlMixin,
"""Like StreamReaderProtocol, but for a subprocess."""
def __init__(self, limit, loop):
- super().__init__(loop=loop)
+ super(SubprocessStreamProtocol, self).__init__(loop=loop)
self._limit = limit
self.stdin = self.stdout = self.stderr = None
self.waiter = futures.Future(loop=loop)
@@ -107,7 +110,9 @@ class Process:
self.stdin = protocol.stdin
self.stdout = protocol.stdout
self.stderr = protocol.stderr
- self.pid = transport.get_pid()
+ # transport.get_pid() cannot be used because it fails
+ # if the process already exited
+ self.pid = self._transport.get_extra_info('subprocess').pid
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.pid)
@@ -121,12 +126,12 @@ class Process:
"""Wait until the process exit and return the process return code."""
returncode = self._transport.get_returncode()
if returncode is not None:
- return returncode
+ raise Return(returncode)
waiter = futures.Future(loop=self._loop)
self._protocol._waiters.append(waiter)
- yield from waiter
- return waiter.result()
+ yield From(waiter)
+ raise Return(waiter.result())
def _check_alive(self):
if self._transport.get_returncode() is not None:
@@ -152,7 +157,7 @@ class Process:
logger.debug('%r communicate: feed stdin (%s bytes)',
self, len(input))
try:
- yield from self.stdin.drain()
+ yield From(self.stdin.drain())
except (BrokenPipeError, ConnectionResetError) as exc:
# communicate() ignores BrokenPipeError and ConnectionResetError
if debug:
@@ -177,12 +182,12 @@ class Process:
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: read %s', self, name)
- output = yield from stream.read()
+ output = yield From(stream.read())
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: close %s', self, name)
transport.close()
- return output
+ raise Return(output)
@coroutine
def communicate(self, input=None):
@@ -198,38 +203,45 @@ class Process:
stderr = self._read_stream(2)
else:
stderr = self._noop()
- stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
- loop=self._loop)
- yield from self.wait()
- return (stdout, stderr)
+ stdin, stdout, stderr = yield From(tasks.gather(stdin, stdout, stderr,
+ loop=self._loop))
+ yield From(self.wait())
+ raise Return(stdout, stderr)
@coroutine
-def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
- loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
+def create_subprocess_shell(cmd, **kwds):
+ stdin = kwds.pop('stdin', None)
+ stdout = kwds.pop('stdout', None)
+ stderr = kwds.pop('stderr', None)
+ loop = kwds.pop('loop', None)
+ limit = kwds.pop('limit', streams._DEFAULT_LIMIT)
if loop is None:
loop = events.get_event_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
- transport, protocol = yield from loop.subprocess_shell(
- protocol_factory,
- cmd, stdin=stdin, stdout=stdout,
- stderr=stderr, **kwds)
- yield from protocol.waiter
- return Process(transport, protocol, loop)
+ transport, protocol = yield From(loop.subprocess_shell(
+ protocol_factory,
+ cmd, stdin=stdin, stdout=stdout,
+ stderr=stderr, **kwds))
+ yield From(protocol.waiter)
+ raise Return(Process(transport, protocol, loop))
@coroutine
-def create_subprocess_exec(program, *args, stdin=None, stdout=None,
- stderr=None, loop=None,
- limit=streams._DEFAULT_LIMIT, **kwds):
+def create_subprocess_exec(program, *args, **kwds):
+ stdin = kwds.pop('stdin', None)
+ stdout = kwds.pop('stdout', None)
+ stderr = kwds.pop('stderr', None)
+ loop = kwds.pop('loop', None)
+ limit = kwds.pop('limit', streams._DEFAULT_LIMIT)
if loop is None:
loop = events.get_event_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
- transport, protocol = yield from loop.subprocess_exec(
- protocol_factory,
- program, *args,
- stdin=stdin, stdout=stdout,
- stderr=stderr, **kwds)
- yield from protocol.waiter
- return Process(transport, protocol, loop)
+ transport, protocol = yield From(loop.subprocess_exec(
+ protocol_factory,
+ program, *args,
+ stdin=stdin, stdout=stdout,
+ stderr=stderr, **kwds))
+ yield From(protocol.waiter)
+ raise Return(Process(transport, protocol, loop))
diff --git a/asyncio/tasks.py b/trollius/tasks.py
index 9aebffd..4d91de8 100644
--- a/asyncio/tasks.py
+++ b/trollius/tasks.py
@@ -1,27 +1,40 @@
"""Support for tasks, coroutines and the scheduler."""
+from __future__ import print_function
__all__ = ['Task',
'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED',
'wait', 'wait_for', 'as_completed', 'sleep', 'async',
- 'gather', 'shield',
+ 'gather', 'shield', 'Return', 'From',
]
-import concurrent.futures
import functools
-import inspect
import linecache
import sys
import traceback
-import weakref
+try:
+ from weakref import WeakSet
+except ImportError:
+ # Python 2.6
+ from .py27_weakrefset import WeakSet
+from . import compat
from . import coroutines
from . import events
+from . import executor
from . import futures
-from .coroutines import coroutine
+from .locks import Lock, Condition, Semaphore, _ContextManager
+from .coroutines import coroutine, From, Return, iscoroutinefunction, iscoroutine
+
_PY34 = (sys.version_info >= (3, 4))
+@coroutine
+def _lock_coroutine(lock):
+ yield From(lock.acquire())
+ raise Return(_ContextManager(lock))
+
+
class Task(futures.Future):
"""A coroutine wrapped in a Future."""
@@ -35,7 +48,7 @@ class Task(futures.Future):
# must be _wakeup().
# Weak set containing all tasks alive.
- _all_tasks = weakref.WeakSet()
+ _all_tasks = WeakSet()
# Dictionary containing tasks that are currently active in
# all running event loops. {EventLoop: Task}
@@ -65,11 +78,11 @@ class Task(futures.Future):
"""
if loop is None:
loop = events.get_event_loop()
- return {t for t in cls._all_tasks if t._loop is loop}
+ return set(t for t in cls._all_tasks if t._loop is loop)
- def __init__(self, coro, *, loop=None):
+ def __init__(self, coro, loop=None):
assert coroutines.iscoroutine(coro), repr(coro) # Not a coroutine function!
- super().__init__(loop=loop)
+ super(Task, self).__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
self._coro = iter(coro) # Use the iterator just in case.
@@ -94,7 +107,7 @@ class Task(futures.Future):
futures.Future.__del__(self)
def _repr_info(self):
- info = super()._repr_info()
+ info = super(Task, self)._repr_info()
if self._must_cancel:
# replace status
@@ -107,7 +120,7 @@ class Task(futures.Future):
info.insert(2, 'wait_for=%r' % self._fut_waiter)
return info
- def get_stack(self, *, limit=None):
+ def get_stack(self, limit=None):
"""Return the list of stack frames for this task's coroutine.
If the coroutine is not done, this returns the stack where it is
@@ -150,7 +163,7 @@ class Task(futures.Future):
tb = tb.tb_next
return frames
- def print_stack(self, *, limit=None, file=None):
+ def print_stack(self, limit=None, file=None):
"""Print the stack or traceback for this task's coroutine.
This produces output similar to that of the traceback module,
@@ -217,9 +230,9 @@ class Task(futures.Future):
self._must_cancel = True
return True
- def _step(self, value=None, exc=None):
+ def _step(self, value=None, exc=None, exc_tb=None):
assert not self.done(), \
- '_step(): already done: {!r}, {!r}, {!r}'.format(self, value, exc)
+ '_step(): already done: {0!r}, {1!r}, {2!r}'.format(self, value, exc)
if self._must_cancel:
if not isinstance(exc, futures.CancelledError):
exc = futures.CancelledError()
@@ -227,6 +240,10 @@ class Task(futures.Future):
coro = self._coro
self._fut_waiter = None
+ if exc_tb is not None:
+ init_exc = exc
+ else:
+ init_exc = None
self.__class__._current_tasks[self._loop] = self
# Call either coro.throw(exc) or coro.send(value).
try:
@@ -237,71 +254,104 @@ class Task(futures.Future):
else:
result = next(coro)
except StopIteration as exc:
- self.set_result(exc.value)
+ if compat.PY33:
+ # asyncio Task object? get the result of the coroutine
+ result = exc.value
+ else:
+ if isinstance(exc, Return):
+ exc.raised = True
+ result = exc.value
+ else:
+ result = None
+ self.set_result(result)
except futures.CancelledError as exc:
- super().cancel() # I.e., Future.cancel(self).
- except Exception as exc:
- self.set_exception(exc)
+ super(Task, self).cancel() # I.e., Future.cancel(self).
except BaseException as exc:
- self.set_exception(exc)
- raise
+ if exc is init_exc:
+ self._set_exception_with_tb(exc, exc_tb)
+ exc_tb = None
+ else:
+ self.set_exception(exc)
+
+ if not isinstance(exc, Exception):
+ # reraise BaseException
+ raise
else:
- if isinstance(result, futures.Future):
- # Yielded Future must come from Future.__iter__().
- if result._blocking:
- result._blocking = False
- result.add_done_callback(self._wakeup)
- self._fut_waiter = result
- if self._must_cancel:
- if self._fut_waiter.cancel():
- self._must_cancel = False
+ if coroutines._DEBUG:
+ if not coroutines._coroutine_at_yield_from(self._coro):
+ # trollius coroutine must "yield From(...)"
+ if not isinstance(result, coroutines.FromWrapper):
+ self._loop.call_soon(
+ self._step, None,
+ RuntimeError("yield used without From"))
+ return
+ result = result.obj
else:
- self._loop.call_soon(
- self._step, None,
- RuntimeError(
- 'yield was used instead of yield from '
- 'in task {!r} with {!r}'.format(self, result)))
+ # asyncio coroutine using "yield from ..."
+ if isinstance(result, coroutines.FromWrapper):
+ result = result.obj
+ elif isinstance(result, coroutines.FromWrapper):
+ result = result.obj
+
+ if iscoroutine(result):
+ # "yield coroutine" creates a task, the current task
+ # will wait until the new task is done
+ result = self._loop.create_task(result)
+ # FIXME: faster check. common base class? hasattr?
+ elif isinstance(result, (Lock, Condition, Semaphore)):
+ coro = _lock_coroutine(result)
+ result = self._loop.create_task(coro)
+
+ if isinstance(result, futures._FUTURE_CLASSES):
+ # Yielded Future must come from Future.__iter__().
+ result.add_done_callback(self._wakeup)
+ self._fut_waiter = result
+ if self._must_cancel:
+ if self._fut_waiter.cancel():
+ self._must_cancel = False
elif result is None:
# Bare yield relinquishes control for one event loop iteration.
self._loop.call_soon(self._step)
- elif inspect.isgenerator(result):
- # Yielding a generator is just wrong.
- self._loop.call_soon(
- self._step, None,
- RuntimeError(
- 'yield was used instead of yield from for '
- 'generator in task {!r} with {}'.format(
- self, result)))
else:
# Yielding something else is an error.
self._loop.call_soon(
self._step, None,
RuntimeError(
- 'Task got bad yield: {!r}'.format(result)))
+ 'Task got bad yield: {0!r}'.format(result)))
finally:
self.__class__._current_tasks.pop(self._loop)
self = None # Needed to break cycles when an exception occurs.
def _wakeup(self, future):
- try:
- value = future.result()
- except Exception as exc:
- # This may also be a cancellation.
- self._step(None, exc)
+ if (future._state == futures._FINISHED
+ and future._exception is not None):
+ # Get the traceback before calling exception(), because calling
+ # the exception() method clears the traceback
+ exc_tb = future._get_exception_tb()
+ exc = future.exception()
+ self._step(None, exc, exc_tb)
+ exc_tb = None
else:
- self._step(value, None)
+ try:
+ value = future.result()
+ except Exception as exc:
+ # This may also be a cancellation.
+ self._step(None, exc)
+ else:
+ self._step(value, None)
self = None # Needed to break cycles when an exception occurs.
# wait() and as_completed() similar to those in PEP 3148.
-FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED
-FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION
-ALL_COMPLETED = concurrent.futures.ALL_COMPLETED
+# Export symbols in trollius.tasks for compatibility with Tulip
+FIRST_COMPLETED = executor.FIRST_COMPLETED
+FIRST_EXCEPTION = executor.FIRST_EXCEPTION
+ALL_COMPLETED = executor.ALL_COMPLETED
@coroutine
-def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
+def wait(fs, loop=None, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the Futures and coroutines given by fs to complete.
The sequence futures must not be empty.
@@ -312,24 +362,25 @@ def wait(fs, *, loop=None, timeout=None, return_when=ALL_COMPLETED):
Usage:
- done, pending = yield from asyncio.wait(fs)
+ done, pending = yield From(trollius.wait(fs))
Note: This does not raise TimeoutError! Futures that aren't done
when the timeout occurs are returned in the second set.
"""
- if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
+ if isinstance(fs, futures._FUTURE_CLASSES) or coroutines.iscoroutine(fs):
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
if not fs:
raise ValueError('Set of coroutines/Futures is empty.')
if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED):
- raise ValueError('Invalid return_when value: {}'.format(return_when))
+ raise ValueError('Invalid return_when value: {0}'.format(return_when))
if loop is None:
loop = events.get_event_loop()
- fs = {async(f, loop=loop) for f in set(fs)}
+ fs = set(async(f, loop=loop) for f in set(fs))
- return (yield from _wait(fs, timeout, return_when, loop))
+ result = yield From(_wait(fs, timeout, return_when, loop))
+ raise Return(result)
def _release_waiter(waiter, *args):
@@ -338,7 +389,7 @@ def _release_waiter(waiter, *args):
@coroutine
-def wait_for(fut, timeout, *, loop=None):
+def wait_for(fut, timeout, loop=None):
"""Wait for the single Future or coroutine to complete, with timeout.
Coroutine will be wrapped in Task.
@@ -349,14 +400,14 @@ def wait_for(fut, timeout, *, loop=None):
Usage:
- result = yield from asyncio.wait_for(fut, 10.0)
+ result = yield From(trollius.wait_for(fut, 10.0))
"""
if loop is None:
loop = events.get_event_loop()
if timeout is None:
- return (yield from fut)
+ raise Return((yield From(fut)))
waiter = futures.Future(loop=loop)
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
@@ -367,10 +418,10 @@ def wait_for(fut, timeout, *, loop=None):
try:
# wait until the future completes or the timeout
- yield from waiter
+ yield From(waiter)
if fut.done():
- return fut.result()
+ raise Return(fut.result())
else:
fut.remove_done_callback(cb)
fut.cancel()
@@ -390,12 +441,11 @@ def _wait(fs, timeout, return_when, loop):
timeout_handle = None
if timeout is not None:
timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
- counter = len(fs)
+ non_local = {'counter': len(fs)}
def _on_completion(f):
- nonlocal counter
- counter -= 1
- if (counter <= 0 or
+ non_local['counter'] -= 1
+ if (non_local['counter'] <= 0 or
return_when == FIRST_COMPLETED or
return_when == FIRST_EXCEPTION and (not f.cancelled() and
f.exception() is not None)):
@@ -408,7 +458,7 @@ def _wait(fs, timeout, return_when, loop):
f.add_done_callback(_on_completion)
try:
- yield from waiter
+ yield From(waiter)
finally:
if timeout_handle is not None:
timeout_handle.cancel()
@@ -420,11 +470,11 @@ def _wait(fs, timeout, return_when, loop):
done.add(f)
else:
pending.add(f)
- return done, pending
+ raise Return(done, pending)
# This is *not* a @coroutine! It is just an iterator (yielding Futures).
-def as_completed(fs, *, loop=None, timeout=None):
+def as_completed(fs, loop=None, timeout=None):
"""Return an iterator whose values are coroutines.
When waiting for the yielded coroutines you'll get the results (or
@@ -434,18 +484,18 @@ def as_completed(fs, *, loop=None, timeout=None):
This differs from PEP 3148; the proper way to use this is:
for f in as_completed(fs):
- result = yield from f # The 'yield from' may raise.
+ result = yield From(f) # The 'yield' may raise.
# Use result.
- If a timeout is specified, the 'yield from' will raise
+ If a timeout is specified, the 'yield' will raise
TimeoutError when the timeout occurs before all Futures are done.
Note: The futures 'f' are not necessarily members of fs.
"""
- if isinstance(fs, futures.Future) or coroutines.iscoroutine(fs):
+ if isinstance(fs, futures._FUTURE_CLASSES) or coroutines.iscoroutine(fs):
raise TypeError("expect a list of futures, not %s" % type(fs).__name__)
loop = loop if loop is not None else events.get_event_loop()
- todo = {async(f, loop=loop) for f in set(fs)}
+ todo = set(async(f, loop=loop) for f in set(fs))
from .queues import Queue # Import here to avoid circular import problem.
done = Queue(loop=loop)
timeout_handle = None
@@ -466,11 +516,11 @@ def as_completed(fs, *, loop=None, timeout=None):
@coroutine
def _wait_for_one():
- f = yield from done.get()
+ f = yield From(done.get())
if f is None:
# Dummy value from _on_timeout().
raise futures.TimeoutError
- return f.result() # May raise f.exception().
+ raise Return(f.result()) # May raise f.exception().
for f in todo:
f.add_done_callback(_on_completion)
@@ -481,23 +531,27 @@ def as_completed(fs, *, loop=None, timeout=None):
@coroutine
-def sleep(delay, result=None, *, loop=None):
+def sleep(delay, result=None, loop=None):
"""Coroutine that completes after a given time (in seconds)."""
future = futures.Future(loop=loop)
h = future._loop.call_later(delay,
future._set_result_unless_cancelled, result)
try:
- return (yield from future)
+ result = yield From(future)
+ raise Return(result)
finally:
h.cancel()
-def async(coro_or_future, *, loop=None):
+def async(coro_or_future, loop=None):
"""Wrap a coroutine in a future.
If the argument is a Future, it is returned directly.
"""
- if isinstance(coro_or_future, futures.Future):
+ # FIXME: only check if coroutines._DEBUG is True?
+ if isinstance(coro_or_future, coroutines.FromWrapper):
+ coro_or_future = coro_or_future.obj
+ if isinstance(coro_or_future, futures._FUTURE_CLASSES):
if loop is not None and loop is not coro_or_future._loop:
raise ValueError('loop argument must agree with Future')
return coro_or_future
@@ -520,8 +574,8 @@ class _GatheringFuture(futures.Future):
cancelled.
"""
- def __init__(self, children, *, loop=None):
- super().__init__(loop=loop)
+ def __init__(self, children, loop=None):
+ super(_GatheringFuture, self).__init__(loop=loop)
self._children = children
def cancel(self):
@@ -532,7 +586,7 @@ class _GatheringFuture(futures.Future):
return True
-def gather(*coros_or_futures, loop=None, return_exceptions=False):
+def gather(*coros_or_futures, **kw):
"""Return a future aggregating results from the given coroutines
or futures.
@@ -552,6 +606,11 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
prevent the cancellation of one child to cause other children to
be cancelled.)
"""
+ loop = kw.pop('loop', None)
+ return_exceptions = kw.pop('return_exceptions', False)
+ if kw:
+ raise TypeError("unexpected keyword")
+
if not coros_or_futures:
outer = futures.Future(loop=loop)
outer.set_result([])
@@ -559,7 +618,7 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
arg_to_fut = {}
for arg in set(coros_or_futures):
- if not isinstance(arg, futures.Future):
+ if not isinstance(arg, futures._FUTURE_CLASSES):
fut = async(arg, loop=loop)
if loop is None:
loop = fut._loop
@@ -577,11 +636,10 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
children = [arg_to_fut[arg] for arg in coros_or_futures]
nchildren = len(children)
outer = _GatheringFuture(children, loop=loop)
- nfinished = 0
+ non_local = {'nfinished': 0}
results = [None] * nchildren
def _done_callback(i, fut):
- nonlocal nfinished
if outer._state != futures._PENDING:
if fut._exception is not None:
# Mark exception retrieved.
@@ -600,8 +658,8 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
else:
res = fut._result
results[i] = res
- nfinished += 1
- if nfinished == nchildren:
+ non_local['nfinished'] += 1
+ if non_local['nfinished'] == nchildren:
outer.set_result(results)
for i, fut in enumerate(children):
@@ -609,16 +667,16 @@ def gather(*coros_or_futures, loop=None, return_exceptions=False):
return outer
-def shield(arg, *, loop=None):
+def shield(arg, loop=None):
"""Wait for a future, shielding it from cancellation.
The statement
- res = yield from shield(something())
+ res = yield From(shield(something()))
is exactly equivalent to the statement
- res = yield from something()
+ res = yield From(something())
*except* that if the coroutine containing it is cancelled, the
task running in something() is not cancelled. From the POV of
@@ -631,7 +689,7 @@ def shield(arg, *, loop=None):
you can combine shield() with a try/except clause, as follows:
try:
- res = yield from shield(something())
+ res = yield From(shield(something()))
except CancelledError:
res = None
"""
diff --git a/asyncio/test_support.py b/trollius/test_support.py
index 336f3ac..bf1526c 100644
--- a/asyncio/test_support.py
+++ b/trollius/test_support.py
@@ -4,6 +4,7 @@
# Ignore symbol TEST_HOME_DIR: test_events works without it
+from __future__ import absolute_import
import functools
import gc
import os
@@ -14,6 +15,7 @@ import subprocess
import sys
import time
+from trollius import test_utils
# A constant likely larger than the underlying OS pipe buffer size, to
# make writes blocking.
@@ -39,7 +41,9 @@ def _assert_python(expected_success, *args, **env_vars):
isolated = env_vars.pop('__isolated')
else:
isolated = not env_vars
- cmd_line = [sys.executable, '-X', 'faulthandler']
+ cmd_line = [sys.executable]
+ if sys.version_info >= (3, 3):
+ cmd_line.extend(('-X', 'faulthandler'))
if isolated and sys.version_info >= (3, 4):
# isolated mode: ignore Python environment variables, ignore user
# site-packages, and don't add the current directory to sys.path
@@ -247,7 +251,7 @@ def requires_mac_ver(*min_version):
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
- raise unittest.SkipTest(
+ raise test_utils.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
@@ -274,7 +278,7 @@ def _requires_unix_version(sysname, min_version):
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
- raise unittest.SkipTest(
+ raise test_utils.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
diff --git a/asyncio/test_utils.py b/trollius/test_utils.py
index 3e5eee5..7f24000 100644
--- a/asyncio/test_utils.py
+++ b/trollius/test_utils.py
@@ -7,20 +7,32 @@ import logging
import os
import re
import socket
-import socketserver
import sys
import tempfile
import threading
import time
-import unittest
-from unittest import mock
-from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
+ import socketserver
+ from http.server import HTTPServer
+except ImportError:
+ # Python 2
+ import SocketServer as socketserver
+ from BaseHTTPServer import HTTPServer
+
+try:
+ from unittest import mock
+except ImportError:
+ # Python < 3.3
+ import mock
+
+try:
import ssl
+ from .py3_ssl import SSLContext, wrap_socket
except ImportError: # pragma: no cover
+ # SSL support disabled in Python
ssl = None
from . import base_events
@@ -37,27 +49,116 @@ if sys.platform == 'win32': # pragma: no cover
else:
from socket import socketpair # pragma: no cover
+try:
+ import unittest
+ skipIf = unittest.skipIf
+ skipUnless = unittest.skipUnless
+ SkipTest = unittest.SkipTest
+ _TestCase = unittest.TestCase
+except AttributeError:
+ # Python 2.6: use the backported unittest module called "unittest2"
+ import unittest2
+ skipIf = unittest2.skipIf
+ skipUnless = unittest2.skipUnless
+ SkipTest = unittest2.SkipTest
+ _TestCase = unittest2.TestCase
+
+
+if not hasattr(_TestCase, 'assertRaisesRegex'):
+ class _BaseTestCaseContext:
+
+ def __init__(self, test_case):
+ self.test_case = test_case
+
+ def _raiseFailure(self, standardMsg):
+ msg = self.test_case._formatMessage(self.msg, standardMsg)
+ raise self.test_case.failureException(msg)
+
+
+ class _AssertRaisesBaseContext(_BaseTestCaseContext):
+
+ def __init__(self, expected, test_case, callable_obj=None,
+ expected_regex=None):
+ _BaseTestCaseContext.__init__(self, test_case)
+ self.expected = expected
+ self.test_case = test_case
+ if callable_obj is not None:
+ try:
+ self.obj_name = callable_obj.__name__
+ except AttributeError:
+ self.obj_name = str(callable_obj)
+ else:
+ self.obj_name = None
+ if isinstance(expected_regex, (bytes, str)):
+ expected_regex = re.compile(expected_regex)
+ self.expected_regex = expected_regex
+ self.msg = None
+
+ def handle(self, name, callable_obj, args, kwargs):
+ """
+ If callable_obj is None, assertRaises/Warns is being used as a
+ context manager, so check for a 'msg' kwarg and return self.
+ If callable_obj is not None, call it passing args and kwargs.
+ """
+ if callable_obj is None:
+ self.msg = kwargs.pop('msg', None)
+ return self
+ with self:
+ callable_obj(*args, **kwargs)
+
+
+ class _AssertRaisesContext(_AssertRaisesBaseContext):
+ """A context manager used to implement TestCase.assertRaises* methods."""
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ if exc_type is None:
+ try:
+ exc_name = self.expected.__name__
+ except AttributeError:
+ exc_name = str(self.expected)
+ if self.obj_name:
+ self._raiseFailure("{0} not raised by {1}".format(exc_name,
+ self.obj_name))
+ else:
+ self._raiseFailure("{0} not raised".format(exc_name))
+ if not issubclass(exc_type, self.expected):
+ # let unexpected exceptions pass through
+ return False
+ self.exception = exc_value
+ if self.expected_regex is None:
+ return True
+
+ expected_regex = self.expected_regex
+ if not expected_regex.search(str(exc_value)):
+ self._raiseFailure('"{0}" does not match "{1}"'.format(
+ expected_regex.pattern, str(exc_value)))
+ return True
+
def dummy_ssl_context():
if ssl is None:
return None
else:
- return ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ return SSLContext(ssl.PROTOCOL_SSLv23)
-def run_briefly(loop):
+def run_briefly(loop, steps=1):
@coroutine
def once():
pass
- gen = once()
- t = loop.create_task(gen)
- # Don't log a warning if the task is not done after run_until_complete().
- # It occurs if the loop is stopped or if a task raises a BaseException.
- t._log_destroy_pending = False
- try:
- loop.run_until_complete(t)
- finally:
- gen.close()
+ for step in range(steps):
+ gen = once()
+ t = loop.create_task(gen)
+ # Don't log a warning if the task is not done after run_until_complete().
+ # It occurs if the loop is stopped or if a task raises a BaseException.
+ t._log_destroy_pending = False
+ try:
+ loop.run_until_complete(t)
+ finally:
+ gen.close()
def run_until(loop, pred, timeout=30):
@@ -89,12 +190,12 @@ class SilentWSGIRequestHandler(WSGIRequestHandler):
pass
-class SilentWSGIServer(WSGIServer):
+class SilentWSGIServer(WSGIServer, object):
request_timeout = 2
def get_request(self):
- request, client_addr = super().get_request()
+ request, client_addr = super(SilentWSGIServer, self).get_request()
request.settimeout(self.request_timeout)
return request, client_addr
@@ -115,10 +216,10 @@ class SSLWSGIServerMixin:
'test', 'test_asyncio')
keyfile = os.path.join(here, 'ssl_key.pem')
certfile = os.path.join(here, 'ssl_cert.pem')
- ssock = ssl.wrap_socket(request,
- keyfile=keyfile,
- certfile=certfile,
- server_side=True)
+ ssock = wrap_socket(request,
+ keyfile=keyfile,
+ certfile=certfile,
+ server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
@@ -131,7 +232,7 @@ class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
-def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
+def _run_test_server(address, use_ssl, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
@@ -158,7 +259,7 @@ def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
if hasattr(socket, 'AF_UNIX'):
- class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
+ class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer, object):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
@@ -166,7 +267,7 @@ if hasattr(socket, 'AF_UNIX'):
self.server_port = 80
- class UnixWSGIServer(UnixHTTPServer, WSGIServer):
+ class UnixWSGIServer(UnixHTTPServer, WSGIServer, object):
request_timeout = 2
@@ -175,7 +276,7 @@ if hasattr(socket, 'AF_UNIX'):
self.setup_environ()
def get_request(self):
- request, client_addr = super().get_request()
+ request, client_addr = super(UnixWSGIServer, self).get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
@@ -214,18 +315,20 @@ if hasattr(socket, 'AF_UNIX'):
@contextlib.contextmanager
- def run_test_unix_server(*, use_ssl=False):
+ def run_test_unix_server(use_ssl=False):
with unix_socket_path() as path:
- yield from _run_test_server(address=path, use_ssl=use_ssl,
- server_cls=SilentUnixWSGIServer,
- server_ssl_cls=UnixSSLWSGIServer)
+ for item in _run_test_server(address=path, use_ssl=use_ssl,
+ server_cls=SilentUnixWSGIServer,
+ server_ssl_cls=UnixSSLWSGIServer):
+ yield item
@contextlib.contextmanager
-def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
- yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
- server_cls=SilentWSGIServer,
- server_ssl_cls=SSLWSGIServer)
+def run_test_server(host='127.0.0.1', port=0, use_ssl=False):
+ for item in _run_test_server(address=(host, port), use_ssl=use_ssl,
+ server_cls=SilentWSGIServer,
+ server_ssl_cls=SSLWSGIServer):
+ yield item
def make_test_protocol(base):
@@ -278,7 +381,7 @@ class TestLoop(base_events.BaseEventLoop):
"""
def __init__(self, gen=None):
- super().__init__()
+ super(TestLoop, self).__init__()
if gen is None:
def gen():
@@ -327,11 +430,11 @@ class TestLoop(base_events.BaseEventLoop):
return False
def assert_reader(self, fd, callback, *args):
- assert fd in self.readers, 'fd {} is not registered'.format(fd)
+ assert fd in self.readers, 'fd {0} is not registered'.format(fd)
handle = self.readers[fd]
- assert handle._callback == callback, '{!r} != {!r}'.format(
+ assert handle._callback == callback, '{0!r} != {1!r}'.format(
handle._callback, callback)
- assert handle._args == args, '{!r} != {!r}'.format(
+ assert handle._args == args, '{0!r} != {1!r}'.format(
handle._args, args)
def add_writer(self, fd, callback, *args):
@@ -346,11 +449,11 @@ class TestLoop(base_events.BaseEventLoop):
return False
def assert_writer(self, fd, callback, *args):
- assert fd in self.writers, 'fd {} is not registered'.format(fd)
+ assert fd in self.writers, 'fd {0} is not registered'.format(fd)
handle = self.writers[fd]
- assert handle._callback == callback, '{!r} != {!r}'.format(
+ assert handle._callback == callback, '{0!r} != {1!r}'.format(
handle._callback, callback)
- assert handle._args == args, '{!r} != {!r}'.format(
+ assert handle._args == args, '{0!r} != {1!r}'.format(
handle._args, args)
def reset_counters(self):
@@ -358,7 +461,7 @@ class TestLoop(base_events.BaseEventLoop):
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
- super()._run_once()
+ super(TestLoop, self)._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
@@ -366,7 +469,7 @@ class TestLoop(base_events.BaseEventLoop):
def call_at(self, when, callback, *args):
self._timers.append(when)
- return super().call_at(when, callback, *args)
+ return super(TestLoop, self).call_at(when, callback, *args)
def _process_events(self, event_list):
return
@@ -399,8 +502,8 @@ def get_function_source(func):
return source
-class TestCase(unittest.TestCase):
- def set_event_loop(self, loop, *, cleanup=True):
+class TestCase(_TestCase):
+ def set_event_loop(self, loop, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
@@ -415,6 +518,48 @@ class TestCase(unittest.TestCase):
def tearDown(self):
events.set_event_loop(None)
+ if not hasattr(_TestCase, 'assertRaisesRegex'):
+ def assertRaisesRegex(self, expected_exception, expected_regex,
+ callable_obj=None, *args, **kwargs):
+ """Asserts that the message in a raised exception matches a regex.
+
+ Args:
+ expected_exception: Exception class expected to be raised.
+ expected_regex: Regex (re pattern object or string) expected
+ to be found in error message.
+ callable_obj: Function to be called.
+ msg: Optional message used in case of failure. Can only be used
+ when assertRaisesRegex is used as a context manager.
+ args: Extra args.
+ kwargs: Extra kwargs.
+ """
+ context = _AssertRaisesContext(expected_exception, self, callable_obj,
+ expected_regex)
+
+ return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
+
+ if not hasattr(_TestCase, 'assertRegex'):
+ def assertRegex(self, text, expected_regex, msg=None):
+ """Fail the test unless the text matches the regular expression."""
+ if isinstance(expected_regex, (str, bytes)):
+ assert expected_regex, "expected_regex must not be empty."
+ expected_regex = re.compile(expected_regex)
+ if not expected_regex.search(text):
+ msg = msg or "Regex didn't match"
+ msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
+ raise self.failureException(msg)
+
+ def check_soure_traceback(self, source_traceback, lineno_delta):
+ frame = sys._getframe(1)
+ filename = frame.f_code.co_filename
+ lineno = frame.f_lineno + lineno_delta
+ name = frame.f_code.co_name
+ self.assertIsInstance(source_traceback, list)
+ self.assertEqual(source_traceback[-1][:3],
+ (filename,
+ lineno,
+ name))
+
@contextlib.contextmanager
def disable_logger():
diff --git a/trollius/time_monotonic.py b/trollius/time_monotonic.py
new file mode 100644
index 0000000..e99364c
--- /dev/null
+++ b/trollius/time_monotonic.py
@@ -0,0 +1,192 @@
+"""
+Backport of time.monotonic() of Python 3.3 (PEP 418) for Python 2.7.
+
+- time_monotonic(). This clock may or may not be monotonic depending on the
+ operating system.
+- time_monotonic_resolution: Resolution of time_monotonic() clock in second
+
+Support Windows, Mac OS X, Linux, FreeBSD, OpenBSD and Solaris, but requires
+the ctypes module.
+"""
+import os
+import sys
+from .log import logger
+from .py33_exceptions import get_error_class
+
+__all__ = ('time_monotonic',)
+
+# default implementation: system clock (non monotonic!)
+from time import time as time_monotonic
+# the worst resolution is 15.6 ms on Windows
+time_monotonic_resolution = 0.050
+
+if os.name == "nt":
+ # Windows: use GetTickCount64() or GetTickCount()
+ try:
+ import ctypes
+ from ctypes import windll
+ from ctypes.wintypes import DWORD
+ except ImportError:
+ logger.error("time_monotonic import error", exc_info=True)
+ else:
+ # GetTickCount64() requires Windows Vista, Server 2008 or later
+ if hasattr(windll.kernel32, 'GetTickCount64'):
+ ULONGLONG = ctypes.c_uint64
+
+ GetTickCount64 = windll.kernel32.GetTickCount64
+ GetTickCount64.restype = ULONGLONG
+ GetTickCount64.argtypes = ()
+
+ def time_monotonic():
+ return GetTickCount64() * 1e-3
+ time_monotonic_resolution = 1e-3
+ else:
+ GetTickCount = windll.kernel32.GetTickCount
+ GetTickCount.restype = DWORD
+ GetTickCount.argtypes = ()
+
+ # Detect GetTickCount() integer overflow (32 bits, roll-over after 49.7
+ # days). It increases an internal epoch (reference time) by 2^32 each
+ # time that an overflow is detected. The epoch is stored in the
+ # process-local state and so the value of time_monotonic() may be
+ # different in two Python processes running for more than 49 days.
+ def time_monotonic():
+ ticks = GetTickCount()
+ if ticks < time_monotonic.last:
+ # Integer overflow detected
+ time_monotonic.delta += 2**32
+ time_monotonic.last = ticks
+ return (ticks + time_monotonic.delta) * 1e-3
+ time_monotonic.last = 0
+ time_monotonic.delta = 0
+ time_monotonic_resolution = 1e-3
+
+elif sys.platform == 'darwin':
+ # Mac OS X: use mach_absolute_time() and mach_timebase_info()
+ try:
+ import ctypes
+ import ctypes.util
+ libc_name = ctypes.util.find_library('c')
+ except ImportError:
+ logger.error("time_monotonic import error", exc_info=True)
+ libc_name = None
+ if libc_name:
+ libc = ctypes.CDLL(libc_name, use_errno=True)
+
+ mach_absolute_time = libc.mach_absolute_time
+ mach_absolute_time.argtypes = ()
+ mach_absolute_time.restype = ctypes.c_uint64
+
+ class mach_timebase_info_data_t(ctypes.Structure):
+ _fields_ = (
+ ('numer', ctypes.c_uint32),
+ ('denom', ctypes.c_uint32),
+ )
+ mach_timebase_info_data_p = ctypes.POINTER(mach_timebase_info_data_t)
+
+ mach_timebase_info = libc.mach_timebase_info
+ mach_timebase_info.argtypes = (mach_timebase_info_data_p,)
+ mach_timebase_info.restype = ctypes.c_int
+
+ def time_monotonic():
+ return mach_absolute_time() * time_monotonic.factor
+
+ timebase = mach_timebase_info_data_t()
+ mach_timebase_info(ctypes.byref(timebase))
+ time_monotonic.factor = float(timebase.numer) / timebase.denom * 1e-9
+ time_monotonic_resolution = time_monotonic.factor
+ del timebase
+
+elif sys.platform.startswith(("linux", "freebsd", "openbsd", "sunos")):
+ # Linux, FreeBSD, OpenBSD: use clock_gettime(CLOCK_MONOTONIC)
+ # Solaris: use clock_gettime(CLOCK_HIGHRES)
+
+ library = None
+ try:
+ import ctypes
+ import ctypes.util
+ except ImportError:
+ logger.error("time_monotonic import error", exc_info=True)
+ libraries = ()
+ else:
+ if sys.platform.startswith(("freebsd", "openbsd")):
+ libraries = ('c',)
+ elif sys.platform.startswith("linux"):
+ # Linux: in glibc 2.17+, clock_gettime() is provided by the libc,
+ # on older versions, it is provided by librt
+ libraries = ('c', 'rt')
+ else:
+ # Solaris
+ libraries = ('rt',)
+
+ for name in libraries:
+ filename = ctypes.util.find_library(name)
+ if not filename:
+ continue
+ library = ctypes.CDLL(filename, use_errno=True)
+ if not hasattr(library, 'clock_gettime'):
+ library = None
+
+ if library is not None:
+ if sys.platform.startswith("openbsd"):
+ import platform
+ release = platform.release()
+ release = tuple(map(int, release.split('.')))
+ if release >= (5, 5):
+ time_t = ctypes.c_int64
+ else:
+ time_t = ctypes.c_int32
+ else:
+ time_t = ctypes.c_long
+ clockid_t = ctypes.c_int
+
+ class timespec(ctypes.Structure):
+ _fields_ = (
+ ('tv_sec', time_t),
+ ('tv_nsec', ctypes.c_long),
+ )
+ timespec_p = ctypes.POINTER(timespec)
+
+ clock_gettime = library.clock_gettime
+ clock_gettime.argtypes = (clockid_t, timespec_p)
+ clock_gettime.restype = ctypes.c_int
+
+ def ctypes_oserror():
+ errno = ctypes.get_errno()
+ message = os.strerror(errno)
+ error_class = get_error_class(errno, OSError)
+ return error_class(errno, message)
+
+ def time_monotonic():
+ ts = timespec()
+ err = clock_gettime(time_monotonic.clk_id, ctypes.byref(ts))
+ if err:
+ raise ctypes_oserror()
+ return ts.tv_sec + ts.tv_nsec * 1e-9
+
+ if sys.platform.startswith("linux"):
+ time_monotonic.clk_id = 1 # CLOCK_MONOTONIC
+ elif sys.platform.startswith("freebsd"):
+ time_monotonic.clk_id = 4 # CLOCK_MONOTONIC
+ elif sys.platform.startswith("openbsd"):
+ time_monotonic.clk_id = 3 # CLOCK_MONOTONIC
+ else:
+ assert sys.platform.startswith("sunos")
+ time_monotonic.clk_id = 4 # CLOCK_HIGHRES
+
+ def get_resolution():
+ _clock_getres = library.clock_getres
+ _clock_getres.argtypes = (clockid_t, timespec_p)
+ _clock_getres.restype = ctypes.c_int
+
+ ts = timespec()
+ err = _clock_getres(time_monotonic.clk_id, ctypes.byref(ts))
+ if err:
+ raise ctypes_oserror()
+ return ts.tv_sec + ts.tv_nsec * 1e-9
+ time_monotonic_resolution = get_resolution()
+ del get_resolution
+
+else:
+ logger.error("time_monotonic: unspported platform %r", sys.platform)
+
diff --git a/asyncio/transports.py b/trollius/transports.py
index 22df3c7..5bdbdaf 100644
--- a/asyncio/transports.py
+++ b/trollius/transports.py
@@ -1,6 +1,7 @@
"""Abstract Transport class."""
import sys
+from .compat import flatten_bytes
_PY34 = sys.version_info >= (3, 4)
@@ -9,7 +10,7 @@ __all__ = ['BaseTransport', 'ReadTransport', 'WriteTransport',
]
-class BaseTransport:
+class BaseTransport(object):
"""Base class for transports."""
def __init__(self, extra=None):
@@ -94,12 +95,8 @@ class WriteTransport(BaseTransport):
The default implementation concatenates the arguments and
calls write() on the result.
"""
- if not _PY34:
- # In Python 3.3, bytes.join() doesn't handle memoryview.
- list_of_data = (
- bytes(data) if isinstance(data, memoryview) else data
- for data in list_of_data)
- self.write(b''.join(list_of_data))
+ data = map(flatten_bytes, list_of_data)
+ self.write(b''.join(data))
def write_eof(self):
"""Close the write end after flushing buffered data.
@@ -230,7 +227,7 @@ class _FlowControlMixin(Transport):
override set_write_buffer_limits() (e.g. to specify different
defaults).
- The subclass constructor must call super().__init__(extra). This
+ The subclass constructor must call super(Class, self).__init__(extra). This
will call set_write_buffer_limits().
The user may call set_write_buffer_limits() and
@@ -239,7 +236,7 @@ class _FlowControlMixin(Transport):
"""
def __init__(self, extra=None, loop=None):
- super().__init__(extra)
+ super(_FlowControlMixin, self).__init__(extra)
assert loop is not None
self._loop = loop
self._protocol_paused = False
diff --git a/asyncio/unix_events.py b/trollius/unix_events.py
index d1461fd..3e6210a 100644
--- a/asyncio/unix_events.py
+++ b/trollius/unix_events.py
@@ -1,4 +1,5 @@
"""Selector event loop for Unix with signal handling."""
+from __future__ import absolute_import
import errno
import os
@@ -12,14 +13,20 @@ import threading
from . import base_events
from . import base_subprocess
+from . import compat
from . import constants
from . import coroutines
from . import events
from . import selector_events
from . import selectors
from . import transports
-from .coroutines import coroutine
+from .compat import flatten_bytes
+from .coroutines import coroutine, From, Return
from .log import logger
+from .py33_exceptions import (
+ reraise, wrap_error,
+ BlockingIOError, BrokenPipeError, ConnectionResetError,
+ InterruptedError, ChildProcessError)
__all__ = ['SelectorEventLoop',
@@ -31,9 +38,10 @@ if sys.platform == 'win32': # pragma: no cover
raise ImportError('Signals are not really supported on Windows')
-def _sighandler_noop(signum, frame):
- """Dummy signal handler."""
- pass
+if compat.PY33:
+ def _sighandler_noop(signum, frame):
+ """Dummy signal handler."""
+ pass
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
@@ -43,23 +51,27 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""
def __init__(self, selector=None):
- super().__init__(selector)
+ super(_UnixSelectorEventLoop, self).__init__(selector)
self._signal_handlers = {}
def _socketpair(self):
return socket.socketpair()
def close(self):
- super().close()
+ super(_UnixSelectorEventLoop, self).close()
for sig in list(self._signal_handlers):
self.remove_signal_handler(sig)
- def _process_self_data(self, data):
- for signum in data:
- if not signum:
- # ignore null bytes written by _write_to_self()
- continue
- self._handle_signal(signum)
+ # On Python <= 3.2, the C signal handler of Python writes a null byte into
+ # the wakeup file descriptor. We cannot retrieve the signal numbers from
+ # the file descriptor.
+ if compat.PY33:
+ def _process_self_data(self, data):
+ for signum in data:
+ if not signum:
+ # ignore null bytes written by _write_to_self()
+ continue
+ self._handle_signal(signum)
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
@@ -85,14 +97,30 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
self._signal_handlers[sig] = handle
try:
- # Register a dummy signal handler to ask Python to write the signal
- # number in the wakup file descriptor. _process_self_data() will
- # read signal numbers from this file descriptor to handle signals.
- signal.signal(sig, _sighandler_noop)
+ if compat.PY33:
+ # On Python 3.3 and newer, the C signal handler writes the
+ # signal number into the wakeup file descriptor and then calls
+ # Py_AddPendingCall() to schedule the Python signal handler.
+ #
+ # Register a dummy signal handler to ask Python to write the
+ # signal number into the wakeup file descriptor.
+ # _process_self_data() will read signal numbers from this file
+ # descriptor to handle signals.
+ signal.signal(sig, _sighandler_noop)
+ else:
+ # On Python 3.2 and older, the C signal handler first calls
+ # Py_AddPendingCall() to schedule the Python signal handler,
+ # and then write a null byte into the wakeup file descriptor.
+ signal.signal(sig, self._handle_signal)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(sig, False)
- except OSError as exc:
+ except (RuntimeError, OSError) as exc:
+ # On Python 2, signal.signal(signal.SIGKILL, signal.SIG_IGN) raises
+ # RuntimeError(22, 'Invalid argument'). On Python 3,
+ # OSError(22, 'Invalid argument') is raised instead.
+ exc_type, exc_value, tb = sys.exc_info()
+
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
@@ -100,12 +128,12 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
except (ValueError, OSError) as nexc:
logger.info('set_wakeup_fd(-1) failed: %s', nexc)
- if exc.errno == errno.EINVAL:
- raise RuntimeError('sig {} cannot be caught'.format(sig))
+ if isinstance(exc, RuntimeError) or exc.errno == errno.EINVAL:
+ raise RuntimeError('sig {0} cannot be caught'.format(sig))
else:
- raise
+ reraise(exc_type, exc_value, tb)
- def _handle_signal(self, sig):
+ def _handle_signal(self, sig, frame=None):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
@@ -135,7 +163,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
- raise RuntimeError('sig {} cannot be caught'.format(sig))
+ raise RuntimeError('sig {0} cannot be caught'.format(sig))
else:
raise
@@ -154,11 +182,11 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
- raise TypeError('sig must be an int, not {!r}'.format(sig))
+ raise TypeError('sig must be an int, not {0!r}'.format(sig))
if not (1 <= sig < signal.NSIG):
raise ValueError(
- 'sig {} out of range(1, {})'.format(sig, signal.NSIG))
+ 'sig {0} out of range(1, {1})'.format(sig, signal.NSIG))
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
@@ -176,17 +204,17 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
transp = _UnixSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=extra, **kwargs)
- yield from transp._post_init()
+ yield From(transp._post_init())
watcher.add_child_handler(transp.get_pid(),
self._child_watcher_callback, transp)
- return transp
+ raise Return(transp)
def _child_watcher_callback(self, pid, returncode, transp):
self.call_soon_threadsafe(transp._process_exited, returncode)
@coroutine
- def create_unix_connection(self, protocol_factory, path, *,
+ def create_unix_connection(self, protocol_factory, path,
ssl=None, sock=None,
server_hostname=None):
assert server_hostname is None or isinstance(server_hostname, str)
@@ -206,7 +234,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
try:
sock.setblocking(False)
- yield from self.sock_connect(sock, path)
+ yield From(self.sock_connect(sock, path))
except:
sock.close()
raise
@@ -216,12 +244,12 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
raise ValueError('no path and sock were specified')
sock.setblocking(False)
- transport, protocol = yield from self._create_connection_transport(
- sock, protocol_factory, ssl, server_hostname)
- return transport, protocol
+ transport, protocol = yield From(self._create_connection_transport(
+ sock, protocol_factory, ssl, server_hostname))
+ raise Return(transport, protocol)
@coroutine
- def create_unix_server(self, protocol_factory, path=None, *,
+ def create_unix_server(self, protocol_factory, path=None,
sock=None, backlog=100, ssl=None):
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
@@ -235,13 +263,13 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
try:
sock.bind(path)
- except OSError as exc:
+ except socket.error as exc:
sock.close()
if exc.errno == errno.EADDRINUSE:
# Let's improve the error message by adding
# with what exact address it occurs.
- msg = 'Address {!r} is already in use'.format(path)
- raise OSError(errno.EADDRINUSE, msg) from None
+ msg = 'Address {0!r} is already in use'.format(path)
+ raise OSError(errno.EADDRINUSE, msg)
else:
raise
except:
@@ -254,7 +282,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
if sock.family != socket.AF_UNIX:
raise ValueError(
- 'A UNIX Domain Socket was expected, got {!r}'.format(sock))
+ 'A UNIX Domain Socket was expected, got {0!r}'.format(sock))
server = base_events.Server(self, [sock])
sock.listen(backlog)
@@ -264,6 +292,7 @@ class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
if hasattr(os, 'set_blocking'):
+ # Python 3.5 and newer
def _set_nonblocking(fd):
os.set_blocking(fd, False)
else:
@@ -280,7 +309,7 @@ class _UnixReadPipeTransport(transports.ReadTransport):
max_size = 256 * 1024 # max bytes we read in one event loop iteration
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
- super().__init__(extra)
+ super(_UnixReadPipeTransport, self).__init__(extra)
self._extra['pipe'] = pipe
self._loop = loop
self._pipe = pipe
@@ -315,7 +344,7 @@ class _UnixReadPipeTransport(transports.ReadTransport):
def _read_ready(self):
try:
- data = os.read(self._fileno, self.max_size)
+ data = wrap_error(os.read, self._fileno, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
@@ -374,7 +403,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
transports.WriteTransport):
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
- super().__init__(extra, loop)
+ super(_UnixWritePipeTransport, self).__init__(extra, loop)
self._extra['pipe'] = pipe
self._pipe = pipe
self._fileno = pipe.fileno()
@@ -432,9 +461,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
self._close()
def write(self, data):
- assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
- if isinstance(data, bytearray):
- data = memoryview(data)
+ data = flatten_bytes(data)
if not data:
return
@@ -448,7 +475,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
if not self._buffer:
# Attempt to send it right away first.
try:
- n = os.write(self._fileno, data)
+ n = wrap_error(os.write, self._fileno, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
@@ -468,9 +495,9 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
data = b''.join(self._buffer)
assert data, 'Data should not be empty'
- self._buffer.clear()
+ del self._buffer[:]
try:
- n = os.write(self._fileno, data)
+ n = wrap_error(os.write, self._fileno, data)
except (BlockingIOError, InterruptedError):
self._buffer.append(data)
except Exception as exc:
@@ -533,7 +560,7 @@ class _UnixWritePipeTransport(transports._FlowControlMixin,
self._closing = True
if self._buffer:
self._loop.remove_writer(self._fileno)
- self._buffer.clear()
+ del self._buffer[:]
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
@@ -584,11 +611,20 @@ class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
universal_newlines=False, bufsize=bufsize, **kwargs)
if stdin_w is not None:
+ # Retrieve the file descriptor from stdin_w, stdin_w should not
+ # "own" the file descriptor anymore: closing stdin_fd file
+ # descriptor must close immediatly the file
stdin.close()
- self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
+ if hasattr(stdin_w, 'detach'):
+ stdin_fd = stdin_w.detach()
+ self._proc.stdin = os.fdopen(stdin_fd, 'wb', bufsize)
+ else:
+ stdin_dup = os.dup(stdin_w.fileno())
+ stdin_w.close()
+ self._proc.stdin = os.fdopen(stdin_dup, 'wb', bufsize)
-class AbstractChildWatcher:
+class AbstractChildWatcher(object):
"""Abstract base class for monitoring child processes.
Objects derived from this class monitor a collection of subprocesses and
@@ -724,12 +760,12 @@ class SafeChildWatcher(BaseChildWatcher):
"""
def __init__(self):
- super().__init__()
+ super(SafeChildWatcher, self).__init__()
self._callbacks = {}
def close(self):
self._callbacks.clear()
- super().close()
+ super(SafeChildWatcher, self).close()
def __enter__(self):
return self
@@ -801,7 +837,7 @@ class FastChildWatcher(BaseChildWatcher):
(O(1) each time a child terminates).
"""
def __init__(self):
- super().__init__()
+ super(FastChildWatcher, self).__init__()
self._callbacks = {}
self._lock = threading.Lock()
self._zombies = {}
@@ -810,7 +846,7 @@ class FastChildWatcher(BaseChildWatcher):
def close(self):
self._callbacks.clear()
self._zombies.clear()
- super().close()
+ super(FastChildWatcher, self).close()
def __enter__(self):
with self._lock:
@@ -857,7 +893,7 @@ class FastChildWatcher(BaseChildWatcher):
# long as we're able to reap a child.
while True:
try:
- pid, status = os.waitpid(-1, os.WNOHANG)
+ pid, status = wrap_error(os.waitpid, -1, os.WNOHANG)
except ChildProcessError:
# No more child processes exist.
return
@@ -900,7 +936,7 @@ class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
_loop_factory = _UnixSelectorEventLoop
def __init__(self):
- super().__init__()
+ super(_UnixDefaultEventLoopPolicy, self).__init__()
self._watcher = None
def _init_watcher(self):
@@ -919,7 +955,7 @@ class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
the child watcher.
"""
- super().set_event_loop(loop)
+ super(_UnixDefaultEventLoopPolicy, self).set_event_loop(loop)
if self._watcher is not None and \
isinstance(threading.current_thread(), threading._MainThread):
diff --git a/asyncio/windows_events.py b/trollius/windows_events.py
index d7feb1a..0a73c18 100644
--- a/asyncio/windows_events.py
+++ b/trollius/windows_events.py
@@ -1,6 +1,5 @@
"""Selector and proactor event loops for Windows."""
-import _winapi
import errno
import math
import socket
@@ -11,12 +10,14 @@ from . import events
from . import base_subprocess
from . import futures
from . import proactor_events
+from . import py33_winapi as _winapi
from . import selector_events
from . import tasks
from . import windows_utils
from . import _overlapped
-from .coroutines import coroutine
+from .coroutines import coroutine, From, Return
from .log import logger
+from .py33_exceptions import wrap_error, get_error_class, ConnectionRefusedError
__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
@@ -29,21 +30,20 @@ INFINITE = 0xffffffff
ERROR_CONNECTION_REFUSED = 1225
ERROR_CONNECTION_ABORTED = 1236
-
class _OverlappedFuture(futures.Future):
"""Subclass of Future which represents an overlapped operation.
Cancelling it will immediately cancel the overlapped operation.
"""
- def __init__(self, ov, *, loop=None):
- super().__init__(loop=loop)
+ def __init__(self, ov, loop=None):
+ super(_OverlappedFuture, self).__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
self._ov = ov
def _repr_info(self):
- info = super()._repr_info()
+ info = super(_OverlappedFuture, self)._repr_info()
if self._ov is not None:
state = 'pending' if self._ov.pending else 'completed'
info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
@@ -67,22 +67,22 @@ class _OverlappedFuture(futures.Future):
def cancel(self):
self._cancel_overlapped()
- return super().cancel()
+ return super(_OverlappedFuture, self).cancel()
def set_exception(self, exception):
- super().set_exception(exception)
+ super(_OverlappedFuture, self).set_exception(exception)
self._cancel_overlapped()
def set_result(self, result):
- super().set_result(result)
+ super(_OverlappedFuture, self).set_result(result)
self._ov = None
class _WaitHandleFuture(futures.Future):
"""Subclass of Future which represents a wait handle."""
- def __init__(self, iocp, ov, handle, wait_handle, *, loop=None):
- super().__init__(loop=loop)
+ def __init__(self, iocp, ov, handle, wait_handle, loop=None):
+ super(_WaitHandleFuture, self).__init__(loop=loop)
if self._source_traceback:
del self._source_traceback[-1]
# iocp and ov are only used by cancel() to notify IocpProactor
@@ -98,7 +98,7 @@ class _WaitHandleFuture(futures.Future):
_winapi.WAIT_OBJECT_0)
def _repr_info(self):
- info = super()._repr_info()
+ info = super(_WaitHandleFuture, self)._repr_info()
info.insert(1, 'handle=%#x' % self._handle)
if self._wait_handle:
state = 'signaled' if self._poll() else 'waiting'
@@ -111,7 +111,7 @@ class _WaitHandleFuture(futures.Future):
return
try:
_overlapped.UnregisterWait(self._wait_handle)
- except OSError as exc:
+ except WindowsError as exc:
# ERROR_IO_PENDING is not an error, the wait was unregistered
if exc.winerror != _overlapped.ERROR_IO_PENDING:
context = {
@@ -127,7 +127,7 @@ class _WaitHandleFuture(futures.Future):
self._ov = None
def cancel(self):
- result = super().cancel()
+ result = super(_WaitHandleFuture, self).cancel()
if self._ov is not None:
# signal the cancellation to the overlapped object
_overlapped.PostQueuedCompletionStatus(self._iocp, True,
@@ -136,11 +136,11 @@ class _WaitHandleFuture(futures.Future):
return result
def set_exception(self, exception):
- super().set_exception(exception)
+ super(_WaitHandleFuture, self).set_exception(exception)
self._unregister_wait()
def set_result(self, result):
- super().set_result(result)
+ super(_WaitHandleFuture, self).set_result(result)
self._unregister_wait()
@@ -174,7 +174,7 @@ class PipeServer(object):
flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
if first:
flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
- h = _winapi.CreateNamedPipe(
+ h = wrap_error(_winapi.CreateNamedPipe,
self._address, flags,
_winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
_winapi.PIPE_WAIT,
@@ -213,7 +213,7 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
def __init__(self, proactor=None):
if proactor is None:
proactor = IocpProactor()
- super().__init__(proactor)
+ super(ProactorEventLoop, self).__init__(proactor)
def _socketpair(self):
return windows_utils.socketpair()
@@ -221,11 +221,11 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
@coroutine
def create_pipe_connection(self, protocol_factory, address):
f = self._proactor.connect_pipe(address)
- pipe = yield from f
+ pipe = yield From(f)
protocol = protocol_factory()
trans = self._make_duplex_pipe_transport(pipe, protocol,
extra={'addr': address})
- return trans, protocol
+ raise Return(trans, protocol)
@coroutine
def start_serving_pipe(self, protocol_factory, address):
@@ -272,11 +272,11 @@ class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
transp = _WindowsSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=extra, **kwargs)
- yield from transp._post_init()
- return transp
+ yield From(transp._post_init())
+ raise Return(transp)
-class IocpProactor:
+class IocpProactor(object):
"""Proactor implementation using IOCP."""
def __init__(self, concurrency=0xffffffff):
@@ -307,18 +307,12 @@ class IocpProactor:
self._register_with_iocp(conn)
ov = _overlapped.Overlapped(NULL)
if isinstance(conn, socket.socket):
- ov.WSARecv(conn.fileno(), nbytes, flags)
+ wrap_error(ov.WSARecv, conn.fileno(), nbytes, flags)
else:
- ov.ReadFile(conn.fileno(), nbytes)
+ wrap_error(ov.ReadFile, conn.fileno(), nbytes)
def finish_recv(trans, key, ov):
- try:
- return ov.getresult()
- except OSError as exc:
- if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
- raise ConnectionResetError(*exc.args)
- else:
- raise
+ return wrap_error(ov.getresult)
return self._register(ov, conn, finish_recv)
@@ -331,13 +325,7 @@ class IocpProactor:
ov.WriteFile(conn.fileno(), buf)
def finish_send(trans, key, ov):
- try:
- return ov.getresult()
- except OSError as exc:
- if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
- raise ConnectionResetError(*exc.args)
- else:
- raise
+ return wrap_error(ov.getresult)
return self._register(ov, conn, finish_send)
@@ -348,7 +336,7 @@ class IocpProactor:
ov.AcceptEx(listener.fileno(), conn.fileno())
def finish_accept(trans, key, ov):
- ov.getresult()
+ wrap_error(ov.getresult)
# Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
buf = struct.pack('@P', listener.fileno())
conn.setsockopt(socket.SOL_SOCKET,
@@ -360,7 +348,7 @@ class IocpProactor:
def accept_coro(future, conn):
# Coroutine closing the accept socket if the future is cancelled
try:
- yield from future
+ yield From(future)
except futures.CancelledError:
conn.close()
raise
@@ -375,7 +363,7 @@ class IocpProactor:
# The socket needs to be locally bound before we call ConnectEx().
try:
_overlapped.BindLocal(conn.fileno(), conn.family)
- except OSError as e:
+ except WindowsError as e:
if e.winerror != errno.WSAEINVAL:
raise
# Probably already locally bound; check using getsockname().
@@ -385,7 +373,7 @@ class IocpProactor:
ov.ConnectEx(conn.fileno(), address)
def finish_connect(trans, key, ov):
- ov.getresult()
+ wrap_error(ov.getresult)
# Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
conn.setsockopt(socket.SOL_SOCKET,
_overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
@@ -399,7 +387,7 @@ class IocpProactor:
ov.ConnectNamedPipe(pipe.fileno())
def finish_accept_pipe(trans, key, ov):
- ov.getresult()
+ wrap_error(ov.getresult)
return pipe
# FIXME: Tulip issue 196: why do we need register=False?
@@ -420,7 +408,11 @@ class IocpProactor:
raise ConnectionRefusedError(0, msg, None, err)
elif err != 0:
msg = _overlapped.FormatMessage(err)
- raise OSError(0, msg, None, err)
+ err_cls = get_error_class(err, None)
+ if err_cls is not None:
+ raise err_cls(0, msg, None, err)
+ else:
+ raise WindowsError(err, msg)
else:
return windows_utils.PipeHandle(handle)
@@ -437,7 +429,7 @@ class IocpProactor:
else:
# RegisterWaitForSingleObject() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
- ms = math.ceil(timeout * 1e3)
+ ms = int(math.ceil(timeout * 1e3))
# We only create ov so we can use ov.address as a key for the cache.
ov = _overlapped.Overlapped(NULL)
@@ -528,7 +520,7 @@ class IocpProactor:
else:
# GetQueuedCompletionStatus() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
- ms = math.ceil(timeout * 1e3)
+ ms = int(math.ceil(timeout * 1e3))
if ms >= INFINITE:
raise ValueError("timeout too big")
@@ -590,7 +582,7 @@ class IocpProactor:
else:
try:
fut.cancel()
- except OSError as exc:
+ except WindowsError as exc:
if self._loop is not None:
context = {
'message': 'Cancelling a future failed',
diff --git a/asyncio/windows_utils.py b/trollius/windows_utils.py
index c6e4bc9..dfeb10f 100644
--- a/asyncio/windows_utils.py
+++ b/trollius/windows_utils.py
@@ -1,6 +1,7 @@
"""
Various Windows specific bits and pieces
"""
+from __future__ import absolute_import
import sys
@@ -13,7 +14,9 @@ import msvcrt
import os
import subprocess
import tempfile
-import _winapi
+
+from . import py33_winapi as _winapi
+from .py33_exceptions import wrap_error, BlockingIOError, InterruptedError
__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle']
@@ -62,7 +65,7 @@ else:
try:
csock.setblocking(False)
try:
- csock.connect((addr, port))
+ wrap_error(csock.connect, (addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
@@ -78,7 +81,7 @@ else:
# Replacement for os.pipe() using handles instead of fds
-def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
+def pipe(duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
"""Like os.pipe() but with overlapped support and using handles not fds."""
address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
(os.getpid(), next(_mmap_counter)))
@@ -112,8 +115,13 @@ def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
flags_and_attribs, _winapi.NULL)
- ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
- ov.GetOverlappedResult(True)
+ ov = _winapi.ConnectNamedPipe(h1, True)
+ if hasattr(ov, 'GetOverlappedResult'):
+ # _winapi module of Python 3.3
+ ov.GetOverlappedResult(True)
+ else:
+ # _overlapped module
+ wrap_error(ov.getresult, True)
return h1, h2
except:
if h1 is not None:
@@ -126,7 +134,7 @@ def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
# Wrapper for a pipe handle
-class PipeHandle:
+class PipeHandle(object):
"""Wrapper for an overlapped pipe handle which is vaguely file-object like.
The IOCP event loop can use these instead of socket objects.
@@ -148,7 +156,7 @@ class PipeHandle:
def fileno(self):
return self._handle
- def close(self, *, CloseHandle=_winapi.CloseHandle):
+ def close(self, CloseHandle=_winapi.CloseHandle):
if self._handle != -1:
CloseHandle(self._handle)
self._handle = -1
@@ -193,8 +201,11 @@ class Popen(subprocess.Popen):
else:
stderr_wfd = stderr
try:
- super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
- stderr=stderr_wfd, **kwds)
+ super(Popen, self).__init__(args,
+ stdin=stdin_rfd,
+ stdout=stdout_wfd,
+ stderr=stderr_wfd,
+ **kwds)
except:
for h in (stdin_wh, stdout_rh, stderr_rh):
if h is not None:
diff --git a/update-tulip-step1.sh b/update-tulip-step1.sh
new file mode 100755
index 0000000..a987ff3
--- /dev/null
+++ b/update-tulip-step1.sh
@@ -0,0 +1,9 @@
+set -e -x
+hg update trollius
+hg pull --update
+hg update default
+hg pull https://code.google.com/p/tulip/
+hg update
+hg update trollius
+hg merge default
+echo "Now fix merge conflicts"
diff --git a/update-tulip-step2.sh b/update-tulip-step2.sh
new file mode 100755
index 0000000..2666f90
--- /dev/null
+++ b/update-tulip-step2.sh
@@ -0,0 +1,39 @@
+set -e
+
+# Check for merge conflicts
+if $(hg resolve -l | grep -q -v '^R'); then
+ echo "Fix the following conflicts:"
+ hg resolve -l | grep -v '^R'
+ exit 1
+fi
+
+# Ensure that yield from is not used
+if $(hg diff|grep -q 'yield from'); then
+ echo "yield from present in changed code!"
+ hg diff | grep 'yield from' -B5 -A3
+ exit 1
+fi
+
+# Ensure that mock patches the trollius module, not asyncio
+if $(grep -q 'patch.*asyncio' tests/*.py); then
+ echo "Fix following patch lines in tests/"
+ grep 'patch.*asyncio' tests/*.py
+ exit 1
+fi
+
+# Python 2.6 compatibility
+if $(grep -q -E '\{[^0-9].*format' */*.py); then
+ echo "Issues with Python 2.6 compatibility:"
+ grep -E '\{[^0-9].*format' */*.py
+ exit 1
+fi
+if $(grep -q -E 'unittest\.skip' tests/*.py); then
+ echo "Issues with Python 2.6 compatibility:"
+ grep -E 'unittest\.skip' tests/*.py
+ exit 1
+fi
+if $(grep -q -F 'super()' */*.py); then
+ echo "Issues with Python 2.6 compatibility:"
+ grep -F 'super()' */*.py
+ exit 1
+fi
diff --git a/update-tulip-step3.sh b/update-tulip-step3.sh
new file mode 100755
index 0000000..202b44b
--- /dev/null
+++ b/update-tulip-step3.sh
@@ -0,0 +1,4 @@
+set -e -x
+./update-tulip-step2.sh
+tox -e py27,py34
+hg ci -m 'Merge Tulip into Trollius'
diff --git a/update_stdlib.sh b/update_stdlib.sh
deleted file mode 100755
index 0cdbb1b..0000000
--- a/update_stdlib.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-# Script to copy asyncio files to the standard library tree.
-# Optional argument is the root of the Python 3.4 tree.
-# Assumes you have already created Lib/asyncio and
-# Lib/test/test_asyncio in the destination tree.
-
-CPYTHON=${1-$HOME/cpython}
-
-if [ ! -d $CPYTHON ]
-then
- echo Bad destination $CPYTHON
- exit 1
-fi
-
-if [ ! -f asyncio/__init__.py ]
-then
- echo Bad current directory
- exit 1
-fi
-
-maybe_copy()
-{
- SRC=$1
- DST=$CPYTHON/$2
- if cmp $DST $SRC
- then
- return
- fi
- echo ======== $SRC === $DST ========
- diff -u $DST $SRC
- echo -n "Copy $SRC? [y/N/back] "
- read X
- case $X in
- [yY]*) echo Copying $SRC; cp $SRC $DST;;
- back) echo Copying TO $SRC; cp $DST $SRC;;
- *) echo Not copying $SRC;;
- esac
-}
-
-for i in `(cd asyncio && ls *.py)`
-do
- if [ $i == test_support.py ]
- then
- continue
- fi
-
- if [ $i == selectors.py ]
- then
- if [ "`(cd $CPYTHON; hg branch)`" == "3.4" ]
- then
- echo "Destination is 3.4 branch -- ignoring selectors.py"
- else
- maybe_copy asyncio/$i Lib/$i
- fi
- else
- maybe_copy asyncio/$i Lib/asyncio/$i
- fi
-done
-
-for i in `(cd tests && ls *.py *.pem)`
-do
- if [ $i == test_selectors.py ]
- then
- continue
- fi
- maybe_copy tests/$i Lib/test/test_asyncio/$i
-done
-
-maybe_copy overlapped.c Modules/overlapped.c