diff options
| author | Sergey Shepelev <temotor@gmail.com> | 2020-11-13 15:14:08 +0300 |
|---|---|---|
| committer | Sergey Shepelev <temotor@gmail.com> | 2020-11-13 15:14:08 +0300 |
| commit | 272ebc0682f8985644e80ff1d66531ac7f09c3c3 (patch) | |
| tree | a1d57c036756813bb9838c7b35f7c1940bb3c493 | |
| parent | 9d731373e2325588a33229b425100ad538eed766 (diff) | |
| download | eventlet-doc-urllib-py3.tar.gz | |
use py3 urllib import in doc and examples (branch: doc-urllib-py3)
https://github.com/eventlet/eventlet/issues/668
| -rw-r--r-- | README.rst | 8 | ||||
| -rw-r--r-- | doc/design_patterns.rst | 6 | ||||
| -rw-r--r-- | doc/index.rst | 4 | ||||
| -rw-r--r-- | doc/real_index.html | 5 | ||||
| -rw-r--r-- | doc/ssl.rst | 4 | ||||
| -rw-r--r-- | examples/feedscraper-testclient.py | 4 | ||||
| -rw-r--r-- | examples/producer_consumer.py | 4 | ||||
| -rw-r--r-- | examples/recursive_crawler.py | 4 | ||||
| -rw-r--r-- | examples/webcrawler.py | 4 |
9 files changed, 22 insertions, 21 deletions
@@ -16,11 +16,11 @@ Quick Example Here's something you can try right on the command line:: - % python + % python3 >>> import eventlet - >>> from eventlet.green import urllib2 - >>> gt = eventlet.spawn(urllib2.urlopen, 'http://eventlet.net') - >>> gt2 = eventlet.spawn(urllib2.urlopen, 'http://secondlife.com') + >>> from eventlet.green.urllib.request import urlopen + >>> gt = eventlet.spawn(urlopen, 'http://eventlet.net') + >>> gt2 = eventlet.spawn(urlopen, 'http://secondlife.com') >>> gt2.wait() >>> gt.wait() diff --git a/doc/design_patterns.rst b/doc/design_patterns.rst index 0f84409..48f2938 100644 --- a/doc/design_patterns.rst +++ b/doc/design_patterns.rst @@ -11,14 +11,14 @@ Client Pattern The canonical client-side example is a web crawler. This use case is given a list of urls and wants to retrieve their bodies for later processing. Here is a very simple example:: import eventlet - from eventlet.green import urllib2 + from eventlet.green.urllib.request import urlopen urls = ["http://www.google.com/intl/en_ALL/images/logo.gif", "https://www.python.org/static/img/python-logo.png", "http://us.i1.yimg.com/us.yimg.com/i/ww/beta/y3.gif"] def fetch(url): - return urllib2.urlopen(url).read() + return urlopen(url).read() pool = eventlet.GreenPool() for body in pool.imap(fetch, urls): @@ -26,7 +26,7 @@ The canonical client-side example is a web crawler. This use case is given a li There is a slightly more complex version of this in the :ref:`web crawler example <web_crawler_example>`. Here's a tour of the interesting lines in this crawler. -``from eventlet.green import urllib2`` is how you import a cooperatively-yielding version of urllib2. It is the same in all respects to the standard version, except that it uses green sockets for its communication. This is an example of the :ref:`import-green` pattern. +``from eventlet.green.urllib.request import urlopen`` is how you import a cooperatively-yielding version of urllib. 
It is the same in all respects to the standard version, except that it uses green sockets for its communication. This is an example of the :ref:`import-green` pattern. ``pool = eventlet.GreenPool()`` constructs a :class:`GreenPool <eventlet.greenpool.GreenPool>` of a thousand green threads. Using a pool is good practice because it provides an upper limit on the amount of work that this crawler will be doing simultaneously, which comes in handy when the input data changes dramatically. diff --git a/doc/index.rst b/doc/index.rst index 608c29b..f017773 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -12,10 +12,10 @@ Code talks! This is a simple web crawler that fetches a bunch of urls concurren ] import eventlet - from eventlet.green import urllib2 + from eventlet.green.urllib.request import urlopen def fetch(url): - return urllib2.urlopen(url).read() + return urlopen(url).read() pool = eventlet.GreenPool() for body in pool.imap(fetch, urls): diff --git a/doc/real_index.html b/doc/real_index.html index fe1b656..e4a1465 100644 --- a/doc/real_index.html +++ b/doc/real_index.html @@ -103,7 +103,8 @@ links to related issues or websites <p>This is a simple web “crawler” that fetches a bunch of urls using a coroutine pool. It has as much concurrency (i.e. pages being fetched simultaneously) as coroutines in the pool.</p> <pre><code class="language-python">import eventlet -from eventlet.green import urllib2 +# note: this urllib import doesn't work in Python2 +from eventlet.green.urllib.request import urlopen urls = [ @@ -114,7 +115,7 @@ urls = [ def fetch(url): - return urllib2.urlopen(url).read() + return urlopen(url).read() pool = eventlet.GreenPool() diff --git a/doc/ssl.rst b/doc/ssl.rst index cd8e9a2..3596725 100644 --- a/doc/ssl.rst +++ b/doc/ssl.rst @@ -5,9 +5,9 @@ Eventlet makes it easy to use non-blocking SSL sockets. If you're using Python In either case, the ``green`` modules handle SSL sockets transparently, just like their standard counterparts. 
As an example, :mod:`eventlet.green.urllib2` can be used to fetch https urls in as non-blocking a fashion as you please:: - from eventlet.green import urllib2 + from eventlet.green.urllib.request import urlopen from eventlet import spawn - bodies = [spawn(urllib2.urlopen, url) + bodies = [spawn(urlopen, url) for url in ("https://secondlife.com","https://google.com")] for b in bodies: print(b.wait().read()) diff --git a/examples/feedscraper-testclient.py b/examples/feedscraper-testclient.py index b68da8d..601f12a 100644 --- a/examples/feedscraper-testclient.py +++ b/examples/feedscraper-testclient.py @@ -1,4 +1,4 @@ -from eventlet.green import urllib2 +from eventlet.green.urllib.request import urlopen big_list_of_feeds = """ http://blog.eventlet.net/feed/ @@ -21,5 +21,5 @@ http://ln.hixie.ch/rss/html """ url = 'http://localhost:9010/' -result = urllib2.urlopen(url, big_list_of_feeds) +result = urlopen(url, big_list_of_feeds) print(result.read()) diff --git a/examples/producer_consumer.py b/examples/producer_consumer.py index 9692b7d..ca0585d 100644 --- a/examples/producer_consumer.py +++ b/examples/producer_consumer.py @@ -9,7 +9,7 @@ GreenPool handles any exceptions raised and arranges so that there's a set number of "workers", so you don't have to write that tedious management code yourself. """ -from eventlet.green import urllib2 +from eventlet.green.urllib.request import urlopen import eventlet import re @@ -22,7 +22,7 @@ def fetch(url, outq): print("fetching", url) data = '' with eventlet.Timeout(5, False): - data = urllib2.urlopen(url).read() + data = urlopen(url).read().decode() for url_match in url_regex.finditer(data): new_url = url_match.group(0) outq.put(new_url) diff --git a/examples/recursive_crawler.py b/examples/recursive_crawler.py index 8aa51f5..de54fb4 100644 --- a/examples/recursive_crawler.py +++ b/examples/recursive_crawler.py @@ -9,7 +9,7 @@ searching for new urls, and dispatching new fetches. 
The GreenPool acts as sort of a job coordinator (and concurrency controller of course). """ -from eventlet.green import urllib2 +from eventlet.green.urllib.request import urlopen import eventlet import re @@ -23,7 +23,7 @@ def fetch(url, seen, pool): print("fetching", url) data = '' with eventlet.Timeout(5, False): - data = urllib2.urlopen(url).read() + data = urlopen(url).read().decode() for url_match in url_regex.finditer(data): new_url = url_match.group(0) # only send requests to eventlet.net so as not to destroy the internet diff --git a/examples/webcrawler.py b/examples/webcrawler.py index db93873..e23241d 100644 --- a/examples/webcrawler.py +++ b/examples/webcrawler.py @@ -8,7 +8,7 @@ The prints in the body of the fetch function are there to demonstrate that the requests are truly made in parallel. """ import eventlet -from eventlet.green import urllib2 +from eventlet.green.urllib.request import urlopen urls = [ @@ -20,7 +20,7 @@ urls = [ def fetch(url): print("opening", url) - body = urllib2.urlopen(url).read() + body = urlopen(url).read() print("done with", url) return url, body |
