1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
|
import datetime
import functools
import futures.thread
import time
import timeit
import urllib.request
# Benchmark workload: 7 distinct sites (one intentionally unresolvable, to
# exercise the error paths) repeated 5 times -> 35 fetches per run.
URLS = ['http://www.google.com/',
'http://www.apple.com/',
'http://www.ibm.com',
'http://www.thisurlprobablydoesnotexist.com',
'http://www.slashdot.org/',
'http://www.python.org/',
'http://www.sweetapp.com/'] * 5
def load_url(url, timeout):
    """Fetch *url* and return the response body as bytes.

    Args:
        url: The URL to fetch.
        timeout: Socket timeout in seconds, passed through to urlopen().

    Raises:
        urllib.error.URLError (an OSError subclass) on connection failure,
        and OSError-family timeouts.
    """
    # Use a context manager so the response/socket is always closed, even
    # when read() raises; the original leaked the response object.
    with urllib.request.urlopen(url, timeout=timeout) as response:
        return response.read()
def download_urls_sequential(urls, timeout=60):
    """Download every URL in *urls* one at a time.

    Args:
        urls: Iterable of URL strings.
        timeout: Per-request timeout in seconds (default 60).

    Returns:
        A dict mapping each successfully fetched URL to its content (bytes).
        URLs that fail to download are silently skipped (best-effort).
    """
    url_to_content = {}
    for url in urls:
        try:
            url_to_content[url] = load_url(url, timeout=timeout)
        except OSError:
            # urllib.error.URLError and socket timeouts are OSError
            # subclasses.  The original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, which must propagate.
            pass
    return url_to_content
def download_urls_with_executor(urls, executor, timeout=60):
    """Download every URL in *urls* concurrently via *executor*.

    Submits one load_url task per URL using the executor's legacy
    run_to_futures API, then collects only the futures that completed
    successfully.  The executor is always shut down, even on error.

    Args:
        urls: Sequence of URL strings (indexable; future.index maps back).
        executor: A futures executor exposing run_to_futures()/shutdown().
        timeout: Overall timeout in seconds (default 60).

    Returns:
        A dict mapping each successfully fetched URL to its content.
    """
    try:
        future_list = executor.run_to_futures(
            (functools.partial(load_url, url, timeout) for url in urls),
            timeout=timeout)
        # Each successful future remembers its submission index, which
        # maps back to the originating URL.
        return {urls[done.index]: done.result()
                for done in future_list.successful_futures()}
    finally:
        executor.shutdown()
def main():
    """Benchmark sequential vs. process-pool vs. thread-pool downloads.

    Runs each strategy over the same URL list and prints the wall-clock
    time taken by each.
    """
    for name, fn in [('sequential',
                      functools.partial(download_urls_sequential, URLS)),
                     ('processes',
                      functools.partial(download_urls_with_executor,
                                        URLS,
                                        futures.ProcessPoolExecutor(10))),
                     ('threads',
                      functools.partial(download_urls_with_executor,
                                        URLS,
                                        futures.ThreadPoolExecutor(10)))]:
        print('%s: ' % name.ljust(12), end='')
        start = time.time()
        fn()
        print('%.2f seconds' % (time.time() - start))


if __name__ == '__main__':
    # The guard is required because ProcessPoolExecutor's spawned workers
    # re-import this module; without it every worker would re-run main()
    # and spawn further processes.  The original called main() unguarded.
    main()
|