summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTim Burke <tim.burke@gmail.com>2019-03-21 08:42:14 -0700
committerGitHub <noreply@github.com>2019-03-21 08:42:14 -0700
commitb9bf369778ec9798b3b6cffe59b7fd15f6159013 (patch)
tree5e36816960c281f25ca2f92e2079c2cd12c6d881
parent4c59e301de90525f022c293ff80e2bebc8f40340 (diff)
downloadeventlet-b9bf369778ec9798b3b6cffe59b7fd15f6159013.tar.gz
wsgi: Only send 100 Continue response if no response has been sent yet (#557)
Some applications may need to perform some long-running operation during a client-request cycle. To keep the client from timing out while waiting for the response, the application issues a status pro tempore, dribbles out whitespace (or some other filler) periodically, and expects the client to parse the final response to confirm success or failure. Previously, if the application was *too* eager and sent data before ever reading from the request body, we would write headers to the client, send that initial data, but then *still send the 100 Continue* when the application finally read the request. Since this would occur on a chunk boundary, the client cannot parse the size of the next chunk, and everything goes off the rails. Now, only be willing to send the 100 Continue response if we have not sent headers to the client.
-rw-r--r--eventlet/wsgi.py26
-rw-r--r--tests/wsgi_test.py61
2 files changed, 80 insertions, 7 deletions
diff --git a/eventlet/wsgi.py b/eventlet/wsgi.py
index c94395b..ffd9cde 100644
--- a/eventlet/wsgi.py
+++ b/eventlet/wsgi.py
@@ -92,7 +92,8 @@ class Input(object):
sock,
wfile=None,
wfile_line=None,
- chunked_input=False):
+ chunked_input=False,
+ headers_sent=None):
self.rfile = rfile
self._sock = sock
@@ -112,7 +113,18 @@ class Input(object):
self.hundred_continue_headers = None
self.is_hundred_continue_response_sent = False
+ # Hold on to a ref to the response state so we know whether we can
+ # still send the 100 Continue
+ self.headers_sent = headers_sent
+
def send_hundred_continue_response(self):
+ if self.headers_sent:
+ # Too late; application has already started sending data back
+ # to the client
+ # TODO: maybe log a warning if self.hundred_continue_headers
+ # is not None?
+ return
+
towrite = []
# 100 Continue status line
@@ -446,12 +458,13 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
self.close_connection = 1
return
- self.environ = self.get_environ()
+ headers_sent = []
+ self.environ = self.get_environ(headers_sent)
self.application = self.server.app
try:
self.server.outstanding_requests += 1
try:
- self.handle_one_response()
+ self.handle_one_response(headers_sent)
except socket.error as e:
# Broken pipe, connection reset by peer
if support.get_errno(e) not in BROKEN_SOCK:
@@ -459,10 +472,9 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
finally:
self.server.outstanding_requests -= 1
- def handle_one_response(self):
+ def handle_one_response(self, headers_sent):
start = time.time()
headers_set = []
- headers_sent = []
wfile = self.wfile
result = None
@@ -643,7 +655,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
host = forward + ',' + host
return (host, port)
- def get_environ(self):
+ def get_environ(self, headers_sent):
env = self.server.get_environ()
env['REQUEST_METHOD'] = self.command
env['SCRIPT_NAME'] = ''
@@ -707,7 +719,7 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
env['wsgi.input'] = env['eventlet.input'] = Input(
self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line,
- chunked_input=chunked)
+ chunked_input=chunked, headers_sent=headers_sent)
env['eventlet.posthooks'] = []
return env
diff --git a/tests/wsgi_test.py b/tests/wsgi_test.py
index 6414219..9405d80 100644
--- a/tests/wsgi_test.py
+++ b/tests/wsgi_test.py
@@ -988,6 +988,67 @@ class TestHttpd(_TestBase):
fd.close()
sock.close()
+ def test_024d_expect_100_continue_with_eager_app_chunked(self):
+ def wsgi_app(environ, start_response):
+ # app knows it's going to do some time-intensive thing and doesn't
+ # want clients to time out, so its protocol says to:
+ # * generally expect a successful status code,
+ # * be prepared to eat some whitespace that will get dribbled out
+ # periodically, and
+ # * parse the final status from the response body.
+ environ['eventlet.minimum_write_chunk_size'] = 0
+ start_response('202 Accepted', [])
+
+ def resp_gen():
+ yield b' '
+ environ['wsgi.input'].read()
+ yield b' '
+ yield b'\n503 Service Unavailable\n\nOops!\n'
+
+ return resp_gen()
+
+ self.site.application = wsgi_app
+ sock = eventlet.connect(self.server_addr)
+ fd = sock.makefile('rwb')
+ fd.write(b'PUT /a HTTP/1.1\r\n'
+ b'Host: localhost\r\nConnection: close\r\n'
+ b'Transfer-Encoding: chunked\r\n'
+ b'Expect: 100-continue\r\n\r\n')
+ fd.flush()
+
+ # Expect the optimistic response
+ header_lines = []
+ while True:
+ line = fd.readline()
+ if line == b'\r\n':
+ break
+ else:
+ header_lines.append(line.strip())
+ self.assertEqual(header_lines[0], b'HTTP/1.1 202 Accepted')
+
+ def chunkify(data):
+ return '{:x}'.format(len(data)).encode('ascii') + b'\r\n' + data + b'\r\n'
+
+ def expect_chunk(data):
+ expected = chunkify(data)
+ self.assertEqual(expected, fd.read(len(expected)))
+
+ # Can even see that initial whitespace
+ expect_chunk(b' ')
+
+ # Send message
+ fd.write(chunkify(b'some data'))
+ fd.write(chunkify(b'')) # end-of-message
+ fd.flush()
+
+ # Expect final response
+ expect_chunk(b' ')
+ expect_chunk(b'\n503 Service Unavailable\n\nOops!\n')
+ expect_chunk(b'') # end-of-message
+
+ fd.close()
+ sock.close()
+
def test_025_accept_errors(self):
debug.hub_exceptions(True)
listener = greensocket.socket()