Diffstat (limited to 'lib/transfer.c')
-rw-r--r--  lib/transfer.c | 85
1 file changed, 13 insertions, 72 deletions
diff --git a/lib/transfer.c b/lib/transfer.c
index 0c6056a08..a451fb74e 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -506,35 +506,6 @@ static int data_pending(const struct connectdata *conn)
#endif
}
-static void read_rewind(struct connectdata *conn,
- size_t thismuch)
-{
- DEBUGASSERT(conn->read_pos >= thismuch);
-
- conn->read_pos -= thismuch;
- conn->bits.stream_was_rewound = TRUE;
-
-#ifdef DEBUGBUILD
- {
- char buf[512 + 1];
- size_t show;
-
- show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
- if(conn->master_buffer) {
- memcpy(buf, conn->master_buffer + conn->read_pos, show);
- buf[show] = '\0';
- }
- else {
- buf[0] = '\0';
- }
-
- DEBUGF(infof(conn->data,
- "Buffer after stream rewind (read_pos = %zu): [%s]\n",
- conn->read_pos, buf));
- }
-#endif
-}
-
/*
* Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
* remote document with the time provided by CURLOPT_TIMEVAL
@@ -609,9 +580,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
conn->httpversion == 20) &&
#endif
k->size != -1 && !k->header) {
- /* make sure we don't read "too much" if we can help it since we
- might be pipelining and then someone else might want to read what
- follows! */
+ /* make sure we don't read too much */
curl_off_t totalleft = k->size - k->bytecount;
if(totalleft < (curl_off_t)bytestoread)
bytestoread = (size_t)totalleft;
@@ -693,20 +662,11 @@ static CURLcode readwrite_data(struct Curl_easy *data,
/* We've stopped dealing with input, get out of the do-while loop */
if(nread > 0) {
- if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
- infof(data,
- "Rewinding stream by : %zd"
- " bytes on url %s (zero-length body)\n",
- nread, data->state.up.path);
- read_rewind(conn, (size_t)nread);
- }
- else {
- infof(data,
- "Excess found in a non pipelined read:"
- " excess = %zd"
- " url = %s (zero-length body)\n",
- nread, data->state.up.path);
- }
+ infof(data,
+ "Excess found:"
+ " excess = %zd"
+ " url = %s (zero-length body)\n",
+ nread, data->state.up.path);
}
break;
@@ -837,19 +797,12 @@ static CURLcode readwrite_data(struct Curl_easy *data,
/* There are now possibly N number of bytes at the end of the
str buffer that weren't written to the client.
-
- We DO care about this data if we are pipelining.
Push it back to be read on the next pass. */
dataleft = conn->chunk.dataleft;
if(dataleft != 0) {
infof(conn->data, "Leftovers after chunking: %zu bytes\n",
dataleft);
- if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
- /* only attempt the rewind if we truly are pipelining */
- infof(conn->data, "Rewinding %zu bytes\n",dataleft);
- read_rewind(conn, dataleft);
- }
}
}
/* If it returned OK, we just keep going */
@@ -868,25 +821,13 @@ static CURLcode readwrite_data(struct Curl_easy *data,
excess = (size_t)(k->bytecount + nread - k->maxdownload);
if(excess > 0 && !k->ignorebody) {
- if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
- infof(data,
- "Rewinding stream by : %zu"
- " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
- ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
- ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
- excess, data->state.up.path,
- k->size, k->maxdownload, k->bytecount, nread);
- read_rewind(conn, excess);
- }
- else {
- infof(data,
- "Excess found in a non pipelined read:"
- " excess = %zu"
- ", size = %" CURL_FORMAT_CURL_OFF_T
- ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
- ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
- excess, k->size, k->maxdownload, k->bytecount);
- }
+ infof(data,
+ "Excess found in a read:"
+ " excess = %zu"
+ ", size = %" CURL_FORMAT_CURL_OFF_T
+ ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
+ ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
+ excess, k->size, k->maxdownload, k->bytecount);
}
nread = (ssize_t) (k->maxdownload - k->bytecount);
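
For illustration only, the sketch below mirrors the two checks that survive this patch: the "make sure we don't read too much" clamp and the simplified "Excess found in a read" report. It is not part of the patch; the stand-in type off64, the plain stdio logging, and the main() harness are assumptions replacing curl's curl_off_t and infof() machinery, and the variable names are copied from the hunks above only for readability.

#include <stdio.h>
#include <stdint.h>

/* stand-in for curl_off_t (assumption: a signed 64-bit offset type) */
typedef int64_t off64;

/* Clamp a read request so it never exceeds what is left of the body,
   mirroring the "make sure we don't read too much" hunk above
   (assumes size is known and bytecount <= size, as the real code checks). */
static size_t clamp_bytestoread(off64 size, off64 bytecount, size_t bytestoread)
{
  off64 totalleft = size - bytecount;
  if(totalleft < (off64)bytestoread)
    bytestoread = (size_t)totalleft;
  return bytestoread;
}

/* Report how many bytes arrived beyond maxdownload, mirroring the
   simplified "Excess found in a read" branch above (curl passes nread
   as ssize_t; off64 is used here to keep the sketch portable). */
static void report_excess(off64 bytecount, off64 nread, off64 maxdownload)
{
  off64 over = bytecount + nread - maxdownload;
  if(over > 0)
    printf("Excess found in a read: excess = %zu bytes\n", (size_t)over);
}

int main(void)
{
  /* body is 100 bytes, 90 already received, caller asks for 16: clamp to 10 */
  printf("bytestoread clamped to %zu\n", clamp_bytestoread(100, 90, 16));
  /* 90 received + 16 just read, but maxdownload is 100: 6 bytes of excess */
  report_excess(90, 16, 100);
  return 0;
}

With pipelining gone, the excess is only logged and then truncated (nread is reset to maxdownload - bytecount), rather than rewound into the connection buffer for a follow-up request as read_rewind() used to do.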