author     Richy Kim <richy@fb.com>                      2016-12-20 05:48:15 -0500
committer  Daniel Stenberg <daniel@haxx.se>              2017-01-19 23:38:04 +0100
commit     6b7616690e5370c21e3a760321af6bf4edbabfb6 (patch)
tree       15ff13a118a311f3a604686103b6d0eaaee6ced4
parent     81cb255cb355ca35453e7297af9cbc8ff7798416 (diff)
download   curl-6b7616690e5370c21e3a760321af6bf4edbabfb6.tar.gz
CURLOPT_BUFFERSIZE: support enlarging receive buffer
Replace use of fixed macro BUFSIZE to define the size of the receive
buffer. Reappropriate CURLOPT_BUFFERSIZE to include enlarging receive
buffer size. Upon setting, resize buffer if larger than the current
default size, up to MAX_BUFSIZE (512KB). This can benefit protocols
like SFTP.
Closes #1222
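
For context, a minimal usage sketch (not part of this commit; the URL and the
requested size are illustrative) of how an application asks for a larger
receive buffer once this change is in place:

#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "sftp://example.com/big-file.bin");
    /* request a 100000-byte receive buffer; libcurl treats this as a
       request, not an order, and caps it at CURL_MAX_READ_SIZE (512KB) */
    curl_easy_setopt(curl, CURLOPT_BUFFERSIZE, 100000L);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}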
-rw-r--r--   docs/libcurl/curl_easy_setopt.3           2
-rw-r--r--   docs/libcurl/opts/CURLOPT_BUFFERSIZE.3   18
-rw-r--r--   docs/libcurl/symbols-in-versions          1
-rw-r--r--   include/curl/curl.h                       5
-rw-r--r--   lib/easy.c                                6
-rw-r--r--   lib/file.c                                2
-rw-r--r--   lib/ftp.c                                 7
-rw-r--r--   lib/http.c                                3
-rw-r--r--   lib/telnet.c                              5
-rw-r--r--   lib/url.c                                25
-rw-r--r--   lib/urldata.h                             5
11 files changed, 59 insertions, 20 deletions
diff --git a/docs/libcurl/curl_easy_setopt.3 b/docs/libcurl/curl_easy_setopt.3
index 66f573464..422cb8569 100644
--- a/docs/libcurl/curl_easy_setopt.3
+++ b/docs/libcurl/curl_easy_setopt.3
@@ -190,7 +190,7 @@ Timeout for DNS cache. See \fICURLOPT_DNS_CACHE_TIMEOUT(3)\fP
 .IP CURLOPT_DNS_USE_GLOBAL_CACHE
 OBSOLETE Enable global DNS cache. See \fICURLOPT_DNS_USE_GLOBAL_CACHE(3)\fP
 .IP CURLOPT_BUFFERSIZE
-Ask for smaller buffer size. See \fICURLOPT_BUFFERSIZE(3)\fP
+Ask for alternate buffer size. See \fICURLOPT_BUFFERSIZE(3)\fP
 .IP CURLOPT_PORT
 Port number to connect to. See \fICURLOPT_PORT(3)\fP
 .IP CURLOPT_TCP_FASTOPEN
diff --git a/docs/libcurl/opts/CURLOPT_BUFFERSIZE.3 b/docs/libcurl/opts/CURLOPT_BUFFERSIZE.3
index 113118e73..dc5bf74f2 100644
--- a/docs/libcurl/opts/CURLOPT_BUFFERSIZE.3
+++ b/docs/libcurl/opts/CURLOPT_BUFFERSIZE.3
@@ -5,7 +5,7 @@
 .\" *                            | (__| |_| |  _ <| |___
 .\" *                             \___|\___/|_| \_\_____|
 .\" *
-.\" * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
+.\" * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
 .\" *
 .\" * This software is licensed as described in the file COPYING, which
 .\" * you should have received as part of this distribution. The terms
@@ -30,20 +30,22 @@ CURLcode curl_easy_setopt(CURL *handle, CURLOPT_BUFFERSIZE, long size);
 .SH DESCRIPTION
 Pass a long specifying your preferred \fIsize\fP (in bytes) for the receive
 buffer in libcurl. The main point of this would be that the write callback
-gets called more often and with smaller chunks. This is just treated as a
-request, not an order. You cannot be guaranteed to actually get the given
-size.
+gets called more often and with smaller chunks. Secondly, for some protocols,
+there's a benefit of having a larger buffer for performance.
 
-This size is by default set as big as possible (\fICURL_MAX_WRITE_SIZE\fP), so
-it only makes sense to use this option if you want it smaller.
+This is just treated as a request, not an order. You cannot be guaranteed to
+actually get the given size.
+
+This buffer size is by default \fICURL_MAX_WRITE_SIZE\fP (16kB). The maximum
+buffer size allowed to set is \fICURL_MAX_READ_SIZE\fP (512kB).
 .SH DEFAULT
-CURL_MAX_WRITE_SIZE
+CURL_MAX_WRITE_SIZE (16kB)
 .SH PROTOCOLS
 All
 .SH EXAMPLE
 TODO
 .SH AVAILABILITY
-Added in 7.10
+Added in 7.10. Growing the buffer was added in 7.53.0.
 .SH RETURN VALUE
 Returns CURLE_OK if the option is supported, and CURLE_UNKNOWN_OPTION if not.
 .SH "SEE ALSO"
diff --git a/docs/libcurl/symbols-in-versions b/docs/libcurl/symbols-in-versions
index 89672c5e9..8834ada54 100644
--- a/docs/libcurl/symbols-in-versions
+++ b/docs/libcurl/symbols-in-versions
@@ -748,6 +748,7 @@ CURL_LOCK_TYPE_DNS              7.10        -         7.10.2
 CURL_LOCK_TYPE_NONE             7.10        -         7.10.2
 CURL_LOCK_TYPE_SSL_SESSION      7.10        -         7.10.2
 CURL_MAX_HTTP_HEADER            7.19.7
+CURL_MAX_READ_SIZE              7.53.0
 CURL_MAX_WRITE_SIZE             7.9.7
 CURL_NETRC_IGNORED              7.9.8
 CURL_NETRC_OPTIONAL             7.9.8
diff --git a/include/curl/curl.h b/include/curl/curl.h
index 74ac0350e..467bb0240 100644
--- a/include/curl/curl.h
+++ b/include/curl/curl.h
@@ -193,6 +193,11 @@ typedef int (*curl_xferinfo_callback)(void *clientp,
                                       curl_off_t ultotal,
                                       curl_off_t ulnow);
 
+#ifndef CURL_MAX_READ_SIZE
+  /* The maximum receive buffer size configurable via CURLOPT_BUFFERSIZE. */
+#define CURL_MAX_READ_SIZE 524288
+#endif
+
 #ifndef CURL_MAX_WRITE_SIZE
   /* Tests have proven that 20K is a very bad buffer size for uploads on
      Windows, while 16K for some odd reason performed a lot better.
diff --git a/lib/easy.c b/lib/easy.c
index 1242369d5..bed94a444 100644
--- a/lib/easy.c
+++ b/lib/easy.c
@@ -870,6 +870,11 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data)
    * get setup on-demand in the code, as that would probably decrease
    * the likeliness of us forgetting to init a buffer here in the future.
    */
+  outcurl->set.buffer_size = data->set.buffer_size;
+  outcurl->state.buffer = malloc(CURL_BUFSIZE(outcurl->set.buffer_size) + 1);
+  if(!outcurl->state.buffer)
+    goto fail;
+
   outcurl->state.headerbuff = malloc(HEADERSIZE);
   if(!outcurl->state.headerbuff)
     goto fail;
@@ -940,6 +945,7 @@ struct Curl_easy *curl_easy_duphandle(struct Curl_easy *data)
   if(outcurl) {
     curl_slist_free_all(outcurl->change.cookielist);
     outcurl->change.cookielist = NULL;
+    Curl_safefree(outcurl->state.buffer);
     Curl_safefree(outcurl->state.headerbuff);
     Curl_safefree(outcurl->change.url);
     Curl_safefree(outcurl->change.referer);
diff --git a/lib/file.c b/lib/file.c
index 3dbc0f2b8..e90902c50 100644
--- a/lib/file.c
+++ b/lib/file.c
@@ -476,7 +476,7 @@ static CURLcode file_do(struct connectdata *conn, bool *done)
       time_t filetime;
       struct tm buffer;
       const struct tm *tm = &buffer;
-      snprintf(buf, sizeof(data->state.buffer),
+      snprintf(buf, CURL_BUFSIZE(data->set.buffer_size),
                "Content-Length: %" CURL_FORMAT_CURL_OFF_T "\r\n", expected_size);
       result = Curl_client_write(conn, CLIENTWRITE_BOTH, buf, 0);
       if(result)
diff --git a/lib/ftp.c b/lib/ftp.c
--- a/lib/ftp.c
+++ b/lib/ftp.c
@@ -2107,7 +2107,7 @@ static CURLcode ftp_state_mdtm_resp(struct connectdata *conn,
         /* we have a time, reformat it */
         time_t secs=time(NULL);
         /* using the good old yacc/bison yuck */
-        snprintf(buf, sizeof(conn->data->state.buffer),
+        snprintf(buf, CURL_BUFSIZE(conn->data->set.buffer_size),
                  "%04d%02d%02d %02d:%02d:%02d GMT",
                  year, month, day, hour, minute, second);
         /* now, convert this into a time() value: */
@@ -2318,7 +2318,7 @@ static CURLcode ftp_state_size_resp(struct connectdata *conn,
   if(instate == FTP_SIZE) {
 #ifdef CURL_FTP_HTTPSTYLE_HEAD
     if(-1 != filesize) {
-      snprintf(buf, sizeof(data->state.buffer),
+      snprintf(buf, CURL_BUFSIZE(data->set.buffer_size),
               "Content-Length: %" CURL_FORMAT_CURL_OFF_T "\r\n", filesize);
       result = Curl_client_write(conn, CLIENTWRITE_BOTH, buf, 0);
       if(result)
@@ -2823,6 +2823,7 @@ static CURLcode ftp_statemach_act(struct connectdata *conn)
     case FTP_PWD:
       if(ftpcode == 257) {
         char *ptr=&data->state.buffer[4];  /* start on the first letter */
+        const size_t buf_size = CURL_BUFSIZE(data->set.buffer_size);
         char *dir;
         char *store;
 
@@ -2840,7 +2841,7 @@ static CURLcode ftp_statemach_act(struct connectdata *conn)
          */
 
         /* scan for the first double-quote for non-standard responses */
-        while(ptr < &data->state.buffer[sizeof(data->state.buffer)]
+        while(ptr < &data->state.buffer[buf_size]
               && *ptr != '\n' && *ptr != '\0' && *ptr != '"')
           ptr++;
diff --git a/lib/http.c b/lib/http.c
index fdaecafdc..2066520e7 100644
--- a/lib/http.c
+++ b/lib/http.c
@@ -297,7 +297,8 @@ static CURLcode http_output_basic(struct connectdata *conn, bool proxy)
     pwd = conn->passwd;
   }
 
-  snprintf(data->state.buffer, sizeof(data->state.buffer), "%s:%s", user, pwd);
+  snprintf(data->state.buffer, CURL_BUFSIZE(data->set.buffer_size),
+           "%s:%s", user, pwd);
 
   result = Curl_base64_encode(data, data->state.buffer,
                               strlen(data->state.buffer),
diff --git a/lib/telnet.c b/lib/telnet.c
index 551af60f2..8ee7efa4c 100644
--- a/lib/telnet.c
+++ b/lib/telnet.c
@@ -1416,6 +1416,7 @@ static CURLcode telnet_do(struct connectdata *conn, bool *done)
 
   /* Keep on listening and act on events */
   while(keepon) {
+    const size_t buf_size = CURL_BUFSIZE(data->set.buffer_size);
     waitret = WaitForMultipleObjects(obj_count, objs, FALSE, wait_timeout);
     switch(waitret) {
     case WAIT_TIMEOUT:
@@ -1451,7 +1452,7 @@ static CURLcode telnet_do(struct connectdata *conn, bool *done)
         if(!readfile_read)
           break;
 
-        if(!ReadFile(stdin_handle, buf, sizeof(data->state.buffer),
+        if(!ReadFile(stdin_handle, buf, buf_size,
                      &readfile_read, NULL)) {
           keepon = FALSE;
           result = CURLE_READ_ERROR;
@@ -1470,7 +1471,7 @@ static CURLcode telnet_do(struct connectdata *conn, bool *done)
 
     case WAIT_OBJECT_0 + 1:
     {
-      if(!ReadFile(stdin_handle, buf, sizeof(data->state.buffer),
+      if(!ReadFile(stdin_handle, buf, buf_size,
                    &readfile_read, NULL)) {
         keepon = FALSE;
         result = CURLE_READ_ERROR;
diff --git a/lib/url.c b/lib/url.c
--- a/lib/url.c
+++ b/lib/url.c
@@ -452,6 +452,7 @@ CURLcode Curl_close(struct Curl_easy *data)
   }
   data->change.url = NULL;
 
+  Curl_safefree(data->state.buffer);
   Curl_safefree(data->state.headerbuff);
 
   Curl_flush_cookies(data, 1);
@@ -641,6 +642,12 @@ CURLcode Curl_open(struct Curl_easy **curl)
 
   /* We do some initial setup here, all those fields that can't be just 0 */
 
+  data->state.buffer = malloc(BUFSIZE + 1);
+  if(!data->state.buffer) {
+    DEBUGF(fprintf(stderr, "Error: malloc of buffer failed\n"));
+    result = CURLE_OUT_OF_MEMORY;
+  }
+
   data->state.headerbuff = malloc(HEADERSIZE);
   if(!data->state.headerbuff) {
     DEBUGF(fprintf(stderr, "Error: malloc of headerbuff failed\n"));
@@ -671,6 +678,7 @@ CURLcode Curl_open(struct Curl_easy **curl)
     if(result) {
       Curl_resolver_cleanup(data->state.resolver);
+      free(data->state.buffer);
       free(data->state.headerbuff);
       Curl_freeset(data);
       free(data);
@@ -2268,9 +2276,20 @@ CURLcode Curl_setopt(struct Curl_easy *data, CURLoption option,
      */
     data->set.buffer_size = va_arg(param, long);
 
-    if((data->set.buffer_size> (BUFSIZE -1)) ||
-       (data->set.buffer_size < 1))
-      data->set.buffer_size = 0; /* huge internal default */
+    if(data->set.buffer_size > MAX_BUFSIZE)
+      data->set.buffer_size = MAX_BUFSIZE; /* huge internal default */
+    else if(data->set.buffer_size < 1)
+      data->set.buffer_size = BUFSIZE;
+
+    /* Resize only if larger than default buffer size. */
+    if(data->set.buffer_size > BUFSIZE) {
+      data->state.buffer = realloc(data->state.buffer,
+                                   data->set.buffer_size + 1);
+      if(!data->state.buffer) {
+        DEBUGF(fprintf(stderr, "Error: realloc of buffer failed\n"));
+        result = CURLE_OUT_OF_MEMORY;
+      }
+    }
 
     break;
diff --git a/lib/urldata.h b/lib/urldata.h
index 96c0aca19..20057effa 100644
--- a/lib/urldata.h
+++ b/lib/urldata.h
@@ -201,6 +201,9 @@
 /* Download buffer size, keep it fairly big for speed reasons */
 #undef BUFSIZE
 #define BUFSIZE CURL_MAX_WRITE_SIZE
+#undef MAX_BUFSIZE
+#define MAX_BUFSIZE CURL_MAX_READ_SIZE
+#define CURL_BUFSIZE(x) ((x)?(x):(BUFSIZE))
 
 /* Initial size of the buffer to store headers in, it'll be enlarged in case
    of need. */
@@ -1304,7 +1307,7 @@ struct UrlState {
   char *headerbuff; /* allocated buffer to store headers in */
   size_t headersize;   /* size of the allocation */
 
-  char buffer[BUFSIZE+1]; /* download buffer */
+  char *buffer; /* download buffer */
   char uploadbuffer[BUFSIZE+1]; /* upload buffer */
   curl_off_t current_speed;  /* the ProgressShow() funcion sets this,
                                 bytes / second */
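
For reference, a standalone sketch of how the new option handling maps a
requested size onto the allocated buffer. The constants mirror BUFSIZE
(CURL_MAX_WRITE_SIZE) and MAX_BUFSIZE (CURL_MAX_READ_SIZE) from the patch;
clamp() is only an illustration of the logic added to Curl_setopt, not
libcurl code, and the allocation math reflects that the patch only grows the
buffer (it never shrinks below the default):

#include <stdio.h>

#define BUFSIZE     16384   /* default: CURL_MAX_WRITE_SIZE */
#define MAX_BUFSIZE 524288  /* new upper bound: CURL_MAX_READ_SIZE */

/* mirrors the clamping the patch adds to Curl_setopt(CURLOPT_BUFFERSIZE) */
static long clamp(long requested)
{
  if(requested > MAX_BUFSIZE)
    return MAX_BUFSIZE;
  if(requested < 1)
    return BUFSIZE;
  return requested;
}

int main(void)
{
  const long requests[] = {0, 1024, 16384, 100000, 1000000};
  size_t i;
  for(i = 0; i < sizeof(requests)/sizeof(requests[0]); i++) {
    long size = clamp(requests[i]);   /* what set.buffer_size becomes */
    /* realloc happens only when the request exceeds the default, so the
       allocation never drops below BUFSIZE + 1 */
    long alloc = (size > BUFSIZE ? size : BUFSIZE) + 1;
    printf("requested %7ld -> buffer_size %7ld -> allocated %7ld bytes\n",
           requests[i], size, alloc);
  }
  return 0;
}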