-rw-r--r--  lib/Makefile.inc    273
-rw-r--r--  lib/c-hyper.c       901
-rw-r--r--  lib/c-hyper.h        46
-rw-r--r--  lib/http.c         2170
-rw-r--r--  lib/http.h           55
-rw-r--r--  lib/transfer.c       66
-rw-r--r--  lib/urldata.h        35
7 files changed, 2441 insertions, 1105 deletions
diff --git a/lib/Makefile.inc b/lib/Makefile.inc
index 6d35704c0..943333272 100644
--- a/lib/Makefile.inc
+++ b/lib/Makefile.inc
@@ -44,43 +44,244 @@ LIB_VSSH_CFILES = vssh/libssh.c vssh/libssh2.c vssh/wolfssh.c
LIB_VSSH_HFILES = vssh/ssh.h
-LIB_CFILES = altsvc.c amigaos.c asyn-ares.c asyn-thread.c base64.c \
- conncache.c connect.c content_encoding.c cookie.c curl_addrinfo.c \
- curl_ctype.c curl_des.c curl_endian.c curl_fnmatch.c curl_get_line.c \
- curl_gethostname.c curl_gssapi.c curl_memrchr.c curl_multibyte.c \
- curl_ntlm_core.c curl_ntlm_wb.c curl_path.c curl_range.c curl_rtmp.c \
- curl_sasl.c curl_sspi.c curl_threads.c dict.c dotdot.c easy.c escape.c \
- file.c fileinfo.c formdata.c ftp.c url.c ftplistparser.c getenv.c getinfo.c \
- gopher.c hash.c hmac.c hostasyn.c hostcheck.c hostip.c hostip4.c hostip6.c \
- hostsyn.c http.c http2.c http_chunks.c http_digest.c http_negotiate.c \
- http_ntlm.c http_proxy.c idn_win32.c if2ip.c imap.c inet_ntop.c inet_pton.c \
- krb5.c ldap.c llist.c md4.c md5.c memdebug.c mime.c mprintf.c mqtt.c \
- multi.c netrc.c non-ascii.c nonblock.c openldap.c parsedate.c pingpong.c \
- pop3.c progress.c psl.c doh.c rand.c rename.c rtsp.c select.c \
- sendf.c setopt.c sha256.c share.c slist.c smb.c smtp.c socketpair.c socks.c \
- socks_gssapi.c socks_sspi.c speedcheck.c splay.c strcase.c strdup.c \
- strerror.c strtok.c strtoofft.c system_win32.c telnet.c tftp.c timeval.c \
- transfer.c urlapi.c version.c warnless.c wildcard.c x509asn1.c dynbuf.c \
- version_win32.c easyoptions.c easygetopt.c hsts.c
+LIB_CFILES = \
+ altsvc.c \
+ amigaos.c \
+ asyn-ares.c \
+ asyn-thread.c \
+ base64.c \
+ c-hyper.c \
+ conncache.c \
+ connect.c \
+ content_encoding.c \
+ cookie.c \
+ curl_addrinfo.c \
+ curl_ctype.c \
+ curl_des.c \
+ curl_endian.c \
+ curl_fnmatch.c \
+ curl_get_line.c \
+ curl_gethostname.c \
+ curl_gssapi.c \
+ curl_memrchr.c \
+ curl_multibyte.c \
+ curl_ntlm_core.c \
+ curl_ntlm_wb.c \
+ curl_path.c \
+ curl_range.c \
+ curl_rtmp.c \
+ curl_sasl.c \
+ curl_sspi.c \
+ curl_threads.c \
+ dict.c \
+ doh.c \
+ dotdot.c \
+ dynbuf.c \
+ easy.c \
+ easygetopt.c \
+ easyoptions.c \
+ escape.c \
+ file.c \
+ fileinfo.c \
+ formdata.c \
+ ftp.c \
+ ftplistparser.c \
+ getenv.c \
+ getinfo.c \
+ gopher.c \
+ hash.c \
+ hmac.c \
+ hostasyn.c \
+ hostcheck.c \
+ hostip.c \
+ hostip4.c \
+ hostip6.c \
+ hostsyn.c \
+ hsts.c \
+ http.c \
+ http2.c \
+ http_chunks.c \
+ http_digest.c \
+ http_negotiate.c \
+ http_ntlm.c \
+ http_proxy.c \
+ idn_win32.c \
+ if2ip.c \
+ imap.c \
+ inet_ntop.c \
+ inet_pton.c \
+ krb5.c \
+ ldap.c \
+ llist.c \
+ md4.c \
+ md5.c \
+ memdebug.c \
+ mime.c \
+ mprintf.c \
+ mqtt.c \
+ multi.c \
+ netrc.c \
+ non-ascii.c \
+ nonblock.c \
+ openldap.c \
+ parsedate.c \
+ pingpong.c \
+ pop3.c \
+ progress.c \
+ psl.c \
+ rand.c \
+ rename.c \
+ rtsp.c \
+ select.c \
+ sendf.c \
+ setopt.c \
+ sha256.c \
+ share.c \
+ slist.c \
+ smb.c \
+ smtp.c \
+ socketpair.c \
+ socks.c \
+ socks_gssapi.c \
+ socks_sspi.c \
+ speedcheck.c \
+ splay.c \
+ strcase.c \
+ strdup.c \
+ strerror.c \
+ strtok.c \
+ strtoofft.c \
+ system_win32.c \
+ telnet.c \
+ tftp.c \
+ timeval.c \
+ transfer.c \
+ url.c \
+ urlapi.c \
+ version.c \
+ version_win32.c \
+ warnless.c \
+ wildcard.c \
+ x509asn1.c
-LIB_HFILES = altsvc.h amigaos.h arpa_telnet.h asyn.h conncache.h connect.h \
- content_encoding.h cookie.h curl_addrinfo.h curl_base64.h curl_ctype.h \
- curl_des.h curl_endian.h curl_fnmatch.h curl_get_line.h curl_gethostname.h \
- curl_gssapi.h curl_hmac.h curl_ldap.h curl_md4.h curl_md5.h curl_memory.h \
- curl_memrchr.h curl_multibyte.h curl_ntlm_core.h curl_ntlm_wb.h curl_path.h \
- curl_printf.h curl_range.h curl_rtmp.h curl_sasl.h curl_krb5.h curl_setup.h \
- curl_setup_once.h curl_sha256.h curl_sspi.h curl_threads.h curlx.h dict.h \
- dotdot.h easyif.h escape.h file.h fileinfo.h formdata.h ftp.h url.h \
- ftplistparser.h getinfo.h gopher.h hash.h hostcheck.h hostip.h http.h \
- http2.h http_chunks.h http_digest.h http_negotiate.h http_ntlm.h \
- http_proxy.h if2ip.h imap.h inet_ntop.h inet_pton.h llist.h memdebug.h \
- mime.h mqtt.h multihandle.h multiif.h netrc.h non-ascii.h nonblock.h \
- parsedate.h pingpong.h pop3.h progress.h psl.h doh.h quic.h rand.h rename.h \
- rtsp.h select.h sendf.h setopt.h setup-vms.h share.h sigpipe.h slist.h \
- smb.h smtp.h sockaddr.h socketpair.h socks.h speedcheck.h splay.h strcase.h \
- strdup.h strerror.h strtok.h strtoofft.h system_win32.h telnet.h tftp.h \
- timeval.h transfer.h urlapi-int.h urldata.h warnless.h wildcard.h \
- x509asn1.h dynbuf.h version_win32.h easyoptions.h hsts.h
+LIB_HFILES = \
+ altsvc.h \
+ amigaos.h \
+ arpa_telnet.h \
+ asyn.h \
+ c-hyper.h \
+ conncache.h \
+ connect.h \
+ content_encoding.h \
+ cookie.h \
+ curl_addrinfo.h \
+ curl_base64.h \
+ curl_ctype.h \
+ curl_des.h \
+ curl_endian.h \
+ curl_fnmatch.h \
+ curl_get_line.h \
+ curl_gethostname.h \
+ curl_gssapi.h \
+ curl_hmac.h \
+ curl_krb5.h \
+ curl_ldap.h \
+ curl_md4.h \
+ curl_md5.h \
+ curl_memory.h \
+ curl_memrchr.h \
+ curl_multibyte.h \
+ curl_ntlm_core.h \
+ curl_ntlm_wb.h \
+ curl_path.h \
+ curl_printf.h \
+ curl_range.h \
+ curl_rtmp.h \
+ curl_sasl.h \
+ curl_setup.h \
+ curl_setup_once.h \
+ curl_sha256.h \
+ curl_sspi.h \
+ curl_threads.h \
+ curlx.h \
+ dict.h \
+ doh.h \
+ dotdot.h \
+ dynbuf.h \
+ easyif.h \
+ easyoptions.h \
+ escape.h \
+ file.h \
+ fileinfo.h \
+ formdata.h \
+ ftp.h \
+ ftplistparser.h \
+ getinfo.h \
+ gopher.h \
+ hash.h \
+ hostcheck.h \
+ hostip.h \
+ hsts.h \
+ http.h \
+ http2.h \
+ http_chunks.h \
+ http_digest.h \
+ http_negotiate.h \
+ http_ntlm.h \
+ http_proxy.h \
+ if2ip.h \
+ imap.h \
+ inet_ntop.h \
+ inet_pton.h \
+ llist.h \
+ memdebug.h \
+ mime.h \
+ mqtt.h \
+ multihandle.h \
+ multiif.h \
+ netrc.h \
+ non-ascii.h \
+ nonblock.h \
+ parsedate.h \
+ pingpong.h \
+ pop3.h \
+ progress.h \
+ psl.h \
+ quic.h \
+ rand.h \
+ rename.h \
+ rtsp.h \
+ select.h \
+ sendf.h \
+ setopt.h \
+ setup-vms.h \
+ share.h \
+ sigpipe.h \
+ slist.h \
+ smb.h \
+ smtp.h \
+ sockaddr.h \
+ socketpair.h \
+ socks.h \
+ speedcheck.h \
+ splay.h \
+ strcase.h \
+ strdup.h \
+ strerror.h \
+ strtok.h \
+ strtoofft.h \
+ system_win32.h \
+ telnet.h \
+ tftp.h \
+ timeval.h \
+ transfer.h \
+ url.h \
+ urlapi-int.h \
+ urldata.h \
+ version_win32.h \
+ warnless.h \
+ wildcard.h \
+ x509asn1.h
LIB_RCFILES = libcurl.rc
diff --git a/lib/c-hyper.c b/lib/c-hyper.c
new file mode 100644
index 000000000..e7b01f9d7
--- /dev/null
+++ b/lib/c-hyper.c
@@ -0,0 +1,901 @@
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+
+#include "curl_setup.h"
+
+#if !defined(CURL_DISABLE_HTTP) && defined(USE_HYPER)
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+#ifdef HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+#ifdef HAVE_NET_IF_H
+#include <net/if.h>
+#endif
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#include <hyper.h>
+#include "urldata.h"
+#include "sendf.h"
+#include "transfer.h"
+#include "multiif.h"
+#include "progress.h"
+
+/* The last 3 #include files should be in this order */
+#include "curl_printf.h"
+#include "curl_memory.h"
+#include "memdebug.h"
+
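+/* read_cb and write_cb are the hyper_io hooks: they move bytes between
+   hyper and the connection's socket with Curl_read()/Curl_write(). When the
+   socket would block they store a hyper_context waker and return
+   HYPER_IO_PENDING so hyper can retry once curl signals readiness. */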
+static size_t read_cb(void *userp, hyper_context *ctx,
+ uint8_t *buf, size_t buflen)
+{
+ struct connectdata *conn = (struct connectdata *)userp;
+ struct Curl_easy *data = conn->data;
+ CURLcode result;
+ ssize_t nread;
+
+ (void)ctx;
+
+ result = Curl_read(conn, conn->sockfd, (char *)buf, buflen, &nread);
+ if(result == CURLE_AGAIN) {
+ /* would block, register interest */
+ if(data->hyp.read_waker)
+ hyper_waker_free(data->hyp.read_waker);
+ data->hyp.read_waker = hyper_context_waker(ctx);
+ if(!data->hyp.read_waker) {
+ failf(data, "Couldn't make the read hyper_context_waker");
+ return HYPER_IO_ERROR;
+ }
+ return HYPER_IO_PENDING;
+ }
+ else if(result) {
+ failf(data, "Curl_read failed");
+ return HYPER_IO_ERROR;
+ }
+ return (size_t)nread;
+}
+
+static size_t write_cb(void *userp, hyper_context *ctx,
+ const uint8_t *buf, size_t buflen)
+{
+ struct connectdata *conn = (struct connectdata *)userp;
+ struct Curl_easy *data = conn->data;
+ CURLcode result;
+ ssize_t nwrote;
+
+ result = Curl_write(conn, conn->sockfd, (void *)buf, buflen, &nwrote);
+ if(result == CURLE_AGAIN) {
+ /* would block, register interest */
+ if(data->hyp.write_waker)
+ hyper_waker_free(data->hyp.write_waker);
+ data->hyp.write_waker = hyper_context_waker(ctx);
+ if(!data->hyp.write_waker) {
+ failf(data, "Couldn't make the write hyper_context_waker");
+ return HYPER_IO_ERROR;
+ }
+ return HYPER_IO_PENDING;
+ }
+ else if(result) {
+ failf(data, "Curl_write failed");
+ return HYPER_IO_ERROR;
+ }
+ return (size_t)nwrote;
+}
+
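+/* hyper_each_header is called once per response header: it rebuilds the
+   "name: value\r\n" line, feeds it to Curl_http_header() and then to the
+   header callback (or the write callback if no header callback is set). */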
+static int hyper_each_header(void *userdata,
+ const uint8_t *name,
+ size_t name_len,
+ const uint8_t *value,
+ size_t value_len)
+{
+ struct Curl_easy *data = (struct Curl_easy *)userdata;
+ size_t wrote;
+ size_t len;
+ char *headp;
+ CURLcode result;
+ curl_write_callback writeheader =
+ data->set.fwrite_header? data->set.fwrite_header: data->set.fwrite_func;
+ Curl_dyn_reset(&data->state.headerb);
+ if(name_len) {
+ if(Curl_dyn_addf(&data->state.headerb, "%.*s: %.*s\r\n",
+ (int) name_len, name, (int) value_len, value))
+ return HYPER_ITER_BREAK;
+ }
+ else {
+ if(Curl_dyn_add(&data->state.headerb, "\r\n"))
+ return HYPER_ITER_BREAK;
+ }
+ len = Curl_dyn_len(&data->state.headerb);
+ headp = Curl_dyn_ptr(&data->state.headerb);
+
+ result = Curl_http_header(data, data->conn, headp);
+ if(result) {
+ data->state.hresult = result;
+ return HYPER_ITER_BREAK;
+ }
+
+ Curl_debug(data, CURLINFO_HEADER_IN, headp, len);
+
+ Curl_set_in_callback(data, true);
+ wrote = writeheader(headp, 1, len, data->set.writeheader);
+ Curl_set_in_callback(data, false);
+ if(wrote != len) {
+ data->state.hresult = CURLE_ABORTED_BY_CALLBACK;
+ return HYPER_ITER_BREAK;
+ }
+
+ data->info.header_size += (long)len;
+ data->req.headerbytecount += (long)len;
+ return HYPER_ITER_CONTINUE;
+}
+
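+/* hyper_body_chunk delivers a piece of the response body to the write
+   callback and updates the download progress counters. The first chunk also
+   triggers Curl_http_firstwrite() so per-response handling (for example
+   deciding to ignore the body) happens exactly once. */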
+static int hyper_body_chunk(void *userdata, const hyper_buf *chunk)
+{
+ char *buf = (char *)hyper_buf_bytes(chunk);
+ size_t len = hyper_buf_len(chunk);
+ struct Curl_easy *data = (struct Curl_easy *)userdata;
+ curl_write_callback writebody = data->set.fwrite_func;
+ struct SingleRequest *k = &data->req;
+ size_t wrote;
+
+ if(0 == k->bodywrites++) {
+ bool done = FALSE;
+ CURLcode result = Curl_http_firstwrite(data, data->conn, &done);
+ if(result || done) {
+ infof(data, "Return early from hyper_body_chunk\n");
+ data->state.hresult = result;
+ return HYPER_ITER_BREAK;
+ }
+ }
+ if(k->ignorebody)
+ return HYPER_ITER_CONTINUE;
+ Curl_debug(data, CURLINFO_DATA_IN, buf, len);
+ Curl_set_in_callback(data, true);
+ wrote = writebody(buf, 1, len, data->set.out);
+ Curl_set_in_callback(data, false);
+
+ if(wrote != len) {
+ data->state.hresult = CURLE_WRITE_ERROR;
+ return HYPER_ITER_BREAK;
+ }
+
+ data->req.bytecount += len;
+ Curl_pgrsSetDownloadCounter(data, data->req.bytecount);
+ return HYPER_ITER_CONTINUE;
+}
+
+/*
+ * Hyper does not consider the status line, the first line in an HTTP/1
+ * response, to be a header. The libcurl API does. This function sends the
+ * status line in the header callback. */
+static CURLcode status_line(struct Curl_easy *data,
+ struct connectdata *conn,
+ uint16_t http_status,
+ int http_version,
+ const uint8_t *reason, size_t rlen)
+{
+ CURLcode result;
+ size_t wrote;
+ size_t len;
+ const char *vstr;
+ curl_write_callback writeheader =
+ data->set.fwrite_header? data->set.fwrite_header: data->set.fwrite_func;
+ vstr = http_version == HYPER_HTTP_VERSION_1_1 ? "1.1" :
+ (http_version == HYPER_HTTP_VERSION_2 ? "2" : "1.0");
+ conn->httpversion =
+ http_version == HYPER_HTTP_VERSION_1_1 ? 11 :
+ (http_version == HYPER_HTTP_VERSION_2 ? 20 : 10);
+ data->req.httpcode = http_status;
+
+ result = Curl_http_statusline(data, conn);
+ if(result)
+ return result;
+
+ Curl_dyn_reset(&data->state.headerb);
+
+ result = Curl_dyn_addf(&data->state.headerb, "HTTP/%s %03d %.*s\r\n",
+ vstr,
+ (int)http_status,
+ (int)rlen, reason);
+ if(result)
+ return result;
+ len = Curl_dyn_len(&data->state.headerb);
+ Curl_debug(data, CURLINFO_HEADER_IN, Curl_dyn_ptr(&data->state.headerb),
+ len);
+ Curl_set_in_callback(data, true);
+ wrote = writeheader(Curl_dyn_ptr(&data->state.headerb), 1, len,
+ data->set.writeheader);
+ Curl_set_in_callback(data, false);
+ if(wrote != len)
+ return CURLE_WRITE_ERROR;
+
+ data->info.header_size += (long)len;
+ data->req.headerbytecount += (long)len;
+ data->req.httpcode = http_status;
+ return CURLE_OK;
+}
+
+/*
+ * Hyper does not pass on the last empty response header. The libcurl API
+ * does. This function sends an empty header in the header callback.
+ */
+static CURLcode empty_header(struct Curl_easy *data)
+{
+ return hyper_each_header(data, NULL, 0, NULL, 0) ?
+ CURLE_WRITE_ERROR : CURLE_OK;
+}
+
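+/*
+ * hyperstream() is installed as conn->datastream and called repeatedly
+ * during the PERFORM phase: it wakes any pending read/write wakers based on
+ * the socket state, then polls the hyper executor and routes the response
+ * status line, headers and body chunks to the callbacks above.
+ */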
+static CURLcode hyperstream(struct Curl_easy *data,
+ struct connectdata *conn,
+ int *didwhat,
+ bool *done,
+ int select_res)
+{
+ hyper_response *resp = NULL;
+ uint16_t http_status;
+ int http_version;
+ hyper_headers *headers = NULL;
+ hyper_body *resp_body = NULL;
+ struct hyptransfer *h = &data->hyp;
+ hyper_task *task;
+ hyper_task *foreach;
+ hyper_error *hypererr = NULL;
+ const uint8_t *reasonp;
+ size_t reason_len;
+ CURLcode result = CURLE_OK;
+ (void)conn;
+
+ if(select_res & CURL_CSELECT_IN) {
+ if(h->read_waker)
+ hyper_waker_wake(h->read_waker);
+ h->read_waker = NULL;
+ }
+ if(select_res & CURL_CSELECT_OUT) {
+ if(h->write_waker)
+ hyper_waker_wake(h->write_waker);
+ h->write_waker = NULL;
+ }
+
+ *done = FALSE;
+ do {
+ hyper_task_return_type t;
+ task = hyper_executor_poll(h->exec);
+ if(!task) {
+ *didwhat = KEEP_RECV;
+ break;
+ }
+ t = hyper_task_type(task);
+ switch(t) {
+ case HYPER_TASK_ERROR:
+ hypererr = hyper_task_value(task);
+ break;
+ case HYPER_TASK_RESPONSE:
+ resp = hyper_task_value(task);
+ break;
+ default:
+ break;
+ }
+ hyper_task_free(task);
+
+ if(t == HYPER_TASK_ERROR) {
+ hyper_code errnum = hyper_error_code(hypererr);
+ if(errnum == HYPERE_ABORTED_BY_CALLBACK) {
+ /* override Hyper's view, might not even be an error */
+ result = data->state.hresult;
+ infof(data, "hyperstream is done (by early callback)\n");
+ }
+ else {
+ uint8_t errbuf[256];
+ size_t errlen = hyper_error_print(hypererr, errbuf, sizeof(errbuf));
+ failf(data, "Hyper: %.*s", (int)errlen, errbuf);
+ result = CURLE_RECV_ERROR; /* not a very good return code */
+ }
+ *done = TRUE;
+ hyper_error_free(hypererr);
+ break;
+ }
+ else if(h->init) {
+ /* end of transfer */
+ *done = TRUE;
+ infof(data, "hyperstream is done!\n");
+ break;
+ }
+ else if(t != HYPER_TASK_RESPONSE) {
+ *didwhat = KEEP_RECV;
+ break;
+ }
+ /* HYPER_TASK_RESPONSE */
+
+ h->init = TRUE;
+ *didwhat = KEEP_RECV;
+ if(!resp) {
+ failf(data, "hyperstream: couldn't get response\n");
+ return CURLE_RECV_ERROR;
+ }
+
+ http_status = hyper_response_status(resp);
+ http_version = hyper_response_version(resp);
+ reasonp = hyper_response_reason_phrase(resp);
+ reason_len = hyper_response_reason_phrase_len(resp);
+
+ result = status_line(data, conn,
+ http_status, http_version, reasonp, reason_len);
+ if(result)
+ break;
+
+ headers = hyper_response_headers(resp);
+ if(!headers) {
+ failf(data, "hyperstream: couldn't get response headers\n");
+ result = CURLE_RECV_ERROR;
+ break;
+ }
+
+ /* the headers are already received */
+ hyper_headers_foreach(headers, hyper_each_header, data);
+ if(data->state.hresult) {
+ result = data->state.hresult;
+ break;
+ }
+
+ if(empty_header(data)) {
+ failf(data, "hyperstream: couldn't pass blank header\n");
+ result = CURLE_OUT_OF_MEMORY;
+ break;
+ }
+
+ resp_body = hyper_response_body(resp);
+ if(!resp_body) {
+ failf(data, "hyperstream: couldn't get response body\n");
+ result = CURLE_RECV_ERROR;
+ break;
+ }
+ foreach = hyper_body_foreach(resp_body, hyper_body_chunk, data);
+ if(!foreach) {
+ failf(data, "hyperstream: body foreach failed\n");
+ result = CURLE_OUT_OF_MEMORY;
+ break;
+ }
+ DEBUGASSERT(hyper_task_type(foreach) == HYPER_TASK_EMPTY);
+ if(HYPERE_OK != hyper_executor_push(h->exec, foreach)) {
+ failf(data, "Couldn't hyper_executor_push the body-foreach");
+ result = CURLE_OUT_OF_MEMORY;
+ break;
+ }
+
+ hyper_response_free(resp);
+ resp = NULL;
+ } while(1);
+ if(resp)
+ hyper_response_free(resp);
+ return result;
+}
+
+static CURLcode debug_request(struct Curl_easy *data,
+ const char *method,
+ const char *path,
+ bool h2)
+{
+ char *req = aprintf("%s %s HTTP/%s\r\n", method, path,
+ h2?"2":"1.1");
+ if(!req)
+ return CURLE_OUT_OF_MEMORY;
+ Curl_debug(data, CURLINFO_HEADER_OUT, req, strlen(req));
+ free(req);
+ return CURLE_OK;
+}
+
+/*
+ * Given a full header line "name: value" (optional CRLF in the input, should
+ * be in the output), add to Hyper and send to the debug callback.
+ *
+ * Supports multiple headers.
+ */
+
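+/* Example: the single input string "Host: example.com\r\nUser-Agent: moo\r\n"
+   results in two hyper_headers_add() calls and two CURLINFO_HEADER_OUT
+   debug records. */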
+CURLcode Curl_hyper_header(struct Curl_easy *data, hyper_headers *headers,
+ const char *line)
+{
+ const char *p;
+ const char *n;
+ size_t nlen;
+ const char *v;
+ size_t vlen;
+ bool newline = TRUE;
+ int numh = 0;
+
+ if(!line)
+ return CURLE_OK;
+ n = line;
+ do {
+ size_t linelen = 0;
+
+ p = strchr(n, ':');
+ if(!p)
+ /* this is fine if we already added at least one header */
+ return numh ? CURLE_OK : CURLE_BAD_FUNCTION_ARGUMENT;
+ nlen = p - n;
+ p++; /* move past the colon */
+ while(*p == ' ')
+ p++;
+ v = p;
+ p = strchr(v, '\r');
+ if(!p) {
+ p = strchr(v, '\n');
+ if(p)
+ linelen = 1; /* LF only */
+ else {
+ p = strchr(v, '\0');
+ newline = FALSE; /* no newline */
+ }
+ }
+ else
+ linelen = 2; /* CRLF ending */
+ linelen += (p - n);
+ if(!n)
+ return CURLE_BAD_FUNCTION_ARGUMENT;
+ vlen = p - v;
+
+ if(HYPERE_OK != hyper_headers_add(headers, (uint8_t *)n, nlen,
+ (uint8_t *)v, vlen)) {
+ failf(data, "hyper_headers_add host\n");
+ return CURLE_OUT_OF_MEMORY;
+ }
+ if(data->set.verbose) {
+ char *ptr = NULL;
+ if(!newline) {
+ ptr = aprintf("%.*s\r\n", (int)linelen, line);
+ if(!ptr)
+ return CURLE_OUT_OF_MEMORY;
+ Curl_debug(data, CURLINFO_HEADER_OUT, ptr, linelen + 2);
+ free(ptr);
+ }
+ else
+ Curl_debug(data, CURLINFO_HEADER_OUT, (char *)line, linelen);
+ }
+ numh++;
+ n += linelen;
+ } while(newline);
+ return CURLE_OK;
+}
+
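+/* request_target() lets Curl_http_target() build the request target (path
+   plus query, or the absolute URL when going via a proxy) in a dynbuf and
+   installs it on the hyper request with hyper_request_set_uri(). */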
+static CURLcode request_target(struct Curl_easy *data,
+ struct connectdata *conn,
+ const char *method,
+ bool h2,
+ hyper_request *req)
+{
+ CURLcode result;
+ struct dynbuf r;
+
+ Curl_dyn_init(&r, DYN_HTTP_REQUEST);
+
+ result = Curl_http_target(data, conn, &r);
+ if(result)
+ return result;
+
+ if(hyper_request_set_uri(req, (uint8_t *)Curl_dyn_uptr(&r),
+ Curl_dyn_len(&r))) {
+ failf(data, "error setting path\n");
+ result = CURLE_OUT_OF_MEMORY;
+ }
+ else
+ result = debug_request(data, method, Curl_dyn_ptr(&r), h2);
+
+ Curl_dyn_free(&r);
+
+ return result;
+}
+
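+/* uploadpostfields() and uploadstreamed() are the request body callbacks:
+   the former hands CURLOPT_POSTFIELDS over in a single hyper_buf, the
+   latter refills the upload buffer via Curl_fillreadbuffer() until the read
+   callback has nothing more to deliver. */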
+static int uploadpostfields(void *userdata, hyper_context *ctx,
+ hyper_buf **chunk)
+{
+ struct Curl_easy *data = (struct Curl_easy *)userdata;
+ (void)ctx;
+ if(data->req.upload_done)
+ *chunk = NULL; /* nothing more to deliver */
+ else {
+ /* send everything off in a single go */
+ *chunk = hyper_buf_copy(data->set.postfields, data->req.p.http->postsize);
+ data->req.upload_done = TRUE;
+ }
+ return HYPER_POLL_READY;
+}
+
+static int uploadstreamed(void *userdata, hyper_context *ctx,
+ hyper_buf **chunk)
+{
+ size_t fillcount;
+ struct Curl_easy *data = (struct Curl_easy *)userdata;
+ CURLcode result =
+ Curl_fillreadbuffer(data->conn, data->set.upload_buffer_size,
+ &fillcount);
+ (void)ctx;
+ if(result)
+ return HYPER_POLL_ERROR;
+ if(!fillcount)
+ /* done! */
+ *chunk = NULL;
+ else
+ *chunk = hyper_buf_copy((uint8_t *)data->state.ulbuf, fillcount);
+ return HYPER_POLL_READY;
+}
+
+/*
+ * bodysend() sets up headers in the outgoing request for an HTTP transfer that
+ * sends a body
+ */
+
+static CURLcode bodysend(struct Curl_easy *data,
+ struct connectdata *conn,
+ hyper_headers *headers,
+ hyper_request *hyperreq,
+ Curl_HttpReq httpreq)
+{
+ CURLcode result;
+ struct dynbuf req;
+ if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD))
+ Curl_pgrsSetUploadSize(data, 0); /* no request body */
+ else {
+ hyper_body *body;
+ Curl_dyn_init(&req, DYN_HTTP_REQUEST);
+ result = Curl_http_bodysend(data, conn, &req, httpreq);
+
+ if(!result)
+ result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&req));
+
+ Curl_dyn_free(&req);
+
+ body = hyper_body_new();
+ hyper_body_set_userdata(body, data);
+ if(data->set.postfields)
+ hyper_body_set_data_func(body, uploadpostfields);
+ else {
+ result = Curl_get_upload_buffer(data);
+ if(result)
+ return result;
+ /* init the "upload from here" pointer */
+ data->req.upload_fromhere = data->state.ulbuf;
+ hyper_body_set_data_func(body, uploadstreamed);
+ }
+ if(HYPERE_OK != hyper_request_set_body(hyperreq, body)) {
+ /* fail */
+ hyper_body_free(body);
+ result = CURLE_OUT_OF_MEMORY;
+ }
+ }
+ return result;
+}
+
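+/* cookies() asks Curl_http_cookies() for the outgoing Cookie: header (if
+   any) and adds it through Curl_hyper_header(). */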
+static CURLcode cookies(struct Curl_easy *data,
+ struct connectdata *conn,
+ hyper_headers *headers)
+{
+ struct dynbuf req;
+ CURLcode result;
+ Curl_dyn_init(&req, DYN_HTTP_REQUEST);
+
+ result = Curl_http_cookies(data, conn, &req);
+ if(!result)
+ result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&req));
+ Curl_dyn_free(&req);
+ return result;
+}
+
+/*
+ * Curl_http() gets called from the generic multi_do() function when an HTTP
+ * request is to be performed. This creates and sends a properly constructed
+ * HTTP request.
+ */
+CURLcode Curl_http(struct connectdata *conn, bool *done)
+{
+ struct Curl_easy *data = conn->data;
+ struct hyptransfer *h = &data->hyp;
+ hyper_io *io = NULL;
+ hyper_clientconn_options *options = NULL;
+ hyper_task *task = NULL; /* for the handshake */
+ hyper_task *sendtask = NULL; /* for the send */
+ hyper_clientconn *client = NULL;
+ hyper_request *req = NULL;
+ hyper_headers *headers = NULL;
+ hyper_task *handshake = NULL;
+ hyper_error *hypererr = NULL;
+ CURLcode result;
+ const char *p_accept; /* Accept: string */
+ const char *method;
+ Curl_HttpReq httpreq;
+ bool h2 = FALSE;
+ const char *te = NULL; /* transfer-encoding */
+
+ /* Always consider the DO phase done after this function call, even if there
+ may be parts of the request that are not yet sent, since we can deal with
+ the rest of the request in the PERFORM phase. */
+ *done = TRUE;
+
+ infof(data, "Time for the Hyper dance\n");
+ memset(h, 0, sizeof(struct hyptransfer));
+
+ result = Curl_http_host(data, conn);
+ if(result)
+ return result;
+
+ Curl_http_method(data, conn, &method, &httpreq);
+
+ /* setup the authentication headers */
+ {
+ char *pq = NULL;
+ if(data->state.up.query) {
+ pq = aprintf("%s?%s", data->state.up.path, data->state.up.query);
+ if(!pq)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ result = Curl_http_output_auth(conn, method,
+ (pq ? pq : data->state.up.path), FALSE);
+ free(pq);
+ if(result)
+ return result;
+ }
+
+ result = Curl_http_resume(data, conn, httpreq);
+ if(result)
+ return result;
+
+ result = Curl_http_range(data, conn, httpreq);
+ if(result)
+ return result;
+
+ result = Curl_http_useragent(data, conn);
+ if(result)
+ return result;
+
+ io = hyper_io_new();
+ if(!io) {
+ failf(data, "Couldn't create hyper IO");
+ goto error;
+ }
+ /* tell Hyper how to read/write network data */
+ hyper_io_set_userdata(io, conn);
+ hyper_io_set_read(io, read_cb);
+ hyper_io_set_write(io, write_cb);
+
+ /* create an executor to poll futures */
+ if(!h->exec) {
+ h->exec = hyper_executor_new();
+ if(!h->exec) {
+ failf(data, "Couldn't create hyper executor");
+ goto error;
+ }
+ }
+
+ options = hyper_clientconn_options_new();
+ if(!options) {
+ failf(data, "Couldn't create hyper client options");
+ goto error;
+ }
+ if(conn->negnpn == CURL_HTTP_VERSION_2) {
+ hyper_clientconn_options_http2(options, 1);
+ h2 = TRUE;
+ }
+
+ hyper_clientconn_options_exec(options, h->exec);
+
+ /* "Both the `io` and the `options` are consumed in this function call" */
+ handshake = hyper_clientconn_handshake(io, options);
+ if(!handshake) {
+ failf(data, "Couldn't create hyper client handshake");
+ goto error;
+ }
+ io = NULL;
+ options = NULL;
+
+ if(HYPERE_OK != hyper_executor_push(h->exec, handshake)) {
+ failf(data, "Couldn't hyper_executor_push the handshake");
+ goto error;
+ }
+ handshake = NULL; /* ownership passed on */
+
+ task = hyper_executor_poll(h->exec);
+ if(!task) {
+ failf(data, "Couldn't hyper_executor_poll the handshake");
+ goto error;
+ }
+
+ client = hyper_task_value(task);
+ hyper_task_free(task);
+
+ req = hyper_request_new();
+ if(!req) {
+ failf(data, "Couldn't hyper_request_new");
+ goto error;
+ }
+
+ if(data->set.httpversion == CURL_HTTP_VERSION_1_0) {
+ if(HYPERE_OK != hyper_request_set_version(req,
+ HYPER_HTTP_VERSION_1_0)) {
+ failf(data, "error settting HTTP version");
+ goto error;
+ }
+ }
+
+ if(hyper_request_set_method(req, (uint8_t *)method, strlen(method))) {
+ failf(data, "error setting method");
+ goto error;
+ }
+
+ result = request_target(data, conn, method, h2, req);
+ if(result)
+ goto error;
+
+ headers = hyper_request_headers(req);
+ if(!headers) {
+ failf(data, "hyper_request_headers\n");
+ goto error;
+ }
+
+ result = Curl_http_body(data, conn, httpreq, &te);
+ if(result)
+ return result;
+
+ if(data->state.aptr.host &&
+ Curl_hyper_header(data, headers, data->state.aptr.host))
+ goto error;
+
+ if(data->state.aptr.proxyuserpwd &&
+ Curl_hyper_header(data, headers, data->state.aptr.proxyuserpwd))
+ goto error;
+
+ if(data->state.aptr.userpwd &&
+ Curl_hyper_header(data, headers, data->state.aptr.userpwd))
+ goto error;
+
+ if((data->state.use_range && data->state.aptr.rangeline) &&
+ Curl_hyper_header(data, headers, data->state.aptr.rangeline))
+ goto error;
+
+ if(data->set.str[STRING_USERAGENT] &&
+ *data->set.str[STRING_USERAGENT] &&
+ data->state.aptr.uagent &&
+ Curl_hyper_header(data, headers, data->state.aptr.uagent))
+ goto error;
+
+ p_accept = Curl_checkheaders(conn, "Accept")?NULL:"Accept: */*\r\n";
+ if(p_accept && Curl_hyper_header(data, headers, p_accept))
+ goto error;
+
+ if(te && Curl_hyper_header(data, headers, te))
+ goto error;
+
+#ifndef CURL_DISABLE_PROXY
+ if(conn->bits.httpproxy && !conn->bits.tunnel_proxy &&
+ !Curl_checkProxyheaders(conn, "Proxy-Connection")) {
+ if(Curl_hyper_header(data, headers, "Proxy-Connection: Keep-Alive"))
+ goto error;
+ }
+#endif
+
+ Curl_safefree(data->state.aptr.ref);
+ if(data->change.referer && !Curl_checkheaders(conn, "Referer")) {
+ data->state.aptr.ref = aprintf("Referer: %s\r\n", data->change.referer);
+ if(!data->state.aptr.ref)
+ return CURLE_OUT_OF_MEMORY;
+ if(Curl_hyper_header(data, headers, data->state.aptr.ref))
+ goto error;
+ }
+
+ result = cookies(data, conn, headers);
+ if(result)
+ return result;
+
+ result = Curl_add_custom_headers(conn, FALSE, headers);
+ if(result)
+ return result;
+
+ if((httpreq != HTTPREQ_GET) && (httpreq != HTTPREQ_HEAD)) {
+ result = bodysend(data, conn, headers, req, httpreq);
+ if(result)
+ return result;
+ }
+
+ Curl_debug(data, CURLINFO_HEADER_OUT, (char *)"\r\n", 2);
+
+ data->req.upload_chunky = FALSE;
+ sendtask = hyper_clientconn_send(client, req);
+ if(!sendtask) {
+ failf(data, "hyper_clientconn_send\n");
+ goto error;
+ }
+
+ if(HYPERE_OK != hyper_executor_push(h->exec, sendtask)) {
+ failf(data, "Couldn't hyper_executor_push the send");
+ goto error;
+ }
+
+ hyper_clientconn_free(client);
+
+ do {
+ task = hyper_executor_poll(h->exec);
+ if(task) {
+ bool error = hyper_task_type(task) == HYPER_TASK_ERROR;
+ if(error)
+ hypererr = hyper_task_value(task);
+ hyper_task_free(task);
+ if(error)
+ goto error;
+ }
+ } while(task);
+
+ if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) {
+ /* HTTP GET/HEAD download */
+ Curl_pgrsSetUploadSize(data, 0); /* nothing */
+ Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
+ }
+ conn->datastream = hyperstream;
+
+ return CURLE_OK;
+ error:
+
+ if(io)
+ hyper_io_free(io);
+
+ if(options)
+ hyper_clientconn_options_free(options);
+
+ if(handshake)
+ hyper_task_free(handshake);
+
+ if(hypererr) {
+ uint8_t errbuf[256];
+ size_t errlen = hyper_error_print(hypererr, errbuf, sizeof(errbuf));
+ failf(data, "Hyper: %.*s", (int)errlen, errbuf);
+ hyper_error_free(hypererr);
+ }
+ return CURLE_OUT_OF_MEMORY;
+}
+
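+/* Curl_hyper_done() releases the per-transfer hyper state: the executor and
+   any read/write wakers that are still registered. */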
+void Curl_hyper_done(struct Curl_easy *data)
+{
+ struct hyptransfer *h = &data->hyp;
+ if(h->exec) {
+ hyper_executor_free(h->exec);
+ h->exec = NULL;
+ }
+ if(h->read_waker) {
+ hyper_waker_free(h->read_waker);
+ h->read_waker = NULL;
+ }
+ if(h->write_waker) {
+ hyper_waker_free(h->write_waker);
+ h->write_waker = NULL;
+ }
+}
+
+#endif /* !defined(CURL_DISABLE_HTTP) && defined(USE_HYPER) */
diff --git a/lib/c-hyper.h b/lib/c-hyper.h
new file mode 100644
index 000000000..ed0cad46c
--- /dev/null
+++ b/lib/c-hyper.h
@@ -0,0 +1,46 @@
+#ifndef HEADER_CURL_HYPER_H
+#define HEADER_CURL_HYPER_H
+/***************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+#include "curl_setup.h"
+
+#if !defined(CURL_DISABLE_HTTP) && defined(USE_HYPER)
+
+#include <hyper.h>
+
+/* per-transfer data for the Hyper backend */
+struct hyptransfer {
+ hyper_waker *write_waker;
+ hyper_waker *read_waker;
+ const hyper_executor *exec;
+ bool init;
+};
+
+CURLcode Curl_hyper_header(struct Curl_easy *data, hyper_headers *headers,
+ const char *line);
+void Curl_hyper_done(struct Curl_easy *);
+
+#else
+#define Curl_hyper_done(x)
+
+#endif /* !defined(CURL_DISABLE_HTTP) && defined(USE_HYPER) */
+#endif /* HEADER_CURL_HYPER_H */
diff --git a/lib/http.c b/lib/http.c
index a2279eb0a..23618b9d7 100644
--- a/lib/http.c
+++ b/lib/http.c
@@ -45,6 +45,10 @@
#include <sys/param.h>
#endif
+#ifdef USE_HYPER
+#include <hyper.h>
+#endif
+
#include "urldata.h"
#include <curl/curl.h>
#include "transfer.h"
@@ -78,6 +82,7 @@
#include "strdup.h"
#include "altsvc.h"
#include "hsts.h"
+#include "c-hyper.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
@@ -1098,6 +1103,7 @@ static int http_should_fail(struct connectdata *conn)
return data->state.authproblem;
}
+#ifndef USE_HYPER
/*
* readmoredata() is a "fread() emulation" to provide POST and/or request
* data. It is used when a huge POST is to be made and the entire chunk wasn't
@@ -1324,6 +1330,8 @@ CURLcode Curl_buffer_send(struct dynbuf *in,
return result;
}
+#endif
+
/* end of the add_buffer functions */
/* ------------------------------------------------------------------------- */
@@ -1542,6 +1550,7 @@ CURLcode Curl_http_done(struct connectdata *conn,
Curl_quic_done(data, premature);
Curl_mime_cleanpart(&http->form);
Curl_dyn_reset(&data->state.headerb);
+ Curl_hyper_done(data);
if(status)
return status;
@@ -1584,6 +1593,7 @@ static bool use_http_1_1plus(const struct Curl_easy *data,
(data->set.httpversion >= CURL_HTTP_VERSION_1_1));
}
+#ifndef USE_HYPER
static const char *get_http_string(const struct Curl_easy *data,
const struct connectdata *conn)
{
@@ -1603,6 +1613,7 @@ static const char *get_http_string(const struct Curl_easy *data,
return "1.0";
}
+#endif
/* check and possibly add an Expect: header */
static CURLcode expect100(struct Curl_easy *data,
@@ -1685,7 +1696,12 @@ CURLcode Curl_http_compile_trailers(struct curl_slist *trailers,
CURLcode Curl_add_custom_headers(struct connectdata *conn,
bool is_connect,
- struct dynbuf *req)
+#ifndef USE_HYPER
+ struct dynbuf *req
+#else
+ void *req
+#endif
+ )
{
char *ptr;
struct curl_slist *h[2];
@@ -1752,7 +1768,9 @@ CURLcode Curl_add_custom_headers(struct connectdata *conn,
/* copy the source */
semicolonp = strdup(headers->data);
if(!semicolonp) {
+#ifndef USE_HYPER
Curl_dyn_free(req);
+#endif
return CURLE_OUT_OF_MEMORY;
}
/* put a colon where the semicolon is */
@@ -1813,7 +1831,11 @@ CURLcode Curl_add_custom_headers(struct connectdata *conn,
!strcasecompare(data->state.first_host, conn->host.name)))
;
else {
+#ifdef USE_HYPER
+ result = Curl_hyper_header(data, req, compare);
+#else
result = Curl_dyn_addf(req, "%s\r\n", compare);
+#endif
}
if(semicolonp)
free(semicolonp);
@@ -1904,102 +1926,14 @@ CURLcode Curl_add_timecondition(const struct connectdata *conn,
}
#endif
-/*
- * Curl_http() gets called from the generic multi_do() function when a HTTP
- * request is to be performed. This creates and sends a properly constructed
- * HTTP request.
- */
-CURLcode Curl_http(struct connectdata *conn, bool *done)
+void Curl_http_method(struct Curl_easy *data, struct connectdata *conn,
+ const char **method, Curl_HttpReq *reqp)
{
- struct Curl_easy *data = conn->data;
- CURLcode result = CURLE_OK;
- struct HTTP *http;
- const char *path = data->state.up.path;
- const char *query = data->state.up.query;
- bool paste_ftp_userpwd = FALSE;
- char ftp_typecode[sizeof("/;type=?")] = "";
- const char *host = conn->host.name;
- const char *te = ""; /* transfer-encoding */
- const char *ptr;
- const char *request;
Curl_HttpReq httpreq = data->state.httpreq;
-#if !defined(CURL_DISABLE_COOKIES)
- char *addcookies = NULL;
-#endif
- curl_off_t included_body = 0;
- const char *httpstring;
- struct dynbuf req;
- curl_off_t postsize = 0; /* curl_off_t to handle large file sizes */
- char *altused = NULL;
-
- /* Always consider the DO phase done after this function call, even if there
- may be parts of the request that is not yet sent, since we can deal with
- the rest of the request in the PERFORM phase. */
- *done = TRUE;
-
- if(conn->transport != TRNSPRT_QUIC) {
- if(conn->httpversion < 20) { /* unless the connection is re-used and
- already http2 */
- switch(conn->negnpn) {
- case CURL_HTTP_VERSION_2:
- conn->httpversion = 20; /* we know we're on HTTP/2 now */
-
- result = Curl_http2_switched(conn, NULL, 0);
- if(result)
- return result;
- break;
- case CURL_HTTP_VERSION_1_1:
- /* continue with HTTP/1.1 when explicitly requested */
- break;
- default:
- /* Check if user wants to use HTTP/2 with clear TCP*/
-#ifdef USE_NGHTTP2
- if(conn->data->set.httpversion ==
- CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE) {
-#ifndef CURL_DISABLE_PROXY
- if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
- /* We don't support HTTP/2 proxies yet. Also it's debatable
- whether or not this setting should apply to HTTP/2 proxies. */
- infof(data, "Ignoring HTTP/2 prior knowledge due to proxy\n");
- break;
- }
-#endif
- DEBUGF(infof(data, "HTTP/2 over clean TCP\n"));
- conn->httpversion = 20;
-
- result = Curl_http2_switched(conn, NULL, 0);
- if(result)
- return result;
- }
-#endif
- break;
- }
- }
- else {
- /* prepare for a http2 request */
- result = Curl_http2_setup(conn);
- if(result)
- return result;
- }
- }
- http = data->req.p.http;
- DEBUGASSERT(http);
-
- if(!data->state.this_is_a_follow) {
- /* Free to avoid leaking memory on multiple requests*/
- free(data->state.first_host);
-
- data->state.first_host = strdup(conn->host.name);
- if(!data->state.first_host)
- return CURLE_OUT_OF_MEMORY;
-
- data->state.first_remote_port = conn->remote_port;
- }
-
+ const char *request;
if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
- data->set.upload) {
+ data->set.upload)
httpreq = HTTPREQ_PUT;
- }
/* Now set the 'request' pointer to the proper request string */
if(data->set.str[STRING_CUSTOMREQUEST])
@@ -2028,7 +1962,12 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
}
}
}
+ *method = request;
+ *reqp = httpreq;
+}
+CURLcode Curl_http_useragent(struct Curl_easy *data, struct connectdata *conn)
+{
/* The User-Agent string might have been allocated in url.c already, because
it might have been used in the proxy connect, but if we have got a header
with the user-agent string specified, we erase the previously made string
@@ -2037,168 +1976,23 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
free(data->state.aptr.uagent);
data->state.aptr.uagent = NULL;
}
+ return CURLE_OK;
+}
- /* setup the authentication headers */
- {
- char *pq = NULL;
- if(query && *query) {
- pq = aprintf("%s?%s", path, query);
- if(!pq)
- return CURLE_OUT_OF_MEMORY;
- }
- result = Curl_http_output_auth(conn, request, (pq ? pq : path), FALSE);
- free(pq);
- if(result)
- return result;
- }
-
- if(((data->state.authhost.multipass && !data->state.authhost.done)
- || (data->state.authproxy.multipass && !data->state.authproxy.done)) &&
- (httpreq != HTTPREQ_GET) &&
- (httpreq != HTTPREQ_HEAD)) {
- /* Auth is required and we are not authenticated yet. Make a PUT or POST
- with content-length zero as a "probe". */
- conn->bits.authneg = TRUE;
- }
- else
- conn->bits.authneg = FALSE;
-
- Curl_safefree(data->state.aptr.ref);
- if(data->change.referer && !Curl_checkheaders(conn, "Referer")) {
- data->state.aptr.ref = aprintf("Referer: %s\r\n", data->change.referer);
- if(!data->state.aptr.ref)
- return CURLE_OUT_OF_MEMORY;
- }
- else
- data->state.aptr.ref = NULL;
-
-#if !defined(CURL_DISABLE_COOKIES)
- if(data->set.str[STRING_COOKIE] && !Curl_checkheaders(conn, "Cookie"))
- addcookies = data->set.str[STRING_COOKIE];
-#endif
-
- if(!Curl_checkheaders(conn, "Accept-Encoding") &&
- data->set.str[STRING_ENCODING]) {
- Curl_safefree(data->state.aptr.accept_encoding);
- data->state.aptr.accept_encoding =
- aprintf("Accept-Encoding: %s\r\n", data->set.str[STRING_ENCODING]);
- if(!data->state.aptr.accept_encoding)
- return CURLE_OUT_OF_MEMORY;
- }
- else {
- Curl_safefree(data->state.aptr.accept_encoding);
- data->state.aptr.accept_encoding = NULL;
- }
-
-#ifdef HAVE_LIBZ
- /* we only consider transfer-encoding magic if libz support is built-in */
-
- if(!Curl_checkheaders(conn, "TE") &&
- data->set.http_transfer_encoding) {
- /* When we are to insert a TE: header in the request, we must also insert
- TE in a Connection: header, so we need to merge the custom provided
- Connection: header and prevent the original to get sent. Note that if
- the user has inserted his/hers own TE: header we don't do this magic
- but then assume that the user will handle it all! */
- char *cptr = Curl_checkheaders(conn, "Connection");
-#define TE_HEADER "TE: gzip\r\n"
-
- Curl_safefree(data->state.aptr.te);
-
- if(cptr) {
- cptr = Curl_copy_header_value(cptr);
- if(!cptr)
- return CURLE_OUT_OF_MEMORY;
- }
- /* Create the (updated) Connection: header */
- data->state.aptr.te = aprintf("Connection: %s%sTE\r\n" TE_HEADER,
- cptr ? cptr : "", (cptr && *cptr) ? ", ":"");
+CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn)
+{
+ const char *ptr;
+ if(!data->state.this_is_a_follow) {
+ /* Free to avoid leaking memory on multiple requests*/
+ free(data->state.first_host);
- free(cptr);
- if(!data->state.aptr.te)
+ data->state.first_host = strdup(conn->host.name);
+ if(!data->state.first_host)
return CURLE_OUT_OF_MEMORY;
- }
-#endif
-
- switch(httpreq) {
- case HTTPREQ_POST_MIME:
- http->sendit = &data->set.mimepost;
- break;
- case HTTPREQ_POST_FORM:
- /* Convert the form structure into a mime structure. */
- Curl_mime_cleanpart(&http->form);
- result = Curl_getformdata(data, &http->form, data->set.httppost,
- data->state.fread_func);
- if(result)
- return result;
- http->sendit = &http->form;
- break;
- default:
- http->sendit = NULL;
- }
-
-#ifndef CURL_DISABLE_MIME
- if(http->sendit) {
- const char *cthdr = Curl_checkheaders(conn, "Content-Type");
-
- /* Read and seek body only. */
- http->sendit->flags |= MIME_BODY_ONLY;
-
- /* Prepare the mime structure headers & set content type. */
-
- if(cthdr)
- for(cthdr += 13; *cthdr == ' '; cthdr++)
- ;
- else if(http->sendit->kind == MIMEKIND_MULTIPART)
- cthdr = "multipart/form-data";
-
- curl_mime_headers(http->sendit, data->set.headers, 0);
- result = Curl_mime_prepare_headers(http->sendit, cthdr,
- NULL, MIMESTRATEGY_FORM);
- curl_mime_headers(http->sendit, NULL, 0);
- if(!result)
- result = Curl_mime_rewind(http->sendit);
- if(result)
- return result;
- http->postsize = Curl_mime_size(http->sendit);
- }
-#endif
-
- ptr = Curl_checkheaders(conn, "Transfer-Encoding");
- if(ptr) {
- /* Some kind of TE is requested, check if 'chunked' is chosen */
- data->req.upload_chunky =
- Curl_compareheader(ptr, "Transfer-Encoding:", "chunked");
- }
- else {
- if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
- (((httpreq == HTTPREQ_POST_MIME || httpreq == HTTPREQ_POST_FORM) &&
- http->postsize < 0) ||
- ((data->set.upload || httpreq == HTTPREQ_POST) &&
- data->state.infilesize == -1))) {
- if(conn->bits.authneg)
- /* don't enable chunked during auth neg */
- ;
- else if(use_http_1_1plus(data, conn)) {
- if(conn->httpversion < 20)
- /* HTTP, upload, unknown file size and not HTTP 1.0 */
- data->req.upload_chunky = TRUE;
- }
- else {
- failf(data, "Chunky upload is not supported by HTTP 1.0");
- return CURLE_UPLOAD_FAILED;
- }
- }
- else {
- /* else, no chunky upload */
- data->req.upload_chunky = FALSE;
- }
- if(data->req.upload_chunky)
- te = "Transfer-Encoding: chunked\r\n";
+ data->state.first_remote_port = conn->remote_port;
}
-
Curl_safefree(data->state.aptr.host);
ptr = Curl_checkheaders(conn, "Host");
@@ -2251,6 +2045,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
else {
/* When building Host: headers, we must put the host name within
[brackets] if the host name is a plain IPv6-address. RFC2732-style. */
+ const char *host = conn->host.name;
if(((conn->given->protocol&CURLPROTO_HTTPS) &&
(conn->remote_port == PORT_HTTPS)) ||
@@ -2273,6 +2068,24 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
/* without Host: we can't make a nice request */
return CURLE_OUT_OF_MEMORY;
}
+ return CURLE_OK;
+}
+
+/*
+ * Append the request-target to the HTTP request
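+ * (the path plus query in origin-form, or the absolute URL when going
+ * through an HTTP proxy without tunneling)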
+ */
+CURLcode Curl_http_target(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct dynbuf *r)
+{
+ CURLcode result = CURLE_OK;
+ const char *path = data->state.up.path;
+ const char *query = data->state.up.query;
+
+ if(data->set.str[STRING_TARGET]) {
+ path = data->set.str[STRING_TARGET];
+ query = NULL;
+ }
#ifndef CURL_DISABLE_PROXY
if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
@@ -2284,6 +2097,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
/* and no fragment part */
CURLUcode uc;
+ char *url;
CURLU *h = curl_url_dup(data->state.uh);
if(!h)
return CURLE_OUT_OF_MEMORY;
@@ -2317,7 +2131,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
/* Extract the URL to use in the request. Store in STRING_TEMP_URL for
clean-up reasons if the function returns before the free() further
down. */
- uc = curl_url_get(h, CURLUPART_URL, &data->set.str[STRING_TEMP_URL], 0);
+ uc = curl_url_get(h, CURLUPART_URL, &url, 0);
if(uc) {
curl_url_cleanup(h);
return CURLE_OUT_OF_MEMORY;
@@ -2325,6 +2139,12 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
curl_url_cleanup(h);
+ /* url */
+ result = Curl_dyn_add(r, url);
+ free(url);
+ if(result)
+ return (result);
+
if(strcasecompare("ftp", data->state.up.scheme)) {
if(data->set.proxy_transfer_mode) {
/* when doing ftp, append ;type=<a|i> if not present */
@@ -2340,325 +2160,128 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
}
}
if(!type) {
- char *p = ftp_typecode;
- /* avoid sending invalid URLs like ftp://example.com;type=i if the
- * user specified ftp://example.com without the slash */
- if(!*data->state.up.path && path[strlen(path) - 1] != '/') {
- *p++ = '/';
- }
- msnprintf(p, sizeof(ftp_typecode) - 1, ";type=%c",
- data->set.prefer_ascii ? 'a' : 'i');
- }
- }
- if(conn->bits.user_passwd)
- paste_ftp_userpwd = TRUE;
- }
- }
-#endif /* CURL_DISABLE_PROXY */
-
- http->p_accept = Curl_checkheaders(conn, "Accept")?NULL:"Accept: */*\r\n";
-
- if((HTTPREQ_POST == httpreq || HTTPREQ_PUT == httpreq) &&
- data->state.resume_from) {
- /**********************************************************************
- * Resuming upload in HTTP means that we PUT or POST and that we have
- * got a resume_from value set. The resume value has already created
- * a Range: header that will be passed along. We need to "fast forward"
- * the file the given number of bytes and decrease the assume upload
- * file size before we continue this venture in the dark lands of HTTP.
- * Resuming mime/form posting at an offset > 0 has no sense and is ignored.
- *********************************************************************/
-
- if(data->state.resume_from < 0) {
- /*
- * This is meant to get the size of the present remote-file by itself.
- * We don't support this now. Bail out!
- */
- data->state.resume_from = 0;
- }
-
- if(data->state.resume_from && !data->state.this_is_a_follow) {
- /* do we still game? */
-
- /* Now, let's read off the proper amount of bytes from the
- input. */
- int seekerr = CURL_SEEKFUNC_CANTSEEK;
- if(conn->seek_func) {
- Curl_set_in_callback(data, true);
- seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
- SEEK_SET);
- Curl_set_in_callback(data, false);
- }
-
- if(seekerr != CURL_SEEKFUNC_OK) {
- curl_off_t passed = 0;
-
- if(seekerr != CURL_SEEKFUNC_CANTSEEK) {
- failf(data, "Could not seek stream");
- return CURLE_READ_ERROR;
- }
- /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
- do {
- size_t readthisamountnow =
- (data->state.resume_from - passed > data->set.buffer_size) ?
- (size_t)data->set.buffer_size :
- curlx_sotouz(data->state.resume_from - passed);
-
- size_t actuallyread =
- data->state.fread_func(data->state.buffer, 1, readthisamountnow,
- data->state.in);
-
- passed += actuallyread;
- if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
- /* this checks for greater-than only to make sure that the
- CURL_READFUNC_ABORT return code still aborts */
- failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
- " bytes from the input", passed);
- return CURLE_READ_ERROR;
- }
- } while(passed < data->state.resume_from);
- }
-
- /* now, decrease the size of the read */
- if(data->state.infilesize>0) {
- data->state.infilesize -= data->state.resume_from;
-
- if(data->state.infilesize <= 0) {
- failf(data, "File already completely uploaded");
- return CURLE_PARTIAL_FILE;
+ result = Curl_dyn_addf(r, ";type=%c",
+ data->set.prefer_ascii ? 'a' : 'i');
+ if(result)
+ return result;
}
}
- /* we've passed, proceed as normal */
- }
- }
- if(data->state.use_range) {
- /*
- * A range is selected. We use different headers whether we're downloading
- * or uploading and we always let customized headers override our internal
- * ones if any such are specified.
- */
- if(((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) &&
- !Curl_checkheaders(conn, "Range")) {
- /* if a line like this was already allocated, free the previous one */
- free(data->state.aptr.rangeline);
- data->state.aptr.rangeline = aprintf("Range: bytes=%s\r\n",
- data->state.range);
- }
- else if((httpreq == HTTPREQ_POST || httpreq == HTTPREQ_PUT) &&
- !Curl_checkheaders(conn, "Content-Range")) {
-
- /* if a line like this was already allocated, free the previous one */
- free(data->state.aptr.rangeline);
-
- if(data->set.set_resume_from < 0) {
- /* Upload resume was asked for, but we don't know the size of the
- remote part so we tell the server (and act accordingly) that we
- upload the whole file (again) */
- data->state.aptr.rangeline =
- aprintf("Content-Range: bytes 0-%" CURL_FORMAT_CURL_OFF_T
- "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.infilesize - 1, data->state.infilesize);
-
- }
- else if(data->state.resume_from) {
- /* This is because "resume" was selected */
- curl_off_t total_expected_size =
- data->state.resume_from + data->state.infilesize;
- data->state.aptr.rangeline =
- aprintf("Content-Range: bytes %s%" CURL_FORMAT_CURL_OFF_T
- "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.range, total_expected_size-1,
- total_expected_size);
- }
- else {
- /* Range was selected and then we just pass the incoming range and
- append total size */
- data->state.aptr.rangeline =
- aprintf("Content-Range: bytes %s/%" CURL_FORMAT_CURL_OFF_T "\r\n",
- data->state.range, data->state.infilesize);
- }
- if(!data->state.aptr.rangeline)
- return CURLE_OUT_OF_MEMORY;
}
}
- httpstring = get_http_string(data, conn);
-
- /* initialize a dynamic send-buffer */
- Curl_dyn_init(&req, DYN_HTTP_REQUEST);
-
- /* add the main request stuff */
- /* GET/HEAD/POST/PUT */
- result = Curl_dyn_addf(&req, "%s ", request);
- if(result)
- return result;
-
- if(data->set.str[STRING_TARGET]) {
- path = data->set.str[STRING_TARGET];
- query = NULL;
- }
-
-#ifndef CURL_DISABLE_PROXY
- /* url */
- if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
- char *url = data->set.str[STRING_TEMP_URL];
- result = Curl_dyn_add(&req, url);
- Curl_safefree(data->set.str[STRING_TEMP_URL]);
- }
else
+#else
+ (void)conn; /* not used in disabled-proxy builds */
#endif
- if(paste_ftp_userpwd)
- result = Curl_dyn_addf(&req, "ftp://%s:%s@%s", conn->user, conn->passwd,
- path + sizeof("ftp://") - 1);
- else {
- result = Curl_dyn_add(&req, path);
+ {
+ result = Curl_dyn_add(r, path);
if(result)
return result;
if(query)
- result = Curl_dyn_addf(&req, "?%s", query);
+ result = Curl_dyn_addf(r, "?%s", query);
}
- if(result)
- return result;
-
-#ifndef CURL_DISABLE_ALTSVC
- if(conn->bits.altused && !Curl_checkheaders(conn, "Alt-Used")) {
- altused = aprintf("Alt-Used: %s:%d\r\n",
- conn->conn_to_host.name, conn->conn_to_port);
- if(!altused) {
- Curl_dyn_free(&req);
- return CURLE_OUT_OF_MEMORY;
- }
- }
-#endif
- result =
- Curl_dyn_addf(&req,
- "%s" /* ftp typecode (;type=x) */
- " HTTP/%s\r\n" /* HTTP version */
- "%s" /* host */
- "%s" /* proxyuserpwd */
- "%s" /* userpwd */
- "%s" /* range */
- "%s" /* user agent */
- "%s" /* accept */
- "%s" /* TE: */
- "%s" /* accept-encoding */
- "%s" /* referer */
- "%s" /* Proxy-Connection */
- "%s" /* transfer-encoding */
- "%s",/* Alt-Used */
- ftp_typecode,
- httpstring,
- (data->state.aptr.host?data->state.aptr.host:""),
- data->state.aptr.proxyuserpwd?
- data->state.aptr.proxyuserpwd:"",
- data->state.aptr.userpwd?data->state.aptr.userpwd:"",
- (data->state.use_range && data->state.aptr.rangeline)?
- data->state.aptr.rangeline:"",
- (data->set.str[STRING_USERAGENT] &&
- *data->set.str[STRING_USERAGENT] &&
- data->state.aptr.uagent)?
- data->state.aptr.uagent:"",
- http->p_accept?http->p_accept:"",
- data->state.aptr.te?data->state.aptr.te:"",
- (data->set.str[STRING_ENCODING] &&
- *data->set.str[STRING_ENCODING] &&
- data->state.aptr.accept_encoding)?
- data->state.aptr.accept_encoding:"",
- (data->change.referer && data->state.aptr.ref)?
- data->state.aptr.ref:"" /* Referer: <data> */,
-#ifndef CURL_DISABLE_PROXY
- (conn->bits.httpproxy &&
- !conn->bits.tunnel_proxy &&
- !Curl_checkProxyheaders(conn, "Proxy-Connection"))?
- "Proxy-Connection: Keep-Alive\r\n":"",
-#else
- "",
-#endif
- te,
- altused ? altused : ""
- );
-
- /* clear userpwd and proxyuserpwd to avoid re-using old credentials
- * from re-used connections */
- Curl_safefree(data->state.aptr.userpwd);
- Curl_safefree(data->state.aptr.proxyuserpwd);
- free(altused);
+ return result;
+}
- if(result)
- return result;
+CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
+ Curl_HttpReq httpreq, const char **tep)
+{
+ CURLcode result = CURLE_OK;
+ const char *ptr;
+ struct HTTP *http = data->req.p.http;
+ http->postsize = 0;
- if(!(conn->handler->flags&PROTOPT_SSL) &&
- conn->httpversion != 20 &&
- (data->set.httpversion == CURL_HTTP_VERSION_2)) {
- /* append HTTP2 upgrade magic stuff to the HTTP request if it isn't done
- over SSL */
- result = Curl_http2_request_upgrade(&req, conn);
+ switch(httpreq) {
+ case HTTPREQ_POST_MIME:
+ http->sendit = &data->set.mimepost;
+ break;
+ case HTTPREQ_POST_FORM:
+ /* Convert the form structure into a mime structure. */
+ Curl_mime_cleanpart(&http->form);
+ result = Curl_getformdata(data, &http->form, data->set.httppost,
+ data->state.fread_func);
if(result)
return result;
+ http->sendit = &http->form;
+ break;
+ default:
+ http->sendit = NULL;
}
-#if !defined(CURL_DISABLE_COOKIES)
- if(data->cookies || addcookies) {
- struct Cookie *co = NULL; /* no cookies from start */
- int count = 0;
+#ifndef CURL_DISABLE_MIME
+ if(http->sendit) {
+ const char *cthdr = Curl_checkheaders(conn, "Content-Type");
- if(data->cookies && data->state.cookie_engine) {
- Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
- co = Curl_cookie_getlist(data->cookies,
- data->state.aptr.cookiehost?
- data->state.aptr.cookiehost:host,
- data->state.up.path,
- (conn->handler->protocol&CURLPROTO_HTTPS)?
- TRUE:FALSE);
- Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
- }
- if(co) {
- struct Cookie *store = co;
- /* now loop through all cookies that matched */
- while(co) {
- if(co->value) {
- if(0 == count) {
- result = Curl_dyn_add(&req, "Cookie: ");
- if(result)
- break;
- }
- result = Curl_dyn_addf(&req, "%s%s=%s", count?"; ":"",
- co->name, co->value);
- if(result)
- break;
- count++;
- }
- co = co->next; /* next cookie please */
- }
- Curl_cookie_freelist(store);
- }
- if(addcookies && !result) {
- if(!count)
- result = Curl_dyn_add(&req, "Cookie: ");
- if(!result) {
- result = Curl_dyn_addf(&req, "%s%s", count?"; ":"", addcookies);
- count++;
- }
- }
- if(count && !result)
- result = Curl_dyn_add(&req, "\r\n");
+ /* Read and seek body only. */
+ http->sendit->flags |= MIME_BODY_ONLY;
+
+ /* Prepare the mime structure headers & set content type. */
+
+ if(cthdr)
+ for(cthdr += 13; *cthdr == ' '; cthdr++)
+ ;
+ else if(http->sendit->kind == MIMEKIND_MULTIPART)
+ cthdr = "multipart/form-data";
+ curl_mime_headers(http->sendit, data->set.headers, 0);
+ result = Curl_mime_prepare_headers(http->sendit, cthdr,
+ NULL, MIMESTRATEGY_FORM);
+ curl_mime_headers(http->sendit, NULL, 0);
+ if(!result)
+ result = Curl_mime_rewind(http->sendit);
if(result)
return result;
+ http->postsize = Curl_mime_size(http->sendit);
}
#endif
- result = Curl_add_timecondition(conn, &req);
- if(result)
- return result;
+ ptr = Curl_checkheaders(conn, "Transfer-Encoding");
+ if(ptr) {
+ /* Some kind of TE is requested, check if 'chunked' is chosen */
+ data->req.upload_chunky =
+ Curl_compareheader(ptr, "Transfer-Encoding:", "chunked");
+ }
+ else {
+ if((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
+ (((httpreq == HTTPREQ_POST_MIME || httpreq == HTTPREQ_POST_FORM) &&
+ http->postsize < 0) ||
+ ((data->set.upload || httpreq == HTTPREQ_POST) &&
+ data->state.infilesize == -1))) {
+ if(conn->bits.authneg)
+ /* don't enable chunked during auth neg */
+ ;
+ else if(use_http_1_1plus(data, conn)) {
+ if(conn->httpversion < 20)
+ /* HTTP, upload, unknown file size and not HTTP 1.0 */
+ data->req.upload_chunky = TRUE;
+ }
+ else {
+ failf(data, "Chunky upload is not supported by HTTP 1.0");
+ return CURLE_UPLOAD_FAILED;
+ }
+ }
+ else {
+ /* else, no chunky upload */
+ data->req.upload_chunky = FALSE;
+ }
- result = Curl_add_custom_headers(conn, FALSE, &req);
- if(result)
- return result;
+ if(data->req.upload_chunky)
+ *tep = "Transfer-Encoding: chunked\r\n";
+ }
+ return result;
+}
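
A minimal standalone sketch (not libcurl code) of the framing decision Curl_http_body() makes above: a body of known size is announced with Content-Length, an unknown size has to be sent chunked, and chunked requires at least HTTP/1.1. The helper name pick_framing() is invented for illustration.

    /* sketch: choose the request-body framing header (illustrative only) */
    #include <stdio.h>

    static const char *pick_framing(long long bodysize, int http11_or_later)
    {
      if(bodysize >= 0)
        return "Content-Length";             /* size known up front */
      if(http11_or_later)
        return "Transfer-Encoding: chunked"; /* stream with unknown length */
      return NULL;                           /* HTTP/1.0 cannot do chunked */
    }

    int main(void)
    {
      printf("%s\n", pick_framing(1024, 1));
      printf("%s\n", pick_framing(-1, 1));
      printf("%s\n", pick_framing(-1, 0) ? "chunked" : "upload must fail");
      return 0;
    }
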
- http->postdata = NULL; /* nothing to post at this point */
- Curl_pgrsSetUploadSize(data, -1); /* upload size is unknown atm */
+CURLcode Curl_http_bodysend(struct Curl_easy *data, struct connectdata *conn,
+ struct dynbuf *r, Curl_HttpReq httpreq)
+{
+#ifndef USE_HYPER
+ /* Hyper always handles the body separately */
+ curl_off_t included_body = 0;
+#endif
+ CURLcode result = CURLE_OK;
+ struct HTTP *http = data->req.p.http;
+ const char *ptr;
/* If 'authdone' is FALSE, we must not set the write socket index to the
Curl_transfer() call below, as we're not ready to actually upload any
@@ -2669,42 +2292,42 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
case HTTPREQ_PUT: /* Let's PUT the data to the server! */
if(conn->bits.authneg)
- postsize = 0;
+ http->postsize = 0;
else
- postsize = data->state.infilesize;
+ http->postsize = data->state.infilesize;
- if((postsize != -1) && !data->req.upload_chunky &&
+ if((http->postsize != -1) && !data->req.upload_chunky &&
(conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
/* only add Content-Length if not uploading chunked */
- result = Curl_dyn_addf(&req, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", postsize);
+ result = Curl_dyn_addf(r, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
+ "\r\n", http->postsize);
if(result)
return result;
}
- if(postsize != 0) {
- result = expect100(data, conn, &req);
+ if(http->postsize) {
+ result = expect100(data, conn, r);
if(result)
return result;
}
/* end of headers */
- result = Curl_dyn_add(&req, "\r\n");
+ result = Curl_dyn_add(r, "\r\n");
if(result)
return result;
/* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, postsize);
+ Curl_pgrsSetUploadSize(data, http->postsize);
/* this sends the buffer and frees all the buffer resources */
- result = Curl_buffer_send(&req, conn, &data->info.request_size, 0,
+ result = Curl_buffer_send(r, conn, &data->info.request_size, 0,
FIRSTSOCKET);
if(result)
failf(data, "Failed sending PUT request");
else
/* prepare for transfer */
Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- postsize?FIRSTSOCKET:-1);
+ http->postsize?FIRSTSOCKET:-1);
if(result)
return result;
break;
@@ -2714,11 +2337,11 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
/* This is form posting using mime data. */
if(conn->bits.authneg) {
/* nothing to post! */
- result = Curl_dyn_add(&req, "Content-Length: 0\r\n\r\n");
+ result = Curl_dyn_add(r, "Content-Length: 0\r\n\r\n");
if(result)
return result;
- result = Curl_buffer_send(&req, conn, &data->info.request_size, 0,
+ result = Curl_buffer_send(r, conn, &data->info.request_size, 0,
FIRSTSOCKET);
if(result)
failf(data, "Failed sending POST request");
@@ -2728,18 +2351,18 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
break;
}
- data->state.infilesize = postsize = http->postsize;
+ data->state.infilesize = http->postsize;
/* We only set Content-Length and allow a custom Content-Length if
we don't upload data chunked, as RFC2616 forbids us to set both
kinds of headers (Transfer-Encoding: chunked and Content-Length) */
- if(postsize != -1 && !data->req.upload_chunky &&
+ if(http->postsize != -1 && !data->req.upload_chunky &&
(conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
/* we allow replacing this header if not during auth negotiation,
although it isn't very wise to actually set your own */
- result = Curl_dyn_addf(&req,
+ result = Curl_dyn_addf(r,
"Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", postsize);
+ "\r\n", http->postsize);
if(result)
return result;
}
@@ -2750,7 +2373,7 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
struct curl_slist *hdr;
for(hdr = http->sendit->curlheaders; hdr; hdr = hdr->next) {
- result = Curl_dyn_addf(&req, "%s\r\n", hdr->data);
+ result = Curl_dyn_addf(r, "%s\r\n", hdr->data);
if(result)
return result;
}
@@ -2766,8 +2389,8 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
data->state.expect100header =
Curl_compareheader(ptr, "Expect:", "100-continue");
}
- else if(postsize > EXPECT_100_THRESHOLD || postsize < 0) {
- result = expect100(data, conn, &req);
+ else if(http->postsize > EXPECT_100_THRESHOLD || http->postsize < 0) {
+ result = expect100(data, conn, r);
if(result)
return result;
}
@@ -2775,12 +2398,12 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
data->state.expect100header = FALSE;
/* make the request end in a true CRLF */
- result = Curl_dyn_add(&req, "\r\n");
+ result = Curl_dyn_add(r, "\r\n");
if(result)
return result;
/* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, postsize);
+ Curl_pgrsSetUploadSize(data, http->postsize);
/* Read from mime structure. */
data->state.fread_func = (curl_read_callback) Curl_mime_read;
@@ -2788,14 +2411,14 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
http->sending = HTTPSEND_BODY;
/* this sends the buffer and frees all the buffer resources */
- result = Curl_buffer_send(&req, conn, &data->info.request_size, 0,
+ result = Curl_buffer_send(r, conn, &data->info.request_size, 0,
FIRSTSOCKET);
if(result)
failf(data, "Failed sending POST request");
else
/* prepare for transfer */
Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE,
- postsize?FIRSTSOCKET:-1);
+ http->postsize?FIRSTSOCKET:-1);
if(result)
return result;
@@ -2805,26 +2428,26 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
/* this is the simple POST, using x-www-form-urlencoded style */
if(conn->bits.authneg)
- postsize = 0;
+ http->postsize = 0;
else
/* the size of the post body */
- postsize = data->state.infilesize;
+ http->postsize = data->state.infilesize;
/* We only set Content-Length and allow a custom Content-Length if
we don't upload data chunked, as RFC2616 forbids us to set both
kinds of headers (Transfer-Encoding: chunked and Content-Length) */
- if((postsize != -1) && !data->req.upload_chunky &&
+ if((http->postsize != -1) && !data->req.upload_chunky &&
(conn->bits.authneg || !Curl_checkheaders(conn, "Content-Length"))) {
/* we allow replacing this header if not during auth negotiation,
although it isn't very wise to actually set your own */
- result = Curl_dyn_addf(&req, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
- "\r\n", postsize);
+ result = Curl_dyn_addf(r, "Content-Length: %" CURL_FORMAT_CURL_OFF_T
+ "\r\n", http->postsize);
if(result)
return result;
}
if(!Curl_checkheaders(conn, "Content-Type")) {
- result = Curl_dyn_add(&req, "Content-Type: application/"
+ result = Curl_dyn_add(r, "Content-Type: application/"
"x-www-form-urlencoded\r\n");
if(result)
return result;
@@ -2839,21 +2462,23 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
data->state.expect100header =
Curl_compareheader(ptr, "Expect:", "100-continue");
}
- else if(postsize > EXPECT_100_THRESHOLD || postsize < 0) {
- result = expect100(data, conn, &req);
+ else if(http->postsize > EXPECT_100_THRESHOLD || http->postsize < 0) {
+ result = expect100(data, conn, r);
if(result)
return result;
}
else
data->state.expect100header = FALSE;
+#ifndef USE_HYPER
+ /* With Hyper the body is always passed on separately */
if(data->set.postfields) {
/* In HTTP2, we send request body in DATA frame regardless of
its size. */
if(conn->httpversion != 20 &&
!data->state.expect100header &&
- (postsize < MAX_INITIAL_POST_SIZE)) {
+ (http->postsize < MAX_INITIAL_POST_SIZE)) {
/* if we don't use expect: 100 AND
postsize is less than MAX_INITIAL_POST_SIZE
@@ -2862,34 +2487,34 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
get the data duplicated with malloc() and family. */
/* end of headers! */
- result = Curl_dyn_add(&req, "\r\n");
+ result = Curl_dyn_add(r, "\r\n");
if(result)
return result;
if(!data->req.upload_chunky) {
/* We're not sending it 'chunked', append it to the request
             already now to reduce the number of send() calls */
- result = Curl_dyn_addn(&req, data->set.postfields,
- (size_t)postsize);
- included_body = postsize;
+ result = Curl_dyn_addn(r, data->set.postfields,
+ (size_t)http->postsize);
+ included_body = http->postsize;
}
else {
- if(postsize) {
+ if(http->postsize) {
char chunk[16];
/* Append the POST data chunky-style */
- msnprintf(chunk, sizeof(chunk), "%x\r\n", (int)postsize);
- result = Curl_dyn_add(&req, chunk);
+ msnprintf(chunk, sizeof(chunk), "%x\r\n", (int)http->postsize);
+ result = Curl_dyn_add(r, chunk);
if(!result) {
- included_body = postsize + strlen(chunk);
- result = Curl_dyn_addn(&req, data->set.postfields,
- (size_t)postsize);
+ included_body = http->postsize + strlen(chunk);
+ result = Curl_dyn_addn(r, data->set.postfields,
+ (size_t)http->postsize);
if(!result)
- result = Curl_dyn_add(&req, "\r\n");
+ result = Curl_dyn_add(r, "\r\n");
included_body += 2;
}
}
if(!result) {
- result = Curl_dyn_add(&req, "\x30\x0d\x0a\x0d\x0a");
+ result = Curl_dyn_add(r, "\x30\x0d\x0a\x0d\x0a");
/* 0 CR LF CR LF */
included_body += 5;
}
@@ -2897,11 +2522,10 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
if(result)
return result;
/* Make sure the progress information is accurate */
- Curl_pgrsSetUploadSize(data, postsize);
+ Curl_pgrsSetUploadSize(data, http->postsize);
}
else {
/* A huge POST coming up, do data separate from the request */
- http->postsize = postsize;
http->postdata = data->set.postfields;
http->sending = HTTPSEND_BODY;
@@ -2913,21 +2537,23 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
Curl_pgrsSetUploadSize(data, http->postsize);
/* end of headers! */
- result = Curl_dyn_add(&req, "\r\n");
+ result = Curl_dyn_add(r, "\r\n");
if(result)
return result;
}
}
- else {
+ else
+#endif
+ {
/* end of headers! */
- result = Curl_dyn_add(&req, "\r\n");
+ result = Curl_dyn_add(r, "\r\n");
if(result)
return result;
if(data->req.upload_chunky && conn->bits.authneg) {
/* Chunky upload is selected and we're negotiating auth still, send
end-of-data only */
- result = Curl_dyn_add(&req, (char *)"\x30\x0d\x0a\x0d\x0a");
+ result = Curl_dyn_add(r, (char *)"\x30\x0d\x0a\x0d\x0a");
/* 0 CR LF CR LF */
if(result)
return result;
@@ -2935,19 +2561,16 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
else if(data->state.infilesize) {
/* set the upload size to the progress meter */
- Curl_pgrsSetUploadSize(data, postsize?postsize:-1);
+ Curl_pgrsSetUploadSize(data, http->postsize?http->postsize:-1);
/* set the pointer to mark that we will send the post body using the
- read callback, but only if we're not in authenticate
- negotiation */
- if(!conn->bits.authneg) {
+ read callback, but only if we're not in authenticate negotiation */
+ if(!conn->bits.authneg)
http->postdata = (char *)&http->postdata;
- http->postsize = postsize;
- }
}
}
/* issue the request */
- result = Curl_buffer_send(&req, conn, &data->info.request_size,
+ result = Curl_buffer_send(r, conn, &data->info.request_size,
(size_t)included_body, FIRSTSOCKET);
if(result)
@@ -2958,12 +2581,12 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
break;
default:
- result = Curl_dyn_add(&req, "\r\n");
+ result = Curl_dyn_add(r, "\r\n");
if(result)
return result;
/* issue the request */
- result = Curl_buffer_send(&req, conn, &data->info.request_size, 0,
+ result = Curl_buffer_send(r, conn, &data->info.request_size, 0,
FIRSTSOCKET);
if(result)
@@ -2972,9 +2595,564 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
/* HTTP GET/HEAD download: */
Curl_setup_transfer(data, FIRSTSOCKET, -1, TRUE, -1);
}
+
+ return result;
+}
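
The chunked branch above frames the in-memory postfields as a hex length line, CRLF, the data, CRLF, and finally the terminating "0 CRLF CRLF". A self-contained sketch of that framing using plain snprintf()/memcpy() instead of the dynbuf API (chunk_encode() is a made-up helper):

    /* sketch: HTTP/1.1 chunked framing of a small body (illustrative only) */
    #include <stdio.h>
    #include <string.h>

    static int chunk_encode(const char *body, size_t len,
                            char *out, size_t outsize)
    {
      /* one data chunk, then the terminating zero-length chunk */
      int n = snprintf(out, outsize, "%zx\r\n", len);
      if(n < 0 || (size_t)n + len + 8 > outsize)
        return -1; /* would not fit */
      memcpy(out + n, body, len);
      memcpy(out + n + len, "\r\n0\r\n\r\n", 8); /* includes the final NUL */
      return 0;
    }

    int main(void)
    {
      char framed[128];
      const char *body = "name=daniel";
      if(!chunk_encode(body, strlen(body), framed, sizeof(framed)))
        fputs(framed, stdout); /* b\r\nname=daniel\r\n0\r\n\r\n */
      return 0;
    }
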
+
+#if !defined(CURL_DISABLE_COOKIES)
+CURLcode Curl_http_cookies(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct dynbuf *r)
+{
+ CURLcode result = CURLE_OK;
+ char *addcookies = NULL;
+ if(data->set.str[STRING_COOKIE] && !Curl_checkheaders(conn, "Cookie"))
+ addcookies = data->set.str[STRING_COOKIE];
+
+ if(data->cookies || addcookies) {
+ struct Cookie *co = NULL; /* no cookies from start */
+ int count = 0;
+
+ if(data->cookies && data->state.cookie_engine) {
+ Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
+ co = Curl_cookie_getlist(data->cookies,
+ data->state.aptr.cookiehost?
+ data->state.aptr.cookiehost:
+ conn->host.name,
+ data->state.up.path,
+ (conn->handler->protocol&CURLPROTO_HTTPS)?
+ TRUE:FALSE);
+ Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
+ }
+ if(co) {
+ struct Cookie *store = co;
+ /* now loop through all cookies that matched */
+ while(co) {
+ if(co->value) {
+ if(0 == count) {
+ result = Curl_dyn_add(r, "Cookie: ");
+ if(result)
+ break;
+ }
+ result = Curl_dyn_addf(r, "%s%s=%s", count?"; ":"",
+ co->name, co->value);
+ if(result)
+ break;
+ count++;
+ }
+ co = co->next; /* next cookie please */
+ }
+ Curl_cookie_freelist(store);
+ }
+ if(addcookies && !result) {
+ if(!count)
+ result = Curl_dyn_add(r, "Cookie: ");
+ if(!result) {
+ result = Curl_dyn_addf(r, "%s%s", count?"; ":"", addcookies);
+ count++;
+ }
+ }
+ if(count && !result)
+ result = Curl_dyn_add(r, "\r\n");
+
+ if(result)
+ return result;
+ }
+ return result;
+}
+#endif
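
Curl_http_cookies() above folds every matched cookie into one Cookie: header, separated by "; ". A standalone sketch of that joining step, with made-up cookie names and a fixed buffer in place of a dynbuf:

    /* sketch: build "Cookie: a=1; b=2" from matched pairs (illustrative only) */
    #include <stdio.h>

    int main(void)
    {
      const char *name[]  = { "lang", "theme", "sid" };
      const char *value[] = { "en",   "dark",  "abc123" };
      char header[256];
      size_t off = 0, i;

      for(i = 0; i < 3; i++) {
        int n = snprintf(header + off, sizeof(header) - off, "%s%s=%s",
                         off ? "; " : "Cookie: ", name[i], value[i]);
        if(n < 0 || (size_t)n >= sizeof(header) - off)
          return 1; /* would not fit */
        off += (size_t)n;
      }
      printf("%s\r\n", header); /* Cookie: lang=en; theme=dark; sid=abc123 */
      return 0;
    }
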
+
+CURLcode Curl_http_range(struct Curl_easy *data,
+ struct connectdata *conn,
+ Curl_HttpReq httpreq)
+{
+ if(data->state.use_range) {
+ /*
+     * A range is selected. We use different headers depending on whether
+     * we're downloading or uploading and we always let customized headers
+     * override our internal ones if any such are specified.
+ */
+ if(((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) &&
+ !Curl_checkheaders(conn, "Range")) {
+ /* if a line like this was already allocated, free the previous one */
+ free(data->state.aptr.rangeline);
+ data->state.aptr.rangeline = aprintf("Range: bytes=%s\r\n",
+ data->state.range);
+ }
+ else if((httpreq == HTTPREQ_POST || httpreq == HTTPREQ_PUT) &&
+ !Curl_checkheaders(conn, "Content-Range")) {
+
+ /* if a line like this was already allocated, free the previous one */
+ free(data->state.aptr.rangeline);
+
+ if(data->set.set_resume_from < 0) {
+ /* Upload resume was asked for, but we don't know the size of the
+ remote part so we tell the server (and act accordingly) that we
+ upload the whole file (again) */
+ data->state.aptr.rangeline =
+ aprintf("Content-Range: bytes 0-%" CURL_FORMAT_CURL_OFF_T
+ "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
+ data->state.infilesize - 1, data->state.infilesize);
+
+ }
+ else if(data->state.resume_from) {
+ /* This is because "resume" was selected */
+ curl_off_t total_expected_size =
+ data->state.resume_from + data->state.infilesize;
+ data->state.aptr.rangeline =
+ aprintf("Content-Range: bytes %s%" CURL_FORMAT_CURL_OFF_T
+ "/%" CURL_FORMAT_CURL_OFF_T "\r\n",
+ data->state.range, total_expected_size-1,
+ total_expected_size);
+ }
+ else {
+ /* Range was selected and then we just pass the incoming range and
+ append total size */
+ data->state.aptr.rangeline =
+ aprintf("Content-Range: bytes %s/%" CURL_FORMAT_CURL_OFF_T "\r\n",
+ data->state.range, data->state.infilesize);
+ }
+ if(!data->state.aptr.rangeline)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ }
+ return CURLE_OK;
+}
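
For reference, the header shapes Curl_http_range() builds: a GET/HEAD download gets a Range: header that passes the user range through, while a resumed POST/PUT upload advertises offset and total size in a Content-Range: header. A sketch with invented numbers (not libcurl code):

    /* sketch: the Range/Content-Range header shapes (illustrative only) */
    #include <stdio.h>

    int main(void)
    {
      long long resume_from = 5000, infilesize = 12000;
      long long total = resume_from + infilesize;

      /* GET/HEAD download: pass the user-supplied range straight through */
      printf("Range: bytes=%lld-\r\n", resume_from);

      /* resumed POST/PUT upload: offset, last byte and total expected size */
      printf("Content-Range: bytes %lld-%lld/%lld\r\n",
             resume_from, total - 1, total);
      return 0;
    }
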
+
+CURLcode Curl_http_resume(struct Curl_easy *data,
+ struct connectdata *conn,
+ Curl_HttpReq httpreq)
+{
+ if((HTTPREQ_POST == httpreq || HTTPREQ_PUT == httpreq) &&
+ data->state.resume_from) {
+ /**********************************************************************
+ * Resuming upload in HTTP means that we PUT or POST and that we have
+ * got a resume_from value set. The resume value has already created
+ * a Range: header that will be passed along. We need to "fast forward"
+     * the file the given number of bytes and decrease the assumed upload
+     * file size before we continue this venture in the dark lands of HTTP.
+     * Resuming mime/form posting at an offset > 0 makes no sense and is
+     * ignored.
+ *********************************************************************/
+
+ if(data->state.resume_from < 0) {
+ /*
+ * This is meant to get the size of the present remote-file by itself.
+ * We don't support this now. Bail out!
+ */
+ data->state.resume_from = 0;
+ }
+
+ if(data->state.resume_from && !data->state.this_is_a_follow) {
+ /* do we still game? */
+
+ /* Now, let's read off the proper amount of bytes from the
+ input. */
+ int seekerr = CURL_SEEKFUNC_CANTSEEK;
+ if(conn->seek_func) {
+ Curl_set_in_callback(data, true);
+ seekerr = conn->seek_func(conn->seek_client, data->state.resume_from,
+ SEEK_SET);
+ Curl_set_in_callback(data, false);
+ }
+
+ if(seekerr != CURL_SEEKFUNC_OK) {
+ curl_off_t passed = 0;
+
+ if(seekerr != CURL_SEEKFUNC_CANTSEEK) {
+ failf(data, "Could not seek stream");
+ return CURLE_READ_ERROR;
+ }
+ /* when seekerr == CURL_SEEKFUNC_CANTSEEK (can't seek to offset) */
+ do {
+ size_t readthisamountnow =
+ (data->state.resume_from - passed > data->set.buffer_size) ?
+ (size_t)data->set.buffer_size :
+ curlx_sotouz(data->state.resume_from - passed);
+
+ size_t actuallyread =
+ data->state.fread_func(data->state.buffer, 1, readthisamountnow,
+ data->state.in);
+
+ passed += actuallyread;
+ if((actuallyread == 0) || (actuallyread > readthisamountnow)) {
+ /* this checks for greater-than only to make sure that the
+ CURL_READFUNC_ABORT return code still aborts */
+ failf(data, "Could only read %" CURL_FORMAT_CURL_OFF_T
+ " bytes from the input", passed);
+ return CURLE_READ_ERROR;
+ }
+ } while(passed < data->state.resume_from);
+ }
+
+ /* now, decrease the size of the read */
+ if(data->state.infilesize>0) {
+ data->state.infilesize -= data->state.resume_from;
+
+ if(data->state.infilesize <= 0) {
+ failf(data, "File already completely uploaded");
+ return CURLE_PARTIAL_FILE;
+ }
+ }
+ /* we've passed, proceed as normal */
+ }
+ }
+ return CURLE_OK;
+}
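
When the seek callback answers CURL_SEEKFUNC_CANTSEEK, the loop above fast-forwards by reading and throwing away input until the resume offset is reached. The same idea as a standalone sketch against a plain FILE stream (skip_bytes() is a made-up helper):

    /* sketch: fast-forward a non-seekable input stream (illustrative only) */
    #include <stdio.h>

    static int skip_bytes(FILE *in, long long resume_from)
    {
      char buf[4096];
      long long passed = 0;

      while(passed < resume_from) {
        long long want = resume_from - passed;
        size_t chunk = want > (long long)sizeof(buf) ? sizeof(buf)
                                                     : (size_t)want;
        size_t got = fread(buf, 1, chunk, in);
        if(!got)
          return -1; /* input too short to reach the resume offset */
        passed += (long long)got;
      }
      return 0;
    }

    int main(void)
    {
      if(skip_bytes(stdin, 100))
        fputs("could only read part of the requested offset\n", stderr);
      return 0;
    }
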
+
+CURLcode Curl_http_firstwrite(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool *done)
+{
+ struct SingleRequest *k = &data->req;
+ DEBUGASSERT(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP));
+ if(data->req.newurl) {
+ if(conn->bits.close) {
+ /* Abort after the headers if "follow Location" is set
+ and we're set to close anyway. */
+ k->keepon &= ~KEEP_RECV;
+ *done = TRUE;
+ return CURLE_OK;
+ }
+ /* We have a new url to load, but since we want to be able to re-use this
+       connection properly, we read the full response in "ignore mode" */
+ k->ignorebody = TRUE;
+ infof(data, "Ignoring the response-body\n");
+ }
+ if(data->state.resume_from && !k->content_range &&
+ (data->state.httpreq == HTTPREQ_GET) &&
+ !k->ignorebody) {
+
+ if(k->size == data->state.resume_from) {
+ /* The resume point is at the end of file, consider this fine even if it
+ doesn't allow resume from here. */
+ infof(data, "The entire document is already downloaded");
+ connclose(conn, "already downloaded");
+ /* Abort download */
+ k->keepon &= ~KEEP_RECV;
+ *done = TRUE;
+ return CURLE_OK;
+ }
+
+ /* we wanted to resume a download, although the server doesn't seem to
+ * support this and we did this with a GET (if it wasn't a GET we did a
+ * POST or PUT resume) */
+ failf(data, "HTTP server doesn't seem to support "
+ "byte ranges. Cannot resume.");
+ return CURLE_RANGE_ERROR;
+ }
+
+ if(data->set.timecondition && !data->state.range) {
+ /* A time condition has been set AND no ranges have been requested. This
+ seems to be what chapter 13.3.4 of RFC 2616 defines to be the correct
+       action for an HTTP/1.1 client */
+
+ if(!Curl_meets_timecondition(data, k->timeofdoc)) {
+ *done = TRUE;
+ /* We're simulating a http 304 from server so we return
+ what should have been returned from the server */
+ data->info.httpcode = 304;
+ infof(data, "Simulate a HTTP 304 response!\n");
+ /* we abort the transfer before it is completed == we ruin the
+ re-use ability. Close the connection */
+ connclose(conn, "Simulated 304 handling");
+ return CURLE_OK;
+ }
+ } /* we have a time condition */
+
+ return CURLE_OK;
+}
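
The simulated 304 above rests on comparing the document's Last-Modified time against the user's time condition. A sketch of the "if modified since" case only, with invented timestamps; the real Curl_meets_timecondition() also covers the "unmodified since" variant:

    /* sketch: the "if modified since" check behind the simulated 304 */
    #include <stdio.h>
    #include <time.h>

    static int meets_timecondition(time_t timeofdoc, time_t condition)
    {
      return timeofdoc > condition; /* document must be newer than the bound */
    }

    int main(void)
    {
      time_t condition = (time_t)1700000000; /* e.g. set via CURLOPT_TIMEVALUE */
      time_t timeofdoc = (time_t)1690000000; /* from the Last-Modified header */

      if(!meets_timecondition(timeofdoc, condition))
        puts("simulate a 304, skip the body");
      return 0;
    }
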
+
+#ifndef USE_HYPER
+/*
+ * Curl_http() gets called from the generic multi_do() function when an HTTP
+ * request is to be performed. This creates and sends a properly constructed
+ * HTTP request.
+ */
+CURLcode Curl_http(struct connectdata *conn, bool *done)
+{
+ struct Curl_easy *data = conn->data;
+ CURLcode result = CURLE_OK;
+ struct HTTP *http;
+ Curl_HttpReq httpreq;
+ const char *te = ""; /* transfer-encoding */
+ const char *request;
+ const char *httpstring;
+ struct dynbuf req;
+ char *altused = NULL;
+ const char *p_accept; /* Accept: string */
+
+ /* Always consider the DO phase done after this function call, even if there
+     may be parts of the request that are not yet sent, since we can deal with
+ the rest of the request in the PERFORM phase. */
+ *done = TRUE;
+
+ if(conn->transport != TRNSPRT_QUIC) {
+ if(conn->httpversion < 20) { /* unless the connection is re-used and
+ already http2 */
+ switch(conn->negnpn) {
+ case CURL_HTTP_VERSION_2:
+ conn->httpversion = 20; /* we know we're on HTTP/2 now */
+
+ result = Curl_http2_switched(conn, NULL, 0);
+ if(result)
+ return result;
+ break;
+ case CURL_HTTP_VERSION_1_1:
+ /* continue with HTTP/1.1 when explicitly requested */
+ break;
+ default:
+        /* Check if user wants to use HTTP/2 with clear TCP */
+#ifdef USE_NGHTTP2
+ if(conn->data->set.httpversion ==
+ CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE) {
+#ifndef CURL_DISABLE_PROXY
+ if(conn->bits.httpproxy && !conn->bits.tunnel_proxy) {
+ /* We don't support HTTP/2 proxies yet. Also it's debatable
+ whether or not this setting should apply to HTTP/2 proxies. */
+ infof(data, "Ignoring HTTP/2 prior knowledge due to proxy\n");
+ break;
+ }
+#endif
+ DEBUGF(infof(data, "HTTP/2 over clean TCP\n"));
+ conn->httpversion = 20;
+
+ result = Curl_http2_switched(conn, NULL, 0);
+ if(result)
+ return result;
+ }
+#endif
+ break;
+ }
+ }
+ else {
+ /* prepare for a http2 request */
+ result = Curl_http2_setup(conn);
+ if(result)
+ return result;
+ }
+ }
+ http = data->req.p.http;
+ DEBUGASSERT(http);
+
+ result = Curl_http_host(data, conn);
+ if(result)
+ return result;
+
+ result = Curl_http_useragent(data, conn);
+ if(result)
+ return result;
+
+ Curl_http_method(data, conn, &request, &httpreq);
+
+ /* setup the authentication headers */
+ {
+ char *pq = NULL;
+ if(data->state.up.query) {
+ pq = aprintf("%s?%s", data->state.up.path, data->state.up.query);
+ if(!pq)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ result = Curl_http_output_auth(conn, request,
+ (pq ? pq : data->state.up.path), FALSE);
+ free(pq);
+ if(result)
+ return result;
+ }
+
+ if(((data->state.authhost.multipass && !data->state.authhost.done)
+ || (data->state.authproxy.multipass && !data->state.authproxy.done)) &&
+ (httpreq != HTTPREQ_GET) &&
+ (httpreq != HTTPREQ_HEAD)) {
+ /* Auth is required and we are not authenticated yet. Make a PUT or POST
+ with content-length zero as a "probe". */
+ conn->bits.authneg = TRUE;
+ }
+ else
+ conn->bits.authneg = FALSE;
+
+ Curl_safefree(data->state.aptr.ref);
+ if(data->change.referer && !Curl_checkheaders(conn, "Referer")) {
+ data->state.aptr.ref = aprintf("Referer: %s\r\n", data->change.referer);
+ if(!data->state.aptr.ref)
+ return CURLE_OUT_OF_MEMORY;
+ }
+
+ if(!Curl_checkheaders(conn, "Accept-Encoding") &&
+ data->set.str[STRING_ENCODING]) {
+ Curl_safefree(data->state.aptr.accept_encoding);
+ data->state.aptr.accept_encoding =
+ aprintf("Accept-Encoding: %s\r\n", data->set.str[STRING_ENCODING]);
+ if(!data->state.aptr.accept_encoding)
+ return CURLE_OUT_OF_MEMORY;
+ }
+ else {
+ Curl_safefree(data->state.aptr.accept_encoding);
+ data->state.aptr.accept_encoding = NULL;
+ }
+
+#ifdef HAVE_LIBZ
+ /* we only consider transfer-encoding magic if libz support is built-in */
+
+ if(!Curl_checkheaders(conn, "TE") &&
+ data->set.http_transfer_encoding) {
+ /* When we are to insert a TE: header in the request, we must also insert
+ TE in a Connection: header, so we need to merge the custom provided
+       Connection: header and prevent the original from being sent. Note that
+       if the user has inserted their own TE: header we don't do this magic
+       but then assume that the user will handle it all! */
+ char *cptr = Curl_checkheaders(conn, "Connection");
+#define TE_HEADER "TE: gzip\r\n"
+
+ Curl_safefree(data->state.aptr.te);
+
+ if(cptr) {
+ cptr = Curl_copy_header_value(cptr);
+ if(!cptr)
+ return CURLE_OUT_OF_MEMORY;
+ }
+
+ /* Create the (updated) Connection: header */
+ data->state.aptr.te = aprintf("Connection: %s%sTE\r\n" TE_HEADER,
+ cptr ? cptr : "", (cptr && *cptr) ? ", ":"");
+
+ free(cptr);
+ if(!data->state.aptr.te)
+ return CURLE_OUT_OF_MEMORY;
+ }
+#endif
+
+ result = Curl_http_body(data, conn, httpreq, &te);
+ if(result)
+ return result;
+
+ p_accept = Curl_checkheaders(conn, "Accept")?NULL:"Accept: */*\r\n";
+
+ result = Curl_http_resume(data, conn, httpreq);
if(result)
return result;
- if(!postsize && (http->sending != HTTPSEND_REQUEST))
+
+ result = Curl_http_range(data, conn, httpreq);
+ if(result)
+ return result;
+
+ httpstring = get_http_string(data, conn);
+
+ /* initialize a dynamic send-buffer */
+ Curl_dyn_init(&req, DYN_HTTP_REQUEST);
+
+ /* add the main request stuff */
+ /* GET/HEAD/POST/PUT */
+ result = Curl_dyn_addf(&req, "%s ", request);
+ if(!result)
+ result = Curl_http_target(data, conn, &req);
+ if(result) {
+ Curl_dyn_free(&req);
+ return result;
+ }
+
+#ifndef CURL_DISABLE_ALTSVC
+ if(conn->bits.altused && !Curl_checkheaders(conn, "Alt-Used")) {
+ altused = aprintf("Alt-Used: %s:%d\r\n",
+ conn->conn_to_host.name, conn->conn_to_port);
+ if(!altused) {
+ Curl_dyn_free(&req);
+ return CURLE_OUT_OF_MEMORY;
+ }
+ }
+#endif
+ result =
+ Curl_dyn_addf(&req,
+ " HTTP/%s\r\n" /* HTTP version */
+ "%s" /* host */
+ "%s" /* proxyuserpwd */
+ "%s" /* userpwd */
+ "%s" /* range */
+ "%s" /* user agent */
+ "%s" /* accept */
+ "%s" /* TE: */
+ "%s" /* accept-encoding */
+ "%s" /* referer */
+ "%s" /* Proxy-Connection */
+ "%s" /* transfer-encoding */
+ "%s",/* Alt-Used */
+
+ httpstring,
+ (data->state.aptr.host?data->state.aptr.host:""),
+ data->state.aptr.proxyuserpwd?
+ data->state.aptr.proxyuserpwd:"",
+ data->state.aptr.userpwd?data->state.aptr.userpwd:"",
+ (data->state.use_range && data->state.aptr.rangeline)?
+ data->state.aptr.rangeline:"",
+ (data->set.str[STRING_USERAGENT] &&
+ *data->set.str[STRING_USERAGENT] &&
+ data->state.aptr.uagent)?
+ data->state.aptr.uagent:"",
+ p_accept?p_accept:"",
+ data->state.aptr.te?data->state.aptr.te:"",
+ (data->set.str[STRING_ENCODING] &&
+ *data->set.str[STRING_ENCODING] &&
+ data->state.aptr.accept_encoding)?
+ data->state.aptr.accept_encoding:"",
+ (data->change.referer && data->state.aptr.ref)?
+ data->state.aptr.ref:"" /* Referer: <data> */,
+#ifndef CURL_DISABLE_PROXY
+ (conn->bits.httpproxy &&
+ !conn->bits.tunnel_proxy &&
+ !Curl_checkProxyheaders(conn, "Proxy-Connection"))?
+ "Proxy-Connection: Keep-Alive\r\n":"",
+#else
+ "",
+#endif
+ te,
+ altused ? altused : ""
+ );
+
+ /* clear userpwd and proxyuserpwd to avoid re-using old credentials
+ * from re-used connections */
+ Curl_safefree(data->state.aptr.userpwd);
+ Curl_safefree(data->state.aptr.proxyuserpwd);
+ free(altused);
+
+ if(result) {
+ Curl_dyn_free(&req);
+ return result;
+ }
+
+ if(!(conn->handler->flags&PROTOPT_SSL) &&
+ conn->httpversion != 20 &&
+ (data->set.httpversion == CURL_HTTP_VERSION_2)) {
+ /* append HTTP2 upgrade magic stuff to the HTTP request if it isn't done
+ over SSL */
+ result = Curl_http2_request_upgrade(&req, conn);
+ if(result) {
+ Curl_dyn_free(&req);
+ return result;
+ }
+ }
+
+ result = Curl_http_cookies(data, conn, &req);
+ if(!result)
+ result = Curl_add_timecondition(conn, &req);
+ if(!result)
+ result = Curl_add_custom_headers(conn, FALSE, &req);
+
+ if(!result) {
+ http->postdata = NULL; /* nothing to post at this point */
+ if((httpreq == HTTPREQ_GET) ||
+ (httpreq == HTTPREQ_HEAD))
+ Curl_pgrsSetUploadSize(data, 0); /* nothing */
+
+ /* bodysend takes ownership of the 'req' memory on success */
+ result = Curl_http_bodysend(data, conn, &req, httpreq);
+ }
+ if(result) {
+ Curl_dyn_free(&req);
+ return result;
+ }
+
+ if((http->postsize > -1) &&
+ (http->postsize <= data->req.writebytecount) &&
+ (http->sending != HTTPSEND_REQUEST))
data->req.upload_done = TRUE;
if(data->req.writebytecount) {
@@ -2984,12 +3162,12 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
if(Curl_pgrsUpdate(conn))
result = CURLE_ABORTED_BY_CALLBACK;
- if(data->req.writebytecount >= postsize) {
+ if(!http->postsize) {
/* already sent the entire request body, mark the "upload" as
complete */
infof(data, "upload completely sent off: %" CURL_FORMAT_CURL_OFF_T
" out of %" CURL_FORMAT_CURL_OFF_T " bytes\n",
- data->req.writebytecount, postsize);
+ data->req.writebytecount, http->postsize);
data->req.upload_done = TRUE;
data->req.keepon &= ~KEEP_SEND; /* we're done writing */
data->req.exp100 = EXP100_SEND_DATA; /* already sent */
@@ -3005,6 +3183,8 @@ CURLcode Curl_http(struct connectdata *conn, bool *done)
return result;
}
+#endif /* USE_HYPER */
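
Putting the pieces together, the refactored Curl_http() emits the request head in a fixed order: method and target, the formatted header block, then cookies, time condition and custom headers, before the body send routine closes the head with a blank line. A sketch of the resulting wire format with hypothetical values (not what libcurl literally sends):

    /* sketch: the assembled request head, hypothetical values only */
    #include <stdio.h>

    int main(void)
    {
      char req[512];
      int n = snprintf(req, sizeof(req),
                       "%s %s HTTP/%s\r\n"  /* method, target, version */
                       "Host: %s\r\n"       /* Curl_http_host() */
                       "Accept: */*\r\n"    /* default Accept header */
                       "Cookie: %s\r\n"     /* Curl_http_cookies() */
                       "\r\n",              /* blank line ends the head */
                       "GET", "/index.html", "1.1",
                       "example.com", "lang=en");
      if(n > 0 && (size_t)n < sizeof(req))
        fputs(req, stdout);
      return 0;
    }
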
+
typedef enum {
STATUS_UNKNOWN, /* not enough data to tell yet */
STATUS_DONE, /* a status line was read */
@@ -3112,38 +3292,396 @@ checkprotoprefix(struct Curl_easy *data, struct connectdata *conn,
static void print_http_error(struct Curl_easy *data)
{
struct SingleRequest *k = &data->req;
- char *beg = Curl_dyn_ptr(&data->state.headerb);
-
- /* make sure that data->req.p points to the HTTP status line */
- if(!strncmp(beg, "HTTP", 4)) {
-
- /* skip to HTTP status code */
- beg = strchr(beg, ' ');
- if(beg && *++beg) {
-
- /* find trailing CR */
- char end_char = '\r';
- char *end = strchr(beg, end_char);
- if(!end) {
- /* try to find LF (workaround for non-compliant HTTP servers) */
- end_char = '\n';
- end = strchr(beg, end_char);
+ failf(data, "The requested URL returned error: %d", k->httpcode);
+}
+
+/*
+ * Curl_http_header() parses a single response header.
+ */
+CURLcode Curl_http_header(struct Curl_easy *data, struct connectdata *conn,
+ char *headp)
+{
+ CURLcode result;
+ struct SingleRequest *k = &data->req;
+ /* Check for Content-Length: header lines to get size */
+ if(!k->http_bodyless &&
+ !data->set.ignorecl && checkprefix("Content-Length:", headp)) {
+ curl_off_t contentlength;
+ CURLofft offt = curlx_strtoofft(headp + 15, NULL, 10, &contentlength);
+
+ if(offt == CURL_OFFT_OK) {
+ if(data->set.max_filesize &&
+ contentlength > data->set.max_filesize) {
+ failf(data, "Maximum file size exceeded");
+ return CURLE_FILESIZE_EXCEEDED;
+ }
+ k->size = contentlength;
+ k->maxdownload = k->size;
+ /* we set the progress download size already at this point
+ just to make it easier for apps/callbacks to extract this
+ info as soon as possible */
+ Curl_pgrsSetDownloadSize(data, k->size);
+ }
+ else if(offt == CURL_OFFT_FLOW) {
+ /* out of range */
+ if(data->set.max_filesize) {
+ failf(data, "Maximum file size exceeded");
+ return CURLE_FILESIZE_EXCEEDED;
}
+ streamclose(conn, "overflow content-length");
+ infof(data, "Overflow Content-Length: value!\n");
+ }
+ else {
+ /* negative or just rubbish - bad HTTP */
+ failf(data, "Invalid Content-Length: value");
+ return CURLE_WEIRD_SERVER_REPLY;
+ }
+ }
+ /* check for Content-Type: header lines to get the MIME-type */
+ else if(checkprefix("Content-Type:", headp)) {
+ char *contenttype = Curl_copy_header_value(headp);
+ if(!contenttype)
+ return CURLE_OUT_OF_MEMORY;
+ if(!*contenttype)
+ /* ignore empty data */
+ free(contenttype);
+ else {
+ Curl_safefree(data->info.contenttype);
+ data->info.contenttype = contenttype;
+ }
+ }
+#ifndef CURL_DISABLE_PROXY
+ else if((conn->httpversion == 10) &&
+ conn->bits.httpproxy &&
+ Curl_compareheader(headp, "Proxy-Connection:", "keep-alive")) {
+ /*
+ * When a HTTP/1.0 reply comes when using a proxy, the
+ * 'Proxy-Connection: keep-alive' line tells us the
+ * connection will be kept alive for our pleasure.
+ * Default action for 1.0 is to close.
+ */
+ connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
+ infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
+ }
+ else if((conn->httpversion == 11) &&
+ conn->bits.httpproxy &&
+ Curl_compareheader(headp, "Proxy-Connection:", "close")) {
+ /*
+ * We get a HTTP/1.1 response from a proxy and it says it'll
+ * close down after this transfer.
+ */
+ connclose(conn, "Proxy-Connection: asked to close after done");
+ infof(data, "HTTP/1.1 proxy connection set close!\n");
+ }
+#endif
+ else if((conn->httpversion == 10) &&
+ Curl_compareheader(headp, "Connection:", "keep-alive")) {
+ /*
+ * A HTTP/1.0 reply with the 'Connection: keep-alive' line
+ * tells us the connection will be kept alive for our
+ * pleasure. Default action for 1.0 is to close.
+ *
+ * [RFC2068, section 19.7.1] */
+ connkeep(conn, "Connection keep-alive");
+ infof(data, "HTTP/1.0 connection set to keep alive!\n");
+ }
+ else if(Curl_compareheader(headp, "Connection:", "close")) {
+ /*
+ * [RFC 2616, section 8.1.2.1]
+ * "Connection: close" is HTTP/1.1 language and means that
+ * the connection will close when this request has been
+ * served.
+ */
+ streamclose(conn, "Connection: close used");
+ }
+ else if(!k->http_bodyless && checkprefix("Transfer-Encoding:", headp)) {
+ /* One or more encodings. We check for chunked and/or a compression
+ algorithm. */
+ /*
+ * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
+ * means that the server will send a series of "chunks". Each
+     * chunk starts with a line with info (including size of the
+ * coming block) (terminated with CRLF), then a block of data
+ * with the previously mentioned size. There can be any amount
+ * of chunks, and a chunk-data set to zero signals the
+ * end-of-chunks. */
+
+ result = Curl_build_unencoding_stack(conn, headp + 18, TRUE);
+ if(result)
+ return result;
+ }
+ else if(!k->http_bodyless && checkprefix("Content-Encoding:", headp) &&
+ data->set.str[STRING_ENCODING]) {
+ /*
+ * Process Content-Encoding. Look for the values: identity,
+ * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
+ * x-compress are the same as gzip and compress. (Sec 3.5 RFC
+ * 2616). zlib cannot handle compress. However, errors are
+ * handled further down when the response body is processed
+ */
+ result = Curl_build_unencoding_stack(conn, headp + 17, FALSE);
+ if(result)
+ return result;
+ }
+ else if(checkprefix("Retry-After:", headp)) {
+ /* Retry-After = HTTP-date / delay-seconds */
+ curl_off_t retry_after = 0; /* zero for unknown or "now" */
+ time_t date = Curl_getdate_capped(&headp[12]);
+ if(-1 == date) {
+ /* not a date, try it as a decimal number */
+ (void)curlx_strtoofft(&headp[12], NULL, 10, &retry_after);
+ }
+ else
+ /* convert date to number of seconds into the future */
+ retry_after = date - time(NULL);
+ data->info.retry_after = retry_after; /* store it */
+ }
+ else if(!k->http_bodyless && checkprefix("Content-Range:", headp)) {
+ /* Content-Range: bytes [num]-
+ Content-Range: bytes: [num]-
+ Content-Range: [num]-
+ Content-Range: [asterisk]/[total]
+
+ The second format was added since Sun's webserver
+ JavaWebServer/1.1.1 obviously sends the header this way!
+ The third added since some servers use that!
+       The fourth means the requested range was unsatisfied.
+ */
+
+ char *ptr = headp + 14;
- if(end) {
- /* temporarily replace CR or LF by NUL and print the error message */
- *end = '\0';
- failf(data, "The requested URL returned error: %s", beg);
+ /* Move forward until first digit or asterisk */
+ while(*ptr && !ISDIGIT(*ptr) && *ptr != '*')
+ ptr++;
- /* restore the previously replaced CR or LF */
- *end = end_char;
- return;
+ /* if it truly stopped on a digit */
+ if(ISDIGIT(*ptr)) {
+ if(!curlx_strtoofft(ptr, NULL, 10, &k->offset)) {
+ if(data->state.resume_from == k->offset)
+ /* we asked for a resume and we got it */
+ k->content_range = TRUE;
}
}
+ else
+ data->state.resume_from = 0; /* get everything */
+ }
+#if !defined(CURL_DISABLE_COOKIES)
+ else if(data->cookies && data->state.cookie_engine &&
+ checkprefix("Set-Cookie:", headp)) {
+ Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
+ CURL_LOCK_ACCESS_SINGLE);
+ Curl_cookie_add(data,
+ data->cookies, TRUE, FALSE, headp + 11,
+ /* If there is a custom-set Host: name, use it
+ here, or else use real peer host name. */
+ data->state.aptr.cookiehost?
+ data->state.aptr.cookiehost:conn->host.name,
+ data->state.up.path,
+ (conn->handler->protocol&CURLPROTO_HTTPS)?
+ TRUE:FALSE);
+ Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
+ }
+#endif
+ else if(!k->http_bodyless && checkprefix("Last-Modified:", headp) &&
+ (data->set.timecondition || data->set.get_filetime) ) {
+ k->timeofdoc = Curl_getdate_capped(headp + strlen("Last-Modified:"));
+ if(data->set.get_filetime)
+ data->info.filetime = k->timeofdoc;
}
+ else if((checkprefix("WWW-Authenticate:", headp) &&
+ (401 == k->httpcode)) ||
+ (checkprefix("Proxy-authenticate:", headp) &&
+ (407 == k->httpcode))) {
+
+ bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
+ char *auth = Curl_copy_header_value(headp);
+ if(!auth)
+ return CURLE_OUT_OF_MEMORY;
- /* fall-back to printing the HTTP status code only */
- failf(data, "The requested URL returned error: %d", k->httpcode);
+ result = Curl_http_input_auth(conn, proxy, auth);
+
+ free(auth);
+
+ if(result)
+ return result;
+ }
+#ifdef USE_SPNEGO
+ else if(checkprefix("Persistent-Auth", headp)) {
+ struct negotiatedata *negdata = &conn->negotiate;
+ struct auth *authp = &data->state.authhost;
+ if(authp->picked == CURLAUTH_NEGOTIATE) {
+ char *persistentauth = Curl_copy_header_value(headp);
+ if(!persistentauth)
+ return CURLE_OUT_OF_MEMORY;
+ negdata->noauthpersist = checkprefix("false", persistentauth)?
+ TRUE:FALSE;
+ negdata->havenoauthpersist = TRUE;
+ infof(data, "Negotiate: noauthpersist -> %d, header part: %s",
+ negdata->noauthpersist, persistentauth);
+ free(persistentauth);
+ }
+ }
+#endif
+ else if((k->httpcode >= 300 && k->httpcode < 400) &&
+ checkprefix("Location:", headp) &&
+ !data->req.location) {
+ /* this is the URL that the server advises us to use instead */
+ char *location = Curl_copy_header_value(headp);
+ if(!location)
+ return CURLE_OUT_OF_MEMORY;
+ if(!*location)
+ /* ignore empty data */
+ free(location);
+ else {
+ data->req.location = location;
+
+ if(data->set.http_follow_location) {
+ DEBUGASSERT(!data->req.newurl);
+ data->req.newurl = strdup(data->req.location); /* clone */
+ if(!data->req.newurl)
+ return CURLE_OUT_OF_MEMORY;
+
+        /* some cases of POST and PUT etc need to rewind the data
+ stream at this point */
+ result = http_perhapsrewind(conn);
+ if(result)
+ return result;
+ }
+ }
+ }
+
+#ifdef USE_HSTS
+ /* If enabled, the header is incoming and this is over HTTPS */
+ else if(data->hsts && checkprefix("Strict-Transport-Security:", headp) &&
+ (conn->handler->flags & PROTOPT_SSL)) {
+ CURLcode check =
+ Curl_hsts_parse(data->hsts, data->state.up.hostname,
+ &headp[ sizeof("Strict-Transport-Security:") -1 ]);
+ if(check)
+ infof(data, "Illegal STS header skipped\n");
+#ifdef DEBUGBUILD
+ else
+ infof(data, "Parsed STS header fine (%zu entries)\n",
+ data->hsts->list.size);
+#endif
+ }
+#endif
+#ifndef CURL_DISABLE_ALTSVC
+ /* If enabled, the header is incoming and this is over HTTPS */
+ else if(data->asi && checkprefix("Alt-Svc:", headp) &&
+ ((conn->handler->flags & PROTOPT_SSL) ||
+#ifdef CURLDEBUG
+ /* allow debug builds to circumvent the HTTPS restriction */
+ getenv("CURL_ALTSVC_HTTP")
+#else
+ 0
+#endif
+ )) {
+ /* the ALPN of the current request */
+ enum alpnid id = (conn->httpversion == 20) ? ALPN_h2 : ALPN_h1;
+ result = Curl_altsvc_parse(data, data->asi,
+ &headp[ strlen("Alt-Svc:") ],
+ id, conn->host.name,
+ curlx_uitous(conn->remote_port));
+ if(result)
+ return result;
+ }
+#endif
+ else if(conn->handler->protocol & CURLPROTO_RTSP) {
+ result = Curl_rtsp_parseheader(conn, headp);
+ if(result)
+ return result;
+ }
+ return CURLE_OK;
+}
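
The Retry-After branch in Curl_http_header() accepts either an HTTP-date or plain delay-seconds, trying the date first and falling back to a number. A standalone sketch that handles only the numeric form (date parsing omitted; retry_after_seconds() is a made-up helper):

    /* sketch: Retry-After as delay-seconds (date form not handled here) */
    #include <stdio.h>
    #include <stdlib.h>

    static long long retry_after_seconds(const char *value)
    {
      char *end;
      long long secs = strtoll(value, &end, 10);
      if(end != value && secs >= 0)
        return secs; /* plain delay-seconds */
      return 0;      /* unknown or "now"; an HTTP-date would be parsed here */
    }

    int main(void)
    {
      printf("%lld\n", retry_after_seconds("120"));
      printf("%lld\n", retry_after_seconds("Fri, 01 Jan 2021 00:00:00 GMT"));
      return 0;
    }
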
+
+/*
+ * Called after the first HTTP response line (the status line) has been
+ * received and parsed.
+ */
+
+CURLcode Curl_http_statusline(struct Curl_easy *data,
+ struct connectdata *conn)
+{
+ struct SingleRequest *k = &data->req;
+ data->info.httpcode = k->httpcode;
+
+ data->info.httpversion = conn->httpversion;
+ if(!data->state.httpversion ||
+ data->state.httpversion > conn->httpversion)
+ /* store the lowest server version we encounter */
+ data->state.httpversion = conn->httpversion;
+
+ /*
+ * This code executes as part of processing the header. As a
+ * result, it's not totally clear how to interpret the
+ * response code yet as that depends on what other headers may
+ * be present. 401 and 407 may be errors, but may be OK
+ * depending on how authentication is working. Other codes
+ * are definitely errors, so give up here.
+ */
+ if(data->state.resume_from && data->state.httpreq == HTTPREQ_GET &&
+ k->httpcode == 416) {
+ /* "Requested Range Not Satisfiable", just proceed and
+ pretend this is no error */
+ k->ignorebody = TRUE; /* Avoid appending error msg to good data. */
+ }
+ else if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
+ ((k->httpcode != 401) || !conn->bits.user_passwd)
+#ifndef CURL_DISABLE_PROXY
+ && ((k->httpcode != 407) || !conn->bits.proxy_user_passwd)
+#endif
+ ) {
+ /* serious error, go home! */
+ print_http_error(data);
+ return CURLE_HTTP_RETURNED_ERROR;
+ }
+
+ if(conn->httpversion == 10) {
+ /* Default action for HTTP/1.0 must be to close, unless
+ we get one of those fancy headers that tell us the
+ server keeps it open for us! */
+ infof(data, "HTTP 1.0, assume close after body\n");
+ connclose(conn, "HTTP/1.0 close after body");
+ }
+ else if(conn->httpversion == 20 ||
+ (k->upgr101 == UPGR101_REQUESTED && k->httpcode == 101)) {
+ DEBUGF(infof(data, "HTTP/2 found, allow multiplexing\n"));
+ /* HTTP/2 cannot avoid multiplexing since it is a core functionality
+ of the protocol */
+ conn->bundle->multiuse = BUNDLE_MULTIPLEX;
+ }
+ else if(conn->httpversion >= 11 &&
+ !conn->bits.close) {
+ /* If HTTP version is >= 1.1 and connection is persistent */
+ DEBUGF(infof(data,
+ "HTTP 1.1 or later with persistent connection\n"));
+ }
+
+ k->http_bodyless = k->httpcode >= 100 && k->httpcode < 200;
+ switch(k->httpcode) {
+ case 304:
+ /* (quote from RFC2616, section 10.3.5): The 304 response
+ * MUST NOT contain a message-body, and thus is always
+ * terminated by the first empty line after the header
+ * fields. */
+ if(data->set.timecondition)
+ data->info.timecond = TRUE;
+ /* FALLTHROUGH */
+ case 204:
+ /* (quote from RFC2616, section 10.2.5): The server has
+ * fulfilled the request but does not need to return an
+ * entity-body ... The 204 response MUST NOT include a
+ * message-body, and thus is always terminated by the first
+ * empty line after the header fields. */
+ k->size = 0;
+ k->maxdownload = 0;
+ k->http_bodyless = TRUE;
+ break;
+ default:
+ break;
+ }
+ return CURLE_OK;
}
/*
@@ -3661,83 +4199,9 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
}
if(nc) {
- data->info.httpcode = k->httpcode;
-
- data->info.httpversion = conn->httpversion;
- if(!data->state.httpversion ||
- data->state.httpversion > conn->httpversion)
- /* store the lowest server version we encounter */
- data->state.httpversion = conn->httpversion;
-
- /*
- * This code executes as part of processing the header. As a
- * result, it's not totally clear how to interpret the
- * response code yet as that depends on what other headers may
- * be present. 401 and 407 may be errors, but may be OK
- * depending on how authentication is working. Other codes
- * are definitely errors, so give up here.
- */
- if(data->state.resume_from && data->state.httpreq == HTTPREQ_GET &&
- k->httpcode == 416) {
- /* "Requested Range Not Satisfiable", just proceed and
- pretend this is no error */
- k->ignorebody = TRUE; /* Avoid appending error msg to good data. */
- }
- else if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
- ((k->httpcode != 401) || !conn->bits.user_passwd)
-#ifndef CURL_DISABLE_PROXY
- && ((k->httpcode != 407) || !conn->bits.proxy_user_passwd)
-#endif
- ) {
- /* serious error, go home! */
- print_http_error(data);
- return CURLE_HTTP_RETURNED_ERROR;
- }
-
- if(conn->httpversion == 10) {
- /* Default action for HTTP/1.0 must be to close, unless
- we get one of those fancy headers that tell us the
- server keeps it open for us! */
- infof(data, "HTTP 1.0, assume close after body\n");
- connclose(conn, "HTTP/1.0 close after body");
- }
- else if(conn->httpversion == 20 ||
- (k->upgr101 == UPGR101_REQUESTED && k->httpcode == 101)) {
- DEBUGF(infof(data, "HTTP/2 found, allow multiplexing\n"));
- /* HTTP/2 cannot avoid multiplexing since it is a core functionality
- of the protocol */
- conn->bundle->multiuse = BUNDLE_MULTIPLEX;
- }
- else if(conn->httpversion >= 11 &&
- !conn->bits.close) {
- /* If HTTP version is >= 1.1 and connection is persistent */
- DEBUGF(infof(data,
- "HTTP 1.1 or later with persistent connection\n"));
- }
-
- k->http_bodyless = k->httpcode >= 100 && k->httpcode < 200;
- switch(k->httpcode) {
- case 304:
- /* (quote from RFC2616, section 10.3.5): The 304 response
- * MUST NOT contain a message-body, and thus is always
- * terminated by the first empty line after the header
- * fields. */
- if(data->set.timecondition)
- data->info.timecond = TRUE;
- /* FALLTHROUGH */
- case 204:
- /* (quote from RFC2616, section 10.2.5): The server has
- * fulfilled the request but does not need to return an
- * entity-body ... The 204 response MUST NOT include a
- * message-body, and thus is always terminated by the first
- * empty line after the header fields. */
- k->size = 0;
- k->maxdownload = 0;
- k->http_bodyless = TRUE;
- break;
- default:
- break;
- }
+ result = Curl_http_statusline(data, conn);
+ if(result)
+ return result;
}
else {
k->header = FALSE; /* this is not a header line */
@@ -3750,295 +4214,9 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
if(result)
return result;
- /* Check for Content-Length: header lines to get size */
- if(!k->http_bodyless &&
- !data->set.ignorecl && checkprefix("Content-Length:", headp)) {
- curl_off_t contentlength;
- CURLofft offt = curlx_strtoofft(headp + 15, NULL, 10, &contentlength);
-
- if(offt == CURL_OFFT_OK) {
- if(data->set.max_filesize &&
- contentlength > data->set.max_filesize) {
- failf(data, "Maximum file size exceeded");
- return CURLE_FILESIZE_EXCEEDED;
- }
- k->size = contentlength;
- k->maxdownload = k->size;
- /* we set the progress download size already at this point
- just to make it easier for apps/callbacks to extract this
- info as soon as possible */
- Curl_pgrsSetDownloadSize(data, k->size);
- }
- else if(offt == CURL_OFFT_FLOW) {
- /* out of range */
- if(data->set.max_filesize) {
- failf(data, "Maximum file size exceeded");
- return CURLE_FILESIZE_EXCEEDED;
- }
- streamclose(conn, "overflow content-length");
- infof(data, "Overflow Content-Length: value!\n");
- }
- else {
- /* negative or just rubbish - bad HTTP */
- failf(data, "Invalid Content-Length: value");
- return CURLE_WEIRD_SERVER_REPLY;
- }
- }
- /* check for Content-Type: header lines to get the MIME-type */
- else if(checkprefix("Content-Type:", headp)) {
- char *contenttype = Curl_copy_header_value(headp);
- if(!contenttype)
- return CURLE_OUT_OF_MEMORY;
- if(!*contenttype)
- /* ignore empty data */
- free(contenttype);
- else {
- Curl_safefree(data->info.contenttype);
- data->info.contenttype = contenttype;
- }
- }
-#ifndef CURL_DISABLE_PROXY
- else if((conn->httpversion == 10) &&
- conn->bits.httpproxy &&
- Curl_compareheader(headp, "Proxy-Connection:", "keep-alive")) {
- /*
- * When a HTTP/1.0 reply comes when using a proxy, the
- * 'Proxy-Connection: keep-alive' line tells us the
- * connection will be kept alive for our pleasure.
- * Default action for 1.0 is to close.
- */
- connkeep(conn, "Proxy-Connection keep-alive"); /* don't close */
- infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
- }
- else if((conn->httpversion == 11) &&
- conn->bits.httpproxy &&
- Curl_compareheader(headp, "Proxy-Connection:", "close")) {
- /*
- * We get a HTTP/1.1 response from a proxy and it says it'll
- * close down after this transfer.
- */
- connclose(conn, "Proxy-Connection: asked to close after done");
- infof(data, "HTTP/1.1 proxy connection set close!\n");
- }
-#endif
- else if((conn->httpversion == 10) &&
- Curl_compareheader(headp, "Connection:", "keep-alive")) {
- /*
- * A HTTP/1.0 reply with the 'Connection: keep-alive' line
- * tells us the connection will be kept alive for our
- * pleasure. Default action for 1.0 is to close.
- *
- * [RFC2068, section 19.7.1] */
- connkeep(conn, "Connection keep-alive");
- infof(data, "HTTP/1.0 connection set to keep alive!\n");
- }
- else if(Curl_compareheader(headp, "Connection:", "close")) {
- /*
- * [RFC 2616, section 8.1.2.1]
- * "Connection: close" is HTTP/1.1 language and means that
- * the connection will close when this request has been
- * served.
- */
- streamclose(conn, "Connection: close used");
- }
- else if(!k->http_bodyless && checkprefix("Transfer-Encoding:", headp)) {
- /* One or more encodings. We check for chunked and/or a compression
- algorithm. */
- /*
- * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
- * means that the server will send a series of "chunks". Each
- * chunk starts with line with info (including size of the
- * coming block) (terminated with CRLF), then a block of data
- * with the previously mentioned size. There can be any amount
- * of chunks, and a chunk-data set to zero signals the
- * end-of-chunks. */
-
- result = Curl_build_unencoding_stack(conn, headp + 18, TRUE);
- if(result)
- return result;
- }
- else if(!k->http_bodyless && checkprefix("Content-Encoding:", headp) &&
- data->set.str[STRING_ENCODING]) {
- /*
- * Process Content-Encoding. Look for the values: identity,
- * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
- * x-compress are the same as gzip and compress. (Sec 3.5 RFC
- * 2616). zlib cannot handle compress. However, errors are
- * handled further down when the response body is processed
- */
- result = Curl_build_unencoding_stack(conn, headp + 17, FALSE);
- if(result)
- return result;
- }
- else if(checkprefix("Retry-After:", headp)) {
- /* Retry-After = HTTP-date / delay-seconds */
- curl_off_t retry_after = 0; /* zero for unknown or "now" */
- time_t date = Curl_getdate_capped(&headp[12]);
- if(-1 == date) {
- /* not a date, try it as a decimal number */
- (void)curlx_strtoofft(&headp[12], NULL, 10, &retry_after);
- }
- else
- /* convert date to number of seconds into the future */
- retry_after = date - time(NULL);
- data->info.retry_after = retry_after; /* store it */
- }
- else if(!k->http_bodyless && checkprefix("Content-Range:", headp)) {
- /* Content-Range: bytes [num]-
- Content-Range: bytes: [num]-
- Content-Range: [num]-
- Content-Range: [asterisk]/[total]
-
- The second format was added since Sun's webserver
- JavaWebServer/1.1.1 obviously sends the header this way!
- The third added since some servers use that!
- The forth means the requested range was unsatisfied.
- */
-
- char *ptr = headp + 14;
-
- /* Move forward until first digit or asterisk */
- while(*ptr && !ISDIGIT(*ptr) && *ptr != '*')
- ptr++;
-
- /* if it truly stopped on a digit */
- if(ISDIGIT(*ptr)) {
- if(!curlx_strtoofft(ptr, NULL, 10, &k->offset)) {
- if(data->state.resume_from == k->offset)
- /* we asked for a resume and we got it */
- k->content_range = TRUE;
- }
- }
- else
- data->state.resume_from = 0; /* get everything */
- }
-#if !defined(CURL_DISABLE_COOKIES)
- else if(data->cookies && data->state.cookie_engine &&
- checkprefix("Set-Cookie:", headp)) {
- Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
- CURL_LOCK_ACCESS_SINGLE);
- Curl_cookie_add(data,
- data->cookies, TRUE, FALSE, headp + 11,
- /* If there is a custom-set Host: name, use it
- here, or else use real peer host name. */
- data->state.aptr.cookiehost?
- data->state.aptr.cookiehost:conn->host.name,
- data->state.up.path,
- (conn->handler->protocol&CURLPROTO_HTTPS)?
- TRUE:FALSE);
- Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
- }
-#endif
- else if(!k->http_bodyless && checkprefix("Last-Modified:", headp) &&
- (data->set.timecondition || data->set.get_filetime) ) {
- k->timeofdoc = Curl_getdate_capped(headp + strlen("Last-Modified:"));
- if(data->set.get_filetime)
- data->info.filetime = k->timeofdoc;
- }
- else if((checkprefix("WWW-Authenticate:", headp) &&
- (401 == k->httpcode)) ||
- (checkprefix("Proxy-authenticate:", headp) &&
- (407 == k->httpcode))) {
-
- bool proxy = (k->httpcode == 407) ? TRUE : FALSE;
- char *auth = Curl_copy_header_value(headp);
- if(!auth)
- return CURLE_OUT_OF_MEMORY;
-
- result = Curl_http_input_auth(conn, proxy, auth);
-
- free(auth);
-
- if(result)
- return result;
- }
-#ifdef USE_SPNEGO
- else if(checkprefix("Persistent-Auth", headp)) {
- struct negotiatedata *negdata = &conn->negotiate;
- struct auth *authp = &data->state.authhost;
- if(authp->picked == CURLAUTH_NEGOTIATE) {
- char *persistentauth = Curl_copy_header_value(headp);
- if(!persistentauth)
- return CURLE_OUT_OF_MEMORY;
- negdata->noauthpersist = checkprefix("false", persistentauth)?
- TRUE:FALSE;
- negdata->havenoauthpersist = TRUE;
- infof(data, "Negotiate: noauthpersist -> %d, header part: %s",
- negdata->noauthpersist, persistentauth);
- free(persistentauth);
- }
- }
-#endif
- else if((k->httpcode >= 300 && k->httpcode < 400) &&
- checkprefix("Location:", headp) &&
- !data->req.location) {
- /* this is the URL that the server advises us to use instead */
- char *location = Curl_copy_header_value(headp);
- if(!location)
- return CURLE_OUT_OF_MEMORY;
- if(!*location)
- /* ignore empty data */
- free(location);
- else {
- data->req.location = location;
-
- if(data->set.http_follow_location) {
- DEBUGASSERT(!data->req.newurl);
- data->req.newurl = strdup(data->req.location); /* clone */
- if(!data->req.newurl)
- return CURLE_OUT_OF_MEMORY;
-
- /* some cases of POST and PUT etc needs to rewind the data
- stream at this point */
- result = http_perhapsrewind(conn);
- if(result)
- return result;
- }
- }
- }
-
-#ifdef USE_HSTS
- /* If enabled, the header is incoming and this is over HTTPS */
- else if(data->hsts && checkprefix("Strict-Transport-Security:", headp) &&
- (conn->handler->flags & PROTOPT_SSL)) {
- CURLcode check =
- Curl_hsts_parse(data->hsts, data->state.up.hostname,
- &headp[ sizeof("Strict-Transport-Security:") -1 ]);
- if(check)
- infof(data, "Illegal STS header skipped\n");
-#ifdef DEBUGBUILD
- else
- infof(data, "Parsed STS header fine (%zu entries)\n",
- data->hsts->list.size);
-#endif
- }
-#endif
-#ifndef CURL_DISABLE_ALTSVC
- /* If enabled, the header is incoming and this is over HTTPS */
- else if(data->asi && checkprefix("Alt-Svc:", headp) &&
- ((conn->handler->flags & PROTOPT_SSL) ||
-#ifdef CURLDEBUG
- /* allow debug builds to circumvent the HTTPS restriction */
- getenv("CURL_ALTSVC_HTTP")
-#else
- 0
-#endif
- )) {
- /* the ALPN of the current request */
- enum alpnid id = (conn->httpversion == 20) ? ALPN_h2 : ALPN_h1;
- result = Curl_altsvc_parse(data, data->asi,
- &headp[ strlen("Alt-Svc:") ],
- id, conn->host.name,
- curlx_uitous(conn->remote_port));
- if(result)
- return result;
- }
-#endif
- else if(conn->handler->protocol & CURLPROTO_RTSP) {
- result = Curl_rtsp_parseheader(conn, headp);
- if(result)
- return result;
- }
+ result = Curl_http_header(data, conn, headp);
+ if(result)
+ return result;
/*
* End of header-checks. Write them to the client.
diff --git a/lib/http.h b/lib/http.h
index 1aaec225e..3d90e992b 100644
--- a/lib/http.h
+++ b/lib/http.h
@@ -35,6 +35,17 @@ extern const struct Curl_handler Curl_handler_http;
extern const struct Curl_handler Curl_handler_https;
#endif
+typedef enum {
+ HTTPREQ_NONE, /* first in list */
+ HTTPREQ_GET,
+ HTTPREQ_POST,
+ HTTPREQ_POST_FORM, /* we make a difference internally */
+ HTTPREQ_POST_MIME, /* we make a difference internally */
+ HTTPREQ_PUT,
+ HTTPREQ_HEAD,
+ HTTPREQ_LAST /* last in list */
+} Curl_HttpReq;
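
The new Curl_HttpReq enum keeps form and mime posts apart internally, but on the wire both normally go out as POST; the actual mapping lives in Curl_http_method() (not shown in this hunk) and can be overridden by a custom request string. A sketch of the usual mapping, with the enum repeated locally so it builds standalone:

    /* sketch: the method string each Curl_HttpReq value is normally sent as */
    #include <stdio.h>

    typedef enum {
      HTTPREQ_NONE, HTTPREQ_GET, HTTPREQ_POST, HTTPREQ_POST_FORM,
      HTTPREQ_POST_MIME, HTTPREQ_PUT, HTTPREQ_HEAD, HTTPREQ_LAST
    } Curl_HttpReq; /* repeated from http.h so this builds standalone */

    static const char *method_name(Curl_HttpReq r)
    {
      switch(r) {
      case HTTPREQ_HEAD:      return "HEAD";
      case HTTPREQ_PUT:       return "PUT";
      case HTTPREQ_POST:
      case HTTPREQ_POST_FORM:
      case HTTPREQ_POST_MIME: return "POST"; /* form/mime differ only inside */
      default:                return "GET";
      }
    }

    int main(void)
    {
      printf("%s %s %s\n", method_name(HTTPREQ_POST_MIME),
             method_name(HTTPREQ_HEAD), method_name(HTTPREQ_GET));
      return 0;
    }
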
+
/* Header specific functions */
bool Curl_compareheader(const char *headerline, /* line to check */
const char *header, /* header keyword _with_ colon */
@@ -44,21 +55,62 @@ char *Curl_copy_header_value(const char *header);
char *Curl_checkProxyheaders(const struct connectdata *conn,
const char *thisheader);
+#ifndef USE_HYPER
CURLcode Curl_buffer_send(struct dynbuf *in,
struct connectdata *conn,
curl_off_t *bytes_written,
size_t included_body_bytes,
int socketindex);
+#else
+#define Curl_buffer_send(a,b,c,d,e) CURLE_OK
+#endif
CURLcode Curl_add_timecondition(const struct connectdata *conn,
struct dynbuf *buf);
CURLcode Curl_add_custom_headers(struct connectdata *conn,
bool is_connect,
- struct dynbuf *req_buffer);
+#ifndef USE_HYPER
+ struct dynbuf *req
+#else
+ void *headers
+#endif
+ );
CURLcode Curl_http_compile_trailers(struct curl_slist *trailers,
struct dynbuf *buf,
struct Curl_easy *handle);
+void Curl_http_method(struct Curl_easy *data, struct connectdata *conn,
+ const char **method, Curl_HttpReq *);
+CURLcode Curl_http_useragent(struct Curl_easy *data, struct connectdata *conn);
+CURLcode Curl_http_host(struct Curl_easy *data, struct connectdata *conn);
+CURLcode Curl_http_target(struct Curl_easy *data, struct connectdata *conn,
+ struct dynbuf *req);
+CURLcode Curl_http_statusline(struct Curl_easy *data,
+ struct connectdata *conn);
+CURLcode Curl_http_header(struct Curl_easy *data, struct connectdata *conn,
+ char *headp);
+CURLcode Curl_http_body(struct Curl_easy *data, struct connectdata *conn,
+ Curl_HttpReq httpreq,
+ const char **teep);
+CURLcode Curl_http_bodysend(struct Curl_easy *data, struct connectdata *conn,
+ struct dynbuf *r, Curl_HttpReq httpreq);
+#ifndef CURL_DISABLE_COOKIES
+CURLcode Curl_http_cookies(struct Curl_easy *data,
+ struct connectdata *conn,
+ struct dynbuf *r);
+#else
+#define Curl_http_cookies(a,b,c) CURLE_OK
+#endif
+CURLcode Curl_http_resume(struct Curl_easy *data,
+ struct connectdata *conn,
+ Curl_HttpReq httpreq);
+CURLcode Curl_http_range(struct Curl_easy *data,
+ struct connectdata *conn,
+ Curl_HttpReq httpreq);
+CURLcode Curl_http_firstwrite(struct Curl_easy *data,
+ struct connectdata *conn,
+ bool *done);
+
/* protocol-specific functions set up to be called by the main engine */
CURLcode Curl_http(struct connectdata *conn, bool *done);
CURLcode Curl_http_done(struct connectdata *, CURLcode, bool premature);
@@ -115,7 +167,6 @@ struct HTTP {
const char *postdata;
const char *p_pragma; /* Pragma: string */
- const char *p_accept; /* Accept: string */
/* For FORM posting */
curl_mimepart form;
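The http.h hunk above shows the compile-time stubbing used for the hyper build: with USE_HYPER defined, Curl_buffer_send() becomes a macro that simply yields CURLE_OK, so existing call sites compile unchanged while the alternative backend handles request serialization itself. A small standalone sketch of that pattern follows; USE_ALT_BACKEND and backend_send are invented names used only to illustrate the idea.

/* Standalone sketch of the compile-time stub pattern shown above. */
#include <stdio.h>

#define SKETCH_OK 0

#ifndef USE_ALT_BACKEND
/* native path: the request bytes in 'buf' would go out on the wire */
static int backend_send(const char *buf, size_t len)
{
  printf("native backend sends %zu bytes:\n%s", len, buf);
  return SKETCH_OK;
}
#else
/* alternative backend owns request serialization; nothing to do here */
#define backend_send(buf, len) SKETCH_OK
#endif

int main(void)
{
  const char req[] = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n";
  /* the call site reads identically however the sketch is compiled */
  return backend_send(req, sizeof(req) - 1);
}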
diff --git a/lib/transfer.c b/lib/transfer.c
index 8fcb71832..96ee77c62 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -708,64 +708,10 @@ static CURLcode readwrite_data(struct Curl_easy *data,
write a piece of the body */
if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
/* HTTP-only checks */
-
- if(data->req.newurl) {
- if(conn->bits.close) {
- /* Abort after the headers if "follow Location" is set
- and we're set to close anyway. */
- k->keepon &= ~KEEP_RECV;
- *done = TRUE;
- return CURLE_OK;
- }
- /* We have a new url to load, but since we want to be able
- to re-use this connection properly, we read the full
- response in "ignore more" */
- k->ignorebody = TRUE;
- infof(data, "Ignoring the response-body\n");
- }
- if(data->state.resume_from && !k->content_range &&
- (data->state.httpreq == HTTPREQ_GET) &&
- !k->ignorebody) {
-
- if(k->size == data->state.resume_from) {
- /* The resume point is at the end of file, consider this fine
- even if it doesn't allow resume from here. */
- infof(data, "The entire document is already downloaded");
- connclose(conn, "already downloaded");
- /* Abort download */
- k->keepon &= ~KEEP_RECV;
- *done = TRUE;
- return CURLE_OK;
- }
-
- /* we wanted to resume a download, although the server doesn't
- * seem to support this and we did this with a GET (if it
- * wasn't a GET we did a POST or PUT resume) */
- failf(data, "HTTP server doesn't seem to support "
- "byte ranges. Cannot resume.");
- return CURLE_RANGE_ERROR;
- }
-
- if(data->set.timecondition && !data->state.range) {
- /* A time condition has been set AND no ranges have been
- requested. This seems to be what chapter 13.3.4 of
- RFC 2616 defines to be the correct action for a
- HTTP/1.1 client */
-
- if(!Curl_meets_timecondition(data, k->timeofdoc)) {
- *done = TRUE;
- /* We're simulating a http 304 from server so we return
- what should have been returned from the server */
- data->info.httpcode = 304;
- infof(data, "Simulate a HTTP 304 response!\n");
- /* we abort the transfer before it is completed == we ruin the
- re-use ability. Close the connection */
- connclose(conn, "Simulated 304 handling");
- return CURLE_OK;
- }
- } /* we have a time condition */
-
- } /* this is HTTP or RTSP */
+ result = Curl_http_firstwrite(data, conn, done);
+ if(result || *done)
+ return result;
+ }
} /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */
@@ -1263,6 +1209,10 @@ CURLcode Curl_readwrite(struct connectdata *conn,
return CURLE_SEND_ERROR;
}
+#ifdef USE_HYPER
+ if(conn->datastream)
+ return conn->datastream(data, conn, &didwhat, done, select_res);
+#endif
/* We go ahead and do a read if we have a readable socket or if
the stream was rewound (in which case we have data in a
buffer) */
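The transfer.c hunk above adds an early hand-off in Curl_readwrite(): when a connection has a datastream callback installed (the Curl_datastream pointer declared in the urldata.h hunk below), the whole read/write step is delegated to it. Here is a standalone sketch of that optional-override pattern; the struct layouts and the hyper_like_datastream handler are stand-ins, not curl's.

/* Standalone sketch of an optional per-connection transfer override. */
#include <stdbool.h>
#include <stdio.h>

struct easy;                               /* stand-in handles */
struct conn;

typedef int (*datastream_fn)(struct easy *easy, struct conn *conn,
                             int *didwhat, bool *done, int select_res);

struct easy { int dummy; };
struct conn { datastream_fn datastream; }; /* NULL means "use default" */

static int hyper_like_datastream(struct easy *easy, struct conn *conn,
                                 int *didwhat, bool *done, int select_res)
{
  (void)easy; (void)conn; (void)select_res;
  *didwhat = 1;
  *done = true;                            /* pretend the transfer ran */
  return 0;
}

static int readwrite(struct easy *easy, struct conn *conn, bool *done)
{
  int didwhat = 0;
  if(conn->datastream)                     /* the new early hand-off */
    return conn->datastream(easy, conn, &didwhat, done, 0);
  /* ...otherwise the traditional socket read/write loop would run... */
  *done = true;
  return 0;
}

int main(void)
{
  struct easy easy = {0};
  struct conn conn = { hyper_like_datastream };
  bool done = false;
  int rc = readwrite(&easy, &conn, &done);
  printf("rc=%d done=%d\n", rc, (int)done);
  return rc;
}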
diff --git a/lib/urldata.h b/lib/urldata.h
index 296341ebd..d810605bf 100644
--- a/lib/urldata.h
+++ b/lib/urldata.h
@@ -118,6 +118,14 @@ typedef ssize_t (Curl_recv)(struct connectdata *conn, /* connection data */
size_t len, /* max amount to read */
CURLcode *err); /* error to return */
+#ifdef USE_HYPER
+typedef CURLcode (*Curl_datastream)(struct Curl_easy *data,
+ struct connectdata *conn,
+ int *didwhat,
+ bool *done,
+ int select_res);
+#endif
+
#include "mime.h"
#include "imap.h"
#include "pop3.h"
@@ -132,6 +140,7 @@ typedef ssize_t (Curl_recv)(struct connectdata *conn, /* connection data */
#include "wildcard.h"
#include "multihandle.h"
#include "quic.h"
+#include "c-hyper.h"
#ifdef HAVE_GSSAPI
# ifdef HAVE_GSSGNU
@@ -1114,6 +1123,10 @@ struct connectdata {
#ifdef USE_UNIX_SOCKETS
char *unix_domain_socket;
#endif
+#ifdef USE_HYPER
+ /* if set, an alternative data transfer function */
+ Curl_datastream datastream;
+#endif
};
/* The end of connectdata. */
@@ -1209,17 +1222,6 @@ struct Progress {
};
typedef enum {
- HTTPREQ_NONE, /* first in list */
- HTTPREQ_GET,
- HTTPREQ_POST,
- HTTPREQ_POST_FORM, /* we make a difference internally */
- HTTPREQ_POST_MIME, /* we make a difference internally */
- HTTPREQ_PUT,
- HTTPREQ_HEAD,
- HTTPREQ_LAST /* last in list */
-} Curl_HttpReq;
-
-typedef enum {
RTSPREQ_NONE, /* first in list */
RTSPREQ_OPTIONS,
RTSPREQ_DESCRIBE,
@@ -1400,14 +1402,17 @@ struct UrlState {
int stream_weight;
CURLU *uh; /* URL handle for the current parsed URL */
struct urlpieces up;
- Curl_HttpReq httpreq; /* what kind of HTTP request (if any) is this */
#ifndef CURL_DISABLE_HTTP
+ Curl_HttpReq httpreq; /* what kind of HTTP request (if any) is this */
size_t trailers_bytes_sent;
struct dynbuf trailers_buf; /* a buffer containing the compiled trailing
headers */
#endif
trailers_state trailers_state; /* whether we are sending trailers
and what stage are we at */
+#ifdef USE_HYPER
+ CURLcode hresult; /* used to pass return codes back from hyper callbacks */
+#endif
/* Dynamically allocated strings, MUST be freed before this struct is
killed. */
@@ -1571,7 +1576,6 @@ enum dupstring {
STRING_ALTSVC, /* CURLOPT_ALTSVC */
STRING_HSTS, /* CURLOPT_HSTS */
STRING_SASL_AUTHZID, /* CURLOPT_SASL_AUTHZID */
- STRING_TEMP_URL, /* temp URL storage for proxy use */
STRING_DNS_SERVERS,
STRING_DNS_INTERFACE,
STRING_DNS_LOCAL_IP4,
@@ -1702,7 +1706,9 @@ struct UserDefined {
the hostname and port to connect to */
curl_TimeCond timecondition; /* kind of time/date comparison */
time_t timevalue; /* what time to compare with */
+#ifndef CURL_DISABLE_HTTP
Curl_HttpReq method; /* what kind of HTTP request (if any) is this */
+#endif
long httpversion; /* when non-zero, a specific HTTP version requested to
be used in the library's request(s) */
struct ssl_config_data ssl; /* user defined SSL stuff */
@@ -1936,6 +1942,9 @@ struct Curl_easy {
iconv_t inbound_cd; /* for translating from the network encoding */
iconv_t utf8_cd; /* for translating to UTF8 */
#endif /* CURL_DOES_CONVERSIONS && HAVE_ICONV */
+#ifdef USE_HYPER
+ struct hyptransfer hyp;
+#endif
unsigned int magic; /* set to a CURLEASY_MAGIC_NUMBER */
};
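The new state.hresult field is documented above as a way to pass return codes back from hyper callbacks. A plausible reading of that, sketched below with invented names (struct state, on_body_chunk): a callback whose signature is dictated by an external library cannot return the application's own error type, so it records the code in per-transfer state for the caller to inspect once the library returns.

/* Standalone sketch of stashing a result code in per-transfer state. */
#include <stdio.h>

struct state {
  int hresult;                 /* last error seen inside a callback */
};

/* imagine this signature is fixed by the external library and cannot
   carry our own error type */
static int on_body_chunk(void *userdata, const char *buf, size_t len)
{
  struct state *st = userdata;
  if(len == 0) {
    st->hresult = 23;          /* e.g. "write error", remembered for later */
    return 1;                  /* tell the library to stop */
  }
  printf("got %zu bytes: %.*s\n", len, (int)len, buf);
  return 0;
}

int main(void)
{
  struct state st = { 0 };
  /* a real library would drive the callback; we call it directly here */
  on_body_chunk(&st, "hello", 5);
  on_body_chunk(&st, "", 0);
  printf("result carried out of the callback: %d\n", st.hresult);
  return st.hresult ? 1 : 0;
}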