From 4ae2d9f24d3ba506f828df69357ac80b346348de Mon Sep 17 00:00:00 2001 From: Stefan Eissing Date: Thu, 6 Apr 2023 09:54:57 +0200 Subject: proxy: http2 proxy tunnel implementation - currently only on debug build and when env variable CURL_PROXY_TUNNEL_H2 is present. - will ALPN negotiate with the proxy server and switch tunnel filter based on the protocol negotiated. - http/1.1 tunnel code moved into cf-h1-proxy.[ch] - http/2 tunnel code implemented in cf-h2-proxy.[ch] - tunnel start and ALPN set remains in http_proxy.c - moving all haproxy related code into cf-haproxy.[ch] VTLS changes - SSL filters rely solely on the "alpn" specification they are created with and no longer check conn->bits.tls_enable_alpn. - checks on which ALPN specification to use (or none at all) are done in vtls.c when creating the filter. Testing - added a nghttpx forward proxy to the pytest setup that speaks HTTP/2 and forwards all requests to the Apache httpd forward proxy server. - extending test coverage in test_10 cases - adding proxy tests for direct/tunnel h1/h2 use of basic auth. 
- adding test for http/1.1 and h2 proxy tunneling to pytest Closes #10780 --- lib/Makefile.inc | 6 + lib/cf-h1-proxy.c | 1186 ++++++++++++++++++++++++++++++++ lib/cf-h1-proxy.h | 39 ++ lib/cf-h2-proxy.c | 1388 +++++++++++++++++++++++++++++++++++++ lib/cf-h2-proxy.h | 39 ++ lib/cf-haproxy.c | 262 +++++++ lib/cf-haproxy.h | 43 ++ lib/cfilters.c | 31 +- lib/cfilters.h | 21 +- lib/connect.c | 1 + lib/curl_log.c | 7 + lib/http_proxy.c | 1389 +++----------------------------------- lib/http_proxy.h | 27 +- lib/urldata.h | 4 + lib/vquic/curl_ngtcp2.c | 35 +- lib/vquic/curl_quiche.c | 2 +- lib/vtls/bearssl.c | 2 +- lib/vtls/gtls.c | 2 +- lib/vtls/nss.c | 5 +- lib/vtls/openssl.c | 2 +- lib/vtls/sectransp.c | 2 +- lib/vtls/vtls.c | 105 +-- lib/vtls/vtls.h | 52 -- lib/vtls/vtls_int.h | 40 +- lib/vtls/wolfssl.c | 2 +- tests/http/conftest.py | 14 +- tests/http/scorecard.py | 1 - tests/http/test_01_basic.py | 21 +- tests/http/test_02_download.py | 57 +- tests/http/test_03_goaway.py | 15 +- tests/http/test_04_stuttered.py | 12 +- tests/http/test_05_errors.py | 4 +- tests/http/test_06_eyeballs.py | 20 +- tests/http/test_07_upload.py | 37 +- tests/http/test_08_caddy.py | 33 +- tests/http/test_10_proxy.py | 174 ++++- tests/http/test_11_unix.py | 8 +- tests/http/test_12_reuse.py | 7 +- tests/http/test_13_proxy_auth.py | 193 ++++++ tests/http/testenv/__init__.py | 5 + tests/http/testenv/curl.py | 157 +++-- tests/http/testenv/env.py | 21 +- tests/http/testenv/httpd.py | 67 +- tests/http/testenv/nghttpx.py | 123 +++- 44 files changed, 3926 insertions(+), 1735 deletions(-) create mode 100644 lib/cf-h1-proxy.c create mode 100644 lib/cf-h1-proxy.h create mode 100644 lib/cf-h2-proxy.c create mode 100644 lib/cf-h2-proxy.h create mode 100644 lib/cf-haproxy.c create mode 100644 lib/cf-haproxy.h create mode 100644 tests/http/test_13_proxy_auth.py diff --git a/lib/Makefile.inc b/lib/Makefile.inc index 9a7245986..543d937e2 100644 --- a/lib/Makefile.inc +++ b/lib/Makefile.inc @@ -108,6 +108,9 @@ 
LIB_CFILES = \ bufq.c \ bufref.c \ c-hyper.c \ + cf-h1-proxy.c \ + cf-h2-proxy.c \ + cf-haproxy.c \ cf-https-connect.c \ cf-socket.c \ cfilters.c \ @@ -235,6 +238,9 @@ LIB_HFILES = \ bufq.h \ bufref.h \ c-hyper.h \ + cf-h1-proxy.h \ + cf-h2-proxy.h \ + cf-haproxy.h \ cf-https-connect.h \ cf-socket.h \ cfilters.h \ diff --git a/lib/cf-h1-proxy.c b/lib/cf-h1-proxy.c new file mode 100644 index 000000000..a02c46a52 --- /dev/null +++ b/lib/cf-h1-proxy.c @@ -0,0 +1,1186 @@ +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) Daniel Stenberg, , et al. + * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at https://curl.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. 
+ * + * SPDX-License-Identifier: curl + * + ***************************************************************************/ + +#include "curl_setup.h" + +#if !defined(CURL_DISABLE_PROXY) && !defined(CURL_DISABLE_HTTP) + +#include +#ifdef USE_HYPER +#include +#endif +#include "urldata.h" +#include "dynbuf.h" +#include "sendf.h" +#include "http.h" +#include "http_proxy.h" +#include "url.h" +#include "select.h" +#include "progress.h" +#include "cfilters.h" +#include "cf-h1-proxy.h" +#include "connect.h" +#include "curl_log.h" +#include "curlx.h" +#include "vtls/vtls.h" +#include "transfer.h" +#include "multiif.h" + +/* The last 3 #include files should be in this order */ +#include "curl_printf.h" +#include "curl_memory.h" +#include "memdebug.h" + + +typedef enum { + TUNNEL_INIT, /* init/default/no tunnel state */ + TUNNEL_CONNECT, /* CONNECT request is being send */ + TUNNEL_RECEIVE, /* CONNECT answer is being received */ + TUNNEL_RESPONSE, /* CONNECT response received completely */ + TUNNEL_ESTABLISHED, + TUNNEL_FAILED +} tunnel_state; + +/* struct for HTTP CONNECT tunneling */ +struct tunnel_state { + int sockindex; + const char *hostname; + int remote_port; + struct HTTP CONNECT; + struct dynbuf rcvbuf; + struct dynbuf req; + size_t nsend; + size_t headerlines; + enum keeponval { + KEEPON_DONE, + KEEPON_CONNECT, + KEEPON_IGNORE + } keepon; + curl_off_t cl; /* size of content to read and ignore */ + tunnel_state tunnel_state; + BIT(chunked_encoding); + BIT(close_connection); +}; + + +static bool tunnel_is_established(struct tunnel_state *ts) +{ + return ts && (ts->tunnel_state == TUNNEL_ESTABLISHED); +} + +static bool tunnel_is_failed(struct tunnel_state *ts) +{ + return ts && (ts->tunnel_state == TUNNEL_FAILED); +} + +static CURLcode tunnel_reinit(struct tunnel_state *ts, + struct connectdata *conn, + struct Curl_easy *data) +{ + (void)data; + DEBUGASSERT(ts); + Curl_dyn_reset(&ts->rcvbuf); + Curl_dyn_reset(&ts->req); + ts->tunnel_state = TUNNEL_INIT; + ts->keepon = 
KEEPON_CONNECT; + ts->cl = 0; + ts->close_connection = FALSE; + + if(conn->bits.conn_to_host) + ts->hostname = conn->conn_to_host.name; + else if(ts->sockindex == SECONDARYSOCKET) + ts->hostname = conn->secondaryhostname; + else + ts->hostname = conn->host.name; + + if(ts->sockindex == SECONDARYSOCKET) + ts->remote_port = conn->secondary_port; + else if(conn->bits.conn_to_port) + ts->remote_port = conn->conn_to_port; + else + ts->remote_port = conn->remote_port; + + return CURLE_OK; +} + +static CURLcode tunnel_init(struct tunnel_state **pts, + struct Curl_easy *data, + struct connectdata *conn, + int sockindex) +{ + struct tunnel_state *ts; + CURLcode result; + + if(conn->handler->flags & PROTOPT_NOTCPPROXY) { + failf(data, "%s cannot be done over CONNECT", conn->handler->scheme); + return CURLE_UNSUPPORTED_PROTOCOL; + } + + /* we might need the upload buffer for streaming a partial request */ + result = Curl_get_upload_buffer(data); + if(result) + return result; + + ts = calloc(1, sizeof(*ts)); + if(!ts) + return CURLE_OUT_OF_MEMORY; + + ts->sockindex = sockindex; + infof(data, "allocate connect buffer"); + + Curl_dyn_init(&ts->rcvbuf, DYN_PROXY_CONNECT_HEADERS); + Curl_dyn_init(&ts->req, DYN_HTTP_REQUEST); + + *pts = ts; + connkeep(conn, "HTTP proxy CONNECT"); + return tunnel_reinit(ts, conn, data); +} + +static void tunnel_go_state(struct Curl_cfilter *cf, + struct tunnel_state *ts, + tunnel_state new_state, + struct Curl_easy *data) +{ + if(ts->tunnel_state == new_state) + return; + /* leaving this one */ + switch(ts->tunnel_state) { + case TUNNEL_CONNECT: + data->req.ignorebody = FALSE; + break; + default: + break; + } + /* entering this one */ + switch(new_state) { + case TUNNEL_INIT: + DEBUGF(LOG_CF(data, cf, "new tunnel state 'init'")); + tunnel_reinit(ts, cf->conn, data); + break; + + case TUNNEL_CONNECT: + DEBUGF(LOG_CF(data, cf, "new tunnel state 'connect'")); + ts->tunnel_state = TUNNEL_CONNECT; + ts->keepon = KEEPON_CONNECT; + 
Curl_dyn_reset(&ts->rcvbuf); + break; + + case TUNNEL_RECEIVE: + DEBUGF(LOG_CF(data, cf, "new tunnel state 'receive'")); + ts->tunnel_state = TUNNEL_RECEIVE; + break; + + case TUNNEL_RESPONSE: + DEBUGF(LOG_CF(data, cf, "new tunnel state 'response'")); + ts->tunnel_state = TUNNEL_RESPONSE; + break; + + case TUNNEL_ESTABLISHED: + DEBUGF(LOG_CF(data, cf, "new tunnel state 'established'")); + infof(data, "CONNECT phase completed"); + data->state.authproxy.done = TRUE; + data->state.authproxy.multipass = FALSE; + /* FALLTHROUGH */ + case TUNNEL_FAILED: + if(new_state == TUNNEL_FAILED) + DEBUGF(LOG_CF(data, cf, "new tunnel state 'failed'")); + ts->tunnel_state = new_state; + Curl_dyn_reset(&ts->rcvbuf); + Curl_dyn_reset(&ts->req); + /* restore the protocol pointer */ + data->info.httpcode = 0; /* clear it as it might've been used for the + proxy */ + /* If a proxy-authorization header was used for the proxy, then we should + make sure that it isn't accidentally used for the document request + after we've connected. So let's free and clear it here. 
*/ + Curl_safefree(data->state.aptr.proxyuserpwd); + data->state.aptr.proxyuserpwd = NULL; +#ifdef USE_HYPER + data->state.hconnect = FALSE; +#endif + break; + } +} + +static void tunnel_free(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + struct tunnel_state *ts = cf->ctx; + if(ts) { + tunnel_go_state(cf, ts, TUNNEL_FAILED, data); + Curl_dyn_free(&ts->rcvbuf); + Curl_dyn_free(&ts->req); + free(ts); + cf->ctx = NULL; + } +} + +static CURLcode CONNECT_host(struct Curl_easy *data, + struct connectdata *conn, + const char *hostname, + int remote_port, + char **connecthostp, + char **hostp) +{ + char *hostheader; /* for CONNECT */ + char *host = NULL; /* Host: */ + bool ipv6_ip = conn->bits.ipv6_ip; + + /* the hostname may be different */ + if(hostname != conn->host.name) + ipv6_ip = (strchr(hostname, ':') != NULL); + hostheader = /* host:port with IPv6 support */ + aprintf("%s%s%s:%d", ipv6_ip?"[":"", hostname, ipv6_ip?"]":"", + remote_port); + if(!hostheader) + return CURLE_OUT_OF_MEMORY; + + if(!Curl_checkProxyheaders(data, conn, STRCONST("Host"))) { + host = aprintf("Host: %s\r\n", hostheader); + if(!host) { + free(hostheader); + return CURLE_OUT_OF_MEMORY; + } + } + *connecthostp = hostheader; + *hostp = host; + return CURLE_OK; +} + +#ifndef USE_HYPER +static CURLcode start_CONNECT(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_state *ts) +{ + struct connectdata *conn = cf->conn; + char *hostheader = NULL; + char *host = NULL; + const char *httpv; + CURLcode result; + + infof(data, "Establish HTTP proxy tunnel to %s:%d", + ts->hostname, ts->remote_port); + + /* This only happens if we've looped here due to authentication + reasons, and we don't really use the newly cloned URL here + then. Just free() it. 
*/ + Curl_safefree(data->req.newurl); + + result = CONNECT_host(data, conn, + ts->hostname, ts->remote_port, + &hostheader, &host); + if(result) + goto out; + + /* Setup the proxy-authorization header, if any */ + result = Curl_http_output_auth(data, conn, "CONNECT", HTTPREQ_GET, + hostheader, TRUE); + if(result) + goto out; + + httpv = (conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) ? "1.0" : "1.1"; + + result = + Curl_dyn_addf(&ts->req, + "CONNECT %s HTTP/%s\r\n" + "%s" /* Host: */ + "%s", /* Proxy-Authorization */ + hostheader, + httpv, + host?host:"", + data->state.aptr.proxyuserpwd? + data->state.aptr.proxyuserpwd:""); + if(result) + goto out; + + if(!Curl_checkProxyheaders(data, conn, STRCONST("User-Agent")) + && data->set.str[STRING_USERAGENT]) + result = Curl_dyn_addf(&ts->req, "User-Agent: %s\r\n", + data->set.str[STRING_USERAGENT]); + if(result) + goto out; + + if(!Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection"))) + result = Curl_dyn_addn(&ts->req, + STRCONST("Proxy-Connection: Keep-Alive\r\n")); + if(result) + goto out; + + result = Curl_add_custom_headers(data, TRUE, &ts->req); + if(result) + goto out; + + /* CRLF terminate the request */ + result = Curl_dyn_addn(&ts->req, STRCONST("\r\n")); + if(result) + goto out; + + /* Send the connect request to the proxy */ + result = Curl_buffer_send(&ts->req, data, &ts->CONNECT, + &data->info.request_size, 0, + ts->sockindex); + ts->headerlines = 0; + +out: + if(result) + failf(data, "Failed sending CONNECT to proxy"); + free(host); + free(hostheader); + return result; +} + +static CURLcode send_CONNECT(struct Curl_easy *data, + struct connectdata *conn, + struct tunnel_state *ts, + bool *done) +{ + struct SingleRequest *k = &data->req; + struct HTTP *http = &ts->CONNECT; + CURLcode result = CURLE_OK; + + if(http->sending != HTTPSEND_REQUEST) + goto out; + + if(!ts->nsend) { + size_t fillcount; + k->upload_fromhere = data->state.ulbuf; + result = Curl_fillreadbuffer(data, 
data->set.upload_buffer_size, + &fillcount); + if(result) + goto out; + ts->nsend = fillcount; + } + if(ts->nsend) { + ssize_t bytes_written; + /* write to socket (send away data) */ + result = Curl_write(data, + conn->writesockfd, /* socket to send to */ + k->upload_fromhere, /* buffer pointer */ + ts->nsend, /* buffer size */ + &bytes_written); /* actually sent */ + if(result) + goto out; + /* send to debug callback! */ + Curl_debug(data, CURLINFO_HEADER_OUT, + k->upload_fromhere, bytes_written); + + ts->nsend -= bytes_written; + k->upload_fromhere += bytes_written; + } + if(!ts->nsend) + http->sending = HTTPSEND_NADA; + +out: + if(result) + failf(data, "Failed sending CONNECT to proxy"); + *done = (http->sending != HTTPSEND_REQUEST); + return result; +} + +static CURLcode on_resp_header(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_state *ts, + const char *header) +{ + CURLcode result = CURLE_OK; + struct SingleRequest *k = &data->req; + (void)cf; + + if((checkprefix("WWW-Authenticate:", header) && + (401 == k->httpcode)) || + (checkprefix("Proxy-authenticate:", header) && + (407 == k->httpcode))) { + + bool proxy = (k->httpcode == 407) ? TRUE : FALSE; + char *auth = Curl_copy_header_value(header); + if(!auth) + return CURLE_OUT_OF_MEMORY; + + DEBUGF(LOG_CF(data, cf, "CONNECT: fwd auth header '%s'", header)); + result = Curl_http_input_auth(data, proxy, auth); + + free(auth); + + if(result) + return result; + } + else if(checkprefix("Content-Length:", header)) { + if(k->httpcode/100 == 2) { + /* A client MUST ignore any Content-Length or Transfer-Encoding + header fields received in a successful response to CONNECT. + "Successful" described as: 2xx (Successful). 
RFC 7231 4.3.6 */ + infof(data, "Ignoring Content-Length in CONNECT %03d response", + k->httpcode); + } + else { + (void)curlx_strtoofft(header + strlen("Content-Length:"), + NULL, 10, &ts->cl); + } + } + else if(Curl_compareheader(header, + STRCONST("Connection:"), STRCONST("close"))) + ts->close_connection = TRUE; + else if(checkprefix("Transfer-Encoding:", header)) { + if(k->httpcode/100 == 2) { + /* A client MUST ignore any Content-Length or Transfer-Encoding + header fields received in a successful response to CONNECT. + "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */ + infof(data, "Ignoring Transfer-Encoding in " + "CONNECT %03d response", k->httpcode); + } + else if(Curl_compareheader(header, + STRCONST("Transfer-Encoding:"), + STRCONST("chunked"))) { + infof(data, "CONNECT responded chunked"); + ts->chunked_encoding = TRUE; + /* init our chunky engine */ + Curl_httpchunk_init(data); + } + } + else if(Curl_compareheader(header, + STRCONST("Proxy-Connection:"), + STRCONST("close"))) + ts->close_connection = TRUE; + else if(!strncmp(header, "HTTP/1.", 7) && + ((header[7] == '0') || (header[7] == '1')) && + (header[8] == ' ') && + ISDIGIT(header[9]) && ISDIGIT(header[10]) && ISDIGIT(header[11]) && + !ISDIGIT(header[12])) { + /* store the HTTP code from the proxy */ + data->info.httpproxycode = k->httpcode = (header[9] - '0') * 100 + + (header[10] - '0') * 10 + (header[11] - '0'); + } + return result; +} + +static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_state *ts, + bool *done) +{ + CURLcode result = CURLE_OK; + struct SingleRequest *k = &data->req; + curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data); + char *linep; + size_t perline; + int error; + +#define SELECT_OK 0 +#define SELECT_ERROR 1 + + error = SELECT_OK; + *done = FALSE; + + if(!Curl_conn_data_pending(data, ts->sockindex)) + return CURLE_OK; + + while(ts->keepon) { + ssize_t gotbytes; + char byte; + + /* Read one byte at 
a time to avoid a race condition. Wait at most one + second before looping to ensure continuous pgrsUpdates. */ + result = Curl_read(data, tunnelsocket, &byte, 1, &gotbytes); + if(result == CURLE_AGAIN) + /* socket buffer drained, return */ + return CURLE_OK; + + if(Curl_pgrsUpdate(data)) + return CURLE_ABORTED_BY_CALLBACK; + + if(result) { + ts->keepon = KEEPON_DONE; + break; + } + + if(gotbytes <= 0) { + if(data->set.proxyauth && data->state.authproxy.avail && + data->state.aptr.proxyuserpwd) { + /* proxy auth was requested and there was proxy auth available, + then deem this as "mere" proxy disconnect */ + ts->close_connection = TRUE; + infof(data, "Proxy CONNECT connection closed"); + } + else { + error = SELECT_ERROR; + failf(data, "Proxy CONNECT aborted"); + } + ts->keepon = KEEPON_DONE; + break; + } + + if(ts->keepon == KEEPON_IGNORE) { + /* This means we are currently ignoring a response-body */ + + if(ts->cl) { + /* A Content-Length based body: simply count down the counter + and make sure to break out of the loop when we're done! */ + ts->cl--; + if(ts->cl <= 0) { + ts->keepon = KEEPON_DONE; + break; + } + } + else { + /* chunked-encoded body, so we need to do the chunked dance + properly to know when the end of the body is reached */ + CHUNKcode r; + CURLcode extra; + ssize_t tookcareof = 0; + + /* now parse the chunked piece of data so that we can + properly tell when the stream ends */ + r = Curl_httpchunk_read(data, &byte, 1, &tookcareof, &extra); + if(r == CHUNKE_STOP) { + /* we're done reading chunks! 
*/ + infof(data, "chunk reading DONE"); + ts->keepon = KEEPON_DONE; + } + } + continue; + } + + if(Curl_dyn_addn(&ts->rcvbuf, &byte, 1)) { + failf(data, "CONNECT response too large"); + return CURLE_RECV_ERROR; + } + + /* if this is not the end of a header line then continue */ + if(byte != 0x0a) + continue; + + ts->headerlines++; + linep = Curl_dyn_ptr(&ts->rcvbuf); + perline = Curl_dyn_len(&ts->rcvbuf); /* amount of bytes in this line */ + + /* output debug if that is requested */ + Curl_debug(data, CURLINFO_HEADER_IN, linep, perline); + + if(!data->set.suppress_connect_headers) { + /* send the header to the callback */ + int writetype = CLIENTWRITE_HEADER | CLIENTWRITE_CONNECT | + (data->set.include_header ? CLIENTWRITE_BODY : 0) | + (ts->headerlines == 1 ? CLIENTWRITE_STATUS : 0); + + result = Curl_client_write(data, writetype, linep, perline); + if(result) + return result; + } + + data->info.header_size += (long)perline; + + /* Newlines are CRLF, so the CR is ignored as the line isn't + really terminated until the LF comes. Treat a following CR + as end-of-headers as well.*/ + + if(('\r' == linep[0]) || + ('\n' == linep[0])) { + /* end of response-headers from the proxy */ + + if((407 == k->httpcode) && !data->state.authproblem) { + /* If we get a 407 response code with content length + when we have no auth problem, we must ignore the + whole response-body */ + ts->keepon = KEEPON_IGNORE; + + if(ts->cl) { + infof(data, "Ignore %" CURL_FORMAT_CURL_OFF_T + " bytes of response-body", ts->cl); + } + else if(ts->chunked_encoding) { + CHUNKcode r; + CURLcode extra; + + infof(data, "Ignore chunked response-body"); + + /* We set ignorebody true here since the chunked decoder + function will acknowledge that. Pay attention so that this is + cleared again when this function returns! 
*/ + k->ignorebody = TRUE; + + if(linep[1] == '\n') + /* this can only be a LF if the letter at index 0 was a CR */ + linep++; + + /* now parse the chunked piece of data so that we can properly + tell when the stream ends */ + r = Curl_httpchunk_read(data, linep + 1, 1, &gotbytes, + &extra); + if(r == CHUNKE_STOP) { + /* we're done reading chunks! */ + infof(data, "chunk reading DONE"); + ts->keepon = KEEPON_DONE; + } + } + else { + /* without content-length or chunked encoding, we + can't keep the connection alive since the close is + the end signal so we bail out at once instead */ + DEBUGF(LOG_CF(data, cf, "CONNECT: no content-length or chunked")); + ts->keepon = KEEPON_DONE; + } + } + else { + ts->keepon = KEEPON_DONE; + } + + DEBUGASSERT(ts->keepon == KEEPON_IGNORE + || ts->keepon == KEEPON_DONE); + continue; + } + + result = on_resp_header(cf, data, ts, linep); + if(result) + return result; + + Curl_dyn_reset(&ts->rcvbuf); + } /* while there's buffer left and loop is requested */ + + if(error) + result = CURLE_RECV_ERROR; + *done = (ts->keepon == KEEPON_DONE); + if(!result && *done && data->info.httpproxycode/100 != 2) { + /* Deal with the possibly already received authenticate + headers. 'newurl' is set to a new URL if we must loop. 
*/ + result = Curl_http_auth_act(data); + } + return result; +} + +#else /* USE_HYPER */ +/* The Hyper version of CONNECT */ +static CURLcode start_CONNECT(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_state *ts) +{ + struct connectdata *conn = cf->conn; + struct hyptransfer *h = &data->hyp; + curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data); + hyper_io *io = NULL; + hyper_request *req = NULL; + hyper_headers *headers = NULL; + hyper_clientconn_options *options = NULL; + hyper_task *handshake = NULL; + hyper_task *task = NULL; /* for the handshake */ + hyper_clientconn *client = NULL; + hyper_task *sendtask = NULL; /* for the send */ + char *hostheader = NULL; /* for CONNECT */ + char *host = NULL; /* Host: */ + CURLcode result = CURLE_OUT_OF_MEMORY; + + io = hyper_io_new(); + if(!io) { + failf(data, "Couldn't create hyper IO"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + /* tell Hyper how to read/write network data */ + hyper_io_set_userdata(io, data); + hyper_io_set_read(io, Curl_hyper_recv); + hyper_io_set_write(io, Curl_hyper_send); + conn->sockfd = tunnelsocket; + + data->state.hconnect = TRUE; + + /* create an executor to poll futures */ + if(!h->exec) { + h->exec = hyper_executor_new(); + if(!h->exec) { + failf(data, "Couldn't create hyper executor"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + } + + options = hyper_clientconn_options_new(); + hyper_clientconn_options_set_preserve_header_case(options, 1); + hyper_clientconn_options_set_preserve_header_order(options, 1); + + if(!options) { + failf(data, "Couldn't create hyper client options"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + + hyper_clientconn_options_exec(options, h->exec); + + /* "Both the `io` and the `options` are consumed in this function + call" */ + handshake = hyper_clientconn_handshake(io, options); + if(!handshake) { + failf(data, "Couldn't create hyper client handshake"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + io = 
NULL; + options = NULL; + + if(HYPERE_OK != hyper_executor_push(h->exec, handshake)) { + failf(data, "Couldn't hyper_executor_push the handshake"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + handshake = NULL; /* ownership passed on */ + + task = hyper_executor_poll(h->exec); + if(!task) { + failf(data, "Couldn't hyper_executor_poll the handshake"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + + client = hyper_task_value(task); + hyper_task_free(task); + req = hyper_request_new(); + if(!req) { + failf(data, "Couldn't hyper_request_new"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + if(hyper_request_set_method(req, (uint8_t *)"CONNECT", + strlen("CONNECT"))) { + failf(data, "error setting method"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + + infof(data, "Establish HTTP proxy tunnel to %s:%d", + ts->hostname, ts->remote_port); + + /* This only happens if we've looped here due to authentication + reasons, and we don't really use the newly cloned URL here + then. Just free() it. 
*/ + Curl_safefree(data->req.newurl); + + result = CONNECT_host(data, conn, ts->hostname, ts->remote_port, + &hostheader, &host); + if(result) + goto error; + + if(hyper_request_set_uri(req, (uint8_t *)hostheader, + strlen(hostheader))) { + failf(data, "error setting path"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + if(data->set.verbose) { + char *se = aprintf("CONNECT %s HTTP/1.1\r\n", hostheader); + if(!se) { + result = CURLE_OUT_OF_MEMORY; + goto error; + } + Curl_debug(data, CURLINFO_HEADER_OUT, se, strlen(se)); + free(se); + } + /* Setup the proxy-authorization header, if any */ + result = Curl_http_output_auth(data, conn, "CONNECT", HTTPREQ_GET, + hostheader, TRUE); + if(result) + goto error; + Curl_safefree(hostheader); + + /* default is 1.1 */ + if((conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) && + (HYPERE_OK != hyper_request_set_version(req, + HYPER_HTTP_VERSION_1_0))) { + failf(data, "error setting HTTP version"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + + headers = hyper_request_headers(req); + if(!headers) { + failf(data, "hyper_request_headers"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + if(host) { + result = Curl_hyper_header(data, headers, host); + if(result) + goto error; + Curl_safefree(host); + } + + if(data->state.aptr.proxyuserpwd) { + result = Curl_hyper_header(data, headers, + data->state.aptr.proxyuserpwd); + if(result) + goto error; + } + + if(!Curl_checkProxyheaders(data, conn, STRCONST("User-Agent")) && + data->set.str[STRING_USERAGENT]) { + struct dynbuf ua; + Curl_dyn_init(&ua, DYN_HTTP_REQUEST); + result = Curl_dyn_addf(&ua, "User-Agent: %s\r\n", + data->set.str[STRING_USERAGENT]); + if(result) + goto error; + result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&ua)); + if(result) + goto error; + Curl_dyn_free(&ua); + } + + if(!Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection"))) { + result = Curl_hyper_header(data, headers, + "Proxy-Connection: Keep-Alive"); + if(result) + goto error; 
+ } + + result = Curl_add_custom_headers(data, TRUE, headers); + if(result) + goto error; + + sendtask = hyper_clientconn_send(client, req); + if(!sendtask) { + failf(data, "hyper_clientconn_send"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + + if(HYPERE_OK != hyper_executor_push(h->exec, sendtask)) { + failf(data, "Couldn't hyper_executor_push the send"); + result = CURLE_OUT_OF_MEMORY; + goto error; + } + +error: + free(host); + free(hostheader); + if(io) + hyper_io_free(io); + if(options) + hyper_clientconn_options_free(options); + if(handshake) + hyper_task_free(handshake); + if(client) + hyper_clientconn_free(client); + return result; +} + +static CURLcode send_CONNECT(struct Curl_easy *data, + struct connectdata *conn, + struct tunnel_state *ts, + bool *done) +{ + struct hyptransfer *h = &data->hyp; + hyper_task *task = NULL; + hyper_error *hypererr = NULL; + CURLcode result = CURLE_OK; + + (void)ts; + (void)conn; + do { + task = hyper_executor_poll(h->exec); + if(task) { + bool error = hyper_task_type(task) == HYPER_TASK_ERROR; + if(error) + hypererr = hyper_task_value(task); + hyper_task_free(task); + if(error) { + /* this could probably use a better error code? 
*/ + result = CURLE_OUT_OF_MEMORY; + goto error; + } + } + } while(task); +error: + *done = (result == CURLE_OK); + if(hypererr) { + uint8_t errbuf[256]; + size_t errlen = hyper_error_print(hypererr, errbuf, sizeof(errbuf)); + failf(data, "Hyper: %.*s", (int)errlen, errbuf); + hyper_error_free(hypererr); + } + return result; +} + +static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_state *ts, + bool *done) +{ + struct hyptransfer *h = &data->hyp; + CURLcode result; + int didwhat; + + (void)ts; + *done = FALSE; + result = Curl_hyper_stream(data, cf->conn, &didwhat, done, + CURL_CSELECT_IN | CURL_CSELECT_OUT); + if(result || !*done) + return result; + if(h->exec) { + hyper_executor_free(h->exec); + h->exec = NULL; + } + if(h->read_waker) { + hyper_waker_free(h->read_waker); + h->read_waker = NULL; + } + if(h->write_waker) { + hyper_waker_free(h->write_waker); + h->write_waker = NULL; + } + return result; +} + +#endif /* USE_HYPER */ + +static CURLcode CONNECT(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_state *ts) +{ + struct connectdata *conn = cf->conn; + CURLcode result; + bool done; + + if(tunnel_is_established(ts)) + return CURLE_OK; + if(tunnel_is_failed(ts)) + return CURLE_RECV_ERROR; /* Need a cfilter close and new bootstrap */ + + do { + timediff_t check; + + check = Curl_timeleft(data, NULL, TRUE); + if(check <= 0) { + failf(data, "Proxy CONNECT aborted due to timeout"); + result = CURLE_OPERATION_TIMEDOUT; + goto out; + } + + switch(ts->tunnel_state) { + case TUNNEL_INIT: + /* Prepare the CONNECT request and make a first attempt to send. 
*/ + DEBUGF(LOG_CF(data, cf, "CONNECT start")); + result = start_CONNECT(cf, data, ts); + if(result) + goto out; + tunnel_go_state(cf, ts, TUNNEL_CONNECT, data); + /* FALLTHROUGH */ + + case TUNNEL_CONNECT: + /* see that the request is completely sent */ + DEBUGF(LOG_CF(data, cf, "CONNECT send")); + result = send_CONNECT(data, cf->conn, ts, &done); + if(result || !done) + goto out; + tunnel_go_state(cf, ts, TUNNEL_RECEIVE, data); + /* FALLTHROUGH */ + + case TUNNEL_RECEIVE: + /* read what is there */ + DEBUGF(LOG_CF(data, cf, "CONNECT receive")); + result = recv_CONNECT_resp(cf, data, ts, &done); + if(Curl_pgrsUpdate(data)) { + result = CURLE_ABORTED_BY_CALLBACK; + goto out; + } + /* error or not complete yet. return for more multi-multi */ + if(result || !done) + goto out; + /* got it */ + tunnel_go_state(cf, ts, TUNNEL_RESPONSE, data); + /* FALLTHROUGH */ + + case TUNNEL_RESPONSE: + DEBUGF(LOG_CF(data, cf, "CONNECT response")); + if(data->req.newurl) { + /* not the "final" response, we need to do a follow up request. + * If the other side indicated a connection close, or if someone + * else told us to close this connection, do so now. + */ + if(ts->close_connection || conn->bits.close) { + /* Close this filter and the sub-chain, re-connect the + * sub-chain and continue. Closing this filter will + * reset our tunnel state. To avoid recursion, we return + * and expect to be called again. + */ + DEBUGF(LOG_CF(data, cf, "CONNECT need to close+open")); + infof(data, "Connect me again please"); + Curl_conn_cf_close(cf, data); + connkeep(conn, "HTTP proxy CONNECT"); + result = Curl_conn_cf_connect(cf->next, data, FALSE, &done); + goto out; + } + else { + /* staying on this connection, reset state */ + tunnel_go_state(cf, ts, TUNNEL_INIT, data); + } + } + break; + + default: + break; + } + + } while(data->req.newurl); + + DEBUGASSERT(ts->tunnel_state == TUNNEL_RESPONSE); + if(data->info.httpproxycode/100 != 2) { + /* a non-2xx response and we have no next url to try. 
*/ + free(data->req.newurl); + data->req.newurl = NULL; + /* failure, close this connection to avoid re-use */ + streamclose(conn, "proxy CONNECT failure"); + tunnel_go_state(cf, ts, TUNNEL_FAILED, data); + failf(data, "CONNECT tunnel failed, response %d", data->req.httpcode); + return CURLE_RECV_ERROR; + } + /* 2xx response, SUCCESS! */ + tunnel_go_state(cf, ts, TUNNEL_ESTABLISHED, data); + infof(data, "CONNECT tunnel established, response %d", + data->info.httpproxycode); + result = CURLE_OK; + +out: + if(result) + tunnel_go_state(cf, ts, TUNNEL_FAILED, data); + return result; +} + +static CURLcode cf_h1_proxy_connect(struct Curl_cfilter *cf, + struct Curl_easy *data, + bool blocking, bool *done) +{ + CURLcode result; + struct tunnel_state *ts = cf->ctx; + + if(cf->connected) { + *done = TRUE; + return CURLE_OK; + } + + DEBUGF(LOG_CF(data, cf, "connect")); + result = cf->next->cft->connect(cf->next, data, blocking, done); + if(result || !*done) + return result; + + *done = FALSE; + if(!ts) { + result = tunnel_init(&ts, data, cf->conn, cf->sockindex); + if(result) + return result; + cf->ctx = ts; + } + + /* TODO: can we do blocking? */ + /* We want "seamless" operations through HTTP proxy tunnel */ + + result = CONNECT(cf, data, ts); + if(result) + goto out; + Curl_safefree(data->state.aptr.proxyuserpwd); + +out: + *done = (result == CURLE_OK) && tunnel_is_established(cf->ctx); + if (*done) { + cf->connected = TRUE; + tunnel_free(cf, data); + } + return result; +} + +static int cf_h1_proxy_get_select_socks(struct Curl_cfilter *cf, + struct Curl_easy *data, + curl_socket_t *socks) +{ + struct tunnel_state *ts = cf->ctx; + int fds; + + fds = cf->next->cft->get_select_socks(cf->next, data, socks); + if(!fds && cf->next->connected && !cf->connected) { + /* If we are not connected, but the filter "below" is + * and not waiting on something, we are tunneling. 
*/ + socks[0] = Curl_conn_cf_get_socket(cf, data); + if(ts) { + /* when we've sent a CONNECT to a proxy, we should rather either + wait for the socket to become readable to be able to get the + response headers or if we're still sending the request, wait + for write. */ + if(ts->CONNECT.sending == HTTPSEND_REQUEST) { + return GETSOCK_WRITESOCK(0); + } + return GETSOCK_READSOCK(0); + } + return GETSOCK_WRITESOCK(0); + } + return fds; +} + +static void cf_h1_proxy_destroy(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + DEBUGF(LOG_CF(data, cf, "destroy")); + tunnel_free(cf, data); +} + +static void cf_h1_proxy_close(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + DEBUGF(LOG_CF(data, cf, "close")); + cf->connected = FALSE; + if(cf->ctx) { + tunnel_go_state(cf, cf->ctx, TUNNEL_INIT, data); + } + if(cf->next) + cf->next->cft->close(cf->next, data); +} + + +struct Curl_cftype Curl_cft_h1_proxy = { + "H1-PROXY", + CF_TYPE_IP_CONNECT, + 0, + cf_h1_proxy_destroy, + cf_h1_proxy_connect, + cf_h1_proxy_close, + Curl_cf_http_proxy_get_host, + cf_h1_proxy_get_select_socks, + Curl_cf_def_data_pending, + Curl_cf_def_send, + Curl_cf_def_recv, + Curl_cf_def_cntrl, + Curl_cf_def_conn_is_alive, + Curl_cf_def_conn_keep_alive, + Curl_cf_def_query, +}; + +CURLcode Curl_cf_h1_proxy_insert_after(struct Curl_cfilter *cf_at, + struct Curl_easy *data) +{ + struct Curl_cfilter *cf; + CURLcode result; + + (void)data; + result = Curl_cf_create(&cf, &Curl_cft_h1_proxy, NULL); + if(!result) + Curl_conn_cf_insert_after(cf_at, cf); + return result; +} + +#endif /* !CURL_DISABLE_PROXY && ! 
CURL_DISABLE_HTTP */ diff --git a/lib/cf-h1-proxy.h b/lib/cf-h1-proxy.h new file mode 100644 index 000000000..ac5bed0b2 --- /dev/null +++ b/lib/cf-h1-proxy.h @@ -0,0 +1,39 @@ +#ifndef HEADER_CURL_H1_PROXY_H +#define HEADER_CURL_H1_PROXY_H +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) Daniel Stenberg, , et al. + * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at https://curl.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. + * + * SPDX-License-Identifier: curl + * + ***************************************************************************/ + +#include "curl_setup.h" + +#if !defined(CURL_DISABLE_PROXY) && !defined(CURL_DISABLE_HTTP) + +CURLcode Curl_cf_h1_proxy_insert_after(struct Curl_cfilter *cf, + struct Curl_easy *data); + +extern struct Curl_cftype Curl_cft_h1_proxy; + + +#endif /* !CURL_DISABLE_PROXY && !CURL_DISABLE_HTTP */ + +#endif /* HEADER_CURL_H1_PROXY_H */ diff --git a/lib/cf-h2-proxy.c b/lib/cf-h2-proxy.c new file mode 100644 index 000000000..e39d32575 --- /dev/null +++ b/lib/cf-h2-proxy.c @@ -0,0 +1,1388 @@ +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) Daniel Stenberg, , et al. 
+ * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at https://curl.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. + * + * SPDX-License-Identifier: curl + * + ***************************************************************************/ + +#include "curl_setup.h" + +#if defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY) + +#include +#include "urldata.h" +#include "cfilters.h" +#include "connect.h" +#include "curl_log.h" +#include "bufq.h" +#include "dynbuf.h" +#include "dynhds.h" +#include "h2h3.h" +#include "http_proxy.h" +#include "multiif.h" +#include "cf-h2-proxy.h" + +/* The last 3 #include files should be in this order */ +#include "curl_printf.h" +#include "curl_memory.h" +#include "memdebug.h" + +#define H2_NW_CHUNK_SIZE (128*1024) +#define H2_NW_RECV_CHUNKS 1 +#define H2_NW_SEND_CHUNKS 1 + +#define HTTP2_HUGE_WINDOW_SIZE (32 * 1024 * 1024) /* 32 MB */ + +#define H2_TUNNEL_WINDOW_SIZE (1024 * 1024) +#define H2_TUNNEL_CHUNK_SIZE (32 * 1024) +#define H2_TUNNEL_RECV_CHUNKS \ + (H2_TUNNEL_WINDOW_SIZE / H2_TUNNEL_CHUNK_SIZE) +#define H2_TUNNEL_SEND_CHUNKS \ + (H2_TUNNEL_WINDOW_SIZE / H2_TUNNEL_CHUNK_SIZE) + +typedef enum { + TUNNEL_INIT, /* init/default/no tunnel state */ + TUNNEL_CONNECT, /* CONNECT request is being send */ + TUNNEL_RESPONSE, /* CONNECT response received completely */ + TUNNEL_ESTABLISHED, + TUNNEL_FAILED +} tunnel_state; + +struct tunnel_stream { + struct http_resp *resp; + struct bufq recvbuf; + struct bufq sendbuf; + char *authority; + int32_t stream_id; + uint32_t error; + tunnel_state state; + bool has_final_response; + 
bool closed; + bool reset; +}; + +static CURLcode tunnel_stream_init(struct Curl_cfilter *cf, + struct tunnel_stream *ts) +{ + const char *hostname; + int port; + bool ipv6_ip = cf->conn->bits.ipv6_ip; + + ts->state = TUNNEL_INIT; + ts->stream_id = -1; + Curl_bufq_init2(&ts->recvbuf, H2_TUNNEL_CHUNK_SIZE, H2_TUNNEL_RECV_CHUNKS, + BUFQ_OPT_SOFT_LIMIT); + Curl_bufq_init(&ts->sendbuf, H2_TUNNEL_CHUNK_SIZE, H2_TUNNEL_SEND_CHUNKS); + + if(cf->conn->bits.conn_to_host) + hostname = cf->conn->conn_to_host.name; + else if(cf->sockindex == SECONDARYSOCKET) + hostname = cf->conn->secondaryhostname; + else + hostname = cf->conn->host.name; + + if(cf->sockindex == SECONDARYSOCKET) + port = cf->conn->secondary_port; + else if(cf->conn->bits.conn_to_port) + port = cf->conn->conn_to_port; + else + port = cf->conn->remote_port; + + if(hostname != cf->conn->host.name) + ipv6_ip = (strchr(hostname, ':') != NULL); + + ts->authority = /* host:port with IPv6 support */ + aprintf("%s%s%s:%d", ipv6_ip?"[":"", hostname, ipv6_ip?"]":"", port); + if(!ts->authority) + return CURLE_OUT_OF_MEMORY; + + return CURLE_OK; +} + +static void tunnel_stream_clear(struct tunnel_stream *ts) +{ + Curl_http_resp_free(ts->resp); + Curl_bufq_free(&ts->recvbuf); + Curl_bufq_free(&ts->sendbuf); + Curl_safefree(ts->authority); + memset(ts, 0, sizeof(*ts)); + ts->state = TUNNEL_INIT; +} + +static void tunnel_go_state(struct Curl_cfilter *cf, + struct tunnel_stream *ts, + tunnel_state new_state, + struct Curl_easy *data) +{ + (void)cf; + + if(ts->state == new_state) + return; + /* leaving this one */ + switch(ts->state) { + case TUNNEL_CONNECT: + data->req.ignorebody = FALSE; + break; + default: + break; + } + /* entering this one */ + switch(new_state) { + case TUNNEL_INIT: + DEBUGF(LOG_CF(data, cf, "new tunnel state 'init'")); + tunnel_stream_clear(ts); + break; + + case TUNNEL_CONNECT: + DEBUGF(LOG_CF(data, cf, "new tunnel state 'connect'")); + ts->state = TUNNEL_CONNECT; + break; + + case TUNNEL_RESPONSE: + 
DEBUGF(LOG_CF(data, cf, "new tunnel state 'response'")); + ts->state = TUNNEL_RESPONSE; + break; + + case TUNNEL_ESTABLISHED: + DEBUGF(LOG_CF(data, cf, "new tunnel state 'established'")); + infof(data, "CONNECT phase completed"); + data->state.authproxy.done = TRUE; + data->state.authproxy.multipass = FALSE; + /* FALLTHROUGH */ + case TUNNEL_FAILED: + if(new_state == TUNNEL_FAILED) + DEBUGF(LOG_CF(data, cf, "new tunnel state 'failed'")); + ts->state = new_state; + /* If a proxy-authorization header was used for the proxy, then we should + make sure that it isn't accidentally used for the document request + after we've connected. So let's free and clear it here. */ + Curl_safefree(data->state.aptr.proxyuserpwd); + break; + } +} + +struct cf_h2_proxy_ctx { + nghttp2_session *h2; + /* The easy handle used in the current filter call, cleared at return */ + struct cf_call_data call_data; + + struct bufq inbufq; /* network receive buffer */ + struct bufq outbufq; /* network send buffer */ + + struct tunnel_stream tunnel; /* our tunnel CONNECT stream */ + int32_t goaway_error; + int32_t last_stream_id; + BIT(conn_closed); + BIT(goaway); +}; + +/* How to access `call_data` from a cf_h2 filter */ +#define CF_CTX_CALL_DATA(cf) \ + ((struct cf_h2_proxy_ctx *)(cf)->ctx)->call_data + +static void cf_h2_proxy_ctx_clear(struct cf_h2_proxy_ctx *ctx) +{ + struct cf_call_data save = ctx->call_data; + + if(ctx->h2) { + nghttp2_session_del(ctx->h2); + } + Curl_bufq_free(&ctx->inbufq); + Curl_bufq_free(&ctx->outbufq); + tunnel_stream_clear(&ctx->tunnel); + memset(ctx, 0, sizeof(*ctx)); + ctx->call_data = save; +} + +static void cf_h2_proxy_ctx_free(struct cf_h2_proxy_ctx *ctx) +{ + if(ctx) { + cf_h2_proxy_ctx_clear(ctx); + free(ctx); + } +} + +static ssize_t nw_in_reader(void *reader_ctx, + unsigned char *buf, size_t buflen, + CURLcode *err) +{ + struct Curl_cfilter *cf = reader_ctx; + struct Curl_easy *data = CF_DATA_CURRENT(cf); + ssize_t nread; + + nread = 
Curl_conn_cf_recv(cf->next, data, (char *)buf, buflen, err); + DEBUGF(LOG_CF(data, cf, "nw_in recv(len=%zu) -> %zd, %d", + buflen, nread, *err)); + return nread; +} + +static ssize_t nw_out_writer(void *writer_ctx, + const unsigned char *buf, size_t buflen, + CURLcode *err) +{ + struct Curl_cfilter *cf = writer_ctx; + struct Curl_easy *data = CF_DATA_CURRENT(cf); + ssize_t nwritten; + + nwritten = Curl_conn_cf_send(cf->next, data, (const char *)buf, buflen, err); + DEBUGF(LOG_CF(data, cf, "nw_out send(len=%zu) -> %zd", buflen, nwritten)); + return nwritten; +} + +static int h2_client_new(struct Curl_cfilter *cf, + nghttp2_session_callbacks *cbs) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + nghttp2_option *o; + + int rc = nghttp2_option_new(&o); + if(rc) + return rc; + /* We handle window updates ourself to enfore buffer limits */ + nghttp2_option_set_no_auto_window_update(o, 1); +#if NGHTTP2_VERSION_NUM >= 0x013200 + /* with 1.50.0 */ + /* turn off RFC 9113 leading and trailing white spaces validation against + HTTP field value. 
*/ + nghttp2_option_set_no_rfc9113_leading_and_trailing_ws_validation(o, 1); +#endif + rc = nghttp2_session_client_new2(&ctx->h2, cbs, cf, o); + nghttp2_option_del(o); + return rc; +} + +static ssize_t on_session_send(nghttp2_session *h2, + const uint8_t *buf, size_t blen, + int flags, void *userp); +static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame, + void *userp); +static int on_stream_close(nghttp2_session *session, int32_t stream_id, + uint32_t error_code, void *userp); +static int on_header(nghttp2_session *session, const nghttp2_frame *frame, + const uint8_t *name, size_t namelen, + const uint8_t *value, size_t valuelen, + uint8_t flags, + void *userp); +static int tunnel_recv_callback(nghttp2_session *session, uint8_t flags, + int32_t stream_id, + const uint8_t *mem, size_t len, void *userp); + +/* + * Initialize the cfilter context + */ +static CURLcode cf_h2_proxy_ctx_init(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + CURLcode result = CURLE_OUT_OF_MEMORY; + nghttp2_session_callbacks *cbs = NULL; + int rc; + + DEBUGASSERT(!ctx->h2); + memset(&ctx->tunnel, 0, sizeof(ctx->tunnel)); + + Curl_bufq_init(&ctx->inbufq, H2_NW_CHUNK_SIZE, H2_NW_RECV_CHUNKS); + Curl_bufq_init(&ctx->outbufq, H2_NW_CHUNK_SIZE, H2_NW_SEND_CHUNKS); + + if(tunnel_stream_init(cf, &ctx->tunnel)) + goto out; + + rc = nghttp2_session_callbacks_new(&cbs); + if(rc) { + failf(data, "Couldn't initialize nghttp2 callbacks"); + goto out; + } + + nghttp2_session_callbacks_set_send_callback(cbs, on_session_send); + nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv); + nghttp2_session_callbacks_set_on_data_chunk_recv_callback( + cbs, tunnel_recv_callback); + nghttp2_session_callbacks_set_on_stream_close_callback(cbs, on_stream_close); + nghttp2_session_callbacks_set_on_header_callback(cbs, on_header); + + /* The nghttp2 session is not yet setup, do it */ + rc = h2_client_new(cf, cbs); + if(rc) { + 
failf(data, "Couldn't initialize nghttp2"); + goto out; + } + + { + nghttp2_settings_entry iv[3]; + + iv[0].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS; + iv[0].value = Curl_multi_max_concurrent_streams(data->multi); + iv[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE; + iv[1].value = H2_TUNNEL_WINDOW_SIZE; + iv[2].settings_id = NGHTTP2_SETTINGS_ENABLE_PUSH; + iv[2].value = 0; + rc = nghttp2_submit_settings(ctx->h2, NGHTTP2_FLAG_NONE, iv, 3); + if(rc) { + failf(data, "nghttp2_submit_settings() failed: %s(%d)", + nghttp2_strerror(rc), rc); + result = CURLE_HTTP2; + goto out; + } + } + + rc = nghttp2_session_set_local_window_size(ctx->h2, NGHTTP2_FLAG_NONE, 0, + HTTP2_HUGE_WINDOW_SIZE); + if(rc) { + failf(data, "nghttp2_session_set_local_window_size() failed: %s(%d)", + nghttp2_strerror(rc), rc); + result = CURLE_HTTP2; + goto out; + } + + + /* all set, traffic will be send on connect */ + result = CURLE_OK; + +out: + if(cbs) + nghttp2_session_callbacks_del(cbs); + DEBUGF(LOG_CF(data, cf, "init proxy ctx -> %d", result)); + return result; +} + +static CURLcode nw_out_flush(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + size_t buflen = Curl_bufq_len(&ctx->outbufq); + ssize_t nwritten; + CURLcode result; + + (void)data; + if(!buflen) + return CURLE_OK; + + DEBUGF(LOG_CF(data, cf, "h2 conn flush %zu bytes", buflen)); + nwritten = Curl_bufq_pass(&ctx->outbufq, nw_out_writer, cf, &result); + if(nwritten < 0) { + return result; + } + if((size_t)nwritten < buflen) { + return CURLE_AGAIN; + } + return CURLE_OK; +} + +/* + * Processes pending input left in network input buffer. + * This function returns 0 if it succeeds, or -1 and error code will + * be assigned to *err. 
+ */ +static int h2_process_pending_input(struct Curl_cfilter *cf, + struct Curl_easy *data, + CURLcode *err) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + const unsigned char *buf; + size_t blen; + ssize_t rv; + + while(Curl_bufq_peek(&ctx->inbufq, &buf, &blen)) { + + rv = nghttp2_session_mem_recv(ctx->h2, (const uint8_t *)buf, blen); + DEBUGF(LOG_CF(data, cf, + "fed %zu bytes from nw to nghttp2 -> %zd", blen, rv)); + if(rv < 0) { + failf(data, + "process_pending_input: nghttp2_session_mem_recv() returned " + "%zd:%s", rv, nghttp2_strerror((int)rv)); + *err = CURLE_RECV_ERROR; + return -1; + } + Curl_bufq_skip(&ctx->inbufq, (size_t)rv); + if(Curl_bufq_is_empty(&ctx->inbufq)) { + DEBUGF(LOG_CF(data, cf, "all data in connection buffer processed")); + break; + } + else { + DEBUGF(LOG_CF(data, cf, "process_pending_input: %zu bytes left " + "in connection buffer", Curl_bufq_len(&ctx->inbufq))); + } + } + + if(nghttp2_session_check_request_allowed(ctx->h2) == 0) { + /* No more requests are allowed in the current session, so + the connection may not be reused. This is set when a + GOAWAY frame has been received or when the limit of stream + identifiers has been reached. */ + connclose(cf->conn, "http/2: No new requests allowed"); + } + + return 0; +} + +static CURLcode h2_progress_ingress(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + CURLcode result = CURLE_OK; + ssize_t nread; + bool keep_reading = TRUE; + + /* Process network input buffer fist */ + if(!Curl_bufq_is_empty(&ctx->inbufq)) { + DEBUGF(LOG_CF(data, cf, "Process %zd bytes in connection buffer", + Curl_bufq_len(&ctx->inbufq))); + if(h2_process_pending_input(cf, data, &result) < 0) + return result; + } + + /* Receive data from the "lower" filters, e.g. 
network until + * it is time to stop or we have enough data for this stream */ + while(keep_reading && + !ctx->conn_closed && /* not closed the connection */ + !ctx->tunnel.closed && /* nor the tunnel */ + Curl_bufq_is_empty(&ctx->inbufq) && /* and we consumed our input */ + !Curl_bufq_is_full(&ctx->tunnel.recvbuf)) { + + nread = Curl_bufq_slurp(&ctx->inbufq, nw_in_reader, cf, &result); + DEBUGF(LOG_CF(data, cf, "read %zd bytes nw data -> %zd, %d", + Curl_bufq_len(&ctx->inbufq), nread, result)); + if(nread < 0) { + if(result != CURLE_AGAIN) { + failf(data, "Failed receiving HTTP2 data"); + return result; + } + break; + } + else if(nread == 0) { + ctx->conn_closed = TRUE; + break; + } + + keep_reading = Curl_bufq_is_full(&ctx->inbufq); + if(h2_process_pending_input(cf, data, &result)) + return result; + } + + if(ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) { + connclose(cf->conn, "GOAWAY received"); + } + + return CURLE_OK; +} + +/* + * Check if there's been an update in the priority / + * dependency settings and if so it submits a PRIORITY frame with the updated + * info. + * Flush any out data pending in the network buffer. 
+ */ +static CURLcode h2_progress_egress(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + int rv = 0; + + rv = nghttp2_session_send(ctx->h2); + if(nghttp2_is_fatal(rv)) { + DEBUGF(LOG_CF(data, cf, "nghttp2_session_send error (%s)%d", + nghttp2_strerror(rv), rv)); + return CURLE_SEND_ERROR; + } + return nw_out_flush(cf, data); +} + +static ssize_t on_session_send(nghttp2_session *h2, + const uint8_t *buf, size_t blen, int flags, + void *userp) +{ + struct Curl_cfilter *cf = userp; + struct cf_h2_proxy_ctx *ctx = cf->ctx; + struct Curl_easy *data = CF_DATA_CURRENT(cf); + ssize_t nwritten; + CURLcode result = CURLE_OK; + + (void)h2; + (void)flags; + DEBUGASSERT(data); + + nwritten = Curl_bufq_write_pass(&ctx->outbufq, buf, blen, + nw_out_writer, cf, &result); + if(nwritten < 0) { + if(result == CURLE_AGAIN) { + return NGHTTP2_ERR_WOULDBLOCK; + } + failf(data, "Failed sending HTTP2 data"); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + if(!nwritten) + return NGHTTP2_ERR_WOULDBLOCK; + + return nwritten; +} + +static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame, + void *userp) +{ + struct Curl_cfilter *cf = userp; + struct cf_h2_proxy_ctx *ctx = cf->ctx; + struct Curl_easy *data = CF_DATA_CURRENT(cf); + int32_t stream_id = frame->hd.stream_id; + + (void)session; + DEBUGASSERT(data); + if(!stream_id) { + /* stream ID zero is for connection-oriented stuff */ + DEBUGASSERT(data); + switch(frame->hd.type) { + case NGHTTP2_SETTINGS: + /* we do not do anything with this for now */ + break; + case NGHTTP2_GOAWAY: + infof(data, "recveived GOAWAY, error=%d, last_stream=%u", + frame->goaway.error_code, frame->goaway.last_stream_id); + ctx->goaway = TRUE; + break; + case NGHTTP2_WINDOW_UPDATE: + DEBUGF(LOG_CF(data, cf, "recv frame WINDOW_UPDATE")); + break; + default: + DEBUGF(LOG_CF(data, cf, "recv frame %x on 0", frame->hd.type)); + } + return 0; + } + + if(stream_id != ctx->tunnel.stream_id) { + 
DEBUGF(LOG_CF(data, cf, "[h2sid=%u] rcvd FRAME not for tunnel", + stream_id)); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + switch(frame->hd.type) { + case NGHTTP2_DATA: + /* If body started on this stream, then receiving DATA is illegal. */ + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv frame DATA", stream_id)); + break; + case NGHTTP2_HEADERS: + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv frame HEADERS", stream_id)); + + /* nghttp2 guarantees that :status is received, and we store it to + stream->status_code. Fuzzing has proven this can still be reached + without status code having been set. */ + if(!ctx->tunnel.resp) + return NGHTTP2_ERR_CALLBACK_FAILURE; + /* Only final status code signals the end of header */ + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] got http status: %d", + stream_id, ctx->tunnel.resp->status)); + if(!ctx->tunnel.has_final_response) { + if(ctx->tunnel.resp->status / 100 != 1) { + ctx->tunnel.has_final_response = TRUE; + } + } + break; + case NGHTTP2_PUSH_PROMISE: + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv PUSH_PROMISE", stream_id)); + return NGHTTP2_ERR_CALLBACK_FAILURE; + case NGHTTP2_RST_STREAM: + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv RST", stream_id)); + ctx->tunnel.reset = TRUE; + break; + case NGHTTP2_WINDOW_UPDATE: + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv WINDOW_UPDATE", stream_id)); + if((data->req.keepon & KEEP_SEND_HOLD) && + (data->req.keepon & KEEP_SEND)) { + data->req.keepon &= ~KEEP_SEND_HOLD; + Curl_expire(data, 0, EXPIRE_RUN_NOW); + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] unpausing after win update", + stream_id)); + } + break; + default: + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] recv frame %x", + stream_id, frame->hd.type)); + break; + } + return 0; +} + +static int on_header(nghttp2_session *session, const nghttp2_frame *frame, + const uint8_t *name, size_t namelen, + const uint8_t *value, size_t valuelen, + uint8_t flags, + void *userp) +{ + struct Curl_cfilter *cf = userp; + struct cf_h2_proxy_ctx *ctx = cf->ctx; + struct Curl_easy 
*data = CF_DATA_CURRENT(cf); + int32_t stream_id = frame->hd.stream_id; + CURLcode result; + + (void)flags; + (void)data; + (void)session; + DEBUGASSERT(stream_id); /* should never be a zero stream ID here */ + if(stream_id != ctx->tunnel.stream_id) { + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] header for non-tunnel stream: " + "%.*s: %.*s", stream_id, + (int)namelen, name, + (int)valuelen, value)); + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + + if(frame->hd.type == NGHTTP2_PUSH_PROMISE) + return NGHTTP2_ERR_CALLBACK_FAILURE; + + if(ctx->tunnel.has_final_response) { + /* we do not do anything with trailers for tunnel streams */ + return 0; + } + + if(namelen == sizeof(H2H3_PSEUDO_STATUS) - 1 && + memcmp(H2H3_PSEUDO_STATUS, name, namelen) == 0) { + int http_status; + struct http_resp *resp; + + /* status: always comes first, we might get more than one response, + * link the previous ones for keepers */ + result = Curl_http_decode_status(&http_status, + (const char *)value, valuelen); + if(result) + return NGHTTP2_ERR_CALLBACK_FAILURE; + result = Curl_http_resp_make(&resp, http_status, NULL); + if(result) + return NGHTTP2_ERR_CALLBACK_FAILURE; + resp->prev = ctx->tunnel.resp; + ctx->tunnel.resp = resp; + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] status: HTTP/2 %03d", + stream_id, ctx->tunnel.resp->status)); + return 0; + } + + if(!ctx->tunnel.resp) + return NGHTTP2_ERR_CALLBACK_FAILURE; + + result = Curl_dynhds_add(&ctx->tunnel.resp->headers, + (const char *)name, namelen, + (const char *)value, valuelen); + if(result) + return NGHTTP2_ERR_CALLBACK_FAILURE; + + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] header: %.*s: %.*s", + stream_id, + (int)namelen, name, + (int)valuelen, value)); + + return 0; /* 0 is successful */ +} + +static ssize_t tunnel_send_callback(nghttp2_session *session, + int32_t stream_id, + uint8_t *buf, size_t length, + uint32_t *data_flags, + nghttp2_data_source *source, + void *userp) +{ + struct Curl_cfilter *cf = userp; + struct cf_h2_proxy_ctx *ctx = cf->ctx; + 
struct Curl_easy *data = CF_DATA_CURRENT(cf); + struct tunnel_stream *ts; + CURLcode result; + ssize_t nread; + + (void)source; + (void)data; + (void)ctx; + + if(!stream_id) + return NGHTTP2_ERR_INVALID_ARGUMENT; + + ts = nghttp2_session_get_stream_user_data(session, stream_id); + if(!ts) + return NGHTTP2_ERR_CALLBACK_FAILURE; + DEBUGASSERT(ts == &ctx->tunnel); + + nread = Curl_bufq_read(&ts->sendbuf, buf, length, &result); + if(nread < 0) { + if(result != CURLE_AGAIN) + return NGHTTP2_ERR_CALLBACK_FAILURE; + return NGHTTP2_ERR_DEFERRED; + } + if(ts->closed && Curl_bufq_is_empty(&ts->sendbuf)) + *data_flags = NGHTTP2_DATA_FLAG_EOF; + + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] tunnel_send_callback -> %zd", + ts->stream_id, nread)); + return nread; +} + +static int tunnel_recv_callback(nghttp2_session *session, uint8_t flags, + int32_t stream_id, + const uint8_t *mem, size_t len, void *userp) +{ + struct Curl_cfilter *cf = userp; + struct cf_h2_proxy_ctx *ctx = cf->ctx; + ssize_t nwritten; + CURLcode result; + + (void)flags; + (void)session; + DEBUGASSERT(stream_id); /* should never be a zero stream ID here */ + + if(stream_id != ctx->tunnel.stream_id) + return NGHTTP2_ERR_CALLBACK_FAILURE; + + nwritten = Curl_bufq_write(&ctx->tunnel.recvbuf, mem, len, &result); + if(nwritten < 0) { + if(result != CURLE_AGAIN) + return NGHTTP2_ERR_CALLBACK_FAILURE; + nwritten = 0; + } + DEBUGASSERT((size_t)nwritten == len); + return 0; +} + +static int on_stream_close(nghttp2_session *session, int32_t stream_id, + uint32_t error_code, void *userp) +{ + struct Curl_cfilter *cf = userp; + struct cf_h2_proxy_ctx *ctx = cf->ctx; + struct Curl_easy *data = CF_DATA_CURRENT(cf); + + (void)session; + (void)data; + + if(stream_id != ctx->tunnel.stream_id) + return 0; + + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] on_stream_close, %s (err %d)", + stream_id, nghttp2_http2_strerror(error_code), error_code)); + ctx->tunnel.closed = TRUE; + ctx->tunnel.error = error_code; + + return 0; +} + +static CURLcode 
h2_submit(int32_t *pstream_id, + struct Curl_cfilter *cf, + struct Curl_easy *data, + nghttp2_session *h2, + struct http_req *req, + const nghttp2_priority_spec *pri_spec, + void *stream_user_data, + nghttp2_data_source_read_callback read_callback, + void *read_ctx) +{ + nghttp2_nv *nva = NULL; + unsigned int i; + int32_t stream_id = -1; + size_t nheader, j; + CURLcode result = CURLE_OUT_OF_MEMORY; + + (void)cf; + nheader = req->headers.hds_len + 1; /* ":method" is a MUST */ + if(req->scheme) + ++nheader; + if(req->authority) + ++nheader; + if(req->path) + ++nheader; + + nva = malloc(sizeof(nghttp2_nv) * nheader); + if(!nva) + goto out; + + nva[0].name = (unsigned char *)H2H3_PSEUDO_METHOD; + nva[0].namelen = sizeof(H2H3_PSEUDO_METHOD) - 1; + nva[0].value = (unsigned char *)req->method; + nva[0].valuelen = strlen(req->method); + nva[0].flags = NGHTTP2_NV_FLAG_NONE; + i = 1; + if(req->scheme) { + nva[i].name = (unsigned char *)H2H3_PSEUDO_SCHEME; + nva[i].namelen = sizeof(H2H3_PSEUDO_SCHEME) - 1; + nva[i].value = (unsigned char *)req->scheme; + nva[i].valuelen = strlen(req->scheme); + nva[i].flags = NGHTTP2_NV_FLAG_NONE; + ++i; + } + if(req->authority) { + nva[i].name = (unsigned char *)H2H3_PSEUDO_AUTHORITY; + nva[i].namelen = sizeof(H2H3_PSEUDO_AUTHORITY) - 1; + nva[i].value = (unsigned char *)req->authority; + nva[i].valuelen = strlen(req->authority); + nva[i].flags = NGHTTP2_NV_FLAG_NONE; + ++i; + } + if(req->path) { + nva[i].name = (unsigned char *)H2H3_PSEUDO_PATH; + nva[i].namelen = sizeof(H2H3_PSEUDO_PATH) - 1; + nva[i].value = (unsigned char *)req->path; + nva[i].valuelen = strlen(req->path); + nva[i].flags = NGHTTP2_NV_FLAG_NONE; + ++i; + } + + for(j = 0; i < nheader; i++, j++) { + struct dynhds_entry *e = Curl_dynhds_getn(&req->headers, j); + if(!e) + break; + nva[i].name = (unsigned char *)e->name; + nva[i].namelen = e->namelen; + nva[i].value = (unsigned char *)e->value; + nva[i].valuelen = e->valuelen; + nva[i].flags = NGHTTP2_NV_FLAG_NONE; + } + + 
if(read_callback) { + nghttp2_data_provider data_prd; + + data_prd.read_callback = read_callback; + data_prd.source.ptr = read_ctx; + stream_id = nghttp2_submit_request(h2, pri_spec, nva, nheader, + &data_prd, stream_user_data); + } + else { + stream_id = nghttp2_submit_request(h2, pri_spec, nva, nheader, + NULL, stream_user_data); + } + + if(stream_id < 0) { + failf(data, "nghttp2_session_upgrade2() failed: %s(%d)", + nghttp2_strerror(stream_id), stream_id); + result = CURLE_SEND_ERROR; + goto out; + } + result = CURLE_OK; + +out: + Curl_safefree(nva); + *pstream_id = stream_id; + return result; +} + +static CURLcode submit_CONNECT(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_stream *ts) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + CURLcode result; + struct http_req *req = NULL; + + infof(data, "Establish HTTP/2 proxy tunnel to %s", ts->authority); + + result = Curl_http_req_make(&req, "CONNECT", NULL, ts->authority, NULL); + if(result) + goto out; + + /* Setup the proxy-authorization header, if any */ + result = Curl_http_output_auth(data, cf->conn, req->method, HTTPREQ_GET, + req->authority, TRUE); + if(result) + goto out; + + if(data->state.aptr.proxyuserpwd) { + result = Curl_dynhds_h1_cadd_line(&req->headers, + data->state.aptr.proxyuserpwd); + if(result) + goto out; + } + + if(!Curl_checkProxyheaders(data, cf->conn, STRCONST("User-Agent")) + && data->set.str[STRING_USERAGENT]) { + result = Curl_dynhds_cadd(&req->headers, "User-Agent", + data->set.str[STRING_USERAGENT]); + if(result) + goto out; + } + + result = Curl_dynhds_add_custom(data, TRUE, &req->headers); + if(result) + goto out; + + result = h2_submit(&ts->stream_id, cf, data, ctx->h2, req, + NULL, ts, tunnel_send_callback, cf); + if(result) { + DEBUGF(LOG_CF(data, cf, "send: nghttp2_submit_request error (%s)%u", + nghttp2_strerror(ts->stream_id), ts->stream_id)); + } + +out: + if(req) + Curl_http_req_free(req); + if(result) + failf(data, "Failed sending CONNECT to proxy"); + 
return result; +} + +static CURLcode inspect_response(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_stream *ts) +{ + CURLcode result = CURLE_OK; + struct dynhds_entry *auth_reply = NULL; + (void)cf; + + DEBUGASSERT(ts->resp); + if(ts->resp->status/100 == 2) { + infof(data, "CONNECT tunnel established, response %d", ts->resp->status); + tunnel_go_state(cf, ts, TUNNEL_ESTABLISHED, data); + return CURLE_OK; + } + + if(ts->resp->status == 401) { + auth_reply = Curl_dynhds_cget(&ts->resp->headers, "WWW-Authenticate"); + } + else if(ts->resp->status == 407) { + auth_reply = Curl_dynhds_cget(&ts->resp->headers, "Proxy-Authenticate"); + } + + if(auth_reply) { + DEBUGF(LOG_CF(data, cf, "CONNECT: fwd auth header '%s'", + auth_reply->value)); + result = Curl_http_input_auth(data, ts->resp->status == 407, + auth_reply->value); + if(result) + return result; + if(data->req.newurl) { + /* Inidicator that we should try again */ + Curl_safefree(data->req.newurl); + tunnel_go_state(cf, ts, TUNNEL_INIT, data); + return CURLE_OK; + } + } + + /* Seems to have failed */ + return CURLE_RECV_ERROR; +} + +static CURLcode CONNECT(struct Curl_cfilter *cf, + struct Curl_easy *data, + struct tunnel_stream *ts) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + CURLcode result = CURLE_OK; + + DEBUGASSERT(ts); + DEBUGASSERT(ts->authority); + do { + switch(ts->state) { + case TUNNEL_INIT: + /* Prepare the CONNECT request and make a first attempt to send. 
*/ + DEBUGF(LOG_CF(data, cf, "CONNECT start for %s", ts->authority)); + result = submit_CONNECT(cf, data, ts); + if(result) + goto out; + tunnel_go_state(cf, ts, TUNNEL_CONNECT, data); + /* FALLTHROUGH */ + + case TUNNEL_CONNECT: + /* see that the request is completely sent */ + result = h2_progress_ingress(cf, data); + if(!result) + result = h2_progress_egress(cf, data); + if(result) { + tunnel_go_state(cf, ts, TUNNEL_FAILED, data); + break; + } + + if(ts->has_final_response) { + tunnel_go_state(cf, ts, TUNNEL_RESPONSE, data); + } + else { + result = CURLE_OK; + goto out; + } + /* FALLTHROUGH */ + + case TUNNEL_RESPONSE: + DEBUGASSERT(ts->has_final_response); + result = inspect_response(cf, data, ts); + if(result) + goto out; + break; + + case TUNNEL_ESTABLISHED: + return CURLE_OK; + + case TUNNEL_FAILED: + return CURLE_RECV_ERROR; + + default: + break; + } + + } while(ts->state == TUNNEL_INIT); + +out: + if(result || ctx->tunnel.closed) + tunnel_go_state(cf, ts, TUNNEL_FAILED, data); + return result; +} + +static CURLcode cf_h2_proxy_connect(struct Curl_cfilter *cf, + struct Curl_easy *data, + bool blocking, bool *done) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + CURLcode result = CURLE_OK; + struct cf_call_data save; + timediff_t check; + struct tunnel_stream *ts = &ctx->tunnel; + + if(cf->connected) { + *done = TRUE; + return CURLE_OK; + } + + /* Connect the lower filters first */ + if(!cf->next->connected) { + result = Curl_conn_cf_connect(cf->next, data, blocking, done); + if(result || !*done) + return result; + } + + *done = FALSE; + + CF_DATA_SAVE(save, cf, data); + if(!ctx->h2) { + result = cf_h2_proxy_ctx_init(cf, data); + if(result) + goto out; + } + DEBUGASSERT(ts->authority); + + check = Curl_timeleft(data, NULL, TRUE); + if(check <= 0) { + failf(data, "Proxy CONNECT aborted due to timeout"); + result = CURLE_OPERATION_TIMEDOUT; + goto out; + } + + /* for the secondary socket (FTP), use the "connect to host" + * but ignore the "connect to port" (use 
the secondary port) + */ + result = CONNECT(cf, data, ts); + +out: + *done = (result == CURLE_OK) && (ts->state == TUNNEL_ESTABLISHED); + cf->connected = *done; + CF_DATA_RESTORE(cf, save); + return result; +} + +static void cf_h2_proxy_close(struct Curl_cfilter *cf, struct Curl_easy *data) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + + if(ctx) { + struct cf_call_data save; + + CF_DATA_SAVE(save, cf, data); + cf_h2_proxy_ctx_clear(ctx); + CF_DATA_RESTORE(cf, save); + } +} + +static void cf_h2_proxy_destroy(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + + (void)data; + if(ctx) { + cf_h2_proxy_ctx_free(ctx); + cf->ctx = NULL; + } +} + +static bool cf_h2_proxy_data_pending(struct Curl_cfilter *cf, + const struct Curl_easy *data) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + if((ctx && !Curl_bufq_is_empty(&ctx->inbufq)) || + (ctx && ctx->tunnel.state == TUNNEL_ESTABLISHED && + !Curl_bufq_is_empty(&ctx->tunnel.recvbuf))) + return TRUE; + return cf->next? 
cf->next->cft->has_data_pending(cf->next, data) : FALSE; +} + +static int cf_h2_proxy_get_select_socks(struct Curl_cfilter *cf, + struct Curl_easy *data, + curl_socket_t *sock) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + int bitmap = GETSOCK_BLANK; + struct cf_call_data save; + + CF_DATA_SAVE(save, cf, data); + sock[0] = Curl_conn_cf_get_socket(cf, data); + bitmap |= GETSOCK_READSOCK(0); + + /* HTTP/2 layer wants to send data) AND there's a window to send data in */ + if(nghttp2_session_want_write(ctx->h2) && + nghttp2_session_get_remote_window_size(ctx->h2)) + bitmap |= GETSOCK_WRITESOCK(0); + + CF_DATA_RESTORE(cf, save); + return bitmap; +} + +static ssize_t h2_handle_tunnel_close(struct Curl_cfilter *cf, + struct Curl_easy *data, + CURLcode *err) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + ssize_t rv = 0; + + if(ctx->tunnel.error == NGHTTP2_REFUSED_STREAM) { + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] REFUSED_STREAM, try again on a new " + "connection", ctx->tunnel.stream_id)); + connclose(cf->conn, "REFUSED_STREAM"); /* don't use this anymore */ + *err = CURLE_RECV_ERROR; /* trigger Curl_retry_request() later */ + return -1; + } + else if(ctx->tunnel.error != NGHTTP2_NO_ERROR) { + failf(data, "HTTP/2 stream %u was not closed cleanly: %s (err %u)", + ctx->tunnel.stream_id, nghttp2_http2_strerror(ctx->tunnel.error), + ctx->tunnel.error); + *err = CURLE_HTTP2_STREAM; + return -1; + } + else if(ctx->tunnel.reset) { + failf(data, "HTTP/2 stream %u was reset", ctx->tunnel.stream_id); + *err = CURLE_RECV_ERROR; + return -1; + } + + *err = CURLE_OK; + rv = 0; + DEBUGF(LOG_CF(data, cf, "handle_tunnel_close -> %zd, %d", rv, *err)); + return rv; +} + +static ssize_t tunnel_recv(struct Curl_cfilter *cf, struct Curl_easy *data, + char *buf, size_t len, CURLcode *err) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + ssize_t nread = -1; + + *err = CURLE_AGAIN; + if(!Curl_bufq_is_empty(&ctx->tunnel.recvbuf)) { + nread = Curl_bufq_read(&ctx->tunnel.recvbuf, + (unsigned char *)buf, 
len, err); + if(nread < 0) + goto out; + DEBUGASSERT(nread > 0); + } + + if(nread < 0) { + if(ctx->tunnel.closed) { + nread = h2_handle_tunnel_close(cf, data, err); + } + else if(ctx->tunnel.reset || + (ctx->conn_closed && Curl_bufq_is_empty(&ctx->inbufq)) || + (ctx->goaway && ctx->last_stream_id < ctx->tunnel.stream_id)) { + *err = CURLE_RECV_ERROR; + nread = -1; + } + } + else if(nread == 0) { + *err = CURLE_AGAIN; + nread = -1; + } + +out: + DEBUGF(LOG_CF(data, cf, "tunnel_recv(len=%zu) -> %zd, %d", + len, nread, *err)); + return nread; +} + +static ssize_t cf_h2_proxy_recv(struct Curl_cfilter *cf, + struct Curl_easy *data, + char *buf, size_t len, CURLcode *err) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + ssize_t nread = -1; + struct cf_call_data save; + CURLcode result; + + if(ctx->tunnel.state != TUNNEL_ESTABLISHED) { + *err = CURLE_RECV_ERROR; + return -1; + } + CF_DATA_SAVE(save, cf, data); + + if(Curl_bufq_is_empty(&ctx->tunnel.recvbuf)) { + *err = h2_progress_ingress(cf, data); + if(*err) + goto out; + } + + nread = tunnel_recv(cf, data, buf, len, err); + + if(nread > 0) { + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] increase window by %zd", + ctx->tunnel.stream_id, nread)); + nghttp2_session_consume(ctx->h2, ctx->tunnel.stream_id, (size_t)nread); + } + + result = h2_progress_egress(cf, data); + if(result) { + *err = result; + nread = -1; + } + +out: + DEBUGF(LOG_CF(data, cf, "[h2sid=%u] cf_recv(len=%zu) -> %zd %d", + ctx->tunnel.stream_id, len, nread, *err)); + CF_DATA_RESTORE(cf, save); + return nread; +} + +static ssize_t cf_h2_proxy_send(struct Curl_cfilter *cf, + struct Curl_easy *data, + const void *mem, size_t len, CURLcode *err) +{ + struct cf_h2_proxy_ctx *ctx = cf->ctx; + struct cf_call_data save; + ssize_t nwritten = -1; + const unsigned char *buf = mem; + size_t start_len = len; + int rv; + + if(ctx->tunnel.state != TUNNEL_ESTABLISHED) { + *err = CURLE_SEND_ERROR; + return -1; + } + CF_DATA_SAVE(save, cf, data); + + while(len) { + nwritten = 
Curl_bufq_write(&ctx->tunnel.sendbuf, buf, len, err); + if(nwritten <= 0) { + if(*err && *err != CURLE_AGAIN) { + DEBUGF(LOG_CF(data, cf, "error adding data to tunnel sendbuf: %d", + *err)); + nwritten = -1; + goto out; + } + /* blocked */ + nwritten = 0; + } + else { + DEBUGASSERT((size_t)nwritten <= len); + buf += (size_t)nwritten; + len -= (size_t)nwritten; + } + + /* resume the tunnel stream and let the h2 session send, which + * triggers reading from tunnel.sendbuf */ + rv = nghttp2_session_resume_data(ctx->h2, ctx->tunnel.stream_id); + if(nghttp2_is_fatal(rv)) { + *err = CURLE_SEND_ERROR; + nwritten = -1; + goto out; + } + *err = h2_progress_egress(cf, data); + if(*err) { + nwritten = -1; + goto out; + } + + if(!nwritten && Curl_bufq_is_full(&ctx->tunnel.sendbuf)) { + size_t rwin; + /* we could not add to the buffer and after session processing, + * it is still full. */ + rwin = nghttp2_session_get_stream_remote_window_size( + ctx->h2, ctx->tunnel.stream_id); + DEBUGF(LOG_CF(data, cf, "cf_send: tunnel win %u/%zu", + nghttp2_session_get_remote_window_size(ctx->h2), rwin)); + if(rwin == 0) { + /* We cannot upload more as the stream's remote window size + * is 0. We need to receive WIN_UPDATEs before we can continue. 
+ */ + data->req.keepon |= KEEP_SEND_HOLD; + DEBUGF(LOG_CF(data, cf, "pausing send as remote flow " + "window is exhausted")); + } + break; + } + } + + nwritten = start_len - len; + if(nwritten > 0) { + *err = CURLE_OK; + } + else if(ctx->tunnel.closed) { + nwritten = -1; + *err = CURLE_SEND_ERROR; + } + else { + nwritten = -1; + *err = CURLE_AGAIN; + } + +out: + DEBUGF(LOG_CF(data, cf, "cf_send(len=%zu) -> %zd, %d ", + start_len, nwritten, *err)); + CF_DATA_RESTORE(cf, save); + return nwritten; +} + +struct Curl_cftype Curl_cft_h2_proxy = { + "H2-PROXY", + CF_TYPE_IP_CONNECT, + CURL_LOG_DEFAULT, + cf_h2_proxy_destroy, + cf_h2_proxy_connect, + cf_h2_proxy_close, + Curl_cf_http_proxy_get_host, + cf_h2_proxy_get_select_socks, + cf_h2_proxy_data_pending, + cf_h2_proxy_send, + cf_h2_proxy_recv, + Curl_cf_def_cntrl, + Curl_cf_def_conn_is_alive, + Curl_cf_def_conn_keep_alive, + Curl_cf_def_query, +}; + +CURLcode Curl_cf_h2_proxy_insert_after(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + struct Curl_cfilter *cf_h2_proxy = NULL; + struct cf_h2_proxy_ctx *ctx; + CURLcode result = CURLE_OUT_OF_MEMORY; + + (void)data; + ctx = calloc(sizeof(*ctx), 1); + if(!ctx) + goto out; + + result = Curl_cf_create(&cf_h2_proxy, &Curl_cft_h2_proxy, ctx); + if(result) + goto out; + + Curl_conn_cf_insert_after(cf, cf_h2_proxy); + result = CURLE_OK; + +out: + if(result) + cf_h2_proxy_ctx_free(ctx); + return result; +} + +#endif /* defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY) */ diff --git a/lib/cf-h2-proxy.h b/lib/cf-h2-proxy.h new file mode 100644 index 000000000..c01bf6213 --- /dev/null +++ b/lib/cf-h2-proxy.h @@ -0,0 +1,39 @@ +#ifndef HEADER_CURL_H2_PROXY_H +#define HEADER_CURL_H2_PROXY_H +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) Daniel Stenberg, , et al. 
+ * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at https://curl.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. + * + * SPDX-License-Identifier: curl + * + ***************************************************************************/ + +#include "curl_setup.h" + +#if defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY) + +CURLcode Curl_cf_h2_proxy_insert_after(struct Curl_cfilter *cf, + struct Curl_easy *data); + +extern struct Curl_cftype Curl_cft_h2_proxy; + + +#endif /* defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY) */ + +#endif /* HEADER_CURL_H2_PROXY_H */ diff --git a/lib/cf-haproxy.c b/lib/cf-haproxy.c new file mode 100644 index 000000000..84aa9156b --- /dev/null +++ b/lib/cf-haproxy.c @@ -0,0 +1,262 @@ +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) Daniel Stenberg, , et al. + * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at https://curl.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. 
+ * + * SPDX-License-Identifier: curl + * + ***************************************************************************/ + +#include "curl_setup.h" + +#if !defined(CURL_DISABLE_PROXY) + +#include +#include "urldata.h" +#include "cfilters.h" +#include "cf-haproxy.h" +#include "curl_log.h" +#include "multiif.h" + +/* The last 3 #include files should be in this order */ +#include "curl_printf.h" +#include "curl_memory.h" +#include "memdebug.h" + + +typedef enum { + HAPROXY_INIT, /* init/default/no tunnel state */ + HAPROXY_SEND, /* data_out being sent */ + HAPROXY_DONE /* all work done */ +} haproxy_state; + +struct cf_haproxy_ctx { + int state; + struct dynbuf data_out; +}; + +static void cf_haproxy_ctx_reset(struct cf_haproxy_ctx *ctx) +{ + DEBUGASSERT(ctx); + ctx->state = HAPROXY_INIT; + Curl_dyn_reset(&ctx->data_out); +} + +static void cf_haproxy_ctx_free(struct cf_haproxy_ctx *ctx) +{ + if(ctx) { + Curl_dyn_free(&ctx->data_out); + free(ctx); + } +} + +static CURLcode cf_haproxy_date_out_set(struct Curl_cfilter*cf, + struct Curl_easy *data) +{ + struct cf_haproxy_ctx *ctx = cf->ctx; + CURLcode result; + const char *tcp_version; + + DEBUGASSERT(ctx); + DEBUGASSERT(ctx->state == HAPROXY_INIT); +#ifdef USE_UNIX_SOCKETS + if(cf->conn->unix_domain_socket) + /* the buffer is large enough to hold this! */ + result = Curl_dyn_addn(&ctx->data_out, STRCONST("PROXY UNKNOWN\r\n")); + else { +#endif /* USE_UNIX_SOCKETS */ + /* Emit the correct prefix for IPv6 */ + tcp_version = cf->conn->bits.ipv6 ? 
"TCP6" : "TCP4"; + + result = Curl_dyn_addf(&ctx->data_out, "PROXY %s %s %s %i %i\r\n", + tcp_version, + data->info.conn_local_ip, + data->info.conn_primary_ip, + data->info.conn_local_port, + data->info.conn_primary_port); + +#ifdef USE_UNIX_SOCKETS + } +#endif /* USE_UNIX_SOCKETS */ + return result; +} + +static CURLcode cf_haproxy_connect(struct Curl_cfilter *cf, + struct Curl_easy *data, + bool blocking, bool *done) +{ + struct cf_haproxy_ctx *ctx = cf->ctx; + CURLcode result; + size_t len; + + DEBUGASSERT(ctx); + if(cf->connected) { + *done = TRUE; + return CURLE_OK; + } + + result = cf->next->cft->connect(cf->next, data, blocking, done); + if(result || !*done) + return result; + + switch(ctx->state) { + case HAPROXY_INIT: + result = cf_haproxy_date_out_set(cf, data); + if(result) + goto out; + ctx->state = HAPROXY_SEND; + /* FALLTHROUGH */ + case HAPROXY_SEND: + len = Curl_dyn_len(&ctx->data_out); + if(len > 0) { + ssize_t written = Curl_conn_send(data, cf->sockindex, + Curl_dyn_ptr(&ctx->data_out), + len, &result); + if(written < 0) + goto out; + Curl_dyn_tail(&ctx->data_out, len - (size_t)written); + if(Curl_dyn_len(&ctx->data_out) > 0) { + result = CURLE_OK; + goto out; + } + } + ctx->state = HAPROXY_DONE; + /* FALLTHROUGH */ + default: + Curl_dyn_free(&ctx->data_out); + break; + } + +out: + *done = (!result) && (ctx->state == HAPROXY_DONE); + cf->connected = *done; + return result; +} + +static void cf_haproxy_destroy(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + (void)data; + DEBUGF(LOG_CF(data, cf, "destroy")); + cf_haproxy_ctx_free(cf->ctx); +} + +static void cf_haproxy_close(struct Curl_cfilter *cf, + struct Curl_easy *data) +{ + DEBUGF(LOG_CF(data, cf, "close")); + cf->connected = FALSE; + cf_haproxy_ctx_reset(cf->ctx); + if(cf->next) + cf->next->cft->close(cf->next, data); +} + +static int cf_haproxy_get_select_socks(struct Curl_cfilter *cf, + struct Curl_easy *data, + curl_socket_t *socks) +{ + int fds; + + fds = 
cf->next->cft->get_select_socks(cf->next, data, socks); + if(!fds && cf->next->connected && !cf->connected) { + /* If we are not connected, but the filter "below" is + * and not waiting on something, we are sending. */ + socks[0] = Curl_conn_cf_get_socket(cf, data); + return GETSOCK_WRITESOCK(0); + } + return fds; +} + + +struct Curl_cftype Curl_cft_haproxy = { + "HAPROXY", + 0, + 0, + cf_haproxy_destroy, + cf_haproxy_connect, + cf_haproxy_close, + Curl_cf_def_get_host, + cf_haproxy_get_select_socks, + Curl_cf_def_data_pending, + Curl_cf_def_send, + Curl_cf_def_recv, + Curl_cf_def_cntrl, + Curl_cf_def_conn_is_alive, + Curl_cf_def_conn_keep_alive, + Curl_cf_def_query, +}; + +static CURLcode cf_haproxy_create(struct Curl_cfilter **pcf, + struct Curl_easy *data) +{ + struct Curl_cfilter *cf = NULL; + struct cf_haproxy_ctx *ctx; + CURLcode result; + + (void)data; + ctx = calloc(sizeof(*ctx), 1); + if(!ctx) { + result = CURLE_OUT_OF_MEMORY; + goto out; + } + ctx->state = HAPROXY_INIT; + Curl_dyn_init(&ctx->data_out, DYN_HAXPROXY); + + result = Curl_cf_create(&cf, &Curl_cft_haproxy, ctx); + if(result) + goto out; + ctx = NULL; + +out: + cf_haproxy_ctx_free(ctx); + *pcf = result? 
NULL : cf; + return result; +} + +CURLcode Curl_conn_haproxy_add(struct Curl_easy *data, + struct connectdata *conn, + int sockindex) +{ + struct Curl_cfilter *cf; + CURLcode result; + + result = cf_haproxy_create(&cf, data); + if(result) + goto out; + Curl_conn_cf_add(data, conn, sockindex, cf); + +out: + return result; +} + +CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at, + struct Curl_easy *data) +{ + struct Curl_cfilter *cf; + CURLcode result; + + result = cf_haproxy_create(&cf, data); + if(result) + goto out; + Curl_conn_cf_insert_after(cf_at, cf); + +out: + return result; +} + +#endif /* !CURL_DISABLE_PROXY */ diff --git a/lib/cf-haproxy.h b/lib/cf-haproxy.h new file mode 100644 index 000000000..f082bd98f --- /dev/null +++ b/lib/cf-haproxy.h @@ -0,0 +1,43 @@ +#ifndef HEADER_CURL_CF_HAPROXY_H +#define HEADER_CURL_CF_HAPROXY_H +/*************************************************************************** + * _ _ ____ _ + * Project ___| | | | _ \| | + * / __| | | | |_) | | + * | (__| |_| | _ <| |___ + * \___|\___/|_| \_\_____| + * + * Copyright (C) Daniel Stenberg, , et al. + * + * This software is licensed as described in the file COPYING, which + * you should have received as part of this distribution. The terms + * are also available at https://curl.se/docs/copyright.html. + * + * You may opt to use, copy, modify, merge, publish, distribute and/or sell + * copies of the Software, and permit persons to whom the Software is + * furnished to do so, under the terms of the COPYING file. + * + * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY + * KIND, either express or implied. 
+ * + * SPDX-License-Identifier: curl + * + ***************************************************************************/ + +#include "curl_setup.h" +#include "urldata.h" + +#if !defined(CURL_DISABLE_PROXY) + +CURLcode Curl_conn_haproxy_add(struct Curl_easy *data, + struct connectdata *conn, + int sockindex); + +CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at, + struct Curl_easy *data); + +extern struct Curl_cftype Curl_cft_haproxy; + +#endif /* !CURL_DISABLE_PROXY */ + +#endif /* HEADER_CURL_CF_HAPROXY_H */ diff --git a/lib/cfilters.c b/lib/cfilters.c index e60d1386e..a839f7910 100644 --- a/lib/cfilters.c +++ b/lib/cfilters.c @@ -283,21 +283,31 @@ void Curl_conn_cf_insert_after(struct Curl_cfilter *cf_at, *pnext = tail; } -void Curl_conn_cf_discard(struct Curl_cfilter *cf, struct Curl_easy *data) +bool Curl_conn_cf_discard_sub(struct Curl_cfilter *cf, + struct Curl_cfilter *discard, + struct Curl_easy *data, + bool destroy_always) { - struct Curl_cfilter **pprev = &cf->conn->cfilter[cf->sockindex]; + struct Curl_cfilter **pprev = &cf->next; + bool found = FALSE; - /* remove from chain if still in there */ + /* remove from sub-chain and destroy */ DEBUGASSERT(cf); while (*pprev) { if (*pprev == cf) { - *pprev = cf->next; + *pprev = discard->next; + discard->next = NULL; + found = TRUE; break; } pprev = &((*pprev)->next); } - cf->cft->destroy(cf, data); - free(cf); + if(found || destroy_always) { + discard->next = NULL; + discard->cft->destroy(discard, data); + free(discard); + } + return found; } CURLcode Curl_conn_cf_connect(struct Curl_cfilter *cf, @@ -405,10 +415,8 @@ bool Curl_conn_is_ip_connected(struct Curl_easy *data, int sockindex) return FALSE; } -bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex) +bool Curl_conn_cf_is_ssl(struct Curl_cfilter *cf) { - struct Curl_cfilter *cf = conn? 
conn->cfilter[sockindex] : NULL; - for(; cf; cf = cf->next) { if(cf->cft->flags & CF_TYPE_SSL) return TRUE; @@ -418,6 +426,11 @@ bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex) return FALSE; } +bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex) +{ + return conn? Curl_conn_cf_is_ssl(conn->cfilter[sockindex]) : FALSE; +} + bool Curl_conn_is_multiplex(struct connectdata *conn, int sockindex) { struct Curl_cfilter *cf = conn? conn->cfilter[sockindex] : NULL; diff --git a/lib/cfilters.h b/lib/cfilters.h index 317f2bb19..35b7cfcb9 100644 --- a/lib/cfilters.h +++ b/lib/cfilters.h @@ -254,11 +254,16 @@ void Curl_conn_cf_insert_after(struct Curl_cfilter *cf_at, struct Curl_cfilter *cf_new); /** - * Discard, e.g. remove and destroy a specific filter instance. - * If the filter is attached to a connection, it will be removed before - * it is destroyed. - */ -void Curl_conn_cf_discard(struct Curl_cfilter *cf, struct Curl_easy *data); + * Discard, e.g. remove and destroy `discard` iff + * it still is in the filter chain below `cf`. If `discard` + * is no longer found beneath `cf` return FALSE. + * if `destroy_always` is TRUE, will call `discard`s destroy + * function and free it even if not found in the subchain. + */ +bool Curl_conn_cf_discard_sub(struct Curl_cfilter *cf, + struct Curl_cfilter *discard, + struct Curl_easy *data, + bool destroy_always); /** * Discard all cfilters starting with `*pcf` and clearing it afterwards. @@ -292,6 +297,12 @@ CURLcode Curl_conn_cf_cntrl(struct Curl_cfilter *cf, bool ignore_result, int event, int arg1, void *arg2); +/** + * Determine if the connection filter chain is using SSL to the remote host + * (or will be once connected). + */ +bool Curl_conn_cf_is_ssl(struct Curl_cfilter *cf); + /** * Get the socket used by the filter chain starting at `cf`. * Returns CURL_SOCKET_BAD if not available. 
diff --git a/lib/connect.c b/lib/connect.c index 2dd4e66f2..2d940972e 100644 --- a/lib/connect.c +++ b/lib/connect.c @@ -59,6 +59,7 @@ #include "strerror.h" #include "cfilters.h" #include "connect.h" +#include "cf-haproxy.h" #include "cf-https-connect.h" #include "cf-socket.h" #include "select.h" diff --git a/lib/curl_log.c b/lib/curl_log.c index 2301cff12..71024cfc6 100644 --- a/lib/curl_log.c +++ b/lib/curl_log.c @@ -38,6 +38,9 @@ #include "connect.h" #include "http2.h" #include "http_proxy.h" +#include "cf-h1-proxy.h" +#include "cf-h2-proxy.h" +#include "cf-haproxy.h" #include "cf-https-connect.h" #include "socks.h" #include "strtok.h" @@ -160,6 +163,10 @@ static struct Curl_cftype *cf_types[] = { #endif #if !defined(CURL_DISABLE_PROXY) #if !defined(CURL_DISABLE_HTTP) + &Curl_cft_h1_proxy, +#ifdef USE_NGHTTP2 + &Curl_cft_h2_proxy, +#endif &Curl_cft_http_proxy, #endif /* !CURL_DISABLE_HTTP */ &Curl_cft_haproxy, diff --git a/lib/http_proxy.c b/lib/http_proxy.c index 9f214a305..6e60f8cc2 100644 --- a/lib/http_proxy.c +++ b/lib/http_proxy.c @@ -26,7 +26,7 @@ #include "http_proxy.h" -#if !defined(CURL_DISABLE_PROXY) +#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_PROXY) #include #ifdef USE_HYPER @@ -38,6 +38,8 @@ #include "select.h" #include "progress.h" #include "cfilters.h" +#include "cf-h1-proxy.h" +#include "cf-h2-proxy.h" #include "connect.h" #include "curlx.h" #include "vtls/vtls.h" @@ -50,1023 +52,17 @@ #include "memdebug.h" -#if !defined(CURL_DISABLE_HTTP) - -typedef enum { - TUNNEL_INIT, /* init/default/no tunnel state */ - TUNNEL_CONNECT, /* CONNECT request is being send */ - TUNNEL_RECEIVE, /* CONNECT answer is being received */ - TUNNEL_RESPONSE, /* CONNECT response received completely */ - TUNNEL_ESTABLISHED, - TUNNEL_FAILED -} tunnel_state; - -/* struct for HTTP CONNECT tunneling */ -struct tunnel_state { - int sockindex; - const char *hostname; - int remote_port; - struct HTTP CONNECT; - struct dynbuf rcvbuf; - struct dynbuf req; - size_t 
nsend; - size_t headerlines; - enum keeponval { - KEEPON_DONE, - KEEPON_CONNECT, - KEEPON_IGNORE - } keepon; - curl_off_t cl; /* size of content to read and ignore */ - tunnel_state tunnel_state; - BIT(chunked_encoding); - BIT(close_connection); +struct cf_proxy_ctx { + /* the protocol specific sub-filter we install during connect */ + struct Curl_cfilter *cf_protocol; }; - -static bool tunnel_is_established(struct tunnel_state *ts) -{ - return ts && (ts->tunnel_state == TUNNEL_ESTABLISHED); -} - -static bool tunnel_is_failed(struct tunnel_state *ts) -{ - return ts && (ts->tunnel_state == TUNNEL_FAILED); -} - -static CURLcode tunnel_reinit(struct tunnel_state *ts, - struct connectdata *conn, - struct Curl_easy *data) -{ - (void)data; - DEBUGASSERT(ts); - Curl_dyn_reset(&ts->rcvbuf); - Curl_dyn_reset(&ts->req); - ts->tunnel_state = TUNNEL_INIT; - ts->keepon = KEEPON_CONNECT; - ts->cl = 0; - ts->close_connection = FALSE; - - if(conn->bits.conn_to_host) - ts->hostname = conn->conn_to_host.name; - else if(ts->sockindex == SECONDARYSOCKET) - ts->hostname = conn->secondaryhostname; - else - ts->hostname = conn->host.name; - - if(ts->sockindex == SECONDARYSOCKET) - ts->remote_port = conn->secondary_port; - else if(conn->bits.conn_to_port) - ts->remote_port = conn->conn_to_port; - else - ts->remote_port = conn->remote_port; - - return CURLE_OK; -} - -static CURLcode tunnel_init(struct tunnel_state **pts, - struct Curl_easy *data, - struct connectdata *conn, - int sockindex) -{ - struct tunnel_state *ts; - CURLcode result; - - if(conn->handler->flags & PROTOPT_NOTCPPROXY) { - failf(data, "%s cannot be done over CONNECT", conn->handler->scheme); - return CURLE_UNSUPPORTED_PROTOCOL; - } - - /* we might need the upload buffer for streaming a partial request */ - result = Curl_get_upload_buffer(data); - if(result) - return result; - - ts = calloc(1, sizeof(*ts)); - if(!ts) - return CURLE_OUT_OF_MEMORY; - - ts->sockindex = sockindex; - infof(data, "allocate connect buffer"); - - 
Curl_dyn_init(&ts->rcvbuf, DYN_PROXY_CONNECT_HEADERS); - Curl_dyn_init(&ts->req, DYN_HTTP_REQUEST); - - *pts = ts; - connkeep(conn, "HTTP proxy CONNECT"); - return tunnel_reinit(ts, conn, data); -} - -static void tunnel_go_state(struct Curl_cfilter *cf, - struct tunnel_state *ts, - tunnel_state new_state, - struct Curl_easy *data) -{ - if(ts->tunnel_state == new_state) - return; - /* leaving this one */ - switch(ts->tunnel_state) { - case TUNNEL_CONNECT: - data->req.ignorebody = FALSE; - break; - default: - break; - } - /* entering this one */ - switch(new_state) { - case TUNNEL_INIT: - DEBUGF(LOG_CF(data, cf, "new tunnel state 'init'")); - tunnel_reinit(ts, cf->conn, data); - break; - - case TUNNEL_CONNECT: - DEBUGF(LOG_CF(data, cf, "new tunnel state 'connect'")); - ts->tunnel_state = TUNNEL_CONNECT; - ts->keepon = KEEPON_CONNECT; - Curl_dyn_reset(&ts->rcvbuf); - break; - - case TUNNEL_RECEIVE: - DEBUGF(LOG_CF(data, cf, "new tunnel state 'receive'")); - ts->tunnel_state = TUNNEL_RECEIVE; - break; - - case TUNNEL_RESPONSE: - DEBUGF(LOG_CF(data, cf, "new tunnel state 'response'")); - ts->tunnel_state = TUNNEL_RESPONSE; - break; - - case TUNNEL_ESTABLISHED: - DEBUGF(LOG_CF(data, cf, "new tunnel state 'established'")); - infof(data, "CONNECT phase completed"); - data->state.authproxy.done = TRUE; - data->state.authproxy.multipass = FALSE; - /* FALLTHROUGH */ - case TUNNEL_FAILED: - DEBUGF(LOG_CF(data, cf, "new tunnel state 'failed'")); - ts->tunnel_state = new_state; - Curl_dyn_reset(&ts->rcvbuf); - Curl_dyn_reset(&ts->req); - /* restore the protocol pointer */ - data->info.httpcode = 0; /* clear it as it might've been used for the - proxy */ - /* If a proxy-authorization header was used for the proxy, then we should - make sure that it isn't accidentally used for the document request - after we've connected. So let's free and clear it here. 
*/ - Curl_safefree(data->state.aptr.proxyuserpwd); - data->state.aptr.proxyuserpwd = NULL; -#ifdef USE_HYPER - data->state.hconnect = FALSE; -#endif - break; - } -} - -static void tunnel_free(struct Curl_cfilter *cf, - struct Curl_easy *data) -{ - struct tunnel_state *ts = cf->ctx; - if(ts) { - tunnel_go_state(cf, ts, TUNNEL_FAILED, data); - Curl_dyn_free(&ts->rcvbuf); - Curl_dyn_free(&ts->req); - free(ts); - cf->ctx = NULL; - } -} - -static CURLcode CONNECT_host(struct Curl_easy *data, - struct connectdata *conn, - const char *hostname, - int remote_port, - char **connecthostp, - char **hostp) -{ - char *hostheader; /* for CONNECT */ - char *host = NULL; /* Host: */ - bool ipv6_ip = conn->bits.ipv6_ip; - - /* the hostname may be different */ - if(hostname != conn->host.name) - ipv6_ip = (strchr(hostname, ':') != NULL); - hostheader = /* host:port with IPv6 support */ - aprintf("%s%s%s:%d", ipv6_ip?"[":"", hostname, ipv6_ip?"]":"", - remote_port); - if(!hostheader) - return CURLE_OUT_OF_MEMORY; - - if(!Curl_checkProxyheaders(data, conn, STRCONST("Host"))) { - host = aprintf("Host: %s\r\n", hostheader); - if(!host) { - free(hostheader); - return CURLE_OUT_OF_MEMORY; - } - } - *connecthostp = hostheader; - *hostp = host; - return CURLE_OK; -} - -#ifndef USE_HYPER -static CURLcode start_CONNECT(struct Curl_cfilter *cf, - struct Curl_easy *data, - struct tunnel_state *ts) -{ - struct connectdata *conn = cf->conn; - char *hostheader = NULL; - char *host = NULL; - const char *httpv; - CURLcode result; - - infof(data, "Establish HTTP proxy tunnel to %s:%d", - ts->hostname, ts->remote_port); - - /* This only happens if we've looped here due to authentication - reasons, and we don't really use the newly cloned URL here - then. Just free() it. 
*/ - Curl_safefree(data->req.newurl); - - result = CONNECT_host(data, conn, - ts->hostname, ts->remote_port, - &hostheader, &host); - if(result) - goto out; - - /* Setup the proxy-authorization header, if any */ - result = Curl_http_output_auth(data, conn, "CONNECT", HTTPREQ_GET, - hostheader, TRUE); - if(result) - goto out; - - httpv = (conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) ? "1.0" : "1.1"; - - result = - Curl_dyn_addf(&ts->req, - "CONNECT %s HTTP/%s\r\n" - "%s" /* Host: */ - "%s", /* Proxy-Authorization */ - hostheader, - httpv, - host?host:"", - data->state.aptr.proxyuserpwd? - data->state.aptr.proxyuserpwd:""); - if(result) - goto out; - - if(!Curl_checkProxyheaders(data, conn, STRCONST("User-Agent")) - && data->set.str[STRING_USERAGENT]) - result = Curl_dyn_addf(&ts->req, "User-Agent: %s\r\n", - data->set.str[STRING_USERAGENT]); - if(result) - goto out; - - if(!Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection"))) - result = Curl_dyn_addn(&ts->req, - STRCONST("Proxy-Connection: Keep-Alive\r\n")); - if(result) - goto out; - - result = Curl_add_custom_headers(data, TRUE, &ts->req); - if(result) - goto out; - - /* CRLF terminate the request */ - result = Curl_dyn_addn(&ts->req, STRCONST("\r\n")); - if(result) - goto out; - - /* Send the connect request to the proxy */ - result = Curl_buffer_send(&ts->req, data, &ts->CONNECT, - &data->info.request_size, 0, - ts->sockindex); - ts->headerlines = 0; - -out: - if(result) - failf(data, "Failed sending CONNECT to proxy"); - free(host); - free(hostheader); - return result; -} - -static CURLcode send_CONNECT(struct Curl_easy *data, - struct connectdata *conn, - struct tunnel_state *ts, - bool *done) -{ - struct SingleRequest *k = &data->req; - struct HTTP *http = &ts->CONNECT; - CURLcode result = CURLE_OK; - - if(http->sending != HTTPSEND_REQUEST) - goto out; - - if(!ts->nsend) { - size_t fillcount; - k->upload_fromhere = data->state.ulbuf; - result = Curl_fillreadbuffer(data, 
data->set.upload_buffer_size, - &fillcount); - if(result) - goto out; - ts->nsend = fillcount; - } - if(ts->nsend) { - ssize_t bytes_written; - /* write to socket (send away data) */ - result = Curl_write(data, - conn->writesockfd, /* socket to send to */ - k->upload_fromhere, /* buffer pointer */ - ts->nsend, /* buffer size */ - &bytes_written); /* actually sent */ - if(result) - goto out; - /* send to debug callback! */ - Curl_debug(data, CURLINFO_HEADER_OUT, - k->upload_fromhere, bytes_written); - - ts->nsend -= bytes_written; - k->upload_fromhere += bytes_written; - } - if(!ts->nsend) - http->sending = HTTPSEND_NADA; - -out: - if(result) - failf(data, "Failed sending CONNECT to proxy"); - *done = (http->sending != HTTPSEND_REQUEST); - return result; -} - -static CURLcode on_resp_header(struct Curl_cfilter *cf, - struct Curl_easy *data, - struct tunnel_state *ts, - const char *header) -{ - CURLcode result = CURLE_OK; - struct SingleRequest *k = &data->req; - (void)cf; - - if((checkprefix("WWW-Authenticate:", header) && - (401 == k->httpcode)) || - (checkprefix("Proxy-authenticate:", header) && - (407 == k->httpcode))) { - - bool proxy = (k->httpcode == 407) ? TRUE : FALSE; - char *auth = Curl_copy_header_value(header); - if(!auth) - return CURLE_OUT_OF_MEMORY; - - DEBUGF(LOG_CF(data, cf, "CONNECT: fwd auth header '%s'", header)); - result = Curl_http_input_auth(data, proxy, auth); - - free(auth); - - if(result) - return result; - } - else if(checkprefix("Content-Length:", header)) { - if(k->httpcode/100 == 2) { - /* A client MUST ignore any Content-Length or Transfer-Encoding - header fields received in a successful response to CONNECT. - "Successful" described as: 2xx (Successful). 
RFC 7231 4.3.6 */ - infof(data, "Ignoring Content-Length in CONNECT %03d response", - k->httpcode); - } - else { - (void)curlx_strtoofft(header + strlen("Content-Length:"), - NULL, 10, &ts->cl); - } - } - else if(Curl_compareheader(header, - STRCONST("Connection:"), STRCONST("close"))) - ts->close_connection = TRUE; - else if(checkprefix("Transfer-Encoding:", header)) { - if(k->httpcode/100 == 2) { - /* A client MUST ignore any Content-Length or Transfer-Encoding - header fields received in a successful response to CONNECT. - "Successful" described as: 2xx (Successful). RFC 7231 4.3.6 */ - infof(data, "Ignoring Transfer-Encoding in " - "CONNECT %03d response", k->httpcode); - } - else if(Curl_compareheader(header, - STRCONST("Transfer-Encoding:"), - STRCONST("chunked"))) { - infof(data, "CONNECT responded chunked"); - ts->chunked_encoding = TRUE; - /* init our chunky engine */ - Curl_httpchunk_init(data); - } - } - else if(Curl_compareheader(header, - STRCONST("Proxy-Connection:"), - STRCONST("close"))) - ts->close_connection = TRUE; - else if(!strncmp(header, "HTTP/1.", 7) && - ((header[7] == '0') || (header[7] == '1')) && - (header[8] == ' ') && - ISDIGIT(header[9]) && ISDIGIT(header[10]) && ISDIGIT(header[11]) && - !ISDIGIT(header[12])) { - /* store the HTTP code from the proxy */ - data->info.httpproxycode = k->httpcode = (header[9] - '0') * 100 + - (header[10] - '0') * 10 + (header[11] - '0'); - } - return result; -} - -static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf, - struct Curl_easy *data, - struct tunnel_state *ts, - bool *done) -{ - CURLcode result = CURLE_OK; - struct SingleRequest *k = &data->req; - curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data); - char *linep; - size_t perline; - int error; - -#define SELECT_OK 0 -#define SELECT_ERROR 1 - - error = SELECT_OK; - *done = FALSE; - - if(!Curl_conn_data_pending(data, ts->sockindex)) - return CURLE_OK; - - while(ts->keepon) { - ssize_t gotbytes; - char byte; - - /* Read one byte at 
a time to avoid a race condition. Wait at most one - second before looping to ensure continuous pgrsUpdates. */ - result = Curl_read(data, tunnelsocket, &byte, 1, &gotbytes); - if(result == CURLE_AGAIN) - /* socket buffer drained, return */ - return CURLE_OK; - - if(Curl_pgrsUpdate(data)) - return CURLE_ABORTED_BY_CALLBACK; - - if(result) { - ts->keepon = KEEPON_DONE; - break; - } - - if(gotbytes <= 0) { - if(data->set.proxyauth && data->state.authproxy.avail && - data->state.aptr.proxyuserpwd) { - /* proxy auth was requested and there was proxy auth available, - then deem this as "mere" proxy disconnect */ - ts->close_connection = TRUE; - infof(data, "Proxy CONNECT connection closed"); - } - else { - error = SELECT_ERROR; - failf(data, "Proxy CONNECT aborted"); - } - ts->keepon = KEEPON_DONE; - break; - } - - if(ts->keepon == KEEPON_IGNORE) { - /* This means we are currently ignoring a response-body */ - - if(ts->cl) { - /* A Content-Length based body: simply count down the counter - and make sure to break out of the loop when we're done! */ - ts->cl--; - if(ts->cl <= 0) { - ts->keepon = KEEPON_DONE; - break; - } - } - else { - /* chunked-encoded body, so we need to do the chunked dance - properly to know when the end of the body is reached */ - CHUNKcode r; - CURLcode extra; - ssize_t tookcareof = 0; - - /* now parse the chunked piece of data so that we can - properly tell when the stream ends */ - r = Curl_httpchunk_read(data, &byte, 1, &tookcareof, &extra); - if(r == CHUNKE_STOP) { - /* we're done reading chunks! 
*/ - infof(data, "chunk reading DONE"); - ts->keepon = KEEPON_DONE; - } - } - continue; - } - - if(Curl_dyn_addn(&ts->rcvbuf, &byte, 1)) { - failf(data, "CONNECT response too large"); - return CURLE_RECV_ERROR; - } - - /* if this is not the end of a header line then continue */ - if(byte != 0x0a) - continue; - - ts->headerlines++; - linep = Curl_dyn_ptr(&ts->rcvbuf); - perline = Curl_dyn_len(&ts->rcvbuf); /* amount of bytes in this line */ - - /* output debug if that is requested */ - Curl_debug(data, CURLINFO_HEADER_IN, linep, perline); - - if(!data->set.suppress_connect_headers) { - /* send the header to the callback */ - int writetype = CLIENTWRITE_HEADER | CLIENTWRITE_CONNECT | - (data->set.include_header ? CLIENTWRITE_BODY : 0) | - (ts->headerlines == 1 ? CLIENTWRITE_STATUS : 0); - - result = Curl_client_write(data, writetype, linep, perline); - if(result) - return result; - } - - data->info.header_size += (long)perline; - - /* Newlines are CRLF, so the CR is ignored as the line isn't - really terminated until the LF comes. Treat a following CR - as end-of-headers as well.*/ - - if(('\r' == linep[0]) || - ('\n' == linep[0])) { - /* end of response-headers from the proxy */ - - if((407 == k->httpcode) && !data->state.authproblem) { - /* If we get a 407 response code with content length - when we have no auth problem, we must ignore the - whole response-body */ - ts->keepon = KEEPON_IGNORE; - - if(ts->cl) { - infof(data, "Ignore %" CURL_FORMAT_CURL_OFF_T - " bytes of response-body", ts->cl); - } - else if(ts->chunked_encoding) { - CHUNKcode r; - CURLcode extra; - - infof(data, "Ignore chunked response-body"); - - /* We set ignorebody true here since the chunked decoder - function will acknowledge that. Pay attention so that this is - cleared again when this function returns! 
*/ - k->ignorebody = TRUE; - - if(linep[1] == '\n') - /* this can only be a LF if the letter at index 0 was a CR */ - linep++; - - /* now parse the chunked piece of data so that we can properly - tell when the stream ends */ - r = Curl_httpchunk_read(data, linep + 1, 1, &gotbytes, - &extra); - if(r == CHUNKE_STOP) { - /* we're done reading chunks! */ - infof(data, "chunk reading DONE"); - ts->keepon = KEEPON_DONE; - } - } - else { - /* without content-length or chunked encoding, we - can't keep the connection alive since the close is - the end signal so we bail out at once instead */ - DEBUGF(LOG_CF(data, cf, "CONNECT: no content-length or chunked")); - ts->keepon = KEEPON_DONE; - } - } - else { - ts->keepon = KEEPON_DONE; - } - - DEBUGASSERT(ts->keepon == KEEPON_IGNORE - || ts->keepon == KEEPON_DONE); - continue; - } - - result = on_resp_header(cf, data, ts, linep); - if(result) - return result; - - Curl_dyn_reset(&ts->rcvbuf); - } /* while there's buffer left and loop is requested */ - - if(error) - result = CURLE_RECV_ERROR; - *done = (ts->keepon == KEEPON_DONE); - if(!result && *done && data->info.httpproxycode/100 != 2) { - /* Deal with the possibly already received authenticate - headers. 'newurl' is set to a new URL if we must loop. 
*/ - result = Curl_http_auth_act(data); - } - return result; -} - -#else /* USE_HYPER */ -/* The Hyper version of CONNECT */ -static CURLcode start_CONNECT(struct Curl_cfilter *cf, - struct Curl_easy *data, - struct tunnel_state *ts) -{ - struct connectdata *conn = cf->conn; - struct hyptransfer *h = &data->hyp; - curl_socket_t tunnelsocket = Curl_conn_cf_get_socket(cf, data); - hyper_io *io = NULL; - hyper_request *req = NULL; - hyper_headers *headers = NULL; - hyper_clientconn_options *options = NULL; - hyper_task *handshake = NULL; - hyper_task *task = NULL; /* for the handshake */ - hyper_clientconn *client = NULL; - hyper_task *sendtask = NULL; /* for the send */ - char *hostheader = NULL; /* for CONNECT */ - char *host = NULL; /* Host: */ - CURLcode result = CURLE_OUT_OF_MEMORY; - - io = hyper_io_new(); - if(!io) { - failf(data, "Couldn't create hyper IO"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - /* tell Hyper how to read/write network data */ - hyper_io_set_userdata(io, data); - hyper_io_set_read(io, Curl_hyper_recv); - hyper_io_set_write(io, Curl_hyper_send); - conn->sockfd = tunnelsocket; - - data->state.hconnect = TRUE; - - /* create an executor to poll futures */ - if(!h->exec) { - h->exec = hyper_executor_new(); - if(!h->exec) { - failf(data, "Couldn't create hyper executor"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - } - - options = hyper_clientconn_options_new(); - hyper_clientconn_options_set_preserve_header_case(options, 1); - hyper_clientconn_options_set_preserve_header_order(options, 1); - - if(!options) { - failf(data, "Couldn't create hyper client options"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - - hyper_clientconn_options_exec(options, h->exec); - - /* "Both the `io` and the `options` are consumed in this function - call" */ - handshake = hyper_clientconn_handshake(io, options); - if(!handshake) { - failf(data, "Couldn't create hyper client handshake"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - io = 
NULL; - options = NULL; - - if(HYPERE_OK != hyper_executor_push(h->exec, handshake)) { - failf(data, "Couldn't hyper_executor_push the handshake"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - handshake = NULL; /* ownership passed on */ - - task = hyper_executor_poll(h->exec); - if(!task) { - failf(data, "Couldn't hyper_executor_poll the handshake"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - - client = hyper_task_value(task); - hyper_task_free(task); - req = hyper_request_new(); - if(!req) { - failf(data, "Couldn't hyper_request_new"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - if(hyper_request_set_method(req, (uint8_t *)"CONNECT", - strlen("CONNECT"))) { - failf(data, "error setting method"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - - infof(data, "Establish HTTP proxy tunnel to %s:%d", - ts->hostname, ts->remote_port); - - /* This only happens if we've looped here due to authentication - reasons, and we don't really use the newly cloned URL here - then. Just free() it. 
*/ - Curl_safefree(data->req.newurl); - - result = CONNECT_host(data, conn, ts->hostname, ts->remote_port, - &hostheader, &host); - if(result) - goto error; - - if(hyper_request_set_uri(req, (uint8_t *)hostheader, - strlen(hostheader))) { - failf(data, "error setting path"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - if(data->set.verbose) { - char *se = aprintf("CONNECT %s HTTP/1.1\r\n", hostheader); - if(!se) { - result = CURLE_OUT_OF_MEMORY; - goto error; - } - Curl_debug(data, CURLINFO_HEADER_OUT, se, strlen(se)); - free(se); - } - /* Setup the proxy-authorization header, if any */ - result = Curl_http_output_auth(data, conn, "CONNECT", HTTPREQ_GET, - hostheader, TRUE); - if(result) - goto error; - Curl_safefree(hostheader); - - /* default is 1.1 */ - if((conn->http_proxy.proxytype == CURLPROXY_HTTP_1_0) && - (HYPERE_OK != hyper_request_set_version(req, - HYPER_HTTP_VERSION_1_0))) { - failf(data, "error setting HTTP version"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - - headers = hyper_request_headers(req); - if(!headers) { - failf(data, "hyper_request_headers"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - if(host) { - result = Curl_hyper_header(data, headers, host); - if(result) - goto error; - Curl_safefree(host); - } - - if(data->state.aptr.proxyuserpwd) { - result = Curl_hyper_header(data, headers, - data->state.aptr.proxyuserpwd); - if(result) - goto error; - } - - if(!Curl_checkProxyheaders(data, conn, STRCONST("User-Agent")) && - data->set.str[STRING_USERAGENT]) { - struct dynbuf ua; - Curl_dyn_init(&ua, DYN_HTTP_REQUEST); - result = Curl_dyn_addf(&ua, "User-Agent: %s\r\n", - data->set.str[STRING_USERAGENT]); - if(result) - goto error; - result = Curl_hyper_header(data, headers, Curl_dyn_ptr(&ua)); - if(result) - goto error; - Curl_dyn_free(&ua); - } - - if(!Curl_checkProxyheaders(data, conn, STRCONST("Proxy-Connection"))) { - result = Curl_hyper_header(data, headers, - "Proxy-Connection: Keep-Alive"); - if(result) - goto error; 
- } - - result = Curl_add_custom_headers(data, TRUE, headers); - if(result) - goto error; - - sendtask = hyper_clientconn_send(client, req); - if(!sendtask) { - failf(data, "hyper_clientconn_send"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - - if(HYPERE_OK != hyper_executor_push(h->exec, sendtask)) { - failf(data, "Couldn't hyper_executor_push the send"); - result = CURLE_OUT_OF_MEMORY; - goto error; - } - -error: - free(host); - free(hostheader); - if(io) - hyper_io_free(io); - if(options) - hyper_clientconn_options_free(options); - if(handshake) - hyper_task_free(handshake); - if(client) - hyper_clientconn_free(client); - return result; -} - -static CURLcode send_CONNECT(struct Curl_easy *data, - struct connectdata *conn, - struct tunnel_state *ts, - bool *done) -{ - struct hyptransfer *h = &data->hyp; - hyper_task *task = NULL; - hyper_error *hypererr = NULL; - CURLcode result = CURLE_OK; - - (void)ts; - (void)conn; - do { - task = hyper_executor_poll(h->exec); - if(task) { - bool error = hyper_task_type(task) == HYPER_TASK_ERROR; - if(error) - hypererr = hyper_task_value(task); - hyper_task_free(task); - if(error) { - /* this could probably use a better error code? 
*/ - result = CURLE_OUT_OF_MEMORY; - goto error; - } - } - } while(task); -error: - *done = (result == CURLE_OK); - if(hypererr) { - uint8_t errbuf[256]; - size_t errlen = hyper_error_print(hypererr, errbuf, sizeof(errbuf)); - failf(data, "Hyper: %.*s", (int)errlen, errbuf); - hyper_error_free(hypererr); - } - return result; -} - -static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf, - struct Curl_easy *data, - struct tunnel_state *ts, - bool *done) -{ - struct hyptransfer *h = &data->hyp; - CURLcode result; - int didwhat; - - (void)ts; - *done = FALSE; - result = Curl_hyper_stream(data, cf->conn, &didwhat, done, - CURL_CSELECT_IN | CURL_CSELECT_OUT); - if(result || !*done) - return result; - if(h->exec) { - hyper_executor_free(h->exec); - h->exec = NULL; - } - if(h->read_waker) { - hyper_waker_free(h->read_waker); - h->read_waker = NULL; - } - if(h->write_waker) { - hyper_waker_free(h->write_waker); - h->write_waker = NULL; - } - return result; -} - -#endif /* USE_HYPER */ - -static CURLcode CONNECT(struct Curl_cfilter *cf, - struct Curl_easy *data, - struct tunnel_state *ts) -{ - struct connectdata *conn = cf->conn; - CURLcode result; - bool done; - - if(tunnel_is_established(ts)) - return CURLE_OK; - if(tunnel_is_failed(ts)) - return CURLE_RECV_ERROR; /* Need a cfilter close and new bootstrap */ - - do { - timediff_t check; - - check = Curl_timeleft(data, NULL, TRUE); - if(check <= 0) { - failf(data, "Proxy CONNECT aborted due to timeout"); - result = CURLE_OPERATION_TIMEDOUT; - goto out; - } - - switch(ts->tunnel_state) { - case TUNNEL_INIT: - /* Prepare the CONNECT request and make a first attempt to send. 
*/ - DEBUGF(LOG_CF(data, cf, "CONNECT start")); - result = start_CONNECT(cf, data, ts); - if(result) - goto out; - tunnel_go_state(cf, ts, TUNNEL_CONNECT, data); - /* FALLTHROUGH */ - - case TUNNEL_CONNECT: - /* see that the request is completely sent */ - DEBUGF(LOG_CF(data, cf, "CONNECT send")); - result = send_CONNECT(data, cf->conn, ts, &done); - if(result || !done) - goto out; - tunnel_go_state(cf, ts, TUNNEL_RECEIVE, data); - /* FALLTHROUGH */ - - case TUNNEL_RECEIVE: - /* read what is there */ - DEBUGF(LOG_CF(data, cf, "CONNECT receive")); - result = recv_CONNECT_resp(cf, data, ts, &done); - if(Curl_pgrsUpdate(data)) { - result = CURLE_ABORTED_BY_CALLBACK; - goto out; - } - /* error or not complete yet. return for more multi-multi */ - if(result || !done) - goto out; - /* got it */ - tunnel_go_state(cf, ts, TUNNEL_RESPONSE, data); - /* FALLTHROUGH */ - - case TUNNEL_RESPONSE: - DEBUGF(LOG_CF(data, cf, "CONNECT response")); - if(data->req.newurl) { - /* not the "final" response, we need to do a follow up request. - * If the other side indicated a connection close, or if someone - * else told us to close this connection, do so now. - */ - if(ts->close_connection || conn->bits.close) { - /* Close this filter and the sub-chain, re-connect the - * sub-chain and continue. Closing this filter will - * reset our tunnel state. To avoid recursion, we return - * and expect to be called again. - */ - DEBUGF(LOG_CF(data, cf, "CONNECT need to close+open")); - infof(data, "Connect me again please"); - Curl_conn_cf_close(cf, data); - connkeep(conn, "HTTP proxy CONNECT"); - result = Curl_conn_cf_connect(cf->next, data, FALSE, &done); - goto out; - } - else { - /* staying on this connection, reset state */ - tunnel_go_state(cf, ts, TUNNEL_INIT, data); - } - } - break; - - default: - break; - } - - } while(data->req.newurl); - - DEBUGASSERT(ts->tunnel_state == TUNNEL_RESPONSE); - if(data->info.httpproxycode/100 != 2) { - /* a non-2xx response and we have no next url to try. 
*/ - free(data->req.newurl); - data->req.newurl = NULL; - /* failure, close this connection to avoid re-use */ - streamclose(conn, "proxy CONNECT failure"); - tunnel_go_state(cf, ts, TUNNEL_FAILED, data); - failf(data, "CONNECT tunnel failed, response %d", data->req.httpcode); - return CURLE_RECV_ERROR; - } - /* 2xx response, SUCCESS! */ - tunnel_go_state(cf, ts, TUNNEL_ESTABLISHED, data); - infof(data, "CONNECT tunnel established, response %d", - data->info.httpproxycode); - result = CURLE_OK; - -out: - if(result) - tunnel_go_state(cf, ts, TUNNEL_FAILED, data); - return result; -} - static CURLcode http_proxy_cf_connect(struct Curl_cfilter *cf, struct Curl_easy *data, bool blocking, bool *done) { + struct cf_proxy_ctx *ctx = cf->ctx; CURLcode result; - struct tunnel_state *ts = cf->ctx; if(cf->connected) { *done = TRUE; @@ -1074,44 +70,74 @@ static CURLcode http_proxy_cf_connect(struct Curl_cfilter *cf, } DEBUGF(LOG_CF(data, cf, "connect")); +connect_sub: result = cf->next->cft->connect(cf->next, data, blocking, done); if(result || !*done) return result; - DEBUGF(LOG_CF(data, cf, "subchain is connected")); - /* TODO: can we do blocking? */ - /* We want "seamless" operations through HTTP proxy tunnel */ - - /* for the secondary socket (FTP), use the "connect to host" - * but ignore the "connect to port" (use the secondary port) - */ *done = FALSE; - if(!ts) { - result = tunnel_init(&ts, data, cf->conn, cf->sockindex); - if(result) - return result; - cf->ctx = ts; - } + if(!ctx->cf_protocol) { + struct Curl_cfilter *cf_protocol = NULL; + int alpn = Curl_conn_cf_is_ssl(cf->next)? + cf->conn->proxy_alpn : CURL_HTTP_VERSION_1_1; + + /* First time call after the subchain connected */ + switch(alpn) { + case CURL_HTTP_VERSION_NONE: + case CURL_HTTP_VERSION_1_0: + case CURL_HTTP_VERSION_1_1: + DEBUGF(LOG_CF(data, cf, "installing subfilter for HTTP/1.1")); + infof(data, "CONNECT tunnel: HTTP/1.%d negotiated", + (alpn == CURL_HTTP_VERSION_1_0)? 
0 : 1); + result = Curl_cf_h1_proxy_insert_after(cf, data); + if(result) + goto out; + cf_protocol = cf->next; + break; +#ifdef USE_NGHTTP2 + case CURL_HTTP_VERSION_2: + DEBUGF(LOG_CF(data, cf, "installing subfilter for HTTP/2")); + infof(data, "CONNECT tunnel: HTTP/2 negotiated"); + result = Curl_cf_h2_proxy_insert_after(cf, data); + if(result) + goto out; + cf_protocol = cf->next; + break; +#endif + default: + DEBUGF(LOG_CF(data, cf, "installing subfilter for default HTTP/1.1")); + infof(data, "CONNECT tunnel: unsupported ALPN(%d) negotiated", alpn); + result = CURLE_COULDNT_CONNECT; + goto out; + } - result = CONNECT(cf, data, ts); - if(result) - goto out; - Curl_safefree(data->state.aptr.proxyuserpwd); + ctx->cf_protocol = cf_protocol; + /* after we installed the filter "below" us, we call connect + * on our sub-chain again. + */ + goto connect_sub; + } + else { + /* subchain connected and we had already installed the protocol filter. + * This means the protocol tunnel is established, we are done. 
+ */ + DEBUGASSERT(ctx->cf_protocol); + result = CURLE_OK; + } out: - *done = (result == CURLE_OK) && tunnel_is_established(cf->ctx); - if (*done) { + if(!result) { cf->connected = TRUE; - tunnel_free(cf, data); + *done = TRUE; } return result; } -static void http_proxy_cf_get_host(struct Curl_cfilter *cf, - struct Curl_easy *data, - const char **phost, - const char **pdisplay_host, - int *pport) +void Curl_cf_http_proxy_get_host(struct Curl_cfilter *cf, + struct Curl_easy *data, + const char **phost, + const char **pdisplay_host, + int *pport) { (void)data; if(!cf->connected) { @@ -1124,50 +150,38 @@ static void http_proxy_cf_get_host(struct Curl_cfilter *cf, } } -static int http_proxy_cf_get_select_socks(struct Curl_cfilter *cf, - struct Curl_easy *data, - curl_socket_t *socks) -{ - struct tunnel_state *ts = cf->ctx; - int fds; - - fds = cf->next->cft->get_select_socks(cf->next, data, socks); - if(!fds && cf->next->connected && !cf->connected) { - /* If we are not connected, but the filter "below" is - * and not waiting on something, we are tunneling. */ - socks[0] = Curl_conn_cf_get_socket(cf, data); - if(ts) { - /* when we've sent a CONNECT to a proxy, we should rather either - wait for the socket to become readable to be able to get the - response headers or if we're still sending the request, wait - for write. 
*/ - if(ts->CONNECT.sending == HTTPSEND_REQUEST) { - return GETSOCK_WRITESOCK(0); - } - return GETSOCK_READSOCK(0); - } - return GETSOCK_WRITESOCK(0); - } - return fds; -} - static void http_proxy_cf_destroy(struct Curl_cfilter *cf, struct Curl_easy *data) { + struct cf_proxy_ctx *ctx = cf->ctx; + + (void)data; DEBUGF(LOG_CF(data, cf, "destroy")); - tunnel_free(cf, data); + free(ctx); } static void http_proxy_cf_close(struct Curl_cfilter *cf, struct Curl_easy *data) { - DEBUGASSERT(cf->next); + struct cf_proxy_ctx *ctx = cf->ctx; + DEBUGF(LOG_CF(data, cf, "close")); cf->connected = FALSE; - cf->next->cft->close(cf->next, data); - if(cf->ctx) { - tunnel_go_state(cf, cf->ctx, TUNNEL_INIT, data); + if(ctx->cf_protocol) { + struct Curl_cfilter *f; + /* if someone already removed it, we assume he also + * took care of destroying it. */ + for(f = cf->next; f; f = f->next) { + if(f == ctx->cf_protocol) { + /* still in our sub-chain */ + Curl_conn_cf_discard_sub(cf, ctx->cf_protocol, data, FALSE); + break; + } + } + ctx->cf_protocol = NULL; } + if(cf->next) + cf->next->cft->close(cf->next, data); } @@ -1178,8 +192,8 @@ struct Curl_cftype Curl_cft_http_proxy = { http_proxy_cf_destroy, http_proxy_cf_connect, http_proxy_cf_close, - http_proxy_cf_get_host, - http_proxy_cf_get_select_socks, + Curl_cf_http_proxy_get_host, + Curl_cf_def_get_select_socks, Curl_cf_def_data_pending, Curl_cf_def_send, Curl_cf_def_recv, @@ -1189,253 +203,28 @@ struct Curl_cftype Curl_cft_http_proxy = { Curl_cf_def_query, }; -CURLcode Curl_conn_http_proxy_add(struct Curl_easy *data, - struct connectdata *conn, - int sockindex) -{ - struct Curl_cfilter *cf; - CURLcode result; - - result = Curl_cf_create(&cf, &Curl_cft_http_proxy, NULL); - if(!result) - Curl_conn_cf_add(data, conn, sockindex, cf); - return result; -} - CURLcode Curl_cf_http_proxy_insert_after(struct Curl_cfilter *cf_at, struct Curl_easy *data) { struct Curl_cfilter *cf; + struct cf_proxy_ctx *ctx = NULL; CURLcode result; (void)data; - 
result = Curl_cf_create(&cf, &Curl_cft_http_proxy, NULL); - if(!result) - Curl_conn_cf_insert_after(cf_at, cf); - return result; -} - -#endif /* ! CURL_DISABLE_HTTP */ - - -typedef enum { - HAPROXY_INIT, /* init/default/no tunnel state */ - HAPROXY_SEND, /* data_out being sent */ - HAPROXY_DONE /* all work done */ -} haproxy_state; - -struct cf_haproxy_ctx { - int state; - struct dynbuf data_out; -}; - -static void cf_haproxy_ctx_reset(struct cf_haproxy_ctx *ctx) -{ - DEBUGASSERT(ctx); - ctx->state = HAPROXY_INIT; - Curl_dyn_reset(&ctx->data_out); -} - -static void cf_haproxy_ctx_free(struct cf_haproxy_ctx *ctx) -{ - if(ctx) { - Curl_dyn_free(&ctx->data_out); - free(ctx); - } -} - -static CURLcode cf_haproxy_date_out_set(struct Curl_cfilter*cf, - struct Curl_easy *data) -{ - struct cf_haproxy_ctx *ctx = cf->ctx; - CURLcode result; - const char *tcp_version; - - DEBUGASSERT(ctx); - DEBUGASSERT(ctx->state == HAPROXY_INIT); -#ifdef USE_UNIX_SOCKETS - if(cf->conn->unix_domain_socket) - /* the buffer is large enough to hold this! */ - result = Curl_dyn_addn(&ctx->data_out, STRCONST("PROXY UNKNOWN\r\n")); - else { -#endif /* USE_UNIX_SOCKETS */ - /* Emit the correct prefix for IPv6 */ - tcp_version = cf->conn->bits.ipv6 ? 
"TCP6" : "TCP4"; - - result = Curl_dyn_addf(&ctx->data_out, "PROXY %s %s %s %i %i\r\n", - tcp_version, - data->info.conn_local_ip, - data->info.conn_primary_ip, - data->info.conn_local_port, - data->info.conn_primary_port); - -#ifdef USE_UNIX_SOCKETS - } -#endif /* USE_UNIX_SOCKETS */ - return result; -} - -static CURLcode cf_haproxy_connect(struct Curl_cfilter *cf, - struct Curl_easy *data, - bool blocking, bool *done) -{ - struct cf_haproxy_ctx *ctx = cf->ctx; - CURLcode result; - size_t len; - - DEBUGASSERT(ctx); - if(cf->connected) { - *done = TRUE; - return CURLE_OK; - } - - result = cf->next->cft->connect(cf->next, data, blocking, done); - if(result || !*done) - return result; - - switch(ctx->state) { - case HAPROXY_INIT: - result = cf_haproxy_date_out_set(cf, data); - if(result) - goto out; - ctx->state = HAPROXY_SEND; - /* FALLTHROUGH */ - case HAPROXY_SEND: - len = Curl_dyn_len(&ctx->data_out); - if(len > 0) { - ssize_t written = Curl_conn_send(data, cf->sockindex, - Curl_dyn_ptr(&ctx->data_out), - len, &result); - if(written < 0) - goto out; - Curl_dyn_tail(&ctx->data_out, len - (size_t)written); - if(Curl_dyn_len(&ctx->data_out) > 0) { - result = CURLE_OK; - goto out; - } - } - ctx->state = HAPROXY_DONE; - /* FALLTHROUGH */ - default: - Curl_dyn_free(&ctx->data_out); - break; - } - -out: - *done = (!result) && (ctx->state == HAPROXY_DONE); - cf->connected = *done; - return result; -} - -static void cf_haproxy_destroy(struct Curl_cfilter *cf, - struct Curl_easy *data) -{ - (void)data; - DEBUGF(LOG_CF(data, cf, "destroy")); - cf_haproxy_ctx_free(cf->ctx); -} - -static void cf_haproxy_close(struct Curl_cfilter *cf, - struct Curl_easy *data) -{ - DEBUGF(LOG_CF(data, cf, "close")); - cf->connected = FALSE; - cf_haproxy_ctx_reset(cf->ctx); - if(cf->next) - cf->next->cft->close(cf->next, data); -} - -static int cf_haproxy_get_select_socks(struct Curl_cfilter *cf, - struct Curl_easy *data, - curl_socket_t *socks) -{ - int fds; - - fds = 
cf->next->cft->get_select_socks(cf->next, data, socks); - if(!fds && cf->next->connected && !cf->connected) { - /* If we are not connected, but the filter "below" is - * and not waiting on something, we are sending. */ - socks[0] = Curl_conn_cf_get_socket(cf, data); - return GETSOCK_WRITESOCK(0); - } - return fds; -} - - -struct Curl_cftype Curl_cft_haproxy = { - "HAPROXY", - 0, - 0, - cf_haproxy_destroy, - cf_haproxy_connect, - cf_haproxy_close, - Curl_cf_def_get_host, - cf_haproxy_get_select_socks, - Curl_cf_def_data_pending, - Curl_cf_def_send, - Curl_cf_def_recv, - Curl_cf_def_cntrl, - Curl_cf_def_conn_is_alive, - Curl_cf_def_conn_keep_alive, - Curl_cf_def_query, -}; - -static CURLcode cf_haproxy_create(struct Curl_cfilter **pcf, - struct Curl_easy *data) -{ - struct Curl_cfilter *cf = NULL; - struct cf_haproxy_ctx *ctx; - CURLcode result; - - (void)data; - ctx = calloc(sizeof(*ctx), 1); + ctx = calloc(1, sizeof(*ctx)); if(!ctx) { result = CURLE_OUT_OF_MEMORY; goto out; } - ctx->state = HAPROXY_INIT; - Curl_dyn_init(&ctx->data_out, DYN_HAXPROXY); - - result = Curl_cf_create(&cf, &Curl_cft_haproxy, ctx); + result = Curl_cf_create(&cf, &Curl_cft_http_proxy, ctx); if(result) goto out; ctx = NULL; - -out: - cf_haproxy_ctx_free(ctx); - *pcf = result? NULL : cf; - return result; -} - -CURLcode Curl_conn_haproxy_add(struct Curl_easy *data, - struct connectdata *conn, - int sockindex) -{ - struct Curl_cfilter *cf; - CURLcode result; - - result = cf_haproxy_create(&cf, data); - if(result) - goto out; - Curl_conn_cf_add(data, conn, sockindex, cf); - -out: - return result; -} - -CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at, - struct Curl_easy *data) -{ - struct Curl_cfilter *cf; - CURLcode result; - - result = cf_haproxy_create(&cf, data); - if(result) - goto out; Curl_conn_cf_insert_after(cf_at, cf); out: + free(ctx); return result; } -#endif /* !CURL_DISABLE_PROXY */ +#endif /* ! 
CURL_DISABLE_HTTP && !CURL_DISABLE_PROXY */ diff --git a/lib/http_proxy.h b/lib/http_proxy.h index f573da273..2d7164bad 100644 --- a/lib/http_proxy.h +++ b/lib/http_proxy.h @@ -25,34 +25,25 @@ ***************************************************************************/ #include "curl_setup.h" -#include "urldata.h" -#if !defined(CURL_DISABLE_PROXY) +#if !defined(CURL_DISABLE_PROXY) && !defined(CURL_DISABLE_HTTP) + +#include "urldata.h" -#if !defined(CURL_DISABLE_HTTP) /* Default proxy timeout in milliseconds */ #define PROXY_TIMEOUT (3600*1000) -CURLcode Curl_conn_http_proxy_add(struct Curl_easy *data, - struct connectdata *conn, - int sockindex); +void Curl_cf_http_proxy_get_host(struct Curl_cfilter *cf, + struct Curl_easy *data, + const char **phost, + const char **pdisplay_host, + int *pport); CURLcode Curl_cf_http_proxy_insert_after(struct Curl_cfilter *cf_at, struct Curl_easy *data); extern struct Curl_cftype Curl_cft_http_proxy; -#endif /* !CURL_DISABLE_HTTP */ - -CURLcode Curl_conn_haproxy_add(struct Curl_easy *data, - struct connectdata *conn, - int sockindex); - -CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at, - struct Curl_easy *data); - -extern struct Curl_cftype Curl_cft_haproxy; - -#endif /* !CURL_DISABLE_PROXY */ +#endif /* !CURL_DISABLE_PROXY && !CURL_DISABLE_HTTP */ #endif /* HEADER_CURL_HTTP_PROXY_H */ diff --git a/lib/urldata.h b/lib/urldata.h index 15b285c09..cca992a02 100644 --- a/lib/urldata.h +++ b/lib/urldata.h @@ -134,6 +134,7 @@ typedef unsigned int curl_prot_t; #include "hash.h" #include "splay.h" #include "dynbuf.h" +#include "dynhds.h" /* return the count of bytes sent, or -1 on error */ typedef ssize_t (Curl_send)(struct Curl_easy *data, /* transfer */ @@ -1066,6 +1067,9 @@ struct connectdata { (ftp) */ unsigned char alpn; /* APLN TLS negotiated protocol, a CURL_HTTP_VERSION* value */ +#ifndef CURL_DISABLE_PROXY + unsigned char proxy_alpn; /* APLN of proxy tunnel, CURL_HTTP_VERSION* */ +#endif unsigned char transport; 
/* one of the TRNSPRT_* defines */ unsigned char ip_version; /* copied from the Curl_easy at creation time */ unsigned char httpversion; /* the HTTP version*10 reported by the server */ diff --git a/lib/vquic/curl_ngtcp2.c b/lib/vquic/curl_ngtcp2.c index 854edb84f..5a0b15405 100644 --- a/lib/vquic/curl_ngtcp2.c +++ b/lib/vquic/curl_ngtcp2.c @@ -1090,33 +1090,6 @@ static int cb_h3_deferred_consume(nghttp3_conn *conn, int64_t stream3_id, return 0; } -/* Decode HTTP status code. Returns -1 if no valid status code was - decoded. (duplicate from http2.c) */ -static int decode_status_code(const uint8_t *value, size_t len) -{ - int i; - int res; - - if(len != 3) { - return -1; - } - - res = 0; - - for(i = 0; i < 3; ++i) { - char c = value[i]; - - if(c < '0' || c > '9') { - return -1; - } - - res *= 10; - res += c - '0'; - } - - return res; -} - static int cb_h3_end_headers(nghttp3_conn *conn, int64_t stream_id, int fin, void *user_data, void *stream_user_data) { @@ -1167,8 +1140,10 @@ static int cb_h3_recv_header(nghttp3_conn *conn, int64_t stream_id, char line[14]; /* status line is always 13 characters long */ size_t ncopy; - stream->status_code = decode_status_code(h3val.base, h3val.len); - DEBUGASSERT(stream->status_code != -1); + result = Curl_http_decode_status(&stream->status_code, + (const char *)h3val.base, h3val.len); + if(result) + return -1; ncopy = msnprintf(line, sizeof(line), "HTTP/3 %03d \r\n", stream->status_code); DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] status: %s", @@ -2539,7 +2514,7 @@ out: *pcf = (!result)? cf : NULL; if(result) { if(udp_cf) - Curl_conn_cf_discard(udp_cf, data); + Curl_conn_cf_discard_sub(cf, udp_cf, data, TRUE); Curl_safefree(cf); Curl_safefree(ctx); } diff --git a/lib/vquic/curl_quiche.c b/lib/vquic/curl_quiche.c index 01a0d2c7e..d2209adeb 100644 --- a/lib/vquic/curl_quiche.c +++ b/lib/vquic/curl_quiche.c @@ -1547,7 +1547,7 @@ out: *pcf = (!result)? 
cf : NULL; if(result) { if(udp_cf) - Curl_conn_cf_discard(udp_cf, data); + Curl_conn_cf_discard_sub(cf, udp_cf, data, TRUE); Curl_safefree(cf); Curl_safefree(ctx); } diff --git a/lib/vtls/bearssl.c b/lib/vtls/bearssl.c index 7e3eb79ce..a3a557c48 100644 --- a/lib/vtls/bearssl.c +++ b/lib/vtls/bearssl.c @@ -849,7 +849,7 @@ static CURLcode bearssl_connect_step3(struct Curl_cfilter *cf, DEBUGASSERT(ssl_connect_3 == connssl->connecting_state); DEBUGASSERT(backend); - if(cf->conn->bits.tls_enable_alpn) { + if(connssl->alpn) { const char *proto; proto = br_ssl_engine_get_selected_protocol(&backend->ctx.eng); diff --git a/lib/vtls/gtls.c b/lib/vtls/gtls.c index 07dfaa437..3d1906ea4 100644 --- a/lib/vtls/gtls.c +++ b/lib/vtls/gtls.c @@ -1252,7 +1252,7 @@ static CURLcode gtls_verifyserver(struct Curl_cfilter *cf, if(result) goto out; - if(cf->conn->bits.tls_enable_alpn) { + if(connssl->alpn) { gnutls_datum_t proto; int rc; diff --git a/lib/vtls/nss.c b/lib/vtls/nss.c index 12c03900d..5e5dbb744 100644 --- a/lib/vtls/nss.c +++ b/lib/vtls/nss.c @@ -852,14 +852,13 @@ static void HandshakeCallback(PRFileDesc *sock, void *arg) struct Curl_cfilter *cf = (struct Curl_cfilter *)arg; struct ssl_connect_data *connssl = cf->ctx; struct Curl_easy *data = connssl->backend->data; - struct connectdata *conn = cf->conn; unsigned int buflenmax = 50; unsigned char buf[50]; unsigned int buflen; SSLNextProtoState state; DEBUGASSERT(data); - if(!conn->bits.tls_enable_alpn) { + if(!connssl->alpn) { return; } @@ -2096,7 +2095,7 @@ static CURLcode nss_setup_connect(struct Curl_cfilter *cf, #ifdef SSL_ENABLE_ALPN if(SSL_OptionSet(backend->handle, SSL_ENABLE_ALPN, - cf->conn->bits.tls_enable_alpn ? PR_TRUE : PR_FALSE) + connssl->alpn ? 
PR_TRUE : PR_FALSE) != SECSuccess) goto error; #endif diff --git a/lib/vtls/openssl.c b/lib/vtls/openssl.c index 397447007..366fc22ff 100644 --- a/lib/vtls/openssl.c +++ b/lib/vtls/openssl.c @@ -3950,7 +3950,7 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf, /* Sets data and len to negotiated protocol, len is 0 if no protocol was * negotiated */ - if(cf->conn->bits.tls_enable_alpn) { + if(connssl->alpn) { const unsigned char *neg_protocol; unsigned int len; SSL_get0_alpn_selected(backend->handle, &neg_protocol, &len); diff --git a/lib/vtls/sectransp.c b/lib/vtls/sectransp.c index 7f55fb5be..d59f2a8c0 100644 --- a/lib/vtls/sectransp.c +++ b/lib/vtls/sectransp.c @@ -2796,7 +2796,7 @@ check_handshake: } #if(CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILABLE == 1 - if(cf->conn->bits.tls_enable_alpn) { + if(connssl->alpn) { if(__builtin_available(macOS 10.13.4, iOS 11, tvOS 11, *)) { CFArrayRef alpnArr = NULL; CFStringRef chosenProtocol = NULL; diff --git a/lib/vtls/vtls.c b/lib/vtls/vtls.c index ca51919bd..8b6a7bf0c 100644 --- a/lib/vtls/vtls.c +++ b/lib/vtls/vtls.c @@ -130,6 +130,33 @@ static bool blobcmp(struct curl_blob *first, struct curl_blob *second) return !memcmp(first->data, second->data, first->len); /* same data */ } +#ifdef USE_SSL +static const struct alpn_spec ALPN_SPEC_H10 = { + { ALPN_HTTP_1_0 }, 1 +}; +static const struct alpn_spec ALPN_SPEC_H11 = { + { ALPN_HTTP_1_1 }, 1 +}; +#ifdef USE_HTTP2 +static const struct alpn_spec ALPN_SPEC_H2_H11 = { + { ALPN_H2, ALPN_HTTP_1_1 }, 2 +}; +#endif + +static const struct alpn_spec *alpn_get_spec(int httpwant, bool use_alpn) +{ + if(!use_alpn) + return NULL; + if(httpwant == CURL_HTTP_VERSION_1_0) + return &ALPN_SPEC_H10; +#ifdef USE_HTTP2 + if(httpwant >= CURL_HTTP_VERSION_2) + return &ALPN_SPEC_H2_H11; +#endif + return &ALPN_SPEC_H11; +} +#endif /* USE_SSL */ + bool Curl_ssl_config_matches(struct ssl_primary_config *data, @@ -291,7 +318,7 @@ static bool ssl_prefs_check(struct 
Curl_easy *data) } static struct ssl_connect_data *cf_ctx_new(struct Curl_easy *data, - const struct alpn_spec *alpn) + const struct alpn_spec *alpn) { struct ssl_connect_data *ctx; @@ -1733,7 +1760,8 @@ static CURLcode cf_ssl_create(struct Curl_cfilter **pcf, DEBUGASSERT(data->conn); - ctx = cf_ctx_new(data, Curl_alpn_get_spec(data, conn)); + ctx = cf_ctx_new(data, alpn_get_spec(data->state.httpwant, + conn->bits.tls_enable_alpn)); if(!ctx) { result = CURLE_OUT_OF_MEMORY; goto out; @@ -1774,6 +1802,7 @@ CURLcode Curl_cf_ssl_insert_after(struct Curl_cfilter *cf_at, } #ifndef CURL_DISABLE_PROXY + static CURLcode cf_ssl_proxy_create(struct Curl_cfilter **pcf, struct Curl_easy *data, struct connectdata *conn) @@ -1781,8 +1810,17 @@ static CURLcode cf_ssl_proxy_create(struct Curl_cfilter **pcf, struct Curl_cfilter *cf = NULL; struct ssl_connect_data *ctx; CURLcode result; + bool use_alpn = conn->bits.tls_enable_alpn; + int httpwant = CURL_HTTP_VERSION_1_1; - ctx = cf_ctx_new(data, Curl_alpn_get_proxy_spec(data, conn)); +#if defined(USE_HTTP2) && defined(DEBUGBUILD) + if(conn->bits.tunnel_proxy && getenv("CURL_PROXY_TUNNEL_H2")) { + use_alpn = TRUE; + httpwant = CURL_HTTP_VERSION_2; + } +#endif + + ctx = cf_ctx_new(data, alpn_get_spec(httpwant, use_alpn)); if(!ctx) { result = CURLE_OUT_OF_MEMORY; goto out; @@ -1851,15 +1889,16 @@ void *Curl_ssl_get_internals(struct Curl_easy *data, int sockindex, CURLcode Curl_ssl_cfilter_remove(struct Curl_easy *data, int sockindex) { - struct Curl_cfilter *cf = data->conn? data->conn->cfilter[sockindex] : NULL; + struct Curl_cfilter *cf, *head; CURLcode result = CURLE_OK; (void)data; - for(; cf; cf = cf->next) { + head = data->conn? 
data->conn->cfilter[sockindex] : NULL; + for(cf = head; cf; cf = cf->next) { if(cf->cft == &Curl_cft_ssl) { if(Curl_ssl->shut_down(cf, data)) result = CURLE_SSL_SHUTDOWN_FAILED; - Curl_conn_cf_discard(cf, data); + Curl_conn_cf_discard_sub(head, cf, data, FALSE); break; } } @@ -1943,42 +1982,6 @@ struct Curl_cfilter *Curl_ssl_cf_get_ssl(struct Curl_cfilter *cf) return NULL; } -static const struct alpn_spec ALPN_SPEC_H10 = { - { ALPN_HTTP_1_0 }, 1 -}; -static const struct alpn_spec ALPN_SPEC_H11 = { - { ALPN_HTTP_1_1 }, 1 -}; -#ifdef USE_HTTP2 -static const struct alpn_spec ALPN_SPEC_H2_H11 = { - { ALPN_H2, ALPN_HTTP_1_1 }, 2 -}; -#endif - -const struct alpn_spec * -Curl_alpn_get_spec(struct Curl_easy *data, struct connectdata *conn) -{ - if(!conn->bits.tls_enable_alpn) - return NULL; - if(data->state.httpwant == CURL_HTTP_VERSION_1_0) - return &ALPN_SPEC_H10; -#ifdef USE_HTTP2 - if(data->state.httpwant >= CURL_HTTP_VERSION_2) - return &ALPN_SPEC_H2_H11; -#endif - return &ALPN_SPEC_H11; -} - -const struct alpn_spec * -Curl_alpn_get_proxy_spec(struct Curl_easy *data, struct connectdata *conn) -{ - if(!conn->bits.tls_enable_alpn) - return NULL; - if(data->state.httpwant == CURL_HTTP_VERSION_1_0) - return &ALPN_SPEC_H10; - return &ALPN_SPEC_H11; -} - CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf, const struct alpn_spec *spec) { @@ -2031,32 +2034,34 @@ CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf, size_t proto_len) { int can_multi = 0; + unsigned char *palpn = Curl_ssl_cf_is_proxy(cf)? 
+ &cf->conn->proxy_alpn : &cf->conn->alpn; if(proto && proto_len) { if(proto_len == ALPN_HTTP_1_1_LENGTH && !memcmp(ALPN_HTTP_1_1, proto, ALPN_HTTP_1_1_LENGTH)) { - cf->conn->alpn = CURL_HTTP_VERSION_1_1; + *palpn = CURL_HTTP_VERSION_1_1; } else if(proto_len == ALPN_HTTP_1_0_LENGTH && !memcmp(ALPN_HTTP_1_0, proto, ALPN_HTTP_1_0_LENGTH)) { - cf->conn->alpn = CURL_HTTP_VERSION_1_0; + *palpn = CURL_HTTP_VERSION_1_0; } #ifdef USE_HTTP2 else if(proto_len == ALPN_H2_LENGTH && !memcmp(ALPN_H2, proto, ALPN_H2_LENGTH)) { - cf->conn->alpn = CURL_HTTP_VERSION_2; + *palpn = CURL_HTTP_VERSION_2; can_multi = 1; } #endif #ifdef USE_HTTP3 else if(proto_len == ALPN_H3_LENGTH && !memcmp(ALPN_H3, proto, ALPN_H3_LENGTH)) { - cf->conn->alpn = CURL_HTTP_VERSION_3; + *palpn = CURL_HTTP_VERSION_3; can_multi = 1; } #endif else { - cf->conn->alpn = CURL_HTTP_VERSION_NONE; + *palpn = CURL_HTTP_VERSION_NONE; failf(data, "unsupported ALPN protocol: '%.*s'", (int)proto_len, proto); /* TODO: do we want to fail this? Previous code just ignored it and * some vtls backends even ignore the return code of this function. */ @@ -2066,12 +2071,14 @@ CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf, infof(data, VTLS_INFOF_ALPN_ACCEPTED_LEN_1STR, (int)proto_len, proto); } else { - cf->conn->alpn = CURL_HTTP_VERSION_NONE; + *palpn = CURL_HTTP_VERSION_NONE; infof(data, VTLS_INFOF_NO_ALPN); } out: - Curl_multiuse_state(data, can_multi? BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE); + if(!Curl_ssl_cf_is_proxy(cf)) + Curl_multiuse_state(data, can_multi? 
+ BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE); return CURLE_OK; } diff --git a/lib/vtls/vtls.h b/lib/vtls/vtls.h index 0d9e74a69..f24dca15b 100644 --- a/lib/vtls/vtls.h +++ b/lib/vtls/vtls.h @@ -65,58 +65,6 @@ CURLsslset Curl_init_sslset_nolock(curl_sslbackend id, const char *name, #define CURL_SHA256_DIGEST_LENGTH 32 /* fixed size */ #endif -/* see https://www.iana.org/assignments/tls-extensiontype-values/ */ -#define ALPN_HTTP_1_1_LENGTH 8 -#define ALPN_HTTP_1_1 "http/1.1" -#define ALPN_HTTP_1_0_LENGTH 8 -#define ALPN_HTTP_1_0 "http/1.0" -#define ALPN_H2_LENGTH 2 -#define ALPN_H2 "h2" -#define ALPN_H3_LENGTH 2 -#define ALPN_H3 "h3" - -/* conservative sizes on the ALPN entries and count we are handling, - * we can increase these if we ever feel the need or have to accommodate - * ALPN strings from the "outside". */ -#define ALPN_NAME_MAX 10 -#define ALPN_ENTRIES_MAX 3 -#define ALPN_PROTO_BUF_MAX (ALPN_ENTRIES_MAX * (ALPN_NAME_MAX + 1)) - -struct alpn_spec { - const char entries[ALPN_ENTRIES_MAX][ALPN_NAME_MAX]; - size_t count; /* number of entries */ -}; - -struct alpn_proto_buf { - unsigned char data[ALPN_PROTO_BUF_MAX]; - int len; -}; - -CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf, - const struct alpn_spec *spec); -CURLcode Curl_alpn_to_proto_str(struct alpn_proto_buf *buf, - const struct alpn_spec *spec); - -CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf, - struct Curl_easy *data, - const unsigned char *proto, - size_t proto_len); - -/** - * Get the ALPN specification to use for talking to remote host. - * May return NULL if ALPN is disabled on the connection. - */ -const struct alpn_spec * -Curl_alpn_get_spec(struct Curl_easy *data, struct connectdata *conn); - -/** - * Get the ALPN specification to use for talking to the proxy. - * May return NULL if ALPN is disabled on the connection. 
- */ -const struct alpn_spec * -Curl_alpn_get_proxy_spec(struct Curl_easy *data, struct connectdata *conn); - - char *Curl_ssl_snihost(struct Curl_easy *data, const char *host, size_t *olen); bool Curl_ssl_config_matches(struct ssl_primary_config *data, struct ssl_primary_config *needle); diff --git a/lib/vtls/vtls_int.h b/lib/vtls/vtls_int.h index a20ca7db7..ed49339e4 100644 --- a/lib/vtls/vtls_int.h +++ b/lib/vtls/vtls_int.h @@ -29,17 +29,55 @@ #ifdef USE_SSL +/* see https://www.iana.org/assignments/tls-extensiontype-values/ */ +#define ALPN_HTTP_1_1_LENGTH 8 +#define ALPN_HTTP_1_1 "http/1.1" +#define ALPN_HTTP_1_0_LENGTH 8 +#define ALPN_HTTP_1_0 "http/1.0" +#define ALPN_H2_LENGTH 2 +#define ALPN_H2 "h2" +#define ALPN_H3_LENGTH 2 +#define ALPN_H3 "h3" + +/* conservative sizes on the ALPN entries and count we are handling, + * we can increase these if we ever feel the need or have to accommodate + * ALPN strings from the "outside". */ +#define ALPN_NAME_MAX 10 +#define ALPN_ENTRIES_MAX 3 +#define ALPN_PROTO_BUF_MAX (ALPN_ENTRIES_MAX * (ALPN_NAME_MAX + 1)) + +struct alpn_spec { + const char entries[ALPN_ENTRIES_MAX][ALPN_NAME_MAX]; + size_t count; /* number of entries */ +}; + +struct alpn_proto_buf { + unsigned char data[ALPN_PROTO_BUF_MAX]; + int len; +}; + +CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf, + const struct alpn_spec *spec); +CURLcode Curl_alpn_to_proto_str(struct alpn_proto_buf *buf, + const struct alpn_spec *spec); + +CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf, + struct Curl_easy *data, + const unsigned char *proto, + size_t proto_len); + /* Information in each SSL cfilter context: cf->ctx */ struct ssl_connect_data { ssl_connection_state state; ssl_connect_state connecting_state; char *hostname; /* hostname for verification */ char *dispname; /* display version of hostname */ - int port; /* remote port at origin */ const struct alpn_spec *alpn; /* ALPN to use or NULL for none */ struct ssl_backend_data *backend; /* vtls 
backend specific props */ struct cf_call_data call_data; /* data handle used in current call */ struct curltime handshake_done; /* time when handshake finished */ + int port; /* remote port at origin */ + BIT(use_alpn); /* if ALPN shall be used in handshake */ }; diff --git a/lib/vtls/wolfssl.c b/lib/vtls/wolfssl.c index ac68eabab..292872878 100644 --- a/lib/vtls/wolfssl.c +++ b/lib/vtls/wolfssl.c @@ -854,7 +854,7 @@ wolfssl_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data) } #ifdef HAVE_ALPN - if(cf->conn->bits.tls_enable_alpn) { + if(connssl->alpn) { int rc; char *protocol = NULL; unsigned short protocol_len = 0; diff --git a/tests/http/conftest.py b/tests/http/conftest.py index 22386b94d..bd97baa8b 100644 --- a/tests/http/conftest.py +++ b/tests/http/conftest.py @@ -31,8 +31,7 @@ import pytest sys.path.append(os.path.join(os.path.dirname(__file__), '.')) -from testenv import Env, Nghttpx, Httpd - +from testenv import Env, Nghttpx, Httpd, NghttpxQuic, NghttpxFwd @pytest.fixture(scope="package") def env(pytestconfig) -> Env: @@ -68,7 +67,16 @@ def httpd(env) -> Httpd: @pytest.fixture(scope='package') def nghttpx(env, httpd) -> Optional[Nghttpx]: - nghttpx = Nghttpx(env=env) + nghttpx = NghttpxQuic(env=env) + if env.have_h3(): + nghttpx.clear_logs() + assert nghttpx.start() + yield nghttpx + nghttpx.stop() + +@pytest.fixture(scope='package') +def nghttpx_fwd(env, httpd) -> Optional[Nghttpx]: + nghttpx = NghttpxFwd(env=env) if env.have_h3(): nghttpx.clear_logs() assert nghttpx.start() diff --git a/tests/http/scorecard.py b/tests/http/scorecard.py index bc3808fb4..3c29159a0 100644 --- a/tests/http/scorecard.py +++ b/tests/http/scorecard.py @@ -35,7 +35,6 @@ from typing import Dict, Any, Optional, List from testenv import Env, Httpd, Nghttpx, CurlClient, Caddy, ExecResult - log = logging.getLogger(__name__) diff --git a/tests/http/test_01_basic.py b/tests/http/test_01_basic.py index 66c9ae50e..30b87007b 100644 --- a/tests/http/test_01_basic.py +++ 
b/tests/http/test_01_basic.py @@ -46,37 +46,34 @@ class TestBasic: curl = CurlClient(env=env) url = f'http://{env.domain1}:{env.http_port}/data.json' r = curl.http_get(url=url) - r.check_exit_code(0) - assert r.response['status'] == 200 + r.check_response(http_status=200) assert r.json['server'] == env.domain1 # simple https: GET, any http version + @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") def test_01_02_https_get(self, env: Env, httpd): curl = CurlClient(env=env) url = f'https://{env.domain1}:{env.https_port}/data.json' r = curl.http_get(url=url) - r.check_exit_code(0) - assert r.response['status'] == 200 + r.check_response(http_status=200) assert r.json['server'] == env.domain1 # simple https: GET, h2 wanted and got + @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") def test_01_03_h2_get(self, env: Env, httpd): curl = CurlClient(env=env) url = f'https://{env.domain1}:{env.https_port}/data.json' r = curl.http_get(url=url, extra_args=['--http2']) - r.check_exit_code(0) - assert r.response['status'] == 200 - assert r.response['protocol'] == 'HTTP/2' + r.check_response(http_status=200, protocol='HTTP/2') assert r.json['server'] == env.domain1 # simple https: GET, h2 unsupported, fallback to h1 + @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") def test_01_04_h2_unsupported(self, env: Env, httpd): curl = CurlClient(env=env) url = f'https://{env.domain2}:{env.https_port}/data.json' r = curl.http_get(url=url, extra_args=['--http2']) - r.check_exit_code(0) - assert r.response['status'] == 200 - assert r.response['protocol'] == 'HTTP/1.1' + r.check_response(http_status=200, protocol='HTTP/1.1') assert r.json['server'] == env.domain2 # simple h3: GET, want h3 and get it @@ -85,7 +82,5 @@ class TestBasic: curl = CurlClient(env=env) url = f'https://{env.domain1}:{env.h3_port}/data.json' r = curl.http_get(url=url, extra_args=['--http3']) - r.check_exit_code(0) - 
assert r.response['status'] == 200 - assert r.response['protocol'] == 'HTTP/3' + r.check_response(http_status=200, protocol='HTTP/3') assert r.json['server'] == env.domain1 diff --git a/tests/http/test_02_download.py b/tests/http/test_02_download.py index 5804adaf8..f718c8b98 100644 --- a/tests/http/test_02_download.py +++ b/tests/http/test_02_download.py @@ -59,8 +59,7 @@ class TestDownload: curl = CurlClient(env=env) url = f'https://{env.authority_for(env.domain1, proto)}/data.json' r = curl.http_download(urls=[url], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(http_status=200) # download 2 files @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) @@ -70,8 +69,7 @@ class TestDownload: curl = CurlClient(env=env) url = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-1]' r = curl.http_download(urls=[url], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=2, exp_status=200) + r.check_response(http_status=200, count=2) # download 100 files sequentially @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) @@ -82,10 +80,7 @@ class TestDownload: curl = CurlClient(env=env) urln = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-99]' r = curl.http_download(urls=[urln], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=100, exp_status=200) - # http/1.1 sequential transfers will open 1 connection - assert r.total_connects == 1 + r.check_response(http_status=200, count=100, connect_count=1) # download 100 files parallel @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) @@ -99,14 +94,13 @@ class TestDownload: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--parallel', '--parallel-max', f'{max_parallel}' ]) - r.check_exit_code(0) - r.check_stats(count=100, exp_status=200) + r.check_response(http_status=200, count=100) if proto == 'http/1.1': # http/1.1 parallel transfers will open multiple connections - assert 
r.total_connects > 1 + assert r.total_connects > 1, r.dump_logs() else: # http2 parallel transfers will use one connection (common limit is 100) - assert r.total_connects == 1 + assert r.total_connects == 1, r.dump_logs() # download 500 files sequential @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) @@ -119,14 +113,13 @@ class TestDownload: curl = CurlClient(env=env) urln = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-499]' r = curl.http_download(urls=[urln], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=500, exp_status=200) + r.check_response(http_status=200, count=500) if proto == 'http/1.1': # http/1.1 parallel transfers will open multiple connections - assert r.total_connects > 1 + assert r.total_connects > 1, r.dump_logs() else: # http2 parallel transfers will use one connection (common limit is 100) - assert r.total_connects == 1 + assert r.total_connects == 1, r.dump_logs() # download 500 files parallel @pytest.mark.parametrize("proto", ['h2', 'h3']) @@ -141,10 +134,7 @@ class TestDownload: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--parallel', '--parallel-max', f'{max_parallel}' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) - # http2 parallel transfers will use one connection (common limit is 100) - assert r.total_connects == 1 + r.check_response(http_status=200, count=count, connect_count=1) # download files parallel, check connection reuse/multiplex @pytest.mark.parametrize("proto", ['h2', 'h3']) @@ -159,8 +149,7 @@ class TestDownload: with_stats=True, extra_args=[ '--parallel', '--parallel-max', '200' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(http_status=200, count=count) # should have used at most 2 connections only (test servers allow 100 req/conn) # it may be just 1 on slow systems where request are answered faster than # curl can exhaust the capacity or if curl runs with address-sanitizer speed @@ -177,8 
+166,7 @@ class TestDownload: with_stats=True, extra_args=[ '--parallel' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) # http/1.1 should have used count connections assert r.total_connects == count, "http/1.1 should use this many connections" @@ -191,8 +179,7 @@ class TestDownload: urln = f'https://{env.authority_for(env.domain1, proto)}/data-1m?[0-{count-1}]' curl = CurlClient(env=env) r = curl.http_download(urls=[urln], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) def test_02_09_1MB_parallel(self, env: Env, @@ -205,8 +192,7 @@ class TestDownload: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--parallel' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) def test_02_10_10MB_serial(self, env: Env, @@ -217,8 +203,7 @@ class TestDownload: urln = f'https://{env.authority_for(env.domain1, proto)}/data-10m?[0-{count-1}]' curl = CurlClient(env=env) r = curl.http_download(urls=[urln], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) def test_02_11_10MB_parallel(self, env: Env, @@ -233,8 +218,7 @@ class TestDownload: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--parallel' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) @pytest.mark.parametrize("proto", ['h2', 'h3']) def test_02_12_head_serial_https(self, env: Env, @@ -247,8 +231,7 @@ class TestDownload: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--head' ]) - r.check_exit_code(0) - 
r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) @pytest.mark.parametrize("proto", ['h2']) def test_02_13_head_serial_h2c(self, env: Env, @@ -261,8 +244,7 @@ class TestDownload: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--head', '--http2-prior-knowledge', '--fail-early' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) def test_02_20_h2_small_frames(self, env: Env, httpd, repeat): # Test case to reproduce content corruption as observed in @@ -288,8 +270,7 @@ class TestDownload: r = curl.http_download(urls=[urln], alpn_proto="h2", extra_args=[ '--parallel', '--parallel-max', '2' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) srcfile = os.path.join(httpd.docs_dir, 'data-1m') for i in range(count): dfile = curl.download_file(i) diff --git a/tests/http/test_03_goaway.py b/tests/http/test_03_goaway.py index 5da60aa83..00fbce9a4 100644 --- a/tests/http/test_03_goaway.py +++ b/tests/http/test_03_goaway.py @@ -66,8 +66,7 @@ class TestGoAway: assert httpd.reload() t.join() r: ExecResult = self.r - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) # reload will shut down the connection gracefully with GOAWAY # we expect to see a second connection opened afterwards assert r.total_connects == 2 @@ -101,16 +100,14 @@ class TestGoAway: assert nghttpx.reload(timeout=timedelta(seconds=2)) t.join() r: ExecResult = self.r - r.check_exit_code(0) + # this should take `count` seconds to retrieve + assert r.duration >= timedelta(seconds=count) + r.check_response(count=count, http_status=200, connect_count=2) # reload will shut down the connection gracefully with GOAWAY # we expect to see a second connection opened afterwards - assert r.total_connects == 2 for idx, s in enumerate(r.stats): if s['num_connects'] > 0: 
log.debug(f'request {idx} connected') - # this should take `count` seconds to retrieve - assert r.duration >= timedelta(seconds=count) - r.check_stats(count=count, exp_status=200, exp_exitcode=0) # download files sequentially with delay, reload server for GOAWAY def test_03_03_h1_goaway(self, env: Env, httpd, nghttpx, repeat): @@ -133,11 +130,9 @@ class TestGoAway: assert httpd.reload() t.join() r: ExecResult = self.r - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200, connect_count=2) # reload will shut down the connection gracefully with GOAWAY # we expect to see a second connection opened afterwards - assert r.total_connects == 2 for idx, s in enumerate(r.stats): if s['num_connects'] > 0: log.debug(f'request {idx} connected') diff --git a/tests/http/test_04_stuttered.py b/tests/http/test_04_stuttered.py index 0cad4c227..2a5f1e266 100644 --- a/tests/http/test_04_stuttered.py +++ b/tests/http/test_04_stuttered.py @@ -55,8 +55,7 @@ class TestStuttered: f'/curltest/tweak?id=[0-{count - 1}]'\ '&chunks=100&chunk_size=100&chunk_delay=10ms' r = curl.http_download(urls=[urln], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) # download 50 files in 100 chunks a 100 bytes with 10ms delay between # prepend 100 file requests to warm up connection processing limits @@ -75,8 +74,7 @@ class TestStuttered: '&chunks=100&chunk_size=100&chunk_delay=10ms' r = curl.http_download(urls=[url1, urln], alpn_proto=proto, extra_args=['--parallel']) - r.check_exit_code(0) - r.check_stats(count=warmups+count, exp_status=200) + r.check_response(count=warmups+count, http_status=200) assert r.total_connects == 1 t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total') if t_max < (5 * t_min) and t_min < 2: @@ -98,8 +96,7 @@ class TestStuttered: '&chunks=1000&chunk_size=10&chunk_delay=100us' r = curl.http_download(urls=[url1, 
urln], alpn_proto=proto, extra_args=['--parallel']) - r.check_exit_code(0) - r.check_stats(count=warmups+count, exp_status=200) + r.check_response(count=warmups+count, http_status=200) assert r.total_connects == 1 t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total') if t_max < (5 * t_min): @@ -121,8 +118,7 @@ class TestStuttered: '&chunks=10000&chunk_size=1&chunk_delay=50us' r = curl.http_download(urls=[url1, urln], alpn_proto=proto, extra_args=['--parallel']) - r.check_exit_code(0) - r.check_stats(count=warmups+count, exp_status=200) + r.check_response(count=warmups+count, http_status=200) assert r.total_connects == 1 t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total') if t_max < (5 * t_min): diff --git a/tests/http/test_05_errors.py b/tests/http/test_05_errors.py index f27ba8c39..587ba33c4 100644 --- a/tests/http/test_05_errors.py +++ b/tests/http/test_05_errors.py @@ -62,7 +62,7 @@ class TestErrors: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--retry', '0' ]) - r.check_exit_code_not(0) + r.check_exit_code(False) invalid_stats = [] for idx, s in enumerate(r.stats): if 'exitcode' not in s or s['exitcode'] not in [18, 56, 92]: @@ -85,7 +85,7 @@ class TestErrors: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--retry', '0', '--parallel', ]) - r.check_exit_code_not(0) + r.check_exit_code(False) assert len(r.stats) == count, f'did not get all stats: {r}' invalid_stats = [] for idx, s in enumerate(r.stats): diff --git a/tests/http/test_06_eyeballs.py b/tests/http/test_06_eyeballs.py index f30ecd36f..3eeb0793b 100644 --- a/tests/http/test_06_eyeballs.py +++ b/tests/http/test_06_eyeballs.py @@ -50,8 +50,7 @@ class TestEyeballs: curl = CurlClient(env=env) urln = f'https://{env.authority_for(env.domain1, "h3")}/data.json' r = curl.http_download(urls=[urln], extra_args=['--http3-only']) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + 
r.check_response(count=1, http_status=200) assert r.stats[0]['http_version'] == '3' # download using only HTTP/3 on missing server @@ -61,7 +60,7 @@ class TestEyeballs: curl = CurlClient(env=env) urln = f'https://{env.authority_for(env.domain1, "h3")}/data.json' r = curl.http_download(urls=[urln], extra_args=['--http3-only']) - r.check_exit_code(7) + r.check_response(exitcode=7, http_status=None) # download using HTTP/3 on missing server with fallback on h2 @pytest.mark.skipif(condition=not Env.have_h3(), reason=f"missing HTTP/3 support") @@ -70,8 +69,7 @@ class TestEyeballs: curl = CurlClient(env=env) urln = f'https://{env.authority_for(env.domain1, "h3")}/data.json' r = curl.http_download(urls=[urln], extra_args=['--http3']) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) assert r.stats[0]['http_version'] == '2' # download using HTTP/3 on missing server with fallback on http/1.1 @@ -81,8 +79,7 @@ class TestEyeballs: curl = CurlClient(env=env) urln = f'https://{env.authority_for(env.domain2, "h3")}/data.json' r = curl.http_download(urls=[urln], extra_args=['--http3']) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) assert r.stats[0]['http_version'] == '1.1' # make a successful https: transfer and observer the timer stats @@ -90,8 +87,7 @@ class TestEyeballs: curl = CurlClient(env=env) urln = f'https://{env.authority_for(env.domain1, "h2")}/data.json' r = curl.http_download(urls=[urln]) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) assert r.stats[0]['time_connect'] > 0.0 assert r.stats[0]['time_appconnect'] > 0.0 @@ -102,8 +98,7 @@ class TestEyeballs: r = curl.http_download(urls=[urln], extra_args=[ '--resolve', f'not-valid.com:{env.https_port}:127.0.0.1' ]) - r.check_exit_code_not(0) - r.check_stats(count=1, exp_status=0) + r.check_response(count=1, http_status=0, exitcode=False) 
assert r.stats[0]['time_connect'] > 0.0 # was tcp connected assert r.stats[0]['time_appconnect'] == 0 # but not SSL verified @@ -114,8 +109,7 @@ class TestEyeballs: r = curl.http_download(urls=[urln], extra_args=[ '--resolve', f'not-valid.com:{1}:127.0.0.1' ]) - r.check_exit_code_not(0) - r.check_stats(count=1, exp_status=0) + r.check_response(count=1, http_status=None, exitcode=False) assert r.stats[0]['time_connect'] == 0 # no one should have listened assert r.stats[0]['time_appconnect'] == 0 # did not happen either diff --git a/tests/http/test_07_upload.py b/tests/http/test_07_upload.py index 795e5f2fe..5b4c1d8a8 100644 --- a/tests/http/test_07_upload.py +++ b/tests/http/test_07_upload.py @@ -58,8 +58,7 @@ class TestUpload: curl = CurlClient(env=env) url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]' r = curl.http_upload(urls=[url], data=data, alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) respdata = open(curl.response_file(0)).readlines() assert respdata == [data] @@ -74,8 +73,7 @@ class TestUpload: curl = CurlClient(env=env) url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]' r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) indata = open(fdata).readlines() respdata = open(curl.response_file(0)).readlines() assert respdata == indata @@ -92,8 +90,7 @@ class TestUpload: curl = CurlClient(env=env) url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]' r = curl.http_upload(urls=[url], data=data, alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) for i in range(count): respdata = open(curl.response_file(i)).readlines() assert respdata == [data] @@ -112,8 +109,7 @@ class TestUpload: url = 
f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]' r = curl.http_upload(urls=[url], data=data, alpn_proto=proto, extra_args=['--parallel']) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) for i in range(count): respdata = open(curl.response_file(i)).readlines() assert respdata == [data] @@ -130,10 +126,9 @@ class TestUpload: curl = CurlClient(env=env) url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]' r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) indata = open(fdata).readlines() - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) for i in range(count): respdata = open(curl.response_file(i)).readlines() assert respdata == indata @@ -150,10 +145,8 @@ class TestUpload: curl = CurlClient(env=env) url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]' r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) indata = open(fdata).readlines() - r.check_stats(count=count, exp_status=200) for i in range(count): respdata = open(curl.response_file(i)).readlines() assert respdata == indata @@ -172,8 +165,7 @@ class TestUpload: url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]' r = curl.http_upload(urls=[url], data=data, alpn_proto=proto, extra_args=['--parallel']) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) for i in range(count): respdata = open(curl.response_file(i)).readlines() assert respdata == [data] @@ -192,8 +184,7 @@ class TestUpload: url = f'https://{env.authority_for(env.domain1, 
proto)}/curltest/echo?id=[0-{count-1}]' r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto, extra_args=['--parallel']) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) self.check_download(count, fdata, curl) # PUT 100k @@ -209,10 +200,9 @@ class TestUpload: url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]' r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto, extra_args=['--parallel']) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) exp_data = [f'{os.path.getsize(fdata)}'] - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) for i in range(count): respdata = open(curl.response_file(i)).readlines() assert respdata == exp_data @@ -230,10 +220,9 @@ class TestUpload: url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]&chunk_delay=10ms' r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto, extra_args=['--parallel']) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) exp_data = [f'{os.path.getsize(fdata)}'] - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) for i in range(count): respdata = open(curl.response_file(i)).readlines() assert respdata == exp_data diff --git a/tests/http/test_08_caddy.py b/tests/http/test_08_caddy.py index 6ce34ec89..157419222 100644 --- a/tests/http/test_08_caddy.py +++ b/tests/http/test_08_caddy.py @@ -35,6 +35,7 @@ log = logging.getLogger(__name__) @pytest.mark.skipif(condition=not Env.has_caddy(), reason=f"missing caddy") +@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") class TestCaddy: @pytest.fixture(autouse=True, scope='class') @@ -71,8 +72,7 @@ class TestCaddy: curl = CurlClient(env=env) url = 
f'https://{env.domain1}:{caddy.port}/data.json' r = curl.http_download(urls=[url], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) # download 1MB files sequentially @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) @@ -86,10 +86,7 @@ class TestCaddy: curl = CurlClient(env=env) urln = f'https://{env.domain1}:{caddy.port}/data1.data?[0-{count-1}]' r = curl.http_download(urls=[urln], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) - # sequential transfers will open 1 connection - assert r.total_connects == 1 + r.check_response(count=count, http_status=200, connect_count=1) # download 1MB files parallel @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) @@ -105,13 +102,12 @@ class TestCaddy: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--parallel' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) if proto == 'http/1.1': # http/1.1 parallel transfers will open multiple connections - assert r.total_connects > 1 + assert r.total_connects > 1, r.dump_logs() else: - assert r.total_connects == 1 + assert r.total_connects == 1, r.dump_logs() # download 5MB files sequentially @pytest.mark.parametrize("proto", ['h2', 'h3']) @@ -125,10 +121,7 @@ class TestCaddy: curl = CurlClient(env=env) urln = f'https://{env.domain1}:{caddy.port}/data5.data?[0-{count-1}]' r = curl.http_download(urls=[urln], alpn_proto=proto) - assert r.exit_code == 0 - r.check_stats(count=count, exp_status=200) - # sequential transfers will open 1 connection - assert r.total_connects == 1 + r.check_response(count=count, http_status=200, connect_count=1) # download 10MB files sequentially @pytest.mark.parametrize("proto", ['h2', 'h3']) @@ -142,10 +135,7 @@ class TestCaddy: curl = CurlClient(env=env) urln = f'https://{env.domain1}:{caddy.port}/data10.data?[0-{count-1}]' r = 
curl.http_download(urls=[urln], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) - # sequential transfers will open 1 connection - assert r.total_connects == 1 + r.check_response(count=count, http_status=200, connect_count=1) # download 10MB files parallel @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3']) @@ -161,11 +151,10 @@ class TestCaddy: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--parallel' ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) if proto == 'http/1.1': # http/1.1 parallel transfers will open multiple connections - assert r.total_connects > 1 + assert r.total_connects > 1, r.dump_logs() else: - assert r.total_connects == 1 + assert r.total_connects == 1, r.dump_logs() diff --git a/tests/http/test_10_proxy.py b/tests/http/test_10_proxy.py index b93d665b0..87e74e1bf 100644 --- a/tests/http/test_10_proxy.py +++ b/tests/http/test_10_proxy.py @@ -24,11 +24,13 @@ # ########################################################################### # +import filecmp import logging import os +import re import pytest -from testenv import Env, CurlClient +from testenv import Env, CurlClient, ExecResult log = logging.getLogger(__name__) @@ -37,13 +39,33 @@ log = logging.getLogger(__name__) class TestProxy: @pytest.fixture(autouse=True, scope='class') - def _class_scope(self, env, httpd): + def _class_scope(self, env, httpd, nghttpx_fwd): push_dir = os.path.join(httpd.docs_dir, 'push') if not os.path.exists(push_dir): os.makedirs(push_dir) + if env.have_nghttpx(): + nghttpx_fwd.start_if_needed() + env.make_data_file(indir=env.gen_dir, fname="data-100k", fsize=100*1024) + env.make_data_file(indir=env.gen_dir, fname="data-10m", fsize=10*1024*1024) httpd.clear_extra_configs() httpd.reload() + def set_tunnel_proto(self, proto): + if proto == 'h2': + os.environ['CURL_PROXY_TUNNEL_H2'] = '1' + return 'HTTP/2' + else: + 
os.environ.pop('CURL_PROXY_TUNNEL_H2', None) + return 'HTTP/1.1' + + def get_tunnel_proto_used(self, r: ExecResult): + for l in r.trace_lines: + m = re.match(r'.* CONNECT tunnel: (\S+) negotiated$', l) + if m: + return m.group(1) + assert False, f'tunnel protocol not found in:\n{"".join(r.trace_lines)}' + return None + # download via http: proxy (no tunnel) def test_10_01_proxy_http(self, env: Env, httpd, repeat): curl = CurlClient(env=env) @@ -53,13 +75,13 @@ class TestProxy: '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/', '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1', ]) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) # download via https: proxy (no tunnel) @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), reason='curl lacks HTTPS-proxy support') - def test_10_02_proxy_https(self, env: Env, httpd, repeat): + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + def test_10_02_proxy_https(self, env: Env, httpd, nghttpx_fwd, repeat): curl = CurlClient(env=env) url = f'http://localhost:{env.http_port}/data.json' r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, @@ -68,8 +90,7 @@ class TestProxy: '--resolve', f'{env.proxy_domain}:{env.proxys_port}:127.0.0.1', '--proxy-cacert', env.ca.cert_file, ]) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) # download http: via http: proxytunnel def test_10_03_proxytunnel_http(self, env: Env, httpd, repeat): @@ -81,27 +102,27 @@ class TestProxy: '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/', '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1', ]) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) # download http: via https: proxytunnel @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), reason='curl lacks 
HTTPS-proxy support') - def test_10_04_proxy_https(self, env: Env, httpd, repeat): + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + def test_10_04_proxy_https(self, env: Env, httpd, nghttpx_fwd, repeat): curl = CurlClient(env=env) url = f'http://localhost:{env.http_port}/data.json' r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, extra_args=[ '--proxytunnel', - '--proxy', f'https://{env.proxy_domain}:{env.proxys_port}/', - '--resolve', f'{env.proxy_domain}:{env.proxys_port}:127.0.0.1', + '--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1', '--proxy-cacert', env.ca.cert_file, ]) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) # download https: with proto via http: proxytunnel @pytest.mark.parametrize("proto", ['http/1.1', 'h2']) + @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") def test_10_05_proxytunnel_http(self, env: Env, httpd, proto, repeat): curl = CurlClient(env=env) url = f'https://localhost:{env.https_port}/data.json' @@ -112,28 +133,129 @@ class TestProxy: '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/', '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1', ]) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) - exp_proto = 'HTTP/2' if proto == 'h2' else 'HTTP/1.1' - assert r.response['protocol'] == exp_proto + r.check_response(count=1, http_status=200, + protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1') # download https: with proto via https: proxytunnel @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), reason='curl lacks HTTPS-proxy support') @pytest.mark.parametrize("proto", ['http/1.1', 'h2']) - def test_10_06_proxy_https(self, env: Env, httpd, proto, repeat): + @pytest.mark.parametrize("tunnel", ['http/1.1', 'h2']) + @pytest.mark.skipif(condition=not 
Env.have_nghttpx(), reason="no nghttpx available") + def test_10_06_proxytunnel_https(self, env: Env, httpd, nghttpx_fwd, proto, tunnel, repeat): + if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): + pytest.skip('only supported with nghttp2') + exp_tunnel_proto = self.set_tunnel_proto(tunnel) curl = CurlClient(env=env) - url = f'https://localhost:{env.https_port}/data.json' + url = f'https://localhost:{env.https_port}/data.json?[0-0]' r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, with_headers=True, extra_args=[ '--proxytunnel', - '--proxy', f'https://{env.proxy_domain}:{env.proxys_port}/', - '--resolve', f'{env.proxy_domain}:{env.proxys_port}:127.0.0.1', + '--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1', + '--proxy-cacert', env.ca.cert_file, + ]) + r.check_response(count=1, http_status=200, + protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1') + assert self.get_tunnel_proto_used(r) == exp_tunnel_proto + srcfile = os.path.join(httpd.docs_dir, 'data.json') + dfile = curl.download_file(0) + assert filecmp.cmp(srcfile, dfile, shallow=False) + + # download many https: with proto via https: proxytunnel + @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") + @pytest.mark.parametrize("proto", ['http/1.1', 'h2']) + @pytest.mark.parametrize("tunnel", ['http/1.1', 'h2']) + @pytest.mark.parametrize("fname, fcount", [ + ['data.json', 100], + ['data-100k', 20], + ['data-1m', 5] + ]) + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + def test_10_07_pts_down_small(self, env: Env, httpd, nghttpx_fwd, proto, + tunnel, fname, fcount, repeat): + if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): + pytest.skip('only supported with nghttp2') + count = fcount + exp_tunnel_proto = self.set_tunnel_proto(tunnel) + curl = CurlClient(env=env) + url = 
f'https://localhost:{env.https_port}/{fname}?[0-{count-1}]' + r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, + with_headers=True, + extra_args=[ + '--proxytunnel', + '--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1', + '--proxy-cacert', env.ca.cert_file, + ]) + r.check_response(count=count, http_status=200, + protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1') + assert self.get_tunnel_proto_used(r) == exp_tunnel_proto + srcfile = os.path.join(httpd.docs_dir, fname) + for i in range(count): + dfile = curl.download_file(i) + assert filecmp.cmp(srcfile, dfile, shallow=False) + + # upload many https: with proto via https: proxytunnel + @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") + @pytest.mark.parametrize("proto", ['http/1.1', 'h2']) + @pytest.mark.parametrize("tunnel", ['http/1.1', 'h2']) + @pytest.mark.parametrize("fname, fcount", [ + ['data.json', 50], + ['data-100k', 20], + ['data-1m', 5] + ]) + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + def test_10_08_upload_seq_large(self, env: Env, httpd, nghttpx, proto, + tunnel, fname, fcount, repeat): + if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): + pytest.skip('only supported with nghttp2') + count = fcount + srcfile = os.path.join(httpd.docs_dir, fname) + exp_tunnel_proto = self.set_tunnel_proto(tunnel) + curl = CurlClient(env=env) + url = f'https://localhost:{env.https_port}/curltest/echo?id=[0-{count-1}]' + r = curl.http_upload(urls=[url], data=f'@{srcfile}', alpn_proto=proto, + extra_args=[ + '--proxytunnel', + '--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1', + '--proxy-cacert', env.ca.cert_file, + ]) + assert self.get_tunnel_proto_used(r) == exp_tunnel_proto + r.check_response(count=count, http_status=200) + indata = 
open(srcfile).readlines() + r.check_response(count=count, http_status=200) + for i in range(count): + respdata = open(curl.response_file(i)).readlines() + assert respdata == indata + + @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") + @pytest.mark.parametrize("tunnel", ['http/1.1', 'h2']) + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + def test_10_09_reuse_ser(self, env: Env, httpd, nghttpx_fwd, tunnel, repeat): + if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): + pytest.skip('only supported with nghttp2') + exp_tunnel_proto = self.set_tunnel_proto(tunnel) + curl = CurlClient(env=env) + url1 = f'https://localhost:{env.https_port}/data.json' + url2 = f'http://localhost:{env.http_port}/data.json' + r = curl.http_download(urls=[url1, url2], alpn_proto='http/1.1', with_stats=True, + with_headers=True, + extra_args=[ + '--proxytunnel', + '--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1', '--proxy-cacert', env.ca.cert_file, ]) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) - exp_proto = 'HTTP/2' if proto == 'h2' else 'HTTP/1.1' - assert r.response['protocol'] == exp_proto + r.check_response(count=2, http_status=200) + assert self.get_tunnel_proto_used(r) == exp_tunnel_proto + if tunnel == 'h2': + # TODO: we would like to reuse the first connection for the + # second URL, but this is currently not possible + # assert r.total_connects == 1 + assert r.total_connects == 2 + else: + assert r.total_connects == 2 diff --git a/tests/http/test_11_unix.py b/tests/http/test_11_unix.py index 86ecd6f86..dc2684adb 100644 --- a/tests/http/test_11_unix.py +++ b/tests/http/test_11_unix.py @@ -101,10 +101,10 @@ class TestUnix: extra_args=[ '--unix-socket', uds_faker.path, ]) - r.check_exit_code(0) - r.check_stats(count=1, exp_status=200) + r.check_response(count=1, http_status=200) # download https: via 
unix socket + @pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") def test_11_02_unix_connect_http(self, env: Env, httpd, uds_faker, repeat): curl = CurlClient(env=env) url = f'https://{env.domain1}:{env.https_port}/data.json' @@ -112,7 +112,7 @@ class TestUnix: extra_args=[ '--unix-socket', uds_faker.path, ]) - r.check_exit_code(35) + r.check_response(exitcode=35, http_status=None) # download HTTP/3 via unix socket @pytest.mark.skipif(condition=not Env.have_h3(), reason='h3 not supported') @@ -124,4 +124,4 @@ class TestUnix: extra_args=[ '--unix-socket', uds_faker.path, ]) - r.check_exit_code(96) + r.check_response(exitcode=96, http_status=None) diff --git a/tests/http/test_12_reuse.py b/tests/http/test_12_reuse.py index cd22af6e9..302929956 100644 --- a/tests/http/test_12_reuse.py +++ b/tests/http/test_12_reuse.py @@ -37,6 +37,7 @@ log = logging.getLogger(__name__) @pytest.mark.skipif(condition=Env.curl_uses_lib('bearssl'), reason='BearSSL too slow') +@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL") class TestReuse: # check if HTTP/1.1 handles 'Connection: close' correctly @@ -52,8 +53,7 @@ class TestReuse: curl = CurlClient(env=env) urln = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-{count-1}]' r = curl.http_download(urls=[urln], alpn_proto=proto) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) # Server sends `Connection: close` on every 2nd request, requiring # a new connection assert r.total_connects == count/2 @@ -72,8 +72,7 @@ class TestReuse: r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[ '--rate', '30/m', ]) - r.check_exit_code(0) - r.check_stats(count=count, exp_status=200) + r.check_response(count=count, http_status=200) # Connections time out on server before we send another request, assert r.total_connects == count # we do not see how often a request was retried in the stats, so 
diff --git a/tests/http/test_13_proxy_auth.py b/tests/http/test_13_proxy_auth.py new file mode 100644 index 000000000..b20a84945 --- /dev/null +++ b/tests/http/test_13_proxy_auth.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +#*************************************************************************** +# _ _ ____ _ +# Project ___| | | | _ \| | +# / __| | | | |_) | | +# | (__| |_| | _ <| |___ +# \___|\___/|_| \_\_____| +# +# Copyright (C) Daniel Stenberg, , et al. +# +# This software is licensed as described in the file COPYING, which +# you should have received as part of this distribution. The terms +# are also available at https://curl.se/docs/copyright.html. +# +# You may opt to use, copy, modify, merge, publish, distribute and/or sell +# copies of the Software, and permit persons to whom the Software is +# furnished to do so, under the terms of the COPYING file. +# +# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY +# KIND, either express or implied. 
+# +# SPDX-License-Identifier: curl +# +########################################################################### +# +import filecmp +import logging +import os +import re +import time +import pytest + +from testenv import Env, CurlClient + + +log = logging.getLogger(__name__) + + +@pytest.mark.skipif(condition=Env.setup_incomplete(), + reason=f"missing: {Env.incomplete_reason()}") +class TestProxyAuth: + + @pytest.fixture(autouse=True, scope='class') + def _class_scope(self, env, httpd, nghttpx_fwd): + if env.have_nghttpx(): + nghttpx_fwd.start_if_needed() + httpd.clear_extra_configs() + httpd.set_proxy_auth(True) + httpd.reload() + yield + httpd.set_proxy_auth(False) + httpd.reload() + + def set_tunnel_proto(self, proto): + if proto == 'h2': + os.environ['CURL_PROXY_TUNNEL_H2'] = '1' + return 'HTTP/2' + else: + os.environ.pop('CURL_PROXY_TUNNEL_H2', None) + return 'HTTP/1.1' + + def get_tunnel_proto_used(self, curl: CurlClient): + assert os.path.exists(curl.trace_file) + for l in open(curl.trace_file).readlines(): + m = re.match(r'.* == Info: CONNECT tunnel: (\S+) negotiated', l) + if m: + return m.group(1) + return None + + # download via http: proxy (no tunnel), no auth + def test_13_01_proxy_no_auth(self, env: Env, httpd, repeat): + curl = CurlClient(env=env) + url = f'http://localhost:{env.http_port}/data.json' + r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, + extra_args=[ + '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/', + '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1', + ]) + r.check_response(count=1, http_status=407) + + # download via http: proxy (no tunnel), auth + def test_13_02_proxy_auth(self, env: Env, httpd, repeat): + curl = CurlClient(env=env) + url = f'http://localhost:{env.http_port}/data.json' + r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, + extra_args=[ + '--proxy-user', 'proxy:proxy', + '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/', + 
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1', + ]) + r.check_response(count=1, http_status=200) + + @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), + reason='curl lacks HTTPS-proxy support') + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + def test_13_03_proxys_no_auth(self, env: Env, httpd, nghttpx_fwd, repeat): + curl = CurlClient(env=env) + url = f'http://localhost:{env.http_port}/data.json' + r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, + extra_args=[ + '--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1', + '--proxy-cacert', env.ca.cert_file, + ]) + r.check_response(count=1, http_status=407) + + @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), + reason='curl lacks HTTPS-proxy support') + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + def test_13_04_proxys_auth(self, env: Env, httpd, nghttpx_fwd, repeat): + curl = CurlClient(env=env) + url = f'http://localhost:{env.http_port}/data.json' + r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, + extra_args=[ + '--proxy-user', 'proxy:proxy', + '--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1', + '--proxy-cacert', env.ca.cert_file, + ]) + r.check_response(count=1, http_status=200) + + def test_13_05_tunnel_http_no_auth(self, env: Env, httpd, repeat): + curl = CurlClient(env=env) + url = f'http://localhost:{env.http_port}/data.json' + r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, + extra_args=[ + '--proxytunnel', + '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/', + '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1', + ]) + # expect "COULD_NOT_CONNECT" + r.check_response(exitcode=56, http_status=None) + + def 
test_13_06_tunnel_http_auth(self, env: Env, httpd, repeat): + curl = CurlClient(env=env) + url = f'http://localhost:{env.http_port}/data.json' + r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True, + extra_args=[ + '--proxytunnel', + '--proxy-user', 'proxy:proxy', + '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/', + '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1', + ]) + r.check_response(count=1, http_status=200) + + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), + reason='curl lacks HTTPS-proxy support') + @pytest.mark.parametrize("proto", ['http/1.1', 'h2']) + @pytest.mark.parametrize("tunnel", ['http/1.1', 'h2']) + def test_13_07_tunnels_no_auth(self, env: Env, httpd, proto, tunnel, repeat): + if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'): + pytest.skip('only supported with nghttp2') + exp_tunnel_proto = self.set_tunnel_proto(tunnel) + curl = CurlClient(env=env) + url = f'https://localhost:{env.https_port}/data.json' + r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, + with_headers=True, with_trace=True, + extra_args=[ + '--proxytunnel', + '--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1', + '--proxy-cacert', env.ca.cert_file, + ]) + # expect "COULD_NOT_CONNECT" + r.check_response(exitcode=56, http_status=None) + assert self.get_tunnel_proto_used(curl) == exp_tunnel_proto + + @pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available") + @pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'), + reason='curl lacks HTTPS-proxy support') + @pytest.mark.parametrize("proto", ['http/1.1', 'h2']) + @pytest.mark.parametrize("tunnel", ['http/1.1', 'h2']) + def test_13_08_tunnels_auth(self, env: Env, httpd, proto, tunnel, repeat): + if tunnel == 'h2' and not 
env.curl_uses_lib('nghttp2'): + pytest.skip('only supported with nghttp2') + exp_tunnel_proto = self.set_tunnel_proto(tunnel) + curl = CurlClient(env=env) + url = f'https://localhost:{env.https_port}/data.json' + r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True, + with_headers=True, with_trace=True, + extra_args=[ + '--proxytunnel', + '--proxy-user', 'proxy:proxy', + '--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/', + '--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1', + '--proxy-cacert', env.ca.cert_file, + ]) + r.check_response(count=1, http_status=200, + protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1') + assert self.get_tunnel_proto_used(curl) == exp_tunnel_proto + diff --git a/tests/http/testenv/__init__.py b/tests/http/testenv/__init__.py index 8eb7632ae..3da668570 100644 --- a/tests/http/testenv/__init__.py +++ b/tests/http/testenv/__init__.py @@ -24,9 +24,14 @@ # ########################################################################### # +import pytest +pytest.register_assert_rewrite("testenv.env", "testenv.curl", "testenv.caddy", + "testenv.httpd", "testenv.nghttpx") + from .env import Env from .certs import TestCA, Credentials from .caddy import Caddy from .httpd import Httpd from .curl import CurlClient, ExecResult from .nghttpx import Nghttpx +from .nghttpx import Nghttpx, NghttpxQuic, NghttpxFwd diff --git a/tests/http/testenv/curl.py b/tests/http/testenv/curl.py index 98c1bd4ab..a272dbf27 100644 --- a/tests/http/testenv/curl.py +++ b/tests/http/testenv/curl.py @@ -24,6 +24,7 @@ # ########################################################################### # +import pytest import json import logging import os @@ -31,7 +32,7 @@ import re import shutil import subprocess from datetime import timedelta, datetime -from typing import List, Optional, Dict +from typing import List, Optional, Dict, Union from urllib.parse import urlparse from .env import Env @@ -110,6 +111,10 @@ class ExecResult: def 
stderr(self) -> str: return ''.join(self._stderr) + @property + def trace_lines(self) -> List[str]: + return self._trace if self._trace else self._stderr + @property def duration(self) -> timedelta: return self._duration @@ -159,53 +164,97 @@ class ExecResult: def add_assets(self, assets: List): self._assets.extend(assets) - def check_exit_code(self, code: int): - assert self.exit_code == code, \ - f'expected exit code {code}, '\ - f'got {self.exit_code}\n{self._dump_logs()}' - - def check_exit_code_not(self, code: int): - assert self.exit_code != code, \ - f'expected exit code other than {code}\n{self._dump_logs()}' - - def check_responses(self, count: int, exp_status: Optional[int] = None, - exp_exitcode: Optional[int] = None): - assert len(self.responses) == count, \ - f'response count: expected {count}, ' \ - f'got {len(self.responses)}\n{self._dump_logs()}' - if exp_status is not None: - for idx, x in enumerate(self.responses): - assert x['status'] == exp_status, \ - f'response #{idx} status: expected {exp_status},'\ - f'got {x["status"]}\n{self._dump_logs()}' - if exp_exitcode is not None: - for idx, x in enumerate(self.responses): - if 'exitcode' in x: - assert x['exitcode'] == 0, \ - f'response #{idx} exitcode: expected {exp_exitcode}, '\ - f'got {x["exitcode"]}\n{self._dump_logs()}' - if self.with_stats: - self.check_stats(count) + def check_exit_code(self, code: Union[int, bool]): + if code is True: + assert self.exit_code == 0, f'expected exit code {code}, '\ + f'got {self.exit_code}\n{self.dump_logs()}' + elif code is False: + assert self.exit_code != 0, f'expected exit code {code}, '\ + f'got {self.exit_code}\n{self.dump_logs()}' + else: + assert self.exit_code == code, f'expected exit code {code}, '\ + f'got {self.exit_code}\n{self.dump_logs()}' + + def check_response(self, http_status: Optional[int] = 200, + count: Optional[int] = 1, + protocol: Optional[str] = None, + exitcode: Optional[int] = 0, + connect_count: Optional[int] = None): + if 
exitcode: + self.check_exit_code(exitcode) + if self.with_stats and isinstance(exitcode, int): + for idx, x in enumerate(self.stats): + if 'exitcode' in x: + assert int(x['exitcode']) == exitcode, \ + f'response #{idx} exitcode: expected {exitcode}, '\ + f'got {x["exitcode"]}\n{self.dump_logs()}' - def check_stats(self, count: int, exp_status: Optional[int] = None, - exp_exitcode: Optional[int] = None): + if self.with_stats: + assert len(self.stats) == count, \ + f'response count: expected {count}, ' \ + f'got {len(self.stats)}\n{self.dump_logs()}' + else: + assert len(self.responses) == count, \ + f'response count: expected {count}, ' \ + f'got {len(self.responses)}\n{self.dump_logs()}' + if http_status is not None: + if self.with_stats: + for idx, x in enumerate(self.stats): + assert 'http_code' in x, \ + f'response #{idx} reports no http_code\n{self.dump_logs()}' + assert x['http_code'] == http_status, \ + f'response #{idx} http_code: expected {http_status}, '\ + f'got {x["http_code"]}\n{self.dump_logs()}' + else: + for idx, x in enumerate(self.responses): + assert x['status'] == http_status, \ + f'response #{idx} status: expected {http_status},'\ + f'got {x["status"]}\n{self.dump_logs()}' + if protocol is not None: + if self.with_stats: + http_version = None + if protocol == 'HTTP/1.1': + http_version = '1.1' + elif protocol == 'HTTP/2': + http_version = '2' + elif protocol == 'HTTP/3': + http_version = '3' + if http_version is not None: + for idx, x in enumerate(self.stats): + assert x['http_version'] == http_version, \ + f'response #{idx} protocol: expected http/{http_version},' \ + f'got version {x["http_version"]}\n{self.dump_logs()}' + else: + for idx, x in enumerate(self.responses): + assert x['protocol'] == protocol, \ + f'response #{idx} protocol: expected {protocol},'\ + f'got {x["protocol"]}\n{self.dump_logs()}' + if connect_count is not None: + assert self.total_connects == connect_count, \ + f'expected {connect_count}, but {self.total_connects} '\ + 
f'were made\n{self.dump_logs()}' + + def check_stats(self, count: int, http_status: Optional[int] = None, + exitcode: Optional[int] = None): + if exitcode is None: + self.check_exit_code(0) assert len(self.stats) == count, \ - f'stats count: expected {count}, got {len(self.stats)}\n{self._dump_logs()}' - if exp_status is not None: + f'stats count: expected {count}, got {len(self.stats)}\n{self.dump_logs()}' + if http_status is not None: for idx, x in enumerate(self.stats): assert 'http_code' in x, \ - f'status #{idx} reports no http_code\n{self._dump_logs()}' - assert x['http_code'] == exp_status, \ - f'status #{idx} http_code: expected {exp_status}, '\ - f'got {x["http_code"]}\n{self._dump_logs()}' - if exp_exitcode is not None: + f'status #{idx} reports no http_code\n{self.dump_logs()}' + assert x['http_code'] == http_status, \ + f'status #{idx} http_code: expected {http_status}, '\ + f'got {x["http_code"]}\n{self.dump_logs()}' + if exitcode is not None: for idx, x in enumerate(self.stats): if 'exitcode' in x: assert x['exitcode'] == 0, \ - f'status #{idx} exitcode: expected {exp_exitcode}, '\ - f'got {x["exitcode"]}\n{self._dump_logs()}' + f'status #{idx} exitcode: expected {exitcode}, '\ + f'got {x["exitcode"]}\n{self.dump_logs()}' - def _dump_logs(self): + def dump_logs(self): lines = [] lines.append('>>--stdout ----------------------------------------------\n') lines.extend(self._stdout) @@ -252,6 +301,10 @@ class CurlClient: def download_file(self, i: int) -> str: return os.path.join(self.run_dir, f'download_{i}.data') + @property + def trace_file(self) -> str: + return self._tracefile + def _rmf(self, path): if os.path.exists(path): return os.remove(path) @@ -272,6 +325,7 @@ class CurlClient: with_stats: bool = True, with_headers: bool = False, no_save: bool = False, + with_trace: bool = False, extra_args: List[str] = None): if extra_args is None: extra_args = [] @@ -292,12 +346,14 @@ class CurlClient: ]) return self._raw(urls, alpn_proto=alpn_proto, 
options=extra_args, with_stats=with_stats, - with_headers=with_headers) + with_headers=with_headers, + with_trace=with_trace) def http_upload(self, urls: List[str], data: str, alpn_proto: Optional[str] = None, with_stats: bool = True, with_headers: bool = False, + with_trace: bool = False, extra_args: Optional[List[str]] = None): if extra_args is None: extra_args = [] @@ -310,12 +366,14 @@ class CurlClient: ]) return self._raw(urls, alpn_proto=alpn_proto, options=extra_args, with_stats=with_stats, - with_headers=with_headers) + with_headers=with_headers, + with_trace=with_trace) def http_put(self, urls: List[str], data=None, fdata=None, alpn_proto: Optional[str] = None, with_stats: bool = True, with_headers: bool = False, + with_trace: bool = False, extra_args: Optional[List[str]] = None): if extra_args is None: extra_args = [] @@ -333,7 +391,8 @@ class CurlClient: return self._raw(urls, intext=data, alpn_proto=alpn_proto, options=extra_args, with_stats=with_stats, - with_headers=with_headers) + with_headers=with_headers, + with_trace=with_trace) def response_file(self, idx: int): return os.path.join(self._run_dir, f'download_{idx}.data') @@ -379,15 +438,16 @@ class CurlClient: duration=datetime.now() - start, with_stats=with_stats) - def _raw(self, urls, intext='', timeout=10, options=None, insecure=False, + def _raw(self, urls, intext='', timeout=None, options=None, insecure=False, alpn_proto: Optional[str] = None, force_resolve=True, with_stats=False, - with_headers=True): + with_headers=True, + with_trace=False): args = self._complete_args( urls=urls, timeout=timeout, options=options, insecure=insecure, alpn_proto=alpn_proto, force_resolve=force_resolve, - with_headers=with_headers) + with_headers=with_headers, with_trace=with_trace) r = self._run(args, intext=intext, with_stats=with_stats) if r.exit_code == 0 and with_headers: self._parse_headerfile(self._headerfile, r=r) @@ -398,14 +458,15 @@ class CurlClient: def _complete_args(self, urls, timeout=None, 
options=None, insecure=False, force_resolve=True, alpn_proto: Optional[str] = None, - with_headers: bool = True): + with_headers: bool = True, + with_trace: bool = False): if not isinstance(urls, list): urls = [urls] args = [self._curl, "-s", "--path-as-is"] if with_headers: args.extend(["-D", self._headerfile]) - if self.env.verbose > 2: + if with_trace or self.env.verbose > 2: args.extend(['--trace', self._tracefile, '--trace-time']) elif self.env.verbose > 1: args.extend(['--trace', self._tracefile]) diff --git a/tests/http/testenv/env.py b/tests/http/testenv/env.py index 6dcb4b2ea..1e175e4a9 100644 --- a/tests/http/testenv/env.py +++ b/tests/http/testenv/env.py @@ -106,6 +106,7 @@ class EnvConfig: 'https': socket.SOCK_STREAM, 'proxy': socket.SOCK_STREAM, 'proxys': socket.SOCK_STREAM, + 'h2proxys': socket.SOCK_STREAM, 'caddy': socket.SOCK_STREAM, 'caddys': socket.SOCK_STREAM, }) @@ -229,10 +230,18 @@ class Env: def incomplete_reason() -> Optional[str]: return Env.CONFIG.get_incomplete_reason() + @staticmethod + def have_nghttpx() -> bool: + return Env.CONFIG.nghttpx is not None + @staticmethod def have_h3_server() -> bool: return Env.CONFIG.nghttpx_with_h3 + @staticmethod + def have_ssl_curl() -> bool: + return 'ssl' in Env.CONFIG.curl_props['features'] + @staticmethod def have_h2_curl() -> bool: return 'http2' in Env.CONFIG.curl_props['features'] @@ -371,13 +380,21 @@ class Env: return self.https_port @property - def proxy_port(self) -> str: + def proxy_port(self) -> int: return self.CONFIG.ports['proxy'] @property - def proxys_port(self) -> str: + def proxys_port(self) -> int: return self.CONFIG.ports['proxys'] + @property + def h2proxys_port(self) -> int: + return self.CONFIG.ports['h2proxys'] + + def pts_port(self, proto: str = 'http/1.1') -> int: + # proxy tunnel port + return self.CONFIG.ports['h2proxys' if proto == 'h2' else 'proxys'] + @property def caddy(self) -> str: return self.CONFIG.caddy diff --git a/tests/http/testenv/httpd.py 
b/tests/http/testenv/httpd.py index 5b20d31e2..612da1006 100644 --- a/tests/http/testenv/httpd.py +++ b/tests/http/testenv/httpd.py @@ -44,7 +44,9 @@ class Httpd: MODULES = [ 'log_config', 'logio', 'unixd', 'version', 'watchdog', - 'authn_core', 'authz_user', 'authz_core', 'authz_host', + 'authn_core', 'authn_file', + 'authz_user', 'authz_core', 'authz_host', + 'auth_basic', 'auth_digest', 'env', 'filter', 'headers', 'mime', 'rewrite', 'http2', 'ssl', 'proxy', 'proxy_http', 'proxy_connect', 'mpm_event', @@ -56,7 +58,7 @@ class Httpd: MOD_CURLTEST = None - def __init__(self, env: Env): + def __init__(self, env: Env, proxy_auth: bool = False): self.env = env self._cmd = env.apachectl self._apache_dir = os.path.join(env.gen_dir, 'apache') @@ -68,7 +70,9 @@ class Httpd: self._logs_dir = os.path.join(self._apache_dir, 'logs') self._error_log = os.path.join(self._logs_dir, 'error_log') self._tmp_dir = os.path.join(self._apache_dir, 'tmp') + self._passwords = os.path.join(self._conf_dir, 'passwords') self._mods_dir = None + self._proxy_auth = proxy_auth self._extra_configs = {} assert env.apxs p = subprocess.run(args=[env.apxs, '-q', 'libexecdir'], @@ -103,6 +107,9 @@ class Httpd: def clear_extra_configs(self): self._extra_configs = {} + def set_proxy_auth(self, active: bool): + self._proxy_auth = active + def _run(self, args, intext=''): env = {} for key, val in os.environ.items(): @@ -146,6 +153,7 @@ class Httpd: r = self._apachectl('stop') if r.exit_code == 0: return self.wait_dead(timeout=timedelta(seconds=5)) + log.fatal(f'stopping httpd failed: {r}') return r.exit_code == 0 def restart(self): @@ -211,6 +219,9 @@ class Httpd: 'server': f'{domain2}', } fd.write(JSONEncoder().encode(data)) + if self._proxy_auth: + with open(self._passwords, 'w') as fd: + fd.write('proxy:$apr1$FQfeInbs$WQZbODJlVg60j0ogEIlTW/\n') with open(self._conf_file, 'w') as fd: for m in self.MODULES: if os.path.exists(os.path.join(self._mods_dir, f'mod_{m}.so')): @@ -223,9 +234,6 @@ class Httpd: 
f'PidFile httpd.pid', f'ErrorLog {self._error_log}', f'LogLevel {self._get_log_level()}', - f'LogLevel http:trace4', - f'LogLevel proxy:trace4', - f'LogLevel proxy_http:trace4', f'H2MinWorkers 16', f'H2MaxWorkers 128', f'H2Direct on', @@ -284,30 +292,33 @@ class Httpd: conf.extend([ # http forward proxy f'', f' ServerName {proxy_domain}', - f' Protocols h2c, http/1.1', + f' Protocols h2c http/1.1', f' ProxyRequests On', f' ProxyVia On', f' AllowCONNECT {self.env.http_port} {self.env.https_port}', - f' ', - f' Require ip 127.0.0.1', - f' ', + ]) + conf.extend(self._get_proxy_conf()) + conf.extend([ f'', + f'', ]) conf.extend([ # https forward proxy f'', f' ServerName {proxy_domain}', - f' Protocols h2, http/1.1', + f' Protocols h2 http/1.1', f' SSLEngine on', f' SSLCertificateFile {proxy_creds.cert_file}', f' SSLCertificateKeyFile {proxy_creds.pkey_file}', f' ProxyRequests On', f' ProxyVia On', f' AllowCONNECT {self.env.http_port} {self.env.https_port}', - f' ', - f' Require ip 127.0.0.1', - f' ', + ]) + conf.extend(self._get_proxy_conf()) + conf.extend([ f'', + f'', ]) + fd.write("\n".join(conf)) with open(os.path.join(self._conf_dir, 'mime.types'), 'w') as fd: fd.write("\n".join([ @@ -316,13 +327,31 @@ class Httpd: '' ])) + def _get_proxy_conf(self): + if self._proxy_auth: + return [ + f' ', + f' AuthType Basic', + f' AuthName "Restricted Proxy"', + f' AuthBasicProvider file', + f' AuthUserFile "{self._passwords}"', + f' Require user proxy', + f' ', + ] + else: + return [ + f' ', + f' Require ip 127.0.0.1', + f' ', + ] + def _get_log_level(self): - #if self.env.verbose > 3: - # return 'trace2' - #if self.env.verbose > 2: - # return 'trace1' - #if self.env.verbose > 1: - # return 'debug' + if self.env.verbose > 3: + return 'trace2' + if self.env.verbose > 2: + return 'trace1' + if self.env.verbose > 1: + return 'debug' return 'info' def _curltest_conf(self) -> List[str]: diff --git a/tests/http/testenv/nghttpx.py b/tests/http/testenv/nghttpx.py index 
1a26aa84f..234b31c0e 100644 --- a/tests/http/testenv/nghttpx.py +++ b/tests/http/testenv/nghttpx.py @@ -41,10 +41,12 @@ log = logging.getLogger(__name__) class Nghttpx: - def __init__(self, env: Env): + def __init__(self, env: Env, port: int, name: str): self.env = env + self._name = name + self._port = port self._cmd = env.nghttpx - self._run_dir = os.path.join(env.gen_dir, 'nghttpx') + self._run_dir = os.path.join(env.gen_dir, name) self._pid_file = os.path.join(self._run_dir, 'nghttpx.pid') self._conf_file = os.path.join(self._run_dir, 'nghttpx.conf') self._error_log = os.path.join(self._run_dir, 'nghttpx.log') @@ -76,27 +78,7 @@ class Nghttpx: return True def start(self, wait_live=True): - self._mkpath(self._tmp_dir) - if self._process: - self.stop() - args = [ - self._cmd, - f'--frontend=*,{self.env.h3_port};quic', - f'--backend=127.0.0.1,{self.env.https_port};{self.env.domain1};sni={self.env.domain1};proto=h2;tls', - f'--backend=127.0.0.1,{self.env.http_port}', - f'--log-level=INFO', - f'--pid-file={self._pid_file}', - f'--errorlog-file={self._error_log}', - f'--conf={self._conf_file}', - f'--cacert={self.env.ca.cert_file}', - self.env.get_credentials(self.env.domain1).pkey_file, - self.env.get_credentials(self.env.domain1).cert_file, - ] - ngerr = open(self._stderr, 'a') - self._process = subprocess.Popen(args=args, stderr=ngerr) - if self._process.returncode is not None: - return False - return not wait_live or self.wait_live(timeout=timedelta(seconds=5)) + pass def stop_if_running(self): if self.is_running(): @@ -146,7 +128,7 @@ class Nghttpx: curl = CurlClient(env=self.env, run_dir=self._tmp_dir) try_until = datetime.now() + timeout while datetime.now() < try_until: - check_url = f'https://{self.env.domain1}:{self.env.h3_port}/' + check_url = f'https://{self.env.domain1}:{self._port}/' r = curl.http_get(url=check_url, extra_args=['--http3-only']) if r.exit_code != 0: return True @@ -159,7 +141,7 @@ class Nghttpx: curl = CurlClient(env=self.env, 
run_dir=self._tmp_dir) try_until = datetime.now() + timeout while datetime.now() < try_until: - check_url = f'https://{self.env.domain1}:{self.env.h3_port}/' + check_url = f'https://{self.env.domain1}:{self._port}/' r = curl.http_get(url=check_url, extra_args=[ '--http3-only', '--trace', 'curl.trace', '--trace-time' ]) @@ -184,3 +166,94 @@ class Nghttpx: fd.write("\n".join([ '# do we need something here?' ])) + + +class NghttpxQuic(Nghttpx): + + def __init__(self, env: Env): + super().__init__(env=env, name='nghttpx-quic', port=env.h3_port) + + def start(self, wait_live=True): + self._mkpath(self._tmp_dir) + if self._process: + self.stop() + args = [ + self._cmd, + f'--frontend=*,{self.env.h3_port};quic', + f'--backend=127.0.0.1,{self.env.https_port};{self.env.domain1};sni={self.env.domain1};proto=h2;tls', + f'--backend=127.0.0.1,{self.env.http_port}', + f'--log-level=INFO', + f'--pid-file={self._pid_file}', + f'--errorlog-file={self._error_log}', + f'--conf={self._conf_file}', + f'--cacert={self.env.ca.cert_file}', + self.env.get_credentials(self.env.domain1).pkey_file, + self.env.get_credentials(self.env.domain1).cert_file, + f'--frontend-http3-window-size=1M', + f'--frontend-http3-max-window-size=10M', + f'--frontend-http3-connection-window-size=10M', + f'--frontend-http3-max-connection-window-size=100M', + ] + ngerr = open(self._stderr, 'a') + self._process = subprocess.Popen(args=args, stderr=ngerr) + if self._process.returncode is not None: + return False + return not wait_live or self.wait_live(timeout=timedelta(seconds=5)) + + +class NghttpxFwd(Nghttpx): + + def __init__(self, env: Env): + super().__init__(env=env, name='nghttpx-fwd', port=env.h2proxys_port) + + def start(self, wait_live=True): + self._mkpath(self._tmp_dir) + if self._process: + self.stop() + args = [ + self._cmd, + f'--http2-proxy', + f'--frontend=*,{self.env.h2proxys_port}', + f'--backend=127.0.0.1,{self.env.proxy_port}', + f'--log-level=INFO', + f'--pid-file={self._pid_file}', + 
f'--errorlog-file={self._error_log}', + f'--conf={self._conf_file}', + f'--cacert={self.env.ca.cert_file}', + self.env.get_credentials(self.env.proxy_domain).pkey_file, + self.env.get_credentials(self.env.proxy_domain).cert_file, + ] + ngerr = open(self._stderr, 'a') + self._process = subprocess.Popen(args=args, stderr=ngerr) + if self._process.returncode is not None: + return False + return not wait_live or self.wait_live(timeout=timedelta(seconds=5)) + + def wait_dead(self, timeout: timedelta): + curl = CurlClient(env=self.env, run_dir=self._tmp_dir) + try_until = datetime.now() + timeout + while datetime.now() < try_until: + check_url = f'https://{self.env.proxy_domain}:{self.env.h2proxys_port}/' + r = curl.http_get(url=check_url) + if r.exit_code != 0: + return True + log.debug(f'waiting for nghttpx-fwd to stop responding: {r}') + time.sleep(.1) + log.debug(f"Server still responding after {timeout}") + return False + + def wait_live(self, timeout: timedelta): + curl = CurlClient(env=self.env, run_dir=self._tmp_dir) + try_until = datetime.now() + timeout + while datetime.now() < try_until: + check_url = f'https://{self.env.proxy_domain}:{self.env.h2proxys_port}/' + r = curl.http_get(url=check_url, extra_args=[ + '--trace', 'curl.trace', '--trace-time' + ]) + if r.exit_code == 0: + return True + log.debug(f'waiting for nghttpx-fwd to become responsive: {r}') + time.sleep(.1) + log.error(f"Server still not responding after {timeout}") + return False + -- cgit v1.2.1