Diffstat (limited to 'libgo/go/net/http')
-rw-r--r--  libgo/go/net/http/cgi/child.go | 6
-rw-r--r--  libgo/go/net/http/cgi/child_test.go | 21
-rw-r--r--  libgo/go/net/http/cgi/host.go | 10
-rw-r--r--  libgo/go/net/http/cgi/host_test.go | 43
-rw-r--r--  libgo/go/net/http/cgi/matryoshka_test.go | 15
-rw-r--r--  libgo/go/net/http/cgi/testdata/test.cgi | 2
-rw-r--r--  libgo/go/net/http/client.go | 82
-rw-r--r--  libgo/go/net/http/client_test.go | 126
-rw-r--r--  libgo/go/net/http/cookie.go | 36
-rw-r--r--  libgo/go/net/http/cookie_test.go | 18
-rw-r--r--  libgo/go/net/http/example_test.go | 23
-rw-r--r--  libgo/go/net/http/export_test.go | 24
-rw-r--r--  libgo/go/net/http/fcgi/child.go | 25
-rw-r--r--  libgo/go/net/http/fcgi/fcgi_test.go | 106
-rw-r--r--  libgo/go/net/http/fs.go | 53
-rw-r--r--  libgo/go/net/http/fs_test.go | 51
-rw-r--r--  libgo/go/net/http/header.go | 2
-rw-r--r--  libgo/go/net/http/http_test.go | 58
-rw-r--r--  libgo/go/net/http/httptest/server.go | 42
-rw-r--r--  libgo/go/net/http/httputil/dump.go | 17
-rw-r--r--  libgo/go/net/http/httputil/dump_test.go | 8
-rw-r--r--  libgo/go/net/http/httputil/reverseproxy.go | 67
-rw-r--r--  libgo/go/net/http/httputil/reverseproxy_test.go | 70
-rw-r--r--  libgo/go/net/http/internal/chunked.go | 17
-rw-r--r--  libgo/go/net/http/lex.go | 73
-rw-r--r--  libgo/go/net/http/lex_test.go | 70
-rw-r--r--  libgo/go/net/http/main_test.go | 26
-rw-r--r--  libgo/go/net/http/npn_test.go | 22
-rw-r--r--  libgo/go/net/http/pprof/pprof.go | 40
-rw-r--r--  libgo/go/net/http/proxy_test.go | 2
-rw-r--r--  libgo/go/net/http/readrequest_test.go | 88
-rw-r--r--  libgo/go/net/http/request.go | 139
-rw-r--r--  libgo/go/net/http/request_test.go | 109
-rw-r--r--  libgo/go/net/http/requestwrite_test.go | 69
-rw-r--r--  libgo/go/net/http/response.go | 15
-rw-r--r--  libgo/go/net/http/response_test.go | 51
-rw-r--r--  libgo/go/net/http/responsewrite_test.go | 15
-rw-r--r--  libgo/go/net/http/serve_test.go | 654
-rw-r--r--  libgo/go/net/http/server.go | 279
-rw-r--r--  libgo/go/net/http/sniff.go | 10
-rw-r--r--  libgo/go/net/http/transfer.go | 144
-rw-r--r--  libgo/go/net/http/transport.go | 360
-rw-r--r--  libgo/go/net/http/transport_test.go | 528
43 files changed, 3193 insertions, 423 deletions
diff --git a/libgo/go/net/http/cgi/child.go b/libgo/go/net/http/cgi/child.go
index 45fc2e57cd7..ec101088219 100644
--- a/libgo/go/net/http/cgi/child.go
+++ b/libgo/go/net/http/cgi/child.go
@@ -132,9 +132,9 @@ func RequestFromMap(params map[string]string) (*http.Request, error) {
}
// Request.RemoteAddr has its port set by Go's standard http
- // server, so we do here too. We don't have one, though, so we
- // use a dummy one.
- r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0")
+ // server, so we do here too.
+ remotePort, _ := strconv.Atoi(params["REMOTE_PORT"]) // zero if unset or invalid
+ r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], strconv.Itoa(remotePort))
return r, nil
}
diff --git a/libgo/go/net/http/cgi/child_test.go b/libgo/go/net/http/cgi/child_test.go
index 075d8411bcf..14e0af475f5 100644
--- a/libgo/go/net/http/cgi/child_test.go
+++ b/libgo/go/net/http/cgi/child_test.go
@@ -22,6 +22,7 @@ func TestRequest(t *testing.T) {
"CONTENT_LENGTH": "123",
"CONTENT_TYPE": "text/xml",
"REMOTE_ADDR": "5.6.7.8",
+ "REMOTE_PORT": "54321",
}
req, err := RequestFromMap(env)
if err != nil {
@@ -60,7 +61,7 @@ func TestRequest(t *testing.T) {
if req.TLS != nil {
t.Errorf("expected nil TLS")
}
- if e, g := "5.6.7.8:0", req.RemoteAddr; e != g {
+ if e, g := "5.6.7.8:54321", req.RemoteAddr; e != g {
t.Errorf("RemoteAddr: got %q; want %q", g, e)
}
}
@@ -129,3 +130,21 @@ func TestRequestWithoutRequestURI(t *testing.T) {
t.Errorf("URL = %q; want %q", g, e)
}
}
+
+func TestRequestWithoutRemotePort(t *testing.T) {
+ env := map[string]string{
+ "SERVER_PROTOCOL": "HTTP/1.1",
+ "HTTP_HOST": "example.com",
+ "REQUEST_METHOD": "GET",
+ "REQUEST_URI": "/path?a=b",
+ "CONTENT_LENGTH": "123",
+ "REMOTE_ADDR": "5.6.7.8",
+ }
+ req, err := RequestFromMap(env)
+ if err != nil {
+ t.Fatalf("RequestFromMap: %v", err)
+ }
+ if e, g := "5.6.7.8:0", req.RemoteAddr; e != g {
+ t.Errorf("RemoteAddr: got %q; want %q", g, e)
+ }
+}
diff --git a/libgo/go/net/http/cgi/host.go b/libgo/go/net/http/cgi/host.go
index ec95a972c1a..4efbe7abeec 100644
--- a/libgo/go/net/http/cgi/host.go
+++ b/libgo/go/net/http/cgi/host.go
@@ -19,6 +19,7 @@ import (
"fmt"
"io"
"log"
+ "net"
"net/http"
"os"
"os/exec"
@@ -128,11 +129,16 @@ func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
"PATH_INFO=" + pathInfo,
"SCRIPT_NAME=" + root,
"SCRIPT_FILENAME=" + h.Path,
- "REMOTE_ADDR=" + req.RemoteAddr,
- "REMOTE_HOST=" + req.RemoteAddr,
"SERVER_PORT=" + port,
}
+ if remoteIP, remotePort, err := net.SplitHostPort(req.RemoteAddr); err == nil {
+ env = append(env, "REMOTE_ADDR="+remoteIP, "REMOTE_HOST="+remoteIP, "REMOTE_PORT="+remotePort)
+ } else {
+ // could not parse ip:port, let's use whole RemoteAddr and leave REMOTE_PORT undefined
+ env = append(env, "REMOTE_ADDR="+req.RemoteAddr, "REMOTE_HOST="+req.RemoteAddr)
+ }
+
if req.TLS != nil {
env = append(env, "HTTPS=on")
}
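
The hunk above only sets REMOTE_PORT when req.RemoteAddr parses as host:port and otherwise falls back to the raw value. A minimal standalone sketch of that splitting behavior, with an illustrative function name and inputs (not part of the patch):

package main

import (
	"fmt"
	"net"
)

// splitRemote mirrors the branch added above: if the address parses as
// "host:port", expose both parts; otherwise use the whole string and
// leave the port empty.
func splitRemote(remoteAddr string) (host, port string) {
	if h, p, err := net.SplitHostPort(remoteAddr); err == nil {
		return h, p
	}
	return remoteAddr, ""
}

func main() {
	fmt.Println(splitRemote("1.2.3.4:1234"))       // 1.2.3.4 1234
	fmt.Println(splitRemote("[2000::3000]:12345")) // 2000::3000 12345
	fmt.Println(splitRemote("1.2.3.4"))            // 1.2.3.4  (port left empty)
}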
diff --git a/libgo/go/net/http/cgi/host_test.go b/libgo/go/net/http/cgi/host_test.go
index b514e10e963..f3411105ca9 100644
--- a/libgo/go/net/http/cgi/host_test.go
+++ b/libgo/go/net/http/cgi/host_test.go
@@ -29,7 +29,7 @@ func newRequest(httpreq string) *http.Request {
if err != nil {
panic("cgi: bogus http request in test: " + httpreq)
}
- req.RemoteAddr = "1.2.3.4"
+ req.RemoteAddr = "1.2.3.4:1234"
return req
}
@@ -37,7 +37,11 @@ func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string
rw := httptest.NewRecorder()
req := newRequest(httpreq)
h.ServeHTTP(rw, req)
+ runResponseChecks(t, rw, expectedMap)
+ return rw
+}
+func runResponseChecks(t *testing.T, rw *httptest.ResponseRecorder, expectedMap map[string]string) {
// Make a map to hold the test map that the CGI returns.
m := make(map[string]string)
m["_body"] = rw.Body.String()
@@ -75,7 +79,6 @@ readlines:
t.Errorf("for key %q got %q; expected %q", key, got, expected)
}
}
- return rw
}
var cgiTested, cgiWorks bool
@@ -108,6 +111,7 @@ func TestCGIBasicGet(t *testing.T) {
"env-QUERY_STRING": "foo=bar&a=b",
"env-REMOTE_ADDR": "1.2.3.4",
"env-REMOTE_HOST": "1.2.3.4",
+ "env-REMOTE_PORT": "1234",
"env-REQUEST_METHOD": "GET",
"env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
"env-SCRIPT_FILENAME": "testdata/test.cgi",
@@ -126,6 +130,39 @@ func TestCGIBasicGet(t *testing.T) {
}
}
+func TestCGIEnvIPv6(t *testing.T) {
+ check(t)
+ h := &Handler{
+ Path: "testdata/test.cgi",
+ Root: "/test.cgi",
+ }
+ expectedMap := map[string]string{
+ "test": "Hello CGI",
+ "param-a": "b",
+ "param-foo": "bar",
+ "env-GATEWAY_INTERFACE": "CGI/1.1",
+ "env-HTTP_HOST": "example.com",
+ "env-PATH_INFO": "",
+ "env-QUERY_STRING": "foo=bar&a=b",
+ "env-REMOTE_ADDR": "2000::3000",
+ "env-REMOTE_HOST": "2000::3000",
+ "env-REMOTE_PORT": "12345",
+ "env-REQUEST_METHOD": "GET",
+ "env-REQUEST_URI": "/test.cgi?foo=bar&a=b",
+ "env-SCRIPT_FILENAME": "testdata/test.cgi",
+ "env-SCRIPT_NAME": "/test.cgi",
+ "env-SERVER_NAME": "example.com",
+ "env-SERVER_PORT": "80",
+ "env-SERVER_SOFTWARE": "go",
+ }
+
+ rw := httptest.NewRecorder()
+ req := newRequest("GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n")
+ req.RemoteAddr = "[2000::3000]:12345"
+ h.ServeHTTP(rw, req)
+ runResponseChecks(t, rw, expectedMap)
+}
+
func TestCGIBasicGetAbsPath(t *testing.T) {
check(t)
pwd, err := os.Getwd()
@@ -289,7 +326,7 @@ func TestInternalRedirect(t *testing.T) {
}
expectedMap := map[string]string{
"basepath": "/foo",
- "remoteaddr": "1.2.3.4",
+ "remoteaddr": "1.2.3.4:1234",
}
runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap)
}
diff --git a/libgo/go/net/http/cgi/matryoshka_test.go b/libgo/go/net/http/cgi/matryoshka_test.go
index 18c4803e71b..32d59c09a3c 100644
--- a/libgo/go/net/http/cgi/matryoshka_test.go
+++ b/libgo/go/net/http/cgi/matryoshka_test.go
@@ -12,11 +12,11 @@ import (
"bytes"
"errors"
"fmt"
+ "internal/testenv"
"io"
"net/http"
"net/http/httptest"
"os"
- "runtime"
"testing"
"time"
)
@@ -24,9 +24,7 @@ import (
// This test is a CGI host (testing host.go) that runs its own binary
// as a child process testing the other half of CGI (child.go).
func TestHostingOurselves(t *testing.T) {
- if runtime.GOOS == "nacl" {
- t.Skip("skipping on nacl")
- }
+ testenv.MustHaveExec(t)
h := &Handler{
Path: os.Args[0],
@@ -43,6 +41,7 @@ func TestHostingOurselves(t *testing.T) {
"env-QUERY_STRING": "foo=bar&a=b",
"env-REMOTE_ADDR": "1.2.3.4",
"env-REMOTE_HOST": "1.2.3.4",
+ "env-REMOTE_PORT": "1234",
"env-REQUEST_METHOD": "GET",
"env-REQUEST_URI": "/test.go?foo=bar&a=b",
"env-SCRIPT_FILENAME": os.Args[0],
@@ -92,9 +91,7 @@ func (w *limitWriter) Write(p []byte) (n int, err error) {
// If there's an error copying the child's output to the parent, test
// that we kill the child.
func TestKillChildAfterCopyError(t *testing.T) {
- if runtime.GOOS == "nacl" {
- t.Skip("skipping on nacl")
- }
+ testenv.MustHaveExec(t)
defer func() { testHookStartProcess = nil }()
proc := make(chan *os.Process, 1)
@@ -139,9 +136,7 @@ func TestKillChildAfterCopyError(t *testing.T) {
// Test that a child handler writing only headers works.
// golang.org/issue/7196
func TestChildOnlyHeaders(t *testing.T) {
- if runtime.GOOS == "nacl" {
- t.Skip("skipping on nacl")
- }
+ testenv.MustHaveExec(t)
h := &Handler{
Path: os.Args[0],
diff --git a/libgo/go/net/http/cgi/testdata/test.cgi b/libgo/go/net/http/cgi/testdata/test.cgi
index 3214df6f004..ec7ee6f3864 100644
--- a/libgo/go/net/http/cgi/testdata/test.cgi
+++ b/libgo/go/net/http/cgi/testdata/test.cgi
@@ -45,7 +45,7 @@ foreach my $k (sort keys %ENV) {
# NOTE: msys perl returns /c/go/src/... not C:\go\....
my $dir = getcwd();
-if ($^O eq 'MSWin32' || $^O eq 'msys') {
+if ($^O eq 'MSWin32' || $^O eq 'msys' || $^O eq 'cygwin') {
if ($dir =~ /^.:/) {
$dir =~ s!/!\\!g;
} else {
diff --git a/libgo/go/net/http/client.go b/libgo/go/net/http/client.go
index ce884d1f07b..7f2fbb4678e 100644
--- a/libgo/go/net/http/client.go
+++ b/libgo/go/net/http/client.go
@@ -19,6 +19,7 @@ import (
"net/url"
"strings"
"sync"
+ "sync/atomic"
"time"
)
@@ -211,7 +212,7 @@ func send(req *Request, t RoundTripper) (resp *Response, err error) {
req.Header = make(Header)
}
- if u := req.URL.User; u != nil {
+ if u := req.URL.User; u != nil && req.Header.Get("Authorization") == "" {
username := u.Username()
password, _ := u.Password()
req.Header.Set("Authorization", "Basic "+basicAuth(username, password))
@@ -256,8 +257,9 @@ func shouldRedirectPost(statusCode int) bool {
return false
}
-// Get issues a GET to the specified URL. If the response is one of the following
-// redirect codes, Get follows the redirect, up to a maximum of 10 redirects:
+// Get issues a GET to the specified URL. If the response is one of
+// the following redirect codes, Get follows the redirect, up to a
+// maximum of 10 redirects:
//
// 301 (Moved Permanently)
// 302 (Found)
@@ -272,13 +274,16 @@ func shouldRedirectPost(statusCode int) bool {
// Caller should close resp.Body when done reading from it.
//
// Get is a wrapper around DefaultClient.Get.
+//
+// To make a request with custom headers, use NewRequest and
+// DefaultClient.Do.
func Get(url string) (resp *Response, err error) {
return DefaultClient.Get(url)
}
-// Get issues a GET to the specified URL. If the response is one of the
+// Get issues a GET to the specified URL. If the response is one of the
// following redirect codes, Get follows the redirect after calling the
-// Client's CheckRedirect function.
+// Client's CheckRedirect function:
//
// 301 (Moved Permanently)
// 302 (Found)
@@ -291,6 +296,8 @@ func Get(url string) (resp *Response, err error) {
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
+//
+// To make a request with custom headers, use NewRequest and Client.Do.
func (c *Client) Get(url string) (resp *Response, err error) {
req, err := NewRequest("GET", url, nil)
if err != nil {
@@ -299,6 +306,8 @@ func (c *Client) Get(url string) (resp *Response, err error) {
return c.doFollowingRedirects(req, shouldRedirectGet)
}
+func alwaysFalse() bool { return false }
+
func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bool) (resp *Response, err error) {
var base *url.URL
redirectChecker := c.CheckRedirect
@@ -316,7 +325,10 @@ func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bo
req := ireq
var timer *time.Timer
+ var atomicWasCanceled int32 // atomic bool (1 or 0)
+ var wasCanceled = alwaysFalse
if c.Timeout > 0 {
+ wasCanceled = func() bool { return atomic.LoadInt32(&atomicWasCanceled) != 0 }
type canceler interface {
CancelRequest(*Request)
}
@@ -325,6 +337,7 @@ func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bo
return nil, fmt.Errorf("net/http: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", c.transport())
}
timer = time.AfterFunc(c.Timeout, func() {
+ atomic.StoreInt32(&atomicWasCanceled, 1)
reqmu.Lock()
defer reqmu.Unlock()
tr.CancelRequest(req)
@@ -365,6 +378,12 @@ func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bo
urlStr = req.URL.String()
if resp, err = c.send(req); err != nil {
+ if wasCanceled() {
+ err = &httpError{
+ err: err.Error() + " (Client.Timeout exceeded while awaiting headers)",
+ timeout: true,
+ }
+ }
break
}
@@ -377,7 +396,7 @@ func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bo
}
resp.Body.Close()
if urlStr = resp.Header.Get("Location"); urlStr == "" {
- err = errors.New(fmt.Sprintf("%d response missing Location header", resp.StatusCode))
+ err = fmt.Errorf("%d response missing Location header", resp.StatusCode)
break
}
base = req.URL
@@ -385,7 +404,11 @@ func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bo
continue
}
if timer != nil {
- resp.Body = &cancelTimerBody{timer, resp.Body}
+ resp.Body = &cancelTimerBody{
+ t: timer,
+ rc: resp.Body,
+ reqWasCanceled: wasCanceled,
+ }
}
return resp, nil
}
@@ -400,7 +423,7 @@ func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bo
if redirectFailed {
// Special case for Go 1 compatibility: return both the response
// and an error if the CheckRedirect function failed.
- // See http://golang.org/issue/3795
+ // See https://golang.org/issue/3795
return resp, urlErr
}
@@ -421,7 +444,12 @@ func defaultCheckRedirect(req *Request, via []*Request) error {
//
// Caller should close resp.Body when done reading from it.
//
-// Post is a wrapper around DefaultClient.Post
+// If the provided body is an io.Closer, it is closed after the
+// request.
+//
+// Post is a wrapper around DefaultClient.Post.
+//
+// To set custom headers, use NewRequest and DefaultClient.Do.
func Post(url string, bodyType string, body io.Reader) (resp *Response, err error) {
return DefaultClient.Post(url, bodyType, body)
}
@@ -430,8 +458,10 @@ func Post(url string, bodyType string, body io.Reader) (resp *Response, err erro
//
// Caller should close resp.Body when done reading from it.
//
-// If the provided body is also an io.Closer, it is closed after the
+// If the provided body is an io.Closer, it is closed after the
// request.
+//
+// To set custom headers, use NewRequest and Client.Do.
func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Response, err error) {
req, err := NewRequest("POST", url, body)
if err != nil {
@@ -444,16 +474,22 @@ func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Respon
// PostForm issues a POST to the specified URL, with data's keys and
// values URL-encoded as the request body.
//
+// The Content-Type header is set to application/x-www-form-urlencoded.
+// To set other headers, use NewRequest and DefaultClient.Do.
+//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
//
-// PostForm is a wrapper around DefaultClient.PostForm
+// PostForm is a wrapper around DefaultClient.PostForm.
func PostForm(url string, data url.Values) (resp *Response, err error) {
return DefaultClient.PostForm(url, data)
}
// PostForm issues a POST to the specified URL,
-// with data's keys and values urlencoded as the request body.
+// with data's keys and values URL-encoded as the request body.
+//
+// The Content-Type header is set to application/x-www-form-urlencoded.
+// To set other headers, use NewRequest and DefaultClient.Do.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
@@ -461,9 +497,9 @@ func (c *Client) PostForm(url string, data url.Values) (resp *Response, err erro
return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
-// Head issues a HEAD to the specified URL. If the response is one of the
-// following redirect codes, Head follows the redirect after calling the
-// Client's CheckRedirect function.
+// Head issues a HEAD to the specified URL. If the response is one of
+// the following redirect codes, Head follows the redirect, up to a
+// maximum of 10 redirects:
//
// 301 (Moved Permanently)
// 302 (Found)
@@ -477,7 +513,7 @@ func Head(url string) (resp *Response, err error) {
// Head issues a HEAD to the specified URL. If the response is one of the
// following redirect codes, Head follows the redirect after calling the
-// Client's CheckRedirect function.
+// Client's CheckRedirect function:
//
// 301 (Moved Permanently)
// 302 (Found)
@@ -491,15 +527,25 @@ func (c *Client) Head(url string) (resp *Response, err error) {
return c.doFollowingRedirects(req, shouldRedirectGet)
}
+// cancelTimerBody is an io.ReadCloser that wraps rc with two features:
+// 1) on Read EOF or Close, the timer t is Stopped,
+// 2) On Read failure, if reqWasCanceled is true, the error is wrapped and
+// marked as net.Error that hit its timeout.
type cancelTimerBody struct {
- t *time.Timer
- rc io.ReadCloser
+ t *time.Timer
+ rc io.ReadCloser
+ reqWasCanceled func() bool
}
func (b *cancelTimerBody) Read(p []byte) (n int, err error) {
n, err = b.rc.Read(p)
if err == io.EOF {
b.t.Stop()
+ } else if err != nil && b.reqWasCanceled() {
+ return n, &httpError{
+ err: err.Error() + " (Client.Timeout exceeded while reading body)",
+ timeout: true,
+ }
}
return
}
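
The timeout plumbing above turns a fired Client.Timeout into a net.Error whose Timeout() reports true, both while awaiting headers and while reading the body. A hedged caller-side sketch, assuming a hypothetical slow endpoint (not part of the patch):

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/url"
	"time"
)

func main() {
	// The URL is illustrative; any server slower than the timeout will do.
	c := &http.Client{Timeout: 500 * time.Millisecond}
	_, err := c.Get("http://127.0.0.1:8080/slow")
	if err == nil {
		return
	}
	// Get wraps errors in *url.Error; with the change above, a fired
	// Client.Timeout surfaces as a net.Error with Timeout() == true and
	// a "Client.Timeout exceeded" message.
	if ue, ok := err.(*url.Error); ok {
		if ne, ok := ue.Err.(net.Error); ok && ne.Timeout() {
			fmt.Println("request timed out:", ne)
			return
		}
	}
	fmt.Println("other error:", err)
}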
diff --git a/libgo/go/net/http/client_test.go b/libgo/go/net/http/client_test.go
index 56b6563c486..7b524d381bc 100644
--- a/libgo/go/net/http/client_test.go
+++ b/libgo/go/net/http/client_test.go
@@ -258,7 +258,7 @@ func TestClientRedirects(t *testing.T) {
t.Errorf("with redirects forbidden, expected a *url.Error with our 'no redirects allowed' error inside; got %#v (%q)", err, err)
}
if res == nil {
- t.Fatalf("Expected a non-nil Response on CheckRedirect failure (http://golang.org/issue/3795)")
+ t.Fatalf("Expected a non-nil Response on CheckRedirect failure (https://golang.org/issue/3795)")
}
res.Body.Close()
if res.Header.Get("Location") == "" {
@@ -334,6 +334,7 @@ var echoCookiesRedirectHandler = HandlerFunc(func(w ResponseWriter, r *Request)
})
func TestClientSendsCookieFromJar(t *testing.T) {
+ defer afterTest(t)
tr := &recordingTransport{}
client := &Client{Transport: tr}
client.Jar = &TestJar{perURL: make(map[string][]*Cookie)}
@@ -426,7 +427,7 @@ func TestJarCalls(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
pathSuffix := r.RequestURI[1:]
if r.RequestURI == "/nosetcookie" {
- return // dont set cookies for this path
+ return // don't set cookies for this path
}
SetCookie(w, &Cookie{Name: "name" + pathSuffix, Value: "val" + pathSuffix})
if r.RequestURI == "/" {
@@ -738,7 +739,7 @@ func TestResponseSetsTLSConnectionState(t *testing.T) {
}
}
-// Verify Response.ContentLength is populated. http://golang.org/issue/4126
+// Verify Response.ContentLength is populated. https://golang.org/issue/4126
func TestClientHeadContentLength(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
@@ -842,6 +843,47 @@ func TestBasicAuth(t *testing.T) {
}
}
+func TestBasicAuthHeadersPreserved(t *testing.T) {
+ defer afterTest(t)
+ tr := &recordingTransport{}
+ client := &Client{Transport: tr}
+
+ // If Authorization header is provided, username in URL should not override it
+ url := "http://My%20User@dummy.faketld/"
+ req, err := NewRequest("GET", url, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.SetBasicAuth("My User", "My Pass")
+ expected := "My User:My Pass"
+ client.Do(req)
+
+ if tr.req.Method != "GET" {
+ t.Errorf("got method %q, want %q", tr.req.Method, "GET")
+ }
+ if tr.req.URL.String() != url {
+ t.Errorf("got URL %q, want %q", tr.req.URL.String(), url)
+ }
+ if tr.req.Header == nil {
+ t.Fatalf("expected non-nil request Header")
+ }
+ auth := tr.req.Header.Get("Authorization")
+ if strings.HasPrefix(auth, "Basic ") {
+ encoded := auth[6:]
+ decoded, err := base64.StdEncoding.DecodeString(encoded)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s := string(decoded)
+ if expected != s {
+ t.Errorf("Invalid Authorization header. Got %q, wanted %q", s, expected)
+ }
+ } else {
+ t.Errorf("Invalid auth %q", auth)
+ }
+
+}
+
func TestClientTimeout(t *testing.T) {
if testing.Short() {
t.Skip("skipping in short mode")
@@ -899,14 +941,64 @@ func TestClientTimeout(t *testing.T) {
select {
case err := <-errc:
if err == nil {
- t.Error("expected error from ReadAll")
+ t.Fatal("expected error from ReadAll")
+ }
+ ne, ok := err.(net.Error)
+ if !ok {
+ t.Errorf("error value from ReadAll was %T; expected some net.Error", err)
+ } else if !ne.Timeout() {
+ t.Errorf("net.Error.Timeout = false; want true")
+ }
+ if got := ne.Error(); !strings.Contains(got, "Client.Timeout exceeded") {
+ t.Errorf("error string = %q; missing timeout substring", got)
}
- // Expected error.
case <-time.After(failTime):
t.Errorf("timeout after %v waiting for timeout of %v", failTime, timeout)
}
}
+// Client.Timeout firing before getting to the body
+func TestClientTimeout_Headers(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ defer afterTest(t)
+ donec := make(chan bool)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ <-donec
+ }))
+ defer ts.Close()
+ // Note that we use a channel send here and not a close.
+ // The race detector doesn't know that we're waiting for a timeout
+ // and thinks that the waitgroup inside httptest.Server is added to concurrently
+ // with us closing it. If we timed out immediately, we could close the testserver
+ // before we entered the handler. We're not timing out immediately and there's
+ // no way we would be done before we entered the handler, but the race detector
+ // doesn't know this, so synchronize explicitly.
+ defer func() { donec <- true }()
+
+ c := &Client{Timeout: 500 * time.Millisecond}
+
+ _, err := c.Get(ts.URL)
+ if err == nil {
+ t.Fatal("got response from Get; expected error")
+ }
+ ue, ok := err.(*url.Error)
+ if !ok {
+ t.Fatalf("Got error of type %T; want *url.Error", err)
+ }
+ ne, ok := ue.Err.(net.Error)
+ if !ok {
+ t.Fatalf("Got url.Error.Err of type %T; want some net.Error", err)
+ }
+ if !ne.Timeout() {
+ t.Error("net.Error.Timeout = false; want true")
+ }
+ if got := ne.Error(); !strings.Contains(got, "Client.Timeout exceeded") {
+ t.Errorf("error string = %q; missing timeout substring", got)
+ }
+}
+
func TestClientRedirectEatsBody(t *testing.T) {
defer afterTest(t)
saw := make(chan string, 2)
@@ -984,24 +1076,12 @@ func TestClientTrailers(t *testing.T) {
r.Trailer.Get("Client-Trailer-B"))
}
- // TODO: golang.org/issue/7759: there's no way yet for
- // the server to set trailers without hijacking, so do
- // that for now, just to test the client. Later, in
- // Go 1.4, it should be implicit that any mutations
- // to w.Header() after the initial write are the
- // trailers to be sent, if and only if they were
- // previously declared with w.Header().Set("Trailer",
- // ..keys..)
- w.(Flusher).Flush()
- conn, buf, _ := w.(Hijacker).Hijack()
- t := Header{}
- t.Set("Server-Trailer-A", "valuea")
- t.Set("Server-Trailer-C", "valuec") // skipping B
- buf.WriteString("0\r\n") // eof
- t.Write(buf)
- buf.WriteString("\r\n") // end of trailers
- buf.Flush()
- conn.Close()
+ // How handlers set Trailers: declare it ahead of time
+ // with the Trailer header, and then mutate the
+ // Header() of those values later, after the response
+ // has been written (we wrote to w above).
+ w.Header().Set("Server-Trailer-A", "valuea")
+ w.Header().Set("Server-Trailer-C", "valuec") // skipping B
}))
defer ts.Close()
diff --git a/libgo/go/net/http/cookie.go b/libgo/go/net/http/cookie.go
index a0d0fdbbd07..648709dd997 100644
--- a/libgo/go/net/http/cookie.go
+++ b/libgo/go/net/http/cookie.go
@@ -14,19 +14,18 @@ import (
"time"
)
-// This implementation is done according to RFC 6265:
-//
-// http://tools.ietf.org/html/rfc6265
-
// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an
// HTTP response or the Cookie header of an HTTP request.
+//
+// See http://tools.ietf.org/html/rfc6265 for details.
type Cookie struct {
- Name string
- Value string
- Path string
- Domain string
- Expires time.Time
- RawExpires string
+ Name string
+ Value string
+
+ Path string // optional
+ Domain string // optional
+ Expires time.Time // optional
+ RawExpires string // for reading cookies only
// MaxAge=0 means no 'Max-Age' attribute specified.
// MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'
@@ -126,14 +125,22 @@ func readSetCookies(h Header) []*Cookie {
}
// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.
+// The provided cookie must have a valid Name. Invalid cookies may be
+// silently dropped.
func SetCookie(w ResponseWriter, cookie *Cookie) {
- w.Header().Add("Set-Cookie", cookie.String())
+ if v := cookie.String(); v != "" {
+ w.Header().Add("Set-Cookie", v)
+ }
}
// String returns the serialization of the cookie for use in a Cookie
// header (if only Name and Value are set) or a Set-Cookie response
// header (if other fields are set).
+// If c is nil or c.Name is invalid, the empty string is returned.
func (c *Cookie) String() string {
+ if c == nil || !isCookieNameValid(c.Name) {
+ return ""
+ }
var b bytes.Buffer
fmt.Fprintf(&b, "%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value))
if len(c.Path) > 0 {
@@ -156,7 +163,7 @@ func (c *Cookie) String() string {
}
}
if c.Expires.Unix() > 0 {
- fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(time.RFC1123))
+ fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(TimeFormat))
}
if c.MaxAge > 0 {
fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
@@ -297,7 +304,7 @@ func sanitizeCookieName(n string) string {
// We loosen this as spaces and commas are common in cookie values
// but we produce a quoted cookie-value in when value starts or ends
// with a comma or space.
-// See http://golang.org/issue/7243 for the discussion.
+// See https://golang.org/issue/7243 for the discussion.
func sanitizeCookieValue(v string) string {
v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
if len(v) == 0 {
@@ -359,5 +366,8 @@ func parseCookieValue(raw string, allowDoubleQuote bool) (string, bool) {
}
func isCookieNameValid(raw string) bool {
+ if raw == "" {
+ return false
+ }
return strings.IndexFunc(raw, isNotToken) < 0
}
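
With the validation above, Cookie.String returns the empty string for a nil cookie or an invalid Name, and SetCookie silently adds no header for such cookies. A small sketch of the observable behavior (not part of the patch):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	good := &http.Cookie{Name: "session", Value: "abc"}
	bad := &http.Cookie{Name: "", Value: "dropped"}

	fmt.Printf("%q\n", good.String()) // "session=abc"
	// An invalid (here: empty) Name yields "", so SetCookie would emit
	// no Set-Cookie header for this cookie.
	fmt.Printf("%q\n", bad.String()) // ""
}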
diff --git a/libgo/go/net/http/cookie_test.go b/libgo/go/net/http/cookie_test.go
index 98dc2fade0d..d474f313476 100644
--- a/libgo/go/net/http/cookie_test.go
+++ b/libgo/go/net/http/cookie_test.go
@@ -52,6 +52,10 @@ var writeSetCookiesTests = []struct {
&Cookie{Name: "cookie-8", Value: "eight", Domain: "::1"},
"cookie-8=eight",
},
+ {
+ &Cookie{Name: "cookie-9", Value: "expiring", Expires: time.Unix(1257894000, 0)},
+ "cookie-9=expiring; Expires=Tue, 10 Nov 2009 23:00:00 GMT",
+ },
// The "special" cookies have values containing commas or spaces which
// are disallowed by RFC 6265 but are common in the wild.
{
@@ -90,6 +94,18 @@ var writeSetCookiesTests = []struct {
&Cookie{Name: "empty-value", Value: ""},
`empty-value=`,
},
+ {
+ nil,
+ ``,
+ },
+ {
+ &Cookie{Name: ""},
+ ``,
+ },
+ {
+ &Cookie{Name: "\t"},
+ ``,
+ },
}
func TestWriteSetCookies(t *testing.T) {
@@ -349,7 +365,7 @@ func TestSetCookieDoubleQuotes(t *testing.T) {
{Name: "quoted3", Value: "both"},
}
if len(got) != len(want) {
- t.Fatal("got %d cookies, want %d", len(got), len(want))
+ t.Fatalf("got %d cookies, want %d", len(got), len(want))
}
for i, w := range want {
g := got[i]
diff --git a/libgo/go/net/http/example_test.go b/libgo/go/net/http/example_test.go
index 88b97d9e3d7..1774795d379 100644
--- a/libgo/go/net/http/example_test.go
+++ b/libgo/go/net/http/example_test.go
@@ -6,6 +6,7 @@ package http_test
import (
"fmt"
+ "io"
"io/ioutil"
"log"
"net/http"
@@ -86,3 +87,25 @@ func ExampleServeMux_Handle() {
fmt.Fprintf(w, "Welcome to the home page!")
})
}
+
+// HTTP Trailers are a set of key/value pairs like headers that come
+// after the HTTP response, instead of before.
+func ExampleResponseWriter_trailers() {
+ mux := http.NewServeMux()
+ mux.HandleFunc("/sendstrailers", func(w http.ResponseWriter, req *http.Request) {
+ // Before any call to WriteHeader or Write, declare
+ // the trailers you will set during the HTTP
+ // response. These three headers are actually sent in
+ // the trailer.
+ w.Header().Set("Trailer", "AtEnd1, AtEnd2")
+ w.Header().Add("Trailer", "AtEnd3")
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8") // normal header
+ w.WriteHeader(http.StatusOK)
+
+ w.Header().Set("AtEnd1", "value 1")
+ io.WriteString(w, "This HTTP response has both headers before this text and trailers at the end.\n")
+ w.Header().Set("AtEnd2", "value 2")
+ w.Header().Set("AtEnd3", "value 3") // These will appear as trailers.
+ })
+}
diff --git a/libgo/go/net/http/export_test.go b/libgo/go/net/http/export_test.go
index 87b6c0773aa..0457be50da6 100644
--- a/libgo/go/net/http/export_test.go
+++ b/libgo/go/net/http/export_test.go
@@ -10,9 +10,17 @@ package http
import (
"net"
"net/url"
+ "sync"
"time"
)
+func init() {
+ // We only want to pay for this cost during testing.
+ // When not under test, these values are always nil
+ // and never assigned to.
+ testHookMu = new(sync.Mutex)
+}
+
func NewLoggingConn(baseName string, c net.Conn) net.Conn {
return newLoggingConn(baseName, c)
}
@@ -78,6 +86,20 @@ func (t *Transport) PutIdleTestConn() bool {
})
}
+func SetInstallConnClosedHook(f func()) {
+ testHookPersistConnClosedGotRes = f
+}
+
+func SetEnterRoundTripHook(f func()) {
+ testHookEnterRoundTrip = f
+}
+
+func SetReadLoopBeforeNextReadHook(f func()) {
+ testHookMu.Lock()
+ defer testHookMu.Unlock()
+ testHookReadLoopBeforeNextRead = f
+}
+
func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler {
f := func() <-chan time.Time {
return ch
@@ -106,3 +128,5 @@ func SetPendingDialHooks(before, after func()) {
var ExportServerNewConn = (*Server).newConn
var ExportCloseWriteAndWait = (*conn).closeWriteAndWait
+
+var ExportErrRequestCanceled = errRequestCanceled
diff --git a/libgo/go/net/http/fcgi/child.go b/libgo/go/net/http/fcgi/child.go
index a3beaa33a86..da824ed717e 100644
--- a/libgo/go/net/http/fcgi/child.go
+++ b/libgo/go/net/http/fcgi/child.go
@@ -144,6 +144,7 @@ func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child {
func (c *child) serve() {
defer c.conn.Close()
+ defer c.cleanUp()
var rec record
for {
if err := rec.read(c.conn.rwc); err != nil {
@@ -159,6 +160,14 @@ var errCloseConn = errors.New("fcgi: connection should be closed")
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
+// ErrRequestAborted is returned by Read when a handler attempts to read the
+// body of a request that has been aborted by the web server.
+var ErrRequestAborted = errors.New("fcgi: request aborted by web server")
+
+// ErrConnClosed is returned by Read when a handler attempts to read the body of
+// a request after the connection to the web server has been closed.
+var ErrConnClosed = errors.New("fcgi: connection to web server closed")
+
func (c *child) handleRecord(rec *record) error {
c.mu.Lock()
req, ok := c.requests[rec.h.Id]
@@ -227,11 +236,13 @@ func (c *child) handleRecord(rec *record) error {
// If the filter role is implemented, read the data stream here.
return nil
case typeAbortRequest:
- println("abort")
c.mu.Lock()
delete(c.requests, rec.h.Id)
c.mu.Unlock()
c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete)
+ if req.pw != nil {
+ req.pw.CloseWithError(ErrRequestAborted)
+ }
if !req.keepConn {
// connection will close upon return
return errCloseConn
@@ -277,6 +288,18 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) {
}
}
+func (c *child) cleanUp() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ for _, req := range c.requests {
+ if req.pw != nil {
+ // race with call to Close in c.serveRequest doesn't matter because
+ // Pipe(Reader|Writer).Close are idempotent
+ req.pw.CloseWithError(ErrConnClosed)
+ }
+ }
+}
+
// Serve accepts incoming FastCGI connections on the listener l, creating a new
// goroutine for each. The goroutine reads requests and then calls handler
// to reply to them.
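
The two sentinel errors exported above let a FastCGI handler distinguish an aborted request from a closed web-server connection while reading the body. A hedged sketch of handler-side use, with an illustrative listen address (not part of the patch):

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"net/http/fcgi"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Reading the body can now fail with the sentinel errors added above.
	if _, err := io.Copy(ioutil.Discard, r.Body); err != nil {
		switch err {
		case fcgi.ErrRequestAborted:
			log.Println("request aborted by the web server")
		case fcgi.ErrConnClosed:
			log.Println("web server connection closed")
		default:
			log.Println("body read error:", err)
		}
		return
	}
	io.WriteString(w, "ok\n")
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:9000") // address is illustrative
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(fcgi.Serve(l, http.HandlerFunc(handler)))
}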
diff --git a/libgo/go/net/http/fcgi/fcgi_test.go b/libgo/go/net/http/fcgi/fcgi_test.go
index 6c7e1a9ce83..de0f7f831f6 100644
--- a/libgo/go/net/http/fcgi/fcgi_test.go
+++ b/libgo/go/net/http/fcgi/fcgi_test.go
@@ -8,6 +8,8 @@ import (
"bytes"
"errors"
"io"
+ "io/ioutil"
+ "net/http"
"testing"
)
@@ -148,3 +150,107 @@ func TestGetValues(t *testing.T) {
t.Errorf(" got: %q\nwant: %q\n", got, want)
}
}
+
+func nameValuePair11(nameData, valueData string) []byte {
+ return bytes.Join(
+ [][]byte{
+ {byte(len(nameData)), byte(len(valueData))},
+ []byte(nameData),
+ []byte(valueData),
+ },
+ nil,
+ )
+}
+
+func makeRecord(
+ recordType recType,
+ requestId uint16,
+ contentData []byte,
+) []byte {
+ requestIdB1 := byte(requestId >> 8)
+ requestIdB0 := byte(requestId)
+
+ contentLength := len(contentData)
+ contentLengthB1 := byte(contentLength >> 8)
+ contentLengthB0 := byte(contentLength)
+ return bytes.Join([][]byte{
+ {1, byte(recordType), requestIdB1, requestIdB0, contentLengthB1,
+ contentLengthB0, 0, 0},
+ contentData,
+ },
+ nil)
+}
+
+// a series of FastCGI records that start a request and begin sending the
+// request body
+var streamBeginTypeStdin = bytes.Join([][]byte{
+ // set up request 1
+ makeRecord(typeBeginRequest, 1,
+ []byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}),
+ // add required parameters to request 1
+ makeRecord(typeParams, 1, nameValuePair11("REQUEST_METHOD", "GET")),
+ makeRecord(typeParams, 1, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")),
+ makeRecord(typeParams, 1, nil),
+ // begin sending body of request 1
+ makeRecord(typeStdin, 1, []byte("0123456789abcdef")),
+},
+ nil)
+
+var cleanUpTests = []struct {
+ input []byte
+ err error
+}{
+ // confirm that child.handleRecord closes req.pw after aborting req
+ {
+ bytes.Join([][]byte{
+ streamBeginTypeStdin,
+ makeRecord(typeAbortRequest, 1, nil),
+ },
+ nil),
+ ErrRequestAborted,
+ },
+ // confirm that child.serve closes all pipes after error reading record
+ {
+ bytes.Join([][]byte{
+ streamBeginTypeStdin,
+ nil,
+ },
+ nil),
+ ErrConnClosed,
+ },
+}
+
+type nopWriteCloser struct {
+ io.ReadWriter
+}
+
+func (nopWriteCloser) Close() error {
+ return nil
+}
+
+// Test that child.serve closes the bodies of aborted requests and closes the
+// bodies of all requests before returning. Causes deadlock if either condition
+// isn't met. See issue 6934.
+func TestChildServeCleansUp(t *testing.T) {
+ for _, tt := range cleanUpTests {
+ input := make([]byte, len(tt.input))
+ copy(input, tt.input)
+ rc := nopWriteCloser{bytes.NewBuffer(input)}
+ done := make(chan bool)
+ c := newChild(rc, http.HandlerFunc(func(
+ w http.ResponseWriter,
+ r *http.Request,
+ ) {
+ // block on reading body of request
+ _, err := io.Copy(ioutil.Discard, r.Body)
+ if err != tt.err {
+ t.Errorf("Expected %#v, got %#v", tt.err, err)
+ }
+ // not reached if body of request isn't closed
+ done <- true
+ }))
+ go c.serve()
+ // wait for body of request to be closed or all goroutines to block
+ <-done
+ }
+}
diff --git a/libgo/go/net/http/fs.go b/libgo/go/net/http/fs.go
index e322f710a5d..75720234c25 100644
--- a/libgo/go/net/http/fs.go
+++ b/libgo/go/net/http/fs.go
@@ -102,10 +102,10 @@ func dirList(w ResponseWriter, f File) {
// The name is otherwise unused; in particular it can be empty and is
// never sent in the response.
//
-// If modtime is not the zero time, ServeContent includes it in a
-// Last-Modified header in the response. If the request includes an
-// If-Modified-Since header, ServeContent uses modtime to decide
-// whether the content needs to be sent at all.
+// If modtime is not the zero time or Unix epoch, ServeContent
+// includes it in a Last-Modified header in the response. If the
+// request includes an If-Modified-Since header, ServeContent uses
+// modtime to decide whether the content needs to be sent at all.
//
// The content's Seek method must work: ServeContent uses
// a seek to the end of the content to determine its size.
@@ -258,10 +258,15 @@ func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time,
}
}
+var unixEpochTime = time.Unix(0, 0)
+
// modtime is the modification time of the resource to be served, or IsZero().
// return value is whether this request is now complete.
func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool {
- if modtime.IsZero() {
+ if modtime.IsZero() || modtime.Equal(unixEpochTime) {
+ // If the file doesn't have a modtime (IsZero), or the modtime
+ // is obviously garbage (Unix time == 0), then ignore modtimes
+ // and don't process the If-Modified-Since header.
return false
}
@@ -353,16 +358,16 @@ func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirec
f, err := fs.Open(name)
if err != nil {
- // TODO expose actual error?
- NotFound(w, r)
+ msg, code := toHTTPError(err)
+ Error(w, msg, code)
return
}
defer f.Close()
d, err1 := f.Stat()
if err1 != nil {
- // TODO expose actual error?
- NotFound(w, r)
+ msg, code := toHTTPError(err)
+ Error(w, msg, code)
return
}
@@ -412,6 +417,22 @@ func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirec
serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f)
}
+// toHTTPError returns a non-specific HTTP error message and status code
+// for a given non-nil error value. It's important that toHTTPError does not
+// actually return err.Error(), since msg and httpStatus are returned to users,
+// and historically Go's ServeContent always returned just "404 Not Found" for
+// all errors. We don't want to start leaking information in error messages.
+func toHTTPError(err error) (msg string, httpStatus int) {
+ if os.IsNotExist(err) {
+ return "404 page not found", StatusNotFound
+ }
+ if os.IsPermission(err) {
+ return "403 Forbidden", StatusForbidden
+ }
+ // Default:
+ return "500 Internal Server Error", StatusInternalServerError
+}
+
// localRedirect gives a Moved Permanently response.
// It does not convert relative paths to absolute paths like Redirect does.
func localRedirect(w ResponseWriter, r *Request, newPath string) {
@@ -422,7 +443,13 @@ func localRedirect(w ResponseWriter, r *Request, newPath string) {
w.WriteHeader(StatusMovedPermanently)
}
-// ServeFile replies to the request with the contents of the named file or directory.
+// ServeFile replies to the request with the contents of the named
+// file or directory.
+//
+// As a special case, ServeFile redirects any request where r.URL.Path
+// ends in "/index.html" to the same path, without the final
+// "index.html". To avoid such redirects either modify the path or
+// use ServeContent.
func ServeFile(w ResponseWriter, r *Request, name string) {
dir, file := filepath.Split(name)
serveFile(w, r, Dir(dir), file, false)
@@ -439,6 +466,10 @@ type fileHandler struct {
// use http.Dir:
//
// http.Handle("/", http.FileServer(http.Dir("/tmp")))
+//
+// As a special case, the returned file server redirects any request
+// ending in "/index.html" to the same path, without the final
+// "index.html".
func FileServer(root FileSystem) Handler {
return &fileHandler{root}
}
@@ -503,7 +534,7 @@ func parseRange(s string, size int64) ([]httpRange, error) {
r.length = size - r.start
} else {
i, err := strconv.ParseInt(start, 10, 64)
- if err != nil || i > size || i < 0 {
+ if err != nil || i >= size || i < 0 {
return nil, errors.New("invalid range")
}
r.start = i
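
The new toHTTPError helper maps file-system errors to deliberately generic messages and status codes instead of exposing err.Error(). Since the helper is unexported, this sketch reimplements the same mapping for illustration only (not part of the patch):

package main

import (
	"errors"
	"fmt"
	"net/http"
	"os"
)

// statusFor mirrors the mapping added above: not-exist becomes 404,
// permission errors become 403, everything else becomes 500.
func statusFor(err error) (msg string, code int) {
	switch {
	case os.IsNotExist(err):
		return "404 page not found", http.StatusNotFound
	case os.IsPermission(err):
		return "403 Forbidden", http.StatusForbidden
	default:
		return "500 Internal Server Error", http.StatusInternalServerError
	}
}

func main() {
	fmt.Println(statusFor(os.ErrNotExist))
	fmt.Println(statusFor(&os.PathError{Op: "open", Path: "/restricted", Err: os.ErrPermission}))
	fmt.Println(statusFor(errors.New("random error")))
}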
diff --git a/libgo/go/net/http/fs_test.go b/libgo/go/net/http/fs_test.go
index 2ddd4ca5fe9..538f34d7201 100644
--- a/libgo/go/net/http/fs_test.go
+++ b/libgo/go/net/http/fs_test.go
@@ -50,15 +50,23 @@ var ServeFileRangeTests = []struct {
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
- {r: "bytes=20-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
- {r: "bytes=0-" + itoa(testFileLen-2), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
- {r: "bytes=0-" + itoa(testFileLen-1), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
- {r: "bytes=0-" + itoa(testFileLen), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
+ {r: "bytes=0-9", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
+ {r: "bytes=0-10", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
+ {r: "bytes=0-11", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
+ {r: "bytes=10-11", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
+ {r: "bytes=10-", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
+ {r: "bytes=11-", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=11-12", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=12-12", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=11-100", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=12-100", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=100-", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=100-1000", code: StatusRequestedRangeNotSatisfiable},
}
func TestServeFile(t *testing.T) {
@@ -489,6 +497,7 @@ type fakeFileInfo struct {
modtime time.Time
ents []*fakeFileInfo
contents string
+ err error
}
func (f *fakeFileInfo) Name() string { return f.basename }
@@ -541,6 +550,9 @@ func (fs fakeFS) Open(name string) (File, error) {
if !ok {
return nil, os.ErrNotExist
}
+ if f.err != nil {
+ return nil, f.err
+ }
return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
}
@@ -743,6 +755,12 @@ func TestServeContent(t *testing.T) {
wantContentType: "text/css; charset=utf-8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
+ "unix_zero_modtime": {
+ content: strings.NewReader("<html>foo"),
+ modtime: time.Unix(0, 0),
+ wantStatus: StatusOK,
+ wantContentType: "text/html; charset=utf-8",
+ },
}
for testName, tt := range tests {
var content io.ReadSeeker
@@ -789,6 +807,31 @@ func TestServeContent(t *testing.T) {
}
}
+func TestServeContentErrorMessages(t *testing.T) {
+ defer afterTest(t)
+ fs := fakeFS{
+ "/500": &fakeFileInfo{
+ err: errors.New("random error"),
+ },
+ "/403": &fakeFileInfo{
+ err: &os.PathError{Err: os.ErrPermission},
+ },
+ }
+ ts := httptest.NewServer(FileServer(fs))
+ defer ts.Close()
+ for _, code := range []int{403, 404, 500} {
+ res, err := DefaultClient.Get(fmt.Sprintf("%s/%d", ts.URL, code))
+ if err != nil {
+ t.Errorf("Error fetching /%d: %v", code, err)
+ continue
+ }
+ if res.StatusCode != code {
+ t.Errorf("For /%d, status code = %d; want %d", code, res.StatusCode, code)
+ }
+ res.Body.Close()
+ }
+}
+
// verifies that sendfile is being used on Linux
func TestLinuxSendfile(t *testing.T) {
defer afterTest(t)
diff --git a/libgo/go/net/http/header.go b/libgo/go/net/http/header.go
index 153b94370f8..d847b131184 100644
--- a/libgo/go/net/http/header.go
+++ b/libgo/go/net/http/header.go
@@ -168,6 +168,8 @@ func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error {
// letter and any letter following a hyphen to upper case;
// the rest are converted to lowercase. For example, the
// canonical key for "accept-encoding" is "Accept-Encoding".
+// If s contains a space or invalid header field bytes, it is
+// returned without modifications.
func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
// hasToken reports whether token appears with v, ASCII
diff --git a/libgo/go/net/http/http_test.go b/libgo/go/net/http/http_test.go
new file mode 100644
index 00000000000..dead3b04542
--- /dev/null
+++ b/libgo/go/net/http/http_test.go
@@ -0,0 +1,58 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests of internal functions with no better homes.
+
+package http
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestForeachHeaderElement(t *testing.T) {
+ tests := []struct {
+ in string
+ want []string
+ }{
+ {"Foo", []string{"Foo"}},
+ {" Foo", []string{"Foo"}},
+ {"Foo ", []string{"Foo"}},
+ {" Foo ", []string{"Foo"}},
+
+ {"foo", []string{"foo"}},
+ {"anY-cAsE", []string{"anY-cAsE"}},
+
+ {"", nil},
+ {",,,, , ,, ,,, ,", nil},
+
+ {" Foo,Bar, Baz,lower,,Quux ", []string{"Foo", "Bar", "Baz", "lower", "Quux"}},
+ }
+ for _, tt := range tests {
+ var got []string
+ foreachHeaderElement(tt.in, func(v string) {
+ got = append(got, v)
+ })
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("foreachHeaderElement(%q) = %q; want %q", tt.in, got, tt.want)
+ }
+ }
+}
+
+func TestCleanHost(t *testing.T) {
+ tests := []struct {
+ in, want string
+ }{
+ {"www.google.com", "www.google.com"},
+ {"www.google.com foo", "www.google.com"},
+ {"www.google.com/foo", "www.google.com"},
+ {" first character is a space", ""},
+ }
+ for _, tt := range tests {
+ got := cleanHost(tt.in)
+ if tt.want != got {
+ t.Errorf("cleanHost(%q) = %q, want %q", tt.in, got, tt.want)
+ }
+ }
+}
diff --git a/libgo/go/net/http/httptest/server.go b/libgo/go/net/http/httptest/server.go
index 789e7bf41e6..96eb0ef6d2f 100644
--- a/libgo/go/net/http/httptest/server.go
+++ b/libgo/go/net/http/httptest/server.go
@@ -204,25 +204,35 @@ func (h *waitGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end
// of ASN.1 time).
// generated from src/crypto/tls:
-// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
+// go run generate_cert.go --rsa-bits 1024 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
var localhostCert = []byte(`-----BEGIN CERTIFICATE-----
-MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD
-bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj
-bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBAN55NcYKZeInyTuhcCwFMhDHCmwa
-IUSdtXdcbItRB/yfXGBhiex00IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEA
-AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud
-EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA
-AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAAoQn/ytgqpiLcZu9XKbCJsJcvkgk
-Se6AbGXgSlq+ZCEVo0qIwSgeBqmsJxUu7NCSOwVJLYNEBO2DtIxoYVk+MA==
+MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zANBgkqhkiG9w0BAQsFADAS
+MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
+MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
+iQKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9SjY1bIw4
+iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZBl2+XsDul
+rKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQABo2gwZjAO
+BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw
+AwEB/zAuBgNVHREEJzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAA
+AAAAATANBgkqhkiG9w0BAQsFAAOBgQCEcetwO59EWk7WiJsG4x8SY+UIAA+flUI9
+tyC4lNhbcF2Idq9greZwbYCqTTTr2XiRNSMLCOjKyI7ukPoPjo16ocHj+P3vZGfs
+h1fIw3cSS2OolhloGw/XM6RWPWtPAlGykKLciQrBru5NAPvCMsb/I1DAceTiotQM
+fblo6RBxUQ==
-----END CERTIFICATE-----`)
// localhostKey is the private key for localhostCert.
var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
-MIIBPAIBAAJBAN55NcYKZeInyTuhcCwFMhDHCmwaIUSdtXdcbItRB/yfXGBhiex0
-0IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEAAQJBAQdUx66rfh8sYsgfdcvV
-NoafYpnEcB5s4m/vSVe6SU7dCK6eYec9f9wpT353ljhDUHq3EbmE4foNzJngh35d
-AekCIQDhRQG5Li0Wj8TM4obOnnXUXf1jRv0UkzE9AHWLG5q3AwIhAPzSjpYUDjVW
-MCUXgckTpKCuGwbJk7424Nb8bLzf3kllAiA5mUBgjfr/WtFSJdWcPQ4Zt9KTMNKD
-EUO0ukpTwEIl6wIhAMbGqZK3zAAFdq8DD2jPx+UJXnh0rnOkZBzDtJ6/iN69AiEA
-1Aq8MJgTaYsDQWyU/hDq5YkDJc9e9DSCvUIzqxQWMQE=
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX
+qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
+f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
-----END RSA PRIVATE KEY-----`)
diff --git a/libgo/go/net/http/httputil/dump.go b/libgo/go/net/http/httputil/dump.go
index ac8f103f9b9..ca2d1cde924 100644
--- a/libgo/go/net/http/httputil/dump.go
+++ b/libgo/go/net/http/httputil/dump.go
@@ -98,6 +98,14 @@ func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
defer pr.Close()
defer pw.Close()
dr := &delegateReader{c: make(chan io.Reader)}
+
+ t := &http.Transport{
+ Dial: func(net, addr string) (net.Conn, error) {
+ return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil
+ },
+ }
+ defer t.CloseIdleConnections()
+
// Wait for the request before replying with a dummy response:
go func() {
req, err := http.ReadRequest(bufio.NewReader(pr))
@@ -107,16 +115,9 @@ func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
io.Copy(ioutil.Discard, req.Body)
req.Body.Close()
}
- dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n")
+ dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n")
}()
- t := &http.Transport{
- DisableKeepAlives: true,
- Dial: func(net, addr string) (net.Conn, error) {
- return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil
- },
- }
-
_, err := t.RoundTrip(reqSend)
req.Body = save
diff --git a/libgo/go/net/http/httputil/dump_test.go b/libgo/go/net/http/httputil/dump_test.go
index 024ee5a86f4..ae67e983ae9 100644
--- a/libgo/go/net/http/httputil/dump_test.go
+++ b/libgo/go/net/http/httputil/dump_test.go
@@ -71,7 +71,7 @@ var dumpTests = []dumpTest{
WantDumpOut: "GET /foo HTTP/1.1\r\n" +
"Host: example.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Accept-Encoding: gzip\r\n\r\n",
},
@@ -83,7 +83,7 @@ var dumpTests = []dumpTest{
WantDumpOut: "GET /foo HTTP/1.1\r\n" +
"Host: example.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Accept-Encoding: gzip\r\n\r\n",
},
@@ -105,7 +105,7 @@ var dumpTests = []dumpTest{
WantDumpOut: "POST / HTTP/1.1\r\n" +
"Host: post.tld\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Content-Length: 6\r\n" +
"Accept-Encoding: gzip\r\n\r\n",
@@ -130,7 +130,7 @@ var dumpTests = []dumpTest{
WantDumpOut: "POST / HTTP/1.1\r\n" +
"Host: post.tld\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Content-Length: 8193\r\n" +
"Accept-Encoding: gzip\r\n\r\n" +
strings.Repeat("a", 8193),
diff --git a/libgo/go/net/http/httputil/reverseproxy.go b/libgo/go/net/http/httputil/reverseproxy.go
index ab463701803..3b7a184d933 100644
--- a/libgo/go/net/http/httputil/reverseproxy.go
+++ b/libgo/go/net/http/httputil/reverseproxy.go
@@ -100,6 +100,24 @@ var hopHeaders = []string{
"Upgrade",
}
+type requestCanceler interface {
+ CancelRequest(*http.Request)
+}
+
+type runOnFirstRead struct {
+ io.Reader
+
+ fn func() // Run before first Read, then set to nil
+}
+
+func (c *runOnFirstRead) Read(bs []byte) (int, error) {
+ if c.fn != nil {
+ c.fn()
+ c.fn = nil
+ }
+ return c.Reader.Read(bs)
+}
+
func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
transport := p.Transport
if transport == nil {
@@ -109,6 +127,34 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
outreq := new(http.Request)
*outreq = *req // includes shallow copies of maps, but okay
+ if closeNotifier, ok := rw.(http.CloseNotifier); ok {
+ if requestCanceler, ok := transport.(requestCanceler); ok {
+ reqDone := make(chan struct{})
+ defer close(reqDone)
+
+ clientGone := closeNotifier.CloseNotify()
+
+ outreq.Body = struct {
+ io.Reader
+ io.Closer
+ }{
+ Reader: &runOnFirstRead{
+ Reader: outreq.Body,
+ fn: func() {
+ go func() {
+ select {
+ case <-clientGone:
+ requestCanceler.CancelRequest(outreq)
+ case <-reqDone:
+ }
+ }()
+ },
+ },
+ Closer: outreq.Body,
+ }
+ }
+ }
+
p.Director(outreq)
outreq.Proto = "HTTP/1.1"
outreq.ProtoMajor = 1
@@ -148,7 +194,6 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
rw.WriteHeader(http.StatusInternalServerError)
return
}
- defer res.Body.Close()
for _, h := range hopHeaders {
res.Header.Del(h)
@@ -156,8 +201,28 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
copyHeader(rw.Header(), res.Header)
+ // The "Trailer" header isn't included in the Transport's response,
+ // at least for *http.Transport. Build it up from Trailer.
+ if len(res.Trailer) > 0 {
+ var trailerKeys []string
+ for k := range res.Trailer {
+ trailerKeys = append(trailerKeys, k)
+ }
+ rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
+ }
+
rw.WriteHeader(res.StatusCode)
+ if len(res.Trailer) > 0 {
+ // Force chunking if we saw a response trailer.
+ // This prevents net/http from calculating the length for short
+ // bodies and adding a Content-Length.
+ if fl, ok := rw.(http.Flusher); ok {
+ fl.Flush()
+ }
+ }
p.copyResponse(rw, res.Body)
+ res.Body.Close() // close now, instead of defer, to populate res.Trailer
+ copyHeader(rw.Header(), res.Trailer)
}
func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
diff --git a/libgo/go/net/http/httputil/reverseproxy_test.go b/libgo/go/net/http/httputil/reverseproxy_test.go
index e9539b44b6e..25947e6a8ab 100644
--- a/libgo/go/net/http/httputil/reverseproxy_test.go
+++ b/libgo/go/net/http/httputil/reverseproxy_test.go
@@ -8,9 +8,12 @@ package httputil
import (
"io/ioutil"
+ "log"
"net/http"
"net/http/httptest"
"net/url"
+ "reflect"
+ "runtime"
"strings"
"testing"
"time"
@@ -41,6 +44,7 @@ func TestReverseProxy(t *testing.T) {
if g, e := r.Host, "some-name"; g != e {
t.Errorf("backend got Host header %q, want %q", g, e)
}
+ w.Header().Set("Trailer", "X-Trailer")
w.Header().Set("X-Foo", "bar")
w.Header().Set("Upgrade", "foo")
w.Header().Set(fakeHopHeader, "foo")
@@ -49,6 +53,7 @@ func TestReverseProxy(t *testing.T) {
http.SetCookie(w, &http.Cookie{Name: "flavor", Value: "chocolateChip"})
w.WriteHeader(backendStatus)
w.Write([]byte(backendResponse))
+ w.Header().Set("X-Trailer", "trailer_value")
}))
defer backend.Close()
backendURL, err := url.Parse(backend.URL)
@@ -83,6 +88,9 @@ func TestReverseProxy(t *testing.T) {
if g, e := len(res.Header["Set-Cookie"]), 1; g != e {
t.Fatalf("got %d SetCookies, want %d", g, e)
}
+ if g, e := res.Trailer, (http.Header{"X-Trailer": nil}); !reflect.DeepEqual(g, e) {
+ t.Errorf("before reading body, Trailer = %#v; want %#v", g, e)
+ }
if cookie := res.Cookies()[0]; cookie.Name != "flavor" {
t.Errorf("unexpected cookie %q", cookie.Name)
}
@@ -90,6 +98,10 @@ func TestReverseProxy(t *testing.T) {
if g, e := string(bodyBytes), backendResponse; g != e {
t.Errorf("got body %q; expected %q", g, e)
}
+ if g, e := res.Trailer.Get("X-Trailer"), "trailer_value"; g != e {
+ t.Errorf("Trailer(X-Trailer) = %q ; want %q", g, e)
+ }
+
}
func TestXForwardedFor(t *testing.T) {
@@ -211,3 +223,61 @@ func TestReverseProxyFlushInterval(t *testing.T) {
t.Error("maxLatencyWriter flushLoop() never exited")
}
}
+
+func TestReverseProxyCancellation(t *testing.T) {
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping test; see https://golang.org/issue/9554")
+ }
+ const backendResponse = "I am the backend"
+
+ reqInFlight := make(chan struct{})
+ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ close(reqInFlight)
+
+ select {
+ case <-time.After(10 * time.Second):
+ // Note: this should only happen in broken implementations, and the
+ // closenotify case should be instantaneous.
+ t.Log("Failed to close backend connection")
+ t.Fail()
+ case <-w.(http.CloseNotifier).CloseNotify():
+ }
+
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(backendResponse))
+ }))
+
+ defer backend.Close()
+
+ backend.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
+
+ backendURL, err := url.Parse(backend.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ proxyHandler := NewSingleHostReverseProxy(backendURL)
+
+ // Discards errors of the form:
+ // http: proxy error: read tcp 127.0.0.1:44643: use of closed network connection
+ proxyHandler.ErrorLog = log.New(ioutil.Discard, "", 0)
+
+ frontend := httptest.NewServer(proxyHandler)
+ defer frontend.Close()
+
+ getReq, _ := http.NewRequest("GET", frontend.URL, nil)
+ go func() {
+ <-reqInFlight
+ http.DefaultTransport.(*http.Transport).CancelRequest(getReq)
+ }()
+ res, err := http.DefaultClient.Do(getReq)
+ if res != nil {
+ t.Fatal("Non-nil response")
+ }
+ if err == nil {
+ // This should be an error like:
+ // Get http://127.0.0.1:58079: read tcp 127.0.0.1:58079:
+ // use of closed network connection
+ t.Fatal("DefaultClient.Do() returned nil error")
+ }
+}
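The handler side of the mechanism the proxy and this test rely on can be sketched on its own: CloseNotify fires when the client disconnects, letting the handler abandon work early. (The type assertion is safe for the standard server's ResponseWriter; the URL and timings are illustrative.)

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	http.HandleFunc("/slow", func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-w.(http.CloseNotifier).CloseNotify(): // client hung up
			log.Println("client went away; abandoning work")
		case <-time.After(10 * time.Second): // stand-in for slow work
			w.Write([]byte("finished\n"))
		}
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}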
diff --git a/libgo/go/net/http/internal/chunked.go b/libgo/go/net/http/internal/chunked.go
index 9294deb3e5e..6d7c69874d9 100644
--- a/libgo/go/net/http/internal/chunked.go
+++ b/libgo/go/net/http/internal/chunked.go
@@ -173,8 +173,12 @@ func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
err = io.ErrShortWrite
return
}
- _, err = io.WriteString(cw.Wire, "\r\n")
-
+ if _, err = io.WriteString(cw.Wire, "\r\n"); err != nil {
+ return
+ }
+ if bw, ok := cw.Wire.(*FlushAfterChunkWriter); ok {
+ err = bw.Flush()
+ }
return
}
@@ -183,6 +187,15 @@ func (cw *chunkedWriter) Close() error {
return err
}
+// FlushAfterChunkWriter signals from the caller of NewChunkedWriter
+// that each chunk should be followed by a flush. It is used by the
+// http.Transport code to keep the buffering behavior for headers and
+// trailers, but flush out chunks aggressively in the middle for
+// request bodies which may be generated slowly. See Issue 6574.
+type FlushAfterChunkWriter struct {
+ *bufio.Writer
+}
+
func parseHexUint(v []byte) (n uint64, err error) {
for _, b := range v {
n <<= 4
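The same opt-in-by-type pattern can be sketched outside the internal package: a writer that flushes the underlying *bufio.Writer after each record, but only when the caller wrapped it in a flush-signaling type. The names below are illustrative analogues, not the net/http ones.

package main

import (
	"bufio"
	"io"
	"os"
)

// flushAfterRecordWriter signals that each record should be followed by a
// Flush of the embedded *bufio.Writer (illustrative analogue of
// net/http/internal's FlushAfterChunkWriter).
type flushAfterRecordWriter struct {
	*bufio.Writer
}

// recordWriter writes records to w and flushes after each one only when
// the caller opted in by wrapping w in flushAfterRecordWriter.
type recordWriter struct {
	w io.Writer
}

func (rw recordWriter) writeRecord(p []byte) error {
	if _, err := rw.w.Write(p); err != nil {
		return err
	}
	if fw, ok := rw.w.(flushAfterRecordWriter); ok {
		return fw.Flush()
	}
	return nil
}

func main() {
	bw := bufio.NewWriter(os.Stdout)
	rw := recordWriter{w: flushAfterRecordWriter{bw}}
	rw.writeRecord([]byte("hello\n")) // reaches os.Stdout right away
	rw.writeRecord([]byte("world\n"))
	bw.Flush() // final flush for anything still buffered
}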
diff --git a/libgo/go/net/http/lex.go b/libgo/go/net/http/lex.go
index cb33318f49b..50b14f8b325 100644
--- a/libgo/go/net/http/lex.go
+++ b/libgo/go/net/http/lex.go
@@ -4,6 +4,11 @@
package http
+import (
+ "strings"
+ "unicode/utf8"
+)
+
// This file deals with lexical matters of HTTP
var isTokenTable = [127]bool{
@@ -94,3 +99,71 @@ func isToken(r rune) bool {
func isNotToken(r rune) bool {
return !isToken(r)
}
+
+// headerValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func headerValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.

+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ v = trimOWS(v)
+ if comma := strings.IndexByte(v, ','); comma != -1 {
+ return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+ }
+ return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/libgo/go/net/http/lex_test.go b/libgo/go/net/http/lex_test.go
index 6d9d294f703..986fda17dcd 100644
--- a/libgo/go/net/http/lex_test.go
+++ b/libgo/go/net/http/lex_test.go
@@ -29,3 +29,73 @@ func TestIsToken(t *testing.T) {
}
}
}
+
+func TestHeaderValuesContainsToken(t *testing.T) {
+ tests := []struct {
+ vals []string
+ token string
+ want bool
+ }{
+ {
+ vals: []string{"foo"},
+ token: "foo",
+ want: true,
+ },
+ {
+ vals: []string{"bar", "foo"},
+ token: "foo",
+ want: true,
+ },
+ {
+ vals: []string{"foo"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"foo"},
+ token: "bar",
+ want: false,
+ },
+ {
+ vals: []string{" foo "},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"foo,bar"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"bar,foo,bar"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"bar , foo"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"foo ,bar "},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"bar, foo ,bar"},
+ token: "FOO",
+ want: true,
+ },
+ {
+ vals: []string{"bar , foo"},
+ token: "FOO",
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+ got := headerValuesContainsToken(tt.vals, tt.token)
+ if got != tt.want {
+ t.Errorf("headerValuesContainsToken(%q, %q) = %v; want %v", tt.vals, tt.token, got, tt.want)
+ }
+ }
+}
diff --git a/libgo/go/net/http/main_test.go b/libgo/go/net/http/main_test.go
index b8c71fd19fd..12eea6f0e11 100644
--- a/libgo/go/net/http/main_test.go
+++ b/libgo/go/net/http/main_test.go
@@ -56,17 +56,21 @@ func goroutineLeaked() bool {
// not counting goroutines for leakage in -short mode
return false
}
- gs := interestingGoroutines()
- n := 0
- stackCount := make(map[string]int)
- for _, g := range gs {
- stackCount[g]++
- n++
- }
-
- if n == 0 {
- return false
+ var stackCount map[string]int
+ for i := 0; i < 5; i++ {
+ n := 0
+ stackCount = make(map[string]int)
+ gs := interestingGoroutines()
+ for _, g := range gs {
+ stackCount[g]++
+ n++
+ }
+ if n == 0 {
+ return false
+ }
+ // Wait for goroutines to schedule and die off:
+ time.Sleep(100 * time.Millisecond)
}
fmt.Fprintf(os.Stderr, "Too many goroutines running after net/http test(s).\n")
for stack, count := range stackCount {
@@ -75,7 +79,7 @@ func goroutineLeaked() bool {
return true
}
-func afterTest(t *testing.T) {
+func afterTest(t testing.TB) {
http.DefaultTransport.(*http.Transport).CloseIdleConnections()
if testing.Short() {
return
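The retry loop added above amounts to polling for goroutines to finish before declaring a leak. A standalone sketch of the same idea, using runtime.NumGoroutine rather than stack inspection (names and timings are ours):

package main

import (
	"fmt"
	"runtime"
	"time"
)

// leaked reports whether the goroutine count stays above base after a few
// polling rounds, giving short-lived goroutines a chance to exit first.
func leaked(base int) bool {
	for i := 0; i < 5; i++ {
		if runtime.NumGoroutine() <= base {
			return false
		}
		// Wait for goroutines to schedule and die off.
		time.Sleep(100 * time.Millisecond)
	}
	return true
}

func main() {
	base := runtime.NumGoroutine()
	go func() { time.Sleep(50 * time.Millisecond) }()
	fmt.Println(leaked(base)) // false: the goroutine exits within the window
}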
diff --git a/libgo/go/net/http/npn_test.go b/libgo/go/net/http/npn_test.go
index 98b8930d064..e2e911d3dd1 100644
--- a/libgo/go/net/http/npn_test.go
+++ b/libgo/go/net/http/npn_test.go
@@ -6,6 +6,7 @@ package http_test
import (
"bufio"
+ "bytes"
"crypto/tls"
"fmt"
"io"
@@ -17,6 +18,7 @@ import (
)
func TestNextProtoUpgrade(t *testing.T) {
+ defer afterTest(t)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
fmt.Fprintf(w, "path=%s,proto=", r.URL.Path)
if r.TLS != nil {
@@ -38,12 +40,12 @@ func TestNextProtoUpgrade(t *testing.T) {
ts.StartTLS()
defer ts.Close()
- tr := newTLSTransport(t, ts)
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
-
// Normal request, without NPN.
{
+ tr := newTLSTransport(t, ts)
+ defer tr.CloseIdleConnections()
+ c := &Client{Transport: tr}
+
res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
@@ -60,11 +62,17 @@ func TestNextProtoUpgrade(t *testing.T) {
// Request to an advertised but unhandled NPN protocol.
// Server will hang up.
{
- tr.CloseIdleConnections()
+ tr := newTLSTransport(t, ts)
tr.TLSClientConfig.NextProtos = []string{"unhandled-proto"}
- _, err := c.Get(ts.URL)
+ defer tr.CloseIdleConnections()
+ c := &Client{Transport: tr}
+
+ res, err := c.Get(ts.URL)
if err == nil {
- t.Errorf("expected error on unhandled-proto request")
+ defer res.Body.Close()
+ var buf bytes.Buffer
+ res.Write(&buf)
+ t.Errorf("expected error on unhandled-proto request; got: %s", buf.Bytes())
}
}
diff --git a/libgo/go/net/http/pprof/pprof.go b/libgo/go/net/http/pprof/pprof.go
index a23f1bc4bc6..8994392b1e4 100644
--- a/libgo/go/net/http/pprof/pprof.go
+++ b/libgo/go/net/http/pprof/pprof.go
@@ -34,12 +34,16 @@
//
// go tool pprof http://localhost:6060/debug/pprof/block
//
+// Or to collect a 5-second execution trace:
+//
+// wget http://localhost:6060/debug/pprof/trace?seconds=5
+//
// To view all available profiles, open http://localhost:6060/debug/pprof/
// in your browser.
//
// For a study of the facility in action, visit
//
-// http://blog.golang.org/2011/06/profiling-go-programs.html
+// https://blog.golang.org/2011/06/profiling-go-programs.html
//
package pprof
@@ -64,6 +68,7 @@ func init() {
http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline))
http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile))
http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol))
+ http.Handle("/debug/pprof/trace", http.HandlerFunc(Trace))
}
// Cmdline responds with the running program's
@@ -98,6 +103,33 @@ func Profile(w http.ResponseWriter, r *http.Request) {
pprof.StopCPUProfile()
}
+// Trace responds with the execution trace in binary form.
+// Tracing lasts for the duration specified in the "seconds" GET parameter, or for 1 second if not specified.
+// The package initialization registers it as /debug/pprof/trace.
+func Trace(w http.ResponseWriter, r *http.Request) {
+ sec, _ := strconv.ParseInt(r.FormValue("seconds"), 10, 64)
+ if sec == 0 {
+ sec = 1
+ }
+
+ // Set Content Type assuming trace.Start will work,
+ // because if it does it starts writing.
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Write([]byte("tracing not yet supported with gccgo"))
+ /*
+ if err := trace.Start(w); err != nil {
+ // trace.Start failed, so no writes yet.
+ // Can change header back to text content and send error code.
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "Could not enable tracing: %s\n", err)
+ return
+ }
+ time.Sleep(time.Duration(sec) * time.Second)
+ trace.Stop()
+ */
+}
+
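A short sketch of wiring the new handler onto a private mux instead of relying on the init() registration on DefaultServeMux (on this gccgo port the handler currently just reports that tracing is unsupported):

package main

import (
	"log"
	"net/http"
	"net/http/pprof"
)

func main() {
	mux := http.NewServeMux()
	// Same handler that init() registers at /debug/pprof/trace.
	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	log.Fatal(http.ListenAndServe("localhost:6060", mux))
}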
// Symbol looks up the program counters listed in the request,
// responding with a table mapping program counters to function names.
// The package initialization registers it as /debug/pprof/symbol.
@@ -193,17 +225,17 @@ var indexTmpl = template.Must(template.New("index").Parse(`<html>
<head>
<title>/debug/pprof/</title>
</head>
+<body>
/debug/pprof/<br>
<br>
-<body>
profiles:<br>
<table>
{{range .}}
-<tr><td align=right>{{.Count}}<td><a href="/debug/pprof/{{.Name}}?debug=1">{{.Name}}</a>
+<tr><td align=right>{{.Count}}<td><a href="{{.Name}}?debug=1">{{.Name}}</a>
{{end}}
</table>
<br>
-<a href="/debug/pprof/goroutine?debug=2">full goroutine stack dump</a><br>
+<a href="goroutine?debug=2">full goroutine stack dump</a><br>
</body>
</html>
`))
diff --git a/libgo/go/net/http/proxy_test.go b/libgo/go/net/http/proxy_test.go
index b6aed3792b6..823d1447ee9 100644
--- a/libgo/go/net/http/proxy_test.go
+++ b/libgo/go/net/http/proxy_test.go
@@ -18,7 +18,7 @@ var UseProxyTests = []struct {
match bool
}{
// Never proxy localhost:
- {"localhost:80", false},
+ {"localhost", false},
{"127.0.0.1", false},
{"127.0.0.2", false},
{"[::1]", false},
diff --git a/libgo/go/net/http/readrequest_test.go b/libgo/go/net/http/readrequest_test.go
index e930d99af62..60e2be41d17 100644
--- a/libgo/go/net/http/readrequest_test.go
+++ b/libgo/go/net/http/readrequest_test.go
@@ -9,6 +9,7 @@ import (
"bytes"
"fmt"
"io"
+ "io/ioutil"
"net/url"
"reflect"
"strings"
@@ -177,6 +178,36 @@ var reqTests = []reqTest{
noError,
},
+ // Tests chunked body and a bogus Content-Length which should be deleted.
+ {
+ "POST / HTTP/1.1\r\n" +
+ "Host: foo.com\r\n" +
+ "Transfer-Encoding: chunked\r\n" +
+ "Content-Length: 9999\r\n\r\n" + // to be removed.
+ "3\r\nfoo\r\n" +
+ "3\r\nbar\r\n" +
+ "0\r\n" +
+ "\r\n",
+ &Request{
+ Method: "POST",
+ URL: &url.URL{
+ Path: "/",
+ },
+ TransferEncoding: []string{"chunked"},
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: Header{},
+ ContentLength: -1,
+ Host: "foo.com",
+ RequestURI: "/",
+ },
+
+ "foobar",
+ noTrailer,
+ noError,
+ },
+
// CONNECT request with domain name:
{
"CONNECT www.google.com:443 HTTP/1.1\r\n\r\n",
@@ -323,6 +354,32 @@ var reqTests = []reqTest{
noTrailer,
noError,
},
+
+ // HEAD with Content-Length 0. Make sure this is permitted,
+ // since I think we used to send it.
+ {
+ "HEAD / HTTP/1.1\r\nHost: issue8261.com\r\nConnection: close\r\nContent-Length: 0\r\n\r\n",
+ &Request{
+ Method: "HEAD",
+ URL: &url.URL{
+ Path: "/",
+ },
+ Header: Header{
+ "Connection": []string{"close"},
+ "Content-Length": []string{"0"},
+ },
+ Host: "issue8261.com",
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Close: true,
+ RequestURI: "/",
+ },
+
+ noBody,
+ noTrailer,
+ noError,
+ },
}
func TestReadRequest(t *testing.T) {
@@ -356,3 +413,34 @@ func TestReadRequest(t *testing.T) {
}
}
}
+
+// reqBytes treats req as a request (with \n delimiters) and returns it with \r\n delimiters,
+// ending in \r\n\r\n
+func reqBytes(req string) []byte {
+ return []byte(strings.Replace(strings.TrimSpace(req), "\n", "\r\n", -1) + "\r\n\r\n")
+}
+
+var badRequestTests = []struct {
+ name string
+ req []byte
+}{
+ {"bad_connect_host", reqBytes("CONNECT []%20%48%54%54%50%2f%31%2e%31%0a%4d%79%48%65%61%64%65%72%3a%20%31%32%33%0a%0a HTTP/1.0")},
+ {"smuggle_two_contentlen", reqBytes(`POST / HTTP/1.1
+Content-Length: 3
+Content-Length: 4
+
+abc`)},
+ {"smuggle_content_len_head", reqBytes(`HEAD / HTTP/1.1
+Host: foo
+Content-Length: 5`)},
+}
+
+func TestReadRequest_Bad(t *testing.T) {
+ for _, tt := range badRequestTests {
+ got, err := ReadRequest(bufio.NewReader(bytes.NewReader(tt.req)))
+ if err == nil {
+ all, err := ioutil.ReadAll(got.Body)
+ t.Errorf("%s: got unexpected request = %#v\n Body = %q, %v", tt.name, got, all, err)
+ }
+ }
+}
diff --git a/libgo/go/net/http/request.go b/libgo/go/net/http/request.go
index 487eebcb841..31fe45a4edb 100644
--- a/libgo/go/net/http/request.go
+++ b/libgo/go/net/http/request.go
@@ -25,9 +25,6 @@ import (
)
const (
- maxValueLength = 4096
- maxHeaderLines = 1024
- chunkSize = 4 << 10 // 4 KB chunks
defaultMaxMemory = 32 << 20 // 32 MB
)
@@ -172,8 +169,9 @@ type Request struct {
// The HTTP client ignores Form and uses Body instead.
Form url.Values
- // PostForm contains the parsed form data from POST or PUT
- // body parameters.
+ // PostForm contains the parsed form data from POST, PATCH,
+ // or PUT body parameters.
+ //
// This field is only available after ParseForm is called.
// The HTTP client ignores PostForm and uses Body instead.
PostForm url.Values
@@ -226,6 +224,13 @@ type Request struct {
// otherwise it leaves the field nil.
// This field is ignored by the HTTP client.
TLS *tls.ConnectionState
+
+ // Cancel is an optional channel whose closure indicates that the client
+ // request should be regarded as canceled. Not all implementations of
+ // RoundTripper may support Cancel.
+ //
+ // For server requests, this field is not applicable.
+ Cancel <-chan struct{}
}
// ProtoAtLeast reports whether the HTTP protocol used
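A hedged sketch of client-side use of the new Cancel field; as the field's comment says, not every RoundTripper honors it, and the URL and timing here are placeholders.

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	cancel := make(chan struct{})
	req, err := http.NewRequest("GET", "http://example.com/slow", nil) // hypothetical slow endpoint
	if err != nil {
		log.Fatal(err)
	}
	req.Cancel = cancel // closing this channel asks the Transport to give up

	go func() {
		time.Sleep(2 * time.Second)
		close(cancel)
	}()

	_, err = http.DefaultClient.Do(req)
	log.Println(err) // a cancellation error once the channel is closed
}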
@@ -245,6 +250,7 @@ func (r *Request) Cookies() []*Cookie {
return readCookies(r.Header, "")
}
+// ErrNoCookie is returned by Request's Cookie method when a cookie is not found.
var ErrNoCookie = errors.New("http: named cookie not present")
// Cookie returns the named cookie provided in the request or
@@ -329,13 +335,12 @@ func valueOrDefault(value, def string) string {
}
// NOTE: This is not intended to reflect the actual Go version being used.
-// It was changed from "Go http package" to "Go 1.1 package http" at the
-// time of the Go 1.1 release because the former User-Agent had ended up
-// on a blacklist for some intrusion detection systems.
+// It was changed at the time of Go 1.1 release because the former User-Agent
+// had ended up on a blacklist for some intrusion detection systems.
// See https://codereview.appspot.com/7532043.
-const defaultUserAgent = "Go 1.1 package http"
+const defaultUserAgent = "Go-http-client/1.1"
-// Write writes an HTTP/1.1 request -- header and body -- in wire format.
+// Write writes an HTTP/1.1 request, which is the header and body, in wire format.
// This method consults the following fields of the request:
// Host
// URL
@@ -364,14 +369,23 @@ func (r *Request) WriteProxy(w io.Writer) error {
// extraHeaders may be nil
func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error {
- host := req.Host
+ // Find the target host. Prefer the Host: header, but if that
+ // is not given, use the host from the request URL.
+ //
+ // Clean the host, in case it arrives with unexpected stuff in it.
+ host := cleanHost(req.Host)
if host == "" {
if req.URL == nil {
return errors.New("http: Request.Write on Request with no Host or URL set")
}
- host = req.URL.Host
+ host = cleanHost(req.URL.Host)
}
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ host = removeZone(host)
+
ruri := req.URL.RequestURI()
if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" {
ruri = req.URL.Scheme + "://" + host + ruri
@@ -456,6 +470,39 @@ func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) err
return nil
}
+// cleanHost strips anything after '/' or ' '.
+// Ideally we'd clean the Host header according to the spec:
+// https://tools.ietf.org/html/rfc7230#section-5.4 (Host = uri-host [ ":" port ]")
+// https://tools.ietf.org/html/rfc7230#section-2.7 (uri-host -> rfc3986's host)
+// https://tools.ietf.org/html/rfc3986#section-3.2.2 (definition of host)
+// But practically, what we are trying to avoid is the situation in
+// issue 11206, where a malformed Host header used in the proxy context
+// would create a bad request. So it is enough to just truncate at the
+// first offending character.
+func cleanHost(in string) string {
+ if i := strings.IndexAny(in, " /"); i != -1 {
+ return in[:i]
+ }
+ return in
+}
+
+// removeZone removes the IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
// ParseHTTPVersion parses a HTTP version string.
// "HTTP/1.0" returns (1, 0, true).
func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
@@ -489,6 +536,13 @@ func ParseHTTPVersion(vers string) (major, minor int, ok bool) {
// If the provided body is also an io.Closer, the returned
// Request.Body is set to body and will be closed by the Client
// methods Do, Post, and PostForm, and Transport.RoundTrip.
+//
+// NewRequest returns a Request suitable for use with Client.Do or
+// Transport.RoundTrip.
+// To create a request for use with testing a Server Handler use either
+// ReadRequest or manually update the Request fields. See the Request
+// type's documentation for the difference between inbound and outbound
+// request fields.
func NewRequest(method, urlStr string, body io.Reader) (*Request, error) {
u, err := url.Parse(urlStr)
if err != nil {
@@ -536,10 +590,11 @@ func (r *Request) BasicAuth() (username, password string, ok bool) {
// parseBasicAuth parses an HTTP Basic Authentication string.
// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true).
func parseBasicAuth(auth string) (username, password string, ok bool) {
- if !strings.HasPrefix(auth, "Basic ") {
+ const prefix = "Basic "
+ if !strings.HasPrefix(auth, prefix) {
return
}
- c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic "))
+ c, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
if err != nil {
return
}
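For reference, typical server-side use of the exported wrapper around this parser, as a sketch (the credentials are placeholders; the plain string comparison is for illustration only):

package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		user, pass, ok := r.BasicAuth() // wraps parseBasicAuth
		// Real code should compare against stored credentials in constant time.
		if !ok || user != "aladdin" || pass != "open sesame" {
			w.Header().Set("WWW-Authenticate", `Basic realm="example"`)
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		w.Write([]byte("welcome, " + user + "\n"))
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}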
@@ -587,7 +642,7 @@ func putTextprotoReader(r *textproto.Reader) {
textprotoReaderPool.Put(r)
}
-// ReadRequest reads and parses a request from b.
+// ReadRequest reads and parses an incoming request from b.
func ReadRequest(b *bufio.Reader) (req *Request, err error) {
tp := newTextprotoReader(b)
@@ -660,19 +715,20 @@ func ReadRequest(b *bufio.Reader) (req *Request, err error) {
fixPragmaCacheControl(req.Header)
+ req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false)
+
err = readTransfer(req, b)
if err != nil {
return nil, err
}
- req.Close = shouldClose(req.ProtoMajor, req.ProtoMinor, req.Header, false)
return req, nil
}
// MaxBytesReader is similar to io.LimitReader but is intended for
// limiting the size of incoming request bodies. In contrast to
// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a
-// non-EOF error for a Read beyond the limit, and Closes the
+// non-EOF error for a Read beyond the limit, and closes the
// underlying reader when its Close method is called.
//
// MaxBytesReader prevents clients from accidentally or maliciously
@@ -686,23 +742,52 @@ type maxBytesReader struct {
r io.ReadCloser // underlying reader
n int64 // max bytes remaining
stopped bool
+ sawEOF bool
+}
+
+func (l *maxBytesReader) tooLarge() (n int, err error) {
+ if !l.stopped {
+ l.stopped = true
+ if res, ok := l.w.(*response); ok {
+ res.requestTooLarge()
+ }
+ }
+ return 0, errors.New("http: request body too large")
}
func (l *maxBytesReader) Read(p []byte) (n int, err error) {
- if l.n <= 0 {
- if !l.stopped {
- l.stopped = true
- if res, ok := l.w.(*response); ok {
- res.requestTooLarge()
- }
+ toRead := l.n
+ if l.n == 0 {
+ if l.sawEOF {
+ return l.tooLarge()
}
- return 0, errors.New("http: request body too large")
+ // The underlying io.Reader may not return (0, io.EOF)
+ // at EOF if the requested size is 0, so read 1 byte
+ // instead. The io.Reader docs are a bit ambiguous
+ // about the return value of Read when 0 bytes are
+ // requested, and {bytes,strings}.Reader gets it wrong
+ // too (it returns (0, nil) even at EOF).
+ toRead = 1
}
- if int64(len(p)) > l.n {
- p = p[:l.n]
+ if int64(len(p)) > toRead {
+ p = p[:toRead]
}
n, err = l.r.Read(p)
+ if err == io.EOF {
+ l.sawEOF = true
+ }
+ if l.n == 0 {
+ // If we had zero bytes to read remaining (but hadn't seen EOF)
+ // and we get a byte here, that means we went over our limit.
+ if n > 0 {
+ return l.tooLarge()
+ }
+ return 0, err
+ }
l.n -= int64(n)
+ if l.n < 0 {
+ l.n = 0
+ }
return
}
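The exported entry point that wraps this reader, sketched in a handler that caps the request body size (path and limit are arbitrary):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) {
		r.Body = http.MaxBytesReader(w, r.Body, 1<<20) // refuse bodies over 1 MB
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
			return
		}
		fmt.Fprintf(w, "received %d bytes\n", len(body))
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}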
@@ -852,6 +937,7 @@ func (r *Request) ParseMultipartForm(maxMemory int64) error {
// POST and PUT body parameters take precedence over URL query string values.
// FormValue calls ParseMultipartForm and ParseForm if necessary and ignores
// any errors returned by these functions.
+// If key is not present, FormValue returns the empty string.
// To access multiple values of the same key, call ParseForm and
// then inspect Request.Form directly.
func (r *Request) FormValue(key string) string {
@@ -868,6 +954,7 @@ func (r *Request) FormValue(key string) string {
// or PUT request body. URL query parameters are ignored.
// PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores
// any errors returned by these functions.
+// If key is not present, PostFormValue returns the empty string.
func (r *Request) PostFormValue(key string) string {
if r.PostForm == nil {
r.ParseMultipartForm(defaultMaxMemory)
diff --git a/libgo/go/net/http/request_test.go b/libgo/go/net/http/request_test.go
index 759ea4e8b5d..627620c0c41 100644
--- a/libgo/go/net/http/request_test.go
+++ b/libgo/go/net/http/request_test.go
@@ -178,6 +178,7 @@ func TestParseMultipartForm(t *testing.T) {
}
func TestRedirect(t *testing.T) {
+ defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
switch r.URL.Path {
case "/":
@@ -326,13 +327,31 @@ func TestReadRequestErrors(t *testing.T) {
}
}
+var newRequestHostTests = []struct {
+ in, out string
+}{
+ {"http://www.example.com/", "www.example.com"},
+ {"http://www.example.com:8080/", "www.example.com:8080"},
+
+ {"http://192.168.0.1/", "192.168.0.1"},
+ {"http://192.168.0.1:8080/", "192.168.0.1:8080"},
+
+ {"http://[fe80::1]/", "[fe80::1]"},
+ {"http://[fe80::1]:8080/", "[fe80::1]:8080"},
+ {"http://[fe80::1%25en0]/", "[fe80::1%en0]"},
+ {"http://[fe80::1%25en0]:8080/", "[fe80::1%en0]:8080"},
+}
+
func TestNewRequestHost(t *testing.T) {
- req, err := NewRequest("GET", "http://localhost:1234/", nil)
- if err != nil {
- t.Fatal(err)
- }
- if req.Host != "localhost:1234" {
- t.Errorf("Host = %q; want localhost:1234", req.Host)
+ for i, tt := range newRequestHostTests {
+ req, err := NewRequest("GET", tt.in, nil)
+ if err != nil {
+ t.Errorf("#%v: %v", i, err)
+ continue
+ }
+ if req.Host != tt.out {
+ t.Errorf("got %q; want %q", req.Host, tt.out)
+ }
}
}
@@ -402,8 +421,6 @@ type getBasicAuthTest struct {
ok bool
}
-type parseBasicAuthTest getBasicAuthTest
-
type basicAuthCredentialsTest struct {
username, password string
}
@@ -496,6 +513,82 @@ func TestRequestWriteBufferedWriter(t *testing.T) {
}
}
+func TestRequestBadHost(t *testing.T) {
+ got := []string{}
+ req, err := NewRequest("GET", "http://foo.com with spaces/after", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ req.Write(logWrites{t, &got})
+ want := []string{
+ "GET /after HTTP/1.1\r\n",
+ "Host: foo.com\r\n",
+ "User-Agent: " + DefaultUserAgent + "\r\n",
+ "\r\n",
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("Writes = %q\n Want = %q", got, want)
+ }
+}
+
+func TestStarRequest(t *testing.T) {
+ req, err := ReadRequest(bufio.NewReader(strings.NewReader("M-SEARCH * HTTP/1.1\r\n\r\n")))
+ if err != nil {
+ return
+ }
+ var out bytes.Buffer
+ if err := req.Write(&out); err != nil {
+ t.Fatal(err)
+ }
+ back, err := ReadRequest(bufio.NewReader(&out))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Ignore the Headers (the User-Agent breaks the deep equal,
+ // but we don't care about it)
+ req.Header = nil
+ back.Header = nil
+ if !reflect.DeepEqual(req, back) {
+ t.Errorf("Original request doesn't match Request read back.")
+ t.Logf("Original: %#v", req)
+ t.Logf("Original.URL: %#v", req.URL)
+ t.Logf("Wrote: %s", out.Bytes())
+ t.Logf("Read back (doesn't match Original): %#v", back)
+ }
+}
+
+type responseWriterJustWriter struct {
+ io.Writer
+}
+
+func (responseWriterJustWriter) Header() Header { panic("should not be called") }
+func (responseWriterJustWriter) WriteHeader(int) { panic("should not be called") }
+
+// delayedEOFReader never returns (n > 0, io.EOF), instead putting
+// off the io.EOF until a subsequent Read call.
+type delayedEOFReader struct {
+ r io.Reader
+}
+
+func (dr delayedEOFReader) Read(p []byte) (n int, err error) {
+ n, err = dr.r.Read(p)
+ if n > 0 && err == io.EOF {
+ err = nil
+ }
+ return
+}
+
+func TestIssue10884_MaxBytesEOF(t *testing.T) {
+ dst := ioutil.Discard
+ _, err := io.Copy(dst, MaxBytesReader(
+ responseWriterJustWriter{dst},
+ ioutil.NopCloser(delayedEOFReader{strings.NewReader("12345")}),
+ 5))
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
func testMissingFile(t *testing.T, req *Request) {
f, fh, err := req.FormFile("missing")
if f != nil {
diff --git a/libgo/go/net/http/requestwrite_test.go b/libgo/go/net/http/requestwrite_test.go
index 7a6bd587863..cfb95b0a800 100644
--- a/libgo/go/net/http/requestwrite_test.go
+++ b/libgo/go/net/http/requestwrite_test.go
@@ -93,13 +93,13 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "GET /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
WantProxy: "GET http://www.google.com/search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
},
@@ -123,14 +123,14 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "POST /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Connection: close\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Connection: close\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("abcdef") + chunk(""),
@@ -156,7 +156,7 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "POST /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Connection: close\r\n" +
"Content-Length: 6\r\n" +
"\r\n" +
@@ -164,7 +164,7 @@ var reqWriteTests = []reqWriteTest{
WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Connection: close\r\n" +
"Content-Length: 6\r\n" +
"\r\n" +
@@ -187,14 +187,14 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Content-Length: 6\r\n" +
"\r\n" +
"abcdef",
WantProxy: "POST http://example.com/ HTTP/1.1\r\n" +
"Host: example.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Content-Length: 6\r\n" +
"\r\n" +
"abcdef",
@@ -210,7 +210,7 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "GET /search HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"\r\n",
},
@@ -232,13 +232,13 @@ var reqWriteTests = []reqWriteTest{
// Also, nginx expects it for POST and PUT.
WantWrite: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Content-Length: 0\r\n" +
"\r\n",
WantProxy: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Content-Length: 0\r\n" +
"\r\n",
},
@@ -258,13 +258,13 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("x") + chunk(""),
WantProxy: "POST / HTTP/1.1\r\n" +
"Host: example.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
chunk("x") + chunk(""),
},
@@ -365,7 +365,7 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "GET /foo HTTP/1.1\r\n" +
"Host: \r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"X-Foo: X-Bar\r\n\r\n",
},
@@ -391,7 +391,7 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "GET /search HTTP/1.1\r\n" +
"Host: \r\n" +
- "User-Agent: Go 1.1 package http\r\n\r\n",
+ "User-Agent: Go-http-client/1.1\r\n\r\n",
},
// Opaque test #1 from golang.org/issue/4860
@@ -410,7 +410,7 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "GET /%2F/%2F/ HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n\r\n",
+ "User-Agent: Go-http-client/1.1\r\n\r\n",
},
// Opaque test #2 from golang.org/issue/4860
@@ -429,7 +429,7 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "GET http://y.google.com/%2F/%2F/ HTTP/1.1\r\n" +
"Host: x.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n\r\n",
+ "User-Agent: Go-http-client/1.1\r\n\r\n",
},
// Testing custom case in header keys. Issue 5022.
@@ -451,10 +451,41 @@ var reqWriteTests = []reqWriteTest{
WantWrite: "GET / HTTP/1.1\r\n" +
"Host: www.google.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"ALL-CAPS: x\r\n" +
"\r\n",
},
+
+ // Request with host header field; IPv6 address with zone identifier
+ {
+ Req: Request{
+ Method: "GET",
+ URL: &url.URL{
+ Host: "[fe80::1%en0]",
+ },
+ },
+
+ WantWrite: "GET / HTTP/1.1\r\n" +
+ "Host: [fe80::1]\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
+ "\r\n",
+ },
+
+ // Request with optional host header field; IPv6 address with zone identifier
+ {
+ Req: Request{
+ Method: "GET",
+ URL: &url.URL{
+ Host: "www.example.com",
+ },
+ Host: "[fe80::1%en0]:8080",
+ },
+
+ WantWrite: "GET / HTTP/1.1\r\n" +
+ "Host: [fe80::1]:8080\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
+ "\r\n",
+ },
}
func TestRequestWrite(t *testing.T) {
@@ -538,7 +569,7 @@ func TestRequestWriteClosesBody(t *testing.T) {
}
expected := "POST / HTTP/1.1\r\n" +
"Host: foo.com\r\n" +
- "User-Agent: Go 1.1 package http\r\n" +
+ "User-Agent: Go-http-client/1.1\r\n" +
"Transfer-Encoding: chunked\r\n\r\n" +
// TODO: currently we don't buffer before chunking, so we get a
// single "m" chunk before the other chunks, as this was the 1-byte
diff --git a/libgo/go/net/http/response.go b/libgo/go/net/http/response.go
index 5d2c39080e4..76b85385244 100644
--- a/libgo/go/net/http/response.go
+++ b/libgo/go/net/http/response.go
@@ -48,7 +48,10 @@ type Response struct {
// The http Client and Transport guarantee that Body is always
// non-nil, even on responses without a body or responses with
// a zero-length body. It is the caller's responsibility to
- // close Body.
+ // close Body. The default HTTP client's Transport does not
+ // attempt to reuse HTTP/1.0 or HTTP/1.1 TCP connections
+ // ("keep-alive") unless the Body is read to completion and is
+ // closed.
//
// The Body is automatically dechunked if the server replied
// with a "chunked" Transfer-Encoding.
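A minimal client sketch of what the new documentation sentence asks for: drain the body to EOF and close it so the Transport can return the connection to its idle pool for reuse (the URL is a placeholder for any keep-alive server).

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	for i := 0; i < 3; i++ {
		res, err := http.Get("http://example.com/")
		if err != nil {
			log.Fatal(err)
		}
		io.Copy(ioutil.Discard, res.Body) // read to completion...
		res.Body.Close()                  // ...then close, so the conn can be reused
	}
}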
@@ -90,6 +93,8 @@ func (r *Response) Cookies() []*Cookie {
return readSetCookies(r.Header)
}
+// ErrNoLocation is returned by Response's Location method
+// when no Location header is present.
var ErrNoLocation = errors.New("http: no Location header in response")
// Location returns the URL of the response's "Location" header,
@@ -186,8 +191,10 @@ func (r *Response) ProtoAtLeast(major, minor int) bool {
r.ProtoMajor == major && r.ProtoMinor >= minor
}
-// Writes the response (header, body and trailer) in wire format. This method
-// consults the following fields of the response:
+// Write writes r to w in the HTTP/1.n server response format,
+// including the status line, headers, body, and optional trailer.
+//
+// This method consults the following fields of the response r:
//
// StatusCode
// ProtoMajor
@@ -199,7 +206,7 @@ func (r *Response) ProtoAtLeast(major, minor int) bool {
// ContentLength
// Header, values for non-canonical keys will have unpredictable behavior
//
-// Body is closed after it is sent.
+// The Response Body is closed after it is sent.
func (r *Response) Write(w io.Writer) error {
// Status line
text := r.Status
diff --git a/libgo/go/net/http/response_test.go b/libgo/go/net/http/response_test.go
index 06e940d9aba..421cf55f491 100644
--- a/libgo/go/net/http/response_test.go
+++ b/libgo/go/net/http/response_test.go
@@ -405,6 +405,57 @@ some body`,
"foobar",
},
+
+ // Both keep-alive and close, on the same Connection line. (Issue 8840)
+ {
+ "HTTP/1.1 200 OK\r\n" +
+ "Content-Length: 256\r\n" +
+ "Connection: keep-alive, close\r\n" +
+ "\r\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("HEAD"),
+ Header: Header{
+ "Content-Length": {"256"},
+ },
+ TransferEncoding: nil,
+ Close: true,
+ ContentLength: 256,
+ },
+
+ "",
+ },
+
+ // Both keep-alive and close, on different Connection lines. (Issue 8840)
+ {
+ "HTTP/1.1 200 OK\r\n" +
+ "Content-Length: 256\r\n" +
+ "Connection: keep-alive\r\n" +
+ "Connection: close\r\n" +
+ "\r\n",
+
+ Response{
+ Status: "200 OK",
+ StatusCode: 200,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: dummyReq("HEAD"),
+ Header: Header{
+ "Content-Length": {"256"},
+ },
+ TransferEncoding: nil,
+ Close: true,
+ ContentLength: 256,
+ },
+
+ "",
+ },
}
func TestReadResponse(t *testing.T) {
diff --git a/libgo/go/net/http/responsewrite_test.go b/libgo/go/net/http/responsewrite_test.go
index 585b13b8504..5b8d47ab581 100644
--- a/libgo/go/net/http/responsewrite_test.go
+++ b/libgo/go/net/http/responsewrite_test.go
@@ -207,6 +207,21 @@ func TestResponseWrite(t *testing.T) {
},
"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n",
},
+
+ // When a response to a POST has Content-Length: -1, make sure we don't
+ // write the Content-Length as -1.
+ {
+ Response{
+ StatusCode: StatusOK,
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Request: &Request{Method: "POST"},
+ Header: Header{},
+ ContentLength: -1,
+ Body: ioutil.NopCloser(strings.NewReader("abcdef")),
+ },
+ "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nabcdef",
+ },
}
for i := range respWriteTests {
diff --git a/libgo/go/net/http/serve_test.go b/libgo/go/net/http/serve_test.go
index 6bd168d3de3..d51417eb4a0 100644
--- a/libgo/go/net/http/serve_test.go
+++ b/libgo/go/net/http/serve_test.go
@@ -20,6 +20,7 @@ import (
. "net/http"
"net/http/httptest"
"net/http/httputil"
+ "net/http/internal"
"net/url"
"os"
"os/exec"
@@ -146,6 +147,7 @@ func (ht handlerTest) rawResponse(req string) string {
}
func TestConsumingBodyOnNextConn(t *testing.T) {
+ defer afterTest(t)
conn := new(testConn)
for i := 0; i < 2; i++ {
conn.readBuf.Write([]byte(
@@ -205,6 +207,7 @@ var handlers = []struct {
}{
{"/", "Default"},
{"/someDir/", "someDir"},
+ {"/#/", "hash"},
{"someHost.com/someDir/", "someHost.com/someDir"},
}
@@ -213,12 +216,14 @@ var vtests = []struct {
expected string
}{
{"http://localhost/someDir/apage", "someDir"},
+ {"http://localhost/%23/apage", "hash"},
{"http://localhost/otherDir/apage", "Default"},
{"http://someHost.com/someDir/apage", "someHost.com/someDir"},
{"http://otherHost.com/someDir/apage", "someDir"},
{"http://otherHost.com/aDir/apage", "Default"},
// redirections for trees
{"http://localhost/someDir", "/someDir/"},
+ {"http://localhost/%23", "/%23/"},
{"http://someHost.com/someDir", "/someDir/"},
}
@@ -416,7 +421,7 @@ func TestServeMuxHandlerRedirects(t *testing.T) {
}
}
-// Tests for http://code.google.com/p/go/issues/detail?id=900
+// Tests for https://golang.org/issue/900
func TestMuxRedirectLeadingSlashes(t *testing.T) {
paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"}
for _, path := range paths {
@@ -443,7 +448,7 @@ func TestMuxRedirectLeadingSlashes(t *testing.T) {
func TestServerTimeouts(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7237")
+ t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
reqNum := 0
@@ -522,7 +527,7 @@ func TestServerTimeouts(t *testing.T) {
// request) that will never happen.
func TestOnlyWriteTimeout(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7237")
+ t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
var conn net.Conn
@@ -877,7 +882,7 @@ func TestHeadResponses(t *testing.T) {
func TestTLSHandshakeTimeout(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7237")
+ t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
@@ -1105,6 +1110,7 @@ func TestServerExpect(t *testing.T) {
// Under a ~256KB (maxPostHandlerReadBytes) threshold, the server
// should consume client request bodies that a handler didn't read.
func TestServerUnreadRequestBodyLittle(t *testing.T) {
+ defer afterTest(t)
conn := new(testConn)
body := strings.Repeat("x", 100<<10)
conn.readBuf.Write([]byte(fmt.Sprintf(
@@ -1166,6 +1172,365 @@ func TestServerUnreadRequestBodyLarge(t *testing.T) {
}
}
+type handlerBodyCloseTest struct {
+ bodySize int
+ bodyChunked bool
+ reqConnClose bool
+
+ wantEOFSearch bool // should Handler's Body.Close do Reads, looking for EOF?
+ wantNextReq bool // should it find the next request on the same conn?
+}
+
+func (t handlerBodyCloseTest) connectionHeader() string {
+ if t.reqConnClose {
+ return "Connection: close\r\n"
+ }
+ return ""
+}
+
+var handlerBodyCloseTests = [...]handlerBodyCloseTest{
+ // Small enough to slurp past to the next request +
+ // has Content-Length.
+ 0: {
+ bodySize: 20 << 10,
+ bodyChunked: false,
+ reqConnClose: false,
+ wantEOFSearch: true,
+ wantNextReq: true,
+ },
+
+ // Small enough to slurp past to the next request +
+ // is chunked.
+ 1: {
+ bodySize: 20 << 10,
+ bodyChunked: true,
+ reqConnClose: false,
+ wantEOFSearch: true,
+ wantNextReq: true,
+ },
+
+ // Small enough to slurp past to the next request +
+ // has Content-Length +
+ // declares Connection: close (so pointless to read more).
+ 2: {
+ bodySize: 20 << 10,
+ bodyChunked: false,
+ reqConnClose: true,
+ wantEOFSearch: false,
+ wantNextReq: false,
+ },
+
+ // Small enough to slurp past to the next request +
+ // declares Connection: close,
+ // but chunked, so it might have trailers.
+ // TODO: maybe skip this search if no trailers were declared
+ // in the headers.
+ 3: {
+ bodySize: 20 << 10,
+ bodyChunked: true,
+ reqConnClose: true,
+ wantEOFSearch: true,
+ wantNextReq: false,
+ },
+
+ // Big with Content-Length, so give up immediately if we know it's too big.
+ 4: {
+ bodySize: 1 << 20,
+ bodyChunked: false, // has a Content-Length
+ reqConnClose: false,
+ wantEOFSearch: false,
+ wantNextReq: false,
+ },
+
+ // Big chunked, so read a bit before giving up.
+ 5: {
+ bodySize: 1 << 20,
+ bodyChunked: true,
+ reqConnClose: false,
+ wantEOFSearch: true,
+ wantNextReq: false,
+ },
+
+ // Big with Connection: close, but chunked, so search for trailers.
+ // TODO: maybe skip this search if no trailers were declared
+ // in the headers.
+ 6: {
+ bodySize: 1 << 20,
+ bodyChunked: true,
+ reqConnClose: true,
+ wantEOFSearch: true,
+ wantNextReq: false,
+ },
+
+ // Big with Connection: close, so don't do any reads on Close.
+ // With Content-Length.
+ 7: {
+ bodySize: 1 << 20,
+ bodyChunked: false,
+ reqConnClose: true,
+ wantEOFSearch: false,
+ wantNextReq: false,
+ },
+}
+
+func TestHandlerBodyClose(t *testing.T) {
+ for i, tt := range handlerBodyCloseTests {
+ testHandlerBodyClose(t, i, tt)
+ }
+}
+
+func testHandlerBodyClose(t *testing.T, i int, tt handlerBodyCloseTest) {
+ conn := new(testConn)
+ body := strings.Repeat("x", tt.bodySize)
+ if tt.bodyChunked {
+ conn.readBuf.WriteString("POST / HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ tt.connectionHeader() +
+ "Transfer-Encoding: chunked\r\n" +
+ "\r\n")
+ cw := internal.NewChunkedWriter(&conn.readBuf)
+ io.WriteString(cw, body)
+ cw.Close()
+ conn.readBuf.WriteString("\r\n")
+ } else {
+ conn.readBuf.Write([]byte(fmt.Sprintf(
+ "POST / HTTP/1.1\r\n"+
+ "Host: test\r\n"+
+ tt.connectionHeader()+
+ "Content-Length: %d\r\n"+
+ "\r\n", len(body))))
+ conn.readBuf.Write([]byte(body))
+ }
+ if !tt.reqConnClose {
+ conn.readBuf.WriteString("GET / HTTP/1.1\r\nHost: test\r\n\r\n")
+ }
+ conn.closec = make(chan bool, 1)
+
+ ls := &oneConnListener{conn}
+ var numReqs int
+ var size0, size1 int
+ go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
+ numReqs++
+ if numReqs == 1 {
+ size0 = conn.readBuf.Len()
+ req.Body.Close()
+ size1 = conn.readBuf.Len()
+ }
+ }))
+ <-conn.closec
+ if numReqs < 1 || numReqs > 2 {
+ t.Fatalf("%d. bug in test. unexpected number of requests = %d", i, numReqs)
+ }
+ didSearch := size0 != size1
+ if didSearch != tt.wantEOFSearch {
+ t.Errorf("%d. did EOF search = %v; want %v (size went from %d to %d)", i, didSearch, !didSearch, size0, size1)
+ }
+ if tt.wantNextReq && numReqs != 2 {
+ t.Errorf("%d. numReq = %d; want 2", i, numReqs)
+ }
+}
+
+// testHandlerBodyConsumer represents a function injected into a test handler to
+// vary work done on a request Body.
+type testHandlerBodyConsumer struct {
+ name string
+ f func(io.ReadCloser)
+}
+
+var testHandlerBodyConsumers = []testHandlerBodyConsumer{
+ {"nil", func(io.ReadCloser) {}},
+ {"close", func(r io.ReadCloser) { r.Close() }},
+ {"discard", func(r io.ReadCloser) { io.Copy(ioutil.Discard, r) }},
+}
+
+func TestRequestBodyReadErrorClosesConnection(t *testing.T) {
+ defer afterTest(t)
+ for _, handler := range testHandlerBodyConsumers {
+ conn := new(testConn)
+ conn.readBuf.WriteString("POST /public HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ "Transfer-Encoding: chunked\r\n" +
+ "\r\n" +
+ "hax\r\n" + // Invalid chunked encoding
+ "GET /secret HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ "\r\n")
+
+ conn.closec = make(chan bool, 1)
+ ls := &oneConnListener{conn}
+ var numReqs int
+ go Serve(ls, HandlerFunc(func(_ ResponseWriter, req *Request) {
+ numReqs++
+ if strings.Contains(req.URL.Path, "secret") {
+ t.Error("Request for /secret encountered, should not have happened.")
+ }
+ handler.f(req.Body)
+ }))
+ <-conn.closec
+ if numReqs != 1 {
+ t.Errorf("Handler %v: got %d reqs; want 1", handler.name, numReqs)
+ }
+ }
+}
+
+func TestInvalidTrailerClosesConnection(t *testing.T) {
+ defer afterTest(t)
+ for _, handler := range testHandlerBodyConsumers {
+ conn := new(testConn)
+ conn.readBuf.WriteString("POST /public HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ "Trailer: hack\r\n" +
+ "Transfer-Encoding: chunked\r\n" +
+ "\r\n" +
+ "3\r\n" +
+ "hax\r\n" +
+ "0\r\n" +
+ "I'm not a valid trailer\r\n" +
+ "GET /secret HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ "\r\n")
+
+ conn.closec = make(chan bool, 1)
+ ln := &oneConnListener{conn}
+ var numReqs int
+ go Serve(ln, HandlerFunc(func(_ ResponseWriter, req *Request) {
+ numReqs++
+ if strings.Contains(req.URL.Path, "secret") {
+ t.Errorf("Handler %s, Request for /secret encountered, should not have happened.", handler.name)
+ }
+ handler.f(req.Body)
+ }))
+ <-conn.closec
+ if numReqs != 1 {
+ t.Errorf("Handler %s: got %d reqs; want 1", handler.name, numReqs)
+ }
+ }
+}
+
+// slowTestConn is a net.Conn that provides a means to simulate parts of a
+// request being received piecemeal. Deadlines can be set and enforced in both
+// Read and Write.
+type slowTestConn struct {
+ // over multiple calls to Read, time.Durations are slept, strings are read.
+ script []interface{}
+ closec chan bool
+ rd, wd time.Time // read, write deadline
+ noopConn
+}
+
+func (c *slowTestConn) SetDeadline(t time.Time) error {
+ c.SetReadDeadline(t)
+ c.SetWriteDeadline(t)
+ return nil
+}
+
+func (c *slowTestConn) SetReadDeadline(t time.Time) error {
+ c.rd = t
+ return nil
+}
+
+func (c *slowTestConn) SetWriteDeadline(t time.Time) error {
+ c.wd = t
+ return nil
+}
+
+func (c *slowTestConn) Read(b []byte) (n int, err error) {
+restart:
+ if !c.rd.IsZero() && time.Now().After(c.rd) {
+ return 0, syscall.ETIMEDOUT
+ }
+ if len(c.script) == 0 {
+ return 0, io.EOF
+ }
+
+ switch cue := c.script[0].(type) {
+ case time.Duration:
+ if !c.rd.IsZero() {
+ // If the deadline falls in the middle of our sleep window, deduct
+ // part of the sleep, then return a timeout.
+ if remaining := c.rd.Sub(time.Now()); remaining < cue {
+ c.script[0] = cue - remaining
+ time.Sleep(remaining)
+ return 0, syscall.ETIMEDOUT
+ }
+ }
+ c.script = c.script[1:]
+ time.Sleep(cue)
+ goto restart
+
+ case string:
+ n = copy(b, cue)
+ // If cue is too big for the buffer, leave the end for the next Read.
+ if len(cue) > n {
+ c.script[0] = cue[n:]
+ } else {
+ c.script = c.script[1:]
+ }
+
+ default:
+ panic("unknown cue in slowTestConn script")
+ }
+
+ return
+}
+
+func (c *slowTestConn) Close() error {
+ select {
+ case c.closec <- true:
+ default:
+ }
+ return nil
+}
+
+func (c *slowTestConn) Write(b []byte) (int, error) {
+ if !c.wd.IsZero() && time.Now().After(c.wd) {
+ return 0, syscall.ETIMEDOUT
+ }
+ return len(b), nil
+}
+
+func TestRequestBodyTimeoutClosesConnection(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in -short mode")
+ }
+ defer afterTest(t)
+ for _, handler := range testHandlerBodyConsumers {
+ conn := &slowTestConn{
+ script: []interface{}{
+ "POST /public HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ "Content-Length: 10000\r\n" +
+ "\r\n",
+ "foo bar baz",
+ 600 * time.Millisecond, // Request deadline should hit here
+ "GET /secret HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ "\r\n",
+ },
+ closec: make(chan bool, 1),
+ }
+ ls := &oneConnListener{conn}
+
+ var numReqs int
+ s := Server{
+ Handler: HandlerFunc(func(_ ResponseWriter, req *Request) {
+ numReqs++
+ if strings.Contains(req.URL.Path, "secret") {
+ t.Error("Request for /secret encountered, should not have happened.")
+ }
+ handler.f(req.Body)
+ }),
+ ReadTimeout: 400 * time.Millisecond,
+ }
+ go s.Serve(ls)
+ <-conn.closec
+
+ if numReqs != 1 {
+ t.Errorf("Handler %v: got %d reqs; want 1", handler.name, numReqs)
+ }
+ }
+}
+
func TestTimeoutHandler(t *testing.T) {
defer afterTest(t)
sendHi := make(chan bool, 1)
@@ -1451,19 +1816,23 @@ func testHandlerPanic(t *testing.T, withHijack bool, panicValue interface{}) {
}
}
-func TestNoDate(t *testing.T) {
+func TestServerNoDate(t *testing.T) { testServerNoHeader(t, "Date") }
+func TestServerNoContentType(t *testing.T) { testServerNoHeader(t, "Content-Type") }
+
+func testServerNoHeader(t *testing.T, header string) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- w.Header()["Date"] = nil
+ w.Header()[header] = nil
+ io.WriteString(w, "<html>foo</html>") // non-empty
}))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
- _, present := res.Header["Date"]
- if present {
- t.Fatalf("Expected no Date header; got %v", res.Header["Date"])
+ res.Body.Close()
+ if got, ok := res.Header[header]; ok {
+ t.Fatalf("Expected no %s header; got %q", header, got)
}
}
@@ -1577,7 +1946,7 @@ func TestRequestBodyLimit(t *testing.T) {
// side of their TCP connection, the server doesn't send a 400 Bad Request.
func TestClientWriteShutdown(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7237")
+ t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
@@ -1632,7 +2001,7 @@ func TestServerBufferedChunking(t *testing.T) {
// Tests that the server flushes its response headers out when it's
// ignoring the response body and waits a bit before forcefully
// closing the TCP connection, causing the client to get a RST.
-// See http://golang.org/issue/3595
+// See https://golang.org/issue/3595
func TestServerGracefulClose(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
@@ -2124,7 +2493,7 @@ func TestDoubleHijack(t *testing.T) {
<-conn.closec
}
-// http://code.google.com/p/go/issues/detail?id=5955
+// https://golang.org/issue/5955
// Note that this does not test the "request too large"
// exit path from the http server. This is intentional;
// not sending Connection: close is just a minor wire
@@ -2288,17 +2657,13 @@ func TestTransportAndServerSharedBodyRace(t *testing.T) {
unblockBackend := make(chan bool)
backend := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
- io.CopyN(rw, req.Body, bodySize/2)
+ io.CopyN(rw, req.Body, bodySize)
<-unblockBackend
}))
defer backend.Close()
backendRespc := make(chan *Response, 1)
proxy := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {
- if req.RequestURI == "/foo" {
- rw.Write([]byte("bar"))
- return
- }
req2, _ := NewRequest("POST", backend.URL, req.Body)
req2.ContentLength = bodySize
@@ -2307,7 +2672,7 @@ func TestTransportAndServerSharedBodyRace(t *testing.T) {
t.Errorf("Proxy outbound request: %v", err)
return
}
- _, err = io.CopyN(ioutil.Discard, bresp.Body, bodySize/4)
+ _, err = io.CopyN(ioutil.Discard, bresp.Body, bodySize/2)
if err != nil {
t.Errorf("Proxy copy error: %v", err)
return
@@ -2321,6 +2686,7 @@ func TestTransportAndServerSharedBodyRace(t *testing.T) {
}))
defer proxy.Close()
+ defer close(unblockBackend)
req, _ := NewRequest("POST", proxy.URL, io.LimitReader(neverEnding('a'), bodySize))
res, err := DefaultClient.Do(req)
if err != nil {
@@ -2329,8 +2695,12 @@ func TestTransportAndServerSharedBodyRace(t *testing.T) {
// Cleanup, so we don't leak goroutines.
res.Body.Close()
- close(unblockBackend)
- (<-backendRespc).Body.Close()
+ select {
+ case res := <-backendRespc:
+ res.Body.Close()
+ default:
+ // We failed earlier. (e.g. on DefaultClient.Do(req2))
+ }
}
// Test that a hanging Request.Body.Read from another goroutine can't
@@ -2384,19 +2754,24 @@ func TestRequestBodyCloseDoesntBlock(t *testing.T) {
}
}
-func TestResponseWriterWriteStringAllocs(t *testing.T) {
- t.Skip("allocs test unreliable with gccgo")
+// test that ResponseWriter implements io.stringWriter.
+func TestResponseWriterWriteString(t *testing.T) {
+ okc := make(chan bool, 1)
ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) {
- if r.URL.Path == "/s" {
- io.WriteString(w, "Hello world")
- } else {
- w.Write([]byte("Hello world"))
+ type stringWriter interface {
+ WriteString(s string) (n int, err error)
}
+ _, ok := w.(stringWriter)
+ okc <- ok
}))
- before := testing.AllocsPerRun(50, func() { ht.rawResponse("GET / HTTP/1.0") })
- after := testing.AllocsPerRun(50, func() { ht.rawResponse("GET /s HTTP/1.0") })
- if int(after) >= int(before) {
- t.Errorf("WriteString allocs of %v >= Write allocs of %v", after, before)
+ ht.rawResponse("GET / HTTP/1.0")
+ select {
+ case ok := <-okc:
+ if !ok {
+ t.Error("ResponseWriter did not implement io.stringWriter")
+ }
+ default:
+ t.Error("handler was never called")
}
}
@@ -2757,6 +3132,134 @@ func TestServerKeepAliveAfterWriteError(t *testing.T) {
}
}
+// Issue 9987: shouldn't add automatic Content-Length (or
+// Content-Type) if a Transfer-Encoding was set by the handler.
+func TestNoContentLengthIfTransferEncoding(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Transfer-Encoding", "foo")
+ io.WriteString(w, "<html>")
+ }))
+ defer ts.Close()
+ c, err := net.Dial("tcp", ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ defer c.Close()
+ if _, err := io.WriteString(c, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"); err != nil {
+ t.Fatal(err)
+ }
+ bs := bufio.NewScanner(c)
+ var got bytes.Buffer
+ for bs.Scan() {
+ if strings.TrimSpace(bs.Text()) == "" {
+ break
+ }
+ got.WriteString(bs.Text())
+ got.WriteByte('\n')
+ }
+ if err := bs.Err(); err != nil {
+ t.Fatal(err)
+ }
+ if strings.Contains(got.String(), "Content-Length") {
+ t.Errorf("Unexpected Content-Length in response headers: %s", got.String())
+ }
+ if strings.Contains(got.String(), "Content-Type") {
+ t.Errorf("Unexpected Content-Type in response headers: %s", got.String())
+ }
+}
+
+// tolerate extra CRLF(s) before Request-Line on subsequent requests on a conn
+// Issue 10876.
+func TestTolerateCRLFBeforeRequestLine(t *testing.T) {
+ req := []byte("POST / HTTP/1.1\r\nHost: golang.org\r\nContent-Length: 3\r\n\r\nABC" +
+ "\r\n\r\n" + // <-- this stuff is bogus, but we'll ignore it
+ "GET / HTTP/1.1\r\nHost: golang.org\r\n\r\n")
+ var buf bytes.Buffer
+ conn := &rwTestConn{
+ Reader: bytes.NewReader(req),
+ Writer: &buf,
+ closec: make(chan bool, 1),
+ }
+ ln := &oneConnListener{conn: conn}
+ numReq := 0
+ go Serve(ln, HandlerFunc(func(rw ResponseWriter, r *Request) {
+ numReq++
+ }))
+ <-conn.closec
+ if numReq != 2 {
+ t.Errorf("num requests = %d; want 2", numReq)
+ t.Logf("Res: %s", buf.Bytes())
+ }
+}
+
+func TestIssue11549_Expect100(t *testing.T) {
+ req := reqBytes(`PUT /readbody HTTP/1.1
+User-Agent: PycURL/7.22.0
+Host: 127.0.0.1:9000
+Accept: */*
+Expect: 100-continue
+Content-Length: 10
+
+HelloWorldPUT /noreadbody HTTP/1.1
+User-Agent: PycURL/7.22.0
+Host: 127.0.0.1:9000
+Accept: */*
+Expect: 100-continue
+Content-Length: 10
+
+GET /should-be-ignored HTTP/1.1
+Host: foo
+
+`)
+ var buf bytes.Buffer
+ conn := &rwTestConn{
+ Reader: bytes.NewReader(req),
+ Writer: &buf,
+ closec: make(chan bool, 1),
+ }
+ ln := &oneConnListener{conn: conn}
+ numReq := 0
+ go Serve(ln, HandlerFunc(func(w ResponseWriter, r *Request) {
+ numReq++
+ if r.URL.Path == "/readbody" {
+ ioutil.ReadAll(r.Body)
+ }
+ io.WriteString(w, "Hello world!")
+ }))
+ <-conn.closec
+ if numReq != 2 {
+ t.Errorf("num requests = %d; want 2", numReq)
+ }
+ if !strings.Contains(buf.String(), "Connection: close\r\n") {
+ t.Errorf("expected 'Connection: close' in response; got: %s", buf.String())
+ }
+}
+
+// If a Handler finishes and there's an unread request body,
+// verify the server doesn't try to do an implicit read on it before replying.
+func TestHandlerFinishSkipBigContentLengthRead(t *testing.T) {
+ conn := &testConn{closec: make(chan bool)}
+ conn.readBuf.Write([]byte(fmt.Sprintf(
+ "POST / HTTP/1.1\r\n" +
+ "Host: test\r\n" +
+ "Content-Length: 9999999999\r\n" +
+ "\r\n" + strings.Repeat("a", 1<<20))))
+
+ ls := &oneConnListener{conn}
+ var inHandlerLen int
+ go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
+ inHandlerLen = conn.readBuf.Len()
+ rw.WriteHeader(404)
+ }))
+ <-conn.closec
+ afterHandlerLen := conn.readBuf.Len()
+
+ if afterHandlerLen != inHandlerLen {
+ t.Errorf("unexpected implicit read. Read buffer went from %d -> %d", inHandlerLen, afterHandlerLen)
+ }
+}
+
func BenchmarkClientServer(b *testing.B) {
b.ReportAllocs()
b.StopTimer()
@@ -2886,7 +3389,7 @@ func BenchmarkServer(b *testing.B) {
defer ts.Close()
b.StartTimer()
- cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkServer")
+ cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkServer$")
cmd.Env = append([]string{
fmt.Sprintf("TEST_BENCH_CLIENT_N=%d", b.N),
fmt.Sprintf("TEST_BENCH_SERVER_URL=%s", ts.URL),
@@ -2897,6 +3400,95 @@ func BenchmarkServer(b *testing.B) {
}
}
+// getNoBody wraps Get but closes any Response.Body before returning the response.
+func getNoBody(urlStr string) (*Response, error) {
+ res, err := Get(urlStr)
+ if err != nil {
+ return nil, err
+ }
+ res.Body.Close()
+ return res, nil
+}
+
+// A benchmark for profiling the client without the HTTP server code.
+// The server code runs in a subprocess.
+func BenchmarkClient(b *testing.B) {
+ b.ReportAllocs()
+ b.StopTimer()
+ defer afterTest(b)
+
+ port := os.Getenv("TEST_BENCH_SERVER_PORT") // can be set by user
+ if port == "" {
+ port = "39207"
+ }
+ var data = []byte("Hello world.\n")
+ if server := os.Getenv("TEST_BENCH_SERVER"); server != "" {
+ // Server process mode.
+ HandleFunc("/", func(w ResponseWriter, r *Request) {
+ r.ParseForm()
+ if r.Form.Get("stop") != "" {
+ os.Exit(0)
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ w.Write(data)
+ })
+ log.Fatal(ListenAndServe("localhost:"+port, nil))
+ }
+
+ // Start server process.
+ cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkClient$")
+ cmd.Env = append(os.Environ(), "TEST_BENCH_SERVER=yes")
+ if err := cmd.Start(); err != nil {
+ b.Fatalf("subprocess failed to start: %v", err)
+ }
+ defer cmd.Process.Kill()
+ done := make(chan error)
+ go func() {
+ done <- cmd.Wait()
+ }()
+
+ // Wait for the server process to respond.
+ url := "http://localhost:" + port + "/"
+ for i := 0; i < 100; i++ {
+ time.Sleep(50 * time.Millisecond)
+ if _, err := getNoBody(url); err == nil {
+ break
+ }
+ if i == 99 {
+ b.Fatalf("subprocess does not respond")
+ }
+ }
+
+ // Do b.N requests to the server.
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ res, err := Get(url)
+ if err != nil {
+ b.Fatalf("Get: %v", err)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ res.Body.Close()
+ if err != nil {
+ b.Fatalf("ReadAll: %v", err)
+ }
+ if bytes.Compare(body, data) != 0 {
+ b.Fatalf("Got body: %q", body)
+ }
+ }
+ b.StopTimer()
+
+ // Instruct server process to stop.
+ getNoBody(url + "?stop=yes")
+ select {
+ case err := <-done:
+ if err != nil {
+ b.Fatalf("subprocess failed: %v", err)
+ }
+ case <-time.After(5 * time.Second):
+ b.Fatalf("subprocess did not stop")
+ }
+}
+
func BenchmarkServerFakeConnNoKeepAlive(b *testing.B) {
b.ReportAllocs()
req := reqBytes(`GET / HTTP/1.0
diff --git a/libgo/go/net/http/server.go b/libgo/go/net/http/server.go
index 008d5aa7a74..a3e43555bb3 100644
--- a/libgo/go/net/http/server.go
+++ b/libgo/go/net/http/server.go
@@ -15,6 +15,7 @@ import (
"io/ioutil"
"log"
"net"
+ "net/textproto"
"net/url"
"os"
"path"
@@ -55,9 +56,12 @@ type Handler interface {
// A ResponseWriter interface is used by an HTTP handler to
// construct an HTTP response.
type ResponseWriter interface {
- // Header returns the header map that will be sent by WriteHeader.
- // Changing the header after a call to WriteHeader (or Write) has
- // no effect.
+ // Header returns the header map that will be sent by
+ // WriteHeader. Changing the header after a call to
+ // WriteHeader (or Write) has no effect unless the modified
+ // headers were declared as trailers by setting the
+ // "Trailer" header before the call to WriteHeader (see example).
+ // To suppress implicit response headers, set their value to nil.
Header() Header
// Write writes the data to the connection as part of an HTTP reply.
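The rewritten Header documentation above refers to an example of declaring trailers. A minimal handler sketch of that pattern (not part of this patch; "X-Body-Sha1" is an arbitrary header name chosen for illustration):

package main

import (
	"crypto/sha1"
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

func trailerHandler(w http.ResponseWriter, r *http.Request) {
	// Declare the trailer key before the header is written.
	w.Header().Set("Trailer", "X-Body-Sha1")
	h := sha1.New()
	// No Content-Length is set, so the reply is chunked and trailers apply.
	io.Copy(io.MultiWriter(w, h), strings.NewReader("hello, trailers\n"))
	// Values assigned after the body has been written go out in the trailer.
	w.Header().Set("X-Body-Sha1", fmt.Sprintf("%x", h.Sum(nil)))
}

func main() {
	http.HandleFunc("/", trailerHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}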
@@ -93,8 +97,14 @@ type Hijacker interface {
// Hijack lets the caller take over the connection.
// After a call to Hijack(), the HTTP server library
// will not do anything else with the connection.
+ //
// It becomes the caller's responsibility to manage
// and close the connection.
+ //
+ // The returned net.Conn may have read or write deadlines
+ // already set, depending on the configuration of the
+ // Server. It is the caller's responsibility to set
+ // or clear those deadlines as needed.
Hijack() (net.Conn, *bufio.ReadWriter, error)
}
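Per the expanded Hijack documentation, a handler sketch (illustrative only, not from this patch) that takes over the connection and clears any deadlines the Server may have set:

package main

import (
	"log"
	"net/http"
	"time"
)

func hijackHandler(w http.ResponseWriter, r *http.Request) {
	hj, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "hijacking not supported", http.StatusInternalServerError)
		return
	}
	conn, bufrw, err := hj.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer conn.Close()
	// The conn may carry read/write deadlines set by the Server;
	// clear them before long-lived use, as the documentation advises.
	conn.SetDeadline(time.Time{})
	bufrw.WriteString("HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nhijacked\r\n")
	bufrw.Flush()
}

func main() {
	http.HandleFunc("/", hijackHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}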
@@ -120,6 +130,7 @@ type conn struct {
lr *io.LimitedReader // io.LimitReader(sr)
buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc
tlsState *tls.ConnectionState // or nil when not using TLS
+ lastMethod string // method of previous request, or ""
mu sync.Mutex // guards the following
clientGone bool // if client has disconnected mid-request
@@ -188,20 +199,14 @@ func (c *conn) noteClientGone() {
c.clientGone = true
}
-// A switchReader can have its Reader changed at runtime.
-// It's not safe for concurrent Reads and switches.
-type switchReader struct {
- io.Reader
-}
-
// A switchWriter can have its Writer changed at runtime.
// It's not safe for concurrent Writes and switches.
type switchWriter struct {
io.Writer
}
-// A liveSwitchReader is a switchReader that's safe for concurrent
-// reads and switches, if its mutex is held.
+// A liveSwitchReader can have its Reader changed at runtime. It's
+// safe for concurrent reads and switches, if its mutex is held.
type liveSwitchReader struct {
sync.Mutex
r io.Reader
@@ -288,10 +293,21 @@ func (cw *chunkWriter) close() {
cw.writeHeader(nil)
}
if cw.chunking {
- // zero EOF chunk, trailer key/value pairs (currently
- // unsupported in Go's server), followed by a blank
- // line.
- cw.res.conn.buf.WriteString("0\r\n\r\n")
+ bw := cw.res.conn.buf // conn's bufio writer
+ // zero chunk to mark EOF
+ bw.WriteString("0\r\n")
+ if len(cw.res.trailers) > 0 {
+ trailers := make(Header)
+ for _, h := range cw.res.trailers {
+ if vv := cw.res.handlerHeader[h]; len(vv) > 0 {
+ trailers[h] = vv
+ }
+ }
+ trailers.Write(bw) // the writer handles noting errors
+ }
+ // final blank line after the trailers (whether
+ // present or not)
+ bw.WriteString("\r\n")
}
}
@@ -332,6 +348,12 @@ type response struct {
// input from it.
requestBodyLimitHit bool
+ // trailers are the headers to be sent after the handler
+ // finishes writing the body. This field is initialized from
+ // the Trailer response header when the response header is
+ // written.
+ trailers []string
+
handlerDone bool // set true when the handler exits
// Buffers for Date and Content-Length
@@ -339,6 +361,19 @@ type response struct {
clenBuf [10]byte
}
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (w *response) declareTrailer(k string) {
+ k = CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Content-Length", "Trailer":
+ // Forbidden by RFC 2616 14.40.
+ return
+ }
+ w.trailers = append(w.trailers, k)
+}
+
// requestTooLarge is called by maxBytesReader when too much input has
// been read from the client.
func (w *response) requestTooLarge() {
@@ -438,7 +473,7 @@ func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
if debugServerConnections {
c.rwc = newLoggingConn("server", c.rwc)
}
- c.sr = liveSwitchReader{r: c.rwc}
+ c.sr.r = c.rwc
c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader)
br := newBufioReader(c.lr)
bw := newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
@@ -468,6 +503,8 @@ func newBufioReader(r io.Reader) *bufio.Reader {
br.Reset(r)
return br
}
+ // Note: if this reader size is ever changed, update
+ // TestHandlerBodyClose's assumptions.
return bufio.NewReader(r)
}
@@ -517,6 +554,7 @@ type expectContinueReader struct {
resp *response
readCloser io.ReadCloser
closed bool
+ sawEOF bool
}
func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
@@ -528,7 +566,11 @@ func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
ecr.resp.conn.buf.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
ecr.resp.conn.buf.Flush()
}
- return ecr.readCloser.Read(p)
+ n, err = ecr.readCloser.Read(p)
+ if err == io.EOF {
+ ecr.sawEOF = true
+ }
+ return
}
func (ecr *expectContinueReader) Close() error {
@@ -582,6 +624,11 @@ func (c *conn) readRequest() (w *response, err error) {
}
c.lr.N = c.server.initialLimitedReaderSize()
+ if c.lastMethod == "POST" {
+ // RFC 2616 section 4.1 tolerance for old buggy clients.
+ peek, _ := c.buf.Reader.Peek(4) // ReadRequest will get err below
+ c.buf.Reader.Discard(numLeadingCRorLF(peek))
+ }
var req *Request
if req, err = ReadRequest(c.buf.Reader); err != nil {
if c.lr.N == 0 {
@@ -590,9 +637,13 @@ func (c *conn) readRequest() (w *response, err error) {
return nil, err
}
c.lr.N = noLimit
+ c.lastMethod = req.Method
req.RemoteAddr = c.remoteAddr
req.TLS = c.tlsState
+ if body, ok := req.Body.(*body); ok {
+ body.doEarlyClose = true
+ }
w = &response{
conn: c,
@@ -747,6 +798,15 @@ func (cw *chunkWriter) writeHeader(p []byte) {
}
var setHeader extraHeader
+ trailers := false
+ for _, v := range cw.header["Trailer"] {
+ trailers = true
+ foreachHeaderElement(v, cw.res.declareTrailer)
+ }
+
+ te := header.get("Transfer-Encoding")
+ hasTE := te != ""
+
// If the handler is done but never sent a Content-Length
// response header and this is our first (and last) write, set
// it, even to zero. This helps HTTP/1.0 clients keep their
@@ -759,7 +819,9 @@ func (cw *chunkWriter) writeHeader(p []byte) {
// write non-zero bytes. If it's actually 0 bytes and the
// handler never looked at the Request.Method, we just don't
// send a Content-Length header.
- if w.handlerDone && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
+ // Further, we don't send an automatic Content-Length if they
+ // set a Transfer-Encoding, because they're generally incompatible.
+ if w.handlerDone && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
w.contentLength = int64(len(p))
setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
}
@@ -789,21 +851,78 @@ func (cw *chunkWriter) writeHeader(p []byte) {
w.closeAfterReply = true
}
+ // If the client wanted a 100-continue but we never sent it to
+ // them (or, more strictly: we never finished reading their
+ // request body), don't reuse this connection because it's now
+ // in an unknown state: we might be sending this response at
+ // the same time the client is now sending its request body
+ // after a timeout. (Some HTTP clients send Expect:
+ // 100-continue but knowing that some servers don't support
+ // it, the clients set a timer and send the body later anyway)
+ // If we haven't seen EOF, we can't skip over the unread body
+ // because we don't know if the next bytes on the wire will be
+ // the body-following-the-timer or the subsequent request.
+ // See Issue 11549.
+ if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
+ w.closeAfterReply = true
+ }
+
// Per RFC 2616, we should consume the request body before
// replying, if the handler hasn't already done so. But we
// don't want to do an unbounded amount of reading here for
// DoS reasons, so we only try up to a threshold.
if w.req.ContentLength != 0 && !w.closeAfterReply {
- ecr, isExpecter := w.req.Body.(*expectContinueReader)
- if !isExpecter || ecr.resp.wroteContinue {
- n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
- if n >= maxPostHandlerReadBytes {
- w.requestTooLarge()
- delHeader("Connection")
- setHeader.connection = "close"
- } else {
- w.req.Body.Close()
+ var discard, tooBig bool
+
+ switch bdy := w.req.Body.(type) {
+ case *expectContinueReader:
+ if bdy.resp.wroteContinue {
+ discard = true
+ }
+ case *body:
+ bdy.mu.Lock()
+ switch {
+ case bdy.closed:
+ if !bdy.sawEOF {
+ // Body was closed in handler with non-EOF error.
+ w.closeAfterReply = true
+ }
+ case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
+ tooBig = true
+ default:
+ discard = true
}
+ bdy.mu.Unlock()
+ default:
+ discard = true
+ }
+
+ if discard {
+ _, err := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
+ switch err {
+ case nil:
+ // There must be even more data left over.
+ tooBig = true
+ case ErrBodyReadAfterClose:
+ // Body was already consumed and closed.
+ case io.EOF:
+ // The remaining body was just consumed, close it.
+ err = w.req.Body.Close()
+ if err != nil {
+ w.closeAfterReply = true
+ }
+ default:
+ // Some other kind of error occurred, like a read timeout, or
+ // corrupt chunked encoding. In any case, whatever remains
+ // on the wire must not be parsed as another HTTP request.
+ w.closeAfterReply = true
+ }
+ }
+
+ if tooBig {
+ w.requestTooLarge()
+ delHeader("Connection")
+ setHeader.connection = "close"
}
}
@@ -811,7 +930,7 @@ func (cw *chunkWriter) writeHeader(p []byte) {
if bodyAllowedForStatus(code) {
// If no content type, apply sniffing algorithm to body.
_, haveType := header["Content-Type"]
- if !haveType {
+ if !haveType && !hasTE {
setHeader.contentType = DetectContentType(p)
}
} else {
@@ -824,8 +943,6 @@ func (cw *chunkWriter) writeHeader(p []byte) {
setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
}
- te := header.get("Transfer-Encoding")
- hasTE := te != ""
if hasCL && hasTE && te != "identity" {
// TODO: return an error if WriteHeader gets a return parameter
// For now just ignore the Content-Length.
@@ -885,6 +1002,24 @@ func (cw *chunkWriter) writeHeader(p []byte) {
w.conn.buf.Write(crlf)
}
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
+
// statusLines is a cache of Status-Line strings, keyed by code (for
// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a
// map keyed by struct of two fields. This map's max size is bounded
@@ -930,7 +1065,7 @@ func statusLine(req *Request, code int) string {
return line
}
-// bodyAllowed returns true if a Write is allowed for this response type.
+// bodyAllowed reports whether a Write is allowed for this response type.
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
if !w.wroteHeader {
@@ -1027,17 +1162,39 @@ func (w *response) finishRequest() {
if w.req.MultipartForm != nil {
w.req.MultipartForm.RemoveAll()
}
+}
+
+// shouldReuseConnection reports whether the underlying TCP connection can be reused.
+// It must only be called after the handler is done executing.
+func (w *response) shouldReuseConnection() bool {
+ if w.closeAfterReply {
+ // The request or something set while executing the
+ // handler indicated we shouldn't reuse this
+ // connection.
+ return false
+ }
if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
// Did not write enough. Avoid getting out of sync.
- w.closeAfterReply = true
+ return false
}
// There was some error writing to the underlying connection
// during the request, so don't re-use this conn.
if w.conn.werr != nil {
- w.closeAfterReply = true
+ return false
}
+
+ if w.closedRequestBodyEarly() {
+ return false
+ }
+
+ return true
+}
+
+func (w *response) closedRequestBodyEarly() bool {
+ body, ok := w.req.Body.(*body)
+ return ok && body.didEarlyClose()
}
func (w *response) Flush() {
@@ -1093,7 +1250,7 @@ var _ closeWriter = (*net.TCPConn)(nil)
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
-// See http://golang.org/issue/3595
+// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
c.finalFlush()
if tcp, ok := c.rwc.(closeWriter); ok {
@@ -1206,8 +1363,8 @@ func (c *conn) serve() {
return
}
w.finishRequest()
- if w.closeAfterReply {
- if w.requestBodyLimitHit {
+ if !w.shouldReuseConnection() {
+ if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
c.closeWriteAndWait()
}
break
@@ -1271,6 +1428,7 @@ func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
// The error message should be plain text.
func Error(w ResponseWriter, error string, code int) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(code)
fmt.Fprintln(w, error)
}
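With the added header, a plain http.Error call now also disables content sniffing in browsers. A small sketch of a handler relying on that (not part of the patch):

package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/admin", func(w http.ResponseWriter, r *http.Request) {
		// The error response is text/plain and, with this change,
		// also carries "X-Content-Type-Options: nosniff".
		http.Error(w, "403 forbidden", http.StatusForbidden)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}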
@@ -1576,7 +1734,8 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
// strings.Index can't be -1.
path = pattern[strings.Index(pattern, "/"):]
}
- mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(path, StatusMovedPermanently), pattern: pattern}
+ url := &url.URL{Path: path}
+ mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern}
}
}
@@ -1760,11 +1919,11 @@ func (s *Server) doKeepAlives() bool {
// By default, keep-alives are always enabled. Only very
// resource-constrained environments or servers in the process of
// shutting down should disable them.
-func (s *Server) SetKeepAlivesEnabled(v bool) {
+func (srv *Server) SetKeepAlivesEnabled(v bool) {
if v {
- atomic.StoreInt32(&s.disableKeepAlives, 0)
+ atomic.StoreInt32(&srv.disableKeepAlives, 0)
} else {
- atomic.StoreInt32(&s.disableKeepAlives, 1)
+ atomic.StoreInt32(&srv.disableKeepAlives, 1)
}
}
@@ -1812,7 +1971,7 @@ func ListenAndServe(addr string, handler Handler) error {
// expects HTTPS connections. Additionally, files containing a certificate and
// matching private key for the server must be provided. If the certificate
// is signed by a certificate authority, the certFile should be the concatenation
-// of the server's certificate followed by the CA's certificate.
+// of the server's certificate, any intermediates, and the CA's certificate.
//
// A trivial example server is:
//
@@ -1844,10 +2003,11 @@ func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Han
// ListenAndServeTLS listens on the TCP network address srv.Addr and
// then calls Serve to handle requests on incoming TLS connections.
//
-// Filenames containing a certificate and matching private key for
-// the server must be provided. If the certificate is signed by a
-// certificate authority, the certFile should be the concatenation
-// of the server's certificate followed by the CA's certificate.
+// Filenames containing a certificate and matching private key for the
+// server must be provided if the Server's TLSConfig.Certificates is
+// not populated. If the certificate is signed by a certificate
+// authority, the certFile should be the concatenation of the server's
+// certificate, any intermediates, and the CA's certificate.
//
// If srv.Addr is blank, ":https" is used.
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
@@ -1855,19 +2015,18 @@ func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
if addr == "" {
addr = ":https"
}
- config := &tls.Config{}
- if srv.TLSConfig != nil {
- *config = *srv.TLSConfig
- }
+ config := cloneTLSConfig(srv.TLSConfig)
if config.NextProtos == nil {
config.NextProtos = []string{"http/1.1"}
}
- var err error
- config.Certificates = make([]tls.Certificate, 1)
- config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return err
+ if len(config.Certificates) == 0 || certFile != "" || keyFile != "" {
+ var err error
+ config.Certificates = make([]tls.Certificate, 1)
+ config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
}
ln, err := net.Listen("tcp", addr)
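A sketch of the relaxed contract (file paths below are placeholders): when srv.TLSConfig.Certificates is already populated, certFile and keyFile may be left empty.

package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	cert, err := tls.LoadX509KeyPair("server.pem", "server.key") // placeholder paths
	if err != nil {
		log.Fatal(err)
	}
	srv := &http.Server{
		Addr:      ":8443",
		Handler:   http.FileServer(http.Dir(".")),
		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
	}
	// Empty certFile/keyFile: the certificate comes from TLSConfig.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}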
@@ -2094,3 +2253,15 @@ func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
}
return
}
+
+func numLeadingCRorLF(v []byte) (n int) {
+ for _, b := range v {
+ if b == '\r' || b == '\n' {
+ n++
+ continue
+ }
+ break
+ }
+ return
+
+}
diff --git a/libgo/go/net/http/sniff.go b/libgo/go/net/http/sniff.go
index 68f519b0542..3be8c865d3b 100644
--- a/libgo/go/net/http/sniff.go
+++ b/libgo/go/net/http/sniff.go
@@ -38,7 +38,11 @@ func DetectContentType(data []byte) string {
}
func isWS(b byte) bool {
- return bytes.IndexByte([]byte("\t\n\x0C\r "), b) != -1
+ switch b {
+ case '\t', '\n', '\x0c', '\r', ' ':
+ return true
+ }
+ return false
}
type sniffSig interface {
@@ -161,6 +165,8 @@ func (h htmlSig) match(data []byte, firstNonWS int) string {
return "text/html; charset=utf-8"
}
+var mp4ftype = []byte("ftyp")
+
type mp4Sig int
func (mp4Sig) match(data []byte, firstNonWS int) string {
@@ -172,7 +178,7 @@ func (mp4Sig) match(data []byte, firstNonWS int) string {
if boxSize%4 != 0 || len(data) < boxSize {
return ""
}
- if !bytes.Equal(data[4:8], []byte("ftyp")) {
+ if !bytes.Equal(data[4:8], mp4ftype) {
return ""
}
for st := 8; st < boxSize; st += 4 {
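For reference, the sniffing entry point touched by this hunk is the exported DetectContentType; a short illustration of the whitespace skipping (isWS) and signature matching:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Leading whitespace is skipped before signature matching.
	fmt.Println(http.DetectContentType([]byte("\n\t <html><body>hi</body></html>")))
	// -> text/html; charset=utf-8
	fmt.Println(http.DetectContentType([]byte{0xff, 0xd8, 0xff}))
	// -> image/jpeg
}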
diff --git a/libgo/go/net/http/transfer.go b/libgo/go/net/http/transfer.go
index 520500330bc..a8736b28e16 100644
--- a/libgo/go/net/http/transfer.go
+++ b/libgo/go/net/http/transfer.go
@@ -27,7 +27,7 @@ type errorReader struct {
err error
}
-func (r *errorReader) Read(p []byte) (n int, err error) {
+func (r errorReader) Read(p []byte) (n int, err error) {
return 0, r.err
}
@@ -43,6 +43,7 @@ type transferWriter struct {
Close bool
TransferEncoding []string
Trailer Header
+ IsResponse bool
}
func newTransferWriter(r interface{}) (t *transferWriter, err error) {
@@ -70,7 +71,7 @@ func newTransferWriter(r interface{}) (t *transferWriter, err error) {
n, rerr := io.ReadFull(t.Body, buf[:])
if rerr != nil && rerr != io.EOF {
t.ContentLength = -1
- t.Body = &errorReader{rerr}
+ t.Body = errorReader{rerr}
} else if n == 1 {
// Oh, guess there is data in this Body Reader after all.
// The ContentLength field just wasn't set.
@@ -89,6 +90,7 @@ func newTransferWriter(r interface{}) (t *transferWriter, err error) {
}
}
case *Response:
+ t.IsResponse = true
if rr.Request != nil {
t.Method = rr.Request.Method
}
@@ -138,11 +140,17 @@ func (t *transferWriter) shouldSendContentLength() bool {
if t.ContentLength > 0 {
return true
}
+ if t.ContentLength < 0 {
+ return false
+ }
// Many servers expect a Content-Length for these methods
if t.Method == "POST" || t.Method == "PUT" {
return true
}
if t.ContentLength == 0 && isIdentity(t.TransferEncoding) {
+ if t.Method == "GET" || t.Method == "HEAD" {
+ return false
+ }
return true
}
@@ -203,6 +211,9 @@ func (t *transferWriter) WriteBody(w io.Writer) error {
// Write body
if t.Body != nil {
if chunked(t.TransferEncoding) {
+ if bw, ok := w.(*bufio.Writer); ok && !t.IsResponse {
+ w = &internal.FlushAfterChunkWriter{bw}
+ }
cw := internal.NewChunkedWriter(w)
_, err = io.Copy(cw, t.Body)
if err == nil {
@@ -232,7 +243,6 @@ func (t *transferWriter) WriteBody(w io.Writer) error {
t.ContentLength, ncopy)
}
- // TODO(petar): Place trailer writer code here.
if chunked(t.TransferEncoding) {
// Write Trailer header
if t.Trailer != nil {
@@ -310,11 +320,13 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
}
case *Request:
t.Header = rr.Header
+ t.RequestMethod = rr.Method
t.ProtoMajor = rr.ProtoMajor
t.ProtoMinor = rr.ProtoMinor
// Transfer semantics for Requests are exactly like those for
// Responses with status code 200, responding to a GET method
t.StatusCode = 200
+ t.Close = rr.Close
default:
panic("unexpected type")
}
@@ -325,7 +337,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
}
// Transfer encoding, content length
- t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header)
+ t.TransferEncoding, err = fixTransferEncoding(isResponse, t.RequestMethod, t.Header)
if err != nil {
return err
}
@@ -413,12 +425,11 @@ func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" }
func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" }
// Sanitize transfer encoding
-func fixTransferEncoding(requestMethod string, header Header) ([]string, error) {
+func fixTransferEncoding(isResponse bool, requestMethod string, header Header) ([]string, error) {
raw, present := header["Transfer-Encoding"]
if !present {
return nil, nil
}
-
delete(header, "Transfer-Encoding")
encodings := strings.Split(raw[0], ",")
@@ -443,9 +454,22 @@ func fixTransferEncoding(requestMethod string, header Header) ([]string, error)
return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")}
}
if len(te) > 0 {
- // Chunked encoding trumps Content-Length. See RFC 2616
- // Section 4.4. Currently len(te) > 0 implies chunked
- // encoding.
+ // RFC 7230 3.3.2 says "A sender MUST NOT send a
+ // Content-Length header field in any message that
+ // contains a Transfer-Encoding header field."
+ //
+ // but also:
+ // "If a message is received with both a
+ // Transfer-Encoding and a Content-Length header
+ // field, the Transfer-Encoding overrides the
+ // Content-Length. Such a message might indicate an
+ // attempt to perform request smuggling (Section 9.5)
+ // or response splitting (Section 9.4) and ought to be
+ // handled as an error. A sender MUST remove the
+ // received Content-Length field prior to forwarding
+ // such a message downstream."
+ //
+ // Reportedly, these appear in the wild.
delete(header, "Content-Length")
return te, nil
}
@@ -457,9 +481,17 @@ func fixTransferEncoding(requestMethod string, header Header) ([]string, error)
// function is not a method, because ultimately it should be shared by
// ReadResponse and ReadRequest.
func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) {
-
+ contentLens := header["Content-Length"]
+ isRequest := !isResponse
// Logic based on response type or status
if noBodyExpected(requestMethod) {
+ // For HTTP requests, as part of hardening against request
+ // smuggling (RFC 7230), don't allow a Content-Length header for
+ // methods which don't permit bodies. As an exception, allow
+ // exactly one Content-Length header if its value is "0".
+ if isRequest && len(contentLens) > 0 && !(len(contentLens) == 1 && contentLens[0] == "0") {
+ return 0, fmt.Errorf("http: method cannot contain a Content-Length; got %q", contentLens)
+ }
return 0, nil
}
if status/100 == 1 {
@@ -470,13 +502,21 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header,
return 0, nil
}
+ if len(contentLens) > 1 {
+ // harden against HTTP request smuggling. See RFC 7230.
+ return 0, errors.New("http: message cannot contain multiple Content-Length headers")
+ }
+
// Logic based on Transfer-Encoding
if chunked(te) {
return -1, nil
}
// Logic based on Content-Length
- cl := strings.TrimSpace(header.get("Content-Length"))
+ var cl string
+ if len(contentLens) == 1 {
+ cl = strings.TrimSpace(contentLens[0])
+ }
if cl != "" {
n, err := parseContentLength(cl)
if err != nil {
@@ -487,11 +527,14 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header,
header.Del("Content-Length")
}
- if !isResponse && requestMethod == "GET" {
- // RFC 2616 doesn't explicitly permit nor forbid an
+ if !isResponse {
+ // RFC 2616 neither explicitly permits nor forbids an
// entity-body on a GET request so we permit one if
// declared, but we default to 0 here (not -1 below)
// if there's no mention of a body.
+ // Likewise, all other request methods are assumed to have
+ // no body if neither Transfer-Encoding chunked nor a
+ // Content-Length are set.
return 0, nil
}
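A sketch of the effect of the hardening above: a message carrying two Content-Length headers, a classic request-smuggling vector, is now rejected during parsing. The expected error text is taken from this patch.

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	raw := "POST / HTTP/1.1\r\nHost: example.com\r\n" +
		"Content-Length: 3\r\nContent-Length: 4\r\n\r\nabc"
	_, err := http.ReadRequest(bufio.NewReader(strings.NewReader(raw)))
	// With this patch the parse fails, e.g.:
	// "http: message cannot contain multiple Content-Length headers"
	fmt.Println(err)
}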
@@ -506,14 +549,13 @@ func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
if major < 1 {
return true
} else if major == 1 && minor == 0 {
- if !strings.Contains(strings.ToLower(header.get("Connection")), "keep-alive") {
+ vv := header["Connection"]
+ if headerValuesContainsToken(vv, "close") || !headerValuesContainsToken(vv, "keep-alive") {
return true
}
return false
} else {
- // TODO: Should split on commas, toss surrounding white space,
- // and check each field.
- if strings.ToLower(header.get("Connection")) == "close" {
+ if headerValuesContainsToken(header["Connection"], "close") {
if removeCloseHeader {
header.Del("Connection")
}
@@ -555,13 +597,16 @@ func fixTrailer(header Header, te []string) (Header, error) {
// Close ensures that the body has been fully read
// and then reads the trailer if necessary.
type body struct {
- src io.Reader
- hdr interface{} // non-nil (Response or Request) value means read trailer
- r *bufio.Reader // underlying wire-format reader for the trailer
- closing bool // is the connection to be closed after reading body?
-
- mu sync.Mutex // guards closed, and calls to Read and Close
- closed bool
+ src io.Reader
+ hdr interface{} // non-nil (Response or Request) value means read trailer
+ r *bufio.Reader // underlying wire-format reader for the trailer
+ closing bool // is the connection to be closed after reading body?
+ doEarlyClose bool // whether Close should stop early
+
+ mu sync.Mutex // guards closed, and calls to Read and Close
+ sawEOF bool
+ closed bool
+ earlyClose bool // Close called and we didn't read to the end of src
}
// ErrBodyReadAfterClose is returned when reading a Request or Response
@@ -581,13 +626,23 @@ func (b *body) Read(p []byte) (n int, err error) {
// Must hold b.mu.
func (b *body) readLocked(p []byte) (n int, err error) {
+ if b.sawEOF {
+ return 0, io.EOF
+ }
n, err = b.src.Read(p)
if err == io.EOF {
+ b.sawEOF = true
// Chunked case. Read the trailer.
if b.hdr != nil {
if e := b.readTrailer(); e != nil {
err = e
+ // Something went wrong in the trailer; we must not allow any
+ // further reads of any kind to succeed from body, nor any
+ // subsequent requests on the server connection. See
+ // golang.org/issue/12027
+ b.sawEOF = false
+ b.closed = true
}
b.hdr = nil
} else {
@@ -607,6 +662,7 @@ func (b *body) readLocked(p []byte) (n int, err error) {
if err == nil && n > 0 {
if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 {
err = io.EOF
+ b.sawEOF = true
}
}
@@ -639,8 +695,7 @@ func (b *body) readTrailer() error {
// The common case, since nobody uses trailers.
buf, err := b.r.Peek(2)
if bytes.Equal(buf, singleCRLF) {
- b.r.ReadByte()
- b.r.ReadByte()
+ b.r.Discard(2)
return nil
}
if len(buf) < 2 {
@@ -688,6 +743,16 @@ func mergeSetHeader(dst *Header, src Header) {
}
}
+// unreadDataSizeLocked returns the number of bytes of unread input.
+// It returns -1 if unknown.
+// b.mu must be held.
+func (b *body) unreadDataSizeLocked() int64 {
+ if lr, ok := b.src.(*io.LimitedReader); ok {
+ return lr.N
+ }
+ return -1
+}
+
func (b *body) Close() error {
b.mu.Lock()
defer b.mu.Unlock()
@@ -696,9 +761,30 @@ func (b *body) Close() error {
}
var err error
switch {
+ case b.sawEOF:
+ // Already saw EOF, so there's no need to go looking for it.
case b.hdr == nil && b.closing:
// no trailer and closing the connection next.
// no point in reading to EOF.
+ case b.doEarlyClose:
+ // Read up to maxPostHandlerReadBytes bytes of the body, looking
+ // for EOF (and trailers), so we can re-use this connection.
+ if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > maxPostHandlerReadBytes {
+ // There was a declared Content-Length, and we have more bytes remaining
+ // than our maxPostHandlerReadBytes tolerance. So, give up.
+ b.earlyClose = true
+ } else {
+ var n int64
+ // Consume the body, which will also lead to us reading
+ // the trailer headers after the body, if present.
+ n, err = io.CopyN(ioutil.Discard, bodyLocked{b}, maxPostHandlerReadBytes)
+ if err == io.EOF {
+ err = nil
+ }
+ if n == maxPostHandlerReadBytes {
+ b.earlyClose = true
+ }
+ }
default:
// Fully consume the body, which will also lead to us reading
// the trailer headers after the body, if present.
@@ -708,6 +794,12 @@ func (b *body) Close() error {
return err
}
+func (b *body) didEarlyClose() bool {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.earlyClose
+}
+
// bodyLocked is a io.Reader reading from a *body when its mutex is
// already held.
type bodyLocked struct {
diff --git a/libgo/go/net/http/transport.go b/libgo/go/net/http/transport.go
index 782f7cd395b..70d18646059 100644
--- a/libgo/go/net/http/transport.go
+++ b/libgo/go/net/http/transport.go
@@ -274,11 +274,12 @@ func (t *Transport) CloseIdleConnections() {
}
}
-// CancelRequest cancels an in-flight request by closing its
-// connection.
+// CancelRequest cancels an in-flight request by closing its connection.
+// CancelRequest should only be called after RoundTrip has returned.
func (t *Transport) CancelRequest(req *Request) {
t.reqMu.Lock()
cancel := t.reqCanceler[req]
+ delete(t.reqCanceler, req)
t.reqMu.Unlock()
if cancel != nil {
cancel()
@@ -474,6 +475,25 @@ func (t *Transport) setReqCanceler(r *Request, fn func()) {
}
}
+// replaceReqCanceler replaces an existing cancel function. If there is no cancel function
+// for the request, we don't set the function and return false.
+// Since CancelRequest will clear the canceler, we can use the return value to detect if
+// the request was canceled since the last setReqCanceler call.
+func (t *Transport) replaceReqCanceler(r *Request, fn func()) bool {
+ t.reqMu.Lock()
+ defer t.reqMu.Unlock()
+ _, ok := t.reqCanceler[r]
+ if !ok {
+ return false
+ }
+ if fn != nil {
+ t.reqCanceler[r] = fn
+ } else {
+ delete(t.reqCanceler, r)
+ }
+ return true
+}
+
func (t *Transport) dial(network, addr string) (c net.Conn, err error) {
if t.Dial != nil {
return t.Dial(network, addr)
@@ -490,6 +510,10 @@ var prePendingDial, postPendingDial func()
// is ready to write requests to.
func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error) {
if pc := t.getIdleConn(cm); pc != nil {
+ // set request canceler to some non-nil function so we
+ // can detect whether it was cleared between now and when
+ // we enter roundTrip
+ t.setReqCanceler(req, func() {})
return pc, nil
}
@@ -499,6 +523,11 @@ func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error
}
dialc := make(chan dialRes)
+ // Copy these hooks so we don't race on the postPendingDial in
+ // the goroutine we launch. Issue 11136.
+ prePendingDial := prePendingDial
+ postPendingDial := postPendingDial
+
handlePendingDial := func() {
if prePendingDial != nil {
prePendingDial()
@@ -534,6 +563,9 @@ func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error
// when it finishes:
handlePendingDial()
return pc, nil
+ case <-req.Cancel:
+ handlePendingDial()
+ return nil, errors.New("net/http: request canceled while waiting for connection")
case <-cancelc:
handlePendingDial()
return nil, errors.New("net/http: request canceled while waiting for connection")
@@ -613,16 +645,9 @@ func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) {
if cm.targetScheme == "https" && !tlsDial {
// Initiate TLS and check remote host name against certificate.
- cfg := t.TLSClientConfig
- if cfg == nil || cfg.ServerName == "" {
- host := cm.tlsHost()
- if cfg == nil {
- cfg = &tls.Config{ServerName: host}
- } else {
- clone := *cfg // shallow clone
- clone.ServerName = host
- cfg = &clone
- }
+ cfg := cloneTLSClientConfig(t.TLSClientConfig)
+ if cfg.ServerName == "" {
+ cfg.ServerName = cm.tlsHost()
}
plainConn := pconn.conn
tlsConn := tls.Client(plainConn, cfg)
@@ -662,7 +687,7 @@ func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) {
return pconn, nil
}
-// useProxy returns true if requests to addr should use a proxy,
+// useProxy reports whether requests to addr should use a proxy,
// according to the NO_PROXY or no_proxy environment variable.
// addr is always a canonicalAddr with a host and port.
func useProxy(addr string) bool {
@@ -805,6 +830,7 @@ type persistConn struct {
numExpectedResponses int
closed bool // whether conn has been closed
broken bool // an error has happened on this connection; marked broken so it's not reused.
+ canceled bool // whether this conn was broken due a CancelRequest
// mutateHeaderFunc is an optional func to modify extra
// headers on each outbound request before it's written. (the
// original Request given to RoundTrip is not modified)
@@ -819,25 +845,33 @@ func (pc *persistConn) isBroken() bool {
return b
}
-func (pc *persistConn) cancelRequest() {
- pc.conn.Close()
+// isCanceled reports whether this connection was closed due to CancelRequest.
+func (pc *persistConn) isCanceled() bool {
+ pc.lk.Lock()
+ defer pc.lk.Unlock()
+ return pc.canceled
}
-var remoteSideClosedFunc func(error) bool // or nil to use default
-
-func remoteSideClosed(err error) bool {
- if err == io.EOF {
- return true
- }
- if remoteSideClosedFunc != nil {
- return remoteSideClosedFunc(err)
- }
- return false
+func (pc *persistConn) cancelRequest() {
+ pc.lk.Lock()
+ defer pc.lk.Unlock()
+ pc.canceled = true
+ pc.closeLocked()
}
func (pc *persistConn) readLoop() {
- alive := true
+ // eofc is used to block http.Handler goroutines reading from Response.Body
+ // at EOF until this goroutine has (potentially) added the connection
+ // back to the idle pool.
+ eofc := make(chan struct{})
+ defer close(eofc) // unblock reader on errors
+
+ // Read this once, before loop starts. (to avoid races in tests)
+ testHookMu.Lock()
+ testHookReadLoopBeforeNextRead := testHookReadLoopBeforeNextRead
+ testHookMu.Unlock()
+ alive := true
for alive {
pb, err := pc.br.Peek(1)
@@ -895,49 +929,79 @@ func (pc *persistConn) readLoop() {
alive = false
}
- var waitForBodyRead chan bool
+ var waitForBodyRead chan bool // channel is nil when there's no body
if hasBody {
waitForBodyRead = make(chan bool, 2)
resp.Body.(*bodyEOFSignal).earlyCloseFn = func() error {
- // Sending false here sets alive to
- // false and closes the connection
- // below.
waitForBodyRead <- false
return nil
}
- resp.Body.(*bodyEOFSignal).fn = func(err error) {
- waitForBodyRead <- alive &&
- err == nil &&
- !pc.sawEOF &&
- pc.wroteRequest() &&
- pc.t.putIdleConn(pc)
+ resp.Body.(*bodyEOFSignal).fn = func(err error) error {
+ isEOF := err == io.EOF
+ waitForBodyRead <- isEOF
+ if isEOF {
+ <-eofc // see comment at top
+ } else if err != nil && pc.isCanceled() {
+ return errRequestCanceled
+ }
+ return err
}
+ } else {
+ // Before send on rc.ch, as client might re-use the
+ // same *Request pointer, and we don't want to set this
+ // on t from this persistConn while the Transport
+ // potentially spins up a different persistConn for the
+ // caller's subsequent request.
+ pc.t.setReqCanceler(rc.req, nil)
}
- if alive && !hasBody {
- alive = !pc.sawEOF &&
- pc.wroteRequest() &&
- pc.t.putIdleConn(pc)
- }
+ pc.lk.Lock()
+ pc.numExpectedResponses--
+ pc.lk.Unlock()
+ // The connection might be going away when we put the
+ // idleConn below. When that happens, we close the response channel to signal
+ // to roundTrip that the connection is gone. roundTrip waits for
+ // both closing and a response in a select, so it might choose
+ // the close channel, rather than the response.
+ // We send the response first so that roundTrip can check
+ // if there is a pending one with a non-blocking select
+ // on the response channel before erroring out.
rc.ch <- responseAndError{resp, err}
- // Wait for the just-returned response body to be fully consumed
- // before we race and peek on the underlying bufio reader.
- if waitForBodyRead != nil {
+ if hasBody {
+ // To avoid a race, wait for the just-returned
+ // response body to be fully consumed before peek on
+ // the underlying bufio reader.
select {
- case alive = <-waitForBodyRead:
+ case <-rc.req.Cancel:
+ alive = false
+ pc.t.CancelRequest(rc.req)
+ case bodyEOF := <-waitForBodyRead:
+ pc.t.setReqCanceler(rc.req, nil) // before pc might return to idle pool
+ alive = alive &&
+ bodyEOF &&
+ !pc.sawEOF &&
+ pc.wroteRequest() &&
+ pc.t.putIdleConn(pc)
+ if bodyEOF {
+ eofc <- struct{}{}
+ }
case <-pc.closech:
alive = false
}
+ } else {
+ alive = alive &&
+ !pc.sawEOF &&
+ pc.wroteRequest() &&
+ pc.t.putIdleConn(pc)
}
- pc.t.setReqCanceler(rc.req, nil)
-
- if !alive {
- pc.close()
+ if hook := testHookReadLoopBeforeNextRead; hook != nil {
+ hook()
}
}
+ pc.close()
}
func (pc *persistConn) writeLoop() {
@@ -1027,9 +1091,24 @@ func (e *httpError) Temporary() bool { return true }
var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true}
var errClosed error = &httpError{err: "net/http: transport closed before response was received"}
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+// nil except for tests
+var (
+ testHookPersistConnClosedGotRes func()
+ testHookEnterRoundTrip func()
+ testHookMu sync.Locker = fakeLocker{} // guards following
+ testHookReadLoopBeforeNextRead func()
+)
func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) {
- pc.t.setReqCanceler(req.Request, pc.cancelRequest)
+ if hook := testHookEnterRoundTrip; hook != nil {
+ hook()
+ }
+ if !pc.t.replaceReqCanceler(req.Request, pc.cancelRequest) {
+ pc.t.putIdleConn(pc)
+ return nil, errRequestCanceled
+ }
pc.lk.Lock()
pc.numExpectedResponses++
headerFn := pc.mutateHeaderFunc
@@ -1055,15 +1134,19 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
- // http://golang.org/issue/5522
+ // https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
- // anyway. See http://golang.org/issue/8923
+ // anyway. See https://golang.org/issue/8923
requestedGzip = true
req.extraHeaders().Set("Accept-Encoding", "gzip")
}
+ if pc.t.DisableKeepAlives {
+ req.extraHeaders().Set("Connection", "close")
+ }
+
// Write the request concurrently with waiting for a response,
// in case the server decides to reply before reading our full
// request body.
@@ -1074,38 +1157,57 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err
pc.reqch <- requestAndChan{req.Request, resc, requestedGzip}
var re responseAndError
- var pconnDeadCh = pc.closech
- var failTicker <-chan time.Time
var respHeaderTimer <-chan time.Time
+ cancelChan := req.Request.Cancel
WaitResponse:
for {
select {
case err := <-writeErrCh:
+ if isNetWriteError(err) {
+ // Issue 11745. If we failed to write the request
+ // body, it's possible the server just heard enough
+ // and already wrote to us. Prioritize the server's
+ // response over returning a body write error.
+ select {
+ case re = <-resc:
+ pc.close()
+ break WaitResponse
+ case <-time.After(50 * time.Millisecond):
+ // Fall through.
+ }
+ }
if err != nil {
re = responseAndError{nil, err}
pc.close()
break WaitResponse
}
if d := pc.t.ResponseHeaderTimeout; d > 0 {
- respHeaderTimer = time.After(d)
+ timer := time.NewTimer(d)
+ defer timer.Stop() // prevent leaks
+ respHeaderTimer = timer.C
}
- case <-pconnDeadCh:
+ case <-pc.closech:
// The persist connection is dead. This shouldn't
// usually happen (only with Connection: close responses
// with no response bodies), but if it does happen it
// means either a) the remote server hung up on us
// prematurely, or b) the readLoop sent us a response &
// closed its closech at roughly the same time, and we
- // selected this case first, in which case a response
- // might still be coming soon.
- //
- // We can't avoid the select race in b) by using a unbuffered
- // resc channel instead, because then goroutines can
- // leak if we exit due to other errors.
- pconnDeadCh = nil // avoid spinning
- failTicker = time.After(100 * time.Millisecond) // arbitrary time to wait for resc
- case <-failTicker:
- re = responseAndError{err: errClosed}
+ // selected this case first. If we got a response, readLoop makes sure
+ // to send it before it puts the conn and closes the channel.
+ // That way, we can fetch the response, if there is one,
+ // with a non-blocking receive.
+ select {
+ case re = <-resc:
+ if fn := testHookPersistConnClosedGotRes; fn != nil {
+ fn()
+ }
+ default:
+ re = responseAndError{err: errClosed}
+ if pc.isCanceled() {
+ re = responseAndError{err: errRequestCanceled}
+ }
+ }
break WaitResponse
case <-respHeaderTimer:
pc.close()
@@ -1113,13 +1215,12 @@ WaitResponse:
break WaitResponse
case re = <-resc:
break WaitResponse
+ case <-cancelChan:
+ pc.t.CancelRequest(req.Request)
+ cancelChan = nil
}
}
- pc.lk.Lock()
- pc.numExpectedResponses--
- pc.lk.Unlock()
-
if re.err != nil {
pc.t.setReqCanceler(req.Request, nil)
}
@@ -1167,16 +1268,18 @@ func canonicalAddr(url *url.URL) string {
// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most
// once, right before its final (error-producing) Read or Close call
-// returns. If earlyCloseFn is non-nil and Close is called before
-// io.EOF is seen, earlyCloseFn is called instead of fn, and its
-// return value is the return value from Close.
+// returns. fn should return the new error to return from Read or Close.
+//
+// If earlyCloseFn is non-nil and Close is called before io.EOF is
+// seen, earlyCloseFn is called instead of fn, and its return value is
+// the return value from Close.
type bodyEOFSignal struct {
body io.ReadCloser
- mu sync.Mutex // guards following 4 fields
- closed bool // whether Close has been called
- rerr error // sticky Read error
- fn func(error) // error will be nil on Read io.EOF
- earlyCloseFn func() error // optional alt Close func used if io.EOF not seen
+ mu sync.Mutex // guards following 4 fields
+ closed bool // whether Close has been called
+ rerr error // sticky Read error
+ fn func(error) error // err will be nil on Read io.EOF
+ earlyCloseFn func() error // optional alt Close func used if io.EOF not seen
}
func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
@@ -1197,7 +1300,7 @@ func (es *bodyEOFSignal) Read(p []byte) (n int, err error) {
if es.rerr == nil {
es.rerr = err
}
- es.condfn(err)
+ err = es.condfn(err)
}
return
}
@@ -1213,20 +1316,17 @@ func (es *bodyEOFSignal) Close() error {
return es.earlyCloseFn()
}
err := es.body.Close()
- es.condfn(err)
- return err
+ return es.condfn(err)
}
// caller must hold es.mu.
-func (es *bodyEOFSignal) condfn(err error) {
+func (es *bodyEOFSignal) condfn(err error) error {
if es.fn == nil {
- return
- }
- if err == io.EOF {
- err = nil
+ return err
}
- es.fn(err)
+ err = es.fn(err)
es.fn = nil
+ return err
}
// gzipReader wraps a response body so it can lazily
@@ -1273,3 +1373,89 @@ func (nr noteEOFReader) Read(p []byte) (n int, err error) {
}
return
}
+
+// fakeLocker is a sync.Locker which does nothing. It's used to guard
+// test-only fields when not under test, to avoid runtime atomic
+// overhead.
+type fakeLocker struct{}
+
+func (fakeLocker) Lock() {}
+func (fakeLocker) Unlock() {}
+
+func isNetWriteError(err error) bool {
+ switch e := err.(type) {
+ case *url.Error:
+ return isNetWriteError(e.Err)
+ case *net.OpError:
+ return e.Op == "write"
+ default:
+ return false
+ }
+}
+
+// cloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// The cfg must not be in active use by tls.Server, or else
+// there can still be a race with tls.Server updating SessionTicketKey
+// and our copying it, and also a race with the server setting
+// SessionTicketsDisabled=false on failure to set the random
+// ticket key.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ SessionTicketsDisabled: cfg.SessionTicketsDisabled,
+ SessionTicketKey: cfg.SessionTicketKey,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
+
+// cloneTLSClientConfig is like cloneTLSConfig but omits
+// the fields SessionTicketsDisabled and SessionTicketKey.
+// This makes it safe to call cloneTLSClientConfig on a config
+// in active use by a server.
+func cloneTLSClientConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
diff --git a/libgo/go/net/http/transport_test.go b/libgo/go/net/http/transport_test.go
index defa6337082..c21d4afa87f 100644
--- a/libgo/go/net/http/transport_test.go
+++ b/libgo/go/net/http/transport_test.go
@@ -18,11 +18,11 @@ import (
"io/ioutil"
"log"
"net"
- "net/http"
. "net/http"
"net/http/httptest"
"net/url"
"os"
+ "reflect"
"runtime"
"strconv"
"strings"
@@ -39,6 +39,7 @@ var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) {
if r.FormValue("close") == "true" {
w.Header().Set("Connection", "close")
}
+ w.Header().Set("X-Saw-Close", fmt.Sprint(r.Close))
w.Write([]byte(r.RemoteAddr))
})
@@ -228,6 +229,10 @@ func TestTransportConnectionCloseOnRequest(t *testing.T) {
if err != nil {
t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err)
}
+ if got, want := res.Header.Get("X-Saw-Close"), fmt.Sprint(connectionClose); got != want {
+ t.Errorf("For connectionClose = %v; handler's X-Saw-Close was %v; want %v",
+ connectionClose, got, want)
+ }
body, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err)
@@ -249,6 +254,27 @@ func TestTransportConnectionCloseOnRequest(t *testing.T) {
connSet.check(t)
}
+// if the Transport's DisableKeepAlives is set, all requests should
+// send Connection: close.
+func TestTransportConnectionCloseOnRequestDisableKeepAlive(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(hostPortHandler)
+ defer ts.Close()
+
+ tr := &Transport{
+ DisableKeepAlives: true,
+ }
+ c := &Client{Transport: tr}
+ res, err := c.Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ if res.Header.Get("X-Saw-Close") != "true" {
+ t.Errorf("handler didn't see Connection: close ")
+ }
+}
+
func TestTransportIdleCacheKeys(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(hostPortHandler)
@@ -293,7 +319,7 @@ func TestTransportReadToEndReusesConn(t *testing.T) {
addrSeen[r.RemoteAddr]++
if r.URL.Path == "/chunked/" {
w.WriteHeader(200)
- w.(http.Flusher).Flush()
+ w.(Flusher).Flush()
} else {
w.Header().Set("Content-Type", strconv.Itoa(len(msg)))
w.WriteHeader(200)
@@ -308,7 +334,7 @@ func TestTransportReadToEndReusesConn(t *testing.T) {
wantLen := []int{len(msg), -1}[pi]
addrSeen = make(map[string]int)
for i := 0; i < 3; i++ {
- res, err := http.Get(ts.URL + path)
+ res, err := Get(ts.URL + path)
if err != nil {
t.Errorf("Get %s: %v", path, err)
continue
@@ -459,7 +485,7 @@ func TestTransportServerClosingUnexpectedly(t *testing.T) {
}
}
-// Test for http://golang.org/issue/2616 (appropriate issue number)
+// Test for https://golang.org/issue/2616 (appropriate issue number)
// This fails pretty reliably with GOMAXPROCS=100 or something high.
func TestStressSurpriseServerCloses(t *testing.T) {
defer afterTest(t)
@@ -479,12 +505,17 @@ func TestStressSurpriseServerCloses(t *testing.T) {
tr := &Transport{DisableKeepAlives: false}
c := &Client{Transport: tr}
+ defer tr.CloseIdleConnections()
// Do a bunch of traffic from different goroutines. Send to activityc
// after each request completes, regardless of whether it failed.
+ // If these are too high, OS X exhausts its ephemeral ports
+ // and hangs waiting for them to transition TCP states. That's
+ // not what we want to test. TODO(bradfitz): use an io.Pipe
+ // dialer for this test instead?
const (
- numClients = 50
- reqsPerClient = 250
+ numClients = 20
+ reqsPerClient = 25
)
activityc := make(chan bool)
for i := 0; i < numClients; i++ {
@@ -567,11 +598,22 @@ func TestTransportHeadChunkedResponse(t *testing.T) {
tr := &Transport{DisableKeepAlives: false}
c := &Client{Transport: tr}
+ // Ensure that we wait for the readLoop to complete before
+ // calling Head again
+ didRead := make(chan bool)
+ SetReadLoopBeforeNextReadHook(func() { didRead <- true })
+ defer SetReadLoopBeforeNextReadHook(nil)
+
res1, err := c.Head(ts.URL)
+ <-didRead
+
if err != nil {
t.Fatalf("request 1 error: %v", err)
}
+
res2, err := c.Head(ts.URL)
+ <-didRead
+
if err != nil {
t.Fatalf("request 2 error: %v", err)
}
@@ -833,7 +875,7 @@ func TestTransportGzipShort(t *testing.T) {
// tests that persistent goroutine connections shut down when no longer desired.
func TestTransportPersistConnLeak(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7237")
+ t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
gotReqCh := make(chan bool)
@@ -902,7 +944,7 @@ func TestTransportPersistConnLeak(t *testing.T) {
// request.ContentLength is explicitly short
func TestTransportPersistConnLeakShortBody(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7237")
+ t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
@@ -941,7 +983,7 @@ func TestTransportPersistConnLeakShortBody(t *testing.T) {
}
}
-// This used to crash; http://golang.org/issue/3266
+// This used to crash; https://golang.org/issue/3266
func TestTransportIdleConnCrash(t *testing.T) {
defer afterTest(t)
tr := &Transport{}
@@ -1023,7 +1065,7 @@ func TestIssue3595(t *testing.T) {
}
}
-// From http://golang.org/issue/4454 ,
+// From https://golang.org/issue/4454 ,
// "client fails to handle requests with no body and chunked encoding"
func TestChunkedNoContent(t *testing.T) {
defer afterTest(t)
@@ -1110,7 +1152,7 @@ func TestTransportConcurrency(t *testing.T) {
func TestIssue4191_InfiniteGetTimeout(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7237")
+ t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
const debug = false
@@ -1174,7 +1216,7 @@ func TestIssue4191_InfiniteGetTimeout(t *testing.T) {
func TestIssue4191_InfiniteGetToPutTimeout(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7237")
+ t.Skip("skipping test; see https://golang.org/issue/7237")
}
defer afterTest(t)
const debug = false
@@ -1345,8 +1387,8 @@ func TestTransportCancelRequest(t *testing.T) {
body, err := ioutil.ReadAll(res.Body)
d := time.Since(t0)
- if err == nil {
- t.Error("expected an error reading the body")
+ if err != ExportErrRequestCanceled {
+ t.Errorf("Body.Read error = %v; want errRequestCanceled", err)
}
if string(body) != "Hello" {
t.Errorf("Body = %q; want Hello", body)
@@ -1356,7 +1398,7 @@ func TestTransportCancelRequest(t *testing.T) {
}
// Verify no outstanding requests after readLoop/writeLoop
// goroutines shut down.
- for tries := 3; tries > 0; tries-- {
+ for tries := 5; tries > 0; tries-- {
n := tr.NumPendingRequestsForTesting()
if n == 0 {
break
@@ -1405,6 +1447,7 @@ func TestTransportCancelRequestInDial(t *testing.T) {
eventLog.Printf("canceling")
tr.CancelRequest(req)
+ tr.CancelRequest(req) // used to panic on second call
select {
case <-gotres:
@@ -1422,6 +1465,135 @@ Get = Get http://something.no-network.tld/: net/http: request canceled while wai
}
}
+func TestCancelRequestWithChannel(t *testing.T) {
+ defer afterTest(t)
+ if testing.Short() {
+ t.Skip("skipping test in -short mode")
+ }
+ unblockc := make(chan bool)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ fmt.Fprintf(w, "Hello")
+ w.(Flusher).Flush() // send headers and some body
+ <-unblockc
+ }))
+ defer ts.Close()
+ defer close(unblockc)
+
+ tr := &Transport{}
+ defer tr.CloseIdleConnections()
+ c := &Client{Transport: tr}
+
+ req, _ := NewRequest("GET", ts.URL, nil)
+ ch := make(chan struct{})
+ req.Cancel = ch
+
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ go func() {
+ time.Sleep(1 * time.Second)
+ close(ch)
+ }()
+ t0 := time.Now()
+ body, err := ioutil.ReadAll(res.Body)
+ d := time.Since(t0)
+
+ if err != ExportErrRequestCanceled {
+ t.Errorf("Body.Read error = %v; want errRequestCanceled", err)
+ }
+ if string(body) != "Hello" {
+ t.Errorf("Body = %q; want Hello", body)
+ }
+ if d < 500*time.Millisecond {
+ t.Errorf("expected ~1 second delay; got %v", d)
+ }
+ // Verify no outstanding requests after readLoop/writeLoop
+ // goroutines shut down.
+ for tries := 5; tries > 0; tries-- {
+ n := tr.NumPendingRequestsForTesting()
+ if n == 0 {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ if tries == 1 {
+ t.Errorf("pending requests = %d; want 0", n)
+ }
+ }
+}
+
+func TestCancelRequestWithChannelBeforeDo(t *testing.T) {
+ defer afterTest(t)
+ unblockc := make(chan bool)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ <-unblockc
+ }))
+ defer ts.Close()
+ defer close(unblockc)
+
+ // Don't interfere with the next test on plan9.
+ // Cf. https://golang.org/issues/11476
+ if runtime.GOOS == "plan9" {
+ defer time.Sleep(500 * time.Millisecond)
+ }
+
+ tr := &Transport{}
+ defer tr.CloseIdleConnections()
+ c := &Client{Transport: tr}
+
+ req, _ := NewRequest("GET", ts.URL, nil)
+ ch := make(chan struct{})
+ req.Cancel = ch
+ close(ch)
+
+ _, err := c.Do(req)
+ if err == nil || !strings.Contains(err.Error(), "canceled") {
+ t.Errorf("Do error = %v; want cancelation", err)
+ }
+}
+
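
Outside the test harness, the Request.Cancel channel exercised by the two tests above is used the same way by ordinary clients: close the channel to abort the in-flight request. A minimal sketch, with a placeholder URL and timeout:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/", nil) // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	cancel := make(chan struct{})
	req.Cancel = cancel

	// Cancel the request if it takes longer than two seconds.
	timer := time.AfterFunc(2*time.Second, func() { close(cancel) })
	defer timer.Stop()

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request canceled or failed:", err)
		return
	}
	defer res.Body.Close()
	fmt.Println("status:", res.Status)
}
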
+// Issue 11020. The returned error should be errRequestCanceled.
+func TestTransportCancelBeforeResponseHeaders(t *testing.T) {
+ t.Skip("Skipping flaky test; see Issue 11894")
+ defer afterTest(t)
+
+ serverConnCh := make(chan net.Conn, 1)
+ tr := &Transport{
+ Dial: func(network, addr string) (net.Conn, error) {
+ cc, sc := net.Pipe()
+ serverConnCh <- sc
+ return cc, nil
+ },
+ }
+ defer tr.CloseIdleConnections()
+ errc := make(chan error, 1)
+ req, _ := NewRequest("GET", "http://example.com/", nil)
+ go func() {
+ _, err := tr.RoundTrip(req)
+ errc <- err
+ }()
+
+ sc := <-serverConnCh
+ verb := make([]byte, 3)
+ if _, err := io.ReadFull(sc, verb); err != nil {
+ t.Errorf("Error reading HTTP verb from server: %v", err)
+ }
+ if string(verb) != "GET" {
+ t.Errorf("server received %q; want GET", verb)
+ }
+ defer sc.Close()
+
+ tr.CancelRequest(req)
+
+ err := <-errc
+ if err == nil {
+ t.Fatalf("unexpected success from RoundTrip")
+ }
+ if err != ExportErrRequestCanceled {
+ t.Errorf("RoundTrip error = %v; want ExportErrRequestCanceled", err)
+ }
+}
+
// golang.org/issue/3672 -- Client can't close HTTP stream
// Calling Close on a Response.Body used to just read until EOF.
// Now it actually closes the TCP connection.
@@ -1795,6 +1967,11 @@ func TestIdleConnChannelLeak(t *testing.T) {
}))
defer ts.Close()
+ const nReqs = 5
+ didRead := make(chan bool, nReqs)
+ SetReadLoopBeforeNextReadHook(func() { didRead <- true })
+ defer SetReadLoopBeforeNextReadHook(nil)
+
tr := &Transport{
Dial: func(netw, addr string) (net.Conn, error) {
return net.Dial(netw, ts.Listener.Addr().String())
@@ -1807,12 +1984,28 @@ func TestIdleConnChannelLeak(t *testing.T) {
// First, without keep-alives.
for _, disableKeep := range []bool{true, false} {
tr.DisableKeepAlives = disableKeep
- for i := 0; i < 5; i++ {
+ for i := 0; i < nReqs; i++ {
_, err := c.Get(fmt.Sprintf("http://foo-host-%d.tld/", i))
if err != nil {
t.Fatal(err)
}
+ // Note: no res.Body.Close is needed here, since the
+ // response Content-Length is zero. Perhaps the test
+ // should be more explicit and use a HEAD, but tests
+ // elsewhere guarantee that zero byte responses generate
+ // a "Content-Length: 0" instead of chunking.
+ }
+
+ // At this point, each of the nReqs Transport.readLoop goroutines
+ // has noted that there is no response body (see the earlier comment)
+ // and is calling putIdleConn, which decrements this count. Usually
+ // that happens quickly, which is why this test has seemed to work for
+ // ages. But it's still racy: we have to wait for them all to finish
+ // first. See Issue 10427.
+ for i := 0; i < nReqs; i++ {
+ <-didRead
}
+
if got := tr.IdleConnChMapSizeForTesting(); got != 0 {
t.Fatalf("ForDisableKeepAlives = %v, map size = %d; want 0", disableKeep, got)
}
@@ -1824,7 +2017,7 @@ func TestIdleConnChannelLeak(t *testing.T) {
// then closes it.
func TestTransportClosesRequestBody(t *testing.T) {
defer afterTest(t)
- ts := httptest.NewServer(http.HandlerFunc(func(w ResponseWriter, r *Request) {
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
io.Copy(ioutil.Discard, r.Body)
}))
defer ts.Close()
@@ -2060,6 +2253,38 @@ func TestTransportNoReuseAfterEarlyResponse(t *testing.T) {
}
}
+// Tests that we don't leak Transport persistConn.readLoop goroutines
+// when a server hangs up immediately after saying it would keep-alive.
+func TestTransportIssue10457(t *testing.T) {
+ defer afterTest(t) // used to fail in goroutine leak check
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ // Send a response with no body, keep-alive
+ // (implicit), and then lie and immediately close the
+ // connection. This forces the Transport's readLoop to
+ // immediately Peek an io.EOF and get to the point
+ // that used to hang.
+ conn, _, _ := w.(Hijacker).Hijack()
+ conn.Write([]byte("HTTP/1.1 200 OK\r\nFoo: Bar\r\nContent-Length: 0\r\n\r\n")) // keep-alive
+ conn.Close()
+ }))
+ defer ts.Close()
+ tr := &Transport{}
+ defer tr.CloseIdleConnections()
+ cl := &Client{Transport: tr}
+ res, err := cl.Get(ts.URL)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ defer res.Body.Close()
+
+ // Just a sanity check that we at least get the response. The real
+ // test here is that the "defer afterTest" above doesn't find any
+ // leaked goroutines.
+ if got, want := res.Header.Get("Foo"), "Bar"; got != want {
+ t.Errorf("Foo header = %q; want %q", got, want)
+ }
+}
+
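
The "immediately Peek an io.EOF" behavior that the hijack-and-close handler above forces can be seen in isolation. A small sketch, independent of the Transport internals:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// Peeking at a reader whose peer has nothing more to send returns io.EOF,
// the condition the handler above makes the readLoop observe right away.
func main() {
	br := bufio.NewReader(strings.NewReader("")) // empty: behaves like a closed connection
	_, err := br.Peek(1)
	fmt.Println(err) // EOF
}
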
type errorReader struct {
err error
}
@@ -2073,7 +2298,7 @@ func (f closerFunc) Close() error { return f() }
// Issue 6981
func TestTransportClosesBodyOnError(t *testing.T) {
if runtime.GOOS == "plan9" {
- t.Skip("skipping test; see http://golang.org/issue/7782")
+ t.Skip("skipping test; see https://golang.org/issue/7782")
}
defer afterTest(t)
readBody := make(chan error, 1)
@@ -2162,13 +2387,13 @@ func TestTransportDialTLS(t *testing.T) {
// Test for issue 8755
// Ensure that if a proxy returns an error, it is exposed by RoundTrip
func TestRoundTripReturnsProxyError(t *testing.T) {
- badProxy := func(*http.Request) (*url.URL, error) {
+ badProxy := func(*Request) (*url.URL, error) {
return nil, errors.New("errorMessage")
}
tr := &Transport{Proxy: badProxy}
- req, _ := http.NewRequest("GET", "http://example.com", nil)
+ req, _ := NewRequest("GET", "http://example.com", nil)
_, err := tr.RoundTrip(req)
@@ -2249,7 +2474,268 @@ func TestTransportRangeAndGzip(t *testing.T) {
res.Body.Close()
}
-func wantBody(res *http.Response, err error, want string) error {
+// Previously, RoundTrip handled a logical race by waiting for 100ms in the
+// case of an error. Changing the order of the channel operations got rid of
+// this race.
+//
+// In order to test that the channel op reordering works, we install a hook into the
+// roundTrip function which gets called if we saw the connection go away and
+// we subsequently received a response.
+func TestTransportResponseCloseRace(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ defer afterTest(t)
+
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ }))
+ defer ts.Close()
+ sawRace := false
+ SetInstallConnClosedHook(func() {
+ sawRace = true
+ })
+ defer SetInstallConnClosedHook(nil)
+ tr := &Transport{
+ DisableKeepAlives: true,
+ }
+ req, err := NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // selects are not deterministic, so do this a bunch
+ // and see if we handle the logical race at least once.
+ for i := 0; i < 10000; i++ {
+ resp, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ resp.Body.Close()
+ if sawRace {
+ break
+ }
+ }
+ if !sawRace {
+ t.Errorf("didn't see response/connection going away race")
+ }
+}
+
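
The channel-op reordering described in the comment above can be illustrated generically. The sketch below is not the transport's actual select; it only shows the prefer-the-response-if-it-raced-in pattern, with the internals replaced by two plain channels.

package main

import "fmt"

func main() {
	resc := make(chan string, 1)
	connClosed := make(chan struct{})
	resc <- "HTTP/1.1 200 OK" // a response arrived...
	close(connClosed)         // ...and the connection also went away

	select {
	case res := <-resc:
		fmt.Println("response won the race:", res)
	case <-connClosed:
		// The close signal won; before reporting an error, check whether a
		// response nevertheless raced in and prefer it.
		select {
		case res := <-resc:
			fmt.Println("connection closed, but using the response:", res)
		default:
			fmt.Println("connection closed with no response: report the error")
		}
	}
}
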
+// Test for issue 10474
+func TestTransportResponseCancelRace(t *testing.T) {
+ defer afterTest(t)
+
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ // important that this response has a body.
+ var b [1024]byte
+ w.Write(b[:])
+ }))
+ defer ts.Close()
+
+ tr := &Transport{}
+ defer tr.CloseIdleConnections()
+
+ req, err := NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // If we do an early close, the Transport just throws the connection away
+ // and doesn't reuse it. In order to trigger the bug, it has to reuse the
+ // connection, so read the body fully.
+ if _, err := io.Copy(ioutil.Discard, res.Body); err != nil {
+ t.Fatal(err)
+ }
+
+ req2, err := NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tr.CancelRequest(req)
+ res, err = tr.RoundTrip(req2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+}
+
+func TestTransportDialCancelRace(t *testing.T) {
+ defer afterTest(t)
+
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
+ defer ts.Close()
+
+ tr := &Transport{}
+ defer tr.CloseIdleConnections()
+
+ req, err := NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ SetEnterRoundTripHook(func() {
+ tr.CancelRequest(req)
+ })
+ defer SetEnterRoundTripHook(nil)
+ res, err := tr.RoundTrip(req)
+ if err != ExportErrRequestCanceled {
+ t.Errorf("expected canceled request error; got %v", err)
+ if err == nil {
+ res.Body.Close()
+ }
+ }
+}
+
+// logWritesConn is a net.Conn that logs each Write call to writes
+// and then proxies to w.
+// It proxies Read calls to a reader it receives from rch.
+type logWritesConn struct {
+ net.Conn // nil. crash on use.
+
+ w io.Writer
+
+ rch <-chan io.Reader
+ r io.Reader // nil until received from rch
+
+ mu sync.Mutex
+ writes []string
+}
+
+func (c *logWritesConn) Write(p []byte) (n int, err error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.writes = append(c.writes, string(p))
+ return c.w.Write(p)
+}
+
+func (c *logWritesConn) Read(p []byte) (n int, err error) {
+ if c.r == nil {
+ c.r = <-c.rch
+ }
+ return c.r.Read(p)
+}
+
+func (c *logWritesConn) Close() error { return nil }
+
+// Issue 6574
+func TestTransportFlushesBodyChunks(t *testing.T) {
+ defer afterTest(t)
+ resBody := make(chan io.Reader, 1)
+ connr, connw := io.Pipe() // connection pipe pair
+ lw := &logWritesConn{
+ rch: resBody,
+ w: connw,
+ }
+ tr := &Transport{
+ Dial: func(network, addr string) (net.Conn, error) {
+ return lw, nil
+ },
+ }
+ bodyr, bodyw := io.Pipe() // body pipe pair
+ go func() {
+ defer bodyw.Close()
+ for i := 0; i < 3; i++ {
+ fmt.Fprintf(bodyw, "num%d\n", i)
+ }
+ }()
+ resc := make(chan *Response)
+ go func() {
+ req, _ := NewRequest("POST", "http://localhost:8080", bodyr)
+ req.Header.Set("User-Agent", "x") // known value for test
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ t.Error("RoundTrip: %v", err)
+ close(resc)
+ return
+ }
+ resc <- res
+
+ }()
+ // Fully consume the request before checking the Write log vs. want.
+ req, err := ReadRequest(bufio.NewReader(connr))
+ if err != nil {
+ t.Fatal(err)
+ }
+ io.Copy(ioutil.Discard, req.Body)
+
+ // Unblock the transport's roundTrip goroutine.
+ resBody <- strings.NewReader("HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n")
+ res, ok := <-resc
+ if !ok {
+ return
+ }
+ defer res.Body.Close()
+
+ want := []string{
+ // Because Request.ContentLength = 0, the body is sniffed for 1 byte to determine whether there's content.
+ // That explains the initial "num0" being split into "n" and "um0".
+ // The first byte is included with the request headers Write. Perhaps in the future
+ // we will want to flush the headers out early if the first byte of the request body is
+ // taking a long time to arrive. But not yet.
+ "POST / HTTP/1.1\r\nHost: localhost:8080\r\nUser-Agent: x\r\nTransfer-Encoding: chunked\r\nAccept-Encoding: gzip\r\n\r\n" +
+ "1\r\nn\r\n",
+ "4\r\num0\n\r\n",
+ "5\r\nnum1\n\r\n",
+ "5\r\nnum2\n\r\n",
+ "0\r\n\r\n",
+ }
+ if !reflect.DeepEqual(lw.writes, want) {
+ t.Errorf("Writes differed.\n Got: %q\nWant: %q\n", lw.writes, want)
+ }
+}
+
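
The chunk framing expected in want above ("5\r\nnum1\n\r\n", terminating "0\r\n\r\n") is ordinary HTTP/1.1 chunked encoding. A small sketch using net/http/httputil reproduces the same framing; it is illustrative only and not a claim about how the Transport writes chunks internally.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http/httputil"
)

func main() {
	var buf bytes.Buffer
	cw := httputil.NewChunkedWriter(&buf)
	io.WriteString(cw, "num1\n") // emits "5\r\nnum1\n\r\n"
	cw.Close()                   // emits the terminating "0\r\n"; the final CRLF is written separately in real messages
	fmt.Printf("%q\n", buf.String())
	// Prints: "5\r\nnum1\n\r\n0\r\n"
}
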
+// Issue 11745.
+func TestTransportPrefersResponseOverWriteError(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ defer afterTest(t)
+ const contentLengthLimit = 1024 * 1024 // 1MB
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ if r.ContentLength >= contentLengthLimit {
+ w.WriteHeader(StatusBadRequest)
+ r.Body.Close()
+ return
+ }
+ w.WriteHeader(StatusOK)
+ }))
+ defer ts.Close()
+
+ fail := 0
+ count := 100
+ bigBody := strings.Repeat("a", contentLengthLimit*2)
+ for i := 0; i < count; i++ {
+ req, err := NewRequest("PUT", ts.URL, strings.NewReader(bigBody))
+ if err != nil {
+ t.Fatal(err)
+ }
+ tr := new(Transport)
+ defer tr.CloseIdleConnections()
+ client := &Client{Transport: tr}
+ resp, err := client.Do(req)
+ if err != nil {
+ fail++
+ t.Logf("%d = %#v", i, err)
+ if ue, ok := err.(*url.Error); ok {
+ t.Logf("urlErr = %#v", ue.Err)
+ if ne, ok := ue.Err.(*net.OpError); ok {
+ t.Logf("netOpError = %#v", ne.Err)
+ }
+ }
+ } else {
+ resp.Body.Close()
+ if resp.StatusCode != 400 {
+ t.Errorf("Expected status code 400, got %v", resp.Status)
+ }
+ }
+ }
+ if fail > 0 {
+ t.Errorf("Failed %v out of %v\n", fail, count)
+ }
+}
+
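
The handler in the test above rejects oversized uploads by inspecting Content-Length. In real servers that check is often paired with http.MaxBytesReader to also cap bodies that arrive without a declared length. A hedged sketch of that pattern, with a placeholder address and the same 1MB limit as the test:

package main

import (
	"io"
	"io/ioutil"
	"net/http"
)

func main() {
	const limit = 1 << 20 // 1MB, mirroring contentLengthLimit above
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.ContentLength >= limit {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		r.Body = http.MaxBytesReader(w, r.Body, limit)
		if _, err := io.Copy(ioutil.Discard, r.Body); err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	http.ListenAndServe("127.0.0.1:8080", nil) // placeholder address
}
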
+func wantBody(res *Response, err error, want string) error {
if err != nil {
return err
}