// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package httputil

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"strings"
	"time"
)

// drainBody reads all of b to memory and then returns two equivalent
// ReadClosers yielding the same bytes.
//
// It returns an error if the initial slurp of all bytes fails. It does not attempt
// to make the returned ReadClosers have identical error-matching behavior.
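//
// A minimal sketch of the intended tee pattern (hypothetical caller code,
// not part of this package's API): one returned copy is kept so the body
// can be restored, the other is consumed while writing the dump.
//
//	save, body, err := drainBody(req.Body)
//	if err != nil {
//		return nil, err
//	}
//	req.Body = body // consumed while producing the dump
//	// ... write the dump ...
//	req.Body = save // restore the original body for later readers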
func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
	if b == nil || b == http.NoBody {
		// No copying needed. Preserve the magic sentinel meaning of NoBody.
		return http.NoBody, http.NoBody, nil
	}
	var buf bytes.Buffer
	if _, err = buf.ReadFrom(b); err != nil {
		return nil, b, err
	}
	if err = b.Close(); err != nil {
		return nil, b, err
	}
	return io.NopCloser(&buf), io.NopCloser(bytes.NewReader(buf.Bytes())), nil
}

// dumpConn is a net.Conn which writes to Writer and reads from Reader
type dumpConn struct {
	io.Writer
	io.Reader
}

func (c *dumpConn) Close() error { return nil }
func (c *dumpConn) LocalAddr() net.Addr { return nil }
func (c *dumpConn) RemoteAddr() net.Addr { return nil }
func (c *dumpConn) SetDeadline(t time.Time) error { return nil }
func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil }
func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil }

type neverEnding byte

func (b neverEnding) Read(p []byte) (n int, err error) {
	for i := range p {
		p[i] = byte(b)
	}
	return len(p), nil
}

// outgoingLength is a copy of the unexported
// (*http.Request).outgoingLength method.
func outgoingLength(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

// DumpRequestOut is like DumpRequest but for outgoing client requests. It
// includes any headers that the standard http.Transport adds, such as
// User-Agent.
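//
// A minimal usage sketch (illustrative only; the target URL and the use of
// package log are assumptions, not part of this package):
//
//	req, err := http.NewRequest("PUT", "http://www.example.org/", strings.NewReader("hello"))
//	if err != nil {
//		log.Fatal(err)
//	}
//	dump, err := DumpRequestOut(req, true)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("%s", dump)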
func DumpRequestOut(req *http.Request, body bool) ([]byte, error) {
	save := req.Body
	dummyBody := false
	if !body {
		contentLength := outgoingLength(req)
		if contentLength != 0 {
			req.Body = io.NopCloser(io.LimitReader(neverEnding('x'), contentLength))
			dummyBody = true
		}
	} else {
		var err error
		save, req.Body, err = drainBody(req.Body)
		if err != nil {
			return nil, err
		}
	}

	// Since we're using the actual Transport code to write the request,
	// switch to http so the Transport doesn't try to do an SSL
	// negotiation with our dumpConn and its bytes.Buffer & pipe.
	// The wire format for https and http are the same, anyway.
	reqSend := req
	if req.URL.Scheme == "https" {
		reqSend = new(http.Request)
		*reqSend = *req
		reqSend.URL = new(url.URL)
		*reqSend.URL = *req.URL
		reqSend.URL.Scheme = "http"
	}
	// Use the actual Transport code to record what we would send
	// on the wire, but not using TCP. Use a Transport with a
	// custom dialer that returns a fake net.Conn that waits
	// for the full input (recording it), and then responds
	// with a dummy response.
	var buf bytes.Buffer // records the output
	pr, pw := io.Pipe()
	defer pr.Close()
	defer pw.Close()
	dr := &delegateReader{c: make(chan io.Reader)}

	t := &http.Transport{
		Dial: func(net, addr string) (net.Conn, error) {
			return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil
		},
	}
	defer t.CloseIdleConnections()

	// We need this channel to ensure that the reader
	// goroutine exits if t.RoundTrip returns an error.
	// See golang.org/issue/32571.
	quitReadCh := make(chan struct{})
	// Wait for the request before replying with a dummy response:
	go func() {
		req, err := http.ReadRequest(bufio.NewReader(pr))
		if err == nil {
			// Ensure all the body is read; otherwise
			// we'll get a partial dump.
			io.Copy(io.Discard, req.Body)
			req.Body.Close()
		}
		select {
		case dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\nConnection: close\r\n\r\n"):
		case <-quitReadCh:
		}
	}()

	_, err := t.RoundTrip(reqSend)
	req.Body = save
	if err != nil {
		pw.Close()
		quitReadCh <- struct{}{}
		return nil, err
	}
	dump := buf.Bytes()

	// If we used a dummy body above, remove it now.
	// TODO: if the req.ContentLength is large, we allocate memory
	// unnecessarily just to slice it off here. But this is just
	// a debug function, so this is acceptable for now. We could
	// discard the body earlier if this matters.
	if dummyBody {
		if i := bytes.Index(dump, []byte("\r\n\r\n")); i >= 0 {
			dump = dump[:i+4]
		}
	}
	return dump, nil
}

// delegateReader is a reader that delegates to another reader,
// once it arrives on a channel.
type delegateReader struct {
	c chan io.Reader
	r io.Reader // nil until received from c
}

func (r *delegateReader) Read(p []byte) (int, error) {
	if r.r == nil {
		r.r = <-r.c
	}
	return r.r.Read(p)
}

// Return value if nonempty, def otherwise.
func valueOrDefault(value, def string) string {
	if value != "" {
		return value
	}
	return def
}

var reqWriteExcludeHeaderDump = map[string]bool{
	"Host":              true, // not in Header map anyway
	"Transfer-Encoding": true,
	"Trailer":           true,
}

// DumpRequest returns the given request in its HTTP/1.x wire
// representation. It should only be used by servers to debug client
// requests. The returned representation is an approximation only;
// some details of the initial request are lost while parsing it into
// an http.Request. In particular, the order and case of header field
// names are lost. The order of values in multi-valued headers is kept
// intact. HTTP/2 requests are dumped in HTTP/1.x form, not in their
// original binary representations.
//
// If body is true, DumpRequest also returns the body. To do so, it
// consumes req.Body and then replaces it with a new io.ReadCloser
// that yields the same bytes. If DumpRequest returns an error,
// the state of req is undefined.
//
// The documentation for http.Request.Write details which fields
// of req are included in the dump.
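//
// A minimal usage sketch inside a server handler (illustrative only; the
// handler and the use of package log are assumptions, not part of this
// package):
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		dump, err := DumpRequest(r, true)
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusInternalServerError)
//			return
//		}
//		log.Printf("incoming request:\n%s", dump)
//	}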
func DumpRequest(req *http.Request, body bool) ([]byte, error) {
	var err error
	save := req.Body
	if !body || req.Body == nil {
		req.Body = nil
	} else {
		save, req.Body, err = drainBody(req.Body)
		if err != nil {
			return nil, err
		}
	}

	var b bytes.Buffer

	// By default, print out the unmodified req.RequestURI, which
	// is always set for incoming server requests. But because we
	// previously used req.URL.RequestURI and the docs weren't
	// always so clear about when to use DumpRequest vs
	// DumpRequestOut, fall back to the old way if the caller
	// provides a non-server Request.
	reqURI := req.RequestURI
	if reqURI == "" {
		reqURI = req.URL.RequestURI()
	}

	fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"),
		reqURI, req.ProtoMajor, req.ProtoMinor)

	absRequestURI := strings.HasPrefix(req.RequestURI, "http://") || strings.HasPrefix(req.RequestURI, "https://")
	if !absRequestURI {
		host := req.Host
		if host == "" && req.URL != nil {
			host = req.URL.Host
		}
		if host != "" {
			fmt.Fprintf(&b, "Host: %s\r\n", host)
		}
	}

	chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
	if len(req.TransferEncoding) > 0 {
		fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
	}
	if req.Close {
		fmt.Fprintf(&b, "Connection: close\r\n")
	}

	err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
	if err != nil {
		return nil, err
	}

	io.WriteString(&b, "\r\n")

	if req.Body != nil {
		var dest io.Writer = &b
		if chunked {
			dest = NewChunkedWriter(dest)
		}
		_, err = io.Copy(dest, req.Body)
		if chunked {
			dest.(io.Closer).Close()
			io.WriteString(&b, "\r\n")
		}
	}

	req.Body = save
	if err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

// errNoBody is a sentinel error value used by failureToReadBody so we
// can detect that the lack of body was intentional.
var errNoBody = errors.New("sentinel error value")

// failureToReadBody is an io.ReadCloser that just returns errNoBody on
// Read. It's swapped in when we don't actually want to consume
// the body, but need a non-nil one, and want to distinguish the
// error from reading the dummy body.
type failureToReadBody struct{}

func (failureToReadBody) Read([]byte) (int, error) { return 0, errNoBody }
func (failureToReadBody) Close() error { return nil }

// emptyBody is an instance of an empty reader.
var emptyBody = io.NopCloser(strings.NewReader(""))
// DumpResponse is like DumpRequest but dumps a response.
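//
// A minimal usage sketch for a client (illustrative only; the URL and the
// use of package log are assumptions, not part of this package):
//
//	resp, err := http.Get("http://www.example.org/")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer resp.Body.Close()
//	dump, err := DumpResponse(resp, true)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("%s", dump)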
func DumpResponse(resp *http.Response, body bool) ([]byte, error) {
	var b bytes.Buffer
	var err error
	save := resp.Body
	savecl := resp.ContentLength

	if !body {
		// For a content length of zero, make sure the body is an empty
		// reader instead of returning an error through failureToReadBody{}.
		if resp.ContentLength == 0 {
			resp.Body = emptyBody
		} else {
			resp.Body = failureToReadBody{}
		}
	} else if resp.Body == nil {
		resp.Body = emptyBody
	} else {
		save, resp.Body, err = drainBody(resp.Body)
		if err != nil {
			return nil, err
		}
	}
	err = resp.Write(&b)
	if err == errNoBody {
		err = nil
	}
	resp.Body = save
	resp.ContentLength = savecl
	if err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}