author    Jacob Vosmaer <jacob@gitlab.com>  2018-01-18 17:49:20 +0100
committer Jacob Vosmaer <jacob@gitlab.com>  2018-01-18 17:49:30 +0100
commit    a969b8865c56944ecdb55d0c61529f691d1d4864 (patch)
tree      912287e74b458eb5f13890f1f84433324474ed34
parent    d2bcae4c016ddb88a305e91762df46f9c8ebb0ff (diff)
download  gitlab-shell-grpc-go-1.9.1.tar.gz

Use grpc-go 1.9.1 (grpc-go-1.9.1)
-rw-r--r--  go/vendor/github.com/golang/protobuf/ptypes/any.go  139
-rw-r--r--  go/vendor/github.com/golang/protobuf/ptypes/doc.go  35
-rw-r--r--  go/vendor/github.com/golang/protobuf/ptypes/duration.go  102
-rw-r--r--  go/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go  144
-rw-r--r--  go/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto  117
-rwxr-xr-x  go/vendor/github.com/golang/protobuf/ptypes/regen.sh  43
-rw-r--r--  go/vendor/github.com/golang/protobuf/ptypes/timestamp.go  134
-rw-r--r--  go/vendor/google.golang.org/grpc/AUTHORS  1
-rw-r--r--  go/vendor/google.golang.org/grpc/CONTRIBUTING.md  64
-rw-r--r--  go/vendor/google.golang.org/grpc/LICENSE  230
-rw-r--r--  go/vendor/google.golang.org/grpc/Makefile  13
-rw-r--r--  go/vendor/google.golang.org/grpc/PATENTS  22
-rw-r--r--  go/vendor/google.golang.org/grpc/README.md  33
-rw-r--r--  go/vendor/google.golang.org/grpc/backoff.go  32
-rw-r--r--  go/vendor/google.golang.org/grpc/balancer.go  69
-rw-r--r--  go/vendor/google.golang.org/grpc/balancer/balancer.go  223
-rw-r--r--  go/vendor/google.golang.org/grpc/balancer/base/balancer.go  209
-rw-r--r--  go/vendor/google.golang.org/grpc/balancer/base/base.go  52
-rw-r--r--  go/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go  79
-rw-r--r--  go/vendor/google.golang.org/grpc/balancer_conn_wrappers.go  300
-rw-r--r--  go/vendor/google.golang.org/grpc/balancer_v1_wrapper.go  375
-rw-r--r--  go/vendor/google.golang.org/grpc/call.go  284
-rw-r--r--  go/vendor/google.golang.org/grpc/clientconn.go  1338
-rw-r--r--  go/vendor/google.golang.org/grpc/codec.go  114
-rw-r--r--  go/vendor/google.golang.org/grpc/codes/code_string.go  66
-rw-r--r--  go/vendor/google.golang.org/grpc/codes/codes.go  80
-rw-r--r--  go/vendor/google.golang.org/grpc/connectivity/connectivity.go  72
-rwxr-xr-x  go/vendor/google.golang.org/grpc/coverage.sh  48
-rw-r--r--  go/vendor/google.golang.org/grpc/credentials/credentials.go  69
-rw-r--r--  go/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go  35
-rw-r--r--  go/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go  35
-rw-r--r--  go/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go  35
-rw-r--r--  go/vendor/google.golang.org/grpc/doc.go  20
-rw-r--r--  go/vendor/google.golang.org/grpc/encoding/encoding.go  61
-rw-r--r--  go/vendor/google.golang.org/grpc/go16.go  98
-rw-r--r--  go/vendor/google.golang.org/grpc/go17.go  99
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclb.go  342
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go  615
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto  155
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclb_picker.go  159
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclb_remote_balancer.go  254
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclb_util.go  90
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclog/grpclog.go  123
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclog/logger.go  106
-rw-r--r--  go/vendor/google.golang.org/grpc/grpclog/loggerv2.go  195
-rw-r--r--  go/vendor/google.golang.org/grpc/interceptor.go  43
-rw-r--r--  go/vendor/google.golang.org/grpc/internal/internal.go  42
-rw-r--r--  go/vendor/google.golang.org/grpc/keepalive/keepalive.go  39
-rw-r--r--  go/vendor/google.golang.org/grpc/metadata/metadata.go  133
-rw-r--r--  go/vendor/google.golang.org/grpc/naming/dns_resolver.go  290
-rw-r--r--  go/vendor/google.golang.org/grpc/naming/go17.go  34
-rw-r--r--  go/vendor/google.golang.org/grpc/naming/go18.go  28
-rw-r--r--  go/vendor/google.golang.org/grpc/naming/naming.go  35
-rw-r--r--  go/vendor/google.golang.org/grpc/peer/peer.go  38
-rw-r--r--  go/vendor/google.golang.org/grpc/picker_wrapper.go  141
-rw-r--r--  go/vendor/google.golang.org/grpc/pickfirst.go  108
-rw-r--r--  go/vendor/google.golang.org/grpc/proxy.go  130
-rw-r--r--  go/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go  377
-rw-r--r--  go/vendor/google.golang.org/grpc/resolver/dns/go17.go  35
-rw-r--r--  go/vendor/google.golang.org/grpc/resolver/dns/go18.go  29
-rw-r--r--  go/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go  57
-rw-r--r--  go/vendor/google.golang.org/grpc/resolver/resolver.go  155
-rw-r--r--  go/vendor/google.golang.org/grpc/resolver_conn_wrapper.go  159
-rw-r--r--  go/vendor/google.golang.org/grpc/rpc_util.go  496
-rw-r--r--  go/vendor/google.golang.org/grpc/server.go  846
-rw-r--r--  go/vendor/google.golang.org/grpc/service_config.go  226
-rw-r--r--  go/vendor/google.golang.org/grpc/stats/handlers.go  42
-rw-r--r--  go/vendor/google.golang.org/grpc/stats/stats.go  147
-rw-r--r--  go/vendor/google.golang.org/grpc/status/status.go  99
-rw-r--r--  go/vendor/google.golang.org/grpc/stream.go  424
-rw-r--r--  go/vendor/google.golang.org/grpc/tap/tap.go  55
-rw-r--r--  go/vendor/google.golang.org/grpc/trace.go  50
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/bdp_estimator.go  140
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/control.go  265
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/go16.go  55
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/go17.go  58
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/handler_server.go  134
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/http2_client.go  1187
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/http2_server.go  1110
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/http_util.go  388
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/log.go  50
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/pre_go16.go  51
-rw-r--r--  go/vendor/google.golang.org/grpc/transport/transport.go  499
-rwxr-xr-x  go/vendor/google.golang.org/grpc/vet.sh  84
-rw-r--r--  go/vendor/vendor.json  188
85 files changed, 11819 insertions, 3557 deletions
diff --git a/go/vendor/github.com/golang/protobuf/ptypes/any.go b/go/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..b2af97f
--- /dev/null
+++ b/go/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,139 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *any.Any) (string, error) {
+ if any == nil {
+ return "", fmt.Errorf("message is nil")
+ }
+ slash := strings.LastIndex(any.TypeUrl, "/")
+ if slash < 0 {
+ return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+ }
+ return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+ value, err := proto.Marshal(pb)
+ if err != nil {
+ return nil, err
+ }
+ return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+ proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if corresponding message
+// type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+ aname, err := AnyMessageName(any)
+ if err != nil {
+ return nil, err
+ }
+
+ t := proto.MessageType(aname)
+ if t == nil {
+ return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+ }
+ return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type of
+// the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+ if d, ok := pb.(*DynamicAny); ok {
+ if d.Message == nil {
+ var err error
+ d.Message, err = Empty(any)
+ if err != nil {
+ return err
+ }
+ }
+ return UnmarshalAny(any, d.Message)
+ }
+
+ aname, err := AnyMessageName(any)
+ if err != nil {
+ return err
+ }
+
+ mname := proto.MessageName(pb)
+ if aname != mname {
+ return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+ }
+ return proto.Unmarshal(any.Value, pb)
+}
+
+// Is returns true if any value contains a given message type.
+func Is(any *any.Any, pb proto.Message) bool {
+ aname, err := AnyMessageName(any)
+ if err != nil {
+ return false
+ }
+
+ return aname == proto.MessageName(pb)
+}
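
For reference, the Any helpers added above round-trip like this. A minimal sketch, not part of the diff, using only the vendored packages (durpb.Duration stands in for any generated proto.Message):

```
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message into a google.protobuf.Any.
	src := &durpb.Duration{Seconds: 3, Nanos: 1}
	a, err := ptypes.MarshalAny(src)
	if err != nil {
		log.Fatal(err)
	}

	// Is and AnyMessageName inspect the Any without unpacking it.
	fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true
	name, _ := ptypes.AnyMessageName(a)
	fmt.Println(name) // google.protobuf.Duration

	// Unpack into a known type...
	var dst durpb.Duration
	if err := ptypes.UnmarshalAny(a, &dst); err != nil {
		log.Fatal(err)
	}

	// ...or let DynamicAny allocate the right type automatically.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("dynamic: %v\n", dyn.Message)
}
```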
diff --git a/go/vendor/github.com/golang/protobuf/ptypes/doc.go b/go/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/go/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/go/vendor/github.com/golang/protobuf/ptypes/duration.go b/go/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..65cb0f8
--- /dev/null
+++ b/go/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+ // Range of a durpb.Duration in seconds, as specified in
+ // google/protobuf/duration.proto. This is about 10,000 years in seconds.
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+ minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+ if d == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", d)
+ }
+ if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", d)
+ }
+ // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+ if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+ }
+ return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+ if err := validateDuration(p); err != nil {
+ return 0, err
+ }
+ d := time.Duration(p.Seconds) * time.Second
+ if int64(d/time.Second) != p.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ }
+ if p.Nanos != 0 {
+ d += time.Duration(p.Nanos)
+ if (d < 0) != (p.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+ }
+ }
+ return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+ nanos := d.Nanoseconds()
+ secs := nanos / 1e9
+ nanos -= secs * 1e9
+ return &durpb.Duration{
+ Seconds: secs,
+ Nanos: int32(nanos),
+ }
+}
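
A short sketch (not part of the diff) of the conversions duration.go adds, round-tripping between time.Duration and the durpb.Duration proto from the vendored packages above:

```
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// time.Duration -> durpb.Duration
	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	// durpb.Duration -> time.Duration, with validation
	d, err := ptypes.Duration(p)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 1m30.5s

	// Out-of-range values are rejected rather than silently truncated.
	if _, err := ptypes.Duration(&durpb.Duration{Seconds: 1 << 62}); err != nil {
		fmt.Println("rejected:", err)
	}
}
```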
diff --git a/go/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/go/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..b2410a0
--- /dev/null
+++ b/go/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,144 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+/*
+Package duration is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/duration.proto
+
+It has these top-level messages:
+ Duration
+*/
+package duration
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Duration) Reset() { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
+func (m *Duration) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 190 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+ 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+ 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+ 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+ 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+ 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+ 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+ 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+ 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+ 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+ 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+}
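
The generated getters above check for a nil receiver, which is why callers can use them without nil checks. A tiny sketch (not part of the diff), assuming only the vendored duration package:

```
package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	var d *durpb.Duration // nil message

	// The generated getters return the zero value on a nil receiver
	// instead of panicking.
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 0 0

	d = &durpb.Duration{Seconds: 3, Nanos: 1}
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 3 1
}
```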
diff --git a/go/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/go/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/go/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
+//
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
+//
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
+//
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
+//
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
diff --git a/go/vendor/github.com/golang/protobuf/ptypes/regen.sh b/go/vendor/github.com/golang/protobuf/ptypes/regen.sh
new file mode 100755
index 0000000..b50a941
--- /dev/null
+++ b/go/vendor/github.com/golang/protobuf/ptypes/regen.sh
@@ -0,0 +1,43 @@
+#!/bin/bash -e
+#
+# This script fetches and rebuilds the "well-known types" protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+# You also need Go and Git installed.
+
+PKG=github.com/golang/protobuf/ptypes
+UPSTREAM=https://github.com/google/protobuf
+UPSTREAM_SUBDIR=src/google/protobuf
+PROTO_FILES=(any duration empty struct timestamp wrappers)
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go git protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
+trap 'rm -rf $tmpdir' EXIT
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd "$base"
+
+echo 1>&2 "fetching latest protos... "
+git clone -q $UPSTREAM $tmpdir
+
+for file in ${PROTO_FILES[@]}; do
+ echo 1>&2 "* $file"
+ protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die
+ cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file
+done
+
+echo 1>&2 "All OK"
diff --git a/go/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/go/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..47f10db
--- /dev/null
+++ b/go/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,134 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+ ts, err := TimestampProto(time.Now())
+ if err != nil {
+ panic("ptypes: time.Now() out of Timestamp range")
+ }
+ return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+ seconds := t.Unix()
+ nanos := int32(t.Sub(time.Unix(seconds, 0)))
+ ts := &tspb.Timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+ t, err := Timestamp(ts)
+ if err != nil {
+ return fmt.Sprintf("(%v)", err)
+ }
+ return t.Format(time.RFC3339Nano)
+}
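
The timestamp.go helpers mirror the duration ones; a minimal round-trip sketch (not part of the diff), again using only the vendored ptypes package:

```
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> google.protobuf.Timestamp
	ts, err := ptypes.TimestampProto(time.Date(2018, 1, 18, 17, 49, 20, 0, time.UTC))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ptypes.TimestampString(ts)) // 2018-01-18T17:49:20Z

	// google.protobuf.Timestamp -> time.Time (already in UTC)
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t)

	// TimestampNow is a convenience wrapper around TimestampProto(time.Now()).
	fmt.Println(ptypes.TimestampString(ptypes.TimestampNow()))
}
```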
diff --git a/go/vendor/google.golang.org/grpc/AUTHORS b/go/vendor/google.golang.org/grpc/AUTHORS
new file mode 100644
index 0000000..e491a9e
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/go/vendor/google.golang.org/grpc/CONTRIBUTING.md b/go/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 36cd6f7..8ec6c95 100644
--- a/go/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/go/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -1,46 +1,32 @@
# How to contribute
-We definitely welcome patches and contribution to grpc! Here are some guidelines
-and information about how to do so.
+We definitely welcome your patches and contributions to gRPC!
-## Sending patches
-
-### Getting started
-
-1. Check out the code:
-
- $ go get google.golang.org/grpc
- $ cd $GOPATH/src/google.golang.org/grpc
-
-1. Create a fork of the grpc-go repository.
-1. Add your fork as a remote:
-
- $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git
-
-1. Make changes, commit them.
-1. Run the test suite:
-
- $ make test
-
-1. Push your changes to your fork:
-
- $ git push fork ...
-
-1. Open a pull request.
+If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
-
-## Filing Issues
-When filing an issue, make sure to answer these five questions:
-
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
-
-### Contributing code
-Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
+[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
+
+## Guidelines for Pull Requests
+How to get your contributions merged smoothly and quickly.
+
+- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often receive PRs that are trying to fix several things at a time, but if only one fix is considered acceptable, nothing gets merged and both the author's and the reviewer's time is wasted. Create more PRs to address different concerns and everyone will be happy.
+
+- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists.
+
+- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR.
+
+- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity.
+
+- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review).
+
+- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).
+
+- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
+
+- Exceptions to the rules can be made if there's a compelling reason for doing so.
+
diff --git a/go/vendor/google.golang.org/grpc/LICENSE b/go/vendor/google.golang.org/grpc/LICENSE
index f4988b4..d645695 100644
--- a/go/vendor/google.golang.org/grpc/LICENSE
+++ b/go/vendor/google.golang.org/grpc/LICENSE
@@ -1,28 +1,202 @@
-Copyright 2014, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/go/vendor/google.golang.org/grpc/Makefile b/go/vendor/google.golang.org/grpc/Makefile
index 03bb01f..c445343 100644
--- a/go/vendor/google.golang.org/grpc/Makefile
+++ b/go/vendor/google.golang.org/grpc/Makefile
@@ -20,24 +20,17 @@ proto:
echo "error: protoc not installed" >&2; \
exit 1; \
fi
- go get -u -v github.com/golang/protobuf/protoc-gen-go
- # use $$dir as the root for all proto files in the same directory
- for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \
- protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \
- done
+ go generate google.golang.org/grpc/...
test: testdeps
- go test -v -cpu 1,4 google.golang.org/grpc/...
+ go test -cpu 1,4 -timeout 5m google.golang.org/grpc/...
testrace: testdeps
- go test -v -race -cpu 1,4 google.golang.org/grpc/...
+ go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
clean:
go clean -i google.golang.org/grpc/...
-coverage: testdeps
- ./coverage.sh --coveralls
-
.PHONY: \
all \
deps \
diff --git a/go/vendor/google.golang.org/grpc/PATENTS b/go/vendor/google.golang.org/grpc/PATENTS
deleted file mode 100644
index 69b4795..0000000
--- a/go/vendor/google.golang.org/grpc/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the gRPC project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of gRPC, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of gRPC. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of gRPC or any code incorporated within this
-implementation of gRPC constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of gRPC
-shall terminate as of the date such litigation is filed.
diff --git a/go/vendor/google.golang.org/grpc/README.md b/go/vendor/google.golang.org/grpc/README.md
index 39120c2..118327b 100644
--- a/go/vendor/google.golang.org/grpc/README.md
+++ b/go/vendor/google.golang.org/grpc/README.md
@@ -1,8 +1,8 @@
-#gRPC-Go
+# gRPC-Go
-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
-The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
+The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide.
Installation
------------
@@ -10,29 +10,14 @@ Installation
To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run:
```
-$ go get google.golang.org/grpc
+$ go get -u google.golang.org/grpc
```
Prerequisites
-------------
-This requires Go 1.5 or later.
-
-A note on the version used: significant performance improvements in benchmarks
-of grpc-go have been seen by upgrading the go version from 1.5 to the latest
-1.7.1.
-
-From https://golang.org/doc/install, one way to install the latest version of go is:
-```
-$ GO_VERSION=1.7.1
-$ OS=linux
-$ ARCH=amd64
-$ curl -O https://storage.googleapis.com/golang/go${GO_VERSION}.${OS}-${ARCH}.tar.gz
-$ sudo tar -C /usr/local -xzf go$GO_VERSION.$OS-$ARCH.tar.gz
-$ # Put go on the PATH, keep the usual installation dir
-$ sudo ln -s /usr/local/go/bin/go /usr/bin/go
-$ rm go$GO_VERSION.$OS-$ARCH.tar.gz
-```
+This requires Go 1.6 or later. Go 1.7 will be required as of the next gRPC-Go
+release (1.8).
Constraints
-----------
@@ -42,9 +27,13 @@ Documentation
-------------
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
+Performance
+-----------
+See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
+
Status
------
-GA
+General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages).
FAQ
---
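The installation and quickstart pointers above boil down to very little client code. As a rough sketch only (the localhost:50051 address is an assumption, and a real client would layer a generated stub on top of the connection):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Dial a hypothetical local server without TLS; the address is an
	// assumption used only for illustration.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	// A real client would now wrap conn in a generated stub, e.g.
	// pb.NewGreeterClient(conn), and issue RPCs with a context.
}
```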
diff --git a/go/vendor/google.golang.org/grpc/backoff.go b/go/vendor/google.golang.org/grpc/backoff.go
index c99024e..c40facc 100644
--- a/go/vendor/google.golang.org/grpc/backoff.go
+++ b/go/vendor/google.golang.org/grpc/backoff.go
@@ -1,3 +1,21 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
package grpc
import (
@@ -7,14 +25,12 @@ import (
// DefaultBackoffConfig uses values specified for backoff in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-var (
- DefaultBackoffConfig = BackoffConfig{
- MaxDelay: 120 * time.Second,
- baseDelay: 1.0 * time.Second,
- factor: 1.6,
- jitter: 0.2,
- }
-)
+var DefaultBackoffConfig = BackoffConfig{
+ MaxDelay: 120 * time.Second,
+ baseDelay: 1.0 * time.Second,
+ factor: 1.6,
+ jitter: 0.2,
+}
// backoffStrategy defines the methodology for backing off after a grpc
// connection failure.
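DefaultBackoffConfig above is now a plain variable; a caller who wants a different reconnect cap can supply their own config at dial time. A hedged sketch, assuming the WithBackoffConfig dial option shipped in this release (only the exported MaxDelay field is tunable from outside the package):

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Cap the reconnect backoff at 30s instead of the default 120s. Only
	// MaxDelay is exported; baseDelay, factor and jitter keep their defaults.
	bc := grpc.BackoffConfig{MaxDelay: 30 * time.Second}

	conn, err := grpc.Dial("localhost:50051", // hypothetical address
		grpc.WithInsecure(),
		grpc.WithBackoffConfig(bc),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```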
diff --git a/go/vendor/google.golang.org/grpc/balancer.go b/go/vendor/google.golang.org/grpc/balancer.go
index 9d943fb..300da6c 100644
--- a/go/vendor/google.golang.org/grpc/balancer.go
+++ b/go/vendor/google.golang.org/grpc/balancer.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -35,6 +20,7 @@ package grpc
import (
"fmt"
+ "net"
"sync"
"golang.org/x/net/context"
@@ -42,6 +28,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/naming"
+ "google.golang.org/grpc/status"
)
// Address represents a server the client connects to.
@@ -60,6 +47,10 @@ type BalancerConfig struct {
// use to dial to a remote load balancer server. The Balancer implementations
// can ignore this if it does not need to talk to another party securely.
DialCreds credentials.TransportCredentials
+ // Dialer is the custom dialer the Balancer implementation can use to dial
+	// to a remote load balancer server. A Balancer implementation
+	// can ignore this if it doesn't need to talk to a remote balancer.
+ Dialer func(context.Context, string) (net.Conn, error)
}
// BalancerGetOptions configures a Get call.
@@ -167,7 +158,7 @@ type roundRobin struct {
func (rr *roundRobin) watchAddrUpdates() error {
updates, err := rr.w.Next()
if err != nil {
- grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err)
+ grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
return err
}
rr.mu.Lock()
@@ -183,7 +174,7 @@ func (rr *roundRobin) watchAddrUpdates() error {
for _, v := range rr.addrs {
if addr == v.addr {
exist = true
- grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr)
+ grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
break
}
}
@@ -200,7 +191,7 @@ func (rr *roundRobin) watchAddrUpdates() error {
}
}
default:
- grpclog.Println("Unknown update.Op ", update.Op)
+ grpclog.Errorln("Unknown update.Op ", update.Op)
}
}
// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
@@ -211,6 +202,10 @@ func (rr *roundRobin) watchAddrUpdates() error {
if rr.done {
return ErrClientConnClosing
}
+ select {
+ case <-rr.addrCh:
+ default:
+ }
rr.addrCh <- open
return nil
}
@@ -233,7 +228,7 @@ func (rr *roundRobin) Start(target string, config BalancerConfig) error {
return err
}
rr.w = w
- rr.addrCh = make(chan []Address)
+ rr.addrCh = make(chan []Address, 1)
go func() {
for {
if err := rr.watchAddrUpdates(); err != nil {
@@ -316,7 +311,7 @@ func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Ad
if !opts.BlockingWait {
if len(rr.addrs) == 0 {
rr.mu.Unlock()
- err = Errorf(codes.Unavailable, "there is no address available")
+ err = status.Errorf(codes.Unavailable, "there is no address available")
return
}
// Returns the next addr on rr.addrs for failfast RPCs.
@@ -385,6 +380,9 @@ func (rr *roundRobin) Notify() <-chan []Address {
func (rr *roundRobin) Close() error {
rr.mu.Lock()
defer rr.mu.Unlock()
+ if rr.done {
+ return errBalancerClosed
+ }
rr.done = true
if rr.w != nil {
rr.w.Close()
@@ -398,3 +396,14 @@ func (rr *roundRobin) Close() error {
}
return nil
}
+
+// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
+// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
+// returns the only address Up by resetTransport().
+type pickFirst struct {
+ *roundRobin
+}
+
+func pickFirstBalancerV1(r naming.Resolver) Balancer {
+ return &pickFirst{&roundRobin{r: r}}
+}
diff --git a/go/vendor/google.golang.org/grpc/balancer/balancer.go b/go/vendor/google.golang.org/grpc/balancer/balancer.go
new file mode 100644
index 0000000..219a294
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -0,0 +1,223 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package balancer defines APIs for load balancing in gRPC.
+// All APIs in this package are experimental.
+package balancer
+
+import (
+ "errors"
+ "net"
+ "strings"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/resolver"
+)
+
+var (
+ // m is a map from name to balancer builder.
+ m = make(map[string]Builder)
+)
+
+// Register registers the balancer builder to the balancer map.
+// b.Name (lowercased) will be used as the name registered with
+// this builder.
+func Register(b Builder) {
+ m[strings.ToLower(b.Name())] = b
+}
+
+// Get returns the balancer builder registered with the given name.
+// Note that the comparison is done in a case-insensitive fashion.
+// If no builder is registered with the name, nil will be returned.
+func Get(name string) Builder {
+ if b, ok := m[strings.ToLower(name)]; ok {
+ return b
+ }
+ return nil
+}
+
+// SubConn represents a gRPC sub connection.
+// Each sub connection contains a list of addresses. gRPC will
+// try to connect to them (in sequence), and stop trying the
+// remainder once one connection is successful.
+//
+// The reconnect backoff will be applied on the list, not a single address.
+// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger
+// the connecting, Balancers must call Connect.
+// When the connection encounters an error, it will reconnect immediately.
+// When the connection becomes IDLE, it will not reconnect unless Connect is
+// called.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For the situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type SubConn interface {
+ // UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if the currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will be gracefully closed, and
+ // a new connection will be created.
+ //
+ // This will trigger a state transition for the SubConn.
+ UpdateAddresses([]resolver.Address)
+ // Connect starts the connecting for this SubConn.
+ Connect()
+}
+
+// NewSubConnOptions contains options to create new SubConn.
+type NewSubConnOptions struct{}
+
+// ClientConn represents a gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For the situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type ClientConn interface {
+ // NewSubConn is called by balancer to create a new SubConn.
+ // It doesn't block and wait for the connections to be established.
+ // Behaviors of the SubConn can be controlled by options.
+ NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
+ // RemoveSubConn removes the SubConn from ClientConn.
+ // The SubConn will be shutdown.
+ RemoveSubConn(SubConn)
+
+	// UpdateBalancerState is called by balancer to notify gRPC that some internal
+ // state in balancer has changed.
+ //
+ // gRPC will update the connectivity state of the ClientConn, and will call pick
+ // on the new picker to pick new SubConn.
+ UpdateBalancerState(s connectivity.State, p Picker)
+
+ // ResolveNow is called by balancer to notify gRPC to do a name resolving.
+ ResolveNow(resolver.ResolveNowOption)
+
+ // Target returns the dial target for this ClientConn.
+ Target() string
+}
+
+// BuildOptions contains additional information for Build.
+type BuildOptions struct {
+ // DialCreds is the transport credential the Balancer implementation can
+	// use to dial to a remote load balancer server. A Balancer implementation
+	// can ignore this if it does not need to talk to another party securely.
+	DialCreds credentials.TransportCredentials
+	// Dialer is the custom dialer the Balancer implementation can use to dial
+	// to a remote load balancer server. A Balancer implementation
+	// can ignore this if it doesn't need to talk to a remote balancer.
+ Dialer func(context.Context, string) (net.Conn, error)
+}
+
+// Builder creates a balancer.
+type Builder interface {
+ // Build creates a new balancer with the ClientConn.
+ Build(cc ClientConn, opts BuildOptions) Balancer
+ // Name returns the name of balancers built by this builder.
+ // It will be used to pick balancers (for example in service config).
+ Name() string
+}
+
+// PickOptions contains additional information for the Pick operation.
+type PickOptions struct{}
+
+// DoneInfo contains additional information for done.
+type DoneInfo struct {
+ // Err is the rpc error the RPC finished with. It could be nil.
+ Err error
+ // BytesSent indicates if any bytes have been sent to the server.
+ BytesSent bool
+ // BytesReceived indicates if any byte has been received from the server.
+ BytesReceived bool
+}
+
+var (
+ // ErrNoSubConnAvailable indicates no SubConn is available for pick().
+ // gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+ ErrNoSubConnAvailable = errors.New("no SubConn is available")
+ // ErrTransientFailure indicates all SubConns are in TransientFailure.
+ // WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+ ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+)
+
+// Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+type Picker interface {
+ // Pick returns the SubConn to be used to send the RPC.
+ // The returned SubConn must be one returned by NewSubConn().
+ //
+	// This function is expected to return:
+ // - a SubConn that is known to be READY;
+ // - ErrNoSubConnAvailable if no SubConn is available, but progress is being
+ // made (for example, some SubConn is in CONNECTING mode);
+ // - other errors if no active connecting is happening (for example, all SubConn
+ // are in TRANSIENT_FAILURE mode).
+ //
+ // If a SubConn is returned:
+ // - If it is READY, gRPC will send the RPC on it;
+ // - If it is not ready, or becomes not ready after it's returned, gRPC will block
+ // until UpdateBalancerState() is called and will call pick on the new picker.
+ //
+ // If the returned error is not nil:
+ // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
+ // - If the error is ErrTransientFailure:
+ // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
+ // is called to pick again;
+ // - Otherwise, RPC will fail with unavailable error.
+ // - Else (error is other non-nil error):
+ // - The RPC will fail with unavailable error.
+ //
+ // The returned done() function will be called once the rpc has finished, with the
+ // final status of that RPC.
+ // done may be nil if balancer doesn't care about the RPC status.
+ Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+}
+
+// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
+// the connectivity states.
+//
+// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
+//
+// HandleSubConnStateChange, HandleResolvedAddrs and Close are guaranteed
+// to be called synchronously from the same goroutine.
+// There's no guarantee on picker.Pick, it may be called anytime.
+type Balancer interface {
+ // HandleSubConnStateChange is called by gRPC when the connectivity state
+ // of sc has changed.
+ // Balancer is expected to aggregate all the state of SubConn and report
+ // that back to gRPC.
+ // Balancer should also generate and update Pickers when its internal state has
+ // been changed by the new state.
+ HandleSubConnStateChange(sc SubConn, state connectivity.State)
+ // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
+ // balancers.
+ // Balancer can create new SubConn or remove SubConn with the addresses.
+ // An empty address slice and a non-nil error will be passed if the resolver returns
+ // non-nil error to gRPC.
+ HandleResolvedAddrs([]resolver.Address, error)
+ // Close closes the balancer. The balancer is not required to call
+ // ClientConn.RemoveSubConn for its existing SubConns.
+ Close()
+}
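To make the Register/Get contract and the Builder/Balancer interfaces above concrete, here is a minimal sketch of wiring a do-nothing balancer into the registry. The noopBuilder and noopBalancer names are invented for illustration and are not part of the package; because Register lowercases the name, the later Get lookup is case-insensitive.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/resolver"
)

// noopBuilder and noopBalancer are hypothetical, do-nothing implementations
// used only to show the registration flow.
type noopBuilder struct{}

func (noopBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
	return noopBalancer{}
}
func (noopBuilder) Name() string { return "noop_example" }

type noopBalancer struct{}

func (noopBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {}
func (noopBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error)            {}
func (noopBalancer) Close()                                                             {}

func main() {
	balancer.Register(noopBuilder{})
	// Register lowercases the name, so this lookup succeeds despite the caps.
	if b := balancer.Get("NOOP_EXAMPLE"); b != nil {
		fmt.Println("found builder:", b.Name()) // prints "found builder: noop_example"
	}
}
```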
diff --git a/go/vendor/google.golang.org/grpc/balancer/base/balancer.go b/go/vendor/google.golang.org/grpc/balancer/base/balancer.go
new file mode 100644
index 0000000..1e962b7
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -0,0 +1,209 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package base
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+type baseBuilder struct {
+ name string
+ pickerBuilder PickerBuilder
+}
+
+func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+ return &baseBalancer{
+ cc: cc,
+ pickerBuilder: bb.pickerBuilder,
+
+ subConns: make(map[resolver.Address]balancer.SubConn),
+ scStates: make(map[balancer.SubConn]connectivity.State),
+ csEvltr: &connectivityStateEvaluator{},
+		// Initialize picker to a picker that always returns
+ // ErrNoSubConnAvailable, because when state of a SubConn changes, we
+ // may call UpdateBalancerState with this picker.
+ picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
+ }
+}
+
+func (bb *baseBuilder) Name() string {
+ return bb.name
+}
+
+type baseBalancer struct {
+ cc balancer.ClientConn
+ pickerBuilder PickerBuilder
+
+ csEvltr *connectivityStateEvaluator
+ state connectivity.State
+
+ subConns map[resolver.Address]balancer.SubConn
+ scStates map[balancer.SubConn]connectivity.State
+ picker balancer.Picker
+}
+
+func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+ if err != nil {
+ grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err)
+ return
+ }
+ grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs)
+ // addrsSet is the set converted from addrs, it's used for quick lookup of an address.
+ addrsSet := make(map[resolver.Address]struct{})
+ for _, a := range addrs {
+ addrsSet[a] = struct{}{}
+ if _, ok := b.subConns[a]; !ok {
+ // a is a new address (not existing in b.subConns).
+ sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+ continue
+ }
+ b.subConns[a] = sc
+ b.scStates[sc] = connectivity.Idle
+ sc.Connect()
+ }
+ }
+ for a, sc := range b.subConns {
+ // a was removed by resolver.
+ if _, ok := addrsSet[a]; !ok {
+ b.cc.RemoveSubConn(sc)
+ delete(b.subConns, a)
+ // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+ // The entry will be deleted in HandleSubConnStateChange.
+ }
+ }
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker
+// from it. The picker is
+// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
+// - built by the pickerBuilder with all READY SubConns otherwise.
+func (b *baseBalancer) regeneratePicker() {
+ if b.state == connectivity.TransientFailure {
+ b.picker = NewErrPicker(balancer.ErrTransientFailure)
+ return
+ }
+ readySCs := make(map[resolver.Address]balancer.SubConn)
+
+ // Filter out all ready SCs from full subConn map.
+ for addr, sc := range b.subConns {
+ if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs[addr] = sc
+ }
+ }
+ b.picker = b.pickerBuilder.Build(readySCs)
+}
+
+func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+ oldS, ok := b.scStates[sc]
+ if !ok {
+ grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+ return
+ }
+ b.scStates[sc] = s
+ switch s {
+ case connectivity.Idle:
+ sc.Connect()
+ case connectivity.Shutdown:
+ // When an address was removed by resolver, b called RemoveSubConn but
+ // kept the sc's state in scStates. Remove state for this sc here.
+ delete(b.scStates, sc)
+ }
+
+ oldAggrState := b.state
+ b.state = b.csEvltr.recordTransition(oldS, s)
+
+ // Regenerate picker when one of the following happens:
+ // - this sc became ready from not-ready
+ // - this sc became not-ready from ready
+ // - the aggregated state of balancer became TransientFailure from non-TransientFailure
+ // - the aggregated state of balancer became non-TransientFailure from TransientFailure
+ if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
+ (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+ b.regeneratePicker()
+ }
+
+ b.cc.UpdateBalancerState(b.state, b.picker)
+ return
+}
+
+// Close is a nop because base balancer doesn't have internal state to clean up,
+// and it doesn't need to call RemoveSubConn for the SubConns.
+func (b *baseBalancer) Close() {
+}
+
+// NewErrPicker returns a picker that always returns err on Pick().
+func NewErrPicker(err error) balancer.Picker {
+ return &errPicker{err: err}
+}
+
+type errPicker struct {
+ err error // Pick() always returns this err.
+}
+
+func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ return nil, nil, p.err
+}
+
+// connectivityStateEvaluator gets updated by addrConns when their
+// states transition, based on which it evaluates the state of
+// ClientConn.
+type connectivityStateEvaluator struct {
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
+ numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// recordTransition records state change happening in every subConn and based on
+// that it evaluates what aggregated state should be.
+// It can only transition between Ready, Connecting and TransientFailure. Other states,
+// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
+// before any subConn is created ClientConn is in idle state. In the end when ClientConn
+// closes it is in Shutdown state.
+//
+// recordTransition should only be called synchronously from the same goroutine.
+func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
+ // Update counters.
+ for idx, state := range []connectivity.State{oldState, newState} {
+ updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+ switch state {
+ case connectivity.Ready:
+ cse.numReady += updateVal
+ case connectivity.Connecting:
+ cse.numConnecting += updateVal
+ case connectivity.TransientFailure:
+ cse.numTransientFailure += updateVal
+ }
+ }
+
+ // Evaluate.
+ if cse.numReady > 0 {
+ return connectivity.Ready
+ }
+ if cse.numConnecting > 0 {
+ return connectivity.Connecting
+ }
+ return connectivity.TransientFailure
+}
diff --git a/go/vendor/google.golang.org/grpc/balancer/base/base.go b/go/vendor/google.golang.org/grpc/balancer/base/base.go
new file mode 100644
index 0000000..012ace2
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package base defines a balancer base that can be used to build balancers with
+// different picking algorithms.
+//
+// The base balancer creates a new SubConn for each resolved address. The
+// provided picker will only be notified about READY SubConns.
+//
+// This package is the base of round_robin balancer, its purpose is to be used
+// to build round_robin like balancers with complex picking algorithms.
+// Balancers with more complicated logic should try to implement a balancer
+// builder from scratch.
+//
+// All APIs in this package are experimental.
+package base
+
+import (
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/resolver"
+)
+
+// PickerBuilder creates balancer.Picker.
+type PickerBuilder interface {
+ // Build takes a slice of ready SubConns, and returns a picker that will be
+ // used by gRPC to pick a SubConn.
+ Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
+}
+
+// NewBalancerBuilder returns a balancer builder. The balancers
+// built by this builder will use the picker builder to build pickers.
+func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
+ return &baseBuilder{
+ name: name,
+ pickerBuilder: pb,
+ }
+}
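base.NewBalancerBuilder turns any PickerBuilder into a full balancer.Builder; the round_robin package in the next file is exactly this pattern. Below is a hedged sketch of a hypothetical random-pick variant built the same way (the package, the randomPickerBuilder type, and the "random_example" name are assumptions for illustration, not part of the library):

```go
// Package random is a hypothetical example package; it is not part of grpc-go.
package random

import (
	"math/rand"
	"sync"

	"golang.org/x/net/context"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

type randomPickerBuilder struct{}

// Build receives only READY SubConns and snapshots them into a picker.
func (randomPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	var scs []balancer.SubConn
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &randomPicker{subConns: scs}
}

type randomPicker struct {
	mu       sync.Mutex
	subConns []balancer.SubConn
}

// Pick chooses a READY SubConn at random for each RPC.
func (p *randomPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.subConns) == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	p.mu.Lock()
	sc := p.subConns[rand.Intn(len(p.subConns))]
	p.mu.Unlock()
	return sc, nil, nil
}

func init() {
	// The builder can later be looked up with balancer.Get("random_example").
	balancer.Register(base.NewBalancerBuilder("random_example", randomPickerBuilder{}))
}
```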
diff --git a/go/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/go/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
new file mode 100644
index 0000000..2eda0a1
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is
+// installed as one of the default balancers in gRPC, so users don't need to
+// explicitly install this balancer.
+package roundrobin
+
+import (
+ "sync"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+// Name is the name of round_robin balancer.
+const Name = "round_robin"
+
+// newBuilder creates a new roundrobin balancer builder.
+func newBuilder() balancer.Builder {
+ return base.NewBalancerBuilder(Name, &rrPickerBuilder{})
+}
+
+func init() {
+ balancer.Register(newBuilder())
+}
+
+type rrPickerBuilder struct{}
+
+func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
+ grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
+ var scs []balancer.SubConn
+ for _, sc := range readySCs {
+ scs = append(scs, sc)
+ }
+ return &rrPicker{
+ subConns: scs,
+ }
+}
+
+type rrPicker struct {
+ // subConns is the snapshot of the roundrobin balancer when this picker was
+	// created. The slice is immutable. Each Pick() will do a round robin
+ // selection from it and return the selected SubConn.
+ subConns []balancer.SubConn
+
+ mu sync.Mutex
+ next int
+}
+
+func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ if len(p.subConns) <= 0 {
+ return nil, nil, balancer.ErrNoSubConnAvailable
+ }
+
+ p.mu.Lock()
+ sc := p.subConns[p.next]
+ p.next = (p.next + 1) % len(p.subConns)
+ p.mu.Unlock()
+ return sc, nil, nil
+}
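Since the init function above registers the builder globally, importing the package is enough to make it discoverable by name. A small, hedged sketch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/roundrobin"
)

func main() {
	// The import above runs the init() that registers the builder, so it can
	// be retrieved from the global registry by its exported Name.
	if b := balancer.Get(roundrobin.Name); b != nil {
		fmt.Println("registered balancer:", b.Name()) // prints "round_robin"
	}
}
```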
diff --git a/go/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/go/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
new file mode 100644
index 0000000..db6f0ae
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -0,0 +1,300 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+// scStateUpdate contains the subConn and the new state it changed to.
+type scStateUpdate struct {
+ sc balancer.SubConn
+ state connectivity.State
+}
+
+// scStateUpdateBuffer is an unbounded channel for scStateUpdate values.
+// TODO make a general purpose buffer that uses interface{}.
+type scStateUpdateBuffer struct {
+ c chan *scStateUpdate
+ mu sync.Mutex
+ backlog []*scStateUpdate
+}
+
+func newSCStateUpdateBuffer() *scStateUpdateBuffer {
+ return &scStateUpdateBuffer{
+ c: make(chan *scStateUpdate, 1),
+ }
+}
+
+func (b *scStateUpdateBuffer) put(t *scStateUpdate) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if len(b.backlog) == 0 {
+ select {
+ case b.c <- t:
+ return
+ default:
+ }
+ }
+ b.backlog = append(b.backlog, t)
+}
+
+func (b *scStateUpdateBuffer) load() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog[0] = nil
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+}
+
+// get returns the channel that the scStateUpdate will be sent to.
+//
+// Upon receiving, the caller should call load to send another
+// scStateUpdate onto the channel if there is any.
+func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate {
+ return b.c
+}
+
+// resolverUpdate contains the new resolved addresses or error if there's
+// any.
+type resolverUpdate struct {
+ addrs []resolver.Address
+ err error
+}
+
+// ccBalancerWrapper is a wrapper on top of cc for balancers.
+// It implements balancer.ClientConn interface.
+type ccBalancerWrapper struct {
+ cc *ClientConn
+ balancer balancer.Balancer
+ stateChangeQueue *scStateUpdateBuffer
+ resolverUpdateCh chan *resolverUpdate
+ done chan struct{}
+
+ mu sync.Mutex
+ subConns map[*acBalancerWrapper]struct{}
+}
+
+func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
+ ccb := &ccBalancerWrapper{
+ cc: cc,
+ stateChangeQueue: newSCStateUpdateBuffer(),
+ resolverUpdateCh: make(chan *resolverUpdate, 1),
+ done: make(chan struct{}),
+ subConns: make(map[*acBalancerWrapper]struct{}),
+ }
+ go ccb.watcher()
+ ccb.balancer = b.Build(ccb, bopts)
+ return ccb
+}
+
+// watcher calls balancer functions sequentially, so the balancer can be
+// implemented lock-free.
+func (ccb *ccBalancerWrapper) watcher() {
+ for {
+ select {
+ case t := <-ccb.stateChangeQueue.get():
+ ccb.stateChangeQueue.load()
+ select {
+ case <-ccb.done:
+ ccb.balancer.Close()
+ return
+ default:
+ }
+ ccb.balancer.HandleSubConnStateChange(t.sc, t.state)
+ case t := <-ccb.resolverUpdateCh:
+ select {
+ case <-ccb.done:
+ ccb.balancer.Close()
+ return
+ default:
+ }
+ ccb.balancer.HandleResolvedAddrs(t.addrs, t.err)
+ case <-ccb.done:
+ }
+
+ select {
+ case <-ccb.done:
+ ccb.balancer.Close()
+ ccb.mu.Lock()
+ scs := ccb.subConns
+ ccb.subConns = nil
+ ccb.mu.Unlock()
+ for acbw := range scs {
+ ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+ }
+ return
+ default:
+ }
+ }
+}
+
+func (ccb *ccBalancerWrapper) close() {
+ close(ccb.done)
+}
+
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ // When updating addresses for a SubConn, if the address in use is not in
+	// the new addresses, the old ac will be torn down and a new ac will be
+	// created. tearDown() generates a state change with Shutdown state, and we
+	// don't want the balancer to receive this state change. So before calling
+	// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
+	// this function will be called with (nil, Shutdown). We don't need to call
+	// the balancer method in this case.
+ if sc == nil {
+ return
+ }
+ ccb.stateChangeQueue.put(&scStateUpdate{
+ sc: sc,
+ state: s,
+ })
+}
+
+func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) {
+ select {
+ case <-ccb.resolverUpdateCh:
+ default:
+ }
+ ccb.resolverUpdateCh <- &resolverUpdate{
+ addrs: addrs,
+ err: err,
+ }
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+ if len(addrs) <= 0 {
+ return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+ }
+ ccb.mu.Lock()
+ defer ccb.mu.Unlock()
+ if ccb.subConns == nil {
+ return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
+ }
+ ac, err := ccb.cc.newAddrConn(addrs)
+ if err != nil {
+ return nil, err
+ }
+ acbw := &acBalancerWrapper{ac: ac}
+ acbw.ac.mu.Lock()
+ ac.acbw = acbw
+ acbw.ac.mu.Unlock()
+ ccb.subConns[acbw] = struct{}{}
+ return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+ acbw, ok := sc.(*acBalancerWrapper)
+ if !ok {
+ return
+ }
+ ccb.mu.Lock()
+ defer ccb.mu.Unlock()
+ if ccb.subConns == nil {
+ return
+ }
+ delete(ccb.subConns, acbw)
+ ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+}
+
+func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
+ ccb.mu.Lock()
+ defer ccb.mu.Unlock()
+ if ccb.subConns == nil {
+ return
+ }
+ ccb.cc.csMgr.updateState(s)
+ ccb.cc.blockingpicker.updatePicker(p)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
+ ccb.cc.resolveNow(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+ return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements balancer.SubConn interface.
+type acBalancerWrapper struct {
+ mu sync.Mutex
+ ac *addrConn
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+ acbw.mu.Lock()
+ defer acbw.mu.Unlock()
+ if len(addrs) <= 0 {
+ acbw.ac.tearDown(errConnDrain)
+ return
+ }
+ if !acbw.ac.tryUpdateAddrs(addrs) {
+ cc := acbw.ac.cc
+ acbw.ac.mu.Lock()
+ // Set old ac.acbw to nil so the Shutdown state update will be ignored
+ // by balancer.
+ //
+ // TODO(bar) the state transition could be wrong when tearDown() old ac
+ // and creating new ac, fix the transition.
+ acbw.ac.acbw = nil
+ acbw.ac.mu.Unlock()
+ acState := acbw.ac.getState()
+ acbw.ac.tearDown(errConnDrain)
+
+ if acState == connectivity.Shutdown {
+ return
+ }
+
+ ac, err := cc.newAddrConn(addrs)
+ if err != nil {
+ grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
+ return
+ }
+ acbw.ac = ac
+ ac.mu.Lock()
+ ac.acbw = acbw
+ ac.mu.Unlock()
+ if acState != connectivity.Idle {
+ ac.connect()
+ }
+ }
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+ acbw.mu.Lock()
+ defer acbw.mu.Unlock()
+ acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
+ acbw.mu.Lock()
+ defer acbw.mu.Unlock()
+ return acbw.ac
+}
diff --git a/go/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/go/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
new file mode 100644
index 0000000..faabf87
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
@@ -0,0 +1,375 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "strings"
+ "sync"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/status"
+)
+
+type balancerWrapperBuilder struct {
+ b Balancer // The v1 balancer.
+}
+
+func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+ targetAddr := cc.Target()
+ targetSplitted := strings.Split(targetAddr, ":///")
+ if len(targetSplitted) >= 2 {
+ targetAddr = targetSplitted[1]
+ }
+
+ bwb.b.Start(targetAddr, BalancerConfig{
+ DialCreds: opts.DialCreds,
+ Dialer: opts.Dialer,
+ })
+ _, pickfirst := bwb.b.(*pickFirst)
+ bw := &balancerWrapper{
+ balancer: bwb.b,
+ pickfirst: pickfirst,
+ cc: cc,
+ targetAddr: targetAddr,
+ startCh: make(chan struct{}),
+ conns: make(map[resolver.Address]balancer.SubConn),
+ connSt: make(map[balancer.SubConn]*scState),
+ csEvltr: &connectivityStateEvaluator{},
+ state: connectivity.Idle,
+ }
+ cc.UpdateBalancerState(connectivity.Idle, bw)
+ go bw.lbWatcher()
+ return bw
+}
+
+func (bwb *balancerWrapperBuilder) Name() string {
+ return "wrapper"
+}
+
+type scState struct {
+ addr Address // The v1 address type.
+ s connectivity.State
+ down func(error)
+}
+
+type balancerWrapper struct {
+ balancer Balancer // The v1 balancer.
+ pickfirst bool
+
+ cc balancer.ClientConn
+ targetAddr string // Target without the scheme.
+
+ // To aggregate the connectivity state.
+ csEvltr *connectivityStateEvaluator
+ state connectivity.State
+
+ mu sync.Mutex
+ conns map[resolver.Address]balancer.SubConn
+ connSt map[balancer.SubConn]*scState
+ // This channel is closed when handling the first resolver result.
+ // lbWatcher blocks until this is closed, to avoid race between
+ // - NewSubConn is created, cc wants to notify balancer of state changes;
+	// - Build hasn't returned, cc doesn't have access to the balancer.
+ startCh chan struct{}
+}
+
+// lbWatcher watches the Notify channel of the balancer and manages
+// connections accordingly.
+func (bw *balancerWrapper) lbWatcher() {
+ <-bw.startCh
+ notifyCh := bw.balancer.Notify()
+ if notifyCh == nil {
+ // There's no resolver in the balancer. Connect directly.
+ a := resolver.Address{
+ Addr: bw.targetAddr,
+ Type: resolver.Backend,
+ }
+ sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
+ } else {
+ bw.mu.Lock()
+ bw.conns[a] = sc
+ bw.connSt[sc] = &scState{
+ addr: Address{Addr: bw.targetAddr},
+ s: connectivity.Idle,
+ }
+ bw.mu.Unlock()
+ sc.Connect()
+ }
+ return
+ }
+
+ for addrs := range notifyCh {
+ grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs)
+ if bw.pickfirst {
+ var (
+ oldA resolver.Address
+ oldSC balancer.SubConn
+ )
+ bw.mu.Lock()
+ for oldA, oldSC = range bw.conns {
+ break
+ }
+ bw.mu.Unlock()
+ if len(addrs) <= 0 {
+ if oldSC != nil {
+ // Teardown old sc.
+ bw.mu.Lock()
+ delete(bw.conns, oldA)
+ delete(bw.connSt, oldSC)
+ bw.mu.Unlock()
+ bw.cc.RemoveSubConn(oldSC)
+ }
+ continue
+ }
+
+ var newAddrs []resolver.Address
+ for _, a := range addrs {
+ newAddr := resolver.Address{
+ Addr: a.Addr,
+ Type: resolver.Backend, // All addresses from balancer are all backends.
+ ServerName: "",
+ Metadata: a.Metadata,
+ }
+ newAddrs = append(newAddrs, newAddr)
+ }
+ if oldSC == nil {
+ // Create new sc.
+ sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
+ } else {
+ bw.mu.Lock()
+ // For pickfirst, there should be only one SubConn, so the
+ // address doesn't matter. All states updating (up and down)
+ // and picking should all happen on that only SubConn.
+ bw.conns[resolver.Address{}] = sc
+ bw.connSt[sc] = &scState{
+ addr: addrs[0], // Use the first address.
+ s: connectivity.Idle,
+ }
+ bw.mu.Unlock()
+ sc.Connect()
+ }
+ } else {
+ bw.mu.Lock()
+ bw.connSt[oldSC].addr = addrs[0]
+ bw.mu.Unlock()
+ oldSC.UpdateAddresses(newAddrs)
+ }
+ } else {
+ var (
+ add []resolver.Address // Addresses need to setup connections.
+ del []balancer.SubConn // Connections need to tear down.
+ )
+ resAddrs := make(map[resolver.Address]Address)
+ for _, a := range addrs {
+ resAddrs[resolver.Address{
+ Addr: a.Addr,
+ Type: resolver.Backend, // All addresses from balancer are all backends.
+ ServerName: "",
+ Metadata: a.Metadata,
+ }] = a
+ }
+ bw.mu.Lock()
+ for a := range resAddrs {
+ if _, ok := bw.conns[a]; !ok {
+ add = append(add, a)
+ }
+ }
+ for a, c := range bw.conns {
+ if _, ok := resAddrs[a]; !ok {
+ del = append(del, c)
+ delete(bw.conns, a)
+ // Keep the state of this sc in bw.connSt until its state becomes Shutdown.
+ }
+ }
+ bw.mu.Unlock()
+ for _, a := range add {
+ sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
+ } else {
+ bw.mu.Lock()
+ bw.conns[a] = sc
+ bw.connSt[sc] = &scState{
+ addr: resAddrs[a],
+ s: connectivity.Idle,
+ }
+ bw.mu.Unlock()
+ sc.Connect()
+ }
+ }
+ for _, c := range del {
+ bw.cc.RemoveSubConn(c)
+ }
+ }
+ }
+}
+
+func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ bw.mu.Lock()
+ defer bw.mu.Unlock()
+ scSt, ok := bw.connSt[sc]
+ if !ok {
+ return
+ }
+ if s == connectivity.Idle {
+ sc.Connect()
+ }
+ oldS := scSt.s
+ scSt.s = s
+ if oldS != connectivity.Ready && s == connectivity.Ready {
+ scSt.down = bw.balancer.Up(scSt.addr)
+ } else if oldS == connectivity.Ready && s != connectivity.Ready {
+ if scSt.down != nil {
+ scSt.down(errConnClosing)
+ }
+ }
+ sa := bw.csEvltr.recordTransition(oldS, s)
+ if bw.state != sa {
+ bw.state = sa
+ }
+ bw.cc.UpdateBalancerState(bw.state, bw)
+ if s == connectivity.Shutdown {
+ // Remove state for this sc.
+ delete(bw.connSt, sc)
+ }
+ return
+}
+
+func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
+ bw.mu.Lock()
+ defer bw.mu.Unlock()
+ select {
+ case <-bw.startCh:
+ default:
+ close(bw.startCh)
+ }
+ // There should be a resolver inside the balancer.
+ // All updates here, if any, are ignored.
+ return
+}
+
+func (bw *balancerWrapper) Close() {
+ bw.mu.Lock()
+ defer bw.mu.Unlock()
+ select {
+ case <-bw.startCh:
+ default:
+ close(bw.startCh)
+ }
+ bw.balancer.Close()
+ return
+}
+
+// The picker is the balancerWrapper itself.
+// Pick should never return ErrNoSubConnAvailable.
+// It either blocks or returns error, consistent with v1 balancer Get().
+func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ failfast := true // Default failfast is true.
+ if ss, ok := rpcInfoFromContext(ctx); ok {
+ failfast = ss.failfast
+ }
+ a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
+ if err != nil {
+ return nil, nil, err
+ }
+ var done func(balancer.DoneInfo)
+ if p != nil {
+ done = func(i balancer.DoneInfo) { p() }
+ }
+ var sc balancer.SubConn
+ bw.mu.Lock()
+ defer bw.mu.Unlock()
+ if bw.pickfirst {
+ // Get the first sc in conns.
+ for _, sc = range bw.conns {
+ break
+ }
+ } else {
+ var ok bool
+ sc, ok = bw.conns[resolver.Address{
+ Addr: a.Addr,
+ Type: resolver.Backend,
+ ServerName: "",
+ Metadata: a.Metadata,
+ }]
+ if !ok && failfast {
+ return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
+ }
+ if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) {
+ // If the returned sc is not ready and RPC is failfast,
+ // return error, and this RPC will fail.
+ return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available")
+ }
+ }
+
+ return sc, done, nil
+}
+
+// connectivityStateEvaluator gets updated by addrConns when their
+// states transition, based on which it evaluates the state of
+// ClientConn.
+type connectivityStateEvaluator struct {
+ mu sync.Mutex
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
+ numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// recordTransition records state change happening in every subConn and based on
+// that it evaluates what aggregated state should be.
+// It can only transition between Ready, Connecting and TransientFailure. Other states,
+// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
+// before any subConn is created ClientConn is in idle state. In the end when ClientConn
+// closes it is in Shutdown state.
+// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state.
+func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
+ cse.mu.Lock()
+ defer cse.mu.Unlock()
+
+ // Update counters.
+ for idx, state := range []connectivity.State{oldState, newState} {
+ updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+ switch state {
+ case connectivity.Ready:
+ cse.numReady += updateVal
+ case connectivity.Connecting:
+ cse.numConnecting += updateVal
+ case connectivity.TransientFailure:
+ cse.numTransientFailure += updateVal
+ }
+ }
+
+ // Evaluate.
+ if cse.numReady > 0 {
+ return connectivity.Ready
+ }
+ if cse.numConnecting > 0 {
+ return connectivity.Connecting
+ }
+ return connectivity.TransientFailure
+}
diff --git a/go/vendor/google.golang.org/grpc/call.go b/go/vendor/google.golang.org/grpc/call.go
index ba17721..13cf8b1 100644
--- a/go/vendor/google.golang.org/grpc/call.go
+++ b/go/vendor/google.golang.org/grpc/call.go
@@ -1,48 +1,35 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
package grpc
import (
- "bytes"
"io"
- "math"
"time"
"golang.org/x/net/context"
"golang.org/x/net/trace"
+ "google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
@@ -72,14 +59,27 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
}
}
for {
- if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil {
+ if c.maxReceiveMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+ }
+
+ // Set dc if it exists and matches the message compression type used,
+ // otherwise set comp if a registered compressor exists for it.
+ var comp encoding.Compressor
+ var dc Decompressor
+ if rc := stream.RecvCompress(); dopts.dc != nil && dopts.dc.Type() == rc {
+ dc = dopts.dc
+ } else if rc != "" && rc != encoding.Identity {
+ comp = encoding.GetCompressor(rc)
+ }
+ if err = recv(p, dopts.codec, stream, dc, reply, *c.maxReceiveMessageSize, inPayload, comp); err != nil {
if err == io.EOF {
break
}
return
}
}
- if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
+ if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
// TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
// Fix the order if necessary.
dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
@@ -89,11 +89,7 @@ func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTran
}
// sendRequest writes out various information of an RPC such as Context and Message.
-func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
- stream, err := t.NewStream(ctx, callHdr)
- if err != nil {
- return nil, err
- }
+func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
defer func() {
if err != nil {
// If err is connection error, t will be closed, no need to close stream here.
@@ -103,22 +99,35 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
}
}()
var (
- cbuf *bytes.Buffer
outPayload *stats.OutPayload
)
- if compressor != nil {
- cbuf = new(bytes.Buffer)
- }
if dopts.copts.StatsHandler != nil {
outPayload = &stats.OutPayload{
Client: true,
}
}
- outBuf, err := encode(dopts.codec, args, compressor, cbuf, outPayload)
+ // Set comp and clear compressor if a registered compressor matches the type
+ // specified via UseCompressor. (And error if a matching compressor is not
+ // registered.)
+ var comp encoding.Compressor
+ if ct := c.compressorType; ct != "" && ct != encoding.Identity {
+ compressor = nil // Disable the legacy compressor.
+ comp = encoding.GetCompressor(ct)
+ if comp == nil {
+ return status.Errorf(codes.Internal, "grpc: Compressor is not installed for grpc-encoding %q", ct)
+ }
+ }
+ hdr, data, err := encode(dopts.codec, args, compressor, outPayload, comp)
if err != nil {
- return nil, Errorf(codes.Internal, "grpc: %v", err)
+ return err
+ }
+ if c.maxSendMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
- err = t.Write(stream, outBuf, opts)
+ if len(data) > *c.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
+ }
+ err = t.Write(stream, hdr, data, opts)
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
@@ -127,42 +136,57 @@ func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor,
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
// recvResponse to get the final status.
if err != nil && err != io.EOF {
- return nil, err
+ return err
}
// Sent successfully.
- return stream, nil
+ return nil
}
-// Invoke sends the RPC request on the wire and returns after response is received.
-// Invoke is called by generated code. Also users can call Invoke directly when it
-// is really needed in their use cases.
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+// Invoke sends the RPC request on the wire and returns after response is
+// received. This is typically called by generated code.
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
if cc.dopts.unaryInt != nil {
return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
}
return invoke(ctx, method, args, reply, cc, opts...)
}
+// Invoke sends the RPC request on the wire and returns after response is
+// received. This is typically called by generated code.
+//
+// DEPRECATED: Use ClientConn.Invoke instead.
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+ return cc.Invoke(ctx, method, args, reply, opts...)
+}
+
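With the new ClientConn.Invoke method above, the package-level Invoke becomes a thin deprecated wrapper. For callers that issue unary RPCs without generated stubs, here is a hedged sketch (the helper package and function name are illustrative assumptions; real callers normally go through generated code):

```go
// Package rpcutil is a hypothetical helper package used only for illustration.
package rpcutil

import (
	"time"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// CallUnary issues a unary RPC without generated stub code. The fullMethod
// path (e.g. "/pkg.Service/Method") and the proto messages are supplied by
// the caller.
func CallUnary(cc *grpc.ClientConn, fullMethod string, req, reply proto.Message) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// The older, now-deprecated form would be:
	//   grpc.Invoke(ctx, fullMethod, req, reply, cc)
	return cc.Invoke(ctx, fullMethod, req, reply)
}
```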
func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
- c := defaultCallInfo
- if mc, ok := cc.getMethodConfig(method); ok {
- c.failFast = !mc.WaitForReady
- if mc.Timeout > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
- defer cancel()
- }
+ c := defaultCallInfo()
+ mc := cc.GetMethodConfig(method)
+ if mc.WaitForReady != nil {
+ c.failFast = !*mc.WaitForReady
}
+
+ if mc.Timeout != nil && *mc.Timeout >= 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+ defer cancel()
+ }
+
+ opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
- if err := o.before(&c); err != nil {
+ if err := o.before(c); err != nil {
return toRPCErr(err)
}
}
defer func() {
for _, o := range opts {
- o.after(&c)
+ o.after(c)
}
}()
+
+ c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
+ c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+
if EnableTracing {
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
defer c.traceInfo.tr.Finish()
@@ -179,97 +203,120 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}
}()
}
+ ctx = newContextWithRPCInfo(ctx, c.failFast)
sh := cc.dopts.copts.StatsHandler
if sh != nil {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
- }
- defer func() {
- if sh != nil {
+ defer func() {
end := &stats.End{
Client: true,
EndTime: time.Now(),
Error: e,
}
sh.HandleRPC(ctx, end)
- }
- }()
+ }()
+ }
topts := &transport.Options{
Last: true,
Delay: false,
}
+ callHdr := &transport.CallHdr{
+ Host: cc.authority,
+ Method: method,
+ }
+ if c.creds != nil {
+ callHdr.Creds = c.creds
+ }
+ if c.compressorType != "" {
+ callHdr.SendCompress = c.compressorType
+ } else if cc.dopts.cp != nil {
+ callHdr.SendCompress = cc.dopts.cp.Type()
+ }
+ firstAttempt := true
+
for {
- var (
- err error
- t transport.ClientTransport
- stream *transport.Stream
- // Record the put handler from Balancer.Get(...). It is called once the
- // RPC has completed or failed.
- put func()
- )
- // TODO(zhaoq): Need a formal spec of fail-fast.
- callHdr := &transport.CallHdr{
- Host: cc.authority,
- Method: method,
- }
- if cc.dopts.cp != nil {
- callHdr.SendCompress = cc.dopts.cp.Type()
+ // Check whether the context has expired. This will prevent us from
+ // looping forever if an error occurs for wait-for-ready RPCs where no data
+ // is sent on the wire.
+ select {
+ case <-ctx.Done():
+ return toRPCErr(ctx.Err())
+ default:
}
- gopts := BalancerGetOptions{
- BlockingWait: !c.failFast,
+ // Record the done handler from Balancer.Get(...). It is called once the
+ // RPC has completed or failed.
+ t, done, err := cc.getTransport(ctx, c.failFast)
+ if err != nil {
+ return err
}
- t, put, err = cc.getTransport(ctx, gopts)
+ stream, err := t.NewStream(ctx, callHdr)
if err != nil {
- // TODO(zhaoq): Probably revisit the error handling.
- if _, ok := err.(*rpcError); ok {
- return err
+ if done != nil {
+ done(balancer.DoneInfo{Err: err})
}
- if err == errConnClosing || err == errConnUnavailable {
- if c.failFast {
- return Errorf(codes.Unavailable, "%v", err)
- }
+ // In the event of any error from NewStream, we never attempted to write
+ // anything to the wire, so we can retry indefinitely for non-fail-fast
+ // RPCs.
+ if !c.failFast {
continue
}
- // All the other errors are treated as Internal errors.
- return Errorf(codes.Internal, "%v", err)
+ return toRPCErr(err)
+ }
+ if peer, ok := peer.FromContext(stream.Context()); ok {
+ c.peer = peer
}
if c.traceInfo.tr != nil {
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
}
- stream, err = sendRequest(ctx, cc.dopts, cc.dopts.cp, callHdr, t, args, topts)
+ err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
if err != nil {
- if put != nil {
- put()
- put = nil
+ if done != nil {
+ done(balancer.DoneInfo{
+ Err: err,
+ BytesSent: true,
+ BytesReceived: stream.BytesReceived(),
+ })
}
// Retry a non-failfast RPC when
- // i) there is a connection error; or
- // ii) the server started to drain before this RPC was initiated.
- if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
- if c.failFast {
- return toRPCErr(err)
+ // i) the server started to drain before this RPC was initiated.
+ // ii) the server refused the stream.
+ if !c.failFast && stream.Unprocessed() {
+ // In this case, the server did not receive the data, but we still
+ // created wire traffic, so we should not retry indefinitely.
+ if firstAttempt {
+ // TODO: Add a field to header for grpc-transparent-retry-attempts
+ firstAttempt = false
+ continue
}
- continue
+ // Otherwise, give up and return an error anyway.
}
return toRPCErr(err)
}
- err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
+ err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
if err != nil {
- if put != nil {
- put()
- put = nil
+ if done != nil {
+ done(balancer.DoneInfo{
+ Err: err,
+ BytesSent: true,
+ BytesReceived: stream.BytesReceived(),
+ })
}
- if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
- if c.failFast {
- return toRPCErr(err)
+ if !c.failFast && stream.Unprocessed() {
+ // In these cases, the server did not receive the data, but we still
+ // created wire traffic, so we should not retry indefinitely.
+ if firstAttempt {
+ // TODO: Add a field to header for grpc-transparent-retry-attempts
+ firstAttempt = false
+ continue
}
- continue
+ // Otherwise, give up and return an error anyway.
}
return toRPCErr(err)
}
@@ -277,10 +324,23 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
}
t.CloseStream(stream, nil)
- if put != nil {
- put()
- put = nil
+ err = stream.Status().Err()
+ if done != nil {
+ done(balancer.DoneInfo{
+ Err: err,
+ BytesSent: true,
+ BytesReceived: stream.BytesReceived(),
+ })
+ }
+ if !c.failFast && stream.Unprocessed() {
+ // In these cases, the server did not receive the data, but we still
+ // created wire traffic, so we should not retry indefinitely.
+ if firstAttempt {
+ // TODO: Add a field to header for grpc-transparent-retry-attempts
+ firstAttempt = false
+ continue
+ }
}
- return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
+ return err
}
}
diff --git a/go/vendor/google.golang.org/grpc/clientconn.go b/go/vendor/google.golang.org/grpc/clientconn.go
index 146166a..bfbef36 100644
--- a/go/vendor/google.golang.org/grpc/clientconn.go
+++ b/go/vendor/google.golang.org/grpc/clientconn.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -36,15 +21,24 @@ package grpc
import (
"errors"
"fmt"
+ "math"
"net"
+ "reflect"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/trace"
+ "google.golang.org/grpc/balancer"
+ _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+ "google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/resolver"
+ _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
+ _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
"google.golang.org/grpc/stats"
"google.golang.org/grpc/transport"
)
@@ -55,10 +49,22 @@ var (
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout.
- // DEPRECATED: Please use context.DeadlineExceeded instead. This error will be
- // removed in Q1 2017.
+ // DEPRECATED: Please use context.DeadlineExceeded instead.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+ // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
+ errConnDrain = errors.New("grpc: the connection is drained")
+ // errConnClosing indicates that the connection is closing.
+ errConnClosing = errors.New("grpc: the connection is closing")
+ // errConnUnavailable indicates that the connection is unavailable.
+ errConnUnavailable = errors.New("grpc: the connection is unavailable")
+ // errBalancerClosed indicates that the balancer is closed.
+ errBalancerClosed = errors.New("grpc: balancer is closed")
+ // minimum time to give a connection to complete
+ minConnectTimeout = 20 * time.Second
+)
+// The following errors are returned from Dial and DialContext
+var (
// errNoTransportSecurity indicates that there is no transport security
// being set for ClientConn. Users should either set one or explicitly
// call WithInsecure DialOption to disable security.
@@ -72,37 +78,94 @@ var (
errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
// errNetworkIO indicates that the connection is down due to some network I/O error.
errNetworkIO = errors.New("grpc: failed with network I/O error")
- // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
- errConnDrain = errors.New("grpc: the connection is drained")
- // errConnClosing indicates that the connection is closing.
- errConnClosing = errors.New("grpc: the connection is closing")
- // errConnUnavailable indicates that the connection is unavailable.
- errConnUnavailable = errors.New("grpc: the connection is unavailable")
- errNoAddr = errors.New("grpc: there is no address available to dial")
- // minimum time to give a connection to complete
- minConnectTimeout = 20 * time.Second
)
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
- unaryInt UnaryClientInterceptor
- streamInt StreamClientInterceptor
- codec Codec
- cp Compressor
- dc Decompressor
- bs backoffStrategy
- balancer Balancer
- block bool
- insecure bool
- timeout time.Duration
- scChan <-chan ServiceConfig
- copts transport.ConnectOptions
+ unaryInt UnaryClientInterceptor
+ streamInt StreamClientInterceptor
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ bs backoffStrategy
+ block bool
+ insecure bool
+ timeout time.Duration
+ scChan <-chan ServiceConfig
+ copts transport.ConnectOptions
+ callOptions []CallOption
+ // This is used by v1 balancer dial option WithBalancer to support v1
+ // balancer, and also by WithBalancerName dial option.
+ balancerBuilder balancer.Builder
+ // This is to support grpclb.
+ resolverBuilder resolver.Builder
+ // Custom user options for resolver.Build.
+ resolverBuildUserOptions interface{}
+ waitForHandshake bool
}
+const (
+ defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
+ defaultClientMaxSendMessageSize = math.MaxInt32
+)
+
// DialOption configures how we set up the connection.
type DialOption func(*dialOptions)
+// WithWaitForHandshake blocks until the initial settings frame is received from the
+// server before assigning RPCs to the connection.
+// Experimental API.
+func WithWaitForHandshake() DialOption {
+ return func(o *dialOptions) {
+ o.waitForHandshake = true
+ }
+}
+
+// WithWriteBufferSize lets you set the size of the write buffer; this determines how much data can be batched
+// before doing a write on the wire.
+func WithWriteBufferSize(s int) DialOption {
+ return func(o *dialOptions) {
+ o.copts.WriteBufferSize = s
+ }
+}
+
+// WithReadBufferSize lets you set the size of the read buffer; this determines how much data can be read at most
+// for each read syscall.
+func WithReadBufferSize(s int) DialOption {
+ return func(o *dialOptions) {
+ o.copts.ReadBufferSize = s
+ }
+}
+
+// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func WithInitialWindowSize(s int32) DialOption {
+ return func(o *dialOptions) {
+ o.copts.InitialWindowSize = s
+ }
+}
+
+// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func WithInitialConnWindowSize(s int32) DialOption {
+ return func(o *dialOptions) {
+ o.copts.InitialConnWindowSize = s
+ }
+}
+
+// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
+func WithMaxMsgSize(s int) DialOption {
+ return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
+}
+
+// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection.
+func WithDefaultCallOptions(cos ...CallOption) DialOption {
+ return func(o *dialOptions) {
+ o.callOptions = append(o.callOptions, cos...)
+ }
+}
+
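// A minimal usage sketch (illustrative only): setting connection-wide default
// CallOptions at dial time. The target and the 16MB limit are placeholder values;
// assumed import: "google.golang.org/grpc".
func dialWithDefaults(target string) (*grpc.ClientConn, error) {
	// Every RPC on this connection may receive messages up to 16MB unless a
	// per-call option overrides it.
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
	)
}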
// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
func WithCodec(c Codec) DialOption {
return func(o *dialOptions) {
@@ -110,30 +173,80 @@ func WithCodec(c Codec) DialOption {
}
}
-// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message
-// compressor.
+// WithCompressor returns a DialOption which sets a Compressor to use for
+// message compression. It has lower priority than the compressor set by
+// the UseCompressor CallOption.
+//
+// Deprecated: use UseCompressor instead.
func WithCompressor(cp Compressor) DialOption {
return func(o *dialOptions) {
o.cp = cp
}
}
-// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating
-// message decompressor.
+// WithDecompressor returns a DialOption which sets a Decompressor to use for
+// incoming message decompression. If incoming response messages are encoded
+// using the decompressor's Type(), it will be used. Otherwise, the message
+// encoding will be used to look up the compressor registered via
+// encoding.RegisterCompressor, which will then be used to decompress the
+// message. If no compressor is registered for the encoding, an Unimplemented
+// status error will be returned.
+//
+// Deprecated: use encoding.RegisterCompressor instead.
func WithDecompressor(dc Decompressor) DialOption {
return func(o *dialOptions) {
o.dc = dc
}
}
-// WithBalancer returns a DialOption which sets a load balancer.
+// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
+// Name resolver will be ignored if this DialOption is specified.
+//
+// Deprecated: use the new balancer APIs in balancer package and WithBalancerName.
func WithBalancer(b Balancer) DialOption {
return func(o *dialOptions) {
- o.balancer = b
+ o.balancerBuilder = &balancerWrapperBuilder{
+ b: b,
+ }
+ }
+}
+
+// WithBalancerName sets the balancer that the ClientConn will be initialized
+// with. Balancer registered with balancerName will be used. This function
+// panics if no balancer was registered by balancerName.
+//
+// The balancer cannot be overridden by balancer option specified by service
+// config.
+//
+// This is an EXPERIMENTAL API.
+func WithBalancerName(balancerName string) DialOption {
+ builder := balancer.Get(balancerName)
+ if builder == nil {
+ panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
+ }
+ return func(o *dialOptions) {
+ o.balancerBuilder = builder
+ }
+}
+
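// A minimal usage sketch (illustrative only): picking a registered balancer by
// name at dial time. The name "round_robin" is assumed to be the one registered
// by the balancer/roundrobin package imported above; the target is a placeholder.
func dialRoundRobin(target string) (*grpc.ClientConn, error) {
	// WithBalancerName panics if no balancer is registered under the given name.
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithBalancerName("round_robin"),
	)
}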
+// withResolverBuilder is only for grpclb.
+func withResolverBuilder(b resolver.Builder) DialOption {
+ return func(o *dialOptions) {
+ o.resolverBuilder = b
+ }
+}
+
+// WithResolverUserOptions returns a DialOption which sets the UserOptions
+// field of resolver's BuildOption.
+func WithResolverUserOptions(userOpt interface{}) DialOption {
+ return func(o *dialOptions) {
+ o.resolverBuildUserOptions = userOpt
}
}
// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
+// DEPRECATED: service config should be received through name resolver, as specified here.
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
return func(o *dialOptions) {
o.scChan = c
@@ -158,7 +271,7 @@ func WithBackoffConfig(b BackoffConfig) DialOption {
return withBackoff(b)
}
-// withBackoff sets the backoff strategy used for retries after a
+// withBackoff sets the backoff strategy used for connection retries after a
// failed connection attempt.
//
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
@@ -194,7 +307,7 @@ func WithTransportCredentials(creds credentials.TransportCredentials) DialOption
}
// WithPerRPCCredentials returns a DialOption which sets
-// credentials which will place auth state on each outbound RPC.
+// credentials and places auth state on each outbound RPC.
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
return func(o *dialOptions) {
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
@@ -203,24 +316,30 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
// initially. This is valid if and only if WithBlock() is present.
+// Deprecated: use DialContext and context.WithTimeout instead.
func WithTimeout(d time.Duration) DialOption {
return func(o *dialOptions) {
o.timeout = d
}
}
+func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Dialer = f
+ }
+}
+
// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
// Temporary() method to decide if it should try to reconnect to the network address.
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
- return func(o *dialOptions) {
- o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
+ return withContextDialer(
+ func(ctx context.Context, addr string) (net.Conn, error) {
if deadline, ok := ctx.Deadline(); ok {
return f(addr, deadline.Sub(time.Now()))
}
return f(addr, 0)
- }
- }
+ })
}
// WithStatsHandler returns a DialOption that specifies the stats handler
@@ -231,7 +350,7 @@ func WithStatsHandler(h stats.Handler) DialOption {
}
}
-// FailOnNonTempDialError returns a DialOption that specified if gRPC fails on non-temporary dial errors.
+// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors.
// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network
// address and won't try to reconnect.
// The default value of FailOnNonTempDialError is false.
@@ -249,6 +368,13 @@ func WithUserAgent(s string) DialOption {
}
}
+// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport.
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
+ return func(o *dialOptions) {
+ o.copts.KeepaliveParams = kp
+ }
+}
+
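// A minimal usage sketch (illustrative only): enabling client-side keepalive
// pings at dial time. The durations are placeholder values and the field names
// are assumed from the keepalive package; assumed imports: "time",
// "google.golang.org/grpc" and "google.golang.org/grpc/keepalive".
func dialWithKeepalive(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second, // ping after 30s without activity
			Timeout:             10 * time.Second, // wait 10s for the ping ack
			PermitWithoutStream: true,             // ping even when no RPCs are active
		}),
	)
}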
// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
return func(o *dialOptions) {
@@ -263,25 +389,69 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
}
}
+// WithAuthority returns a DialOption that specifies the value to be used as
+// the :authority pseudo-header. This value only works with WithInsecure and
+// has no effect if TransportCredentials are present.
+func WithAuthority(a string) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Authority = a
+ }
+}
+
// Dial creates a client connection to the given target.
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
return DialContext(context.Background(), target, opts...)
}
// DialContext creates a client connection to the given target. ctx can be used to
-// cancel or expire the pending connecting. Once this function returns, the
+// cancel or expire the pending connection. Once this function returns, the
// cancellation and expiration of ctx will be noop. Users should call ClientConn.Close
// to terminate all the pending operations after this function returns.
-// This is the EXPERIMENTAL API.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
target: target,
- conns: make(map[Address]*addrConn),
+ csMgr: &connectivityStateManager{},
+ conns: make(map[*addrConn]struct{}),
+
+ blockingpicker: newPickerWrapper(),
}
cc.ctx, cc.cancel = context.WithCancel(context.Background())
+
for _, opt := range opts {
opt(&cc.dopts)
}
+
+ if !cc.dopts.insecure {
+ if cc.dopts.copts.TransportCredentials == nil {
+ return nil, errNoTransportSecurity
+ }
+ } else {
+ if cc.dopts.copts.TransportCredentials != nil {
+ return nil, errCredentialsConflict
+ }
+ for _, cd := range cc.dopts.copts.PerRPCCredentials {
+ if cd.RequireTransportSecurity() {
+ return nil, errTransportCredentialsMissing
+ }
+ }
+ }
+
+ cc.mkp = cc.dopts.copts.KeepaliveParams
+
+ if cc.dopts.copts.Dialer == nil {
+ cc.dopts.copts.Dialer = newProxyDialer(
+ func(ctx context.Context, addr string) (net.Conn, error) {
+ return dialContext(ctx, "tcp", addr)
+ },
+ )
+ }
+
+ if cc.dopts.copts.UserAgent != "" {
+ cc.dopts.copts.UserAgent += " " + grpcUA
+ } else {
+ cc.dopts.copts.UserAgent = grpcUA
+ }
+
if cc.dopts.timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
@@ -300,15 +470,16 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
+ scSet := false
if cc.dopts.scChan != nil {
- // Wait for the initial service config.
+ // Try to get an initial service config.
select {
case sc, ok := <-cc.dopts.scChan:
if ok {
cc.sc = sc
+ scSet = true
}
- case <-ctx.Done():
- return nil, ctx.Err()
+ default:
}
}
// Set defaults.
@@ -318,110 +489,114 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if cc.dopts.bs == nil {
cc.dopts.bs = DefaultBackoffConfig
}
+ cc.parsedTarget = parseTarget(cc.target)
creds := cc.dopts.copts.TransportCredentials
if creds != nil && creds.Info().ServerName != "" {
cc.authority = creds.Info().ServerName
+ } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
+ cc.authority = cc.dopts.copts.Authority
} else {
- colonPos := strings.LastIndex(target, ":")
- if colonPos == -1 {
- colonPos = len(target)
- }
- cc.authority = target[:colonPos]
+ // Use endpoint from "scheme://authority/endpoint" as the default
+ // authority for ClientConn.
+ cc.authority = cc.parsedTarget.Endpoint
}
- var ok bool
- waitC := make(chan error, 1)
- go func() {
- var addrs []Address
- if cc.dopts.balancer == nil && cc.sc.LB != nil {
- cc.dopts.balancer = cc.sc.LB
- }
- if cc.dopts.balancer == nil {
- // Connect to target directly if balancer is nil.
- addrs = append(addrs, Address{Addr: target})
- } else {
- var credsClone credentials.TransportCredentials
- if creds != nil {
- credsClone = creds.Clone()
- }
- config := BalancerConfig{
- DialCreds: credsClone,
- }
- if err := cc.dopts.balancer.Start(target, config); err != nil {
- waitC <- err
- return
- }
- ch := cc.dopts.balancer.Notify()
- if ch == nil {
- // There is no name resolver installed.
- addrs = append(addrs, Address{Addr: target})
- } else {
- addrs, ok = <-ch
- if !ok || len(addrs) == 0 {
- waitC <- errNoAddr
- return
- }
- }
- }
- for _, a := range addrs {
- if err := cc.resetAddrConn(a, false, nil); err != nil {
- waitC <- err
- return
+
+ if cc.dopts.scChan != nil && !scSet {
+ // Blocking wait for the initial service config.
+ select {
+ case sc, ok := <-cc.dopts.scChan:
+ if ok {
+ cc.sc = sc
}
- }
- close(waitC)
- }()
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case err := <-waitC:
- if err != nil {
- return nil, err
+ case <-ctx.Done():
+ return nil, ctx.Err()
}
}
+ if cc.dopts.scChan != nil {
+ go cc.scWatcher()
+ }
- // If balancer is nil or balancer.Notify() is nil, ok will be false here.
- // The lbWatcher goroutine will not be created.
- if ok {
- go cc.lbWatcher()
+ var credsClone credentials.TransportCredentials
+ if creds := cc.dopts.copts.TransportCredentials; creds != nil {
+ credsClone = creds.Clone()
+ }
+ cc.balancerBuildOpts = balancer.BuildOptions{
+ DialCreds: credsClone,
+ Dialer: cc.dopts.copts.Dialer,
}
- if cc.dopts.scChan != nil {
- go cc.scWatcher()
+ // Build the resolver.
+ cc.resolverWrapper, err = newCCResolverWrapper(cc)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build resolver: %v", err)
}
+ // Start the resolver wrapper goroutine after resolverWrapper is created.
+ //
+ // If the goroutine is started before resolverWrapper is ready, the
+ // following may happen: The goroutine sends updates to cc. cc forwards
+ // those to balancer. Balancer creates new addrConn. addrConn fails to
+ // connect, and calls resolveNow(). resolveNow() tries to use the non-ready
+ // resolverWrapper.
+ cc.resolverWrapper.start()
+
+ // A blocking dial blocks until the clientConn is ready.
+ if cc.dopts.block {
+ for {
+ s := cc.GetState()
+ if s == connectivity.Ready {
+ break
+ }
+ if !cc.WaitForStateChange(ctx, s) {
+ // ctx got timeout or canceled.
+ return nil, ctx.Err()
+ }
+ }
+ }
+
return cc, nil
}
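// A minimal usage sketch (illustrative only): a blocking dial bounded by a
// context deadline, replacing the deprecated WithTimeout option. The target and
// the 5-second deadline are placeholders; assumed imports:
// "golang.org/x/net/context", "time" and "google.golang.org/grpc".
func dialBlocking(target string) (*grpc.ClientConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// WithBlock makes DialContext wait until the connection is Ready or ctx expires.
	return grpc.DialContext(ctx, target, grpc.WithInsecure(), grpc.WithBlock())
}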
-// ConnectivityState indicates the state of a client connection.
-type ConnectivityState int
+// connectivityStateManager keeps the connectivity.State of ClientConn.
+// This struct will eventually be exported so the balancers can access it.
+type connectivityStateManager struct {
+ mu sync.Mutex
+ state connectivity.State
+ notifyChan chan struct{}
+}
-const (
- // Idle indicates the ClientConn is idle.
- Idle ConnectivityState = iota
- // Connecting indicates the ClienConn is connecting.
- Connecting
- // Ready indicates the ClientConn is ready for work.
- Ready
- // TransientFailure indicates the ClientConn has seen a failure but expects to recover.
- TransientFailure
- // Shutdown indicates the ClientConn has started shutting down.
- Shutdown
-)
+// updateState updates the connectivity.State of ClientConn.
+// If there's a change it notifies goroutines waiting on state change to
+// happen.
+func (csm *connectivityStateManager) updateState(state connectivity.State) {
+ csm.mu.Lock()
+ defer csm.mu.Unlock()
+ if csm.state == connectivity.Shutdown {
+ return
+ }
+ if csm.state == state {
+ return
+ }
+ csm.state = state
+ if csm.notifyChan != nil {
+ // There are other goroutines waiting on this channel.
+ close(csm.notifyChan)
+ csm.notifyChan = nil
+ }
+}
+
+func (csm *connectivityStateManager) getState() connectivity.State {
+ csm.mu.Lock()
+ defer csm.mu.Unlock()
+ return csm.state
+}
-func (s ConnectivityState) String() string {
- switch s {
- case Idle:
- return "IDLE"
- case Connecting:
- return "CONNECTING"
- case Ready:
- return "READY"
- case TransientFailure:
- return "TRANSIENT_FAILURE"
- case Shutdown:
- return "SHUTDOWN"
- default:
- panic(fmt.Sprintf("unknown connectivity state: %d", s))
+func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
+ csm.mu.Lock()
+ defer csm.mu.Unlock()
+ if csm.notifyChan == nil {
+ csm.notifyChan = make(chan struct{})
}
+ return csm.notifyChan
}
// ClientConn represents a client connection to an RPC server.
@@ -429,50 +604,50 @@ type ClientConn struct {
ctx context.Context
cancel context.CancelFunc
- target string
- authority string
- dopts dialOptions
+ target string
+ parsedTarget resolver.Target
+ authority string
+ dopts dialOptions
+ csMgr *connectivityStateManager
+
+ balancerBuildOpts balancer.BuildOptions
+ resolverWrapper *ccResolverWrapper
+ blockingpicker *pickerWrapper
mu sync.RWMutex
sc ServiceConfig
- conns map[Address]*addrConn
+ scRaw string
+ conns map[*addrConn]struct{}
+ // Keepalive parameter can be updated if a GoAway is received.
+ mkp keepalive.ClientParameters
+ curBalancerName string
+ preBalancerName string // previous balancer name.
+ curAddresses []resolver.Address
+ balancerWrapper *ccBalancerWrapper
}
-func (cc *ClientConn) lbWatcher() {
- for addrs := range cc.dopts.balancer.Notify() {
- var (
- add []Address // Addresses need to setup connections.
- del []*addrConn // Connections need to tear down.
- )
- cc.mu.Lock()
- for _, a := range addrs {
- if _, ok := cc.conns[a]; !ok {
- add = append(add, a)
- }
- }
- for k, c := range cc.conns {
- var keep bool
- for _, a := range addrs {
- if k == a {
- keep = true
- break
- }
- }
- if !keep {
- del = append(del, c)
- delete(cc.conns, c.addr)
- }
- }
- cc.mu.Unlock()
- for _, a := range add {
- cc.resetAddrConn(a, true, nil)
- }
- for _, c := range del {
- c.tearDown(errConnDrain)
- }
+// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
+// ctx expires. A true value is returned in the former case and false in the latter.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
+ ch := cc.csMgr.getNotifyChan()
+ if cc.csMgr.getState() != sourceState {
+ return true
+ }
+ select {
+ case <-ctx.Done():
+ return false
+ case <-ch:
+ return true
}
}
+// GetState returns the connectivity.State of ClientConn.
+// This is an EXPERIMENTAL API.
+func (cc *ClientConn) GetState() connectivity.State {
+ return cc.csMgr.getState()
+}
+
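// A minimal usage sketch (illustrative only): waiting for a connection to reach
// Ready with the experimental GetState/WaitForStateChange APIs, mirroring the
// blocking-dial loop above. Assumed imports: "golang.org/x/net/context",
// "google.golang.org/grpc" and "google.golang.org/grpc/connectivity".
func waitForReady(ctx context.Context, cc *grpc.ClientConn) bool {
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return true
		}
		// WaitForStateChange returns false when ctx expires before the state changes.
		if !cc.WaitForStateChange(ctx, s) {
			return false
		}
	}
}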
func (cc *ClientConn) scWatcher() {
for {
select {
@@ -484,6 +659,7 @@ func (cc *ClientConn) scWatcher() {
// TODO: load balance policy runtime change is ignored.
// We may revisit this decision in the future.
cc.sc = sc
+ cc.scRaw = ""
cc.mu.Unlock()
case <-cc.ctx.Done():
return
@@ -491,151 +667,271 @@ func (cc *ClientConn) scWatcher() {
}
}
-// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
-// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
-// If tearDownErr is nil, errConnDrain will be used instead.
-func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error {
- ac := &addrConn{
- cc: cc,
- addr: addr,
- dopts: cc.dopts,
+func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cc.conns == nil {
+ // cc was closed.
+ return
}
- ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
- ac.stateCV = sync.NewCond(&ac.mu)
- if EnableTracing {
- ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
+
+ if reflect.DeepEqual(cc.curAddresses, addrs) {
+ return
}
- if !ac.dopts.insecure {
- if ac.dopts.copts.TransportCredentials == nil {
- return errNoTransportSecurity
- }
- } else {
- if ac.dopts.copts.TransportCredentials != nil {
- return errCredentialsConflict
+
+ cc.curAddresses = addrs
+
+ if cc.dopts.balancerBuilder == nil {
+ // Only look at balancer types and switch balancer if balancer dial
+ // option is not set.
+ var isGRPCLB bool
+ for _, a := range addrs {
+ if a.Type == resolver.GRPCLB {
+ isGRPCLB = true
+ break
+ }
}
- for _, cd := range ac.dopts.copts.PerRPCCredentials {
- if cd.RequireTransportSecurity() {
- return errTransportCredentialsMissing
+ var newBalancerName string
+ if isGRPCLB {
+ newBalancerName = grpclbName
+ } else {
+ // Address list doesn't contain grpclb address. Try to pick a
+ // non-grpclb balancer.
+ newBalancerName = cc.curBalancerName
+ // If current balancer is grpclb, switch to the previous one.
+ if newBalancerName == grpclbName {
+ newBalancerName = cc.preBalancerName
+ }
+ // The following could be true in two cases:
+ // - the first time handling resolved addresses
+ // (curBalancerName="")
+ // - the first time handling non-grpclb addresses
+ // (curBalancerName="grpclb", preBalancerName="")
+ if newBalancerName == "" {
+ newBalancerName = PickFirstBalancerName
}
}
+ cc.switchBalancer(newBalancerName)
+ } else if cc.balancerWrapper == nil {
+ // Balancer dial option was set, and this is the first time handling
+ // resolved addresses. Build a balancer with dopts.balancerBuilder.
+ cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
}
+
+ cc.balancerWrapper.handleResolvedAddrs(addrs, nil)
+}
+
+// switchBalancer starts the switching from current balancer to the balancer
+// with the given name.
+//
+// It will NOT send the current address list to the new balancer. If needed,
+// caller of this function should send address list to the new balancer after
+// this function returns.
+//
+// Caller must hold cc.mu.
+func (cc *ClientConn) switchBalancer(name string) {
+ if cc.conns == nil {
+ return
+ }
+
+ if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) {
+ return
+ }
+
+ grpclog.Infof("ClientConn switching balancer to %q", name)
+ if cc.dopts.balancerBuilder != nil {
+ grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
+ return
+ }
+ // TODO(bar switching) change this to two steps: drain and close.
+ // Keep track of sc in wrapper.
+ if cc.balancerWrapper != nil {
+ cc.balancerWrapper.close()
+ }
+
+ builder := balancer.Get(name)
+ if builder == nil {
+ grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
+ builder = newPickfirstBuilder()
+ }
+ cc.preBalancerName = cc.curBalancerName
+ cc.curBalancerName = builder.Name()
+ cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+}
+
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
+ }
+ // TODO(bar switching) send updates to all balancer wrappers when balancer
+ // gracefully switching is supported.
+ cc.balancerWrapper.handleSubConnStateChange(sc, s)
+ cc.mu.Unlock()
+}
+
+// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+//
+// Caller needs to make sure len(addrs) > 0.
+func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
+ ac := &addrConn{
+ cc: cc,
+ addrs: addrs,
+ dopts: cc.dopts,
+ }
+ ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Track ac in cc. This needs to be done before any getTransport(...) is called.
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
- return ErrClientConnClosing
+ return nil, ErrClientConnClosing
}
- stale := cc.conns[ac.addr]
- cc.conns[ac.addr] = ac
+ cc.conns[ac] = struct{}{}
cc.mu.Unlock()
- if stale != nil {
- // There is an addrConn alive on ac.addr already. This could be due to
- // 1) a buggy Balancer notifies duplicated Addresses;
- // 2) goaway was received, a new ac will replace the old ac.
- // The old ac should be deleted from cc.conns, but the
- // underlying transport should drain rather than close.
- if tearDownErr == nil {
- // tearDownErr is nil if resetAddrConn is called by
- // 1) Dial
- // 2) lbWatcher
- // In both cases, the stale ac should drain, not close.
- stale.tearDown(errConnDrain)
- } else {
- stale.tearDown(tearDownErr)
- }
+ return ac, nil
+}
+
+// removeAddrConn removes the addrConn in the subConn from clientConn.
+// It also tears down the ac with the given error.
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
}
- // skipWait may overwrite the decision in ac.dopts.block.
- if ac.dopts.block && !skipWait {
- if err := ac.resetTransport(false); err != nil {
+ delete(cc.conns, ac)
+ cc.mu.Unlock()
+ ac.tearDown(err)
+}
+
+// connect starts creating the transport and also starts the transport monitor
+// goroutine for this ac.
+// It does nothing if the ac is not IDLE.
+// TODO(bar) Move this to the addrConn section.
+// This was part of resetAddrConn, keep it here to make the diff look clean.
+func (ac *addrConn) connect() error {
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ if ac.state != connectivity.Idle {
+ ac.mu.Unlock()
+ return nil
+ }
+ ac.state = connectivity.Connecting
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.mu.Unlock()
+
+ // Start a goroutine connecting to the server asynchronously.
+ go func() {
+ if err := ac.resetTransport(); err != nil {
+ grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
if err != errConnClosing {
- // Tear down ac and delete it from cc.conns.
- cc.mu.Lock()
- delete(cc.conns, ac.addr)
- cc.mu.Unlock()
+ // Keep this ac in cc.conns, to get the reason it's torn down.
ac.tearDown(err)
}
- if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
- return e.Origin()
- }
- return err
+ return
}
- // Start to monitor the error status of transport.
- go ac.transportMonitor()
- } else {
- // Start a goroutine connecting to the server asynchronously.
- go func() {
- if err := ac.resetTransport(false); err != nil {
- grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
- if err != errConnClosing {
- // Keep this ac in cc.conns, to get the reason it's torn down.
- ac.tearDown(err)
- }
- return
- }
- ac.transportMonitor()
- }()
- }
+ ac.transportMonitor()
+ }()
return nil
}
-// TODO: Avoid the locking here.
-func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) {
- cc.mu.RLock()
- defer cc.mu.RUnlock()
- m, ok = cc.sc.Methods[method]
- return
-}
+// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
+//
+// It checks whether current connected address of ac is in the new addrs list.
+// - If true, it updates ac.addrs and returns true. The ac will keep using
+// the existing connection.
+// - If false, it does nothing and returns false.
+func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+ if ac.state == connectivity.Shutdown {
+ ac.addrs = addrs
+ return true
+ }
-func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
- var (
- ac *addrConn
- ok bool
- put func()
- )
- if cc.dopts.balancer == nil {
- // If balancer is nil, there should be only one addrConn available.
- cc.mu.RLock()
- if cc.conns == nil {
- cc.mu.RUnlock()
- return nil, nil, toRPCErr(ErrClientConnClosing)
- }
- for _, ac = range cc.conns {
- // Break after the first iteration to get the first addrConn.
- ok = true
+ var curAddrFound bool
+ for _, a := range addrs {
+ if reflect.DeepEqual(ac.curAddr, a) {
+ curAddrFound = true
break
}
- cc.mu.RUnlock()
- } else {
- var (
- addr Address
- err error
- )
- addr, put, err = cc.dopts.balancer.Get(ctx, opts)
- if err != nil {
- return nil, nil, toRPCErr(err)
- }
- cc.mu.RLock()
- if cc.conns == nil {
- cc.mu.RUnlock()
- return nil, nil, toRPCErr(ErrClientConnClosing)
- }
- ac, ok = cc.conns[addr]
- cc.mu.RUnlock()
}
+ grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
+ if curAddrFound {
+ ac.addrs = addrs
+ ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list.
+ }
+
+ return curAddrFound
+}
+
+// GetMethodConfig gets the method config of the input method.
+// If there's an exact match for input method (i.e. /service/method), we return
+// the corresponding MethodConfig.
+// If there isn't an exact match for the input method, we look for the default config
+// under the service (i.e. /service/). If there is a default MethodConfig for
+// the service, we return it.
+// Otherwise, we return an empty MethodConfig.
+func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
+ // TODO: Avoid the locking here.
+ cc.mu.RLock()
+ defer cc.mu.RUnlock()
+ m, ok := cc.sc.Methods[method]
if !ok {
- if put != nil {
- put()
- }
- return nil, nil, errConnClosing
+ i := strings.LastIndex(method, "/")
+ m, _ = cc.sc.Methods[method[:i+1]]
}
- t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait)
+ return m
+}
+
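// A minimal usage sketch (illustrative only): the lookup falls back from the
// exact "/service/method" key to the per-service default "/service/" key. The
// method path is a placeholder; assumed import: "google.golang.org/grpc".
func waitForReadyDefault(cc *grpc.ClientConn) bool {
	mc := cc.GetMethodConfig("/example.Service/Method")
	// WaitForReady is nil when the service config does not set it for this method.
	return mc.WaitForReady != nil && *mc.WaitForReady
}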
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+ t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{})
if err != nil {
- if put != nil {
- put()
+ return nil, nil, toRPCErr(err)
+ }
+ return t, done, nil
+}
+
+// handleServiceConfig parses the service config string in JSON format into the
+// Go native ServiceConfig struct, and stores both the struct and the JSON string in ClientConn.
+func (cc *ClientConn) handleServiceConfig(js string) error {
+ sc, err := parseServiceConfig(js)
+ if err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ cc.scRaw = js
+ cc.sc = sc
+ if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
+ if cc.curBalancerName == grpclbName {
+ // If current balancer is grpclb, there's at least one grpclb
+ // balancer address in the resolved list. Don't switch the balancer,
+ // but change the previous balancer name, so if a new resolved
+ // address list doesn't contain grpclb address, balancer will be
+ // switched to *sc.LB.
+ cc.preBalancerName = *sc.LB
+ } else {
+ cc.switchBalancer(*sc.LB)
+ cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
}
- return nil, nil, err
}
- return t, put, nil
+ cc.mu.Unlock()
+ return nil
+}
+
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
+ cc.mu.Lock()
+ r := cc.resolverWrapper
+ cc.mu.Unlock()
+ if r == nil {
+ return
+ }
+ go r.resolveNow(o)
}
// Close tears down the ClientConn and all underlying connections.
@@ -649,11 +945,21 @@ func (cc *ClientConn) Close() error {
}
conns := cc.conns
cc.conns = nil
+ cc.csMgr.updateState(connectivity.Shutdown)
+
+ rWrapper := cc.resolverWrapper
+ cc.resolverWrapper = nil
+ bWrapper := cc.balancerWrapper
+ cc.balancerWrapper = nil
cc.mu.Unlock()
- if cc.dopts.balancer != nil {
- cc.dopts.balancer.Close()
+ cc.blockingpicker.close()
+ if rWrapper != nil {
+ rWrapper.close()
+ }
+ if bWrapper != nil {
+ bWrapper.close()
}
- for _, ac := range conns {
+ for ac := range conns {
ac.tearDown(ErrClientConnClosing)
}
return nil
@@ -665,14 +971,15 @@ type addrConn struct {
cancel context.CancelFunc
cc *ClientConn
- addr Address
+ addrs []resolver.Address
dopts dialOptions
events trace.EventLog
+ acbw balancer.SubConn
- mu sync.Mutex
- state ConnectivityState
- stateCV *sync.Cond
- down func(error) // the handler called when a connection is down.
+ mu sync.Mutex
+ curAddr resolver.Address
+ reconnectIdx int // The index in addrs list to start reconnecting from.
+ state connectivity.State
// ready is closed and becomes nil when a new transport is up or failed
// due to timeout.
ready chan struct{}
@@ -680,6 +987,28 @@ type addrConn struct {
// The reason this addrConn is torn down.
tearDownErr error
+
+ connectRetryNum int
+ // backoffDeadline is the time until which resetTransport needs to
+ // wait before increasing connectRetryNum count.
+ backoffDeadline time.Time
+ // connectDeadline is the time by which all connection
+ // negotiations must complete.
+ connectDeadline time.Time
+}
+
+// adjustParams updates parameters used to create transports upon
+// receiving a GoAway.
+func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
+ switch r {
+ case transport.GoAwayTooManyPings:
+ v := 2 * ac.dopts.copts.KeepaliveParams.Time
+ ac.cc.mu.Lock()
+ if v > ac.cc.mkp.Time {
+ ac.cc.mkp.Time = v
+ }
+ ac.cc.mu.Unlock()
+ }
}
// printf records an event in ac's event log, unless ac has been closed.
@@ -698,198 +1027,270 @@ func (ac *addrConn) errorf(format string, a ...interface{}) {
}
}
-// getState returns the connectivity state of the Conn
-func (ac *addrConn) getState() ConnectivityState {
- ac.mu.Lock()
- defer ac.mu.Unlock()
- return ac.state
-}
-
-// waitForStateChange blocks until the state changes to something other than the sourceState.
-func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
+// resetTransport recreates a transport to the address for ac. The old
+// transport will close itself on error or when the clientconn is closed.
+// The created transport must receive initial settings frame from the server.
+// In case that doesn't happen, transportMonitor will kill the newly created
+// transport after connectDeadline has expired.
+// In case there was an error on the transport before the settings frame was
+// received, resetTransport resumes connecting to backends after the one that
+// was previously connected to. In case the end of the list is reached, resetTransport
+// backs off until the original deadline.
+// If the DialOption WithWaitForHandshake was set, resetTransport returns
+// successfully only after server settings are received.
+//
+// TODO(bar) make sure all state transitions are valid.
+func (ac *addrConn) resetTransport() error {
ac.mu.Lock()
- defer ac.mu.Unlock()
- if sourceState != ac.state {
- return ac.state, nil
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return errConnClosing
}
- done := make(chan struct{})
- var err error
- go func() {
- select {
- case <-ctx.Done():
- ac.mu.Lock()
- err = ctx.Err()
- ac.stateCV.Broadcast()
- ac.mu.Unlock()
- case <-done:
- }
- }()
- defer close(done)
- for sourceState == ac.state {
- ac.stateCV.Wait()
- if err != nil {
- return ac.state, err
- }
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
}
- return ac.state, nil
-}
-
-func (ac *addrConn) resetTransport(closeTransport bool) error {
- for retries := 0; ; retries++ {
+ ac.transport = nil
+ ridx := ac.reconnectIdx
+ ac.mu.Unlock()
+ ac.cc.mu.RLock()
+ ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+ ac.cc.mu.RUnlock()
+ var backoffDeadline, connectDeadline time.Time
+ for connectRetryNum := 0; ; connectRetryNum++ {
ac.mu.Lock()
- ac.printf("connecting")
- if ac.state == Shutdown {
- // ac.tearDown(...) has been invoked.
+ if ac.backoffDeadline.IsZero() {
+ // This means either a successful HTTP2 connection was established
+ // or this is the first time this addrConn is trying to establish a
+ // connection.
+ backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration.
+ // This will be the duration that dial gets to finish.
+ dialDuration := minConnectTimeout
+ if backoffFor > dialDuration {
+ // Give dial more time as we keep failing to connect.
+ dialDuration = backoffFor
+ }
+ start := time.Now()
+ backoffDeadline = start.Add(backoffFor)
+ connectDeadline = start.Add(dialDuration)
+ ridx = 0 // Start connecting from the beginning.
+ } else {
+ // Continue trying to connect with the same deadlines.
+ connectRetryNum = ac.connectRetryNum
+ backoffDeadline = ac.backoffDeadline
+ connectDeadline = ac.connectDeadline
+ ac.backoffDeadline = time.Time{}
+ ac.connectDeadline = time.Time{}
+ ac.connectRetryNum = 0
+ }
+ if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
return errConnClosing
}
- if ac.down != nil {
- ac.down(downErrorf(false, true, "%v", errNetworkIO))
- ac.down = nil
+ ac.printf("connecting")
+ if ac.state != connectivity.Connecting {
+ ac.state = connectivity.Connecting
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
}
- ac.state = Connecting
- ac.stateCV.Broadcast()
- t := ac.transport
+ // copy ac.addrs in case of race
+ addrsIter := make([]resolver.Address, len(ac.addrs))
+ copy(addrsIter, ac.addrs)
+ copts := ac.dopts.copts
ac.mu.Unlock()
- if closeTransport && t != nil {
- t.Close()
+ connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts)
+ if err != nil {
+ return err
}
- sleepTime := ac.dopts.bs.backoff(retries)
- timeout := minConnectTimeout
- if timeout < sleepTime {
- timeout = sleepTime
+ if connected {
+ return nil
}
- ctx, cancel := context.WithTimeout(ac.ctx, timeout)
- connectTime := time.Now()
- sinfo := transport.TargetInfo{
- Addr: ac.addr.Addr,
- Metadata: ac.addr.Metadata,
+ }
+}
+
+// createTransport creates a connection to one of the backends in addrs.
+// It returns true if a connection was established.
+func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) {
+ for i := ridx; i < len(addrs); i++ {
+ addr := addrs[i]
+ target := transport.TargetInfo{
+ Addr: addr.Addr,
+ Metadata: addr.Metadata,
+ Authority: ac.cc.authority,
}
- newTransport, err := transport.NewClientTransport(ctx, sinfo, ac.dopts.copts)
+ done := make(chan struct{})
+ onPrefaceReceipt := func() {
+ ac.mu.Lock()
+ close(done)
+ if !ac.backoffDeadline.IsZero() {
+ // If we haven't already started reconnecting to
+ // other backends.
+ // Note, this can happen when writer notices an error
+ // and triggers resetTransport while at the same time
+ // reader receives the preface and invokes this closure.
+ ac.backoffDeadline = time.Time{}
+ ac.connectDeadline = time.Time{}
+ ac.connectRetryNum = 0
+ }
+ ac.mu.Unlock()
+ }
+ // Do not cancel in the success path because of
+ // this issue in Go1.6: https://github.com/golang/go/issues/15078.
+ connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+ newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
if err != nil {
cancel()
-
if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
- return err
+ ac.mu.Lock()
+ if ac.state != connectivity.Shutdown {
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ }
+ ac.mu.Unlock()
+ return false, err
}
- grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %v", err, ac.addr)
ac.mu.Lock()
- if ac.state == Shutdown {
+ if ac.state == connectivity.Shutdown {
// ac.tearDown(...) has been invoked.
ac.mu.Unlock()
- return errConnClosing
- }
- ac.errorf("transient failure: %v", err)
- ac.state = TransientFailure
- ac.stateCV.Broadcast()
- if ac.ready != nil {
- close(ac.ready)
- ac.ready = nil
+ return false, errConnClosing
}
ac.mu.Unlock()
- closeTransport = false
+ grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
+ continue
+ }
+ if ac.dopts.waitForHandshake {
select {
- case <-time.After(sleepTime - time.Since(connectTime)):
+ case <-done:
+ case <-connectCtx.Done():
+ // Didn't receive server preface, must kill this new transport now.
+ grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
+ newTr.Close()
+ break
case <-ac.ctx.Done():
- return ac.ctx.Err()
}
- continue
}
ac.mu.Lock()
- ac.printf("ready")
- if ac.state == Shutdown {
- // ac.tearDown(...) has been invoked.
+ if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
- newTransport.Close()
- return errConnClosing
+ // ac.tearDown(...) has been invoked.
+ newTr.Close()
+ return false, errConnClosing
}
- ac.state = Ready
- ac.stateCV.Broadcast()
- ac.transport = newTransport
+ ac.printf("ready")
+ ac.state = connectivity.Ready
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.transport = newTr
+ ac.curAddr = addr
if ac.ready != nil {
close(ac.ready)
ac.ready = nil
}
- if ac.cc.dopts.balancer != nil {
- ac.down = ac.cc.dopts.balancer.Up(ac.addr)
+ select {
+ case <-done:
+ // If the server has responded back with preface already,
+ // don't set the reconnect parameters.
+ default:
+ ac.connectRetryNum = connectRetryNum
+ ac.backoffDeadline = backoffDeadline
+ ac.connectDeadline = connectDeadline
+ ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
}
ac.mu.Unlock()
- return nil
+ return true, nil
+ }
+ ac.mu.Lock()
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.cc.resolveNow(resolver.ResolveNowOption{})
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
}
+ ac.mu.Unlock()
+ timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
+ select {
+ case <-timer.C:
+ case <-ac.ctx.Done():
+ timer.Stop()
+ return false, ac.ctx.Err()
+ }
+ return false, nil
}
// Run in a goroutine to track the error in transport and create the
// new transport if an error happens. It returns when the channel is closing.
func (ac *addrConn) transportMonitor() {
for {
+ var timer *time.Timer
+ var cdeadline <-chan time.Time
ac.mu.Lock()
t := ac.transport
+ if !ac.connectDeadline.IsZero() {
+ timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
+ cdeadline = timer.C
+ }
ac.mu.Unlock()
+ // Block until we receive a goaway or an error occurs.
select {
- // This is needed to detect the teardown when
- // the addrConn is idle (i.e., no RPC in flight).
- case <-ac.ctx.Done():
- select {
- case <-t.Error():
- t.Close()
- default:
- }
- return
case <-t.GoAway():
- // If GoAway happens without any network I/O error, ac is closed without shutting down the
- // underlying transport (the transport will be closed when all the pending RPCs finished or
- // failed.).
- // If GoAway and some network I/O error happen concurrently, ac and its underlying transport
- // are closed.
- // In both cases, a new ac is created.
- select {
- case <-t.Error():
- ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
- default:
- ac.cc.resetAddrConn(ac.addr, true, errConnDrain)
- }
- return
case <-t.Error():
- select {
- case <-ac.ctx.Done():
- t.Close()
- return
- case <-t.GoAway():
- ac.cc.resetAddrConn(ac.addr, true, errNetworkIO)
- return
- default:
- }
+ case <-cdeadline:
ac.mu.Lock()
- if ac.state == Shutdown {
- // ac has been shutdown.
+ // This implies that client received server preface.
+ if ac.backoffDeadline.IsZero() {
ac.mu.Unlock()
- return
+ continue
}
- ac.state = TransientFailure
- ac.stateCV.Broadcast()
ac.mu.Unlock()
- if err := ac.resetTransport(true); err != nil {
- ac.mu.Lock()
- ac.printf("transport exiting: %v", err)
- ac.mu.Unlock()
- grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err)
- if err != errConnClosing {
- // Keep this ac in cc.conns, to get the reason it's torn down.
- ac.tearDown(err)
- }
- return
+ timer = nil
+ // No server preface received until deadline.
+ // Kill the connection.
+ grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
+ t.Close()
+ }
+ if timer != nil {
+ timer.Stop()
+ }
+ // If a GoAway happened, regardless of error, adjust our keepalive
+ // parameters as appropriate.
+ select {
+ case <-t.GoAway():
+ ac.adjustParams(t.GetGoAwayReason())
+ default:
+ }
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return
+ }
+ // Set connectivity state to TransientFailure before calling
+ // resetTransport. Transition READY->CONNECTING is not valid.
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.cc.resolveNow(resolver.ResolveNowOption{})
+ ac.curAddr = resolver.Address{}
+ ac.mu.Unlock()
+ if err := ac.resetTransport(); err != nil {
+ ac.mu.Lock()
+ ac.printf("transport exiting: %v", err)
+ ac.mu.Unlock()
+ grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err)
+ if err != errConnClosing {
+ // Keep this ac in cc.conns, to get the reason it's torn down.
+ ac.tearDown(err)
}
+ return
}
}
}
// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or
-// iv) transport is in TransientFailure and there is a balancer/failfast is true.
+// iv) transport is in connectivity.TransientFailure and there is a balancer or failfast is true.
func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
for {
ac.mu.Lock()
switch {
- case ac.state == Shutdown:
+ case ac.state == connectivity.Shutdown:
if failfast || !hasBalancer {
// RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
err := ac.tearDownErr
@@ -898,11 +1299,11 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans
}
ac.mu.Unlock()
return nil, errConnClosing
- case ac.state == Ready:
+ case ac.state == connectivity.Ready:
ct := ac.transport
ac.mu.Unlock()
return ct, nil
- case ac.state == TransientFailure:
+ case ac.state == connectivity.TransientFailure:
if failfast || hasBalancer {
ac.mu.Unlock()
return nil, errConnUnavailable
@@ -923,6 +1324,28 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans
}
}
+// getReadyTransport returns the transport if ac's state is READY.
+// Otherwise it returns nil, false.
+// If ac's state is IDLE, it will trigger ac to connect.
+func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
+ ac.mu.Lock()
+ if ac.state == connectivity.Ready {
+ t := ac.transport
+ ac.mu.Unlock()
+ return t, true
+ }
+ var idle bool
+ if ac.state == connectivity.Idle {
+ idle = true
+ }
+ ac.mu.Unlock()
+ // Trigger idle ac to connect.
+ if idle {
+ ac.connect()
+ }
+ return nil, false
+}
+
// tearDown starts to tear down the addrConn.
// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
// some edge cases (e.g., the caller opens and closes many addrConn's in a
@@ -930,13 +1353,12 @@ func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (trans
// tearDown doesn't remove ac from ac.cc.conns.
func (ac *addrConn) tearDown(err error) {
ac.cancel()
-
ac.mu.Lock()
defer ac.mu.Unlock()
- if ac.down != nil {
- ac.down(downErrorf(false, false, "%v", err))
- ac.down = nil
+ if ac.state == connectivity.Shutdown {
+ return
}
+ ac.curAddr = resolver.Address{}
if err == errConnDrain && ac.transport != nil {
// GracefulClose(...) may be executed multiple times when
// i) receiving multiple GoAway frames from the server; or
@@ -944,12 +1366,9 @@ func (ac *addrConn) tearDown(err error) {
// address removal and GoAway.
ac.transport.GracefulClose()
}
- if ac.state == Shutdown {
- return
- }
- ac.state = Shutdown
+ ac.state = connectivity.Shutdown
ac.tearDownErr = err
- ac.stateCV.Broadcast()
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
if ac.events != nil {
ac.events.Finish()
ac.events = nil
@@ -958,8 +1377,11 @@ func (ac *addrConn) tearDown(err error) {
close(ac.ready)
ac.ready = nil
}
- if ac.transport != nil && err != errConnDrain {
- ac.transport.Close()
- }
return
}
+
+func (ac *addrConn) getState() connectivity.State {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ return ac.state
+}
diff --git a/go/vendor/google.golang.org/grpc/codec.go b/go/vendor/google.golang.org/grpc/codec.go
new file mode 100644
index 0000000..43d81ed
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/codec.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "math"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+// Note that implementations of this interface must be thread safe;
+// a Codec's methods can be called from concurrent goroutines.
+type Codec interface {
+ // Marshal returns the wire format of v.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal parses the wire format into v.
+ Unmarshal(data []byte, v interface{}) error
+ // String returns the name of the Codec implementation. The returned
+ // string will be used as part of content type in transmission.
+ String() string
+}
+
+// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type protoCodec struct {
+}
+
+type cachedProtoBuffer struct {
+ lastMarshaledSize uint32
+ proto.Buffer
+}
+
+func capToMaxInt32(val int) uint32 {
+ if val > math.MaxInt32 {
+ return uint32(math.MaxInt32)
+ }
+ return uint32(val)
+}
+
+func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
+ protoMsg := v.(proto.Message)
+ newSlice := make([]byte, 0, cb.lastMarshaledSize)
+
+ cb.SetBuf(newSlice)
+ cb.Reset()
+ if err := cb.Marshal(protoMsg); err != nil {
+ return nil, err
+ }
+ out := cb.Bytes()
+ cb.lastMarshaledSize = capToMaxInt32(len(out))
+ return out, nil
+}
+
+func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
+ if pm, ok := v.(proto.Marshaler); ok {
+ // object can marshal itself, no need for buffer
+ return pm.Marshal()
+ }
+
+ cb := protoBufferPool.Get().(*cachedProtoBuffer)
+ out, err := p.marshal(v, cb)
+
+ // put back buffer and lose the ref to the slice
+ cb.SetBuf(nil)
+ protoBufferPool.Put(cb)
+ return out, err
+}
+
+func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
+ protoMsg := v.(proto.Message)
+ protoMsg.Reset()
+
+ if pu, ok := protoMsg.(proto.Unmarshaler); ok {
+ // object can unmarshal itself, no need for buffer
+ return pu.Unmarshal(data)
+ }
+
+ cb := protoBufferPool.Get().(*cachedProtoBuffer)
+ cb.SetBuf(data)
+ err := cb.Unmarshal(protoMsg)
+ cb.SetBuf(nil)
+ protoBufferPool.Put(cb)
+ return err
+}
+
+func (protoCodec) String() string {
+ return "proto"
+}
+
+var protoBufferPool = &sync.Pool{
+ New: func() interface{} {
+ return &cachedProtoBuffer{
+ Buffer: proto.Buffer{},
+ lastMarshaledSize: 16,
+ }
+ },
+}
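Aside (not part of this patch): the Codec interface introduced above is the extension point for alternative wire formats. A minimal hedged sketch of a JSON-backed codec follows; the jsoncodec package name and the use of the grpc.WithCodec dial option to install it are assumptions for illustration, not something this change adds.

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc"
)

// jsonCodec satisfies the Codec interface shown above.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) String() string                             { return "json" }

// DialJSON dials target with the JSON codec installed for all RPCs on the connection.
func DialJSON(target string) (*grpc.ClientConn, error) {
	return grpc.Dial(target, grpc.WithCodec(jsonCodec{}), grpc.WithInsecure())
}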
diff --git a/go/vendor/google.golang.org/grpc/codes/code_string.go b/go/vendor/google.golang.org/grpc/codes/code_string.go
index e6762d0..0b206a5 100644
--- a/go/vendor/google.golang.org/grpc/codes/code_string.go
+++ b/go/vendor/google.golang.org/grpc/codes/code_string.go
@@ -1,16 +1,62 @@
-// generated by stringer -type=Code; DO NOT EDIT
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
package codes
-import "fmt"
+import "strconv"
-const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
-
-var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
-
-func (i Code) String() string {
- if i+1 >= Code(len(_Code_index)) {
- return fmt.Sprintf("Code(%d)", i)
+func (c Code) String() string {
+ switch c {
+ case OK:
+ return "OK"
+ case Canceled:
+ return "Canceled"
+ case Unknown:
+ return "Unknown"
+ case InvalidArgument:
+ return "InvalidArgument"
+ case DeadlineExceeded:
+ return "DeadlineExceeded"
+ case NotFound:
+ return "NotFound"
+ case AlreadyExists:
+ return "AlreadyExists"
+ case PermissionDenied:
+ return "PermissionDenied"
+ case ResourceExhausted:
+ return "ResourceExhausted"
+ case FailedPrecondition:
+ return "FailedPrecondition"
+ case Aborted:
+ return "Aborted"
+ case OutOfRange:
+ return "OutOfRange"
+ case Unimplemented:
+ return "Unimplemented"
+ case Internal:
+ return "Internal"
+ case Unavailable:
+ return "Unavailable"
+ case DataLoss:
+ return "DataLoss"
+ case Unauthenticated:
+ return "Unauthenticated"
+ default:
+ return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
}
- return _Code_name[_Code_index[i]:_Code_index[i+1]]
}
diff --git a/go/vendor/google.golang.org/grpc/codes/codes.go b/go/vendor/google.golang.org/grpc/codes/codes.go
index e14b464..f3719d5 100644
--- a/go/vendor/google.golang.org/grpc/codes/codes.go
+++ b/go/vendor/google.golang.org/grpc/codes/codes.go
@@ -1,50 +1,36 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
// Package codes defines the canonical error codes used by gRPC. It is
// consistent across various languages.
package codes // import "google.golang.org/grpc/codes"
+import (
+ "fmt"
+)
// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
type Code uint32
-//go:generate stringer -type=Code
-
const (
// OK is returned on success.
OK Code = 0
- // Canceled indicates the operation was cancelled (typically by the caller).
+ // Canceled indicates the operation was canceled (typically by the caller).
Canceled Code = 1
// Unknown error. An example of where this error may be returned is
@@ -157,3 +143,41 @@ const (
// DataLoss indicates unrecoverable data loss or corruption.
DataLoss Code = 15
)
+
+var strToCode = map[string]Code{
+ `"OK"`: OK,
+ `"CANCELLED"`:/* [sic] */ Canceled,
+ `"UNKNOWN"`: Unknown,
+ `"INVALID_ARGUMENT"`: InvalidArgument,
+ `"DEADLINE_EXCEEDED"`: DeadlineExceeded,
+ `"NOT_FOUND"`: NotFound,
+ `"ALREADY_EXISTS"`: AlreadyExists,
+ `"PERMISSION_DENIED"`: PermissionDenied,
+ `"RESOURCE_EXHAUSTED"`: ResourceExhausted,
+ `"FAILED_PRECONDITION"`: FailedPrecondition,
+ `"ABORTED"`: Aborted,
+ `"OUT_OF_RANGE"`: OutOfRange,
+ `"UNIMPLEMENTED"`: Unimplemented,
+ `"INTERNAL"`: Internal,
+ `"UNAVAILABLE"`: Unavailable,
+ `"DATA_LOSS"`: DataLoss,
+ `"UNAUTHENTICATED"`: Unauthenticated,
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+func (c *Code) UnmarshalJSON(b []byte) error {
+ // From json.Unmarshaler: By convention, to approximate the behavior of
+ // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+ // a no-op.
+ if string(b) == "null" {
+ return nil
+ }
+ if c == nil {
+ return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+ }
+ if jc, ok := strToCode[string(b)]; ok {
+ *c = jc
+ return nil
+ }
+ return fmt.Errorf("invalid code: %q", string(b))
+}
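Aside (not part of this patch): the new UnmarshalJSON lets a codes.Code be decoded directly from the upper-case wire names in strToCode. A hedged sketch; the config struct and retryOn field are invented for illustration.

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var cfg struct {
		RetryOn codes.Code `json:"retryOn"`
	}
	// strToCode above expects the upper-case names, e.g. "UNAVAILABLE".
	if err := json.Unmarshal([]byte(`{"retryOn": "UNAVAILABLE"}`), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.RetryOn) // prints "Unavailable" via Code.String
}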
diff --git a/go/vendor/google.golang.org/grpc/connectivity/connectivity.go b/go/vendor/google.golang.org/grpc/connectivity/connectivity.go
new file mode 100644
index 0000000..568ef5d
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package connectivity defines connectivity semantics.
+// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
+// All APIs in this package are experimental.
+package connectivity
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/grpclog"
+)
+
+// State indicates the state of connectivity.
+// It can be the state of a ClientConn or SubConn.
+type State int
+
+func (s State) String() string {
+ switch s {
+ case Idle:
+ return "IDLE"
+ case Connecting:
+ return "CONNECTING"
+ case Ready:
+ return "READY"
+ case TransientFailure:
+ return "TRANSIENT_FAILURE"
+ case Shutdown:
+ return "SHUTDOWN"
+ default:
+ grpclog.Errorf("unknown connectivity state: %d", s)
+ return "Invalid-State"
+ }
+}
+
+const (
+ // Idle indicates the ClientConn is idle.
+ Idle State = iota
+ // Connecting indicates the ClientConn is connecting.
+ Connecting
+ // Ready indicates the ClientConn is ready for work.
+ Ready
+ // TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+ TransientFailure
+ // Shutdown indicates the ClientConn has started shutting down.
+ Shutdown
+)
+
+// Reporter reports the connectivity states.
+type Reporter interface {
+ // CurrentState returns the current state of the reporter.
+ CurrentState() State
+ // WaitForStateChange blocks until the reporter's state is different from the given state,
+ // and returns true.
+ // It returns false if <-ctx.Done() can proceed (ctx timed out or was canceled).
+ WaitForStateChange(context.Context, State) bool
+}
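Aside (not part of this patch): a ClientConn exposes this state machine through its GetState and WaitForStateChange methods (both experimental in this release), which together match the Reporter shape above. A hedged sketch of waiting for READY:

package connwait

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitUntilReady blocks until cc reports READY or ctx is done.
func waitUntilReady(ctx context.Context, cc *grpc.ClientConn) bool {
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return true
		}
		// WaitForStateChange returns false when ctx expires first.
		if !cc.WaitForStateChange(ctx, s) {
			return false
		}
	}
}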
diff --git a/go/vendor/google.golang.org/grpc/coverage.sh b/go/vendor/google.golang.org/grpc/coverage.sh
deleted file mode 100755
index b85f918..0000000
--- a/go/vendor/google.golang.org/grpc/coverage.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-
-
-set -e
-
-workdir=.cover
-profile="$workdir/cover.out"
-mode=set
-end2endtest="google.golang.org/grpc/test"
-
-generate_cover_data() {
- rm -rf "$workdir"
- mkdir "$workdir"
-
- for pkg in "$@"; do
- if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
- then
- f="$workdir/$(echo $pkg | tr / -)"
- go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
- go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
- fi
- done
-
- echo "mode: $mode" >"$profile"
- grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
-}
-
-show_cover_report() {
- go tool cover -${1}="$profile"
-}
-
-push_to_coveralls() {
- goveralls -coverprofile="$profile"
-}
-
-generate_cover_data $(go list ./...)
-show_cover_report func
-case "$1" in
-"")
- ;;
---html)
- show_cover_report html ;;
---coveralls)
- push_to_coveralls ;;
-*)
- echo >&2 "error: invalid option: $1" ;;
-esac
-rm -rf "$workdir"
diff --git a/go/vendor/google.golang.org/grpc/credentials/credentials.go b/go/vendor/google.golang.org/grpc/credentials/credentials.go
index a8114d6..1d2e864 100644
--- a/go/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/go/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -49,10 +34,8 @@ import (
"golang.org/x/net/context"
)
-var (
- // alpnProtoStr are the specified application level protocols for gRPC.
- alpnProtoStr = []string{"h2"}
-)
+// alpnProtoStr are the specified application level protocols for gRPC.
+var alpnProtoStr = []string{"h2"}
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
@@ -89,11 +72,9 @@ type AuthInfo interface {
AuthType() string
}
-var (
- // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
- // and the caller should not close rawConn.
- ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
-)
+// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
+// and the caller should not close rawConn.
+var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
@@ -106,10 +87,14 @@ type TransportCredentials interface {
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// If the returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
+ //
+ // If the returned net.Conn is closed, it MUST close the net.Conn provided.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
// the connection.
+ //
+ // If the returned net.Conn is closed, it MUST close the net.Conn provided.
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
// Info provides the ProtocolInfo of this TransportCredentials.
Info() ProtocolInfo
@@ -146,15 +131,15 @@ func (c tlsCreds) Info() ProtocolInfo {
}
}
-func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
// use local cfg to avoid clobbering ServerName if using multiple endpoints
cfg := cloneTLSConfig(c.config)
if cfg.ServerName == "" {
- colonPos := strings.LastIndex(addr, ":")
+ colonPos := strings.LastIndex(authority, ":")
if colonPos == -1 {
- colonPos = len(addr)
+ colonPos = len(authority)
}
- cfg.ServerName = addr[:colonPos]
+ cfg.ServerName = authority[:colonPos]
}
conn := tls.Client(rawConn, cfg)
errChannel := make(chan error, 1)
@@ -196,14 +181,14 @@ func NewTLS(c *tls.Config) TransportCredentials {
return tc
}
-// NewClientTLSFromCert constructs a TLS from the input certificate for client.
+// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
// serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
}
-// NewClientTLSFromFile constructs a TLS from the input certificate file for client.
+// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
// serverNameOverride is for testing only. If set to a non empty string,
// it will override the virtual host name of authority (e.g. :authority header field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
@@ -218,12 +203,12 @@ func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredent
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
}
-// NewServerTLSFromCert constructs a TLS from the input certificate for server.
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
}
-// NewServerTLSFromFile constructs a TLS from the input certificate file and key
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
// file for server.
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
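Aside (not part of this patch): the reworded NewClientTLSFromFile / NewServerTLSFromFile constructors are typically used as in the hedged sketch below; the ca.pem path, the tlsdial package name, and the target argument are placeholders.

package tlsdial

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialTLS dials target using TLS credentials built from a CA bundle on disk.
// An empty serverNameOverride means the dialed authority is used for verification.
func dialTLS(target string) (*grpc.ClientConn, error) {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		return nil, err
	}
	return grpc.Dial(target, grpc.WithTransportCredentials(creds))
}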
diff --git a/go/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/go/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
index 7597b09..60409aa 100644
--- a/go/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
+++ b/go/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
@@ -3,34 +3,19 @@
/*
*
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
diff --git a/go/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/go/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
index 0ecf342..93f0e1d 100644
--- a/go/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
+++ b/go/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
@@ -2,34 +2,19 @@
/*
*
- * Copyright 2017, Google Inc.
- * All rights reserved.
+ * Copyright 2017 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
diff --git a/go/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/go/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
index cfd40df..d6bbcc9 100644
--- a/go/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
+++ b/go/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
@@ -2,34 +2,19 @@
/*
*
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
diff --git a/go/vendor/google.golang.org/grpc/doc.go b/go/vendor/google.golang.org/grpc/doc.go
index a35f218..187adbb 100644
--- a/go/vendor/google.golang.org/grpc/doc.go
+++ b/go/vendor/google.golang.org/grpc/doc.go
@@ -1,6 +1,24 @@
/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/*
Package grpc implements an RPC system called gRPC.
-See www.grpc.io for more information about gRPC.
+See grpc.io for more information about gRPC.
*/
package grpc // import "google.golang.org/grpc"
diff --git a/go/vendor/google.golang.org/grpc/encoding/encoding.go b/go/vendor/google.golang.org/grpc/encoding/encoding.go
new file mode 100644
index 0000000..47d10b0
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package encoding defines the interface for the compressor and the functions
+// to register and get the compressor.
+// This package is EXPERIMENTAL.
+package encoding
+
+import (
+ "io"
+)
+
+var registerCompressor = make(map[string]Compressor)
+
+// Compressor is used for compressing and decompressing when sending or receiving messages.
+type Compressor interface {
+ // Compress writes the data written to wc to w after compressing it. If an error
+ // occurs while initializing the compressor, that error is returned instead.
+ Compress(w io.Writer) (io.WriteCloser, error)
+ // Decompress reads data from r, decompresses it, and provides the uncompressed data
+ // via the returned io.Reader. If an error occurs while initializing the decompressor, that error
+ // is returned instead.
+ Decompress(r io.Reader) (io.Reader, error)
+ // Name is the name of the compression codec and is used to set the content coding header.
+ Name() string
+}
+
+// RegisterCompressor registers the compressor with gRPC by its name. It can be activated when
+// sending an RPC via grpc.UseCompressor(). It will be automatically accessed when receiving a
+// message based on the content coding header. Servers also use it to send a response with the
+// same encoding as the request.
+//
+// NOTE: this function must only be called during initialization time (i.e. in an init() function). If
+// multiple Compressors are registered with the same name, the one registered last will take effect.
+func RegisterCompressor(c Compressor) {
+ registerCompressor[c.Name()] = c
+}
+
+// GetCompressor returns Compressor for the given compressor name.
+func GetCompressor(name string) Compressor {
+ return registerCompressor[name]
+}
+
+// Identity specifies the optional encoding for uncompressed streams.
+// It is intended for grpc internal use only.
+const Identity = "identity"
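Aside (not part of this patch): a Compressor is registered from an init function, as the doc comment above requires. A hedged gzip-backed sketch; the gzipenc package name is illustrative.

package gzipenc

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

type gzipCompressor struct{}

// Compress returns a writer that gzips everything written to it into w.
func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

// Decompress returns a reader that yields the decompressed contents of r.
func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

func (gzipCompressor) Name() string { return "gzip" }

func init() {
	encoding.RegisterCompressor(gzipCompressor{})
}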
diff --git a/go/vendor/google.golang.org/grpc/go16.go b/go/vendor/google.golang.org/grpc/go16.go
new file mode 100644
index 0000000..f3dbf21
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/go16.go
@@ -0,0 +1,98 @@
+// +build go1.6,!go1.7
+
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+ req.Cancel = ctx.Done()
+ if err := req.Write(conn); err != nil {
+ return fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+ return nil
+}
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+ if _, ok := status.FromError(err); ok {
+ return err
+ }
+ switch e := err.(type) {
+ case transport.StreamError:
+ return status.Error(e.Code, e.Desc)
+ case transport.ConnectionError:
+ return status.Error(codes.Unavailable, e.Desc)
+ default:
+ switch err {
+ case context.DeadlineExceeded:
+ return status.Error(codes.DeadlineExceeded, err.Error())
+ case context.Canceled:
+ return status.Error(codes.Canceled, err.Error())
+ case ErrClientConnClosing:
+ return status.Error(codes.FailedPrecondition, err.Error())
+ }
+ }
+ return status.Error(codes.Unknown, err.Error())
+}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate the error returned by the server applications.
+func convertCode(err error) codes.Code {
+ switch err {
+ case nil:
+ return codes.OK
+ case io.EOF:
+ return codes.OutOfRange
+ case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+ return codes.FailedPrecondition
+ case os.ErrInvalid:
+ return codes.InvalidArgument
+ case context.Canceled:
+ return codes.Canceled
+ case context.DeadlineExceeded:
+ return codes.DeadlineExceeded
+ }
+ switch {
+ case os.IsExist(err):
+ return codes.AlreadyExists
+ case os.IsNotExist(err):
+ return codes.NotFound
+ case os.IsPermission(err):
+ return codes.PermissionDenied
+ }
+ return codes.Unknown
+}
diff --git a/go/vendor/google.golang.org/grpc/go17.go b/go/vendor/google.golang.org/grpc/go17.go
new file mode 100644
index 0000000..de23098
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/go17.go
@@ -0,0 +1,99 @@
+// +build go1.7
+
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+
+ netctx "golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
+)
+
+// dialContext connects to the address on the named network.
+func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ return (&net.Dialer{}).DialContext(ctx, network, address)
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+ req = req.WithContext(ctx)
+ if err := req.Write(conn); err != nil {
+ return fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+ return nil
+}
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+ if _, ok := status.FromError(err); ok {
+ return err
+ }
+ switch e := err.(type) {
+ case transport.StreamError:
+ return status.Error(e.Code, e.Desc)
+ case transport.ConnectionError:
+ return status.Error(codes.Unavailable, e.Desc)
+ default:
+ switch err {
+ case context.DeadlineExceeded, netctx.DeadlineExceeded:
+ return status.Error(codes.DeadlineExceeded, err.Error())
+ case context.Canceled, netctx.Canceled:
+ return status.Error(codes.Canceled, err.Error())
+ case ErrClientConnClosing:
+ return status.Error(codes.FailedPrecondition, err.Error())
+ }
+ }
+ return status.Error(codes.Unknown, err.Error())
+}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate the error returned by the server applications.
+func convertCode(err error) codes.Code {
+ switch err {
+ case nil:
+ return codes.OK
+ case io.EOF:
+ return codes.OutOfRange
+ case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+ return codes.FailedPrecondition
+ case os.ErrInvalid:
+ return codes.InvalidArgument
+ case context.Canceled, netctx.Canceled:
+ return codes.Canceled
+ case context.DeadlineExceeded, netctx.DeadlineExceeded:
+ return codes.DeadlineExceeded
+ }
+ switch {
+ case os.IsExist(err):
+ return codes.AlreadyExists
+ case os.IsNotExist(err):
+ return codes.NotFound
+ case os.IsPermission(err):
+ return codes.PermissionDenied
+ }
+ return codes.Unknown
+}
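Aside (not part of this patch): toRPCErr and convertCode are unexported, so callers only see their effect through the status codes attached to RPC errors. A hedged sketch of that caller-side view; classify and its strings are invented for illustration.

package rpcerrors

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// classify maps an error returned by an RPC to a short description using the
// codes produced by the conversion logic above.
func classify(err error) string {
	if err == nil {
		return "success"
	}
	st, ok := status.FromError(err)
	if !ok {
		return "not a gRPC status error"
	}
	switch st.Code() {
	case codes.DeadlineExceeded:
		return "deadline exceeded" // e.g. the context expired
	case codes.Unavailable:
		return "transport failure" // e.g. a transport.ConnectionError
	default:
		return "code: " + st.Code().String()
	}
}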
diff --git a/go/vendor/google.golang.org/grpc/grpclb.go b/go/vendor/google.golang.org/grpc/grpclb.go
new file mode 100644
index 0000000..d14a5d4
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/grpclb.go
@@ -0,0 +1,342 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+const (
+ lbTokeyKey = "lb-token"
+ defaultFallbackTimeout = 10 * time.Second
+ grpclbName = "grpclb"
+)
+
+func convertDuration(d *lbpb.Duration) time.Duration {
+ if d == nil {
+ return 0
+ }
+ return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+}
+
+// Client API for LoadBalancer service.
+// Mostly copied from the generated pb.go file
+// to avoid a circular dependency.
+type loadBalancerClient struct {
+ cc *ClientConn
+}
+
+func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
+ desc := &StreamDesc{
+ StreamName: "BalanceLoad",
+ ServerStreams: true,
+ ClientStreams: true,
+ }
+ stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &balanceLoadClientStream{stream}
+ return x, nil
+}
+
+type balanceLoadClientStream struct {
+ ClientStream
+}
+
+func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
+ m := new(lbpb.LoadBalanceResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func init() {
+ balancer.Register(newLBBuilder())
+}
+
+// newLBBuilder creates a builder for grpclb.
+func newLBBuilder() balancer.Builder {
+ return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
+}
+
+// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
+// fallbackTimeout. If no response is received from the remote balancer within
+// fallbackTimeout, the backend addresses from the resolved address list will be
+// used.
+//
+// Only call this function when a non-default fallback timeout is needed.
+func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
+ return &lbBuilder{
+ fallbackTimeout: fallbackTimeout,
+ }
+}
+
+type lbBuilder struct {
+ fallbackTimeout time.Duration
+}
+
+func (b *lbBuilder) Name() string {
+ return grpclbName
+}
+
+func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+ // This generates a manual resolver builder with a random scheme. This
+ // scheme will be used to dial to remote LB, so we can send filtered address
+ // updates to remote LB ClientConn using this manual resolver.
+ scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
+ r := &lbManualResolver{scheme: scheme, ccb: cc}
+
+ var target string
+ targetSplitted := strings.Split(cc.Target(), ":///")
+ if len(targetSplitted) < 2 {
+ target = cc.Target()
+ } else {
+ target = targetSplitted[1]
+ }
+
+ lb := &lbBalancer{
+ cc: cc,
+ target: target,
+ opt: opt,
+ fallbackTimeout: b.fallbackTimeout,
+ doneCh: make(chan struct{}),
+
+ manualResolver: r,
+ csEvltr: &connectivityStateEvaluator{},
+ subConns: make(map[resolver.Address]balancer.SubConn),
+ scStates: make(map[balancer.SubConn]connectivity.State),
+ picker: &errPicker{err: balancer.ErrNoSubConnAvailable},
+ clientStats: &rpcStats{},
+ }
+
+ return lb
+}
+
+type lbBalancer struct {
+ cc balancer.ClientConn
+ target string
+ opt balancer.BuildOptions
+ fallbackTimeout time.Duration
+ doneCh chan struct{}
+
+ // manualResolver is used in the remote LB ClientConn inside grpclb. When
+ // resolved address updates are received by grpclb, filtered updates will be
+ // sent to remote LB ClientConn through this resolver.
+ manualResolver *lbManualResolver
+ // The ClientConn to talk to the remote balancer.
+ ccRemoteLB *ClientConn
+
+ // Support client side load reporting. Each picker gets a reference to this,
+ // and will update its content.
+ clientStats *rpcStats
+
+ mu sync.Mutex // guards everything following.
+ // The full server list including drops, used to check if the newly received
+ // serverList contains anything new. Each generated picker will also have
+ // reference to this list to do the first layer pick.
+ fullServerList []*lbpb.Server
+ // All backends addresses, with metadata set to nil. This list contains all
+ // backend addresses in the same order and with the same duplicates as in
+ // serverlist. When generating picker, a SubConn slice with the same order
+ // but with only READY SCs will be generated.
+ backendAddrs []resolver.Address
+ // Roundrobin functionalities.
+ csEvltr *connectivityStateEvaluator
+ state connectivity.State
+ subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn.
+ scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
+ picker balancer.Picker
+ // Support fallback to resolved backend addresses if there's no response
+ // from remote balancer within fallbackTimeout.
+ fallbackTimerExpired bool
+ serverListReceived bool
+ // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
+ // when resolved address updates are received, and read in the goroutine
+ // handling fallback.
+ resolvedBackendAddrs []resolver.Address
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker from
+// it. The picker
+// - always returns ErrTransientFailure if the balancer is in TransientFailure,
+// - does two layer roundrobin pick otherwise.
+// Caller must hold lb.mu.
+func (lb *lbBalancer) regeneratePicker() {
+ if lb.state == connectivity.TransientFailure {
+ lb.picker = &errPicker{err: balancer.ErrTransientFailure}
+ return
+ }
+ var readySCs []balancer.SubConn
+ for _, a := range lb.backendAddrs {
+ if sc, ok := lb.subConns[a]; ok {
+ if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs = append(readySCs, sc)
+ }
+ }
+ }
+
+ if len(lb.fullServerList) <= 0 {
+ if len(readySCs) <= 0 {
+ lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+ return
+ }
+ lb.picker = &rrPicker{subConns: readySCs}
+ return
+ }
+ lb.picker = &lbPicker{
+ serverList: lb.fullServerList,
+ subConns: readySCs,
+ stats: lb.clientStats,
+ }
+ return
+}
+
+func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
+ lb.mu.Lock()
+ defer lb.mu.Unlock()
+
+ oldS, ok := lb.scStates[sc]
+ if !ok {
+ grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+ return
+ }
+ lb.scStates[sc] = s
+ switch s {
+ case connectivity.Idle:
+ sc.Connect()
+ case connectivity.Shutdown:
+ // When an address was removed by the resolver, the balancer called RemoveSubConn but
+ // kept the sc's state in scStates. Remove state for this sc here.
+ delete(lb.scStates, sc)
+ }
+
+ oldAggrState := lb.state
+ lb.state = lb.csEvltr.recordTransition(oldS, s)
+
+ // Regenerate picker when one of the following happens:
+ // - this sc became ready from not-ready
+ // - this sc became not-ready from ready
+ // - the aggregated state of balancer became TransientFailure from non-TransientFailure
+ // - the aggregated state of balancer became non-TransientFailure from TransientFailure
+ if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
+ (lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+ lb.regeneratePicker()
+ }
+
+ lb.cc.UpdateBalancerState(lb.state, lb.picker)
+ return
+}
+
+// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
+// resolved backends (backends received from resolver, not from remote balancer)
+// if no connection to remote balancers was successful.
+func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
+ timer := time.NewTimer(fallbackTimeout)
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ case <-lb.doneCh:
+ return
+ }
+ lb.mu.Lock()
+ if lb.serverListReceived {
+ lb.mu.Unlock()
+ return
+ }
+ lb.fallbackTimerExpired = true
+ lb.refreshSubConns(lb.resolvedBackendAddrs)
+ lb.mu.Unlock()
+}
+
+// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
+// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
+// connections.
+func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+ grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
+ if len(addrs) <= 0 {
+ return
+ }
+
+ var remoteBalancerAddrs, backendAddrs []resolver.Address
+ for _, a := range addrs {
+ if a.Type == resolver.GRPCLB {
+ remoteBalancerAddrs = append(remoteBalancerAddrs, a)
+ } else {
+ backendAddrs = append(backendAddrs, a)
+ }
+ }
+
+ if lb.ccRemoteLB == nil {
+ if len(remoteBalancerAddrs) <= 0 {
+ grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
+ return
+ }
+ // First time receiving resolved addresses, create a cc to remote
+ // balancers.
+ lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
+ // Start the fallback goroutine.
+ go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
+ }
+
+ // cc to remote balancers uses lb.manualResolver. Send the updated remote
+ // balancer addresses to it through manualResolver.
+ lb.manualResolver.NewAddress(remoteBalancerAddrs)
+
+ lb.mu.Lock()
+ lb.resolvedBackendAddrs = backendAddrs
+ // If serverListReceived is true, connection to remote balancer was
+ // successful and there's no need to do fallback anymore.
+ // If fallbackTimerExpired is false, fallback hasn't happened yet.
+ if !lb.serverListReceived && lb.fallbackTimerExpired {
+ // This means we received a new list of resolved backends, and we are
+ // still in fallback mode. Need to update the list of backends we are
+ // using to the new list of backends.
+ lb.refreshSubConns(lb.resolvedBackendAddrs)
+ }
+ lb.mu.Unlock()
+}
+
+func (lb *lbBalancer) Close() {
+ select {
+ case <-lb.doneCh:
+ return
+ default:
+ }
+ close(lb.doneCh)
+ if lb.ccRemoteLB != nil {
+ lb.ccRemoteLB.Close()
+ }
+}
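Aside (not part of this patch): NewLBBuilderWithFallbackTimeout is the only exported knob in this file. A hedged sketch of overriding the default 10s fallback; the 3s value and the lbsetup package name are illustrative, and balancer.Register simply overwrites the builder installed by this package's own init because the names match.

package lbsetup

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

func init() {
	// Re-register grpclb with a shorter fallback window than the default.
	balancer.Register(grpc.NewLBBuilderWithFallbackTimeout(3 * time.Second))
}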
diff --git a/go/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/go/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
new file mode 100644
index 0000000..f4a2712
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
@@ -0,0 +1,615 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpc_lb_v1/messages/messages.proto
+
+/*
+Package messages is a generated protocol buffer package.
+
+It is generated from these files:
+ grpc_lb_v1/messages/messages.proto
+
+It has these top-level messages:
+ Duration
+ Timestamp
+ LoadBalanceRequest
+ InitialLoadBalanceRequest
+ ClientStats
+ LoadBalanceResponse
+ InitialLoadBalanceResponse
+ ServerList
+ Server
+*/
+package messages
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Duration struct {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Duration) Reset() { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Duration) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Timestamp) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+type LoadBalanceRequest struct {
+ // Types that are valid to be assigned to LoadBalanceRequestType:
+ // *LoadBalanceRequest_InitialRequest
+ // *LoadBalanceRequest_ClientStats
+ LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
+}
+
+func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} }
+func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
+func (*LoadBalanceRequest) ProtoMessage() {}
+func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+type isLoadBalanceRequest_LoadBalanceRequestType interface {
+ isLoadBalanceRequest_LoadBalanceRequestType()
+}
+
+type LoadBalanceRequest_InitialRequest struct {
+ InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
+}
+type LoadBalanceRequest_ClientStats struct {
+ ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
+}
+
+func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
+func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
+
+func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
+ if m != nil {
+ return m.LoadBalanceRequestType
+ }
+ return nil
+}
+
+func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
+ if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
+ return x.InitialRequest
+ }
+ return nil
+}
+
+func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
+ if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
+ return x.ClientStats
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
+ (*LoadBalanceRequest_InitialRequest)(nil),
+ (*LoadBalanceRequest_ClientStats)(nil),
+ }
+}
+
+func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*LoadBalanceRequest)
+ // load_balance_request_type
+ switch x := m.LoadBalanceRequestType.(type) {
+ case *LoadBalanceRequest_InitialRequest:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.InitialRequest); err != nil {
+ return err
+ }
+ case *LoadBalanceRequest_ClientStats:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ClientStats); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*LoadBalanceRequest)
+ switch tag {
+ case 1: // load_balance_request_type.initial_request
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(InitialLoadBalanceRequest)
+ err := b.DecodeMessage(msg)
+ m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
+ return true, err
+ case 2: // load_balance_request_type.client_stats
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ClientStats)
+ err := b.DecodeMessage(msg)
+ m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*LoadBalanceRequest)
+ // load_balance_request_type
+ switch x := m.LoadBalanceRequestType.(type) {
+ case *LoadBalanceRequest_InitialRequest:
+ s := proto.Size(x.InitialRequest)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *LoadBalanceRequest_ClientStats:
+ s := proto.Size(x.ClientStats)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type InitialLoadBalanceRequest struct {
+ // Name of the load balanced service (e.g., balancer.service.com).
+ // Its length should be less than 256 bytes.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} }
+func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
+func (*InitialLoadBalanceRequest) ProtoMessage() {}
+func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *InitialLoadBalanceRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// Contains client level statistics that are useful to load balancing. Each
+// count except the timestamp should be reset to zero after reporting the stats.
+type ClientStats struct {
+ // The timestamp of generating the report.
+ Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
+ // The total number of RPCs that started.
+ NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
+ // The total number of RPCs that finished.
+ NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
+ // The total number of RPCs that were dropped by the client because of rate
+ // limiting.
+ NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
+ // The total number of RPCs that were dropped by the client because of load
+ // balancing.
+ NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
+ // The total number of RPCs that failed to reach a server except dropped RPCs.
+ NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
+ // The total number of RPCs that finished and are known to have been received
+ // by a server.
+ NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
+}
+
+func (m *ClientStats) Reset() { *m = ClientStats{} }
+func (m *ClientStats) String() string { return proto.CompactTextString(m) }
+func (*ClientStats) ProtoMessage() {}
+func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *ClientStats) GetTimestamp() *Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+func (m *ClientStats) GetNumCallsStarted() int64 {
+ if m != nil {
+ return m.NumCallsStarted
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinished() int64 {
+ if m != nil {
+ return m.NumCallsFinished
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
+ if m != nil {
+ return m.NumCallsFinishedWithDropForRateLimiting
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
+ if m != nil {
+ return m.NumCallsFinishedWithDropForLoadBalancing
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
+ if m != nil {
+ return m.NumCallsFinishedWithClientFailedToSend
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
+ if m != nil {
+ return m.NumCallsFinishedKnownReceived
+ }
+ return 0
+}
+
+type LoadBalanceResponse struct {
+ // Types that are valid to be assigned to LoadBalanceResponseType:
+ // *LoadBalanceResponse_InitialResponse
+ // *LoadBalanceResponse_ServerList
+ LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
+}
+
+func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} }
+func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
+func (*LoadBalanceResponse) ProtoMessage() {}
+func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+type isLoadBalanceResponse_LoadBalanceResponseType interface {
+ isLoadBalanceResponse_LoadBalanceResponseType()
+}
+
+type LoadBalanceResponse_InitialResponse struct {
+ InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
+}
+type LoadBalanceResponse_ServerList struct {
+ ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
+}
+
+func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
+func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {}
+
+func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
+ if m != nil {
+ return m.LoadBalanceResponseType
+ }
+ return nil
+}
+
+func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
+ if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
+ return x.InitialResponse
+ }
+ return nil
+}
+
+func (m *LoadBalanceResponse) GetServerList() *ServerList {
+ if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
+ return x.ServerList
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
+ (*LoadBalanceResponse_InitialResponse)(nil),
+ (*LoadBalanceResponse_ServerList)(nil),
+ }
+}
+
+func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*LoadBalanceResponse)
+ // load_balance_response_type
+ switch x := m.LoadBalanceResponseType.(type) {
+ case *LoadBalanceResponse_InitialResponse:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.InitialResponse); err != nil {
+ return err
+ }
+ case *LoadBalanceResponse_ServerList:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ServerList); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*LoadBalanceResponse)
+ switch tag {
+ case 1: // load_balance_response_type.initial_response
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(InitialLoadBalanceResponse)
+ err := b.DecodeMessage(msg)
+ m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
+ return true, err
+ case 2: // load_balance_response_type.server_list
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ServerList)
+ err := b.DecodeMessage(msg)
+ m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*LoadBalanceResponse)
+ // load_balance_response_type
+ switch x := m.LoadBalanceResponseType.(type) {
+ case *LoadBalanceResponse_InitialResponse:
+ s := proto.Size(x.InitialResponse)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *LoadBalanceResponse_ServerList:
+ s := proto.Size(x.ServerList)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type InitialLoadBalanceResponse struct {
+ // This is an application layer redirect that indicates the client should use
+ // the specified server for load balancing. When this field is non-empty in
+ // the response, the client should open a separate connection to the
+ // load_balancer_delegate and call the BalanceLoad method. Its length should
+ // be less than 64 bytes.
+ LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
+ // This interval defines how often the client should send the client stats
+ // to the load balancer. Stats should only be reported when the duration is
+ // positive.
+ ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
+}
+
+func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} }
+func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
+func (*InitialLoadBalanceResponse) ProtoMessage() {}
+func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
+ if m != nil {
+ return m.LoadBalancerDelegate
+ }
+ return ""
+}
+
+func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
+ if m != nil {
+ return m.ClientStatsReportInterval
+ }
+ return nil
+}
+
+type ServerList struct {
+ // Contains a list of servers selected by the load balancer. The list will
+ // be updated when server resolutions change or as needed to balance load
+ // across more servers. The client should consume the server list in order
+ // unless instructed otherwise via the client_config.
+ Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
+}
+
+func (m *ServerList) Reset() { *m = ServerList{} }
+func (m *ServerList) String() string { return proto.CompactTextString(m) }
+func (*ServerList) ProtoMessage() {}
+func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *ServerList) GetServers() []*Server {
+ if m != nil {
+ return m.Servers
+ }
+ return nil
+}
+
+// Contains server information. When none of the [drop_for_*] fields are true,
+// use the other fields. When drop_for_rate_limiting is true, ignore all other
+// fields. Use drop_for_load_balancing only when it is true and
+// drop_for_rate_limiting is false.
+type Server struct {
+ // A resolved address for the server, serialized in network-byte-order. It may
+ // either be an IPv4 or IPv6 address.
+ IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
+ // A resolved port number for the server.
+ Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
+ // An opaque but printable token given to the frontend for each pick. All
+ // frontend requests for that pick must include the token in its initial
+ // metadata. The token is used by the backend to verify the request and to
+ // allow the backend to report load to the gRPC LB system.
+ //
+ // Its length is variable but less than 50 bytes.
+ LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
+ // Indicates whether this particular request should be dropped by the client
+ // for rate limiting.
+ DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
+ // Indicates whether this particular request should be dropped by the client
+ // for load balancing.
+ DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
+}
+
+func (m *Server) Reset() { *m = Server{} }
+func (m *Server) String() string { return proto.CompactTextString(m) }
+func (*Server) ProtoMessage() {}
+func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *Server) GetIpAddress() []byte {
+ if m != nil {
+ return m.IpAddress
+ }
+ return nil
+}
+
+func (m *Server) GetPort() int32 {
+ if m != nil {
+ return m.Port
+ }
+ return 0
+}
+
+func (m *Server) GetLoadBalanceToken() string {
+ if m != nil {
+ return m.LoadBalanceToken
+ }
+ return ""
+}
+
+func (m *Server) GetDropForRateLimiting() bool {
+ if m != nil {
+ return m.DropForRateLimiting
+ }
+ return false
+}
+
+func (m *Server) GetDropForLoadBalancing() bool {
+ if m != nil {
+ return m.DropForLoadBalancing
+ }
+ return false
+}
+
+func init() {
+ proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
+ proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
+ proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
+ proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
+ proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
+ proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
+ proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
+ proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
+ proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
+}
+
+func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 709 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b,
+ 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69,
+ 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55,
+ 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28,
+ 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f,
+ 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb,
+ 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56,
+ 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3,
+ 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a,
+ 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18,
+ 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8,
+ 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a,
+ 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc,
+ 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d,
+ 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f,
+ 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42,
+ 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b,
+ 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf,
+ 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60,
+ 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3,
+ 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29,
+ 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9,
+ 0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1,
+ 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e,
+ 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd,
+ 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a,
+ 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa,
+ 0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31,
+ 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a,
+ 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79,
+ 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8,
+ 0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89,
+ 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f,
+ 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7,
+ 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a,
+ 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62,
+ 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d,
+ 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77,
+ 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc,
+ 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76,
+ 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b,
+ 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06,
+ 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd,
+ 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86,
+ 0xa6, 0x4a, 0x06, 0x00, 0x00,
+}
diff --git a/go/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/go/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto
new file mode 100644
index 0000000..42d99c1
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto
@@ -0,0 +1,155 @@
+// Copyright 2016 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package grpc.lb.v1;
+option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages";
+
+message Duration {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive.
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
+
+message Timestamp {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
+
+message LoadBalanceRequest {
+ oneof load_balance_request_type {
+ // This message should be sent on the first request to the load balancer.
+ InitialLoadBalanceRequest initial_request = 1;
+
+ // The client stats should be periodically reported to the load balancer
+ // based on the duration defined in the InitialLoadBalanceResponse.
+ ClientStats client_stats = 2;
+ }
+}
+
+message InitialLoadBalanceRequest {
+ // Name of the load balanced service (e.g., balancer.service.com).
+ // Its length should be less than 256 bytes.
+ string name = 1;
+}
+
+// Contains client level statistics that are useful to load balancing. Each
+// count except the timestamp should be reset to zero after reporting the stats.
+message ClientStats {
+ // The timestamp of generating the report.
+ Timestamp timestamp = 1;
+
+ // The total number of RPCs that started.
+ int64 num_calls_started = 2;
+
+ // The total number of RPCs that finished.
+ int64 num_calls_finished = 3;
+
+ // The total number of RPCs that were dropped by the client because of rate
+ // limiting.
+ int64 num_calls_finished_with_drop_for_rate_limiting = 4;
+
+ // The total number of RPCs that were dropped by the client because of load
+ // balancing.
+ int64 num_calls_finished_with_drop_for_load_balancing = 5;
+
+ // The total number of RPCs that failed to reach a server except dropped RPCs.
+ int64 num_calls_finished_with_client_failed_to_send = 6;
+
+ // The total number of RPCs that finished and are known to have been received
+ // by a server.
+ int64 num_calls_finished_known_received = 7;
+}
+
+message LoadBalanceResponse {
+ oneof load_balance_response_type {
+ // This message should be sent on the first response to the client.
+ InitialLoadBalanceResponse initial_response = 1;
+
+ // Contains the list of servers selected by the load balancer. The client
+ // should send requests to these servers in the specified order.
+ ServerList server_list = 2;
+ }
+}
+
+message InitialLoadBalanceResponse {
+ // This is an application layer redirect that indicates the client should use
+ // the specified server for load balancing. When this field is non-empty in
+ // the response, the client should open a separate connection to the
+ // load_balancer_delegate and call the BalanceLoad method. Its length should
+ // be less than 64 bytes.
+ string load_balancer_delegate = 1;
+
+ // This interval defines how often the client should send the client stats
+ // to the load balancer. Stats should only be reported when the duration is
+ // positive.
+ Duration client_stats_report_interval = 2;
+}
+
+message ServerList {
+ // Contains a list of servers selected by the load balancer. The list will
+ // be updated when server resolutions change or as needed to balance load
+ // across more servers. The client should consume the server list in order
+ // unless instructed otherwise via the client_config.
+ repeated Server servers = 1;
+
+ // Was google.protobuf.Duration expiration_interval.
+ reserved 3;
+}
+
+// Contains server information. When none of the [drop_for_*] fields are true,
+// use the other fields. When drop_for_rate_limiting is true, ignore all other
+// fields. Use drop_for_load_balancing only when it is true and
+// drop_for_rate_limiting is false.
+message Server {
+ // A resolved address for the server, serialized in network-byte-order. It may
+ // either be an IPv4 or IPv6 address.
+ bytes ip_address = 1;
+
+ // A resolved port number for the server.
+ int32 port = 2;
+
+ // An opaque but printable token given to the frontend for each pick. All
+ // frontend requests for that pick must include the token in its initial
+ // metadata. The token is used by the backend to verify the request and to
+ // allow the backend to report load to the gRPC LB system.
+ //
+ // Its length is variable but less than 50 bytes.
+ string load_balance_token = 3;
+
+ // Indicates whether this particular request should be dropped by the client
+ // for rate limiting.
+ bool drop_for_rate_limiting = 4;
+
+ // Indicates whether this particular request should be dropped by the client
+ // for load balancing.
+ bool drop_for_load_balancing = 5;
+}
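A minimal sketch of how a client might build these request messages with the generated Go types above; the lbpb import alias follows the convention used in grpclb_picker.go below, and the service name and counts are placeholders.

    package main

    import (
    	"fmt"

    	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
    )

    func main() {
    	// The first message on the BalanceLoad stream identifies the service.
    	initReq := &lbpb.LoadBalanceRequest{
    		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
    			InitialRequest: &lbpb.InitialLoadBalanceRequest{Name: "balancer.service.com"},
    		},
    	}
    	// Later messages carry periodic client stats.
    	statsReq := &lbpb.LoadBalanceRequest{
    		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
    			ClientStats: &lbpb.ClientStats{NumCallsStarted: 10, NumCallsFinished: 9},
    		},
    	}
    	fmt.Println(initReq.GetInitialRequest().GetName(), statsReq.GetClientStats().GetNumCallsStarted())
    }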
diff --git a/go/vendor/google.golang.org/grpc/grpclb_picker.go b/go/vendor/google.golang.org/grpc/grpclb_picker.go
new file mode 100644
index 0000000..872c7cc
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/grpclb_picker.go
@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+ "google.golang.org/grpc/status"
+)
+
+type rpcStats struct {
+ NumCallsStarted int64
+ NumCallsFinished int64
+ NumCallsFinishedWithDropForRateLimiting int64
+ NumCallsFinishedWithDropForLoadBalancing int64
+ NumCallsFinishedWithClientFailedToSend int64
+ NumCallsFinishedKnownReceived int64
+}
+
+// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
+func (s *rpcStats) toClientStats() *lbpb.ClientStats {
+ stats := &lbpb.ClientStats{
+ NumCallsStarted: atomic.SwapInt64(&s.NumCallsStarted, 0),
+ NumCallsFinished: atomic.SwapInt64(&s.NumCallsFinished, 0),
+ NumCallsFinishedWithDropForRateLimiting: atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0),
+ NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0),
+ NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0),
+ NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0),
+ }
+ return stats
+}
+
+func (s *rpcStats) dropForRateLimiting() {
+ atomic.AddInt64(&s.NumCallsStarted, 1)
+ atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1)
+ atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) dropForLoadBalancing() {
+ atomic.AddInt64(&s.NumCallsStarted, 1)
+ atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1)
+ atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) failedToSend() {
+ atomic.AddInt64(&s.NumCallsStarted, 1)
+ atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1)
+ atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) knownReceived() {
+ atomic.AddInt64(&s.NumCallsStarted, 1)
+ atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1)
+ atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+type errPicker struct {
+ // Pick always returns this err.
+ err error
+}
+
+func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ return nil, nil, p.err
+}
+
+// rrPicker does roundrobin on subConns. It's typically used when there's no
+// response from the remote balancer, and grpclb falls back to the resolved
+// backends.
+//
+// It is guaranteed that len(subConns) > 0.
+type rrPicker struct {
+ mu sync.Mutex
+ subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
+ subConnsNext int
+}
+
+func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ sc := p.subConns[p.subConnsNext]
+ p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
+ return sc, nil, nil
+}
+
+// lbPicker does two layers of picks:
+//
+// First layer: roundrobin on all servers in serverList, including drops and backends.
+// - If it picks a drop, the RPC will fail as being dropped.
+// - If it picks a backend, do a second layer pick to pick the real backend.
+//
+// Second layer: roundrobin on all READY backends.
+//
+// It's guaranteed that len(serverList) > 0.
+type lbPicker struct {
+ mu sync.Mutex
+ serverList []*lbpb.Server
+ serverListNext int
+ subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
+ subConnsNext int
+
+ stats *rpcStats
+}
+
+func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ // Layer one roundrobin on serverList.
+ s := p.serverList[p.serverListNext]
+ p.serverListNext = (p.serverListNext + 1) % len(p.serverList)
+
+ // If it's a drop, return an error and fail the RPC.
+ if s.DropForRateLimiting {
+ p.stats.dropForRateLimiting()
+ return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
+ }
+ if s.DropForLoadBalancing {
+ p.stats.dropForLoadBalancing()
+ return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
+ }
+
+ // If it's not a drop but there are no ready subConns.
+ if len(p.subConns) <= 0 {
+ return nil, nil, balancer.ErrNoSubConnAvailable
+ }
+
+ // Return the next ready subConn in the list, also collect rpc stats.
+ sc := p.subConns[p.subConnsNext]
+ p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
+ done := func(info balancer.DoneInfo) {
+ if !info.BytesSent {
+ p.stats.failedToSend()
+ } else if info.BytesReceived {
+ p.stats.knownReceived()
+ }
+ }
+ return sc, done, nil
+}
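The two-layer pick described in the lbPicker comment above can be illustrated with a standalone, hypothetical sketch: layer one round-robins over the balancer's server list (including drop entries), and layer two round-robins over the READY backends. The addresses below are made up.

    package main

    import "fmt"

    type entry struct {
    	addr string
    	drop bool
    }

    func main() {
    	// Layer one: the server list from the balancer, including a drop entry.
    	serverList := []entry{{"10.0.0.1:80", false}, {"", true}, {"10.0.0.2:80", false}}
    	// Layer two: the backends that are currently READY.
    	ready := []string{"10.0.0.1:80", "10.0.0.2:80"}
    	next, readyNext := 0, 0
    	for i := 0; i < 6; i++ {
    		s := serverList[next]
    		next = (next + 1) % len(serverList)
    		if s.drop {
    			// A drop entry fails the pick instead of selecting a backend.
    			fmt.Println("pick", i, "-> dropped")
    			continue
    		}
    		fmt.Println("pick", i, "->", ready[readyNext])
    		readyNext = (readyNext + 1) % len(ready)
    	}
    }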
diff --git a/go/vendor/google.golang.org/grpc/grpclb_remote_balancer.go b/go/vendor/google.golang.org/grpc/grpclb_remote_balancer.go
new file mode 100644
index 0000000..1b580df
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/grpclb_remote_balancer.go
@@ -0,0 +1,254 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
+)
+
+// processServerList updates the balancer's internal state, creates/removes
+// SubConns, and regenerates the picker using the received serverList.
+func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
+ grpclog.Infof("lbBalancer: processing server list: %+v", l)
+ lb.mu.Lock()
+ defer lb.mu.Unlock()
+
+ // Set serverListReceived to true so fallback will not take effect if it has
+ // not hit timeout.
+ lb.serverListReceived = true
+
+ // If the new server list == old server list, do nothing.
+ if reflect.DeepEqual(lb.fullServerList, l.Servers) {
+ grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
+ return
+ }
+ lb.fullServerList = l.Servers
+
+ var backendAddrs []resolver.Address
+ for _, s := range l.Servers {
+ if s.DropForLoadBalancing || s.DropForRateLimiting {
+ continue
+ }
+
+ md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken)
+ ip := net.IP(s.IpAddress)
+ ipStr := ip.String()
+ if ip.To4() == nil {
+ // Add square brackets to ipv6 addresses, otherwise net.Dial() and
+ // net.SplitHostPort() will return a "too many colons" error.
+ ipStr = fmt.Sprintf("[%s]", ipStr)
+ }
+ addr := resolver.Address{
+ Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
+ Metadata: &md,
+ }
+
+ backendAddrs = append(backendAddrs, addr)
+ }
+
+ // Call refreshSubConns to create/remove SubConns.
+ backendsUpdated := lb.refreshSubConns(backendAddrs)
+ // If no backend address changed, no SubConn was created or removed. But
+ // since the full serverList was different, there may still be changes in
+ // drops or pick weights (a different number of duplicates). We still need
+ // to update the picker with the full list.
+ if !backendsUpdated {
+ lb.regeneratePicker()
+ lb.cc.UpdateBalancerState(lb.state, lb.picker)
+ }
+}
+
+// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
+// indicating whether the backendAddrs are different from the cached
+// backendAddrs (whether any SubConn was created or removed).
+// Caller must hold lb.mu.
+func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
+ lb.backendAddrs = nil
+ var backendsUpdated bool
+ // addrsSet is the set converted from backendAddrs; it's used for quick
+ // address lookups.
+ addrsSet := make(map[resolver.Address]struct{})
+ // Create new SubConns.
+ for _, addr := range backendAddrs {
+ addrWithoutMD := addr
+ addrWithoutMD.Metadata = nil
+ addrsSet[addrWithoutMD] = struct{}{}
+ lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD)
+
+ if _, ok := lb.subConns[addrWithoutMD]; !ok {
+ backendsUpdated = true
+
+ // Use addrWithMD to create the SubConn.
+ sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err)
+ continue
+ }
+ lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
+ lb.scStates[sc] = connectivity.Idle
+ sc.Connect()
+ }
+ }
+
+ for a, sc := range lb.subConns {
+ // a was removed by resolver.
+ if _, ok := addrsSet[a]; !ok {
+ backendsUpdated = true
+
+ lb.cc.RemoveSubConn(sc)
+ delete(lb.subConns, a)
+ // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+ // The entry will be deleted in HandleSubConnStateChange.
+ }
+ }
+
+ return backendsUpdated
+}
+
+func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
+ for {
+ reply, err := s.Recv()
+ if err != nil {
+ return fmt.Errorf("grpclb: failed to recv server list: %v", err)
+ }
+ if serverList := reply.GetServerList(); serverList != nil {
+ lb.processServerList(serverList)
+ }
+ }
+}
+
+func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ case <-s.Context().Done():
+ return
+ }
+ stats := lb.clientStats.toClientStats()
+ t := time.Now()
+ stats.Timestamp = &lbpb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+ if err := s.Send(&lbpb.LoadBalanceRequest{
+ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
+ ClientStats: stats,
+ },
+ }); err != nil {
+ return
+ }
+ }
+}
+
+func (lb *lbBalancer) callRemoteBalancer() error {
+ lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := lbClient.BalanceLoad(ctx, FailFast(false))
+ if err != nil {
+ return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
+ }
+
+ // grpclb handshake on the stream.
+ initReq := &lbpb.LoadBalanceRequest{
+ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
+ InitialRequest: &lbpb.InitialLoadBalanceRequest{
+ Name: lb.target,
+ },
+ },
+ }
+ if err := stream.Send(initReq); err != nil {
+ return fmt.Errorf("grpclb: failed to send init request: %v", err)
+ }
+ reply, err := stream.Recv()
+ if err != nil {
+ return fmt.Errorf("grpclb: failed to recv init response: %v", err)
+ }
+ initResp := reply.GetInitialResponse()
+ if initResp == nil {
+ return fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
+ }
+ if initResp.LoadBalancerDelegate != "" {
+ return fmt.Errorf("grpclb: Delegation is not supported")
+ }
+
+ go func() {
+ if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
+ lb.sendLoadReport(stream, d)
+ }
+ }()
+ return lb.readServerList(stream)
+}
+
+func (lb *lbBalancer) watchRemoteBalancer() {
+ for {
+ err := lb.callRemoteBalancer()
+ select {
+ case <-lb.doneCh:
+ return
+ default:
+ if err != nil {
+ grpclog.Error(err)
+ }
+ }
+
+ }
+}
+
+func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
+ var dopts []DialOption
+ if creds := lb.opt.DialCreds; creds != nil {
+ if err := creds.OverrideServerName(remoteLBName); err == nil {
+ dopts = append(dopts, WithTransportCredentials(creds))
+ } else {
+ grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err)
+ dopts = append(dopts, WithInsecure())
+ }
+ } else {
+ dopts = append(dopts, WithInsecure())
+ }
+ if lb.opt.Dialer != nil {
+ // WithDialer takes a different type of function, so we instead use a
+ // special DialOption here.
+ dopts = append(dopts, withContextDialer(lb.opt.Dialer))
+ }
+ // Explicitly set pickfirst as the balancer.
+ dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
+ dopts = append(dopts, withResolverBuilder(lb.manualResolver))
+ // Dial using manualResolver.Scheme, which is a random scheme generated
+ // when grpclb is initialized. The target name is not important.
+ cc, err := Dial("grpclb:///grpclb.server", dopts...)
+ if err != nil {
+ grpclog.Fatalf("failed to dial: %v", err)
+ }
+ lb.ccRemoteLB = cc
+ go lb.watchRemoteBalancer()
+}
diff --git a/go/vendor/google.golang.org/grpc/grpclb_util.go b/go/vendor/google.golang.org/grpc/grpclb_util.go
new file mode 100644
index 0000000..93ab2db
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/grpclb_util.go
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/resolver"
+)
+
+// The parent ClientConn should re-resolve when grpclb loses connection to the
+// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
+// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's
+// ResolveNow, and eventually results in re-resolve happening in parent
+// ClientConn's resolver (DNS for example).
+//
+// parent
+// ClientConn
+// +-----------------------------------------------------------------+
+// | parent +---------------------------------+ |
+// | DNS ClientConn | grpclb | |
+// | resolver balancerWrapper | | |
+// | + + | grpclb grpclb | |
+// | | | | ManualResolver ClientConn | |
+// | | | | + + | |
+// | | | | | | Transient | |
+// | | | | | | Failure | |
+// | | | | | <--------- | | |
+// | | | <--------------- | ResolveNow | | |
+// | | <--------- | ResolveNow | | | | |
+// | | ResolveNow | | | | | |
+// | | | | | | | |
+// | + + | + + | |
+// | +---------------------------------+ |
+// +-----------------------------------------------------------------+
+
+// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
+// resolver with a special ResolveNow() function.
+//
+// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
+// so when the grpclb client loses contact with remote balancers, the parent
+// ClientConn's resolver will re-resolve.
+type lbManualResolver struct {
+ scheme string
+ ccr resolver.ClientConn
+
+ ccb balancer.ClientConn
+}
+
+func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
+ r.ccr = cc
+ return r, nil
+}
+
+func (r *lbManualResolver) Scheme() string {
+ return r.scheme
+}
+
+// ResolveNow calls resolveNow on the parent ClientConn.
+func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) {
+ r.ccb.ResolveNow(o)
+}
+
+// Close is a noop for Resolver.
+func (*lbManualResolver) Close() {}
+
+// NewAddress calls cc.NewAddress.
+func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
+ r.ccr.NewAddress(addrs)
+}
+
+// NewServiceConfig calls cc.NewServiceConfig.
+func (r *lbManualResolver) NewServiceConfig(sc string) {
+ r.ccr.NewServiceConfig(sc)
+}
diff --git a/go/vendor/google.golang.org/grpc/grpclog/grpclog.go b/go/vendor/google.golang.org/grpc/grpclog/grpclog.go
new file mode 100644
index 0000000..16a7d88
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -0,0 +1,123 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog defines logging for grpc.
+//
+// All logs in the transport package go only to verbose level 2.
+// All logs in other grpc packages are logged regardless of the verbosity level.
+//
+// In the default logger,
+// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
+// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import "os"
+
+var logger = newLoggerV2()
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(l int) bool {
+ return logger.V(l)
+}
+
+// Info logs to the INFO log.
+func Info(args ...interface{}) {
+ logger.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...interface{}) {
+ logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...interface{}) {
+ logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...interface{}) {
+ logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...interface{}) {
+ logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...interface{}) {
+ logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+ logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+ logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+ logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+ logger.Fatal(args...)
+ // Make sure fatal logs will exit.
+ os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+ logger.Fatalf(format, args...)
+ // Make sure fatal logs will exit.
+ os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+ logger.Fatalln(args...)
+ // Make sure fatal logs will exit.
+ os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+// Deprecated: use Info.
+func Print(args ...interface{}) {
+ logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+// Deprecated: use Infof.
+func Printf(format string, args ...interface{}) {
+ logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+// Deprecated: use Infoln.
+func Println(args ...interface{}) {
+ logger.Infoln(args...)
+}
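A minimal usage sketch of the package-level helpers defined above; the endpoint string is a placeholder, and where the output actually goes depends on the GRPC_GO_LOG_SEVERITY_LEVEL and GRPC_GO_LOG_VERBOSITY_LEVEL environment variables described in the package comment.

    package main

    import "google.golang.org/grpc/grpclog"

    func main() {
    	// With GRPC_GO_LOG_SEVERITY_LEVEL=INFO, this line is written to stderr.
    	grpclog.Infof("connected to %s", "example.com:443")
    	// Verbose logs are gated on GRPC_GO_LOG_VERBOSITY_LEVEL.
    	if grpclog.V(2) {
    		grpclog.Infoln("transport-level detail")
    	}
    }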
diff --git a/go/vendor/google.golang.org/grpc/grpclog/logger.go b/go/vendor/google.golang.org/grpc/grpclog/logger.go
index 3b29330..d03b239 100644
--- a/go/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/go/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -1,52 +1,25 @@
/*
*
- * Copyright 2015, Google Inc.
- * All rights reserved.
+ * Copyright 2015 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
-/*
-Package grpclog defines logging for grpc.
-*/
-package grpclog // import "google.golang.org/grpc/grpclog"
-
-import (
- "log"
- "os"
-)
-
-// Use golang's standard logger by default.
-// Access is not mutex-protected: do not modify except in init()
-// functions.
-var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
+package grpclog
// Logger mimics golang's standard Logger as an interface.
+// Deprecated: use LoggerV2.
type Logger interface {
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
@@ -58,36 +31,53 @@ type Logger interface {
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
+// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
- logger = l
+ logger = &loggerWrapper{Logger: l}
+}
+
+// loggerWrapper wraps Logger into a LoggerV2.
+type loggerWrapper struct {
+ Logger
+}
+
+func (g *loggerWrapper) Info(args ...interface{}) {
+ g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Infoln(args ...interface{}) {
+ g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Infof(format string, args ...interface{}) {
+ g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Warning(args ...interface{}) {
+ g.Logger.Print(args...)
}
-// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
-func Fatal(args ...interface{}) {
- logger.Fatal(args...)
+func (g *loggerWrapper) Warningln(args ...interface{}) {
+ g.Logger.Println(args...)
}
-// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
-func Fatalf(format string, args ...interface{}) {
- logger.Fatalf(format, args...)
+func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
+ g.Logger.Printf(format, args...)
}
-// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code.
-func Fatalln(args ...interface{}) {
- logger.Fatalln(args...)
+func (g *loggerWrapper) Error(args ...interface{}) {
+ g.Logger.Print(args...)
}
-// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
-func Print(args ...interface{}) {
- logger.Print(args...)
+func (g *loggerWrapper) Errorln(args ...interface{}) {
+ g.Logger.Println(args...)
}
-// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
-func Printf(format string, args ...interface{}) {
- logger.Printf(format, args...)
+func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
+ g.Logger.Printf(format, args...)
}
-// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
-func Println(args ...interface{}) {
- logger.Println(args...)
+func (g *loggerWrapper) V(l int) bool {
+ // Returns true for all verbose levels.
+ return true
}
diff --git a/go/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/go/vendor/google.golang.org/grpc/grpclog/loggerv2.go
new file mode 100644
index 0000000..d493257
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -0,0 +1,195 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strconv"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+ // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+ Info(args ...interface{})
+ // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+ Infoln(args ...interface{})
+ // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+ Infof(format string, args ...interface{})
+ // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+ Warning(args ...interface{})
+ // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+ Warningln(args ...interface{})
+ // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+ Warningf(format string, args ...interface{})
+ // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+ Error(args ...interface{})
+ // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+ Errorln(args ...interface{})
+ // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+ Errorf(format string, args ...interface{})
+ // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatal(args ...interface{})
+ // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatalln(args ...interface{})
+ // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatalf(format string, args ...interface{})
+ // V reports whether verbosity level l is at least the requested verbose level.
+ V(l int) bool
+}
+
+// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
+// Not mutex-protected, should be called before any gRPC functions.
+func SetLoggerV2(l LoggerV2) {
+ logger = l
+}
+
+const (
+ // infoLog indicates Info severity.
+ infoLog int = iota
+ // warningLog indicates Warning severity.
+ warningLog
+ // errorLog indicates Error severity.
+ errorLog
+ // fatalLog indicates Fatal severity.
+ fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+ m []*log.Logger
+ v int
+}
+
+// NewLoggerV2 creates a loggerV2 with the provided writers.
+// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
+// Error logs will be written to errorW, warningW and infoW.
+// Warning logs will be written to warningW and infoW.
+// Info logs will be written to infoW.
+func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
+ return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
+}
+
+// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
+// verbosity level.
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
+ var m []*log.Logger
+ m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
+ m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
+ ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+ m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
+ m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
+ return &loggerT{m: m, v: v}
+}
+
+// newLoggerV2 creates a loggerV2 to be used as default logger.
+// All logs are written to stderr.
+func newLoggerV2() LoggerV2 {
+ errorW := ioutil.Discard
+ warningW := ioutil.Discard
+ infoW := ioutil.Discard
+
+ logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
+ switch logLevel {
+ case "", "ERROR", "error": // If env is unset, set level to ERROR.
+ errorW = os.Stderr
+ case "WARNING", "warning":
+ warningW = os.Stderr
+ case "INFO", "info":
+ infoW = os.Stderr
+ }
+
+ var v int
+ vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
+ if vl, err := strconv.Atoi(vLevel); err == nil {
+ v = vl
+ }
+ return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
+}
+
+func (g *loggerT) Info(args ...interface{}) {
+ g.m[infoLog].Print(args...)
+}
+
+func (g *loggerT) Infoln(args ...interface{}) {
+ g.m[infoLog].Println(args...)
+}
+
+func (g *loggerT) Infof(format string, args ...interface{}) {
+ g.m[infoLog].Printf(format, args...)
+}
+
+func (g *loggerT) Warning(args ...interface{}) {
+ g.m[warningLog].Print(args...)
+}
+
+func (g *loggerT) Warningln(args ...interface{}) {
+ g.m[warningLog].Println(args...)
+}
+
+func (g *loggerT) Warningf(format string, args ...interface{}) {
+ g.m[warningLog].Printf(format, args...)
+}
+
+func (g *loggerT) Error(args ...interface{}) {
+ g.m[errorLog].Print(args...)
+}
+
+func (g *loggerT) Errorln(args ...interface{}) {
+ g.m[errorLog].Println(args...)
+}
+
+func (g *loggerT) Errorf(format string, args ...interface{}) {
+ g.m[errorLog].Printf(format, args...)
+}
+
+func (g *loggerT) Fatal(args ...interface{}) {
+ g.m[fatalLog].Fatal(args...)
+ // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+}
+
+func (g *loggerT) Fatalln(args ...interface{}) {
+ g.m[fatalLog].Fatalln(args...)
+ // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+}
+
+func (g *loggerT) Fatalf(format string, args ...interface{}) {
+ g.m[fatalLog].Fatalf(format, args...)
+ // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
+}
+
+func (g *loggerT) V(l int) bool {
+ return l <= g.v
+}
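For reference, a minimal sketch (not part of this vendored change) of installing the LoggerV2 added above; the writers and verbosity value are illustrative, and the same effect is normally obtained via the GRPC_GO_LOG_SEVERITY_LEVEL and GRPC_GO_LOG_VERBOSITY_LEVEL environment variables read by newLoggerV2:

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Send INFO, WARNING and ERROR to stderr with verbosity 2; this mirrors
	// GRPC_GO_LOG_SEVERITY_LEVEL=INFO and GRPC_GO_LOG_VERBOSITY_LEVEL=2.
	l := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2)
	grpclog.SetLoggerV2(l) // call before any other gRPC function

	grpclog.Info("custom LoggerV2 installed")
	if l.V(2) {
		grpclog.Info("verbosity level 2 is enabled")
	}
}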
diff --git a/go/vendor/google.golang.org/grpc/interceptor.go b/go/vendor/google.golang.org/grpc/interceptor.go
index 8d932ef..06dc825 100644
--- a/go/vendor/google.golang.org/grpc/interceptor.go
+++ b/go/vendor/google.golang.org/grpc/interceptor.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -40,17 +25,17 @@ import (
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
-// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. inovker is the handler to complete the RPC
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
// and it is the responsibility of the interceptor to call it.
-// This is the EXPERIMENTAL API.
+// This is an EXPERIMENTAL API.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
// Streamer is called by StreamClientInterceptor to create a ClientStream.
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
-// operations. streamer is the handlder to create a ClientStream and it is the responsibility of the interceptor to call it.
-// This is the EXPERIMENTAL API.
+// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
+// This is an EXPERIMENTAL API.
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
// UnaryServerInfo consists of various information about a unary RPC on
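A minimal client-side sketch of the UnaryClientInterceptor contract above (illustrative only; logInterceptor and the target "example.invalid:443" are made up, and grpc.WithUnaryInterceptor / grpc.WithInsecure are assumed to be the dial options exposed by this vendored version):

package main

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

// logInterceptor is an illustrative UnaryClientInterceptor that times each RPC.
func logInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	// The interceptor is responsible for calling the invoker to complete the RPC.
	err := invoker(ctx, method, req, reply, cc, opts...)
	grpclog.Infof("rpc %s took %v, err: %v", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("example.invalid:443", grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(logInterceptor))
	if err != nil {
		grpclog.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}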
diff --git a/go/vendor/google.golang.org/grpc/internal/internal.go b/go/vendor/google.golang.org/grpc/internal/internal.go
index 5489143..53f1775 100644
--- a/go/vendor/google.golang.org/grpc/internal/internal.go
+++ b/go/vendor/google.golang.org/grpc/internal/internal.go
@@ -1,32 +1,17 @@
/*
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -34,13 +19,6 @@
// the godoc of the top-level grpc package.
package internal
-// TestingCloseConns closes all existing transports but keeps
-// grpcServer.lis accepting new connections.
-//
-// The provided grpcServer must be of type *grpc.Server. It is untyped
-// for circular dependency reasons.
-var TestingCloseConns func(grpcServer interface{})
-
// TestingUseHandlerImpl enables the http.Handler-based server implementation.
// It must be called before Serve and requires TLS credentials.
//
diff --git a/go/vendor/google.golang.org/grpc/keepalive/keepalive.go b/go/vendor/google.golang.org/grpc/keepalive/keepalive.go
index d492589..f8adc7e 100644
--- a/go/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/go/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2017, Google Inc.
- * All rights reserved.
+ * Copyright 2017 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -39,8 +24,8 @@ import (
)
// ClientParameters is used to set keepalive parameters on the client-side.
-// These configure how the client will actively probe to notice when a connection broken
-// and to cause activity so intermediaries are aware the connection is still in use.
+// These configure how the client will actively probe to notice when a connection is broken
+// and send pings so intermediaries will be aware of the liveness of the connection.
// Make sure these parameters are set in coordination with the keepalive policy on the server,
// as incompatible settings can result in closing of connection.
type ClientParameters struct {
diff --git a/go/vendor/google.golang.org/grpc/metadata/metadata.go b/go/vendor/google.golang.org/grpc/metadata/metadata.go
index 7ca4418..ccfea5d 100644
--- a/go/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/go/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -1,93 +1,56 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
// Package metadata define the structure of the metadata supported by gRPC library.
-// Please refer to http://www.grpc.io/docs/guides/wire.html for more information about custom-metadata.
+// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata.
package metadata // import "google.golang.org/grpc/metadata"
import (
- "encoding/base64"
"fmt"
"strings"
"golang.org/x/net/context"
)
-const (
- binHdrSuffix = "-bin"
-)
-
-// encodeKeyValue encodes key and value qualified for transmission via gRPC.
-// Transmitting binary headers violates HTTP/2 spec.
-// TODO(zhaoq): Maybe check if k is ASCII also.
-func encodeKeyValue(k, v string) (string, string) {
- k = strings.ToLower(k)
- if strings.HasSuffix(k, binHdrSuffix) {
- val := base64.StdEncoding.EncodeToString([]byte(v))
- v = string(val)
- }
- return k, v
-}
-
-// DecodeKeyValue returns the original key and value corresponding to the
-// encoded data in k, v.
-// If k is a binary header and v contains comma, v is split on comma before decoded,
-// and the decoded v will be joined with comma before returned.
+// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
func DecodeKeyValue(k, v string) (string, string, error) {
- if !strings.HasSuffix(k, binHdrSuffix) {
- return k, v, nil
- }
- vvs := strings.Split(v, ",")
- for i, vv := range vvs {
- val, err := base64.StdEncoding.DecodeString(vv)
- if err != nil {
- return "", "", err
- }
- vvs[i] = string(val)
- }
- return k, strings.Join(vvs, ","), nil
+ return k, v, nil
}
// MD is a mapping from metadata keys to values. Users should use the following
// two convenience functions New and Pairs to generate MD.
type MD map[string][]string
-// New creates a MD from given key-value map.
-// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be applied Base64 encoding.
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
func New(m map[string]string) MD {
md := MD{}
- for k, v := range m {
- key, val := encodeKeyValue(k, v)
+ for k, val := range m {
+ key := strings.ToLower(k)
md[key] = append(md[key], val)
}
return md
@@ -95,20 +58,28 @@ func New(m map[string]string) MD {
// Pairs returns an MD formed by the mapping of key, value ...
// Pairs panics if len(kv) is odd.
-// Keys are automatically converted to lowercase. And for keys having "-bin" as suffix, their values will be appplied Base64 encoding.
+//
+// Only the following ASCII characters are allowed in keys:
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
func Pairs(kv ...string) MD {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
}
md := MD{}
- var k string
+ var key string
for i, s := range kv {
if i%2 == 0 {
- k = s
+ key = strings.ToLower(s)
continue
}
- key, val := encodeKeyValue(k, s)
- md[key] = append(md[key], val)
+ md[key] = append(md[key], s)
}
return md
}
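A small usage sketch (not part of the diff) of the key normalization described above; the keys and values are illustrative:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Keys are normalized to lowercase; repeated keys accumulate values.
	md := metadata.Pairs(
		"Request-ID", "abc123",
		"request-id", "def456",
	)
	fmt.Println(md["request-id"]) // [abc123 def456]

	md2 := metadata.New(map[string]string{"User-Agent": "example"})
	fmt.Println(md2["user-agent"]) // [example]
}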
@@ -123,9 +94,9 @@ func (md MD) Copy() MD {
return Join(md)
}
-// Join joins any number of MDs into a single MD.
+// Join joins any number of mds into a single MD.
// The order of values for each key is determined by the order in which
-// the MDs containing those values are presented to Join.
+// the mds containing those values are presented to Join.
func Join(mds ...MD) MD {
out := MD{}
for _, md := range mds {
@@ -139,11 +110,6 @@ func Join(mds ...MD) MD {
type mdIncomingKey struct{}
type mdOutgoingKey struct{}
-// NewContext is a wrapper for NewOutgoingContext(ctx, md). Deprecated.
-func NewContext(ctx context.Context, md MD) context.Context {
- return NewOutgoingContext(ctx, md)
-}
-
// NewIncomingContext creates a new context with incoming md attached.
func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
@@ -154,22 +120,17 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, md)
}
-// FromContext is a wrapper for FromIncomingContext(ctx). Deprecated.
-func FromContext(ctx context.Context) (md MD, ok bool) {
- return FromIncomingContext(ctx)
-}
-
-// FromIncomingContext returns the incoming MD in ctx if it exists. The
-// returned md should be immutable, writing to it may cause races.
-// Modification should be made to the copies of the returned md.
+// FromIncomingContext returns the incoming metadata in ctx if it exists. The
+// returned MD should not be modified. Writing to it may cause races.
+// Modification should be made to copies of the returned MD.
func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdIncomingKey{}).(MD)
return
}
-// FromOutgoingContext returns the outgoing MD in ctx if it exists. The
-// returned md should be immutable, writing to it may cause races.
-// Modification should be made to the copies of the returned md.
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
+// returned MD should not be modified. Writing to it may cause races.
+// Modification should be made to the copies of the returned MD.
func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdOutgoingKey{}).(MD)
return
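A short sketch (illustrative, not vendored code) of how the incoming/outgoing context helpers above are typically combined; the server side is simulated here with NewIncomingContext:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: attach metadata to the context used for outgoing RPCs.
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("request-id", "abc123"))
	_ = ctx

	// Server side (simulated): read the incoming metadata.
	srvCtx := metadata.NewIncomingContext(context.Background(),
		metadata.Pairs("request-id", "abc123"))
	if md, ok := metadata.FromIncomingContext(srvCtx); ok {
		// Copy before modifying; the returned MD must not be written to.
		fmt.Println(md.Copy()["request-id"])
	}
}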
diff --git a/go/vendor/google.golang.org/grpc/naming/dns_resolver.go b/go/vendor/google.golang.org/grpc/naming/dns_resolver.go
new file mode 100644
index 0000000..7e69a2c
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/naming/dns_resolver.go
@@ -0,0 +1,290 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package naming
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/grpclog"
+)
+
+const (
+ defaultPort = "443"
+ defaultFreq = time.Minute * 30
+)
+
+var (
+ errMissingAddr = errors.New("missing address")
+ errWatcherClose = errors.New("watcher has been closed")
+)
+
+// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
+// create watchers that poll the DNS server using the frequency set by freq.
+func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
+ return &dnsResolver{freq: freq}, nil
+}
+
+// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
+// watchers that poll the DNS server using the default frequency defined by defaultFreq.
+func NewDNSResolver() (Resolver, error) {
+ return NewDNSResolverWithFreq(defaultFreq)
+}
+
+// dnsResolver handles name resolution for names following the DNS scheme
+type dnsResolver struct {
+ // frequency of polling the DNS server that the watchers created by this resolver will use.
+ freq time.Duration
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+ ip := net.ParseIP(addr)
+ if ip == nil {
+ return "", false
+ }
+ if ip.To4() != nil {
+ return addr, true
+ }
+ return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string, returns formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
+// examples:
+// target: "www.google.com" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
+// target: ":80" returns host: "localhost", port: "80"
+// target: ":" returns host: "localhost", port: "443"
+func parseTarget(target string) (host, port string, err error) {
+ if target == "" {
+ return "", "", errMissingAddr
+ }
+
+ if ip := net.ParseIP(target); ip != nil {
+ // target is an IPv4 or IPv6(without brackets) address
+ return target, defaultPort, nil
+ }
+ if host, port, err := net.SplitHostPort(target); err == nil {
+ // target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
+ if host == "" {
+ // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+ host = "localhost"
+ }
+ if port == "" {
+ // If the port field is empty (target ends with colon), e.g. "[::1]:", defaultPort is used.
+ port = defaultPort
+ }
+ return host, port, nil
+ }
+ if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
+ // target doesn't have port
+ return host, port, nil
+ }
+ return "", "", fmt.Errorf("invalid target address %v", target)
+}
+
+// Resolve creates a watcher that watches the name resolution of the target.
+func (r *dnsResolver) Resolve(target string) (Watcher, error) {
+ host, port, err := parseTarget(target)
+ if err != nil {
+ return nil, err
+ }
+
+ if net.ParseIP(host) != nil {
+ ipWatcher := &ipWatcher{
+ updateChan: make(chan *Update, 1),
+ }
+ host, _ = formatIP(host)
+ ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
+ return ipWatcher, nil
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ return &dnsWatcher{
+ r: r,
+ host: host,
+ port: port,
+ ctx: ctx,
+ cancel: cancel,
+ t: time.NewTimer(0),
+ }, nil
+}
+
+// dnsWatcher watches for the name resolution update for a specific target
+type dnsWatcher struct {
+ r *dnsResolver
+ host string
+ port string
+ // The latest resolved address set
+ curAddrs map[string]*Update
+ ctx context.Context
+ cancel context.CancelFunc
+ t *time.Timer
+}
+
+// ipWatcher watches for the name resolution update for an IP address.
+type ipWatcher struct {
+ updateChan chan *Update
+}
+
+// Next returns the address resolution Update for the target. For an IP address,
+// the resolution is the address itself, so polling the name server is unnecessary.
+// Therefore, Next() returns an Update the first time it is called, and blocks
+// on all subsequent calls, as no further Update exists until the watcher is closed.
+func (i *ipWatcher) Next() ([]*Update, error) {
+ u, ok := <-i.updateChan
+ if !ok {
+ return nil, errWatcherClose
+ }
+ return []*Update{u}, nil
+}
+
+// Close closes the ipWatcher.
+func (i *ipWatcher) Close() {
+ close(i.updateChan)
+}
+
+// AddressType indicates the address type returned by name resolution.
+type AddressType uint8
+
+const (
+ // Backend indicates the server is a backend server.
+ Backend AddressType = iota
+ // GRPCLB indicates the server is a grpclb load balancer.
+ GRPCLB
+)
+
+// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
+// name resolver used by the grpclb balancer is required to provide this type of metadata in
+// its address updates.
+type AddrMetadataGRPCLB struct {
+ // AddrType is the type of server (grpc load balancer or backend).
+ AddrType AddressType
+ // ServerName is the name of the grpc load balancer. Used for authentication.
+ ServerName string
+}
+
+// compileUpdate compares the old resolved addresses and newly resolved addresses,
+// and generates an update list
+func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
+ var res []*Update
+ for a, u := range w.curAddrs {
+ if _, ok := newAddrs[a]; !ok {
+ u.Op = Delete
+ res = append(res, u)
+ }
+ }
+ for a, u := range newAddrs {
+ if _, ok := w.curAddrs[a]; !ok {
+ res = append(res, u)
+ }
+ }
+ return res
+}
+
+func (w *dnsWatcher) lookupSRV() map[string]*Update {
+ newAddrs := make(map[string]*Update)
+ _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
+ if err != nil {
+ grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
+ return nil
+ }
+ for _, s := range srvs {
+ lbAddrs, err := lookupHost(w.ctx, s.Target)
+ if err != nil {
+ grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
+ continue
+ }
+ for _, a := range lbAddrs {
+ a, ok := formatIP(a)
+ if !ok {
+ grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+ continue
+ }
+ addr := a + ":" + strconv.Itoa(int(s.Port))
+ newAddrs[addr] = &Update{Addr: addr,
+ Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
+ }
+ }
+ return newAddrs
+}
+
+func (w *dnsWatcher) lookupHost() map[string]*Update {
+ newAddrs := make(map[string]*Update)
+ addrs, err := lookupHost(w.ctx, w.host)
+ if err != nil {
+ grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+ return nil
+ }
+ for _, a := range addrs {
+ a, ok := formatIP(a)
+ if !ok {
+ grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+ continue
+ }
+ addr := a + ":" + w.port
+ newAddrs[addr] = &Update{Addr: addr}
+ }
+ return newAddrs
+}
+
+func (w *dnsWatcher) lookup() []*Update {
+ newAddrs := w.lookupSRV()
+ if newAddrs == nil {
+ // If failed to get any balancer address (either no corresponding SRV for the
+ // target, or caused by failure during resolution/parsing of the balancer target),
+ // return any A record info available.
+ newAddrs = w.lookupHost()
+ }
+ result := w.compileUpdate(newAddrs)
+ w.curAddrs = newAddrs
+ return result
+}
+
+// Next returns the resolved address update (delta) for the target. If there is no
+// change, it waits for the next polling interval (w.r.freq, 30 minutes by default)
+// and resolves again.
+func (w *dnsWatcher) Next() ([]*Update, error) {
+ for {
+ select {
+ case <-w.ctx.Done():
+ return nil, errWatcherClose
+ case <-w.t.C:
+ }
+ result := w.lookup()
+ // Next lookup should happen after an interval defined by w.r.freq.
+ w.t.Reset(w.r.freq)
+ if len(result) > 0 {
+ return result, nil
+ }
+ }
+}
+
+func (w *dnsWatcher) Close() {
+ w.cancel()
+}
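A hedged usage sketch of the resolver/watcher pair defined above (the host "example.invalid" is a placeholder; the Update type with its Op and Addr fields comes from the naming package):

package main

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/naming"
)

func main() {
	r, err := naming.NewDNSResolver()
	if err != nil {
		grpclog.Fatalf("creating resolver: %v", err)
	}
	w, err := r.Resolve("example.invalid:443")
	if err != nil {
		grpclog.Fatalf("starting watcher: %v", err)
	}
	defer w.Close()

	// Next blocks until the resolved address set changes (or the first resolution).
	updates, err := w.Next()
	if err != nil {
		grpclog.Fatalf("watching: %v", err)
	}
	for _, u := range updates {
		grpclog.Infof("op=%v addr=%s", u.Op, u.Addr)
	}
}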
diff --git a/go/vendor/google.golang.org/grpc/naming/go17.go b/go/vendor/google.golang.org/grpc/naming/go17.go
new file mode 100644
index 0000000..57b65d7
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/naming/go17.go
@@ -0,0 +1,34 @@
+// +build go1.6,!go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package naming
+
+import (
+ "net"
+
+ "golang.org/x/net/context"
+)
+
+var (
+ lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
+ lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
+ return net.LookupSRV(service, proto, name)
+ }
+)
diff --git a/go/vendor/google.golang.org/grpc/naming/go18.go b/go/vendor/google.golang.org/grpc/naming/go18.go
new file mode 100644
index 0000000..b5a0f84
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/naming/go18.go
@@ -0,0 +1,28 @@
+// +build go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package naming
+
+import "net"
+
+var (
+ lookupHost = net.DefaultResolver.LookupHost
+ lookupSRV = net.DefaultResolver.LookupSRV
+)
diff --git a/go/vendor/google.golang.org/grpc/naming/naming.go b/go/vendor/google.golang.org/grpc/naming/naming.go
index c2e0871..1af7e32 100644
--- a/go/vendor/google.golang.org/grpc/naming/naming.go
+++ b/go/vendor/google.golang.org/grpc/naming/naming.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
diff --git a/go/vendor/google.golang.org/grpc/peer/peer.go b/go/vendor/google.golang.org/grpc/peer/peer.go
index bfa6205..317b8b9 100644
--- a/go/vendor/google.golang.org/grpc/peer/peer.go
+++ b/go/vendor/google.golang.org/grpc/peer/peer.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -42,7 +27,8 @@ import (
"google.golang.org/grpc/credentials"
)
-// Peer contains the information of the peer for an RPC.
+// Peer contains the information of the peer for an RPC, such as the address
+// and authentication information.
type Peer struct {
// Addr is the peer address.
Addr net.Addr
diff --git a/go/vendor/google.golang.org/grpc/picker_wrapper.go b/go/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 0000000..db82bfb
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "sync"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
+)
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+ mu sync.Mutex
+ done bool
+ blockingCh chan struct{}
+ picker balancer.Picker
+}
+
+func newPickerWrapper() *pickerWrapper {
+ bp := &pickerWrapper{blockingCh: make(chan struct{})}
+ return bp
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
+func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
+ bp.mu.Lock()
+ if bp.done {
+ bp.mu.Unlock()
+ return
+ }
+ bp.picker = p
+ // bp.blockingCh should never be nil.
+ close(bp.blockingCh)
+ bp.blockingCh = make(chan struct{})
+ bp.mu.Unlock()
+}
+
+// pick returns the transport that will be used for the RPC.
+// It may block in the following cases:
+// - there's no picker
+// - the current picker returns ErrNoSubConnAvailable
+// - the current picker returns other errors and failfast is false.
+// - the subConn returned by the current picker is not READY
+// When one of these situations happens, pick blocks until the picker gets updated.
+func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+ var (
+ p balancer.Picker
+ ch chan struct{}
+ )
+
+ for {
+ bp.mu.Lock()
+ if bp.done {
+ bp.mu.Unlock()
+ return nil, nil, ErrClientConnClosing
+ }
+
+ if bp.picker == nil {
+ ch = bp.blockingCh
+ }
+ if ch == bp.blockingCh {
+ // This could happen when either:
+ // - bp.picker is nil (the previous if condition), or
+ // - we have already called pick on the current picker.
+ bp.mu.Unlock()
+ select {
+ case <-ctx.Done():
+ return nil, nil, ctx.Err()
+ case <-ch:
+ }
+ continue
+ }
+
+ ch = bp.blockingCh
+ p = bp.picker
+ bp.mu.Unlock()
+
+ subConn, done, err := p.Pick(ctx, opts)
+
+ if err != nil {
+ switch err {
+ case balancer.ErrNoSubConnAvailable:
+ continue
+ case balancer.ErrTransientFailure:
+ if !failfast {
+ continue
+ }
+ return nil, nil, status.Errorf(codes.Unavailable, "%v", err)
+ default:
+ // err is some other error.
+ return nil, nil, toRPCErr(err)
+ }
+ }
+
+ acw, ok := subConn.(*acBalancerWrapper)
+ if !ok {
+ grpclog.Infof("subconn returned from pick is not *acBalancerWrapper")
+ continue
+ }
+ if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+ return t, done, nil
+ }
+ grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+ // If ok == false, ac.state is not READY.
+ // A valid picker always returns READY subConn. This means the state of ac
+ // just changed, and picker will be updated shortly.
+ // continue back to the beginning of the for loop to repick.
+ }
+}
+
+func (bp *pickerWrapper) close() {
+ bp.mu.Lock()
+ defer bp.mu.Unlock()
+ if bp.done {
+ return
+ }
+ bp.done = true
+ close(bp.blockingCh)
+}
diff --git a/go/vendor/google.golang.org/grpc/pickfirst.go b/go/vendor/google.golang.org/grpc/pickfirst.go
new file mode 100644
index 0000000..bf659d4
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/pickfirst.go
@@ -0,0 +1,108 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+// PickFirstBalancerName is the name of the pick_first balancer.
+const PickFirstBalancerName = "pick_first"
+
+func newPickfirstBuilder() balancer.Builder {
+ return &pickfirstBuilder{}
+}
+
+type pickfirstBuilder struct{}
+
+func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+ return &pickfirstBalancer{cc: cc}
+}
+
+func (*pickfirstBuilder) Name() string {
+ return PickFirstBalancerName
+}
+
+type pickfirstBalancer struct {
+ cc balancer.ClientConn
+ sc balancer.SubConn
+}
+
+func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+ if err != nil {
+ grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
+ return
+ }
+ if b.sc == nil {
+ b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
+ return
+ }
+ b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
+ b.sc.Connect()
+ } else {
+ b.sc.UpdateAddresses(addrs)
+ b.sc.Connect()
+ }
+}
+
+func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
+ if b.sc != sc {
+ grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
+ return
+ }
+ if s == connectivity.Shutdown {
+ b.sc = nil
+ return
+ }
+
+ switch s {
+ case connectivity.Ready, connectivity.Idle:
+ b.cc.UpdateBalancerState(s, &picker{sc: sc})
+ case connectivity.Connecting:
+ b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
+ case connectivity.TransientFailure:
+ b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
+ }
+}
+
+func (b *pickfirstBalancer) Close() {
+}
+
+type picker struct {
+ err error
+ sc balancer.SubConn
+}
+
+func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ if p.err != nil {
+ return nil, nil, p.err
+ }
+ return p.sc, nil, nil
+}
+
+func init() {
+ balancer.Register(newPickfirstBuilder())
+}
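Since init() registers the builder, it can be looked up by name; a small sketch, assuming balancer.Get is exposed by the vendored balancer package:

package main

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

func main() {
	// The pickfirst init() above has already run by the time main starts.
	if b := balancer.Get(grpc.PickFirstBalancerName); b != nil {
		fmt.Println("registered balancer:", b.Name()) // pick_first
	}
}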
diff --git a/go/vendor/google.golang.org/grpc/proxy.go b/go/vendor/google.golang.org/grpc/proxy.go
new file mode 100644
index 0000000..2d40236
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/proxy.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+var (
+ // errDisabled indicates that proxy is disabled for the address.
+ errDisabled = errors.New("proxy is disabled for the address")
+ // The following variable will be overwritten in the tests.
+ httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+func mapAddress(ctx context.Context, address string) (string, error) {
+ req := &http.Request{
+ URL: &url.URL{
+ Scheme: "https",
+ Host: address,
+ },
+ }
+ url, err := httpProxyFromEnvironment(req)
+ if err != nil {
+ return "", err
+ }
+ if url == nil {
+ return "", errDisabled
+ }
+ return url.Host, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's need for the response and stores
+// those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
+// bytes in the buffer.
+type bufConn struct {
+ net.Conn
+ r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) {
+ defer func() {
+ if err != nil {
+ conn.Close()
+ }
+ }()
+
+ req := (&http.Request{
+ Method: http.MethodConnect,
+ URL: &url.URL{Host: addr},
+ Header: map[string][]string{"User-Agent": {grpcUA}},
+ })
+
+ if err := sendHTTPRequest(ctx, req, conn); err != nil {
+ return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+
+ r := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(r, req)
+ if err != nil {
+ return nil, fmt.Errorf("reading server HTTP response: %v", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ dump, err := httputil.DumpResponse(resp, true)
+ if err != nil {
+ return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+ }
+ return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+ }
+
+ return &bufConn{Conn: conn, r: r}, nil
+}
+
+// newProxyDialer returns a dialer that connects to proxy first if necessary.
+// The returned dialer checks if a proxy is necessary, dials the proxy with the
+// provided dialer, performs the HTTP CONNECT handshake and returns the connection.
+func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
+ return func(ctx context.Context, addr string) (conn net.Conn, err error) {
+ var skipHandshake bool
+ newAddr, err := mapAddress(ctx, addr)
+ if err != nil {
+ if err != errDisabled {
+ return nil, err
+ }
+ skipHandshake = true
+ newAddr = addr
+ }
+
+ conn, err = dialer(ctx, newAddr)
+ if err != nil {
+ return
+ }
+ if !skipHandshake {
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr)
+ }
+ return
+ }
+}
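The proxy decision is delegated to http.ProxyFromEnvironment, so HTTPS_PROXY / NO_PROXY control whether the CONNECT handshake above happens; a standalone sketch of that lookup (the proxy URL and target are illustrative):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Illustrative proxy; must be set before the first ProxyFromEnvironment call,
	// since the standard library caches the environment on first use.
	os.Setenv("HTTPS_PROXY", "http://proxy.internal:3128")

	req := &http.Request{URL: &url.URL{Scheme: "https", Host: "example.invalid:443"}}
	proxyURL, err := http.ProxyFromEnvironment(req)
	if err != nil {
		panic(err)
	}
	if proxyURL == nil {
		fmt.Println("proxy disabled for this address")
		return
	}
	fmt.Println("would CONNECT via", proxyURL.Host)
}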
diff --git a/go/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/go/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 0000000..a543a70
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,377 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/rand"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+func init() {
+ resolver.Register(NewBuilder())
+}
+
+const (
+ defaultPort = "443"
+ defaultFreq = time.Minute * 30
+ golang = "GO"
+ // In DNS, service config is encoded in a TXT record via the mechanism
+ // described in RFC-1464 using the attribute name grpc_config.
+ txtAttribute = "grpc_config="
+)
+
+var errMissingAddr = errors.New("missing address")
+
+// NewBuilder creates a dnsBuilder which is used to construct DNS resolvers.
+func NewBuilder() resolver.Builder {
+ return &dnsBuilder{freq: defaultFreq}
+}
+
+type dnsBuilder struct {
+ // frequency of polling the DNS server.
+ freq time.Duration
+}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+ host, port, err := parseTarget(target.Endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ // IP address.
+ if net.ParseIP(host) != nil {
+ host, _ = formatIP(host)
+ addr := []resolver.Address{{Addr: host + ":" + port}}
+ i := &ipResolver{
+ cc: cc,
+ ip: addr,
+ rn: make(chan struct{}, 1),
+ q: make(chan struct{}),
+ }
+ cc.NewAddress(addr)
+ go i.watcher()
+ return i, nil
+ }
+
+ // DNS address (non-IP).
+ ctx, cancel := context.WithCancel(context.Background())
+ d := &dnsResolver{
+ freq: b.freq,
+ host: host,
+ port: port,
+ ctx: ctx,
+ cancel: cancel,
+ cc: cc,
+ t: time.NewTimer(0),
+ rn: make(chan struct{}, 1),
+ }
+
+ d.wg.Add(1)
+ go d.watcher()
+ return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+ return "dns"
+}
+
+// ipResolver watches for the name resolution update for an IP address.
+type ipResolver struct {
+ cc resolver.ClientConn
+ ip []resolver.Address
+ // rn channel is used by ResolveNow() to force an immediate resolution of the target.
+ rn chan struct{}
+ q chan struct{}
+}
+
+// ResolveNow resends the address it stores; no resolution is needed.
+func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
+ select {
+ case i.rn <- struct{}{}:
+ default:
+ }
+}
+
+// Close closes the ipResolver.
+func (i *ipResolver) Close() {
+ close(i.q)
+}
+
+func (i *ipResolver) watcher() {
+ for {
+ select {
+ case <-i.rn:
+ i.cc.NewAddress(i.ip)
+ case <-i.q:
+ return
+ }
+ }
+}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+ freq time.Duration
+ host string
+ port string
+ ctx context.Context
+ cancel context.CancelFunc
+ cc resolver.ClientConn
+ // rn channel is used by ResolveNow() to force an immediate resolution of the target.
+ rn chan struct{}
+ t *time.Timer
+ // wg is used to ensure Close() returns only after the watcher() goroutine has finished.
+ // Otherwise, a data race is possible. [Race Example] In dns_resolver_test we
+ // replace the real lookup functions with mocked ones to facilitate testing.
+ // If Close() did not wait for the watcher() goroutine to finish, the race detector
+ // would sometimes report that the lookups (reads of the lookup function pointers)
+ // inside the watcher() goroutine race with replaceNetFunc (which writes them).
+ wg sync.WaitGroup
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+ select {
+ case d.rn <- struct{}{}:
+ default:
+ }
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+ d.cancel()
+ d.wg.Wait()
+ d.t.Stop()
+}
+
+func (d *dnsResolver) watcher() {
+ defer d.wg.Done()
+ for {
+ select {
+ case <-d.ctx.Done():
+ return
+ case <-d.t.C:
+ case <-d.rn:
+ }
+ result, sc := d.lookup()
+ // Next lookup should happen after an interval defined by d.freq.
+ d.t.Reset(d.freq)
+ d.cc.NewServiceConfig(string(sc))
+ d.cc.NewAddress(result)
+ }
+}
+
+func (d *dnsResolver) lookupSRV() []resolver.Address {
+ var newAddrs []resolver.Address
+ _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
+ if err != nil {
+ grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
+ return nil
+ }
+ for _, s := range srvs {
+ lbAddrs, err := lookupHost(d.ctx, s.Target)
+ if err != nil {
+ grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
+ continue
+ }
+ for _, a := range lbAddrs {
+ a, ok := formatIP(a)
+ if !ok {
+ grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+ continue
+ }
+ addr := a + ":" + strconv.Itoa(int(s.Port))
+ newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+ }
+ }
+ return newAddrs
+}
+
+func (d *dnsResolver) lookupTXT() string {
+ ss, err := lookupTXT(d.ctx, d.host)
+ if err != nil {
+ grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
+ return ""
+ }
+ var res string
+ for _, s := range ss {
+ res += s
+ }
+
+ // TXT record must have "grpc_config=" attribute in order to be used as service config.
+ if !strings.HasPrefix(res, txtAttribute) {
+ grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
+ return ""
+ }
+ return strings.TrimPrefix(res, txtAttribute)
+}
+
+func (d *dnsResolver) lookupHost() []resolver.Address {
+ var newAddrs []resolver.Address
+ addrs, err := lookupHost(d.ctx, d.host)
+ if err != nil {
+ grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+ return nil
+ }
+ for _, a := range addrs {
+ a, ok := formatIP(a)
+ if !ok {
+ grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+ continue
+ }
+ addr := a + ":" + d.port
+ newAddrs = append(newAddrs, resolver.Address{Addr: addr})
+ }
+ return newAddrs
+}
+
+func (d *dnsResolver) lookup() ([]resolver.Address, string) {
+ var newAddrs []resolver.Address
+ newAddrs = d.lookupSRV()
+ // Support fallback to non-balancer address.
+ newAddrs = append(newAddrs, d.lookupHost()...)
+ sc := d.lookupTXT()
+ return newAddrs, canaryingSC(sc)
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
+// If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+ ip := net.ParseIP(addr)
+ if ip == nil {
+ return "", false
+ }
+ if ip.To4() != nil {
+ return addr, true
+ }
+ return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string, returns formatted host and port info.
+// If target doesn't specify a port, set the port to be the defaultPort.
+// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
+// are stripped when setting the host.
+// examples:
+// target: "www.google.com" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
+// target: ":80" returns host: "localhost", port: "80"
+// target: ":" returns host: "localhost", port: "443"
+func parseTarget(target string) (host, port string, err error) {
+ if target == "" {
+ return "", "", errMissingAddr
+ }
+ if ip := net.ParseIP(target); ip != nil {
+ // target is an IPv4 or IPv6(without brackets) address
+ return target, defaultPort, nil
+ }
+ if host, port, err = net.SplitHostPort(target); err == nil {
+ // target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
+ if host == "" {
+ // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+ host = "localhost"
+ }
+ if port == "" {
+ // If the port field is empty (target ends with colon), e.g. "[::1]:", defaultPort is used.
+ port = defaultPort
+ }
+ return host, port, nil
+ }
+ if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
+ // target doesn't have port
+ return host, port, nil
+ }
+ return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
+}
+
+type rawChoice struct {
+ ClientLanguage *[]string `json:"clientLanguage,omitempty"`
+ Percentage *int `json:"percentage,omitempty"`
+ ClientHostName *[]string `json:"clientHostName,omitempty"`
+ ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"`
+}
+
+func containsString(a *[]string, b string) bool {
+ if a == nil {
+ return true
+ }
+ for _, c := range *a {
+ if c == b {
+ return true
+ }
+ }
+ return false
+}
+
+func chosenByPercentage(a *int) bool {
+ if a == nil {
+ return true
+ }
+ s := rand.NewSource(time.Now().UnixNano())
+ r := rand.New(s)
+ if r.Intn(100)+1 > *a {
+ return false
+ }
+ return true
+}
+
+func canaryingSC(js string) string {
+ if js == "" {
+ return ""
+ }
+ var rcs []rawChoice
+ err := json.Unmarshal([]byte(js), &rcs)
+ if err != nil {
+ grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
+ return ""
+ }
+ cliHostname, err := os.Hostname()
+ if err != nil {
+ grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
+ return ""
+ }
+ var sc string
+ for _, c := range rcs {
+ if !containsString(c.ClientLanguage, golang) ||
+ !chosenByPercentage(c.Percentage) ||
+ !containsString(c.ClientHostName, cliHostname) ||
+ c.ServiceConfig == nil {
+ continue
+ }
+ sc = string(*c.ServiceConfig)
+ break
+ }
+ return sc
+}
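
For readers skimming this vendored resolver, the TXT-based canarying above is easier to follow with a concrete record. The sketch below is illustrative only (the record value and service config are invented, not part of this change); it shows the shape of data that lookupTXT would return and that canaryingSC would select from:

    package main

    import (
        "fmt"
        "strings"
    )

    // Hypothetical TXT record value. The JSON array mirrors the rawChoice
    // fields defined above; for a Go client the single entry matches, so its
    // serviceConfig object would be returned by canaryingSC.
    const sampleTXT = `grpc_config=[{"clientLanguage":["go"],"percentage":100,` +
        `"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]`

    func main() {
        // Same prefix handling as lookupTXT: strip "grpc_config=" and keep the JSON.
        fmt.Println(strings.TrimPrefix(sampleTXT, "grpc_config="))
    }
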
diff --git a/go/vendor/google.golang.org/grpc/resolver/dns/go17.go b/go/vendor/google.golang.org/grpc/resolver/dns/go17.go
new file mode 100644
index 0000000..b466bc8
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/resolver/dns/go17.go
@@ -0,0 +1,35 @@
+// +build go1.6, !go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package dns
+
+import (
+ "net"
+
+ "golang.org/x/net/context"
+)
+
+var (
+ lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
+ lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
+ return net.LookupSRV(service, proto, name)
+ }
+ lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
+)
diff --git a/go/vendor/google.golang.org/grpc/resolver/dns/go18.go b/go/vendor/google.golang.org/grpc/resolver/dns/go18.go
new file mode 100644
index 0000000..fa34f14
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/resolver/dns/go18.go
@@ -0,0 +1,29 @@
+// +build go1.8
+
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package dns
+
+import "net"
+
+var (
+ lookupHost = net.DefaultResolver.LookupHost
+ lookupSRV = net.DefaultResolver.LookupSRV
+ lookupTXT = net.DefaultResolver.LookupTXT
+)
diff --git a/go/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/go/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
new file mode 100644
index 0000000..b76010d
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package passthrough implements a pass-through resolver. It sends the target
+// name without scheme back to gRPC as resolved address.
+package passthrough
+
+import "google.golang.org/grpc/resolver"
+
+const scheme = "passthrough"
+
+type passthroughBuilder struct{}
+
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+ r := &passthroughResolver{
+ target: target,
+ cc: cc,
+ }
+ r.start()
+ return r, nil
+}
+
+func (*passthroughBuilder) Scheme() string {
+ return scheme
+}
+
+type passthroughResolver struct {
+ target resolver.Target
+ cc resolver.ClientConn
+}
+
+func (r *passthroughResolver) start() {
+ r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}})
+}
+
+func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (*passthroughResolver) Close() {}
+
+func init() {
+ resolver.Register(&passthroughBuilder{})
+}
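
To make the role of this resolver concrete: it is what a scheme-less dial target ends up using, since "passthrough" is the default scheme in the resolver package below. A minimal, hypothetical dial that exercises it (the address and the insecure transport are chosen purely for illustration):

    package main

    import (
        "log"

        "google.golang.org/grpc"
    )

    func main() {
        // The bare target has no scheme, so the passthrough builder hands
        // "localhost:50051" straight back to gRPC as the only address.
        conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
    }
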
diff --git a/go/vendor/google.golang.org/grpc/resolver/resolver.go b/go/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 0000000..df097ee
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,155 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+var (
+ // m is a map from scheme to resolver builder.
+ m = make(map[string]Builder)
+ // defaultScheme is the default scheme to use.
+ defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map.
+// b.Scheme will be used as the scheme registered with this builder.
+func Register(b Builder) {
+ m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+// If no builder is registered with the scheme, the default scheme will
+// be used.
+// If the default scheme is not modified, "passthrough" will be the default
+// scheme, and the preinstalled dns resolver will be used.
+// If the default scheme is modified, and a resolver is registered with
+// the scheme, that resolver will be returned.
+// If the default scheme is modified, and no resolver is registered with
+// the scheme, nil will be returned.
+func Get(scheme string) Builder {
+ if b, ok := m[scheme]; ok {
+ return b
+ }
+ if b, ok := m[defaultScheme]; ok {
+ return b
+ }
+ return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used.
+// Unless overridden, the default scheme is "passthrough".
+func SetDefaultScheme(scheme string) {
+ defaultScheme = scheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+type AddressType uint8
+
+const (
+ // Backend indicates the address is for a backend server.
+ Backend AddressType = iota
+ // GRPCLB indicates the address is for a grpclb load balancer.
+ GRPCLB
+)
+
+// Address represents a server the client connects to.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
+type Address struct {
+ // Addr is the server address on which a connection will be established.
+ Addr string
+ // Type is the type of this address.
+ Type AddressType
+ // ServerName is the name of this address.
+ //
+ // e.g. if Type is GRPCLB, ServerName should be the name of the remote load
+ // balancer, not the name of the backend.
+ ServerName string
+ // Metadata is the information associated with Addr, which may be used
+ // to make load balancing decision.
+ Metadata interface{}
+}
+
+// BuildOption includes additional information for the builder to create
+// the resolver.
+type BuildOption struct {
+ // UserOptions can be used to pass configuration between DialOptions and the
+ // resolver.
+ UserOptions interface{}
+}
+
+// ClientConn contains the callbacks for resolver to notify any updates
+// to the gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like
+// testing, a new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type ClientConn interface {
+ // NewAddress is called by resolver to notify ClientConn a new list
+ // of resolved addresses.
+ // The address list should be the complete list of resolved addresses.
+ NewAddress(addresses []Address)
+ // NewServiceConfig is called by resolver to notify ClientConn a new
+ // service config. The service config should be provided as a json string.
+ NewServiceConfig(serviceConfig string)
+}
+
+// Target represents a target for gRPC, as specified in:
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+type Target struct {
+ Scheme string
+ Authority string
+ Endpoint string
+}
+
+// Builder creates a resolver that will be used to watch name resolution updates.
+type Builder interface {
+ // Build creates a new resolver for the given target.
+ //
+ // gRPC dial calls Build synchronously, and fails if the returned error is
+ // not nil.
+ Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
+ // Scheme returns the scheme supported by this resolver.
+ // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+ Scheme() string
+}
+
+// ResolveNowOption includes additional information for ResolveNow.
+type ResolveNowOption struct{}
+
+// Resolver watches for the updates on the specified target.
+// Updates include address updates and service config updates.
+type Resolver interface {
+ // ResolveNow will be called by gRPC to try to resolve the target name
+ // again. It's just a hint, resolver can ignore this if it's not necessary.
+ //
+ // It could be called multiple times concurrently.
+ ResolveNow(ResolveNowOption)
+ // Close closes the resolver.
+ Close()
+}
+
+// UnregisterForTesting removes the resolver builder with the given scheme from the
+// resolver map.
+// This function is for testing only.
+func UnregisterForTesting(scheme string) {
+ delete(m, scheme)
+}
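
Since this package is the extension point the rest of the diff builds on, a minimal custom resolver may help readers see how Builder, Resolver and ClientConn fit together. This is only a sketch against the API above; the "static" scheme and the addresses are invented:

    package staticresolver

    import "google.golang.org/grpc/resolver"

    type builder struct{}

    func (*builder) Scheme() string { return "static" }

    func (*builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
        // Push a fixed address list once; a real resolver would watch for
        // changes and call NewAddress again whenever the backends move.
        cc.NewAddress([]resolver.Address{
            {Addr: "10.0.0.1:443"},
            {Addr: "10.0.0.2:443"},
        })
        return &nopResolver{}, nil
    }

    type nopResolver struct{}

    func (*nopResolver) ResolveNow(resolver.ResolveNowOption) {}
    func (*nopResolver) Close()                               {}

    // Registering in init makes targets like "static:///ignored" resolve
    // through this builder.
    func init() { resolver.Register(&builder{}) }
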
diff --git a/go/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/go/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
new file mode 100644
index 0000000..ef5d4c2
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements the resolver.ClientConn interface.
+type ccResolverWrapper struct {
+ cc *ClientConn
+ resolver resolver.Resolver
+ addrCh chan []resolver.Address
+ scCh chan string
+ done chan struct{}
+}
+
+// split2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it returns ("", s, false) instead.
+func split2(s, sep string) (string, string, bool) {
+ spl := strings.SplitN(s, sep, 2)
+ if len(spl) < 2 {
+ return "", "", false
+ }
+ return spl[0], spl[1], true
+}
+
+// parseTarget splits target into a struct containing scheme, authority and
+// endpoint.
+func parseTarget(target string) (ret resolver.Target) {
+ var ok bool
+ ret.Scheme, ret.Endpoint, ok = split2(target, "://")
+ if !ok {
+ return resolver.Target{Endpoint: target}
+ }
+ ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
+ return ret
+}
+
+// newCCResolverWrapper parses cc.target for scheme and gets the resolver
+// builder for this scheme. It then builds the resolver and starts the
+// monitoring goroutine for it.
+//
+// If withResolverBuilder dial option is set, the specified resolver will be
+// used instead.
+func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
+ grpclog.Infof("dialing to target with scheme: %q", cc.parsedTarget.Scheme)
+
+ rb := cc.dopts.resolverBuilder
+ if rb == nil {
+ rb = resolver.Get(cc.parsedTarget.Scheme)
+ if rb == nil {
+ return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
+ }
+ }
+
+ ccr := &ccResolverWrapper{
+ cc: cc,
+ addrCh: make(chan []resolver.Address, 1),
+ scCh: make(chan string, 1),
+ done: make(chan struct{}),
+ }
+
+ var err error
+ ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{
+ UserOptions: cc.dopts.resolverBuildUserOptions,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return ccr, nil
+}
+
+func (ccr *ccResolverWrapper) start() {
+ go ccr.watcher()
+}
+
+// watcher processes address updates and service config updates sequentially.
+// Otherwise, we need to resolve possible races between address and service
+// config (e.g. they specify different balancer types).
+func (ccr *ccResolverWrapper) watcher() {
+ for {
+ select {
+ case <-ccr.done:
+ return
+ default:
+ }
+
+ select {
+ case addrs := <-ccr.addrCh:
+ select {
+ case <-ccr.done:
+ return
+ default:
+ }
+ grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
+ ccr.cc.handleResolvedAddrs(addrs, nil)
+ case sc := <-ccr.scCh:
+ select {
+ case <-ccr.done:
+ return
+ default:
+ }
+ grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
+ ccr.cc.handleServiceConfig(sc)
+ case <-ccr.done:
+ return
+ }
+ }
+}
+
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
+ ccr.resolver.ResolveNow(o)
+}
+
+func (ccr *ccResolverWrapper) close() {
+ ccr.resolver.Close()
+ close(ccr.done)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+ select {
+ case <-ccr.addrCh:
+ default:
+ }
+ ccr.addrCh <- addrs
+}
+
+// NewServiceConfig is called by the resolver implementation to send service
+// configs to gRPC.
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
+ select {
+ case <-ccr.scCh:
+ default:
+ }
+ ccr.scCh <- sc
+}
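
The wrapper's parseTarget is easiest to read with a few worked inputs. These examples are illustrative (the hosts are made up) and follow directly from the split2 logic above:

    // "dns://8.8.8.8/foo.example.com:8080" -> Target{Scheme: "dns", Authority: "8.8.8.8", Endpoint: "foo.example.com:8080"}
    // "passthrough:///localhost:50051"     -> Target{Scheme: "passthrough", Authority: "", Endpoint: "localhost:50051"}
    // "localhost:50051"                    -> Target{Endpoint: "localhost:50051"} (no scheme; the default scheme applies)
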
diff --git a/go/vendor/google.golang.org/grpc/rpc_util.go b/go/vendor/google.golang.org/grpc/rpc_util.go
index 2619d39..bf384b6 100644
--- a/go/vendor/google.golang.org/grpc/rpc_util.go
+++ b/go/vendor/google.golang.org/grpc/rpc_util.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -37,47 +22,23 @@ import (
"bytes"
"compress/gzip"
"encoding/binary"
- "fmt"
"io"
"io/ioutil"
"math"
- "os"
+ "sync"
"time"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/encoding"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
-// Codec defines the interface gRPC uses to encode and decode messages.
-type Codec interface {
- // Marshal returns the wire format of v.
- Marshal(v interface{}) ([]byte, error)
- // Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v interface{}) error
- // String returns the name of the Codec implementation. The returned
- // string will be used as part of content type in transmission.
- String() string
-}
-
-// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type protoCodec struct{}
-
-func (protoCodec) Marshal(v interface{}) ([]byte, error) {
- return proto.Marshal(v.(proto.Message))
-}
-
-func (protoCodec) Unmarshal(data []byte, v interface{}) error {
- return proto.Unmarshal(data, v.(proto.Message))
-}
-
-func (protoCodec) String() string {
- return "proto"
-}
-
// Compressor defines the interface gRPC uses to compress a message.
type Compressor interface {
// Do compresses p into w.
@@ -86,16 +47,25 @@ type Compressor interface {
Type() string
}
-// NewGZIPCompressor creates a Compressor based on GZIP.
-func NewGZIPCompressor() Compressor {
- return &gzipCompressor{}
+type gzipCompressor struct {
+ pool sync.Pool
}
-type gzipCompressor struct {
+// NewGZIPCompressor creates a Compressor based on GZIP.
+func NewGZIPCompressor() Compressor {
+ return &gzipCompressor{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return gzip.NewWriter(ioutil.Discard)
+ },
+ },
+ }
}
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
- z := gzip.NewWriter(w)
+ z := c.pool.Get().(*gzip.Writer)
+ defer c.pool.Put(z)
+ z.Reset(w)
if _, err := z.Write(p); err != nil {
return err
}
@@ -115,6 +85,7 @@ type Decompressor interface {
}
type gzipDecompressor struct {
+ pool sync.Pool
}
// NewGZIPDecompressor creates a Decompressor based on GZIP.
@@ -123,11 +94,26 @@ func NewGZIPDecompressor() Decompressor {
}
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
- z, err := gzip.NewReader(r)
- if err != nil {
- return nil, err
+ var z *gzip.Reader
+ switch maybeZ := d.pool.Get().(type) {
+ case nil:
+ newZ, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ z = newZ
+ case *gzip.Reader:
+ z = maybeZ
+ if err := z.Reset(r); err != nil {
+ d.pool.Put(z)
+ return nil, err
+ }
}
- defer z.Close()
+
+ defer func() {
+ z.Close()
+ d.pool.Put(z)
+ }()
return ioutil.ReadAll(z)
}
@@ -137,13 +123,20 @@ func (d *gzipDecompressor) Type() string {
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
- failFast bool
- headerMD metadata.MD
- trailerMD metadata.MD
- traceInfo traceInfo // in trace.go
+ compressorType string
+ failFast bool
+ headerMD metadata.MD
+ trailerMD metadata.MD
+ peer *peer.Peer
+ traceInfo traceInfo // in trace.go
+ maxReceiveMessageSize *int
+ maxSendMessageSize *int
+ creds credentials.PerRPCCredentials
}
-var defaultCallInfo = callInfo{failFast: true}
+func defaultCallInfo() *callInfo {
+ return &callInfo{failFast: true}
+}
// CallOption configures a Call before it starts or extracts information from
// a Call after it completes.
@@ -157,6 +150,14 @@ type CallOption interface {
after(*callInfo)
}
+// EmptyCallOption does not alter the Call configuration.
+// It can be embedded in another structure to carry satellite data for use
+// by interceptors.
+type EmptyCallOption struct{}
+
+func (EmptyCallOption) before(*callInfo) error { return nil }
+func (EmptyCallOption) after(*callInfo) {}
+
type beforeCall func(c *callInfo) error
func (o beforeCall) before(c *callInfo) error { return o(c) }
@@ -183,12 +184,26 @@ func Trailer(md *metadata.MD) CallOption {
})
}
+// Peer returns a CallOption that retrieves peer information for a
+// unary RPC.
+func Peer(peer *peer.Peer) CallOption {
+ return afterCall(func(c *callInfo) {
+ if c.peer != nil {
+ *peer = *c.peer
+ }
+ })
+}
+
// FailFast configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If failfast is true, the RPC will fail
+// connections or unreachable servers. If failFast is true, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will retry
-// the call if it fails due to a transient error. Please refer to
-// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md
+// connection is available (or the call is canceled or times out) and will
+// retry the call if it fails due to a transient error. gRPC will not retry if
+// data was written to the wire unless the server indicates it did not process
+// the data. Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+//
+// By default, RPCs are "Fail Fast".
func FailFast(failFast bool) CallOption {
return beforeCall(func(c *callInfo) error {
c.failFast = failFast
@@ -196,6 +211,43 @@ func FailFast(failFast bool) CallOption {
})
}
+// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
+func MaxCallRecvMsgSize(s int) CallOption {
+ return beforeCall(func(o *callInfo) error {
+ o.maxReceiveMessageSize = &s
+ return nil
+ })
+}
+
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
+func MaxCallSendMsgSize(s int) CallOption {
+ return beforeCall(func(o *callInfo) error {
+ o.maxSendMessageSize = &s
+ return nil
+ })
+}
+
+// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
+// for a call.
+func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
+ return beforeCall(func(c *callInfo) error {
+ c.creds = creds
+ return nil
+ })
+}
+
+// UseCompressor returns a CallOption which sets the compressor used when
+// sending the request. If WithCompressor is also set, UseCompressor has
+// higher priority.
+//
+// This API is EXPERIMENTAL.
+func UseCompressor(name string) CallOption {
+ return beforeCall(func(c *callInfo) error {
+ c.compressorType = name
+ return nil
+ })
+}
+
// The format of the payload: compressed or not?
type payloadFormat uint8
@@ -212,7 +264,7 @@ type parser struct {
r io.Reader
// The header of a gRPC message. Find more detail
- // at http://www.grpc.io/docs/guides/wire.html.
+ // at https://grpc.io/docs/guides/wire.html.
header [5]byte
}
@@ -229,8 +281,8 @@ type parser struct {
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// error.
-func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) {
- if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
+func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
+ if _, err := p.r.Read(p.header[:]); err != nil {
return 0, nil, err
}
@@ -240,13 +292,16 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro
if length == 0 {
return pf, nil, nil
}
- if length > uint32(maxMsgSize) {
- return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize)
+ if int64(length) > int64(maxInt) {
+ return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
+ }
+ if int(length) > maxReceiveMessageSize {
+ return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
// of making it for each message:
msg = make([]byte, int(length))
- if _, err := io.ReadFull(p.r, msg); err != nil {
+ if _, err := p.r.Read(msg); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
@@ -255,19 +310,23 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro
return pf, msg, nil
}
-// encode serializes msg and prepends the message header. If msg is nil, it
-// generates the message header of 0 message length.
-func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) {
+// encode serializes msg and returns a buffer of message header and a buffer of msg.
+// If msg is nil, it generates the message header and an empty msg buffer.
+// TODO(ddyihai): eliminate extra Compressor parameter.
+func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
var (
- b []byte
- length uint
+ b []byte
+ cbuf *bytes.Buffer
+ )
+ const (
+ payloadLen = 1
+ sizeLen = 4
)
if msg != nil {
var err error
- // TODO(zhaoq): optimize to reduce memory alloc and copying.
b, err = c.Marshal(msg)
if err != nil {
- return nil, err
+ return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
if outPayload != nil {
outPayload.Payload = msg
@@ -275,80 +334,101 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayl
outPayload.Data = b
outPayload.Length = len(b)
}
- if cp != nil {
- if err := cp.Do(cbuf, b); err != nil {
- return nil, err
+ if compressor != nil || cp != nil {
+ cbuf = new(bytes.Buffer)
+ // Has compressor, check Compressor is set by UseCompressor first.
+ if compressor != nil {
+ z, _ := compressor.Compress(cbuf)
+ if _, err := z.Write(b); err != nil {
+ return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+ }
+ z.Close()
+ } else {
+ // If Compressor is not set by UseCompressor, use default Compressor
+ if err := cp.Do(cbuf, b); err != nil {
+ return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+ }
}
b = cbuf.Bytes()
}
- length = uint(len(b))
}
- if length > math.MaxUint32 {
- return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
+ if uint(len(b)) > math.MaxUint32 {
+ return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
}
- const (
- payloadLen = 1
- sizeLen = 4
- )
-
- var buf = make([]byte, payloadLen+sizeLen+len(b))
-
- // Write payload format
- if cp == nil {
- buf[0] = byte(compressionNone)
+ bufHeader := make([]byte, payloadLen+sizeLen)
+ if compressor != nil || cp != nil {
+ bufHeader[0] = byte(compressionMade)
} else {
- buf[0] = byte(compressionMade)
+ bufHeader[0] = byte(compressionNone)
}
- // Write length of b into buf
- binary.BigEndian.PutUint32(buf[1:], uint32(length))
- // Copy encoded msg to buf
- copy(buf[5:], b)
+ // Write length of b into buf
+ binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b)))
if outPayload != nil {
- outPayload.WireLength = len(buf)
+ outPayload.WireLength = payloadLen + sizeLen + len(b)
}
-
- return buf, nil
+ return bufHeader, b, nil
}
-func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
- if dc == nil || recvCompress != dc.Type() {
- return Errorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ if recvCompress == "" || recvCompress == encoding.Identity {
+ return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
+ }
+ if !haveCompressor {
+ return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
}
default:
- return Errorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+ return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
}
return nil
}
-func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error {
- pf, d, err := p.recvMsg(maxMsgSize)
+// For the two compressor parameters, both should not be set, but if they are,
+// dc takes precedence over compressor.
+// TODO(dfawley): wrap the old compressor/decompressor using the new API?
+func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
+ pf, d, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return err
}
if inPayload != nil {
inPayload.WireLength = len(d)
}
- if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
- return err
+
+ if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
+ return st.Err()
}
+
if pf == compressionMade {
- d, err = dc.Do(bytes.NewReader(d))
- if err != nil {
- return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
+ // use this decompressor as the default.
+ if dc != nil {
+ d, err = dc.Do(bytes.NewReader(d))
+ if err != nil {
+ return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ }
+ } else {
+ dcReader, err := compressor.Decompress(bytes.NewReader(d))
+ if err != nil {
+ return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ }
+ d, err = ioutil.ReadAll(dcReader)
+ if err != nil {
+ return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ }
}
}
- if len(d) > maxMsgSize {
+ if len(d) > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
- return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize)
+ return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
}
if err := c.Unmarshal(d, m); err != nil {
- return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+ return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
}
if inPayload != nil {
inPayload.RecvTime = time.Now()
@@ -360,160 +440,66 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
return nil
}
-// rpcError defines the status from an RPC.
-type rpcError struct {
- code codes.Code
- desc string
+type rpcInfo struct {
+ failfast bool
}
-func (e *rpcError) Error() string {
- return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc)
+type rpcInfoContextKey struct{}
+
+func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context {
+ return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast})
+}
+
+func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
+ s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
+ return
}
// Code returns the error code for err if it was produced by the rpc system.
// Otherwise, it returns codes.Unknown.
+//
+// Deprecated: use status.FromError and Code method instead.
func Code(err error) codes.Code {
- if err == nil {
- return codes.OK
- }
- if e, ok := err.(*rpcError); ok {
- return e.code
+ if s, ok := status.FromError(err); ok {
+ return s.Code()
}
return codes.Unknown
}
// ErrorDesc returns the error description of err if it was produced by the rpc system.
// Otherwise, it returns err.Error() or empty string when err is nil.
+//
+// Deprecated: use status.FromError and Message method instead.
func ErrorDesc(err error) string {
- if err == nil {
- return ""
- }
- if e, ok := err.(*rpcError); ok {
- return e.desc
+ if s, ok := status.FromError(err); ok {
+ return s.Message()
}
return err.Error()
}
// Errorf returns an error containing an error code and a description;
// Errorf returns nil if c is OK.
+//
+// Deprecated: use status.Errorf instead.
func Errorf(c codes.Code, format string, a ...interface{}) error {
- if c == codes.OK {
- return nil
- }
- return &rpcError{
- code: c,
- desc: fmt.Sprintf(format, a...),
- }
+ return status.Errorf(c, format, a...)
}
-// toRPCErr converts an error into a rpcError.
-func toRPCErr(err error) error {
- switch e := err.(type) {
- case *rpcError:
- return err
- case transport.StreamError:
- return &rpcError{
- code: e.Code,
- desc: e.Desc,
- }
- case transport.ConnectionError:
- return &rpcError{
- code: codes.Internal,
- desc: e.Desc,
- }
- default:
- switch err {
- case context.DeadlineExceeded:
- return &rpcError{
- code: codes.DeadlineExceeded,
- desc: err.Error(),
- }
- case context.Canceled:
- return &rpcError{
- code: codes.Canceled,
- desc: err.Error(),
- }
- case ErrClientConnClosing:
- return &rpcError{
- code: codes.FailedPrecondition,
- desc: err.Error(),
- }
- }
-
- }
- return Errorf(codes.Unknown, "%v", err)
-}
+// The SupportPackageIsVersion variables are referenced from generated protocol
+// buffer files to ensure compatibility with the gRPC version used. The latest
+// support package version is 5.
+//
+// Older versions are kept for compatibility. They may be removed if
+// compatibility cannot be maintained.
+//
+// These constants should not be referenced from any other code.
+const (
+ SupportPackageIsVersion3 = true
+ SupportPackageIsVersion4 = true
+ SupportPackageIsVersion5 = true
+)
-// convertCode converts a standard Go error into its canonical code. Note that
-// this is only used to translate the error returned by the server applications.
-func convertCode(err error) codes.Code {
- switch err {
- case nil:
- return codes.OK
- case io.EOF:
- return codes.OutOfRange
- case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
- return codes.FailedPrecondition
- case os.ErrInvalid:
- return codes.InvalidArgument
- case context.Canceled:
- return codes.Canceled
- case context.DeadlineExceeded:
- return codes.DeadlineExceeded
- }
- switch {
- case os.IsExist(err):
- return codes.AlreadyExists
- case os.IsNotExist(err):
- return codes.NotFound
- case os.IsPermission(err):
- return codes.PermissionDenied
- }
- return codes.Unknown
-}
+// Version is the current grpc version.
+const Version = "1.9.1"
-// MethodConfig defines the configuration recommended by the service providers for a
-// particular method.
-// This is EXPERIMENTAL and subject to change.
-type MethodConfig struct {
- // WaitForReady indicates whether RPCs sent to this method should wait until
- // the connection is ready by default (!failfast). The value specified via the
- // gRPC client API will override the value set here.
- WaitForReady bool
- // Timeout is the default timeout for RPCs sent to this method. The actual
- // deadline used will be the minimum of the value specified here and the value
- // set by the application via the gRPC client API. If either one is not set,
- // then the other will be used. If neither is set, then the RPC has no deadline.
- Timeout time.Duration
- // MaxReqSize is the maximum allowed payload size for an individual request in a
- // stream (client->server) in bytes. The size which is measured is the serialized,
- // uncompressed payload in bytes. The actual value used is the minumum of the value
- // specified here and the value set by the application via the gRPC client API. If
- // either one is not set, then the other will be used. If neither is set, then the
- // built-in default is used.
- // TODO: support this.
- MaxReqSize uint64
- // MaxRespSize is the maximum allowed payload size for an individual response in a
- // stream (server->client) in bytes.
- // TODO: support this.
- MaxRespSize uint64
-}
-
-// ServiceConfig is provided by the service provider and contains parameters for how
-// clients that connect to the service should behave.
-// This is EXPERIMENTAL and subject to change.
-type ServiceConfig struct {
- // LB is the load balancer the service providers recommends. The balancer specified
- // via grpc.WithBalancer will override this.
- LB Balancer
- // Methods contains a map for the methods in this service.
- Methods map[string]MethodConfig
-}
-
-// SupportPackageIsVersion4 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the grpc package.
-//
-// This constant may be renamed in the future if a change in the generated code
-// requires a synchronised update of grpc-go and protoc-gen-go. This constant
-// should not be referenced from any other code.
-const SupportPackageIsVersion4 = true
+const grpcUA = "grpc-go/" + Version
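
The per-call options introduced in this file compose on a single invocation. A hedged usage sketch follows; the method name and message types are placeholders, and UseCompressor assumes a compressor named "gzip" has been registered via the encoding package:

    package example

    import (
        "golang.org/x/net/context"

        "google.golang.org/grpc"
    )

    // callWithOptions combines several of the CallOptions defined above on one
    // unary call. conn, req and resp stand in for an established client
    // connection and the request/response messages of a hypothetical method.
    func callWithOptions(ctx context.Context, conn *grpc.ClientConn, req, resp interface{}) error {
        return grpc.Invoke(ctx, "/example.Service/Method", req, resp, conn,
            grpc.FailFast(false),                      // wait-for-ready instead of failing fast
            grpc.MaxCallRecvMsgSize(16*1024*1024),     // raise the 4MB receive limit for this call
            grpc.UseCompressor("gzip"),                // EXPERIMENTAL: per-call compression
        )
    }
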
diff --git a/go/vendor/google.golang.org/grpc/server.go b/go/vendor/google.golang.org/grpc/server.go
index 985226d..f651621 100644
--- a/go/vendor/google.golang.org/grpc/server.go
+++ b/go/vendor/google.golang.org/grpc/server.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -38,6 +23,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"net"
"net/http"
"reflect"
@@ -46,19 +32,29 @@ import (
"sync"
"time"
+ "io/ioutil"
+
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/encoding"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
+ "google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
"google.golang.org/grpc/transport"
)
+const (
+ defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
+ defaultServerMaxSendMessageSize = math.MaxInt32
+)
+
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
// MethodDesc represents an RPC service's method specification.
@@ -94,35 +90,97 @@ type Server struct {
mu sync.Mutex // guards following
lis map[net.Listener]bool
conns map[io.Closer]bool
+ serve bool
drain bool
- ctx context.Context
- cancel context.CancelFunc
- // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished
- // and all the transport goes away.
- cv *sync.Cond
+ cv *sync.Cond // signaled when connections close for GracefulStop
m map[string]*service // service name -> service info
events trace.EventLog
+
+ quit chan struct{}
+ done chan struct{}
+ quitOnce sync.Once
+ doneOnce sync.Once
+ serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
}
type options struct {
- creds credentials.TransportCredentials
- codec Codec
- cp Compressor
- dc Decompressor
- maxMsgSize int
- unaryInt UnaryServerInterceptor
- streamInt StreamServerInterceptor
- inTapHandle tap.ServerInHandle
- statsHandler stats.Handler
- maxConcurrentStreams uint32
- useHandlerImpl bool // use http.Handler-based server
+ creds credentials.TransportCredentials
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ unaryInt UnaryServerInterceptor
+ streamInt StreamServerInterceptor
+ inTapHandle tap.ServerInHandle
+ statsHandler stats.Handler
+ maxConcurrentStreams uint32
+ maxReceiveMessageSize int
+ maxSendMessageSize int
+ useHandlerImpl bool // use http.Handler-based server
+ unknownStreamDesc *StreamDesc
+ keepaliveParams keepalive.ServerParameters
+ keepalivePolicy keepalive.EnforcementPolicy
+ initialWindowSize int32
+ initialConnWindowSize int32
+ writeBufferSize int
+ readBufferSize int
+ connectionTimeout time.Duration
}
-var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit
+var defaultServerOptions = options{
+ maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
+ maxSendMessageSize: defaultServerMaxSendMessageSize,
+ connectionTimeout: 120 * time.Second,
+}
-// A ServerOption sets options.
+// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption func(*options)
+// WriteBufferSize lets you set the size of the write buffer, which determines how much
+// data can be batched before doing a write on the wire.
+func WriteBufferSize(s int) ServerOption {
+ return func(o *options) {
+ o.writeBufferSize = s
+ }
+}
+
+// ReadBufferSize lets you set the size of the read buffer, which determines how much
+// data can be read at most for one read syscall.
+func ReadBufferSize(s int) ServerOption {
+ return func(o *options) {
+ o.readBufferSize = s
+ }
+}
+
+// InitialWindowSize returns a ServerOption that sets window size for stream.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialWindowSize(s int32) ServerOption {
+ return func(o *options) {
+ o.initialWindowSize = s
+ }
+}
+
+// InitialConnWindowSize returns a ServerOption that sets window size for a connection.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialConnWindowSize(s int32) ServerOption {
+ return func(o *options) {
+ o.initialConnWindowSize = s
+ }
+}
+
+// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
+func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
+ return func(o *options) {
+ o.keepaliveParams = kp
+ }
+}
+
+// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
+func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
+ return func(o *options) {
+ o.keepalivePolicy = kep
+ }
+}
+
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
func CustomCodec(codec Codec) ServerOption {
return func(o *options) {
@@ -130,25 +188,49 @@ func CustomCodec(codec Codec) ServerOption {
}
}
-// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
+// RPCCompressor returns a ServerOption that sets a compressor for outbound
+// messages. For backward compatibility, all outbound messages will be sent
+// using this compressor, regardless of incoming message compression. By
+// default, server messages will be sent using the same compressor with which
+// request messages were sent.
+//
+// Deprecated: use encoding.RegisterCompressor instead.
func RPCCompressor(cp Compressor) ServerOption {
return func(o *options) {
o.cp = cp
}
}
-// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
+// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
+// messages. It has higher priority than decompressors registered via
+// encoding.RegisterCompressor.
+//
+// Deprecated: use encoding.RegisterCompressor instead.
func RPCDecompressor(dc Decompressor) ServerOption {
return func(o *options) {
o.dc = dc
}
}
-// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages.
-// If this is not set, gRPC uses the default 4MB.
+// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
+// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead.
func MaxMsgSize(m int) ServerOption {
+ return MaxRecvMsgSize(m)
+}
+
+// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
+// If this is not set, gRPC uses the default 4MB.
+func MaxRecvMsgSize(m int) ServerOption {
return func(o *options) {
- o.maxMsgSize = m
+ o.maxReceiveMessageSize = m
+ }
+}
+
+// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
+// If this is not set, gRPC uses the default 4MB.
+func MaxSendMsgSize(m int) ServerOption {
+ return func(o *options) {
+ o.maxSendMessageSize = m
}
}
@@ -173,7 +255,7 @@ func Creds(c credentials.TransportCredentials) ServerOption {
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
return func(o *options) {
if o.unaryInt != nil {
- panic("The unary server interceptor has been set.")
+ panic("The unary server interceptor was already set and may not be reset.")
}
o.unaryInt = i
}
@@ -184,7 +266,7 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
return func(o *options) {
if o.streamInt != nil {
- panic("The stream server interceptor has been set.")
+ panic("The stream server interceptor was already set and may not be reset.")
}
o.streamInt = i
}
@@ -195,7 +277,7 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
func InTapHandle(h tap.ServerInHandle) ServerOption {
return func(o *options) {
if o.inTapHandle != nil {
- panic("The tap handle has been set.")
+ panic("The tap handle was already set and may not be reset.")
}
o.inTapHandle = h
}
@@ -208,11 +290,40 @@ func StatsHandler(h stats.Handler) ServerOption {
}
}
+// UnknownServiceHandler returns a ServerOption that allows for adding a custom
+// unknown service handler. The provided method is a bidi-streaming RPC service
+// handler that will be invoked instead of returning the "unimplemented" gRPC
+// error whenever a request is received for an unregistered service or method.
+// The handling function has full access to the Context of the request and the
+// stream, and the invocation bypasses interceptors.
+func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
+ return func(o *options) {
+ o.unknownStreamDesc = &StreamDesc{
+ StreamName: "unknown_service_handler",
+ Handler: streamHandler,
+ // We need to assume that the users of the streamHandler will want to use both.
+ ClientStreams: true,
+ ServerStreams: true,
+ }
+ }
+}
+
+// ConnectionTimeout returns a ServerOption that sets the timeout for
+// connection establishment (up to and including HTTP/2 handshaking) for all
+// new connections. If this is not set, the default is 120 seconds. A zero or
+// negative value will result in an immediate timeout.
+//
+// This API is EXPERIMENTAL.
+func ConnectionTimeout(d time.Duration) ServerOption {
+ return func(o *options) {
+ o.connectionTimeout = d
+ }
+}
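
The new server options above are typically combined when constructing the server. A sketch under the assumption that the values below suit the deployment (they are arbitrary examples, not recommendations):

    package example

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/keepalive"
    )

    // newServer wires several of the ServerOptions added in this change together.
    func newServer() *grpc.Server {
        return grpc.NewServer(
            grpc.MaxRecvMsgSize(8*1024*1024),       // per-message receive limit
            grpc.MaxSendMsgSize(8*1024*1024),       // per-message send limit
            grpc.ConnectionTimeout(30*time.Second), // EXPERIMENTAL handshake deadline
            grpc.KeepaliveParams(keepalive.ServerParameters{
                MaxConnectionIdle: 5 * time.Minute, // close idle connections
            }),
        )
    }
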
+
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
- var opts options
- opts.maxMsgSize = defaultMaxMsgSize
+ opts := defaultServerOptions
for _, o := range opt {
o(&opts)
}
@@ -225,9 +336,10 @@ func NewServer(opt ...ServerOption) *Server {
opts: opts,
conns: make(map[io.Closer]bool),
m: make(map[string]*service),
+ quit: make(chan struct{}),
+ done: make(chan struct{}),
}
s.cv = sync.NewCond(&s.mu)
- s.ctx, s.cancel = context.WithCancel(context.Background())
if EnableTracing {
_, file, line, _ := runtime.Caller(1)
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
@@ -251,8 +363,8 @@ func (s *Server) errorf(format string, a ...interface{}) {
}
}
-// RegisterService register a service and its implementation to the gRPC
-// server. Called from the IDL generated code. This must be called before
+// RegisterService registers a service and its implementation to the gRPC
+// server. It is called from the IDL generated code. This must be called before
// invoking Serve.
func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
ht := reflect.TypeOf(sd.HandlerType).Elem()
@@ -267,6 +379,9 @@ func (s *Server) register(sd *ServiceDesc, ss interface{}) {
s.mu.Lock()
defer s.mu.Unlock()
s.printf("RegisterService(%q)", sd.ServiceName)
+ if s.serve {
+ grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+ }
if _, ok := s.m[sd.ServiceName]; ok {
grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
}
@@ -297,7 +412,7 @@ type MethodInfo struct {
IsServerStream bool
}
-// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service.
+// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
type ServiceInfo struct {
Methods []MethodInfo
// Metadata is the metadata specified in ServiceDesc when registering service.
@@ -333,11 +448,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo {
return ret
}
-var (
- // ErrServerStopped indicates that the operation is now illegal because of
- // the server being stopped.
- ErrServerStopped = errors.New("grpc: the server has been stopped")
-)
+// ErrServerStopped indicates that the operation is now illegal because of
+// the server being stopped.
+var ErrServerStopped = errors.New("grpc: the server has been stopped")
func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
if s.opts.creds == nil {
@@ -351,15 +464,29 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
// this method returns.
-// Serve always returns non-nil error.
+// Serve will return a non-nil error unless Stop or GracefulStop is called.
func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("serving")
+ s.serve = true
if s.lis == nil {
+ // Serve called after Stop or GracefulStop.
s.mu.Unlock()
lis.Close()
return ErrServerStopped
}
+
+ s.serveWG.Add(1)
+ defer func() {
+ s.serveWG.Done()
+ select {
+ // Stop or GracefulStop called; block until done and return nil.
+ case <-s.quit:
+ <-s.done
+ default:
+ }
+ }()
+
s.lis[lis] = true
s.mu.Unlock()
defer func() {
@@ -390,37 +517,55 @@ func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("Accept error: %v; retrying in %v", err, tempDelay)
s.mu.Unlock()
+ timer := time.NewTimer(tempDelay)
select {
- case <-time.After(tempDelay):
- case <-s.ctx.Done():
+ case <-timer.C:
+ case <-s.quit:
+ timer.Stop()
+ return nil
}
continue
}
s.mu.Lock()
s.printf("done serving; Accept = %v", err)
s.mu.Unlock()
+
+ select {
+ case <-s.quit:
+ return nil
+ default:
+ }
return err
}
tempDelay = 0
- // Start a new goroutine to deal with rawConn
- // so we don't stall this Accept loop goroutine.
- go s.handleRawConn(rawConn)
+ // Start a new goroutine to deal with rawConn so we don't stall this Accept
+ // loop goroutine.
+ //
+ // Make sure we account for the goroutine so GracefulStop doesn't nil out
+ // s.conns before this conn can be added.
+ s.serveWG.Add(1)
+ go func() {
+ s.handleRawConn(rawConn)
+ s.serveWG.Done()
+ }()
}
}
-// handleRawConn is run in its own goroutine and handles a just-accepted
-// connection that has not had any I/O performed on it yet.
+// handleRawConn forks a goroutine to handle a just-accepted connection that
+// has not had any I/O performed on it yet.
func (s *Server) handleRawConn(rawConn net.Conn) {
+ rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
if err != nil {
s.mu.Lock()
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
s.mu.Unlock()
- grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
- // If serverHandShake returns ErrConnDispatched, keep rawConn open.
+ grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
+ // If serverHandshake returns ErrConnDispatched, keep rawConn open.
if err != credentials.ErrConnDispatched {
rawConn.Close()
}
+ rawConn.SetDeadline(time.Time{})
return
}
@@ -432,24 +577,44 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
}
s.mu.Unlock()
+ var serve func()
+ c := conn.(io.Closer)
if s.opts.useHandlerImpl {
- s.serveUsingHandler(conn)
+ serve = func() { s.serveUsingHandler(conn) }
} else {
- s.serveHTTP2Transport(conn, authInfo)
+ // Finish handshaking (HTTP2)
+ st := s.newHTTP2Transport(conn, authInfo)
+ if st == nil {
+ return
+ }
+ c = st
+ serve = func() { s.serveStreams(st) }
+ }
+
+ rawConn.SetDeadline(time.Time{})
+ if !s.addConn(c) {
+ return
}
+ go func() {
+ serve()
+ s.removeConn(c)
+ }()
}
-// serveHTTP2Transport sets up a http/2 transport (using the
-// gRPC http2 server transport in transport/http2_server.go) and
-// serves streams on it.
-// This is run in its own goroutine (it does network I/O in
-// transport.NewServerTransport).
-func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
+// newHTTP2Transport sets up a http/2 transport (using the
+// gRPC http2 server transport in transport/http2_server.go).
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
config := &transport.ServerConfig{
- MaxStreams: s.opts.maxConcurrentStreams,
- AuthInfo: authInfo,
- InTapHandle: s.opts.inTapHandle,
- StatsHandler: s.opts.statsHandler,
+ MaxStreams: s.opts.maxConcurrentStreams,
+ AuthInfo: authInfo,
+ InTapHandle: s.opts.inTapHandle,
+ StatsHandler: s.opts.statsHandler,
+ KeepaliveParams: s.opts.keepaliveParams,
+ KeepalivePolicy: s.opts.keepalivePolicy,
+ InitialWindowSize: s.opts.initialWindowSize,
+ InitialConnWindowSize: s.opts.initialConnWindowSize,
+ WriteBufferSize: s.opts.writeBufferSize,
+ ReadBufferSize: s.opts.readBufferSize,
}
st, err := transport.NewServerTransport("http2", c, config)
if err != nil {
@@ -457,18 +622,13 @@ func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo)
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
s.mu.Unlock()
c.Close()
- grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
- return
- }
- if !s.addConn(st) {
- st.Close()
- return
+ grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
+ return nil
}
- s.serveStreams(st)
+ return st
}
func (s *Server) serveStreams(st transport.ServerTransport) {
- defer s.removeConn(st)
defer st.Close()
var wg sync.WaitGroup
st.HandleStreams(func(stream *transport.Stream) {
@@ -502,11 +662,6 @@ var _ http.Handler = (*Server)(nil)
//
// conn is the *tls.Conn that's already been authenticated.
func (s *Server) serveUsingHandler(conn net.Conn) {
- if !s.addConn(conn) {
- conn.Close()
- return
- }
- defer s.removeConn(conn)
h2s := &http2.Server{
MaxConcurrentStreams: s.opts.maxConcurrentStreams,
}
@@ -515,6 +670,30 @@ func (s *Server) serveUsingHandler(conn net.Conn) {
})
}
+// ServeHTTP implements the Go standard library's http.Handler
+// interface by responding to the gRPC request r, by looking up
+// the requested gRPC method in the gRPC server s.
+//
+// The provided HTTP request must have arrived on an HTTP/2
+// connection. When using the Go standard library's server,
+// practically this means that the Request must also have arrived
+// over TLS.
+//
+// To share one port (such as 443 for https) between gRPC and an
+// existing http.Handler, use a root http.Handler such as:
+//
+// if r.ProtoMajor == 2 && strings.HasPrefix(
+// r.Header.Get("Content-Type"), "application/grpc") {
+// grpcServer.ServeHTTP(w, r)
+// } else {
+// yourMux.ServeHTTP(w, r)
+// }
+//
+// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
+// separate from grpc-go's HTTP/2 server. Performance and features may vary
+// between the two paths. ServeHTTP does not support some gRPC features
+// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
+// and subject to change.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st, err := transport.NewServerHandlerTransport(w, r)
if err != nil {
@@ -522,7 +701,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
if !s.addConn(st) {
- st.Close()
return
}
defer s.removeConn(st)
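As a hedged illustration of the port-sharing pattern described in the ServeHTTP doc comment above, the following standalone sketch routes HTTP/2 gRPC traffic to the gRPC server and everything else to a plain mux. The certificate paths, address, and handler are assumptions, not taken from this diff.

package main

import (
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()

	httpMux := http.NewServeMux()
	httpMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("plain HTTP handler\n"))
	})

	// Route HTTP/2 requests with a gRPC content type to the gRPC server;
	// everything else falls through to the ordinary mux.
	root := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
		} else {
			httpMux.ServeHTTP(w, r)
		}
	})

	// TLS is what lets the standard library negotiate HTTP/2, which
	// ServeHTTP requires; the cert/key files are placeholders.
	http.ListenAndServeTLS(":443", "cert.pem", "key.pem", root)
}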
@@ -552,9 +730,15 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea
func (s *Server) addConn(c io.Closer) bool {
s.mu.Lock()
defer s.mu.Unlock()
- if s.conns == nil || s.drain {
+ if s.conns == nil {
+ c.Close()
return false
}
+ if s.drain {
+ // Transport added after we drained our existing conns: drain it
+ // immediately.
+ c.(transport.ServerTransport).Drain()
+ }
s.conns[c] = true
return true
}
@@ -568,29 +752,22 @@ func (s *Server) removeConn(c io.Closer) {
}
}
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
+func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
var (
- cbuf *bytes.Buffer
outPayload *stats.OutPayload
)
- if cp != nil {
- cbuf = new(bytes.Buffer)
- }
if s.opts.statsHandler != nil {
outPayload = &stats.OutPayload{}
}
- p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
+ hdr, data, err := encode(s.opts.codec, msg, cp, outPayload, comp)
if err != nil {
- // This typically indicates a fatal issue (e.g., memory
- // corruption or hardware faults) the application program
- // cannot handle.
- //
- // TODO(zhaoq): There exist other options also such as only closing the
- // faulty stream locally and remotely (Other streams can keep going). Find
- // the optimal option.
- grpclog.Fatalf("grpc: Server failed to encode response %v", err)
+ grpclog.Errorln("grpc: server failed to encode response: ", err)
+ return err
+ }
+ if len(data) > s.opts.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize)
}
- err = t.Write(stream, p, opts)
+ err = t.Write(stream, hdr, data, opts)
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
s.opts.statsHandler.HandleRPC(stream.Context(), outPayload)
@@ -605,9 +782,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
- }
- defer func() {
- if sh != nil {
+ defer func() {
end := &stats.End{
EndTime: time.Now(),
}
@@ -615,8 +790,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
end.Error = toRPCErr(err)
}
sh.HandleRPC(stream.Context(), end)
- }
- }()
+ }()
+ }
if trInfo != nil {
defer trInfo.tr.Finish()
trInfo.firstLine.client = false
@@ -628,141 +803,176 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
}()
}
+
+ // comp and cp are used for compression. decomp and dc are used for
+ // decompression. If comp and decomp are both set, they are the same;
+ // however they are kept separate to ensure that at most one of the
+ // compressor/decompressor variable pairs is set for use later.
+ var comp, decomp encoding.Compressor
+ var cp Compressor
+ var dc Decompressor
+
+ // If dc is set and matches the stream's compression, use it. Otherwise, try
+ // to find a matching registered compressor for decomp.
+ if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
+ dc = s.opts.dc
+ } else if rc != "" && rc != encoding.Identity {
+ decomp = encoding.GetCompressor(rc)
+ if decomp == nil {
+ st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
+ t.WriteStatus(stream, st)
+ return st.Err()
+ }
+ }
+
+ // If cp is set, use it. Otherwise, attempt to compress the response using
+ // the incoming message compression method.
+ //
+ // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
- // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
- stream.SetSendCompress(s.opts.cp.Type())
+ cp = s.opts.cp
+ stream.SetSendCompress(cp.Type())
+ } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
+ // Legacy compressor not specified; attempt to respond with same encoding.
+ comp = encoding.GetCompressor(rc)
+ if comp != nil {
+ stream.SetSendCompress(rc)
+ }
}
+
p := &parser{r: stream}
- for {
- pf, req, err := p.recvMsg(s.opts.maxMsgSize)
- if err == io.EOF {
- // The entire stream is done (for unary RPC only).
- return err
- }
- if err == io.ErrUnexpectedEOF {
- err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
- }
- if err != nil {
- switch err := err.(type) {
- case *rpcError:
- if e := t.WriteStatus(stream, err.code, err.desc); e != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
- }
+ pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
+ if err == io.EOF {
+ // The entire stream is done (for unary RPC only).
+ return err
+ }
+ if err == io.ErrUnexpectedEOF {
+ err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+ }
+ if err != nil {
+ if st, ok := status.FromError(err); ok {
+ if e := t.WriteStatus(stream, st); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
+ }
+ } else {
+ switch st := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
- if e := t.WriteStatus(stream, err.Code, err.Desc); e != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
+ if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default:
- panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
+ panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
}
- return err
}
-
- if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
- switch err := err.(type) {
- case *rpcError:
- if e := t.WriteStatus(stream, err.code, err.desc); e != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
- }
- return err
- default:
- if e := t.WriteStatus(stream, codes.Internal, err.Error()); e != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
- }
- // TODO checkRecvPayload always return RPC error. Add a return here if necessary.
- }
+ return err
+ }
+ if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
+ if e := t.WriteStatus(stream, st); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
- var inPayload *stats.InPayload
- if sh != nil {
- inPayload = &stats.InPayload{
- RecvTime: time.Now(),
- }
+ return st.Err()
+ }
+ var inPayload *stats.InPayload
+ if sh != nil {
+ inPayload = &stats.InPayload{
+ RecvTime: time.Now(),
}
- statusCode := codes.OK
- statusDesc := ""
- df := func(v interface{}) error {
- if inPayload != nil {
- inPayload.WireLength = len(req)
- }
- if pf == compressionMade {
- var err error
- req, err = s.opts.dc.Do(bytes.NewReader(req))
+ }
+ df := func(v interface{}) error {
+ if inPayload != nil {
+ inPayload.WireLength = len(req)
+ }
+ if pf == compressionMade {
+ var err error
+ if dc != nil {
+ req, err = dc.Do(bytes.NewReader(req))
if err != nil {
- if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
- }
- return Errorf(codes.Internal, err.Error())
+ return status.Errorf(codes.Internal, err.Error())
}
- }
- if len(req) > s.opts.maxMsgSize {
- // TODO: Revisit the error code. Currently keep it consistent with
- // java implementation.
- statusCode = codes.Internal
- statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize)
- }
- if err := s.opts.codec.Unmarshal(req, v); err != nil {
- return err
- }
- if inPayload != nil {
- inPayload.Payload = v
- inPayload.Data = req
- inPayload.Length = len(req)
- sh.HandleRPC(stream.Context(), inPayload)
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
- }
- return nil
- }
- reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
- if appErr != nil {
- if err, ok := appErr.(*rpcError); ok {
- statusCode = err.code
- statusDesc = err.desc
} else {
- statusCode = convertCode(appErr)
- statusDesc = appErr.Error()
- }
- if trInfo != nil && statusCode != codes.OK {
- trInfo.tr.LazyLog(stringer(statusDesc), true)
- trInfo.tr.SetError()
- }
- if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
+ tmp, _ := decomp.Decompress(bytes.NewReader(req))
+ req, err = ioutil.ReadAll(tmp)
+ if err != nil {
+ return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ }
}
- return Errorf(statusCode, statusDesc)
+ }
+ if len(req) > s.opts.maxReceiveMessageSize {
+ // TODO: Revisit the error code. Currently keep it consistent with
+ // java implementation.
+ return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
+ }
+ if err := s.opts.codec.Unmarshal(req, v); err != nil {
+ return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
+ }
+ if inPayload != nil {
+ inPayload.Payload = v
+ inPayload.Data = req
+ inPayload.Length = len(req)
+ sh.HandleRPC(stream.Context(), inPayload)
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
+ }
+ return nil
+ }
+ reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
+ if appErr != nil {
+ appStatus, ok := status.FromError(appErr)
+ if !ok {
+ // Convert appErr if it is not a grpc status error.
+ appErr = status.Error(convertCode(appErr), appErr.Error())
+ appStatus, _ = status.FromError(appErr)
}
if trInfo != nil {
- trInfo.tr.LazyLog(stringer("OK"), false)
+ trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
+ trInfo.tr.SetError()
+ }
+ if e := t.WriteStatus(stream, appStatus); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
}
- opts := &transport.Options{
- Last: true,
- Delay: false,
+ return appErr
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(stringer("OK"), false)
+ }
+ opts := &transport.Options{
+ Last: true,
+ Delay: false,
+ }
+
+ if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+ if err == io.EOF {
+ // The entire stream is done (for unary RPC only).
+ return err
}
- if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
- switch err := err.(type) {
+ if s, ok := status.FromError(err); ok {
+ if e := t.WriteStatus(stream, s); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
+ }
+ } else {
+ switch st := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
- statusCode = err.Code
- statusDesc = err.Desc
+ if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
+ }
default:
- statusCode = codes.Unknown
- statusDesc = err.Error()
+ panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
}
- return err
- }
- if trInfo != nil {
- trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
- errWrite := t.WriteStatus(stream, statusCode, statusDesc)
- if statusCode != codes.OK {
- return Errorf(statusCode, statusDesc)
- }
- return errWrite
+ return err
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
+ // TODO: Should we be logging if writing status failed here, like above?
+ // Should the logging be in WriteStatus? Should we ignore the WriteStatus
+ // error or allow the stats handler to see it?
+ return t.WriteStatus(stream, status.New(codes.OK, ""))
}
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
@@ -772,9 +982,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
- }
- defer func() {
- if sh != nil {
+ defer func() {
end := &stats.End{
EndTime: time.Now(),
}
@@ -782,25 +990,47 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
end.Error = toRPCErr(err)
}
sh.HandleRPC(stream.Context(), end)
+ }()
+ }
+ ss := &serverStream{
+ t: t,
+ s: stream,
+ p: &parser{r: stream},
+ codec: s.opts.codec,
+ maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
+ maxSendMessageSize: s.opts.maxSendMessageSize,
+ trInfo: trInfo,
+ statsHandler: sh,
+ }
+
+ // If dc is set and matches the stream's compression, use it. Otherwise, try
+ // to find a matching registered compressor for decomp.
+ if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
+ ss.dc = s.opts.dc
+ } else if rc != "" && rc != encoding.Identity {
+ ss.decomp = encoding.GetCompressor(rc)
+ if ss.decomp == nil {
+ st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
+ t.WriteStatus(ss.s, st)
+ return st.Err()
}
- }()
+ }
+
+ // If cp is set, use it. Otherwise, attempt to compress the response using
+ // the incoming message compression method.
+ //
+ // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
+ ss.cp = s.opts.cp
stream.SetSendCompress(s.opts.cp.Type())
+ } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
+ // Legacy compressor not specified; attempt to respond with same encoding.
+ ss.comp = encoding.GetCompressor(rc)
+ if ss.comp != nil {
+ stream.SetSendCompress(rc)
+ }
}
- ss := &serverStream{
- t: t,
- s: stream,
- p: &parser{r: stream},
- codec: s.opts.codec,
- cp: s.opts.cp,
- dc: s.opts.dc,
- maxMsgSize: s.opts.maxMsgSize,
- trInfo: trInfo,
- statsHandler: sh,
- }
- if ss.cp != nil {
- ss.cbuf = new(bytes.Buffer)
- }
+
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
defer func() {
@@ -815,43 +1045,47 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
}()
}
var appErr error
+ var server interface{}
+ if srv != nil {
+ server = srv.server
+ }
if s.opts.streamInt == nil {
- appErr = sd.Handler(srv.server, ss)
+ appErr = sd.Handler(server, ss)
} else {
info := &StreamServerInfo{
FullMethod: stream.Method(),
IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams,
}
- appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler)
+ appErr = s.opts.streamInt(server, ss, info, sd.Handler)
}
if appErr != nil {
- if err, ok := appErr.(*rpcError); ok {
- ss.statusCode = err.code
- ss.statusDesc = err.desc
- } else if err, ok := appErr.(transport.StreamError); ok {
- ss.statusCode = err.Code
- ss.statusDesc = err.Desc
- } else {
- ss.statusCode = convertCode(appErr)
- ss.statusDesc = appErr.Error()
+ appStatus, ok := status.FromError(appErr)
+ if !ok {
+ switch err := appErr.(type) {
+ case transport.StreamError:
+ appStatus = status.New(err.Code, err.Desc)
+ default:
+ appStatus = status.New(convertCode(appErr), appErr.Error())
+ }
+ appErr = appStatus.Err()
+ }
+ if trInfo != nil {
+ ss.mu.Lock()
+ ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
+ ss.trInfo.tr.SetError()
+ ss.mu.Unlock()
}
+ t.WriteStatus(ss.s, appStatus)
+ // TODO: Should we log an error from WriteStatus here and below?
+ return appErr
}
if trInfo != nil {
ss.mu.Lock()
- if ss.statusCode != codes.OK {
- ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
- ss.trInfo.tr.SetError()
- } else {
- ss.trInfo.tr.LazyLog(stringer("OK"), false)
- }
+ ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.mu.Unlock()
}
- errWrite := t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
- if ss.statusCode != codes.OK {
- return Errorf(ss.statusCode, ss.statusDesc)
- }
- return errWrite
+ return t.WriteStatus(ss.s, status.New(codes.OK, ""))
}
@@ -867,12 +1101,12 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.SetError()
}
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
- if err := t.WriteStatus(stream, codes.InvalidArgument, errDesc); err != nil {
+ if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
}
- grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
}
if trInfo != nil {
trInfo.tr.Finish()
@@ -883,17 +1117,21 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
method := sm[pos+1:]
srv, ok := s.m[service]
if !ok {
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+ s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+ return
+ }
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
trInfo.tr.SetError()
}
errDesc := fmt.Sprintf("unknown service %v", service)
- if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil {
+ if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
}
- grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
}
if trInfo != nil {
trInfo.tr.Finish()
@@ -913,13 +1151,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
trInfo.tr.SetError()
}
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+ s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+ return
+ }
errDesc := fmt.Sprintf("unknown method %v", method)
- if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil {
+ if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
}
- grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
}
if trInfo != nil {
trInfo.tr.Finish()
@@ -932,6 +1174,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
// pending RPCs on the client side will get notified by connection
// errors.
func (s *Server) Stop() {
+ s.quitOnce.Do(func() {
+ close(s.quit)
+ })
+
+ defer func() {
+ s.serveWG.Wait()
+ s.doneOnce.Do(func() {
+ close(s.done)
+ })
+ }()
+
s.mu.Lock()
listeners := s.lis
s.lis = nil
@@ -949,7 +1202,6 @@ func (s *Server) Stop() {
}
s.mu.Lock()
- s.cancel()
if s.events != nil {
s.events.Finish()
s.events = nil
@@ -957,25 +1209,42 @@ func (s *Server) Stop() {
s.mu.Unlock()
}
-// GracefulStop stops the gRPC server gracefully. It stops the server to accept new
-// connections and RPCs and blocks until all the pending RPCs are finished.
+// GracefulStop stops the gRPC server gracefully. It stops the server from
+// accepting new connections and RPCs and blocks until all the pending RPCs are
+// finished.
func (s *Server) GracefulStop() {
+ s.quitOnce.Do(func() {
+ close(s.quit)
+ })
+
+ defer func() {
+ s.doneOnce.Do(func() {
+ close(s.done)
+ })
+ }()
+
s.mu.Lock()
- defer s.mu.Unlock()
if s.conns == nil {
+ s.mu.Unlock()
return
}
for lis := range s.lis {
lis.Close()
}
s.lis = nil
- s.cancel()
if !s.drain {
for c := range s.conns {
c.(transport.ServerTransport).Drain()
}
s.drain = true
}
+
+ // Wait for serving threads to be ready to exit. Only then can we be sure no
+ // new conns will be created.
+ s.mu.Unlock()
+ s.serveWG.Wait()
+ s.mu.Lock()
+
for len(s.conns) != 0 {
s.cv.Wait()
}
@@ -984,28 +1253,15 @@ func (s *Server) GracefulStop() {
s.events.Finish()
s.events = nil
}
+ s.mu.Unlock()
}
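A rough usage sketch of the Stop/GracefulStop behaviour implemented above: GracefulStop closes the listeners, drains existing connections, and blocks until pending RPCs finish, after which Serve returns nil. The address and the SIGINT trigger are illustrative assumptions.

package main

import (
	"net"
	"os"
	"os/signal"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	// Service registration would go here.

	go func() {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, os.Interrupt)
		<-sig
		// Stops accepting new connections and RPCs, then waits for all
		// pending RPCs to complete before returning.
		s.GracefulStop()
	}()

	// Serve returns nil once GracefulStop (or Stop) has completed.
	_ = s.Serve(lis)
}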
func init() {
- internal.TestingCloseConns = func(arg interface{}) {
- arg.(*Server).testingCloseConns()
- }
internal.TestingUseHandlerImpl = func(arg interface{}) {
arg.(*Server).opts.useHandlerImpl = true
}
}
-// testingCloseConns closes all existing transports but keeps s.lis
-// accepting new connections.
-func (s *Server) testingCloseConns() {
- s.mu.Lock()
- for c := range s.conns {
- c.Close()
- delete(s.conns, c)
- }
- s.mu.Unlock()
-}
-
// SetHeader sets the header metadata.
// When called multiple times, all the provided metadata will be merged.
// All the metadata will be sent out when one of the following happens:
@@ -1018,7 +1274,7 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
}
stream, ok := transport.StreamFromContext(ctx)
if !ok {
- return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+ return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
return stream.SetHeader(md)
}
@@ -1028,7 +1284,7 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
func SendHeader(ctx context.Context, md metadata.MD) error {
stream, ok := transport.StreamFromContext(ctx)
if !ok {
- return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+ return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
t := stream.ServerTransport()
if t == nil {
@@ -1048,7 +1304,7 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
}
stream, ok := transport.StreamFromContext(ctx)
if !ok {
- return Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+ return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
return stream.SetTrailer(md)
}
diff --git a/go/vendor/google.golang.org/grpc/service_config.go b/go/vendor/google.golang.org/grpc/service_config.go
new file mode 100644
index 0000000..53fa88f
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/service_config.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+// DEPRECATED: Users should not use this struct. Service config should be received
+// through name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig struct {
+ // WaitForReady indicates whether RPCs sent to this method should wait until
+ // the connection is ready by default (!failfast). The value specified via the
+ // gRPC client API will override the value set here.
+ WaitForReady *bool
+ // Timeout is the default timeout for RPCs sent to this method. The actual
+ // deadline used will be the minimum of the value specified here and the value
+ // set by the application via the gRPC client API. If either one is not set,
+ // then the other will be used. If neither is set, then the RPC has no deadline.
+ Timeout *time.Duration
+ // MaxReqSize is the maximum allowed payload size for an individual request in a
+ // stream (client->server) in bytes. The size which is measured is the serialized
+ // payload after per-message compression (but before stream compression) in bytes.
+ // The actual value used is the minimum of the value specified here and the value set
+ // by the application via the gRPC client API. If either one is not set, then the other
+ // will be used. If neither is set, then the built-in default is used.
+ MaxReqSize *int
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ MaxRespSize *int
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+// DEPRECATED: Users should not use this struct. Service config should be received
+// through name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+ // LB is the load balancer the service providers recommends. The balancer specified
+ // via grpc.WithBalancer will override this.
+ LB *string
+ // Methods contains a map for the methods in this service.
+ // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
+ // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
+ // Otherwise, the method has no MethodConfig to use.
+ Methods map[string]MethodConfig
+}
+
+func parseDuration(s *string) (*time.Duration, error) {
+ if s == nil {
+ return nil, nil
+ }
+ if !strings.HasSuffix(*s, "s") {
+ return nil, fmt.Errorf("malformed duration %q", *s)
+ }
+ ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
+ if len(ss) > 2 {
+ return nil, fmt.Errorf("malformed duration %q", *s)
+ }
+ // hasDigits is set if either the whole or fractional part of the number is
+ // present, since both are optional but one is required.
+ hasDigits := false
+ var d time.Duration
+ if len(ss[0]) > 0 {
+ i, err := strconv.ParseInt(ss[0], 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
+ }
+ d = time.Duration(i) * time.Second
+ hasDigits = true
+ }
+ if len(ss) == 2 && len(ss[1]) > 0 {
+ if len(ss[1]) > 9 {
+ return nil, fmt.Errorf("malformed duration %q", *s)
+ }
+ f, err := strconv.ParseInt(ss[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
+ }
+ for i := 9; i > len(ss[1]); i-- {
+ f *= 10
+ }
+ d += time.Duration(f)
+ hasDigits = true
+ }
+ if !hasDigits {
+ return nil, fmt.Errorf("malformed duration %q", *s)
+ }
+
+ return &d, nil
+}
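A small worked example of the duration format parseDuration accepts: a decimal count of seconds with an optional fractional part of up to nine digits, terminated by "s". The test below is a hypothetical in-package sketch, not a fixture from this diff.

package grpc

import (
	"testing"
	"time"
)

func TestParseDurationSketch(t *testing.T) {
	in := "1.5s" // fractional part is scaled to nanoseconds
	d, err := parseDuration(&in)
	if err != nil {
		t.Fatal(err)
	}
	if *d != 1500*time.Millisecond {
		t.Fatalf("parseDuration(%q) = %v, want 1.5s", in, *d)
	}
	// A missing "s" suffix (or a value with no digits at all) is rejected.
	bad := "1.5"
	if _, err := parseDuration(&bad); err == nil {
		t.Fatalf("parseDuration(%q) unexpectedly succeeded", bad)
	}
}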
+
+type jsonName struct {
+ Service *string
+ Method *string
+}
+
+func (j jsonName) generatePath() (string, bool) {
+ if j.Service == nil {
+ return "", false
+ }
+ res := "/" + *j.Service + "/"
+ if j.Method != nil {
+ res += *j.Method
+ }
+ return res, true
+}
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonMC struct {
+ Name *[]jsonName
+ WaitForReady *bool
+ Timeout *string
+ MaxRequestMessageBytes *int64
+ MaxResponseMessageBytes *int64
+}
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonSC struct {
+ LoadBalancingPolicy *string
+ MethodConfig *[]jsonMC
+}
+
+func parseServiceConfig(js string) (ServiceConfig, error) {
+ var rsc jsonSC
+ err := json.Unmarshal([]byte(js), &rsc)
+ if err != nil {
+ grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ return ServiceConfig{}, err
+ }
+ sc := ServiceConfig{
+ LB: rsc.LoadBalancingPolicy,
+ Methods: make(map[string]MethodConfig),
+ }
+ if rsc.MethodConfig == nil {
+ return sc, nil
+ }
+
+ for _, m := range *rsc.MethodConfig {
+ if m.Name == nil {
+ continue
+ }
+ d, err := parseDuration(m.Timeout)
+ if err != nil {
+ grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ return ServiceConfig{}, err
+ }
+
+ mc := MethodConfig{
+ WaitForReady: m.WaitForReady,
+ Timeout: d,
+ }
+ if m.MaxRequestMessageBytes != nil {
+ if *m.MaxRequestMessageBytes > int64(maxInt) {
+ mc.MaxReqSize = newInt(maxInt)
+ } else {
+ mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
+ }
+ }
+ if m.MaxResponseMessageBytes != nil {
+ if *m.MaxResponseMessageBytes > int64(maxInt) {
+ mc.MaxRespSize = newInt(maxInt)
+ } else {
+ mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
+ }
+ }
+ for _, n := range *m.Name {
+ if path, valid := n.generatePath(); valid {
+ sc.Methods[path] = mc
+ }
+ }
+ }
+
+ return sc, nil
+}
+
+func min(a, b *int) *int {
+ if *a < *b {
+ return a
+ }
+ return b
+}
+
+func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
+ if mcMax == nil && doptMax == nil {
+ return &defaultVal
+ }
+ if mcMax != nil && doptMax != nil {
+ return min(mcMax, doptMax)
+ }
+ if mcMax != nil {
+ return mcMax
+ }
+ return doptMax
+}
+
+func newInt(b int) *int {
+ return &b
+}
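To illustrate the JSON shape parseServiceConfig consumes and how per-method entries are keyed, here is a hypothetical in-package sketch; the service and method names are invented.

package grpc

import "fmt"

func exampleParseServiceConfig() {
	js := `{
	  "loadBalancingPolicy": "round_robin",
	  "methodConfig": [{
	    "name": [{"service": "helloworld.Greeter", "method": "SayHello"}],
	    "waitForReady": true,
	    "timeout": "1.5s",
	    "maxRequestMessageBytes": 1024,
	    "maxResponseMessageBytes": 4096
	  }]
	}`
	sc, err := parseServiceConfig(js)
	if err != nil {
		panic(err)
	}
	// Methods is keyed by "/" + service + "/" + method; omitting "method"
	// produces a "/service/" entry that acts as the service-wide default.
	mc := sc.Methods["/helloworld.Greeter/SayHello"]
	fmt.Println(*mc.WaitForReady, *mc.Timeout, *mc.MaxReqSize, *mc.MaxRespSize)
}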
diff --git a/go/vendor/google.golang.org/grpc/stats/handlers.go b/go/vendor/google.golang.org/grpc/stats/handlers.go
index 26e1a8e..05b384c 100644
--- a/go/vendor/google.golang.org/grpc/stats/handlers.go
+++ b/go/vendor/google.golang.org/grpc/stats/handlers.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -45,19 +30,22 @@ type ConnTagInfo struct {
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
- // TODO add QOS related fields.
}
// RPCTagInfo defines the relevant information needed by RPC context tagger.
type RPCTagInfo struct {
// FullMethodName is the RPC method in the format of /package.service/method.
FullMethodName string
+ // FailFast indicates if this RPC is failfast.
+ // This field is only valid on client side, it's always false on server side.
+ FailFast bool
}
// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
type Handler interface {
// TagRPC can attach some information to the given context.
- // The returned context is used in the rest lifetime of the RPC.
+ // The context used for the remainder of the RPC's lifetime will be derived from
+ // the returned context.
TagRPC(context.Context, *RPCTagInfo) context.Context
// HandleRPC processes the RPC stats.
HandleRPC(context.Context, RPCStats)
diff --git a/go/vendor/google.golang.org/grpc/stats/stats.go b/go/vendor/google.golang.org/grpc/stats/stats.go
index 43d6f00..d5aa2f7 100644
--- a/go/vendor/google.golang.org/grpc/stats/stats.go
+++ b/go/vendor/google.golang.org/grpc/stats/stats.go
@@ -1,36 +1,23 @@
/*
*
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
+//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
+
// Package stats is for collecting and reporting various network and RPC stats.
// This package is for monitoring purpose only. All fields are read-only.
// All APIs are experimental.
@@ -39,6 +26,8 @@ package stats // import "google.golang.org/grpc/stats"
import (
"net"
"time"
+
+ "golang.org/x/net/context"
)
// RPCStats contains stats information about RPCs.
@@ -49,7 +38,7 @@ type RPCStats interface {
}
// Begin contains stats when an RPC begins.
-// FailFast are only valid if Client is true.
+// FailFast is only valid if this Begin is from client side.
type Begin struct {
// Client is true if this Begin is from client side.
Client bool
@@ -59,7 +48,7 @@ type Begin struct {
FailFast bool
}
-// IsClient indicates if this is from client side.
+// IsClient indicates if the stats information is from client side.
func (s *Begin) IsClient() bool { return s.Client }
func (s *Begin) isRPCStats() {}
@@ -80,19 +69,19 @@ type InPayload struct {
RecvTime time.Time
}
-// IsClient indicates if this is from client side.
+// IsClient indicates if the stats information is from client side.
func (s *InPayload) IsClient() bool { return s.Client }
func (s *InPayload) isRPCStats() {}
// InHeader contains stats when a header is received.
-// FullMethod, addresses and Compression are only valid if Client is false.
type InHeader struct {
// Client is true if this InHeader is from client side.
Client bool
// WireLength is the wire length of header.
WireLength int
+ // The following fields are valid only if Client is false.
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
// RemoteAddr is the remote address of the corresponding connection.
@@ -103,7 +92,7 @@ type InHeader struct {
Compression string
}
-// IsClient indicates if this is from client side.
+// IsClient indicates if the stats information is from client side.
func (s *InHeader) IsClient() bool { return s.Client }
func (s *InHeader) isRPCStats() {}
@@ -116,7 +105,7 @@ type InTrailer struct {
WireLength int
}
-// IsClient indicates if this is from client side.
+// IsClient indicates if the stats information is from client side.
func (s *InTrailer) IsClient() bool { return s.Client }
func (s *InTrailer) isRPCStats() {}
@@ -137,19 +126,17 @@ type OutPayload struct {
SentTime time.Time
}
-// IsClient indicates if this is from client side.
+// IsClient indicates if this stats information is from client side.
func (s *OutPayload) IsClient() bool { return s.Client }
func (s *OutPayload) isRPCStats() {}
// OutHeader contains stats when a header is sent.
-// FullMethod, addresses and Compression are only valid if Client is true.
type OutHeader struct {
// Client is true if this OutHeader is from client side.
Client bool
- // WireLength is the wire length of header.
- WireLength int
+ // The following fields are valid only if Client is true.
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
// RemoteAddr is the remote address of the corresponding connection.
@@ -160,7 +147,7 @@ type OutHeader struct {
Compression string
}
-// IsClient indicates if this is from client side.
+// IsClient indicates if this stats information is from client side.
func (s *OutHeader) IsClient() bool { return s.Client }
func (s *OutHeader) isRPCStats() {}
@@ -173,7 +160,7 @@ type OutTrailer struct {
WireLength int
}
-// IsClient indicates if this is from client side.
+// IsClient indicates if this stats information is from client side.
func (s *OutTrailer) IsClient() bool { return s.Client }
func (s *OutTrailer) isRPCStats() {}
@@ -184,7 +171,9 @@ type End struct {
Client bool
// EndTime is the time when the RPC ends.
EndTime time.Time
- // Error is the error just happened. It implements status.Status if non-nil.
+ // Error is the error the RPC ended with. It is an error generated from
+ // status.Status and can be converted back to status.Status using
+ // status.FromError if non-nil.
Error error
}
@@ -221,3 +210,85 @@ type ConnEnd struct {
func (s *ConnEnd) IsClient() bool { return s.Client }
func (s *ConnEnd) isConnStats() {}
+
+type incomingTagsKey struct{}
+type outgoingTagsKey struct{}
+
+// SetTags attaches stats tagging data to the context, which will be sent in
+// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
+// SetTags will overwrite the values from earlier calls.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release. New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func SetTags(ctx context.Context, b []byte) context.Context {
+ return context.WithValue(ctx, outgoingTagsKey{}, b)
+}
+
+// Tags returns the tags from the context for the inbound RPC.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release. New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func Tags(ctx context.Context) []byte {
+ b, _ := ctx.Value(incomingTagsKey{}).([]byte)
+ return b
+}
+
+// SetIncomingTags attaches stats tagging data to the context, to be read by
+// the application (not sent in outgoing RPCs).
+//
+// This is intended for gRPC-internal use ONLY.
+func SetIncomingTags(ctx context.Context, b []byte) context.Context {
+ return context.WithValue(ctx, incomingTagsKey{}, b)
+}
+
+// OutgoingTags returns the tags from the context for the outbound RPC.
+//
+// This is intended for gRPC-internal use ONLY.
+func OutgoingTags(ctx context.Context) []byte {
+ b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
+ return b
+}
+
+type incomingTraceKey struct{}
+type outgoingTraceKey struct{}
+
+// SetTrace attaches stats tagging data to the context, which will be sent in
+// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
+// SetTrace will overwrite the values from earlier calls.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release. New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func SetTrace(ctx context.Context, b []byte) context.Context {
+ return context.WithValue(ctx, outgoingTraceKey{}, b)
+}
+
+// Trace returns the trace from the context for the inbound RPC.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release. New uses should transmit
+// this type of data using metadata with a different, non-reserved (i.e. does
+// not begin with "grpc-") header name.
+func Trace(ctx context.Context) []byte {
+ b, _ := ctx.Value(incomingTraceKey{}).([]byte)
+ return b
+}
+
+// SetIncomingTrace attaches stats tagging data to the context, to be read by
+// the application (not sent in outgoing RPCs). It is intended for
+// gRPC-internal use.
+func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
+ return context.WithValue(ctx, incomingTraceKey{}, b)
+}
+
+// OutgoingTrace returns the trace from the context for the outbound RPC. It is
+// intended for gRPC-internal use.
+func OutgoingTrace(ctx context.Context) []byte {
+ b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
+ return b
+}
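A brief sketch of the backward-compatibility helpers added above: the Set* functions attach bytes that travel as the grpc-tags-bin / grpc-trace-bin headers, and the application reads inbound values with Tags and Trace. The payload bytes here are invented.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/stats"
)

func main() {
	// Client side: attach tag and trace bytes to the outgoing context.
	ctx := stats.SetTags(context.Background(), []byte("experiment=a"))
	ctx = stats.SetTrace(ctx, []byte{0x01, 0x02})
	fmt.Printf("outgoing tags:  %q\n", stats.OutgoingTags(ctx))
	fmt.Printf("outgoing trace: %v\n", stats.OutgoingTrace(ctx))

	// Server side (simulated here): gRPC attaches inbound values with the
	// internal SetIncoming* helpers; handlers read them with Tags/Trace.
	srvCtx := stats.SetIncomingTags(context.Background(), []byte("experiment=a"))
	fmt.Printf("incoming tags:  %q\n", stats.Tags(srvCtx))
}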
diff --git a/go/vendor/google.golang.org/grpc/status/status.go b/go/vendor/google.golang.org/grpc/status/status.go
index 99f1a09..d9defae 100644
--- a/go/vendor/google.golang.org/grpc/status/status.go
+++ b/go/vendor/google.golang.org/grpc/status/status.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2017, Google Inc.
- * All rights reserved.
+ * Copyright 2017 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -43,9 +28,11 @@
package status
import (
+ "errors"
"fmt"
"github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
)
@@ -71,16 +58,25 @@ type Status struct {
// Code returns the status code contained in s.
func (s *Status) Code() codes.Code {
+ if s == nil || s.s == nil {
+ return codes.OK
+ }
return codes.Code(s.s.Code)
}
// Message returns the message contained in s.
func (s *Status) Message() string {
+ if s == nil || s.s == nil {
+ return ""
+ }
return s.s.Message
}
// Proto returns s's status as an spb.Status proto message.
func (s *Status) Proto() *spb.Status {
+ if s == nil {
+ return nil
+ }
return proto.Clone(s.s).(*spb.Status)
}
@@ -129,8 +125,57 @@ func FromError(err error) (s *Status, ok bool) {
if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
}
- if s, ok := err.(*statusError); ok {
- return s.status(), true
+ if se, ok := err.(*statusError); ok {
+ return se.status(), true
}
return nil, false
}
+
+// WithDetails returns a new status with the provided details messages appended to the status.
+// If any errors are encountered, it returns nil and the first error encountered.
+func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
+ if s.Code() == codes.OK {
+ return nil, errors.New("no error details for status with code OK")
+ }
+ // s.Code() != OK implies that s.Proto() != nil.
+ p := s.Proto()
+ for _, detail := range details {
+ any, err := ptypes.MarshalAny(detail)
+ if err != nil {
+ return nil, err
+ }
+ p.Details = append(p.Details, any)
+ }
+ return &Status{s: p}, nil
+}
+
+// Details returns a slice of details messages attached to the status.
+// If a detail cannot be decoded, the error is returned in place of the detail.
+func (s *Status) Details() []interface{} {
+ if s == nil || s.s == nil {
+ return nil
+ }
+ details := make([]interface{}, 0, len(s.s.Details))
+ for _, any := range s.s.Details {
+ detail := &ptypes.DynamicAny{}
+ if err := ptypes.UnmarshalAny(any, detail); err != nil {
+ details = append(details, err)
+ continue
+ }
+ details = append(details, detail.Message)
+ }
+ return details
+}
+
+// Code returns the Code of the error if it is a Status error, codes.OK if err
+// is nil, or codes.Unknown otherwise.
+func Code(err error) codes.Code {
+ // Don't use FromError to avoid allocation of OK status.
+ if err == nil {
+ return codes.OK
+ }
+ if se, ok := err.(*statusError); ok {
+ return se.status().Code()
+ }
+ return codes.Unknown
+}
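An illustrative sketch of the WithDetails/Details/Code additions above. Any proto.Message can be attached as a detail; the well-known Duration message is used here purely as a stand-in.

package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.ResourceExhausted, "retry later")

	// Attach a typed detail; WithDetails refuses to decorate an OK status.
	st, err := st.WithDetails(&durpb.Duration{Seconds: 30})
	if err != nil {
		panic(err)
	}

	// The details survive the round trip through the error value.
	if s, ok := status.FromError(st.Err()); ok {
		for _, d := range s.Details() {
			fmt.Printf("detail: %v\n", d)
		}
	}
	fmt.Println("code:", status.Code(st.Err())) // ResourceExhausted
}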
diff --git a/go/vendor/google.golang.org/grpc/stream.go b/go/vendor/google.golang.org/grpc/stream.go
index bb468dc..f913819 100644
--- a/go/vendor/google.golang.org/grpc/stream.go
+++ b/go/vendor/google.golang.org/grpc/stream.go
@@ -1,51 +1,38 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
package grpc
import (
- "bytes"
"errors"
"io"
- "math"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/trace"
+ "google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/encoding"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
"google.golang.org/grpc/transport"
)
@@ -73,11 +60,17 @@ type Stream interface {
// side. On server side, it simply returns the error to the caller.
// SendMsg is called by generated code. Also Users can call SendMsg
// directly when it is really needed in their use cases.
+ // It's safe to have a goroutine calling SendMsg and another goroutine calling
+ // RecvMsg on the same stream at the same time.
+ // But it is not safe to call SendMsg on the same stream in different goroutines.
SendMsg(m interface{}) error
// RecvMsg blocks until it receives a message or the stream is
// done. On client side, it returns io.EOF when the stream is done. On
// any other error, it aborts the stream and returns an RPC status. On
// server side, it simply returns the error to the caller.
+ // It's safe to have a goroutine calling SendMsg and another goroutine calling
+ // RecvMsg on the same stream at the same time.
+ // But it is not safe to call RecvMsg on the same stream in different goroutines.
RecvMsg(m interface{}) error
}
@@ -93,44 +86,92 @@ type ClientStream interface {
// CloseSend closes the send direction of the stream. It closes the stream
// when non-nil error is met.
CloseSend() error
+ // Stream.SendMsg() may return a non-nil error when something goes wrong while sending
+ // the request. The returned error indicates the status of this sending, not the final
+ // status of the RPC.
+ // Always call Stream.RecvMsg() to get the final status if you care about the status of
+ // the RPC.
Stream
}
-// NewClientStream creates a new Stream for the client side. This is called
-// by generated code.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code.
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
if cc.dopts.streamInt != nil {
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
}
return newClientStream(ctx, desc, cc, method, opts...)
}
+// NewClientStream creates a new Stream for the client side. This is typically
+// called by generated code.
+//
+// DEPRECATED: Use ClientConn.NewStream instead.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+ return cc.NewStream(ctx, desc, method, opts...)
+}
+
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
var (
t transport.ClientTransport
s *transport.Stream
- put func()
+ done func(balancer.DoneInfo)
cancel context.CancelFunc
)
- c := defaultCallInfo
- if mc, ok := cc.getMethodConfig(method); ok {
- c.failFast = !mc.WaitForReady
- if mc.Timeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
- }
+ c := defaultCallInfo()
+ mc := cc.GetMethodConfig(method)
+ if mc.WaitForReady != nil {
+ c.failFast = !*mc.WaitForReady
}
+
+ if mc.Timeout != nil && *mc.Timeout >= 0 {
+ ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
+ }
+
+ opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
- if err := o.before(&c); err != nil {
+ if err := o.before(c); err != nil {
return nil, toRPCErr(err)
}
}
+ c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
+ c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
- Flush: desc.ServerStreams && desc.ClientStreams,
- }
- if cc.dopts.cp != nil {
+ // If it's not client streaming, we should already have the request to be sent,
+ // so we don't flush the header.
+ // If it's client streaming, the user may never send a request, or may not send
+ // one any time soon, so we ask the transport to flush the header.
+ Flush: desc.ClientStreams,
+ }
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
+ var cp Compressor
+ var comp encoding.Compressor
+ if ct := c.compressorType; ct != "" {
+ callHdr.SendCompress = ct
+ if ct != encoding.Identity {
+ comp = encoding.GetCompressor(ct)
+ if comp == nil {
+ return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+ }
+ }
+ } else if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
+ cp = cc.dopts.cp
+ }
+ if c.creds != nil {
+ callHdr.Creds = c.creds
}
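// Editor's sketch of the precedence described in the comment above: a per-call
// grpc.UseCompressor (experimental in this vintage) overrides the connection-wide
// grpc.WithCompressor and is resolved through encoding.GetCompressor, so a
// compressor must already be registered under that name via
// encoding.RegisterCompressor. Target and method names are hypothetical.
package compressexample

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
)

func dialAndPing(ctx context.Context, req, resp interface{}) error {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithCompressor(grpc.NewGZIPCompressor()), // connection-wide default (older API)
	)
	if err != nil {
		return err
	}
	defer conn.Close()
	// Per-call override; wins over the DialOption above.
	return grpc.Invoke(ctx, "/example.Echo/Ping", req, resp, conn, grpc.UseCompressor("gzip"))
}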
var trInfo traceInfo
if EnableTracing {
@@ -151,75 +192,85 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
}()
}
+ ctx = newContextWithRPCInfo(ctx, c.failFast)
sh := cc.dopts.copts.StatsHandler
if sh != nil {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
begin := &stats.Begin{
Client: true,
BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
- }
- defer func() {
- if err != nil && sh != nil {
- // Only handle end stats if err != nil.
- end := &stats.End{
- Client: true,
- Error: err,
+ defer func() {
+ if err != nil {
+ // Only handle end stats if err != nil.
+ end := &stats.End{
+ Client: true,
+ Error: err,
+ }
+ sh.HandleRPC(ctx, end)
}
- sh.HandleRPC(ctx, end)
- }
- }()
- gopts := BalancerGetOptions{
- BlockingWait: !c.failFast,
+ }()
}
+
for {
- t, put, err = cc.getTransport(ctx, gopts)
+ // Check whether the context has expired. This will prevent us from
+ // looping forever if an error occurs for wait-for-ready RPCs where no data
+ // is sent on the wire.
+ select {
+ case <-ctx.Done():
+ return nil, toRPCErr(ctx.Err())
+ default:
+ }
+
+ t, done, err = cc.getTransport(ctx, c.failFast)
if err != nil {
- // TODO(zhaoq): Probably revisit the error handling.
- if _, ok := err.(*rpcError); ok {
- return nil, err
- }
- if err == errConnClosing || err == errConnUnavailable {
- if c.failFast {
- return nil, Errorf(codes.Unavailable, "%v", err)
- }
- continue
- }
- // All the other errors are treated as Internal errors.
- return nil, Errorf(codes.Internal, "%v", err)
+ return nil, err
}
s, err = t.NewStream(ctx, callHdr)
if err != nil {
- if put != nil {
- put()
- put = nil
- }
- if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain {
- if c.failFast {
- return nil, toRPCErr(err)
+ if done != nil {
+ doneInfo := balancer.DoneInfo{Err: err}
+ if _, ok := err.(transport.ConnectionError); ok {
+ // If the error is a connection error, the transport was sending data on the
+ // wire, and we are not sure whether anything made it onto the wire.
+ // If the error is not a connection error, we are sure nothing was sent.
+ doneInfo.BytesSent = true
}
+ done(doneInfo)
+ done = nil
+ }
+ // In the event of any error from NewStream, we never attempted to write
+ // anything to the wire, so we can retry indefinitely for non-fail-fast
+ // RPCs.
+ if !c.failFast {
continue
}
return nil, toRPCErr(err)
}
break
}
+
+ // Set callInfo.peer object from stream's context.
+ if peer, ok := peer.FromContext(s.Context()); ok {
+ c.peer = peer
+ }
cs := &clientStream{
opts: opts,
c: c,
desc: desc,
codec: cc.dopts.codec,
- cp: cc.dopts.cp,
+ cp: cp,
dc: cc.dopts.dc,
+ comp: comp,
cancel: cancel,
- put: put,
- t: t,
- s: s,
- p: &parser{r: s},
+ done: done,
+ t: t,
+ s: s,
+ p: &parser{r: s},
tracing: EnableTracing,
trInfo: trInfo,
@@ -227,23 +278,19 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
statsCtx: ctx,
statsHandler: cc.dopts.copts.StatsHandler,
}
- if cc.dopts.cp != nil {
- cs.cbuf = new(bytes.Buffer)
- }
- // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination
- // when there is no pending I/O operations on this stream.
+ // Listen on s.Context().Done() to detect cancellation and s.Done() to detect
+ // normal termination when there is no pending I/O operations on this stream.
go func() {
select {
case <-t.Error():
// Incur transport error, simply exit.
+ case <-cc.ctx.Done():
+ cs.finish(ErrClientConnClosing)
+ cs.closeTransportStream(ErrClientConnClosing)
case <-s.Done():
// TODO: The trace of the RPC is terminated here when there is no pending
// I/O, which is probably not the optimal solution.
- if s.StatusCode() == codes.OK {
- cs.finish(nil)
- } else {
- cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc()))
- }
+ cs.finish(s.Status().Err())
cs.closeTransportStream(nil)
case <-s.GoAway():
cs.finish(errConnDrain)
@@ -259,23 +306,28 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// clientStream implements a client side Stream.
type clientStream struct {
- opts []CallOption
- c callInfo
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- desc *StreamDesc
- codec Codec
- cp Compressor
- cbuf *bytes.Buffer
- dc Decompressor
+ opts []CallOption
+ c *callInfo
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ desc *StreamDesc
+
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ comp encoding.Compressor
+ decomp encoding.Compressor
+ decompSet bool
+
cancel context.CancelFunc
tracing bool // set to EnableTracing when the clientStream is created.
- mu sync.Mutex
- put func()
- closed bool
+ mu sync.Mutex
+ done func(balancer.DoneInfo)
+ closed bool
+ finished bool
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
// and is set to nil when the clientStream's finish method is called.
trInfo traceInfo
@@ -323,7 +375,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
return
}
if err == io.EOF {
- // Specialize the process for server streaming. SendMesg is only called
+ // Specialize the process for server streaming. SendMsg is only called
// once when creating the stream object. io.EOF needs to be skipped when
// the rpc is early finished (before the stream object is created.).
// TODO: It is probably better to move this into the generated code.
@@ -343,16 +395,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
Client: true,
}
}
- out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
- defer func() {
- if cs.cbuf != nil {
- cs.cbuf.Reset()
- }
- }()
+ hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp)
if err != nil {
- return Errorf(codes.Internal, "grpc: %v", err)
+ return err
+ }
+ if cs.c.maxSendMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
- err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
+ if len(data) > *cs.c.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
+ }
+ err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
if err == nil && outPayload != nil {
outPayload.SentTime = time.Now()
cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
@@ -361,28 +414,32 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
}
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
- defer func() {
- if err != nil && cs.statsHandler != nil {
- // Only generate End if err != nil.
- // If err == nil, it's not the last RecvMsg.
- // The last RecvMsg gets either an RPC error or io.EOF.
- end := &stats.End{
- Client: true,
- EndTime: time.Now(),
- }
- if err != io.EOF {
- end.Error = toRPCErr(err)
- }
- cs.statsHandler.HandleRPC(cs.statsCtx, end)
- }
- }()
var inPayload *stats.InPayload
if cs.statsHandler != nil {
inPayload = &stats.InPayload{
Client: true,
}
}
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, inPayload)
+ if cs.c.maxReceiveMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+ }
+ if !cs.decompSet {
+ // Block until we receive headers containing received message encoding.
+ if ct := cs.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if cs.dc == nil || cs.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ cs.dc = nil
+ cs.decomp = encoding.GetCompressor(ct)
+ }
+ } else {
+ // No compression is used; disable our decompressor.
+ cs.dc = nil
+ }
+ // Only initialize this state once per stream.
+ cs.decompSet = true
+ }
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload, cs.decomp)
defer func() {
// err != nil indicates the termination of the stream.
if err != nil {
@@ -405,17 +462,20 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
}
// Special handling for client streaming rpc.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, nil)
+ if cs.c.maxReceiveMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+ }
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil, cs.decomp)
cs.closeTransportStream(err)
if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
if err == io.EOF {
- if cs.s.StatusCode() == codes.OK {
- cs.finish(err)
- return nil
+ if se := cs.s.Status().Err(); se != nil {
+ return se
}
- return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
+ cs.finish(err)
+ return nil
}
return toRPCErr(err)
}
@@ -423,17 +483,17 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
cs.closeTransportStream(err)
}
if err == io.EOF {
- if cs.s.StatusCode() == codes.OK {
- // Returns io.EOF to indicate the end of the stream.
- return
+ if statusErr := cs.s.Status().Err(); statusErr != nil {
+ return statusErr
}
- return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
+ // Returns io.EOF to indicate the end of the stream.
+ return
}
return toRPCErr(err)
}
func (cs *clientStream) CloseSend() (err error) {
- err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
+ err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true})
defer func() {
if err != nil {
cs.finish(err)
@@ -461,19 +521,38 @@ func (cs *clientStream) closeTransportStream(err error) {
}
func (cs *clientStream) finish(err error) {
+ cs.mu.Lock()
+ defer cs.mu.Unlock()
+ if cs.finished {
+ return
+ }
+ cs.finished = true
defer func() {
if cs.cancel != nil {
cs.cancel()
}
}()
- cs.mu.Lock()
- defer cs.mu.Unlock()
for _, o := range cs.opts {
- o.after(&cs.c)
+ o.after(cs.c)
}
- if cs.put != nil {
- cs.put()
- cs.put = nil
+ if cs.done != nil {
+ cs.done(balancer.DoneInfo{
+ Err: err,
+ BytesSent: true,
+ BytesReceived: cs.s.BytesReceived(),
+ })
+ cs.done = nil
+ }
+ if cs.statsHandler != nil {
+ end := &stats.End{
+ Client: true,
+ EndTime: time.Now(),
+ }
+ if err != io.EOF {
+ // end.Error is nil if the RPC finished successfully.
+ end.Error = toRPCErr(err)
+ }
+ cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
if !cs.tracing {
return
@@ -511,17 +590,19 @@ type ServerStream interface {
// serverStream implements a server side Stream.
type serverStream struct {
- t transport.ServerTransport
- s *transport.Stream
- p *parser
- codec Codec
- cp Compressor
- dc Decompressor
- cbuf *bytes.Buffer
- maxMsgSize int
- statusCode codes.Code
- statusDesc string
- trInfo *traceInfo
+ t transport.ServerTransport
+ s *transport.Stream
+ p *parser
+ codec Codec
+
+ cp Compressor
+ dc Decompressor
+ comp encoding.Compressor
+ decomp encoding.Compressor
+
+ maxReceiveMessageSize int
+ maxSendMessageSize int
+ trInfo *traceInfo
statsHandler stats.Handler
@@ -565,22 +646,23 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
ss.mu.Unlock()
}
+ if err != nil && err != io.EOF {
+ st, _ := status.FromError(toRPCErr(err))
+ ss.t.WriteStatus(ss.s, st)
+ }
}()
var outPayload *stats.OutPayload
if ss.statsHandler != nil {
outPayload = &stats.OutPayload{}
}
- out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload)
- defer func() {
- if ss.cbuf != nil {
- ss.cbuf.Reset()
- }
- }()
+ hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp)
if err != nil {
- err = Errorf(codes.Internal, "grpc: %v", err)
return err
}
- if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
+ if len(data) > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
+ }
+ if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
if outPayload != nil {
@@ -604,17 +686,21 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
ss.mu.Unlock()
}
+ if err != nil && err != io.EOF {
+ st, _ := status.FromError(toRPCErr(err))
+ ss.t.WriteStatus(ss.s, st)
+ }
}()
var inPayload *stats.InPayload
if ss.statsHandler != nil {
inPayload = &stats.InPayload{}
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil {
if err == io.EOF {
return err
}
if err == io.ErrUnexpectedEOF {
- err = Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+ err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
return toRPCErr(err)
}
@@ -623,3 +709,13 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
return nil
}
+
+// MethodFromServerStream returns the method string for the input stream.
+// The returned string is in the format of "/service/method".
+func MethodFromServerStream(stream ServerStream) (string, bool) {
+ s, ok := transport.StreamFromContext(stream.Context())
+ if !ok {
+ return "", ok
+ }
+ return s.Method(), ok
+}
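// Editor's sketch: MethodFromServerStream is handy wherever only the stream is
// in scope (inside interceptors it duplicates info.FullMethod). The logger
// choice is arbitrary.
package methodexample

import (
	"log"

	"google.golang.org/grpc"
)

func loggingStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	if m, ok := grpc.MethodFromServerStream(ss); ok {
		log.Printf("streaming call: %s", m) // e.g. "/example.Echo/Chat"
	}
	return handler(srv, ss)
}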
diff --git a/go/vendor/google.golang.org/grpc/tap/tap.go b/go/vendor/google.golang.org/grpc/tap/tap.go
index 0f36647..22b8fb5 100644
--- a/go/vendor/google.golang.org/grpc/tap/tap.go
+++ b/go/vendor/google.golang.org/grpc/tap/tap.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2016, Google Inc.
- * All rights reserved.
+ * Copyright 2016 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -47,8 +32,20 @@ type Info struct {
// TODO: More to be added.
}
-// ServerInHandle defines the function which runs when a new stream is created
-// on the server side. Note that it is executed in the per-connection I/O goroutine(s) instead
-// of per-RPC goroutine. Therefore, users should NOT have any blocking/time-consuming
-// work in this handle. Otherwise all the RPCs would slow down.
+// ServerInHandle defines the function which runs before a new stream is created
+// on the server side. If it returns a non-nil error, the stream will not be
+// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
+// The client will receive an RPC error "code = Unavailable, desc = stream
+// terminated by RST_STREAM with error code: REFUSED_STREAM".
+//
+ // It's intended to be used in situations where you don't want to waste the
+ // resources to accept the new stream (e.g. rate-limiting). The content of
+ // the error will be ignored and won't be sent back to the client. For other,
+ // more general uses, please use interceptors.
+//
+// Note that it is executed in the per-connection I/O goroutine(s) instead of
+// per-RPC goroutine. Therefore, users should NOT have any
+// blocking/time-consuming work in this handle. Otherwise all the RPCs would
+// slow down. Also, for the same reason, this handle won't be called
+// concurrently by gRPC.
type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
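// Editor's sketch of the rate-limiting use case mentioned above. The limiter
// (golang.org/x/time/rate) and the numbers are assumptions; the key point is
// that the check must not block, since this runs on the connection's I/O
// goroutine, and the error text is never sent to the client.
package tapexample

import (
	"golang.org/x/net/context"
	"golang.org/x/time/rate"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

var streamLimit = rate.NewLimiter(100, 200) // 100 new streams/s, burst of 200

func rateLimitTap(ctx context.Context, info *tap.Info) (context.Context, error) {
	if !streamLimit.Allow() {
		return nil, status.Errorf(codes.ResourceExhausted, "rejected %q", info.FullMethodName)
	}
	return ctx, nil
}

// Installed when constructing the server:
//	srv := grpc.NewServer(grpc.InTapHandle(rateLimitTap))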
diff --git a/go/vendor/google.golang.org/grpc/trace.go b/go/vendor/google.golang.org/grpc/trace.go
index f6747e1..c1c96de 100644
--- a/go/vendor/google.golang.org/grpc/trace.go
+++ b/go/vendor/google.golang.org/grpc/trace.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2015, Google Inc.
- * All rights reserved.
+ * Copyright 2015 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -46,7 +31,7 @@ import (
// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
// This should only be set before any RPCs are sent or received by this program.
-var EnableTracing = true
+var EnableTracing bool
// methodFamily returns the trace family for the given method.
// It turns "/pkg.Service/GetFoo" into "pkg.Service".
@@ -91,6 +76,15 @@ func (f *firstLine) String() string {
return line.String()
}
+const truncateSize = 100
+
+func truncate(x string, l int) string {
+ if l > len(x) {
+ return x
+ }
+ return x[:l]
+}
+
// payload represents an RPC request or response payload.
type payload struct {
sent bool // whether this is an outgoing payload
@@ -100,9 +94,9 @@ type payload struct {
func (p payload) String() string {
if p.sent {
- return fmt.Sprintf("sent: %v", p.msg)
+ return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
}
- return fmt.Sprintf("recv: %v", p.msg)
+ return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
}
type fmtStringer struct {
diff --git a/go/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/go/vendor/google.golang.org/grpc/transport/bdp_estimator.go
new file mode 100644
index 0000000..63cd262
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/transport/bdp_estimator.go
@@ -0,0 +1,140 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "sync"
+ "time"
+)
+
+const (
+ // bdpLimit is the maximum value the flow control windows
+ // will be increased to.
+ bdpLimit = (1 << 20) * 4
+ // alpha is a constant factor used to keep a moving average
+ // of RTTs.
+ alpha = 0.9
+ // If the current bdp sample is greater than or equal to
+ // beta times our estimated bdp, and the current bandwidth
+ // sample is the maximum bandwidth observed so far, we
+ // increase our bdp estimate by a factor of gamma.
+ beta = 0.66
+ // To keep our bdp estimate smaller than or equal to twice the real BDP,
+ // we should multiply the current sample by 4/3; to round things out,
+ // we use 2 as the multiplication factor instead.
+ gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+ // sentAt is the time when the ping was sent.
+ sentAt time.Time
+
+ mu sync.Mutex
+ // bdp is the current bdp estimate.
+ bdp uint32
+ // sample is the number of bytes received in one measurement cycle.
+ sample uint32
+ // bwMax is the maximum bandwidth noted so far (bytes/sec).
+ bwMax float64
+ // isSent records whether a bdp ping is currently in flight, i.e. a new measurement cycle has begun.
+ isSent bool
+ // Callback to update the window sizes.
+ updateFlowControl func(n uint32)
+ // sampleCount is the number of samples taken so far.
+ sampleCount uint64
+ // rtt is the smoothed round-trip time estimate (seconds).
+ rtt float64
+}
+
+// timesnap registers the time bdp ping was sent out so that
+// network rtt can be calculated when its ack is received.
+// It is called (by controller) when the bdpPing is
+// being written on the wire.
+func (b *bdpEstimator) timesnap(d [8]byte) {
+ if bdpPing.data != d {
+ return
+ }
+ b.sentAt = time.Now()
+}
+
+// add adds bytes to the current sample for calculating bdp.
+// It returns true only if a ping must be sent. This can be used
+ // by the caller (handleData) to decide whether to batch
+ // a window update with the ping.
+func (b *bdpEstimator) add(n uint32) bool {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.bdp == bdpLimit {
+ return false
+ }
+ if !b.isSent {
+ b.isSent = true
+ b.sample = n
+ b.sentAt = time.Time{}
+ b.sampleCount++
+ return true
+ }
+ b.sample += n
+ return false
+}
+
+// calculate is called when an ack for a bdp ping is received.
+// Here we calculate the current bdp and bandwidth sample and
+// decide if the flow control windows should go up.
+func (b *bdpEstimator) calculate(d [8]byte) {
+ // Check if the ping acked for was the bdp ping.
+ if bdpPing.data != d {
+ return
+ }
+ b.mu.Lock()
+ rttSample := time.Since(b.sentAt).Seconds()
+ if b.sampleCount < 10 {
+ // Bootstrap rtt with an average of first 10 rtt samples.
+ b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
+ } else {
+ // Weight recent samples more heavily.
+ b.rtt += (rttSample - b.rtt) * float64(alpha)
+ }
+ b.isSent = false
+ // The number of bytes accumulated so far in the sample is smaller
+ // than or equal to 1.5 times the real BDP on a saturated connection.
+ bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
+ if bwCurrent > b.bwMax {
+ b.bwMax = bwCurrent
+ }
+ // If the current sample (which is smaller than or equal to 1.5 times the real BDP) is
+ // greater than or equal to 2/3rds of our perceived bdp AND this is the maximum bandwidth seen so far, we
+ // should update our perception of the network BDP.
+ if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
+ sampleFloat := float64(b.sample)
+ b.bdp = uint32(gamma * sampleFloat)
+ if b.bdp > bdpLimit {
+ b.bdp = bdpLimit
+ }
+ bdp := b.bdp
+ b.mu.Unlock()
+ b.updateFlowControl(bdp)
+ return
+ }
+ b.mu.Unlock()
+}
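// Editor's worked example of the update above, with standalone constants that
// mirror the ones in this file and purely illustrative numbers: a smoothed rtt
// of 40ms, a 256 KiB sample, and a previous estimate of 256 KiB.
package bdpexample

func exampleBdpUpdate() uint32 {
	const (
		beta   = 0.66
		gamma  = 2
		rtt    = 0.040    // seconds
		sample = 262144.0 // bytes received during this ping cycle
	)
	bwCurrent := sample / (rtt * 1.5) // ≈ 4.37e6 bytes/s; assume this is a new bwMax
	prev := uint32(262144)            // previous bdp estimate
	if sample >= beta*float64(prev) && bwCurrent > 0 {
		// 262144 >= 173015, so the estimate grows to gamma*sample = 524288,
		// still well below bdpLimit (4 MiB).
		return uint32(gamma * sample)
	}
	return prev
}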
diff --git a/go/vendor/google.golang.org/grpc/transport/control.go b/go/vendor/google.golang.org/grpc/transport/control.go
index 2586cba..0474b09 100644
--- a/go/vendor/google.golang.org/grpc/transport/control.go
+++ b/go/vendor/google.golang.org/grpc/transport/control.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -35,22 +20,67 @@ package transport
import (
"fmt"
+ "io"
+ "math"
"sync"
+ "time"
"golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
)
const (
// The default value of flow control window size in HTTP2 spec.
defaultWindowSize = 65535
// The initial window size for flow control.
- initialWindowSize = defaultWindowSize // for an RPC
- initialConnWindowSize = defaultWindowSize * 16 // for a connection
+ initialWindowSize = defaultWindowSize // for an RPC
+ infinity = time.Duration(math.MaxInt64)
+ defaultClientKeepaliveTime = infinity
+ defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
+ defaultMaxStreamsClient = 100
+ defaultMaxConnectionIdle = infinity
+ defaultMaxConnectionAge = infinity
+ defaultMaxConnectionAgeGrace = infinity
+ defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
+ defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
+ defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
+ // max window limit set by HTTP2 Specs.
+ maxWindowSize = math.MaxInt32
+ // defaultLocalSendQuota is the default number of data bytes that each
+ // stream can schedule before some of it is flushed out.
+ defaultLocalSendQuota = 128 * 1024
)
// The following defines various control items which could flow through
// the control buffer of transport. They represent different aspects of
// control tasks, e.g., flow control, settings, streaming resetting, etc.
+
+type headerFrame struct {
+ streamID uint32
+ hf []hpack.HeaderField
+ endStream bool
+}
+
+func (*headerFrame) item() {}
+
+type continuationFrame struct {
+ streamID uint32
+ endHeaders bool
+ headerBlockFragment []byte
+}
+
+type dataFrame struct {
+ streamID uint32
+ endStream bool
+ d []byte
+ f func()
+}
+
+func (*dataFrame) item() {}
+
+func (*continuationFrame) item() {}
+
type windowUpdate struct {
streamID uint32
increment uint32
@@ -59,12 +89,16 @@ type windowUpdate struct {
func (*windowUpdate) item() {}
type settings struct {
- ack bool
- ss []http2.Setting
+ ss []http2.Setting
}
func (*settings) item() {}
+type settingsAck struct {
+}
+
+func (*settingsAck) item() {}
+
type resetStream struct {
streamID uint32
code http2.ErrCode
@@ -73,11 +107,16 @@ type resetStream struct {
func (*resetStream) item() {}
type goAway struct {
+ code http2.ErrCode
+ debugData []byte
+ headsUp bool
+ closeConn bool
}
func (*goAway) item() {}
type flushIO struct {
+ closeTr bool
}
func (*flushIO) item() {}
@@ -92,21 +131,17 @@ func (*ping) item() {}
// quotaPool is a pool which accumulates the quota and sends it to acquire()
// when it is available.
type quotaPool struct {
- c chan int
-
- mu sync.Mutex
- quota int
+ mu sync.Mutex
+ c chan struct{}
+ version uint32
+ quota int
}
// newQuotaPool creates a quotaPool which has quota q available to consume.
func newQuotaPool(q int) *quotaPool {
qb := &quotaPool{
- c: make(chan int, 1),
- }
- if q > 0 {
- qb.c <- q
- } else {
- qb.quota = q
+ quota: q,
+ c: make(chan struct{}, 1),
}
return qb
}
@@ -116,43 +151,142 @@ func newQuotaPool(q int) *quotaPool {
func (qb *quotaPool) add(v int) {
qb.mu.Lock()
defer qb.mu.Unlock()
- select {
- case n := <-qb.c:
- qb.quota += n
- default:
+ qb.lockedAdd(v)
+}
+
+func (qb *quotaPool) lockedAdd(v int) {
+ var wakeUp bool
+ if qb.quota <= 0 {
+ wakeUp = true // Wake up potential waiters.
}
qb.quota += v
- if qb.quota <= 0 {
- return
+ if wakeUp && qb.quota > 0 {
+ select {
+ case qb.c <- struct{}{}:
+ default:
+ }
+ }
+}
+
+func (qb *quotaPool) addAndUpdate(v int) {
+ qb.mu.Lock()
+ qb.lockedAdd(v)
+ qb.version++
+ qb.mu.Unlock()
+}
+
+func (qb *quotaPool) get(v int, wc waiters) (int, uint32, error) {
+ qb.mu.Lock()
+ if qb.quota > 0 {
+ if v > qb.quota {
+ v = qb.quota
+ }
+ qb.quota -= v
+ ver := qb.version
+ qb.mu.Unlock()
+ return v, ver, nil
}
- // After the pool has been created, this is the only place that sends on
- // the channel. Since mu is held at this point and any quota that was sent
- // on the channel has been retrieved, we know that this code will always
- // place any positive quota value on the channel.
- select {
- case qb.c <- qb.quota:
- qb.quota = 0
- default:
+ qb.mu.Unlock()
+ for {
+ select {
+ case <-wc.ctx.Done():
+ return 0, 0, ContextErr(wc.ctx.Err())
+ case <-wc.tctx.Done():
+ return 0, 0, ErrConnClosing
+ case <-wc.done:
+ return 0, 0, io.EOF
+ case <-wc.goAway:
+ return 0, 0, errStreamDrain
+ case <-qb.c:
+ qb.mu.Lock()
+ if qb.quota > 0 {
+ if v > qb.quota {
+ v = qb.quota
+ }
+ qb.quota -= v
+ ver := qb.version
+ if qb.quota > 0 {
+ select {
+ case qb.c <- struct{}{}:
+ default:
+ }
+ }
+ qb.mu.Unlock()
+ return v, ver, nil
+
+ }
+ qb.mu.Unlock()
+ }
}
}
-// acquire returns the channel on which available quota amounts are sent.
-func (qb *quotaPool) acquire() <-chan int {
- return qb.c
+func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
+ qb.mu.Lock()
+ if version == qb.version {
+ success()
+ qb.mu.Unlock()
+ return true
+ }
+ failure()
+ qb.mu.Unlock()
+ return false
}
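// Editor's sketch of the general pattern behind quotaPool (not tied to the
// unexported transport types): a mutex-guarded counter plus a one-slot signal
// channel that wakes at most one waiter when quota becomes available again.
package quotaexample

import (
	"context"
	"sync"
)

type pool struct {
	mu    sync.Mutex
	quota int
	c     chan struct{} // must be created with capacity 1
}

func (p *pool) add(v int) {
	p.mu.Lock()
	wakeUp := p.quota <= 0
	p.quota += v
	if wakeUp && p.quota > 0 {
		select {
		case p.c <- struct{}{}: // non-blocking thanks to the 1-slot buffer
		default:
		}
	}
	p.mu.Unlock()
}

func (p *pool) get(ctx context.Context, v int) (int, error) {
	for {
		p.mu.Lock()
		if p.quota > 0 {
			if v > p.quota {
				v = p.quota // hand out whatever is left
			}
			p.quota -= v
			p.mu.Unlock()
			return v, nil
		}
		p.mu.Unlock()
		select {
		case <-ctx.Done():
			return 0, ctx.Err()
		case <-p.c: // woken up; loop and re-check the quota
		}
	}
}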
// inFlow deals with inbound flow control
type inFlow struct {
+ mu sync.Mutex
// The inbound flow control limit for pending data.
limit uint32
-
- mu sync.Mutex
// pendingData is the overall data which have been received but not been
// consumed by applications.
pendingData uint32
// The amount of data the application has consumed but grpc has not sent
// window update for them. Used to reduce window update frequency.
pendingUpdate uint32
+ // delta is the extra window update given by the receiver when an application
+ // is reading data larger than the inFlow limit.
+ delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) uint32 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ d := n - f.limit
+ f.limit = n
+ return d
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+ if n > uint32(math.MaxInt32) {
+ n = uint32(math.MaxInt32)
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ // estSenderQuota is the receiver's view of the maximum number of bytes the sender
+ // can send without a window update.
+ estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+ // estUntransmittedData is the maximum number of bytes the sender might not have put
+ // on the wire yet. A value of 0 or less means that we have already received all or
+ // more bytes than the application is requesting to read.
+ estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+ // This implies that unless we send a window update, the sender won't be able to send all the bytes
+ // for this message. Therefore we must send an update over the limit since there's an active read
+ // request from the application.
+ if estUntransmittedData > estSenderQuota {
+ // The sender's window shouldn't exceed 2^31 - 1, as specified in the HTTP/2 spec.
+ if f.limit+n > maxWindowSize {
+ f.delta = maxWindowSize - f.limit
+ } else {
+ // Send a window update for the whole message and not just the difference between
+ // estUntransmittedData and estSenderQuota. This will be helpful in case the message
+ // is padded; we will fall back on the currently available window (at least a 1/4th of the limit).
+ f.delta = n
+ }
+ return f.delta
+ }
+ return 0
}
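// Editor's worked example of maybeAdjust with illustrative numbers: the limit
// is 64 KiB, 16 KiB have arrived but not been consumed, and the application
// asks to read a 256 KiB message.
package inflowexample

func exampleMaybeAdjust() uint32 {
	const (
		limit         = 65536
		pendingData   = 16384
		pendingUpdate = 0
		n             = 262144 // size of the message being read
	)
	estSenderQuota := int32(limit - (pendingData + pendingUpdate)) // 49152
	estUntransmitted := int32(n - pendingData)                     // 245760
	if estUntransmitted > estSenderQuota {
		// The sender cannot finish the message without more window, so grant a
		// delta of n (clamped so that limit+n never exceeds maxWindowSize).
		return uint32(n)
	}
	return 0
}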
// onData is invoked when some data frame is received. It updates pendingData.
@@ -160,7 +294,7 @@ func (f *inFlow) onData(n uint32) error {
f.mu.Lock()
defer f.mu.Unlock()
f.pendingData += n
- if f.pendingData+f.pendingUpdate > f.limit {
+ if f.pendingData+f.pendingUpdate > f.limit+f.delta {
return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
}
return nil
@@ -175,6 +309,13 @@ func (f *inFlow) onRead(n uint32) uint32 {
return 0
}
f.pendingData -= n
+ if n > f.delta {
+ n -= f.delta
+ f.delta = 0
+ } else {
+ f.delta -= n
+ n = 0
+ }
f.pendingUpdate += n
if f.pendingUpdate >= f.limit/4 {
wu := f.pendingUpdate
@@ -184,10 +325,10 @@ func (f *inFlow) onRead(n uint32) uint32 {
return 0
}
-func (f *inFlow) resetPendingData() uint32 {
+func (f *inFlow) resetPendingUpdate() uint32 {
f.mu.Lock()
defer f.mu.Unlock()
- n := f.pendingData
- f.pendingData = 0
+ n := f.pendingUpdate
+ f.pendingUpdate = 0
return n
}
diff --git a/go/vendor/google.golang.org/grpc/transport/go16.go b/go/vendor/google.golang.org/grpc/transport/go16.go
index ee1c46b..5babcf9 100644
--- a/go/vendor/google.golang.org/grpc/transport/go16.go
+++ b/go/vendor/google.golang.org/grpc/transport/go16.go
@@ -1,34 +1,20 @@
// +build go1.6,!go1.7
/*
- * Copyright 2016, Google Inc.
- * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Copyright 2016 gRPC authors.
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -36,6 +22,9 @@ package transport
import (
"net"
+ "net/http"
+
+ "google.golang.org/grpc/codes"
"golang.org/x/net/context"
)
@@ -44,3 +33,19 @@ import (
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
+
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
+ switch err {
+ case context.DeadlineExceeded:
+ return streamErrorf(codes.DeadlineExceeded, "%v", err)
+ case context.Canceled:
+ return streamErrorf(codes.Canceled, "%v", err)
+ }
+ return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
+}
+
+// contextFromRequest returns a background context.
+func contextFromRequest(r *http.Request) context.Context {
+ return context.Background()
+}
diff --git a/go/vendor/google.golang.org/grpc/transport/go17.go b/go/vendor/google.golang.org/grpc/transport/go17.go
index 356f13f..b7fa6bd 100644
--- a/go/vendor/google.golang.org/grpc/transport/go17.go
+++ b/go/vendor/google.golang.org/grpc/transport/go17.go
@@ -1,46 +1,52 @@
// +build go1.7
/*
- * Copyright 2016, Google Inc.
- * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Copyright 2016 gRPC authors.
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
package transport
import (
+ "context"
"net"
+ "net/http"
+
+ "google.golang.org/grpc/codes"
- "golang.org/x/net/context"
+ netctx "golang.org/x/net/context"
)
// dialContext connects to the address on the named network.
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
return (&net.Dialer{}).DialContext(ctx, network, address)
}
+
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
+ switch err {
+ case context.DeadlineExceeded, netctx.DeadlineExceeded:
+ return streamErrorf(codes.DeadlineExceeded, "%v", err)
+ case context.Canceled, netctx.Canceled:
+ return streamErrorf(codes.Canceled, "%v", err)
+ }
+ return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
+}
+
+// contextFromRequest returns a context from the HTTP Request.
+func contextFromRequest(r *http.Request) context.Context {
+ return r.Context()
+}
diff --git a/go/vendor/google.golang.org/grpc/transport/handler_server.go b/go/vendor/google.golang.org/grpc/transport/handler_server.go
index 10b6dc0..27c4ebb 100644
--- a/go/vendor/google.golang.org/grpc/transport/handler_server.go
+++ b/go/vendor/google.golang.org/grpc/transport/handler_server.go
@@ -1,32 +1,18 @@
/*
- * Copyright 2016, Google Inc.
- * All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Copyright 2016 gRPC authors.
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -47,12 +33,14 @@ import (
"sync"
"time"
+ "github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
+ "google.golang.org/grpc/status"
)
// NewServerHandlerTransport returns a ServerTransport handling gRPC
@@ -101,14 +89,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTr
continue
}
for _, v := range vv {
- if k == "user-agent" {
- // user-agent is special. Copying logic of http_util.go.
- if i := strings.LastIndex(v, " "); i == -1 {
- // There is no application user agent string being set
- continue
- } else {
- v = v[:i]
- }
+ v, err := decodeMetadataHeader(k, v)
+ if err != nil {
+ return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
}
metakv = append(metakv, k, v)
}
@@ -139,6 +122,10 @@ type serverHandlerTransport struct {
// ServeHTTP (HandleStreams) goroutine. The channel is closed
// when WriteStatus is called.
writes chan func()
+
+ // block concurrent WriteStatus calls
+ // e.g. grpc/(*serverStream).SendMsg/RecvMsg
+ writeStatusMu sync.Mutex
}
func (ht *serverHandlerTransport) Close() error {
@@ -174,15 +161,24 @@ func (a strAddr) String() string { return string(a) }
// do runs fn in the ServeHTTP goroutine.
func (ht *serverHandlerTransport) do(fn func()) error {
+ // Avoid a panic from writing to a closed channel. Imperfect, but maybe good enough.
select {
- case ht.writes <- fn:
- return nil
case <-ht.closedCh:
return ErrConnClosing
+ default:
+ select {
+ case ht.writes <- fn:
+ return nil
+ case <-ht.closedCh:
+ return ErrConnClosing
+ }
}
}
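// Editor's sketch of the pattern used by do() above: a non-blocking check of
// the closed channel first so that a pending close always wins, then a blocking
// select on either outcome. It narrows, but does not eliminate, the race with a
// concurrent close of the writes channel.
package handlerexample

import "errors"

var errClosed = errors.New("transport closed")

func trySchedule(writes chan<- func(), closed <-chan struct{}, fn func()) error {
	select {
	case <-closed:
		return errClosed
	default:
		select {
		case writes <- fn:
			return nil
		case <-closed:
			return errClosed
		}
	}
}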
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+ ht.writeStatusMu.Lock()
+ defer ht.writeStatusMu.Unlock()
+
err := ht.do(func() {
ht.writeCommonHeaders(s)
@@ -192,10 +188,21 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code,
ht.rw.(http.Flusher).Flush()
h := ht.rw.Header()
- h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
- if statusDesc != "" {
- h.Set("Grpc-Message", encodeGrpcMessage(statusDesc))
+ h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
+ if m := st.Message(); m != "" {
+ h.Set("Grpc-Message", encodeGrpcMessage(m))
}
+
+ if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ stBytes, err := proto.Marshal(p)
+ if err != nil {
+ // TODO: return error instead, when callers are able to handle it.
+ panic(err)
+ }
+
+ h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+ }
+
if md := s.Trailer(); len(md) > 0 {
for k, vv := range md {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
@@ -203,15 +210,18 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code,
continue
}
for _, v := range vv {
- // http2 ResponseWriter mechanism to
- // send undeclared Trailers after the
- // headers have possibly been written.
- h.Add(http2.TrailerPrefix+k, v)
+ // http2 ResponseWriter mechanism to send undeclared Trailers after
+ // the headers have possibly been written.
+ h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
}
}
}
})
- close(ht.writes)
+
+ if err == nil { // transport has not been closed
+ ht.Close()
+ close(ht.writes)
+ }
return err
}
@@ -234,15 +244,17 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
h.Add("Trailer", "Grpc-Status")
h.Add("Trailer", "Grpc-Message")
+ h.Add("Trailer", "Grpc-Status-Details-Bin")
if s.sendCompress != "" {
h.Set("Grpc-Encoding", s.sendCompress)
}
}
-func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
+func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
return ht.do(func() {
ht.writeCommonHeaders(s)
+ ht.rw.Write(hdr)
ht.rw.Write(data)
if !opts.Delay {
ht.rw.(http.Flusher).Flush()
@@ -260,6 +272,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
continue
}
for _, v := range vv {
+ v = encodeMetadataHeader(k, v)
h.Add(k, v)
}
}
@@ -271,12 +284,12 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
// With this transport type there will be exactly 1 stream: this HTTP request.
- var ctx context.Context
+ ctx := contextFromRequest(ht.req)
var cancel context.CancelFunc
if ht.timeoutSet {
- ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
+ ctx, cancel = context.WithTimeout(ctx, ht.timeout)
} else {
- ctx, cancel = context.WithCancel(context.Background())
+ ctx, cancel = context.WithCancel(ctx)
}
// requestOver is closed when either the request's context is done
@@ -300,13 +313,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
req := ht.req
s := &Stream{
- id: 0, // irrelevant
- windowHandler: func(int) {}, // nothing
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
+ id: 0, // irrelevant
+ requestRead: func(int) {},
+ cancel: cancel,
+ buf: newRecvBuffer(),
+ st: ht,
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
}
pr := &peer.Peer{
Addr: ht.RemoteAddr(),
@@ -314,10 +327,13 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
if req.TLS != nil {
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
}
- ctx = metadata.NewContext(ctx, ht.headerMD)
+ ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
ctx = peer.NewContext(ctx, pr)
s.ctx = newContextWithStream(ctx, s)
- s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
+ s.trReader = &transportReader{
+ reader: &recvBufferReader{ctx: s.ctx, recv: s.buf},
+ windowHandler: func(int) {},
+ }
// readerDone is closed when the Body.Read-ing goroutine exits.
readerDone := make(chan struct{})
@@ -329,11 +345,11 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
for buf := make([]byte, readSize); ; {
n, err := req.Body.Read(buf)
if n > 0 {
- s.buf.put(&recvMsg{data: buf[:n:n]})
+ s.buf.put(recvMsg{data: buf[:n:n]})
buf = buf[n:]
}
if err != nil {
- s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
+ s.buf.put(recvMsg{err: mapRecvMsgError(err)})
return
}
if len(buf) == 0 {
diff --git a/go/vendor/google.golang.org/grpc/transport/http2_client.go b/go/vendor/google.golang.org/grpc/transport/http2_client.go
index 892f8ba..4a12269 100644
--- a/go/vendor/google.golang.org/grpc/transport/http2_client.go
+++ b/go/vendor/google.golang.org/grpc/transport/http2_client.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -41,6 +26,7 @@ import (
"net"
"strings"
"sync"
+ "sync/atomic"
"time"
"golang.org/x/net/context"
@@ -48,16 +34,17 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
)
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
ctx context.Context
- target string // server name/addr
+ cancel context.CancelFunc
userAgent string
md interface{}
conn net.Conn // underlying communication channel
@@ -66,20 +53,11 @@ type http2Client struct {
authInfo credentials.AuthInfo // auth info about the connection
nextID uint32 // the next stream ID to be used
- // writableChan synchronizes write access to the transport.
- // A writer acquires the write lock by sending a value on writableChan
- // and releases it by receiving from writableChan.
- writableChan chan int
- // shutdownChan is closed when Close is called.
- // Blocking operations should select on shutdownChan to avoid
- // blocking forever after Close.
- // TODO(zhaoq): Maybe have a channel context?
- shutdownChan chan struct{}
- // errorChan is closed to notify the I/O error to the caller.
- errorChan chan struct{}
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport.
goAway chan struct{}
+ // awakenKeepalive is used to wake up the keepalive routine after it has gone dormant.
+ awakenKeepalive chan struct{}
framer *framer
hBuf *bytes.Buffer // the buffer for HPACK encoding
@@ -87,20 +65,40 @@ type http2Client struct {
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
- controlBuf *recvBuffer
+ controlBuf *controlBuffer
fc *inFlow
// sendQuotaPool provides flow control to outbound message.
sendQuotaPool *quotaPool
+ // localSendQuota limits the amount of data that can be scheduled
+ // for writing before it is actually written out.
+ localSendQuota *quotaPool
// streamsQuota limits the max number of concurrent streams.
streamsQuota *quotaPool
// The scheme used: https if TLS is on, http otherwise.
scheme string
+ isSecure bool
+
creds []credentials.PerRPCCredentials
+ // Boolean to keep track of reading activity on transport.
+ // 1 is true and 0 is false.
+ activity uint32 // Accessed atomically.
+ kp keepalive.ClientParameters
+
statsHandler stats.Handler
+ initialWindowSize int32
+
+ bdpEst *bdpEstimator
+ outQuotaVersion uint32
+
+ // onSuccess is a callback that the client transport calls upon
+ // receiving the server preface to signal that a successful HTTP2
+ // connection was established.
+ onSuccess func()
+
mu sync.Mutex // guard the following variables
state transportState // the state of underlying connection
activeStreams map[uint32]*Stream
@@ -108,10 +106,11 @@ type http2Client struct {
maxStreams int
// the per-stream outbound flow control window size set by the peer.
streamSendQuota uint32
- // goAwayID records the Last-Stream-ID in the GoAway frame from the server.
- goAwayID uint32
// prevGoAwayID records the Last-Stream-ID in the previous GoAway frame.
prevGoAwayID uint32
+ // goAwayReason records the http2.ErrCode and debug data received with the
+ // GoAway frame.
+ goAwayReason GoAwayReason
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@@ -152,14 +151,21 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
-func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (_ ClientTransport, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) {
scheme := "http"
- conn, err := dial(ctx, opts.Dialer, addr.Addr)
+ ctx, cancel := context.WithCancel(ctx)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
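+ // cancel is retained on the transport as t.cancel and invoked from Close();
+ // the defer above runs it only if construction fails.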
+
+ conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
if err != nil {
if opts.FailOnNonTempDialError {
- return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err)
+ return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
}
- return nil, connectionErrorf(true, err, "transport: %v", err)
+ return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
}
// Any further errors will close the underlying connection
defer func(conn net.Conn) {
@@ -167,51 +173,90 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
conn.Close()
}
}(conn)
- var authInfo credentials.AuthInfo
+ var (
+ isSecure bool
+ authInfo credentials.AuthInfo
+ )
if creds := opts.TransportCredentials; creds != nil {
scheme = "https"
- conn, authInfo, err = creds.ClientHandshake(ctx, addr.Addr, conn)
+ conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
if err != nil {
// Credentials handshake errors are typically considered permanent
// to avoid retrying on e.g. bad certificates.
temp := isTemporary(err)
- return nil, connectionErrorf(temp, err, "transport: %v", err)
+ return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err)
}
+ isSecure = true
+ }
+ kp := opts.KeepaliveParams
+ // Validate keepalive parameters.
+ if kp.Time == 0 {
+ kp.Time = defaultClientKeepaliveTime
+ }
+ if kp.Timeout == 0 {
+ kp.Timeout = defaultClientKeepaliveTimeout
}
- ua := primaryUA
- if opts.UserAgent != "" {
- ua = opts.UserAgent + " " + ua
+ dynamicWindow := true
+ icwz := int32(initialWindowSize)
+ if opts.InitialConnWindowSize >= defaultWindowSize {
+ icwz = opts.InitialConnWindowSize
+ dynamicWindow = false
}
var buf bytes.Buffer
+ writeBufSize := defaultWriteBufSize
+ if opts.WriteBufferSize > 0 {
+ writeBufSize = opts.WriteBufferSize
+ }
+ readBufSize := defaultReadBufSize
+ if opts.ReadBufferSize > 0 {
+ readBufSize = opts.ReadBufferSize
+ }
t := &http2Client{
ctx: ctx,
- target: addr.Addr,
- userAgent: ua,
+ cancel: cancel,
+ userAgent: opts.UserAgent,
md: addr.Metadata,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: authInfo,
// The client initiated stream id is odd starting from 1.
- nextID: 1,
- writableChan: make(chan int, 1),
- shutdownChan: make(chan struct{}),
- errorChan: make(chan struct{}),
- goAway: make(chan struct{}),
- framer: newFramer(conn),
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- controlBuf: newRecvBuffer(),
- fc: &inFlow{limit: initialConnWindowSize},
- sendQuotaPool: newQuotaPool(defaultWindowSize),
- scheme: scheme,
- state: reachable,
- activeStreams: make(map[uint32]*Stream),
- creds: opts.PerRPCCredentials,
- maxStreams: math.MaxInt32,
- streamSendQuota: defaultWindowSize,
- statsHandler: opts.StatsHandler,
+ nextID: 1,
+ goAway: make(chan struct{}),
+ awakenKeepalive: make(chan struct{}, 1),
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ framer: newFramer(conn, writeBufSize, readBufSize),
+ controlBuf: newControlBuffer(),
+ fc: &inFlow{limit: uint32(icwz)},
+ sendQuotaPool: newQuotaPool(defaultWindowSize),
+ localSendQuota: newQuotaPool(defaultLocalSendQuota),
+ scheme: scheme,
+ state: reachable,
+ activeStreams: make(map[uint32]*Stream),
+ isSecure: isSecure,
+ creds: opts.PerRPCCredentials,
+ maxStreams: defaultMaxStreamsClient,
+ streamsQuota: newQuotaPool(defaultMaxStreamsClient),
+ streamSendQuota: defaultWindowSize,
+ kp: kp,
+ statsHandler: opts.StatsHandler,
+ initialWindowSize: initialWindowSize,
+ onSuccess: onSuccess,
+ }
+ if opts.InitialWindowSize >= defaultWindowSize {
+ t.initialWindowSize = opts.InitialWindowSize
+ dynamicWindow = false
+ }
+ if dynamicWindow {
+ t.bdpEst = &bdpEstimator{
+ bdp: initialWindowSize,
+ updateFlowControl: t.updateFlowControl,
+ }
}
+ // Make sure awakenKeepalive can't be written upon.
+ // keepalive routine will make it writable, if need be.
+ t.awakenKeepalive <- struct{}{}
if t.statsHandler != nil {
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
@@ -230,33 +275,39 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
n, err := t.conn.Write(clientPreface)
if err != nil {
t.Close()
- return nil, connectionErrorf(true, err, "transport: %v", err)
+ return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
}
if n != len(clientPreface) {
t.Close()
return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
}
- if initialWindowSize != defaultWindowSize {
- err = t.framer.writeSettings(true, http2.Setting{
+ if t.initialWindowSize != defaultWindowSize {
+ err = t.framer.fr.WriteSettings(http2.Setting{
ID: http2.SettingInitialWindowSize,
- Val: uint32(initialWindowSize),
+ Val: uint32(t.initialWindowSize),
})
} else {
- err = t.framer.writeSettings(true)
+ err = t.framer.fr.WriteSettings()
}
if err != nil {
t.Close()
- return nil, connectionErrorf(true, err, "transport: %v", err)
+ return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
}
// Adjust the connection flow control window if needed.
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
- if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 {
+ if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
t.Close()
- return nil, connectionErrorf(true, err, "transport: %v", err)
+ return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
}
}
- go t.controller()
- t.writableChan <- 0
+ t.framer.writer.Flush()
+ go func() {
+ loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
+ t.conn.Close()
+ }()
+ if t.kp.Time != infinity {
+ go t.keepalive()
+ }
return t, nil
}
@@ -269,27 +320,38 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
buf: newRecvBuffer(),
- fc: &inFlow{limit: initialWindowSize},
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
headerChan: make(chan struct{}),
}
t.nextID += 2
- s.windowHandler = func(n int) {
- t.updateWindow(s, uint32(n))
+ s.requestRead = func(n int) {
+ t.adjustWindow(s, uint32(n))
}
// The client side stream context should have exactly the same life cycle with the user provided context.
// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
// So we use the original context here instead of creating a copy.
s.ctx = ctx
- s.dec = &recvBufferReader{
+ s.trReader = &transportReader{
+ reader: &recvBufferReader{
+ ctx: s.ctx,
+ goAway: s.goAway,
+ recv: s.buf,
+ },
+ windowHandler: func(n int) {
+ t.updateWindow(s, uint32(n))
+ },
+ }
+ s.waiters = waiters{
ctx: s.ctx,
+ tctx: t.ctx,
+ done: s.done,
goAway: s.goAway,
- recv: s.buf,
}
return s
}
-// NewStream creates a stream and register it into the transport as "active"
+// NewStream creates a stream and registers it into the transport as "active"
// streams.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
pr := &peer.Peer{
@@ -299,31 +361,51 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
- userCtx := ctx
ctx = peer.NewContext(ctx, pr)
- authData := make(map[string]string)
- for _, c := range t.creds {
+ var (
+ authData = make(map[string]string)
+ audience string
+ )
+ // Create an audience string only if needed.
+ if len(t.creds) > 0 || callHdr.Creds != nil {
// Construct URI required to get auth request metadata.
- var port string
- if pos := strings.LastIndex(t.target, ":"); pos != -1 {
- // Omit port if it is the default one.
- if t.target[pos+1:] != "443" {
- port = ":" + t.target[pos+1:]
- }
- }
+ // Omit port if it is the default one.
+ host := strings.TrimSuffix(callHdr.Host, ":443")
pos := strings.LastIndex(callHdr.Method, "/")
if pos == -1 {
- return nil, streamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
+ pos = len(callHdr.Method)
}
- audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
+ audience = "https://" + host + callHdr.Method[:pos]
+ }
+ for _, c := range t.creds {
data, err := c.GetRequestMetadata(ctx, audience)
if err != nil {
- return nil, streamErrorf(codes.InvalidArgument, "transport: %v", err)
+ return nil, streamErrorf(codes.Internal, "transport: %v", err)
}
for k, v := range data {
+ // Capital header names are illegal in HTTP/2.
+ k = strings.ToLower(k)
authData[k] = v
}
}
+ callAuthData := map[string]string{}
+ // Check if credentials.PerRPCCredentials were provided via call options.
+ // Note: if these credentials are provided both via dial options and call
+ // options, then both sets of credentials will be applied.
+ if callCreds := callHdr.Creds; callCreds != nil {
+ if !t.isSecure && callCreds.RequireTransportSecurity() {
+ return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ }
+ data, err := callCreds.GetRequestMetadata(ctx, audience)
+ if err != nil {
+ return nil, streamErrorf(codes.Internal, "transport: %v", err)
+ }
+ for k, v := range data {
+ // Capital header names are illegal in HTTP/2
+ k = strings.ToLower(k)
+ callAuthData[k] = v
+ }
+ }
t.mu.Lock()
if t.activeStreams == nil {
t.mu.Unlock()
@@ -331,174 +413,129 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
if t.state == draining {
t.mu.Unlock()
- return nil, ErrStreamDrain
+ return nil, errStreamDrain
}
if t.state != reachable {
t.mu.Unlock()
return nil, ErrConnClosing
}
- checkStreamsQuota := t.streamsQuota != nil
t.mu.Unlock()
- if checkStreamsQuota {
- sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire())
- if err != nil {
- return nil, err
- }
- // Returns the quota balance back.
- if sq > 1 {
- t.streamsQuota.add(sq - 1)
- }
- }
- if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
- // Return the quota back now because there is no stream returned to the caller.
- if _, ok := err.(StreamError); ok && checkStreamsQuota {
- t.streamsQuota.add(1)
- }
+ // Get a quota of 1 from streamsQuota.
+ if _, _, err := t.streamsQuota.get(1, waiters{ctx: ctx, tctx: t.ctx}); err != nil {
return nil, err
}
- t.mu.Lock()
- if t.state == draining {
- t.mu.Unlock()
- if checkStreamsQuota {
- t.streamsQuota.add(1)
- }
- // Need to make t writable again so that the rpc in flight can still proceed.
- t.writableChan <- 0
- return nil, ErrStreamDrain
- }
- if t.state != reachable {
- t.mu.Unlock()
- return nil, ErrConnClosing
- }
- s := t.newStream(ctx, callHdr)
- s.clientStatsCtx = userCtx
- t.activeStreams[s.id] = s
-
- // This stream is not counted when applySetings(...) initialize t.streamsQuota.
- // Reset t.streamsQuota to the right value.
- var reset bool
- if !checkStreamsQuota && t.streamsQuota != nil {
- reset = true
- }
- t.mu.Unlock()
- if reset {
- t.streamsQuota.add(-1)
- }
-
- // HPACK encodes various headers. Note that once WriteField(...) is
- // called, the corresponding headers/continuation frame has to be sent
- // because hpack.Encoder is stateful.
- t.hBuf.Reset()
- t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
- t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
- t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
- t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
- t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
- t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ // Make the slice a certain predictable size to reduce allocations made by append.
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
+ hfLen += len(authData) + len(callAuthData)
+ headerFields := make([]hpack.HeaderField, 0, hfLen)
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
if callHdr.SendCompress != "" {
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
}
if dl, ok := ctx.Deadline(); ok {
// Send out timeout regardless of its value. The server can detect timeout context by itself.
+ // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
timeout := dl.Sub(time.Now())
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
}
-
for k, v := range authData {
- // Capital header names are illegal in HTTP/2.
- k = strings.ToLower(k)
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
- var (
- hasMD bool
- endHeaders bool
- )
- if md, ok := metadata.FromContext(ctx); ok {
- hasMD = true
- for k, v := range md {
+ for k, v := range callAuthData {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ if b := stats.OutgoingTags(ctx); b != nil {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
+ }
+ if b := stats.OutgoingTrace(ctx); b != nil {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
+ }
+ if md, ok := metadata.FromOutgoingContext(ctx); ok {
+ for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
- for _, entry := range v {
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
if md, ok := t.md.(*metadata.MD); ok {
- for k, v := range *md {
+ for k, vv := range *md {
if isReservedHeader(k) {
continue
}
- for _, entry := range v {
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
- first := true
- bufLen := t.hBuf.Len()
- // Sends the headers in a single batch even when they span multiple frames.
- for !endHeaders {
- size := t.hBuf.Len()
- if size > http2MaxFrameLen {
- size = http2MaxFrameLen
- } else {
- endHeaders = true
- }
- var flush bool
- if endHeaders && (hasMD || callHdr.Flush) {
- flush = true
- }
- if first {
- // Sends a HeadersFrame to server to start a new stream.
- p := http2.HeadersFrameParam{
- StreamID: s.id,
- BlockFragment: t.hBuf.Next(size),
- EndStream: false,
- EndHeaders: endHeaders,
- }
- // Do a force flush for the buffered frames iff it is the last headers frame
- // and there is header metadata to be sent. Otherwise, there is flushing until
- // the corresponding data frame is written.
- err = t.framer.writeHeaders(flush, p)
- first = false
- } else {
- // Sends Continuation frames for the leftover headers.
- err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
- }
- if err != nil {
- t.notifyError(err)
- return nil, connectionErrorf(true, err, "transport: %v", err)
+ t.mu.Lock()
+ if t.state == draining {
+ t.mu.Unlock()
+ t.streamsQuota.add(1)
+ return nil, errStreamDrain
+ }
+ if t.state != reachable {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
+ }
+ s := t.newStream(ctx, callHdr)
+ t.activeStreams[s.id] = s
+ // If the number of active streams changes from 0 to 1, check whether keepalive
+ // has gone dormant. If so, wake it up.
+ if len(t.activeStreams) == 1 {
+ select {
+ case t.awakenKeepalive <- struct{}{}:
+ t.controlBuf.put(&ping{data: [8]byte{}})
+ // Fill the awakenKeepalive channel again as this channel must be
+ // kept non-writable except at the point where the keepalive()
+ // goroutine is waiting either to be awakened or to shut down.
+ t.awakenKeepalive <- struct{}{}
+ default:
}
}
+ t.controlBuf.put(&headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: false,
+ })
+ t.mu.Unlock()
+
if t.statsHandler != nil {
outHeader := &stats.OutHeader{
Client: true,
- WireLength: bufLen,
FullMethod: callHdr.Method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: callHdr.SendCompress,
}
- t.statsHandler.HandleRPC(s.clientStatsCtx, outHeader)
+ t.statsHandler.HandleRPC(s.ctx, outHeader)
}
- t.writableChan <- 0
return s, nil
}
// CloseStream clears the footprint of a stream when the stream is not needed any more.
// This must not be executed in reader's goroutine.
func (t *http2Client) CloseStream(s *Stream, err error) {
- var updateStreams bool
t.mu.Lock()
if t.activeStreams == nil {
t.mu.Unlock()
return
}
- if t.streamsQuota != nil {
- updateStreams = true
+ if err != nil {
+ // notify in-flight streams, before the deletion
+ s.write(recvMsg{err: err})
}
delete(t.activeStreams, s.id)
if t.state == draining && len(t.activeStreams) == 0 {
@@ -508,15 +545,27 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
return
}
t.mu.Unlock()
- if updateStreams {
- t.streamsQuota.add(1)
- }
- s.mu.Lock()
- if q := s.fc.resetPendingData(); q > 0 {
- if n := t.fc.onRead(q); n > 0 {
- t.controlBuf.put(&windowUpdate{0, n})
+ // rstStream is true when the stream is being closed at the client side
+ // and the server needs to be informed of it with a RST_STREAM
+ // frame.
+ // To make sure this frame is written to the wire before the headers of the
+ // next stream waiting for streamsQuota, we add back to the streamsQuota pool
+ // only after the RST_STREAM frame has been written out (see the resetStream
+ // case in itemHandler).
+ var rstStream bool
+ var rstError http2.ErrCode
+ defer func() {
+ // If the client doesn't have to send RST_STREAM to the server,
+ // we can safely add back to the streamsQuota pool now.
+ if !rstStream {
+ t.streamsQuota.add(1)
+ return
}
- }
+ t.controlBuf.put(&resetStream{s.id, rstError})
+ }()
+ s.mu.Lock()
+ rstStream = s.rstStream
+ rstError = s.rstError
if s.state == streamDone {
s.mu.Unlock()
return
@@ -527,27 +576,25 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
}
s.state = streamDone
s.mu.Unlock()
- if se, ok := err.(StreamError); ok && se.Code != codes.DeadlineExceeded {
- t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel})
+ if _, ok := err.(StreamError); ok {
+ rstStream = true
+ rstError = http2.ErrCodeCancel
}
}
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
-func (t *http2Client) Close() (err error) {
+func (t *http2Client) Close() error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
- return
- }
- if t.state == reachable || t.state == draining {
- close(t.errorChan)
+ return nil
}
t.state = closing
t.mu.Unlock()
- close(t.shutdownChan)
- err = t.conn.Close()
+ t.cancel()
+ err := t.conn.Close()
t.mu.Lock()
streams := t.activeStreams
t.activeStreams = nil
@@ -568,41 +615,18 @@ func (t *http2Client) Close() (err error) {
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
- return
+ return err
}
+// GracefulClose sets the state to draining, which prevents new streams from
+// being created and causes the transport to be closed when the last active
+// stream is closed. If there are no active streams, the transport is closed
+// immediately. This does nothing if the transport is already draining or
+// closing.
func (t *http2Client) GracefulClose() error {
t.mu.Lock()
switch t.state {
- case unreachable:
- // The server may close the connection concurrently. t is not available for
- // any streams. Close it now.
- t.mu.Unlock()
- t.Close()
- return nil
- case closing:
- t.mu.Unlock()
- return nil
- }
- // Notify the streams which were initiated after the server sent GOAWAY.
- select {
- case <-t.goAway:
- n := t.prevGoAwayID
- if n == 0 && t.nextID > 1 {
- n = t.nextID - 2
- }
- m := t.goAwayID + 2
- if m == 2 {
- m = 1
- }
- for i := m; i <= n; i += 2 {
- if s, ok := t.activeStreams[i]; ok {
- close(s.goAway)
- }
- }
- default:
- }
- if t.state == draining {
+ case closing, draining:
t.mu.Unlock()
return nil
}
@@ -617,95 +641,98 @@ func (t *http2Client) GracefulClose() error {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
-// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later
-// if it improves the performance.
-func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
- r := bytes.NewBuffer(data)
- for {
- var p []byte
- if r.Len() > 0 {
+func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+ select {
+ case <-s.ctx.Done():
+ return ContextErr(s.ctx.Err())
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
+ if hdr == nil && data == nil && opts.Last {
+ // stream.CloseSend uses this to send an empty frame with endStream=true.
+ t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}})
+ return nil
+ }
+ // Add data to header frame so that we can equally distribute data across frames.
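+ // emptyLen is the space left in the first http2MaxFrameLen-sized frame after
+ // hdr; that many payload bytes are moved into hdr so the first frame is full.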
+ emptyLen := http2MaxFrameLen - len(hdr)
+ if emptyLen > len(data) {
+ emptyLen = len(data)
+ }
+ hdr = append(hdr, data[:emptyLen]...)
+ data = data[emptyLen:]
+ var (
+ streamQuota int
+ streamQuotaVer uint32
+ err error
+ )
+ for idx, r := range [][]byte{hdr, data} {
+ for len(r) > 0 {
size := http2MaxFrameLen
- // Wait until the stream has some quota to send the data.
- sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire())
- if err != nil {
- return err
+ if size > len(r) {
+ size = len(r)
}
- // Wait until the transport has some quota to send the data.
- tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire())
+ if streamQuota == 0 { // Used up all the locally cached stream quota.
+ // Get all the stream quota there is.
+ streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
+ if err != nil {
+ return err
+ }
+ }
+ if size > streamQuota {
+ size = streamQuota
+ }
+
+ // Get size worth quota from transport.
+ tq, _, err := t.sendQuotaPool.get(size, s.waiters)
if err != nil {
return err
}
- if sq < size {
- size = sq
- }
if tq < size {
size = tq
}
- p = r.Next(size)
- ps := len(p)
- if ps < sq {
- // Overbooked stream quota. Return it back.
- s.sendQuotaPool.add(sq - ps)
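+ // The localSendQuota acquired here is handed back via the dataFrame
+ // callback once the frame has actually been written out (see the
+ // dataFrame case in itemHandler).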
+ ltq, _, err := t.localSendQuota.get(size, s.waiters)
+ if err != nil {
+ return err
}
- if ps < tq {
- // Overbooked transport quota. Return it back.
- t.sendQuotaPool.add(tq - ps)
+ // Even if ltq is smaller than size, we don't adjust size since
+ // ltq is only a soft limit.
+ streamQuota -= size
+ p := r[:size]
+ var endStream bool
+ // See if this is the last frame to be written.
+ if opts.Last {
+ if len(r)-size == 0 { // No more data in r after this iteration.
+ if idx == 0 { // We're writing the message header (hdr).
+ if len(data) == 0 { // There's no data to follow.
+ endStream = true
+ }
+ } else { // We're writing data.
+ endStream = true
+ }
+ }
}
- }
- var (
- endStream bool
- forceFlush bool
- )
- if opts.Last && r.Len() == 0 {
- endStream = true
- }
- // Indicate there is a writer who is about to write a data frame.
- t.framer.adjustNumWriters(1)
- // Got some quota. Try to acquire writing privilege on the transport.
- if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil {
- if _, ok := err.(StreamError); ok || err == io.EOF {
- // Return the connection quota back.
- t.sendQuotaPool.add(len(p))
+ success := func() {
+ ltq := ltq
+ t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { t.localSendQuota.add(ltq) }})
+ r = r[size:]
}
- if t.framer.adjustNumWriters(-1) == 0 {
- // This writer is the last one in this batch and has the
- // responsibility to flush the buffered frames. It queues
- // a flush request to controlBuf instead of flushing directly
- // in order to avoid the race with other writing or flushing.
- t.controlBuf.put(&flushIO{})
+ failure := func() { // The stream quota version must have changed.
+ // Our streamQuota cache is invalidated now, so give it back.
+ s.sendQuotaPool.lockedAdd(streamQuota + size)
}
- return err
- }
- select {
- case <-s.ctx.Done():
- t.sendQuotaPool.add(len(p))
- if t.framer.adjustNumWriters(-1) == 0 {
- t.controlBuf.put(&flushIO{})
+ if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
+ // Couldn't send this chunk out.
+ t.sendQuotaPool.add(size)
+ t.localSendQuota.add(ltq)
+ streamQuota = 0
}
- t.writableChan <- 0
- return ContextErr(s.ctx.Err())
- default:
- }
- if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 {
- // Do a force flush iff this is last frame for the entire gRPC message
- // and the caller is the only writer at this moment.
- forceFlush = true
- }
- // If WriteData fails, all the pending streams will be handled
- // by http2Client.Close(). No explicit CloseStream() needs to be
- // invoked.
- if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil {
- t.notifyError(err)
- return connectionErrorf(true, err, "transport: %v", err)
- }
- if t.framer.adjustNumWriters(-1) == 0 {
- t.framer.flushWrite()
- }
- t.writableChan <- 0
- if r.Len() == 0 {
- break
}
}
+ if streamQuota > 0 { // Add the left over quota back to stream.
+ s.sendQuotaPool.add(streamQuota)
+ }
if !opts.Last {
return nil
}
@@ -724,6 +751,24 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
return s, ok
}
+// adjustWindow sends out extra window update over the initial window size
+// of stream if the application is requesting data larger in size than
+// the window.
+func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.state == streamDone {
+ return
+ }
+ if w := s.fc.maybeAdjust(n); w > 0 {
+ // Piggyback connection's window update along.
+ if cw := t.fc.resetPendingUpdate(); cw > 0 {
+ t.controlBuf.put(&windowUpdate{0, cw})
+ }
+ t.controlBuf.put(&windowUpdate{s.id, w})
+ }
+}
+
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will deliver to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
@@ -733,55 +778,99 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
if s.state == streamDone {
return
}
- if w := t.fc.onRead(n); w > 0 {
- t.controlBuf.put(&windowUpdate{0, w})
- }
if w := s.fc.onRead(n); w > 0 {
+ if cw := t.fc.resetPendingUpdate(); cw > 0 {
+ t.controlBuf.put(&windowUpdate{0, cw})
+ }
t.controlBuf.put(&windowUpdate{s.id, w})
}
}
+// updateFlowControl updates the incoming flow control windows
+// for the transport and the active streams based on the current
+// BDP estimation.
+func (t *http2Client) updateFlowControl(n uint32) {
+ t.mu.Lock()
+ for _, s := range t.activeStreams {
+ s.fc.newLimit(n)
+ }
+ t.initialWindowSize = int32(n)
+ t.mu.Unlock()
+ t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
+ t.controlBuf.put(&settings{
+ ss: []http2.Setting{
+ {
+ ID: http2.SettingInitialWindowSize,
+ Val: uint32(n),
+ },
+ },
+ })
+}
+
func (t *http2Client) handleData(f *http2.DataFrame) {
- size := len(f.Data())
- if err := t.fc.onData(uint32(size)); err != nil {
- t.notifyError(connectionErrorf(true, err, "%v", err))
- return
+ size := f.Header().Length
+ var sendBDPPing bool
+ if t.bdpEst != nil {
+ sendBDPPing = t.bdpEst.add(uint32(size))
+ }
+ // Decouple the connection's flow control from the application's read.
+ // An update on the connection's flow control should not depend on
+ // whether the user application has read the data or not. Such a
+ // restriction is already imposed on the stream's flow control,
+ // and therefore the sender will be blocked anyway.
+ // Decoupling the connection flow control prevents other
+ // active (fast) streams from starving in the presence of slow or
+ // inactive streams.
+ //
+ // Furthermore, if a bdpPing is being sent out we can piggyback
+ // connection's window update for the bytes we just received.
+ if sendBDPPing {
+ if size != 0 { // Could've been an empty data frame.
+ t.controlBuf.put(&windowUpdate{0, uint32(size)})
+ }
+ t.controlBuf.put(bdpPing)
+ } else {
+ if err := t.fc.onData(uint32(size)); err != nil {
+ t.Close()
+ return
+ }
+ if w := t.fc.onRead(uint32(size)); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
if !ok {
- if w := t.fc.onRead(uint32(size)); w > 0 {
- t.controlBuf.put(&windowUpdate{0, w})
- }
return
}
if size > 0 {
s.mu.Lock()
if s.state == streamDone {
s.mu.Unlock()
- // The stream has been closed. Release the corresponding quota.
- if w := t.fc.onRead(uint32(size)); w > 0 {
- t.controlBuf.put(&windowUpdate{0, w})
- }
return
}
if err := s.fc.onData(uint32(size)); err != nil {
- s.state = streamDone
- s.statusCode = codes.Internal
- s.statusDesc = err.Error()
- close(s.done)
+ s.rstStream = true
+ s.rstError = http2.ErrCodeFlowControl
+ s.finish(status.New(codes.Internal, err.Error()))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
- t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
return
}
+ if f.Header().Flags.Has(http2.FlagDataPadded) {
+ if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
+ t.controlBuf.put(&windowUpdate{s.id, w})
+ }
+ }
s.mu.Unlock()
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
- data := make([]byte, size)
- copy(data, f.Data())
- s.write(recvMsg{data: data})
+ if len(f.Data()) > 0 {
+ data := make([]byte, len(f.Data()))
+ copy(data, f.Data())
+ s.write(recvMsg{data: data})
+ }
}
// The server has closed the stream without sending trailers. Record that
// the read direction is closed, and set the status appropriately.
@@ -791,10 +880,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
s.mu.Unlock()
return
}
- s.state = streamDone
- s.statusCode = codes.Internal
- s.statusDesc = "server closed the stream without sending trailers"
- close(s.done)
+ s.finish(status.New(codes.Internal, "server closed the stream without sending trailers"))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
@@ -810,37 +896,76 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
s.mu.Unlock()
return
}
- s.state = streamDone
if !s.headerDone {
close(s.headerChan)
s.headerDone = true
}
- s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
+
+ code := http2.ErrCode(f.ErrCode)
+ if code == http2.ErrCodeRefusedStream {
+ // The stream was unprocessed by the server.
+ s.unprocessed = true
+ }
+ statusCode, ok := http2ErrConvTab[code]
if !ok {
- grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
- s.statusCode = codes.Unknown
+ warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+ statusCode = codes.Unknown
}
- s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode)
- close(s.done)
+ s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode))
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
-func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
+func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
if f.IsAck() {
return
}
- var ss []http2.Setting
+ var rs []http2.Setting
+ var ps []http2.Setting
+ isMaxConcurrentStreamsMissing := true
f.ForeachSetting(func(s http2.Setting) error {
- ss = append(ss, s)
+ if s.ID == http2.SettingMaxConcurrentStreams {
+ isMaxConcurrentStreamsMissing = false
+ }
+ if t.isRestrictive(s) {
+ rs = append(rs, s)
+ } else {
+ ps = append(ps, s)
+ }
return nil
})
- // The settings will be applied once the ack is sent.
- t.controlBuf.put(&settings{ack: true, ss: ss})
+ if isFirst && isMaxConcurrentStreamsMissing {
+ // This means the server is imposing no limit on the
+ // maximum number of concurrent streams initiated by the client.
+ // So we must remove our self-imposed limit.
+ ps = append(ps, http2.Setting{
+ ID: http2.SettingMaxConcurrentStreams,
+ Val: math.MaxUint32,
+ })
+ }
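+ // Restrictive settings are applied before the ack is queued and permissive
+ // ones after, presumably so we never operate above a limit the server has
+ // just lowered.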
+ t.applySettings(rs)
+ t.controlBuf.put(&settingsAck{})
+ t.applySettings(ps)
+}
+
+func (t *http2Client) isRestrictive(s http2.Setting) bool {
+ switch s.ID {
+ case http2.SettingMaxConcurrentStreams:
+ return int(s.Val) < t.maxStreams
+ case http2.SettingInitialWindowSize:
+ // Note: we don't acquire a lock here to read streamSendQuota
+ // because the same goroutine updates it later.
+ return s.Val < t.streamSendQuota
+ }
+ return false
}
func (t *http2Client) handlePing(f *http2.PingFrame) {
- if f.IsAck() { // Do nothing.
+ if f.IsAck() {
+ // Maybe it's a BDP ping.
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
return
}
pingAck := &ping{ack: true}
@@ -850,31 +975,84 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Lock()
- if t.state == reachable || t.state == draining {
- if f.LastStreamID > 0 && f.LastStreamID%2 != 1 {
- t.mu.Unlock()
- t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID))
- return
- }
- select {
- case <-t.goAway:
- id := t.goAwayID
- // t.goAway has been closed (i.e.,multiple GoAways).
- if id < f.LastStreamID {
- t.mu.Unlock()
- t.notifyError(connectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID))
- return
- }
- t.prevGoAwayID = id
- t.goAwayID = f.LastStreamID
+ if t.state != reachable && t.state != draining {
+ t.mu.Unlock()
+ return
+ }
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
+ infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ }
+ id := f.LastStreamID
+ if id > 0 && id%2 != 1 {
+ t.mu.Unlock()
+ t.Close()
+ return
+ }
+ // A client can receive multiple GoAways from the server (see
+ // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
+ // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+ // sent after an RTT delay with the ID of the last stream the server will
+ // process.
+ //
+ // Therefore, when we get the first GoAway we don't necessarily close any
+ // streams. In the case of the second GoAway we close all streams created after
+ // the GoAway ID. This way, streams that were in flight while the GoAway from
+ // the server was being sent don't get killed.
+ select {
+ case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+ // If there are multiple GoAways, the first one should always have an ID greater than the following ones.
+ if id > t.prevGoAwayID {
t.mu.Unlock()
+ t.Close()
return
- default:
}
- t.goAwayID = f.LastStreamID
+ default:
+ t.setGoAwayReason(f)
close(t.goAway)
+ t.state = draining
+ }
+ // All streams with IDs greater than the GoAway ID
+ // and smaller than the previous GoAway ID should be killed.
+ upperLimit := t.prevGoAwayID
+ if upperLimit == 0 { // This is the first GoAway Frame.
+ upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+ }
+ for streamID, stream := range t.activeStreams {
+ if streamID > id && streamID <= upperLimit {
+ // The stream was unprocessed by the server.
+ stream.mu.Lock()
+ stream.unprocessed = true
+ stream.finish(statusGoAway)
+ stream.mu.Unlock()
+ close(stream.goAway)
+ }
}
+ t.prevGoAwayID = id
+ active := len(t.activeStreams)
t.mu.Unlock()
+ if active == 0 {
+ t.Close()
+ }
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on the transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+ t.goAwayReason = GoAwayNoReason
+ switch f.ErrCode {
+ case http2.ErrCodeEnhanceYourCalm:
+ if string(f.DebugData()) == "too_many_pings" {
+ t.goAwayReason = GoAwayTooManyPings
+ }
+ }
+}
+
+func (t *http2Client) GetGoAwayReason() GoAwayReason {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return t.goAwayReason
}
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@@ -895,18 +1073,18 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if !ok {
return
}
+ s.mu.Lock()
+ s.bytesReceived = true
+ s.mu.Unlock()
var state decodeState
- for _, hf := range frame.Fields {
- state.processHeaderField(hf)
- }
- if state.err != nil {
+ if err := state.decodeResponseHeader(frame); err != nil {
s.mu.Lock()
if !s.headerDone {
close(s.headerChan)
s.headerDone = true
}
s.mu.Unlock()
- s.write(recvMsg{err: state.err})
+ s.write(recvMsg{err: err})
// Something wrong. Stops reading even when there is remaining.
return
}
@@ -920,13 +1098,13 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
Client: true,
WireLength: int(frame.Header().Length),
}
- t.statsHandler.HandleRPC(s.clientStatsCtx, inHeader)
+ t.statsHandler.HandleRPC(s.ctx, inHeader)
} else {
inTrailer := &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
}
- t.statsHandler.HandleRPC(s.clientStatsCtx, inTrailer)
+ t.statsHandler.HandleRPC(s.ctx, inTrailer)
}
}
}()
@@ -947,14 +1125,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
s.mu.Unlock()
return
}
-
if len(state.mdata) > 0 {
s.trailer = state.mdata
}
- s.statusCode = state.statusCode
- s.statusDesc = state.statusDesc
- close(s.done)
- s.state = streamDone
+ s.finish(state.status())
s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
@@ -977,21 +1151,24 @@ func handleMalformedHTTP2(s *Stream, err error) {
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
// Check the validity of server preface.
- frame, err := t.framer.readFrame()
+ frame, err := t.framer.fr.ReadFrame()
if err != nil {
- t.notifyError(err)
+ t.Close()
return
}
+ atomic.CompareAndSwapUint32(&t.activity, 0, 1)
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
- t.notifyError(err)
+ t.Close()
return
}
- t.handleSettings(sf)
+ t.onSuccess()
+ t.handleSettings(sf, true)
// loop to keep reading incoming messages on this transport.
for {
- frame, err := t.framer.readFrame()
+ frame, err := t.framer.fr.ReadFrame()
+ atomic.CompareAndSwapUint32(&t.activity, 0, 1)
if err != nil {
// Abort an active stream if the http2.Framer returns a
// http2.StreamError. This can happen only if the server's response
@@ -1002,12 +1179,12 @@ func (t *http2Client) reader() {
t.mu.Unlock()
if s != nil {
// use error detail to provide better err message
- handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail()))
+ handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()))
}
continue
} else {
// Transport error.
- t.notifyError(err)
+ t.Close()
return
}
}
@@ -1019,7 +1196,7 @@ func (t *http2Client) reader() {
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
- t.handleSettings(frame)
+ t.handleSettings(frame, false)
case *http2.PingFrame:
t.handlePing(frame)
case *http2.GoAwayFrame:
@@ -1027,7 +1204,7 @@ func (t *http2Client) reader() {
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
- grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
}
}
}
@@ -1042,22 +1219,14 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
if s.Val > math.MaxInt32 {
s.Val = math.MaxInt32
}
- t.mu.Lock()
- reset := t.streamsQuota != nil
- if !reset {
- t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams))
- }
ms := t.maxStreams
t.maxStreams = int(s.Val)
- t.mu.Unlock()
- if reset {
- t.streamsQuota.add(int(s.Val) - ms)
- }
+ t.streamsQuota.add(int(s.Val) - ms)
case http2.SettingInitialWindowSize:
t.mu.Lock()
for _, stream := range t.activeStreams {
// Adjust the sending quota for each stream.
- stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
+ stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
}
t.streamSendQuota = s.Val
t.mu.Unlock()
@@ -1065,65 +1234,143 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
}
}
-// controller running in a separate goroutine takes charge of sending control
-// frames (e.g., window update, reset stream, setting, etc.) to the server.
-func (t *http2Client) controller() {
+// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
+// is duplicated between the client and the server.
+// The transport layer needs to be refactored to take care of this.
+func (t *http2Client) itemHandler(i item) (err error) {
+ defer func() {
+ if err != nil {
+ errorf(" error in itemHandler: %v", err)
+ }
+ }()
+ switch i := i.(type) {
+ case *dataFrame:
+ if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
+ return err
+ }
+ i.f()
+ return nil
+ case *headerFrame:
+ t.hBuf.Reset()
+ for _, f := range i.hf {
+ t.hEnc.WriteField(f)
+ }
+ endHeaders := false
+ first := true
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ if first {
+ first = false
+ err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+ StreamID: i.streamID,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: i.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = t.framer.fr.WriteContinuation(
+ i.streamID,
+ endHeaders,
+ t.hBuf.Next(size),
+ )
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ case *windowUpdate:
+ return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+ case *settings:
+ return t.framer.fr.WriteSettings(i.ss...)
+ case *settingsAck:
+ return t.framer.fr.WriteSettingsAck()
+ case *resetStream:
+ // If the server needs to be informed about stream closing,
+ // we need to make sure the RST_STREAM frame is written to
+ // the wire before the headers of the next stream waiting on
+ // streamsQuota. We ensure this by adding to the streamsQuota pool
+ // only after the RST_STREAM frame has been written out.
+ err := t.framer.fr.WriteRSTStream(i.streamID, i.code)
+ t.streamsQuota.add(1)
+ return err
+ case *flushIO:
+ return t.framer.writer.Flush()
+ case *ping:
+ if !i.ack {
+ t.bdpEst.timesnap(i.data)
+ }
+ return t.framer.fr.WritePing(i.ack, i.data)
+ default:
+ errorf("transport: http2Client.controller got unexpected item type %v", i)
+ return fmt.Errorf("transport: http2Client.controller got unexpected item type %v", i)
+ }
+}
+
+// keepalive, running in a separate goroutine, makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() {
+ p := &ping{data: [8]byte{}}
+ timer := time.NewTimer(t.kp.Time)
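+ // The timer first fires after kp.Time of inactivity; once a ping is sent it
+ // is re-armed with kp.Timeout, and the connection is closed if no activity
+ // is seen before it fires again.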
for {
select {
- case i := <-t.controlBuf.get():
- t.controlBuf.load()
+ case <-timer.C:
+ if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
+ timer.Reset(t.kp.Time)
+ continue
+ }
+ // Check if keepalive should go dormant.
+ t.mu.Lock()
+ if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+ // Make awakenKeepalive writable.
+ <-t.awakenKeepalive
+ t.mu.Unlock()
+ select {
+ case <-t.awakenKeepalive:
+ // If control gets here, a ping has been sent;
+ // we need to reset the timer with keepalive.Timeout.
+ case <-t.ctx.Done():
+ return
+ }
+ } else {
+ t.mu.Unlock()
+ // Send ping.
+ t.controlBuf.put(p)
+ }
+
+ // By the time control gets here a ping has been sent one way or the other.
+ timer.Reset(t.kp.Timeout)
select {
- case <-t.writableChan:
- switch i := i.(type) {
- case *windowUpdate:
- t.framer.writeWindowUpdate(true, i.streamID, i.increment)
- case *settings:
- if i.ack {
- t.framer.writeSettingsAck(true)
- t.applySettings(i.ss)
- } else {
- t.framer.writeSettings(true, i.ss...)
- }
- case *resetStream:
- t.framer.writeRSTStream(true, i.streamID, i.code)
- case *flushIO:
- t.framer.flushWrite()
- case *ping:
- t.framer.writePing(true, i.ack, i.data)
- default:
- grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i)
+ case <-timer.C:
+ if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
+ timer.Reset(t.kp.Time)
+ continue
}
- t.writableChan <- 0
- continue
- case <-t.shutdownChan:
+ t.Close()
return
+ case <-t.ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
+ return
+ }
+ case <-t.ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
}
- case <-t.shutdownChan:
return
}
}
}
func (t *http2Client) Error() <-chan struct{} {
- return t.errorChan
+ return t.ctx.Done()
}
func (t *http2Client) GoAway() <-chan struct{} {
return t.goAway
}
-
-func (t *http2Client) notifyError(err error) {
- t.mu.Lock()
- // make sure t.errorChan is closed only once.
- if t.state == draining {
- t.mu.Unlock()
- t.Close()
- return
- }
- if t.state == reachable {
- t.state = unreachable
- close(t.errorChan)
- grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err)
- }
- t.mu.Unlock()
-}
diff --git a/go/vendor/google.golang.org/grpc/transport/http2_server.go b/go/vendor/google.golang.org/grpc/transport/http2_server.go
index a095dd0..6d252c5 100644
--- a/go/vendor/google.golang.org/grpc/transport/http2_server.go
+++ b/go/vendor/google.golang.org/grpc/transport/http2_server.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -36,21 +21,27 @@ package transport
import (
"bytes"
"errors"
+ "fmt"
"io"
"math"
+ "math/rand"
"net"
"strconv"
"sync"
+ "sync/atomic"
+ "time"
+ "github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
@@ -61,94 +52,171 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
ctx context.Context
+ cancel context.CancelFunc
conn net.Conn
remoteAddr net.Addr
localAddr net.Addr
maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
inTapHandle tap.ServerInHandle
- // writableChan synchronizes write access to the transport.
- // A writer acquires the write lock by receiving a value on writableChan
- // and releases it by sending on writableChan.
- writableChan chan int
- // shutdownChan is closed when Close is called.
- // Blocking operations should select on shutdownChan to avoid
- // blocking forever after Close.
- shutdownChan chan struct{}
- framer *framer
- hBuf *bytes.Buffer // the buffer for HPACK encoding
- hEnc *hpack.Encoder // HPACK encoder
-
+ framer *framer
+ hBuf *bytes.Buffer // the buffer for HPACK encoding
+ hEnc *hpack.Encoder // HPACK encoder
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
- controlBuf *recvBuffer
+ controlBuf *controlBuffer
fc *inFlow
// sendQuotaPool provides flow control to outbound message.
sendQuotaPool *quotaPool
+ // localSendQuota limits the amount of data that can be scheduled
+ // for writing before it is actually written out.
+ localSendQuota *quotaPool
+ stats stats.Handler
+ // Flag to keep track of reading activity on transport.
+ // 1 is true and 0 is false.
+ activity uint32 // Accessed atomically.
+ // Keepalive and max-age parameters for the server.
+ kp keepalive.ServerParameters
+
+ // Keepalive enforcement policy.
+ kep keepalive.EnforcementPolicy
+ // The time instant at which the last ping was received.
+ lastPingAt time.Time
+ // Number of times the client has violated keepalive ping policy so far.
+ pingStrikes uint8
+ // Flag to signify that the number of ping strikes should be reset to 0.
+ // This is set whenever data or header frames are sent.
+ // 1 means yes.
+ resetPingStrikes uint32 // Accessed atomically.
+ initialWindowSize int32
+ bdpEst *bdpEstimator
- stats stats.Handler
+ mu sync.Mutex // guard the following
- mu sync.Mutex // guard the following
+ // drainChan is initialized when drain(...) is called the first time,
+ // after which the server writes out the first GoAway (with ID 2^31-1) frame.
+ // An independent goroutine is then launched to send the second GoAway later.
+ // During this time we don't want to write another first GoAway (with ID 2^31-1) frame.
+ // Thus a call to drain(...) is a no-op if drainChan is already initialized, since draining is
+ // already underway.
+ drainChan chan struct{}
state transportState
activeStreams map[uint32]*Stream
// the per-stream outbound flow control window size set by the peer.
streamSendQuota uint32
+ // idle is the time instant when the connection went idle.
+ // This is either the beginning of the connection or when the number of
+ // RPCs goes down to 0.
+ // When the connection is busy, this value is set to 0.
+ idle time.Time
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
- framer := newFramer(conn)
+ writeBufSize := defaultWriteBufSize
+ if config.WriteBufferSize > 0 {
+ writeBufSize = config.WriteBufferSize
+ }
+ readBufSize := defaultReadBufSize
+ if config.ReadBufferSize > 0 {
+ readBufSize = config.ReadBufferSize
+ }
+ framer := newFramer(conn, writeBufSize, readBufSize)
// Send initial settings as connection preface to client.
- var settings []http2.Setting
+ var isettings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec.
maxStreams := config.MaxStreams
if maxStreams == 0 {
maxStreams = math.MaxUint32
} else {
- settings = append(settings, http2.Setting{
+ isettings = append(isettings, http2.Setting{
ID: http2.SettingMaxConcurrentStreams,
Val: maxStreams,
})
}
- if initialWindowSize != defaultWindowSize {
- settings = append(settings, http2.Setting{
+ dynamicWindow := true
+ iwz := int32(initialWindowSize)
+ if config.InitialWindowSize >= defaultWindowSize {
+ iwz = config.InitialWindowSize
+ dynamicWindow = false
+ }
+ icwz := int32(initialWindowSize)
+ if config.InitialConnWindowSize >= defaultWindowSize {
+ icwz = config.InitialConnWindowSize
+ dynamicWindow = false
+ }
+ if iwz != defaultWindowSize {
+ isettings = append(isettings, http2.Setting{
ID: http2.SettingInitialWindowSize,
- Val: uint32(initialWindowSize)})
+ Val: uint32(iwz)})
}
- if err := framer.writeSettings(true, settings...); err != nil {
- return nil, connectionErrorf(true, err, "transport: %v", err)
+ if err := framer.fr.WriteSettings(isettings...); err != nil {
+ return nil, connectionErrorf(false, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
- if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
- return nil, connectionErrorf(true, err, "transport: %v", err)
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 {
+ if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
+ return nil, connectionErrorf(false, err, "transport: %v", err)
}
}
+ kp := config.KeepaliveParams
+ if kp.MaxConnectionIdle == 0 {
+ kp.MaxConnectionIdle = defaultMaxConnectionIdle
+ }
+ if kp.MaxConnectionAge == 0 {
+ kp.MaxConnectionAge = defaultMaxConnectionAge
+ }
+ // Add a jitter to MaxConnectionAge.
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
+ if kp.MaxConnectionAgeGrace == 0 {
+ kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
+ }
+ if kp.Time == 0 {
+ kp.Time = defaultServerKeepaliveTime
+ }
+ if kp.Timeout == 0 {
+ kp.Timeout = defaultServerKeepaliveTimeout
+ }
+ kep := config.KeepalivePolicy
+ if kep.MinTime == 0 {
+ kep.MinTime = defaultKeepalivePolicyMinTime
+ }
var buf bytes.Buffer
+ ctx, cancel := context.WithCancel(context.Background())
t := &http2Server{
- ctx: context.Background(),
- conn: conn,
- remoteAddr: conn.RemoteAddr(),
- localAddr: conn.LocalAddr(),
- authInfo: config.AuthInfo,
- framer: framer,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- maxStreams: maxStreams,
- inTapHandle: config.InTapHandle,
- controlBuf: newRecvBuffer(),
- fc: &inFlow{limit: initialConnWindowSize},
- sendQuotaPool: newQuotaPool(defaultWindowSize),
- state: reachable,
- writableChan: make(chan int, 1),
- shutdownChan: make(chan struct{}),
- activeStreams: make(map[uint32]*Stream),
- streamSendQuota: defaultWindowSize,
- stats: config.StatsHandler,
+ ctx: ctx,
+ cancel: cancel,
+ conn: conn,
+ remoteAddr: conn.RemoteAddr(),
+ localAddr: conn.LocalAddr(),
+ authInfo: config.AuthInfo,
+ framer: framer,
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ maxStreams: maxStreams,
+ inTapHandle: config.InTapHandle,
+ controlBuf: newControlBuffer(),
+ fc: &inFlow{limit: uint32(icwz)},
+ sendQuotaPool: newQuotaPool(defaultWindowSize),
+ localSendQuota: newQuotaPool(defaultLocalSendQuota),
+ state: reachable,
+ activeStreams: make(map[uint32]*Stream),
+ streamSendQuota: defaultWindowSize,
+ stats: config.StatsHandler,
+ kp: kp,
+ idle: time.Now(),
+ kep: kep,
+ initialWindowSize: iwz,
+ }
+ if dynamicWindow {
+ t.bdpEst = &bdpEstimator{
+ bdp: initialWindowSize,
+ updateFlowControl: t.updateFlowControl,
+ }
}
if t.stats != nil {
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
@@ -158,37 +226,73 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
connBegin := &stats.ConnBegin{}
t.stats.HandleConn(t.ctx, connBegin)
}
- go t.controller()
- t.writableChan <- 0
+ t.framer.writer.Flush()
+
+ defer func() {
+ if err != nil {
+ t.Close()
+ }
+ }()
+
+ // Check the validity of client preface.
+ preface := make([]byte, len(clientPreface))
+ if _, err := io.ReadFull(t.conn, preface); err != nil {
+ return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
+ }
+ if !bytes.Equal(preface, clientPreface) {
+ return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
+ }
+
+ frame, err := t.framer.fr.ReadFrame()
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ return nil, err
+ }
+ if err != nil {
+ return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
+ }
+ atomic.StoreUint32(&t.activity, 1)
+ sf, ok := frame.(*http2.SettingsFrame)
+ if !ok {
+ return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
+ }
+ t.handleSettings(sf)
+
+ go func() {
+ loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
+ t.conn.Close()
+ }()
+ go t.keepalive()
return t, nil
}
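The write and read buffer sizes above fall back to the 32KB defaults whenever ServerConfig leaves them at zero or below. A minimal sketch of overriding them on a server, assuming the grpc.WriteBufferSize and grpc.ReadBufferSize server options shipped with this grpc-go release:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	// Non-positive values would leave the 32KB defaults used by newFramer in place.
	srv := grpc.NewServer(
		grpc.WriteBufferSize(64*1024),
		grpc.ReadBufferSize(64*1024),
	)
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}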
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
- buf := newRecvBuffer()
- s := &Stream{
- id: frame.Header().StreamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: initialWindowSize},
- }
+ streamID := frame.Header().StreamID
var state decodeState
for _, hf := range frame.Fields {
- state.processHeaderField(hf)
- }
- if err := state.err; err != nil {
- if se, ok := err.(StreamError); ok {
- t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
+ if err := state.processHeaderField(hf); err != nil {
+ if se, ok := err.(StreamError); ok {
+ t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]})
+ }
+ return
}
- return
+ }
+
+ buf := newRecvBuffer()
+ s := &Stream{
+ id: streamID,
+ st: t,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ recvCompress: state.encoding,
+ method: state.method,
}
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
- s.recvCompress = state.encoding
if state.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
} else {
@@ -208,15 +312,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context.
if len(state.mdata) > 0 {
- s.ctx = metadata.NewContext(s.ctx, state.mdata)
+ s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
}
-
- s.dec = &recvBufferReader{
- ctx: s.ctx,
- recv: s.buf,
+ if state.statsTags != nil {
+ s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags)
+ }
+ if state.statsTrace != nil {
+ s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace)
}
- s.recvCompress = state.encoding
- s.method = state.method
if t.inTapHandle != nil {
var err error
info := &tap.Info{
@@ -224,7 +327,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
- // TODO: Log the real error.
+ warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
return
}
@@ -236,21 +339,24 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
- t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
+ t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream})
return
}
- if s.id%2 != 1 || s.id <= t.maxStreamID {
+ if streamID%2 != 1 || streamID <= t.maxStreamID {
t.mu.Unlock()
// illegal gRPC stream id.
- grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id)
+ errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
return true
}
- t.maxStreamID = s.id
+ t.maxStreamID = streamID
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
- t.activeStreams[s.id] = s
+ t.activeStreams[streamID] = s
+ if len(t.activeStreams) == 1 {
+ t.idle = time.Time{}
+ }
t.mu.Unlock()
- s.windowHandler = func(n int) {
- t.updateWindow(s, uint32(n))
+ s.requestRead = func(n int) {
+ t.adjustWindow(s, uint32(n))
}
s.ctx = traceCtx(s.ctx, s.method)
if t.stats != nil {
@@ -264,6 +370,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
t.stats.HandleRPC(s.ctx, inHeader)
}
+ s.trReader = &transportReader{
+ reader: &recvBufferReader{
+ ctx: s.ctx,
+ recv: s.buf,
+ },
+ windowHandler: func(n int) {
+ t.updateWindow(s, uint32(n))
+ },
+ }
+ s.waiters = waiters{
+ ctx: s.ctx,
+ tctx: t.ctx,
+ }
handle(s)
return
}
@@ -272,39 +391,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
- // Check the validity of client preface.
- preface := make([]byte, len(clientPreface))
- if _, err := io.ReadFull(t.conn, preface); err != nil {
- grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
- t.Close()
- return
- }
- if !bytes.Equal(preface, clientPreface) {
- grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
- t.Close()
- return
- }
-
- frame, err := t.framer.readFrame()
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- t.Close()
- return
- }
- if err != nil {
- grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
- t.Close()
- return
- }
- sf, ok := frame.(*http2.SettingsFrame)
- if !ok {
- grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
- t.Close()
- return
- }
- t.handleSettings(sf)
-
for {
- frame, err := t.framer.readFrame()
+ frame, err := t.framer.fr.ReadFrame()
+ atomic.StoreUint32(&t.activity, 1)
if err != nil {
if se, ok := err.(http2.StreamError); ok {
t.mu.Lock()
@@ -320,7 +409,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
t.Close()
return
}
- grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+ warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
t.Close()
return
}
@@ -343,7 +432,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
- grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
}
}
}
@@ -363,6 +452,23 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
return s, true
}
+// adjustWindow sends out an extra window update over the initial window size
+// of the stream if the application is requesting data larger in size than
+// the window.
+func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.state == streamDone {
+ return
+ }
+ if w := s.fc.maybeAdjust(n); w > 0 {
+ if cw := t.fc.resetPendingUpdate(); cw > 0 {
+ t.controlBuf.put(&windowUpdate{0, cw})
+ }
+ t.controlBuf.put(&windowUpdate{s.id, w})
+ }
+}
+
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will deliver to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
@@ -372,37 +478,77 @@ func (t *http2Server) updateWindow(s *Stream, n uint32) {
if s.state == streamDone {
return
}
- if w := t.fc.onRead(n); w > 0 {
- t.controlBuf.put(&windowUpdate{0, w})
- }
if w := s.fc.onRead(n); w > 0 {
+ if cw := t.fc.resetPendingUpdate(); cw > 0 {
+ t.controlBuf.put(&windowUpdate{0, cw})
+ }
t.controlBuf.put(&windowUpdate{s.id, w})
}
}
+// updateFlowControl updates the incoming flow control windows
+// for the transport and the stream based on the current bdp
+// estimation.
+func (t *http2Server) updateFlowControl(n uint32) {
+ t.mu.Lock()
+ for _, s := range t.activeStreams {
+ s.fc.newLimit(n)
+ }
+ t.initialWindowSize = int32(n)
+ t.mu.Unlock()
+ t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
+ t.controlBuf.put(&settings{
+ ss: []http2.Setting{
+ {
+ ID: http2.SettingInitialWindowSize,
+ Val: uint32(n),
+ },
+ },
+ })
+
+}
+
func (t *http2Server) handleData(f *http2.DataFrame) {
- size := len(f.Data())
- if err := t.fc.onData(uint32(size)); err != nil {
- grpclog.Printf("transport: http2Server %v", err)
- t.Close()
- return
+ size := f.Header().Length
+ var sendBDPPing bool
+ if t.bdpEst != nil {
+ sendBDPPing = t.bdpEst.add(uint32(size))
+ }
+ // Decouple connection's flow control from application's read.
+ // An update on connection's flow control should not depend on
+ // whether user application has read the data or not. Such a
+ // restriction is already imposed on the stream's flow control,
+ // and therefore the sender will be blocked anyway.
+ // Decoupling the connection flow control will prevent other
+ // active (fast) streams from starving in the presence of slow or
+ // inactive streams.
+ //
+ // Furthermore, if a bdpPing is being sent out we can piggyback
+ // connection's window update for the bytes we just received.
+ if sendBDPPing {
+ if size != 0 { // Could be an empty frame.
+ t.controlBuf.put(&windowUpdate{0, uint32(size)})
+ }
+ t.controlBuf.put(bdpPing)
+ } else {
+ if err := t.fc.onData(uint32(size)); err != nil {
+ errorf("transport: http2Server %v", err)
+ t.Close()
+ return
+ }
+ if w := t.fc.onRead(uint32(size)); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
if !ok {
- if w := t.fc.onRead(uint32(size)); w > 0 {
- t.controlBuf.put(&windowUpdate{0, w})
- }
return
}
if size > 0 {
s.mu.Lock()
if s.state == streamDone {
s.mu.Unlock()
- // The stream has been closed. Release the corresponding quota.
- if w := t.fc.onRead(uint32(size)); w > 0 {
- t.controlBuf.put(&windowUpdate{0, w})
- }
return
}
if err := s.fc.onData(uint32(size)); err != nil {
@@ -411,13 +557,20 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
return
}
+ if f.Header().Flags.Has(http2.FlagDataPadded) {
+ if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
+ t.controlBuf.put(&windowUpdate{s.id, w})
+ }
+ }
s.mu.Unlock()
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
- data := make([]byte, size)
- copy(data, f.Data())
- s.write(recvMsg{data: data})
+ if len(f.Data()) > 0 {
+ data := make([]byte, len(f.Data()))
+ copy(data, f.Data())
+ s.write(recvMsg{data: data})
+ }
}
if f.Header().Flags.Has(http2.FlagDataEndStream) {
// Received the end of stream from the client.
@@ -442,22 +595,98 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
if f.IsAck() {
return
}
- var ss []http2.Setting
+ var rs []http2.Setting
+ var ps []http2.Setting
f.ForeachSetting(func(s http2.Setting) error {
- ss = append(ss, s)
+ if t.isRestrictive(s) {
+ rs = append(rs, s)
+ } else {
+ ps = append(ps, s)
+ }
return nil
})
- // The settings will be applied once the ack is sent.
- t.controlBuf.put(&settings{ack: true, ss: ss})
+ t.applySettings(rs)
+ t.controlBuf.put(&settingsAck{})
+ t.applySettings(ps)
+}
+
+func (t *http2Server) isRestrictive(s http2.Setting) bool {
+ switch s.ID {
+ case http2.SettingInitialWindowSize:
+ // Note: we don't acquire a lock here to read streamSendQuota
+ // because the same goroutine updates it later.
+ return s.Val < t.streamSendQuota
+ }
+ return false
+}
+
+func (t *http2Server) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ if s.ID == http2.SettingInitialWindowSize {
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ t.mu.Unlock()
+ }
+
+ }
}
+const (
+ maxPingStrikes = 2
+ defaultPingTimeout = 2 * time.Hour
+)
+
func (t *http2Server) handlePing(f *http2.PingFrame) {
- if f.IsAck() { // Do nothing.
+ if f.IsAck() {
+ if f.Data == goAwayPing.data && t.drainChan != nil {
+ close(t.drainChan)
+ return
+ }
+ // Maybe it's a BDP ping.
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
return
}
pingAck := &ping{ack: true}
copy(pingAck.data[:], f.Data[:])
t.controlBuf.put(pingAck)
+
+ now := time.Now()
+ defer func() {
+ t.lastPingAt = now
+ }()
+ // A reset of ping strikes means that we don't need to check for a policy
+ // violation for this ping, and that the pingStrikes counter should be set
+ // to 0.
+ if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+ t.pingStrikes = 0
+ return
+ }
+ t.mu.Lock()
+ ns := len(t.activeStreams)
+ t.mu.Unlock()
+ if ns < 1 && !t.kep.PermitWithoutStream {
+ // Keepalive shouldn't be active, so this new ping should
+ // have come after at least defaultPingTimeout.
+ if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+ t.pingStrikes++
+ }
+ } else {
+ // Check if keepalive policy is respected.
+ if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+ t.pingStrikes++
+ }
+ }
+
+ if t.pingStrikes > maxPingStrikes {
+ // Send goaway and close the connection.
+ errorf("transport: Got too many pings from the client, closing the connection.")
+ t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
+ }
}
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@@ -472,40 +701,16 @@ func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
}
}
-func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
- first := true
- endHeaders := false
- var err error
- // Sends the headers in a single batch.
- for !endHeaders {
- size := t.hBuf.Len()
- if size > http2MaxFrameLen {
- size = http2MaxFrameLen
- } else {
- endHeaders = true
- }
- if first {
- p := http2.HeadersFrameParam{
- StreamID: s.id,
- BlockFragment: b.Next(size),
- EndStream: endStream,
- EndHeaders: endHeaders,
- }
- err = t.framer.writeHeaders(endHeaders, p)
- first = false
- } else {
- err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
- }
- if err != nil {
- t.Close()
- return connectionErrorf(true, err, "transport: %v", err)
- }
- }
- return nil
-}
-
// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+ select {
+ case <-s.ctx.Done():
+ return ContextErr(s.ctx.Err())
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
s.mu.Lock()
if s.headerOk || s.state == streamDone {
s.mu.Unlock()
@@ -521,35 +726,34 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
md = s.header
s.mu.Unlock()
- if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
- return err
- }
- t.hBuf.Reset()
- t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
if s.sendCompress != "" {
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
- for k, v := range md {
+ for k, vv := range md {
if isReservedHeader(k) {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
continue
}
- for _, entry := range v {
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
- bufLen := t.hBuf.Len()
- if err := t.writeHeaders(s, t.hBuf, false); err != nil {
- return err
- }
+ t.controlBuf.put(&headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: false,
+ })
if t.stats != nil {
outHeader := &stats.OutHeader{
- WireLength: bufLen,
+ //WireLength: // TODO(mmukhi): Revisit this later, if needed.
}
t.stats.HandleRPC(s.Context(), outHeader)
}
- t.writableChan <- 0
return nil
}
@@ -557,7 +761,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
// There is no further I/O operations being able to perform on this stream.
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
-func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+ select {
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
var headersSent, hasHeader bool
s.mu.Lock()
if s.state == streamDone {
@@ -577,50 +787,59 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
headersSent = true
}
- if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
- return err
- }
- t.hBuf.Reset()
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
if !headersSent {
- t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
}
- t.hEnc.WriteField(
- hpack.HeaderField{
- Name: "grpc-status",
- Value: strconv.Itoa(int(statusCode)),
- })
- t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+ if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ stBytes, err := proto.Marshal(p)
+ if err != nil {
+ // TODO: return error instead, when callers are able to handle it.
+ panic(err)
+ }
+
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+ }
+
// Attach the trailer metadata.
- for k, v := range s.trailer {
+ for k, vv := range s.trailer {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
if isReservedHeader(k) {
continue
}
- for _, entry := range v {
- t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
- bufLen := t.hBuf.Len()
- if err := t.writeHeaders(s, t.hBuf, true); err != nil {
- t.Close()
- return err
- }
+ t.controlBuf.put(&headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: true,
+ })
if t.stats != nil {
- outTrailer := &stats.OutTrailer{
- WireLength: bufLen,
- }
- t.stats.HandleRPC(s.Context(), outTrailer)
+ t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
}
t.closeStream(s)
- t.writableChan <- 0
return nil
}
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
- // TODO(zhaoq): Support multi-writers for a single stream.
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+ select {
+ case <-s.ctx.Done():
+ return ContextErr(s.ctx.Err())
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
var writeHeaderFrame bool
s.mu.Lock()
if s.state == streamDone {
@@ -634,149 +853,290 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
if writeHeaderFrame {
t.WriteHeader(s, nil)
}
- r := bytes.NewBuffer(data)
- for {
- if r.Len() == 0 {
- return nil
- }
- size := http2MaxFrameLen
- // Wait until the stream has some quota to send the data.
- sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
- if err != nil {
- return err
- }
- // Wait until the transport has some quota to send the data.
- tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
- if err != nil {
- return err
- }
- if sq < size {
- size = sq
- }
- if tq < size {
- size = tq
- }
- p := r.Next(size)
- ps := len(p)
- if ps < sq {
- // Overbooked stream quota. Return it back.
- s.sendQuotaPool.add(sq - ps)
- }
- if ps < tq {
- // Overbooked transport quota. Return it back.
- t.sendQuotaPool.add(tq - ps)
- }
- t.framer.adjustNumWriters(1)
- // Got some quota. Try to acquire writing privilege on the
- // transport.
- if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil {
- if _, ok := err.(StreamError); ok {
- // Return the connection quota back.
- t.sendQuotaPool.add(ps)
+ // Add data to header frame so that we can equally distribute data across frames.
+ emptyLen := http2MaxFrameLen - len(hdr)
+ if emptyLen > len(data) {
+ emptyLen = len(data)
+ }
+ hdr = append(hdr, data[:emptyLen]...)
+ data = data[emptyLen:]
+ var (
+ streamQuota int
+ streamQuotaVer uint32
+ err error
+ )
+ for _, r := range [][]byte{hdr, data} {
+ for len(r) > 0 {
+ size := http2MaxFrameLen
+ if size > len(r) {
+ size = len(r)
}
- if t.framer.adjustNumWriters(-1) == 0 {
- // This writer is the last one in this batch and has the
- // responsibility to flush the buffered frames. It queues
- // a flush request to controlBuf instead of flushing directly
- // in order to avoid the race with other writing or flushing.
- t.controlBuf.put(&flushIO{})
+ if streamQuota == 0 { // Used up all the locally cached stream quota.
+ // Get all the stream quota there is.
+ streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
+ if err != nil {
+ return err
+ }
}
- return err
- }
- select {
- case <-s.ctx.Done():
- t.sendQuotaPool.add(ps)
- if t.framer.adjustNumWriters(-1) == 0 {
- t.controlBuf.put(&flushIO{})
+ if size > streamQuota {
+ size = streamQuota
+ }
+ // Get size worth quota from transport.
+ tq, _, err := t.sendQuotaPool.get(size, s.waiters)
+ if err != nil {
+ return err
+ }
+ if tq < size {
+ size = tq
+ }
+ ltq, _, err := t.localSendQuota.get(size, s.waiters)
+ if err != nil {
+ return err
+ }
+ // Even if ltq is smaller than size, we don't adjust size since
+ // ltq is only a soft limit.
+ streamQuota -= size
+ p := r[:size]
+ // Reset ping strikes when sending data since this might cause
+ // the peer to send ping.
+ atomic.StoreUint32(&t.resetPingStrikes, 1)
+ success := func() {
+ ltq := ltq
+ t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
+ t.localSendQuota.add(ltq)
+ }})
+ r = r[size:]
+ }
+ failure := func() { // The stream quota version must have changed.
+ // Our streamQuota cache is invalidated now, so give it back.
+ s.sendQuotaPool.lockedAdd(streamQuota + size)
+ }
+ if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
+ // Couldn't send this chunk out.
+ t.sendQuotaPool.add(size)
+ t.localSendQuota.add(ltq)
+ streamQuota = 0
}
- t.writableChan <- 0
- return ContextErr(s.ctx.Err())
- default:
- }
- var forceFlush bool
- if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
- forceFlush = true
- }
- if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
- t.Close()
- return connectionErrorf(true, err, "transport: %v", err)
- }
- if t.framer.adjustNumWriters(-1) == 0 {
- t.framer.flushWrite()
}
- t.writableChan <- 0
}
-
+ if streamQuota > 0 {
+ // Add the leftover quota back to the stream.
+ s.sendQuotaPool.add(streamQuota)
+ }
+ return nil
}
-func (t *http2Server) applySettings(ss []http2.Setting) {
- for _, s := range ss {
- if s.ID == http2.SettingInitialWindowSize {
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+ p := &ping{}
+ var pingSent bool
+ maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
+ maxAge := time.NewTimer(t.kp.MaxConnectionAge)
+ keepalive := time.NewTimer(t.kp.Time)
+ // NOTE: All exit paths of this function should reset their
+ // respective timers. A failure to do so will cause the
+ // following clean-up to deadlock and eventually leak.
+ defer func() {
+ if !maxIdle.Stop() {
+ <-maxIdle.C
+ }
+ if !maxAge.Stop() {
+ <-maxAge.C
+ }
+ if !keepalive.Stop() {
+ <-keepalive.C
+ }
+ }()
+ for {
+ select {
+ case <-maxIdle.C:
t.mu.Lock()
- defer t.mu.Unlock()
- for _, stream := range t.activeStreams {
- stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
+ idle := t.idle
+ if idle.IsZero() { // The connection is non-idle.
+ t.mu.Unlock()
+ maxIdle.Reset(t.kp.MaxConnectionIdle)
+ continue
}
- t.streamSendQuota = s.Val
+ val := t.kp.MaxConnectionIdle - time.Since(idle)
+ t.mu.Unlock()
+ if val <= 0 {
+ // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
+ // Gracefully close the connection.
+ t.drain(http2.ErrCodeNo, []byte{})
+ // Resetting the timer so that the clean-up doesn't deadlock.
+ maxIdle.Reset(infinity)
+ return
+ }
+ maxIdle.Reset(val)
+ case <-maxAge.C:
+ t.drain(http2.ErrCodeNo, []byte{})
+ maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+ select {
+ case <-maxAge.C:
+ // Close the connection after grace period.
+ t.Close()
+ // Resetting the timer so that the clean-up doesn't deadlock.
+ maxAge.Reset(infinity)
+ case <-t.ctx.Done():
+ }
+ return
+ case <-keepalive.C:
+ if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
+ pingSent = false
+ keepalive.Reset(t.kp.Time)
+ continue
+ }
+ if pingSent {
+ t.Close()
+ // Resetting the timer so that the clean-up doesn't deadlock.
+ keepalive.Reset(infinity)
+ return
+ }
+ pingSent = true
+ t.controlBuf.put(p)
+ keepalive.Reset(t.kp.Timeout)
+ case <-t.ctx.Done():
+ return
}
-
}
}
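The idle, age and ping timers above are driven by keepalive.ServerParameters, with zero values replaced by the defaults in newHTTP2Server, and ping-policy violations are judged against keepalive.EnforcementPolicy. A minimal sketch of configuring both explicitly, assuming the grpc.KeepaliveParams and grpc.KeepaliveEnforcementPolicy server options in this grpc-go release:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	srv := grpc.NewServer(
		// ServerParameters end up as http2Server.kp.
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle:     15 * time.Minute,
			MaxConnectionAge:      30 * time.Minute,
			MaxConnectionAgeGrace: 5 * time.Minute,
			Time:                  2 * time.Hour,
			Timeout:               20 * time.Second,
		}),
		// EnforcementPolicy ends up as http2Server.kep.
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             5 * time.Minute,
			PermitWithoutStream: false,
		}),
	)
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}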
-// controller running in a separate goroutine takes charge of sending control
-// frames (e.g., window update, reset stream, setting, etc.) to the server.
-func (t *http2Server) controller() {
- for {
- select {
- case i := <-t.controlBuf.get():
- t.controlBuf.load()
+var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
+
+// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
+// is duplicated between the client and the server.
+// The transport layer needs to be refactored to take care of this.
+func (t *http2Server) itemHandler(i item) error {
+ switch i := i.(type) {
+ case *dataFrame:
+ if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
+ return err
+ }
+ i.f()
+ return nil
+ case *headerFrame:
+ t.hBuf.Reset()
+ for _, f := range i.hf {
+ t.hEnc.WriteField(f)
+ }
+ first := true
+ endHeaders := false
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ var err error
+ if first {
+ first = false
+ err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+ StreamID: i.streamID,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: i.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = t.framer.fr.WriteContinuation(
+ i.streamID,
+ endHeaders,
+ t.hBuf.Next(size),
+ )
+ }
+ if err != nil {
+ return err
+ }
+ }
+ atomic.StoreUint32(&t.resetPingStrikes, 1)
+ return nil
+ case *windowUpdate:
+ return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+ case *settings:
+ return t.framer.fr.WriteSettings(i.ss...)
+ case *settingsAck:
+ return t.framer.fr.WriteSettingsAck()
+ case *resetStream:
+ return t.framer.fr.WriteRSTStream(i.streamID, i.code)
+ case *goAway:
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ // The transport is closing.
+ return fmt.Errorf("transport: Connection closing")
+ }
+ sid := t.maxStreamID
+ if !i.headsUp {
+ // Stop accepting more streams now.
+ t.state = draining
+ if len(t.activeStreams) == 0 {
+ i.closeConn = true
+ }
+ t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
+ return err
+ }
+ if i.closeConn {
+ // Abruptly close the connection following the GoAway (via
+ // loopyWriter). But flush out what's inside the buffer first.
+ t.controlBuf.put(&flushIO{closeTr: true})
+ }
+ return nil
+ }
+ t.mu.Unlock()
+ // For a graceful close, send out a GoAway with a stream ID of MaxUint32.
+ // Follow that with a ping and wait for the ack to come back or a timer
+ // to expire. During this time accept new streams since they might have
+ // originated before the GoAway reaches the client.
+ // After getting the ack or the timer expiration, send out another GoAway,
+ // this time with the ID of the max stream the server intends to process.
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
+ return err
+ }
+ if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
+ return err
+ }
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ defer timer.Stop()
select {
- case <-t.writableChan:
- switch i := i.(type) {
- case *windowUpdate:
- t.framer.writeWindowUpdate(true, i.streamID, i.increment)
- case *settings:
- if i.ack {
- t.framer.writeSettingsAck(true)
- t.applySettings(i.ss)
- } else {
- t.framer.writeSettings(true, i.ss...)
- }
- case *resetStream:
- t.framer.writeRSTStream(true, i.streamID, i.code)
- case *goAway:
- t.mu.Lock()
- if t.state == closing {
- t.mu.Unlock()
- // The transport is closing.
- return
- }
- sid := t.maxStreamID
- t.state = draining
- t.mu.Unlock()
- t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil)
- case *flushIO:
- t.framer.flushWrite()
- case *ping:
- t.framer.writePing(true, i.ack, i.data)
- default:
- grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
- }
- t.writableChan <- 0
- continue
- case <-t.shutdownChan:
+ case <-t.drainChan:
+ case <-timer.C:
+ case <-t.ctx.Done():
return
}
- case <-t.shutdownChan:
- return
+ t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData})
+ }()
+ return nil
+ case *flushIO:
+ if err := t.framer.writer.Flush(); err != nil {
+ return err
+ }
+ if i.closeTr {
+ return ErrConnClosing
}
+ return nil
+ case *ping:
+ if !i.ack {
+ t.bdpEst.timesnap(i.data)
+ }
+ return t.framer.fr.WritePing(i.ack, i.data)
+ default:
+ err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %T", i)
+ errorf("%v", err)
+ return err
}
}
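itemHandler is invoked by loopyWriter (started in newHTTP2Server), which drains controlBuf so that every frame write is serialized onto a single goroutine. A standalone sketch of that pattern; the item type and handler below are illustrative stand-ins, not the vendored types:

package main

import (
	"context"
	"fmt"
	"time"
)

// item stands in for control-buffer entries (window updates, settings,
// headers, data frames) that the single writer goroutine serializes.
type item string

// loopy mirrors the loopyWriter shape: drain the channel and hand each item
// to the handler until the handler fails or the context is cancelled.
func loopy(ctx context.Context, ch <-chan item, handle func(item) error) {
	for {
		select {
		case it := <-ch:
			if err := handle(it); err != nil {
				return // e.g. ErrConnClosing after a flushIO{closeTr: true}
			}
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan item, 4)
	ch <- "windowUpdate"
	ch <- "headerFrame"
	go func() {
		time.Sleep(100 * time.Millisecond) // let the buffered items drain first
		cancel()
	}()
	loopy(ctx, ch, func(it item) error {
		fmt.Println("writing", it)
		return nil
	})
}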
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
-func (t *http2Server) Close() (err error) {
+func (t *http2Server) Close() error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
@@ -786,8 +1146,8 @@ func (t *http2Server) Close() (err error) {
streams := t.activeStreams
t.activeStreams = nil
t.mu.Unlock()
- close(t.shutdownChan)
- err = t.conn.Close()
+ t.cancel()
+ err := t.conn.Close()
// Cancel all active streams.
for _, s := range streams {
s.cancel()
@@ -796,7 +1156,7 @@ func (t *http2Server) Close() (err error) {
connEnd := &stats.ConnEnd{}
t.stats.HandleConn(t.ctx, connEnd)
}
- return
+ return err
}
// closeStream clears the footprint of a stream when the stream is not needed
@@ -804,8 +1164,11 @@ func (t *http2Server) Close() (err error) {
func (t *http2Server) closeStream(s *Stream) {
t.mu.Lock()
delete(t.activeStreams, s.id)
+ if len(t.activeStreams) == 0 {
+ t.idle = time.Now()
+ }
if t.state == draining && len(t.activeStreams) == 0 {
- defer t.Close()
+ defer t.controlBuf.put(&flushIO{closeTr: true})
}
t.mu.Unlock()
// In case stream sending and receiving are invoked in separate
@@ -813,11 +1176,6 @@ func (t *http2Server) closeStream(s *Stream) {
// called to interrupt the potential blocking on other goroutines.
s.cancel()
s.mu.Lock()
- if q := s.fc.resetPendingData(); q > 0 {
- if w := t.fc.onRead(q); w > 0 {
- t.controlBuf.put(&windowUpdate{0, w})
- }
- }
if s.state == streamDone {
s.mu.Unlock()
return
@@ -831,5 +1189,27 @@ func (t *http2Server) RemoteAddr() net.Addr {
}
func (t *http2Server) Drain() {
- t.controlBuf.put(&goAway{})
+ t.drain(http2.ErrCodeNo, []byte{})
+}
+
+func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.drainChan != nil {
+ return
+ }
+ t.drainChan = make(chan struct{})
+ t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
+}
+
+var rgen = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+func getJitter(v time.Duration) time.Duration {
+ if v == infinity {
+ return 0
+ }
+ // Generate a jitter between +/- 10% of the value.
+ r := int64(v / 10)
+ j := rgen.Int63n(2*r) - r
+ return time.Duration(j)
}
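getJitter spreads MaxConnectionAge so that connections accepted together do not all hit their age limit at the same instant. A small sketch of the same arithmetic; the helper name is chosen here only for illustration:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter mirrors getJitter above: a uniform duration in [-v/10, +v/10).
func jitter(v time.Duration) time.Duration {
	r := int64(v / 10)
	return time.Duration(rand.Int63n(2*r) - r)
}

func main() {
	maxAge := 30 * time.Minute
	// Each connection gets an effective MaxConnectionAge somewhere in
	// [27m, 33m), so a fleet of connections does not expire in lockstep.
	fmt.Println(maxAge + jitter(maxAge))
}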
diff --git a/go/vendor/google.golang.org/grpc/transport/http_util.go b/go/vendor/google.golang.org/grpc/transport/http_util.go
index a3c68d4..39f878c 100644
--- a/go/vendor/google.golang.org/grpc/transport/http_util.go
+++ b/go/vendor/google.golang.org/grpc/transport/http_util.go
@@ -1,33 +1,18 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
@@ -36,30 +21,31 @@ package transport
import (
"bufio"
"bytes"
+ "encoding/base64"
"fmt"
"io"
"net"
+ "net/http"
"strconv"
"strings"
- "sync/atomic"
"time"
+ "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
)
const (
- // The primary user agent
- primaryUA = "grpc-go/1.0"
// http2MaxFrameLen specifies the max length of a HTTP2 frame.
http2MaxFrameLen = 16384 // 16KB frame
// http://http2.github.io/http2-spec/#SettingValues
http2InitHeaderTableSize = 4096
// http2IOBufSize specifies the buffer size for sending frames.
- http2IOBufSize = 32 * 1024
+ defaultWriteBufSize = 32 * 1024
+ defaultReadBufSize = 32 * 1024
)
var (
@@ -87,24 +73,47 @@ var (
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
}
+ httpStatusConvTab = map[int]codes.Code{
+ // 400 Bad Request - INTERNAL.
+ http.StatusBadRequest: codes.Internal,
+ // 401 Unauthorized - UNAUTHENTICATED.
+ http.StatusUnauthorized: codes.Unauthenticated,
+ // 403 Forbidden - PERMISSION_DENIED.
+ http.StatusForbidden: codes.PermissionDenied,
+ // 404 Not Found - UNIMPLEMENTED.
+ http.StatusNotFound: codes.Unimplemented,
+ // 429 Too Many Requests - UNAVAILABLE.
+ http.StatusTooManyRequests: codes.Unavailable,
+ // 502 Bad Gateway - UNAVAILABLE.
+ http.StatusBadGateway: codes.Unavailable,
+ // 503 Service Unavailable - UNAVAILABLE.
+ http.StatusServiceUnavailable: codes.Unavailable,
+ // 504 Gateway timeout - UNAVAILABLE.
+ http.StatusGatewayTimeout: codes.Unavailable,
+ }
)
// Records the states during HPACK decoding. Must be reset once the
// decoding of the entire headers is finished.
type decodeState struct {
- err error // first error encountered decoding
-
encoding string
- // statusCode caches the stream status received from the trailer
- // the server sent. Client side only.
- statusCode codes.Code
- statusDesc string
+ // statusGen caches the stream status received from the trailer the server
+ // sent. Client side only. Do not access directly. After all trailers are
+ // parsed, use the status method to retrieve the status.
+ statusGen *status.Status
+ // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
+ // intended for direct access outside of parsing.
+ rawStatusCode *int
+ rawStatusMsg string
+ httpStatus *int
// Server side only fields.
timeoutSet bool
timeout time.Duration
method string
// key-value metadata map from the peer.
- mdata map[string][]string
+ mdata map[string][]string
+ statsTags []byte
+ statsTrace []byte
}
// isReservedHeader checks whether hdr belongs to HTTP2 headers
@@ -121,6 +130,7 @@ func isReservedHeader(hdr string) bool {
"grpc-message",
"grpc-status",
"grpc-timeout",
+ "grpc-status-details-bin",
"te":
return true
default:
@@ -139,12 +149,6 @@ func isWhitelistedPseudoHeader(hdr string) bool {
}
}
-func (d *decodeState) setErr(err error) {
- if d.err == nil {
- d.err = err
- }
-}
-
func validContentType(t string) bool {
e := "application/grpc"
if !strings.HasPrefix(t, e) {
@@ -158,56 +162,154 @@ func validContentType(t string) bool {
return true
}
-func (d *decodeState) processHeaderField(f hpack.HeaderField) {
+func (d *decodeState) status() *status.Status {
+ if d.statusGen == nil {
+ // No status-details were provided; generate status using code/msg.
+ d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg)
+ }
+ return d.statusGen
+}
+
+const binHdrSuffix = "-bin"
+
+func encodeBinHeader(v []byte) string {
+ return base64.RawStdEncoding.EncodeToString(v)
+}
+
+func decodeBinHeader(v string) ([]byte, error) {
+ if len(v)%4 == 0 {
+ // Input was padded, or padding was not necessary.
+ return base64.StdEncoding.DecodeString(v)
+ }
+ return base64.RawStdEncoding.DecodeString(v)
+}
+
+func encodeMetadataHeader(k, v string) string {
+ if strings.HasSuffix(k, binHdrSuffix) {
+ return encodeBinHeader(([]byte)(v))
+ }
+ return v
+}
+
+func decodeMetadataHeader(k, v string) (string, error) {
+ if strings.HasSuffix(k, binHdrSuffix) {
+ b, err := decodeBinHeader(v)
+ return string(b), err
+ }
+ return v, nil
+}
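The helpers above make binary-valued metadata ("-bin" suffixed keys) safe to carry as HTTP/2 header values: encoded without padding on send, accepted either padded or unpadded on receipt. A small standalone round trip using only the standard library, restating the same logic:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	raw := []byte{0x01, 0xff, 0x10}

	// Sent on the wire without padding, as encodeBinHeader does.
	sent := base64.RawStdEncoding.EncodeToString(raw)

	// decodeBinHeader accepts both forms: a length divisible by 4 is treated
	// as padded input (or input that needed no padding).
	var got []byte
	var err error
	if len(sent)%4 == 0 {
		got, err = base64.StdEncoding.DecodeString(sent)
	} else {
		got, err = base64.RawStdEncoding.DecodeString(sent)
	}

	// Only keys ending in "-bin" get this treatment, per encodeMetadataHeader.
	fmt.Println(sent, got, err, strings.HasSuffix("grpc-trace-bin", "-bin"))
}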
+
+func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error {
+ for _, hf := range frame.Fields {
+ if err := d.processHeaderField(hf); err != nil {
+ return err
+ }
+ }
+
+ // If grpc status exists, no need to check further.
+ if d.rawStatusCode != nil || d.statusGen != nil {
+ return nil
+ }
+
+ // If grpc status doesn't exist and http status doesn't exist,
+ // then it's a malformed header.
+ if d.httpStatus == nil {
+ return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
+ }
+
+ if *(d.httpStatus) != http.StatusOK {
+ code, ok := httpStatusConvTab[*(d.httpStatus)]
+ if !ok {
+ code = codes.Unknown
+ }
+ return streamErrorf(code, http.StatusText(*(d.httpStatus)))
+ }
+
+ // gRPC status doesn't exist and http status is OK.
+ // Set rawStatusCode to be unknown and return nil error.
+ // That way, if the stream has ended, this Unknown status
+ // will be propagated to the user.
+ // Otherwise it will be ignored, in which case the status from
+ // a later trailer, one that has the StreamEnded flag set, is propagated.
+ code := int(codes.Unknown)
+ d.rawStatusCode = &code
+ return nil
+
+}
+
+func (d *decodeState) addMetadata(k, v string) {
+ if d.mdata == nil {
+ d.mdata = make(map[string][]string)
+ }
+ d.mdata[k] = append(d.mdata[k], v)
+}
+
+func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
switch f.Name {
case "content-type":
if !validContentType(f.Value) {
- d.setErr(streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
- return
+ return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
}
case "grpc-encoding":
d.encoding = f.Value
case "grpc-status":
code, err := strconv.Atoi(f.Value)
if err != nil {
- d.setErr(streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
- return
+ return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
}
- d.statusCode = codes.Code(code)
+ d.rawStatusCode = &code
case "grpc-message":
- d.statusDesc = decodeGrpcMessage(f.Value)
+ d.rawStatusMsg = decodeGrpcMessage(f.Value)
+ case "grpc-status-details-bin":
+ v, err := decodeBinHeader(f.Value)
+ if err != nil {
+ return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
+ }
+ s := &spb.Status{}
+ if err := proto.Unmarshal(v, s); err != nil {
+ return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
+ }
+ d.statusGen = status.FromProto(s)
case "grpc-timeout":
d.timeoutSet = true
var err error
- d.timeout, err = decodeTimeout(f.Value)
- if err != nil {
- d.setErr(streamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
- return
+ if d.timeout, err = decodeTimeout(f.Value); err != nil {
+ return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
}
case ":path":
d.method = f.Value
+ case ":status":
+ code, err := strconv.Atoi(f.Value)
+ if err != nil {
+ return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err)
+ }
+ d.httpStatus = &code
+ case "grpc-tags-bin":
+ v, err := decodeBinHeader(f.Value)
+ if err != nil {
+ return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
+ }
+ d.statsTags = v
+ d.addMetadata(f.Name, string(v))
+ case "grpc-trace-bin":
+ v, err := decodeBinHeader(f.Value)
+ if err != nil {
+ return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
+ }
+ d.statsTrace = v
+ d.addMetadata(f.Name, string(v))
default:
- if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) {
- if f.Name == "user-agent" {
- i := strings.LastIndex(f.Value, " ")
- if i == -1 {
- // There is no application user agent string being set.
- return
- }
- // Extract the application user agent string.
- f.Value = f.Value[:i]
- }
- if d.mdata == nil {
- d.mdata = make(map[string][]string)
- }
- k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
- if err != nil {
- grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
- return
- }
- d.mdata[k] = append(d.mdata[k], v)
+ if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) {
+ break
}
+ v, err := decodeMetadataHeader(f.Name, f.Value)
+ if err != nil {
+ errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
+ return nil
+ }
+ d.addMetadata(f.Name, string(v))
}
+ return nil
}
type timeoutUnit uint8
@@ -373,141 +475,15 @@ type framer struct {
fr *http2.Framer
}
-func newFramer(conn net.Conn) *framer {
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
f := &framer{
- reader: bufio.NewReaderSize(conn, http2IOBufSize),
- writer: bufio.NewWriterSize(conn, http2IOBufSize),
+ reader: bufio.NewReaderSize(conn, readBufferSize),
+ writer: bufio.NewWriterSize(conn, writeBufferSize),
}
f.fr = http2.NewFramer(f.writer, f.reader)
+ // Opt in to the Frame reuse API on the framer to reduce garbage.
+ // Frames aren't safe to read from after a subsequent call to ReadFrame.
+ f.fr.SetReuseFrames()
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}
-
-func (f *framer) adjustNumWriters(i int32) int32 {
- return atomic.AddInt32(&f.numWriters, i)
-}
-
-// The following writeXXX functions can only be called when the caller gets
-// unblocked from writableChan channel (i.e., owns the privilege to write).
-
-func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
- if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
- if err := f.fr.WriteData(streamID, endStream, data); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
- if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
- if err := f.fr.WriteHeaders(p); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
- if err := f.fr.WritePing(ack, data); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
- if err := f.fr.WritePriority(streamID, p); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
- if err := f.fr.WritePushPromise(p); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
- if err := f.fr.WriteRSTStream(streamID, code); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
- if err := f.fr.WriteSettings(settings...); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeSettingsAck(forceFlush bool) error {
- if err := f.fr.WriteSettingsAck(); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
- if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
- return err
- }
- if forceFlush {
- return f.writer.Flush()
- }
- return nil
-}
-
-func (f *framer) flushWrite() error {
- return f.writer.Flush()
-}
-
-func (f *framer) readFrame() (http2.Frame, error) {
- return f.fr.ReadFrame()
-}
-
-func (f *framer) errorDetail() error {
- return f.fr.ErrorDetail()
-}
diff --git a/go/vendor/google.golang.org/grpc/transport/log.go b/go/vendor/google.golang.org/grpc/transport/log.go
new file mode 100644
index 0000000..ac8e358
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/transport/log.go
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains wrappers for grpclog functions.
+// The transport package only logs to verbose level 2 by default.
+
+package transport
+
+import "google.golang.org/grpc/grpclog"
+
+const logLevel = 2
+
+func infof(format string, args ...interface{}) {
+ if grpclog.V(logLevel) {
+ grpclog.Infof(format, args...)
+ }
+}
+
+func warningf(format string, args ...interface{}) {
+ if grpclog.V(logLevel) {
+ grpclog.Warningf(format, args...)
+ }
+}
+
+func errorf(format string, args ...interface{}) {
+ if grpclog.V(logLevel) {
+ grpclog.Errorf(format, args...)
+ }
+}
+
+func fatalf(format string, args ...interface{}) {
+ if grpclog.V(logLevel) {
+ grpclog.Fatalf(format, args...)
+ }
+}
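infof, warningf and errorf above only forward to grpclog when the configured verbosity is at least logLevel (2). A minimal sketch of enabling that verbosity programmatically, assuming the grpclog LoggerV2 API vendored alongside this package:

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Verbosity 2 (or higher) is what lets the transport-level wrappers
	// actually emit; at the default verbosity they stay silent.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stdout, os.Stderr, 2))

	if grpclog.V(2) {
		grpclog.Infof("transport-level logging is enabled")
	}
}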
diff --git a/go/vendor/google.golang.org/grpc/transport/pre_go16.go b/go/vendor/google.golang.org/grpc/transport/pre_go16.go
deleted file mode 100644
index 33d91c1..0000000
--- a/go/vendor/google.golang.org/grpc/transport/pre_go16.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build !go1.6
-
-/*
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-package transport
-
-import (
- "net"
- "time"
-
- "golang.org/x/net/context"
-)
-
-// dialContext connects to the address on the named network.
-func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
- var dialer net.Dialer
- if deadline, ok := ctx.Deadline(); ok {
- dialer.Timeout = deadline.Sub(time.Now())
- }
- return dialer.Dial(network, address)
-}
diff --git a/go/vendor/google.golang.org/grpc/transport/transport.go b/go/vendor/google.golang.org/grpc/transport/transport.go
index d465991..2e7bcae 100644
--- a/go/vendor/google.golang.org/grpc/transport/transport.go
+++ b/go/vendor/google.golang.org/grpc/transport/transport.go
@@ -1,54 +1,40 @@
/*
*
- * Copyright 2014, Google Inc.
- * All rights reserved.
+ * Copyright 2014 gRPC authors.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*
*/
-/*
-Package transport defines and implements message oriented communication channel
-to complete various transactions (e.g., an RPC).
-*/
+// Package transport defines and implements message oriented communication
+// channel to complete various transactions (e.g., an RPC). It is meant for
+// grpc-internal usage and is not intended to be imported directly by users.
package transport // import "google.golang.org/grpc/transport"
import (
- "bytes"
"fmt"
"io"
"net"
"sync"
"golang.org/x/net/context"
+ "golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
@@ -62,57 +48,56 @@ type recvMsg struct {
err error
}
-func (*recvMsg) item() {}
-
-// All items in an out of a recvBuffer should be the same type.
-type item interface {
- item()
-}
-
-// recvBuffer is an unbounded channel of item.
+// recvBuffer is an unbounded channel of recvMsg structs.
+// Note that recvBuffer differs from controlBuffer only in that recvBuffer
+// holds a channel of recvMsg structs instead of objects implementing the "item" interface.
+// recvBuffer is written to much more often than controlBuffer, and using
+// concrete recvMsg structs helps avoid allocation in "recvBuffer.put".
type recvBuffer struct {
- c chan item
+ c chan recvMsg
mu sync.Mutex
- backlog []item
+ backlog []recvMsg
}
func newRecvBuffer() *recvBuffer {
b := &recvBuffer{
- c: make(chan item, 1),
+ c: make(chan recvMsg, 1),
}
return b
}
-func (b *recvBuffer) put(r item) {
+func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
- defer b.mu.Unlock()
if len(b.backlog) == 0 {
select {
case b.c <- r:
+ b.mu.Unlock()
return
default:
}
}
b.backlog = append(b.backlog, r)
+ b.mu.Unlock()
}
func (b *recvBuffer) load() {
b.mu.Lock()
- defer b.mu.Unlock()
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
+ b.backlog[0] = recvMsg{}
b.backlog = b.backlog[1:]
default:
}
}
+ b.mu.Unlock()
}
-// get returns the channel that receives an item in the buffer.
+// get returns the channel that receives a recvMsg in the buffer.
//
-// Upon receipt of an item, the caller should call load to send another
-// item onto the channel if there is any.
-func (b *recvBuffer) get() <-chan item {
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
return b.c
}
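
The rewritten put/load/get methods implement an unbounded queue on top of a one-slot channel plus a mutex-guarded backlog slice; the patch also drops defer on these hot paths to avoid its overhead. A self-contained sketch of the pattern, with illustrative names and defer kept for brevity:

```go
package main

import (
	"fmt"
	"sync"
)

// unboundedBuf re-implements the recvBuffer/controlBuffer pattern: a
// one-slot channel for the fast path and a backlog slice for overflow.
// Receivers read from C() and call Load() afterwards to refill the slot.
type unboundedBuf struct {
	c       chan int
	mu      sync.Mutex
	backlog []int
}

func newUnboundedBuf() *unboundedBuf {
	return &unboundedBuf{c: make(chan int, 1)}
}

func (b *unboundedBuf) Put(v int) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- v: // fast path: the channel slot is free
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v) // slow path: queue it
}

// Load moves one backlogged value onto the channel, if there is room.
func (b *unboundedBuf) Load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func (b *unboundedBuf) C() <-chan int { return b.c }

func main() {
	b := newUnboundedBuf()
	for i := 0; i < 5; i++ {
		b.Put(i)
	}
	for i := 0; i < 5; i++ {
		v := <-b.C()
		b.Load() // refill the channel from the backlog
		fmt.Println(v)
	}
}
```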
@@ -122,7 +107,7 @@ type recvBufferReader struct {
ctx context.Context
goAway chan struct{}
recv *recvBuffer
- last *bytes.Reader // Stores the remaining data in the previous calls.
+ last []byte // Stores the remaining data in the previous calls.
err error
}
@@ -133,25 +118,85 @@ func (r *recvBufferReader) Read(p []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
- defer func() { r.err = err }()
- if r.last != nil && r.last.Len() > 0 {
+ n, r.err = r.read(p)
+ return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+ if r.last != nil && len(r.last) > 0 {
// Read remaining data left in last call.
- return r.last.Read(p)
+ copied := copy(p, r.last)
+ r.last = r.last[copied:]
+ return copied, nil
}
select {
case <-r.ctx.Done():
return 0, ContextErr(r.ctx.Err())
case <-r.goAway:
- return 0, ErrStreamDrain
- case i := <-r.recv.get():
+ return 0, errStreamDrain
+ case m := <-r.recv.get():
r.recv.load()
- m := i.(*recvMsg)
if m.err != nil {
return 0, m.err
}
- r.last = bytes.NewReader(m.data)
- return r.last.Read(p)
+ copied := copy(p, m.data)
+ r.last = m.data[copied:]
+ return copied, nil
+ }
+}
+
+// All items in and out of a controlBuffer should be the same type.
+type item interface {
+ item()
+}
+
+// controlBuffer is an unbounded channel of item.
+type controlBuffer struct {
+ c chan item
+ mu sync.Mutex
+ backlog []item
+}
+
+func newControlBuffer() *controlBuffer {
+ b := &controlBuffer{
+ c: make(chan item, 1),
+ }
+ return b
+}
+
+func (b *controlBuffer) put(r item) {
+ b.mu.Lock()
+ if len(b.backlog) == 0 {
+ select {
+ case b.c <- r:
+ b.mu.Unlock()
+ return
+ default:
+ }
+ }
+ b.backlog = append(b.backlog, r)
+ b.mu.Unlock()
+}
+
+func (b *controlBuffer) load() {
+ b.mu.Lock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog[0] = nil
+ b.backlog = b.backlog[1:]
+ default:
+ }
}
+ b.mu.Unlock()
+}
+
+// get returns the channel that receives an item in the buffer.
+//
+// Upon receipt of an item, the caller should call load to send another
+// item onto the channel if there is any.
+func (b *controlBuffer) get() <-chan item {
+ return b.c
}
type streamState uint8
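
The new read path replaces the per-message bytes.Reader with a plain []byte tail: each call copies into the caller's buffer and keeps the unread remainder for the next call. A minimal sketch of that copy-and-tail bookkeeping (names are illustrative, not from the patch):

```go
package main

import "fmt"

// readFrom copies as much of src into dst as fits and returns the number
// of bytes copied plus the unread tail -- the same bookkeeping the patched
// recvBufferReader does with r.last, expressed as a free function.
func readFrom(dst, src []byte) (n int, rest []byte) {
	n = copy(dst, src)
	return n, src[n:]
}

func main() {
	msg := []byte("hello, transport")
	buf := make([]byte, 4)
	for len(msg) > 0 {
		var n int
		n, msg = readFrom(buf, msg)
		fmt.Printf("%q\n", buf[:n])
	}
}
```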
@@ -165,59 +210,67 @@ const (
// Stream represents an RPC in the transport layer.
type Stream struct {
- id uint32
- // nil for client side Stream.
- st ServerTransport
- // clientStatsCtx keeps the user context for stats handling.
- // It's only valid on client side. Server side stats context is same as s.ctx.
- // All client side stats collection should use the clientStatsCtx (instead of the stream context)
- // so that all the generated stats for a particular RPC can be associated in the processing phase.
- clientStatsCtx context.Context
- // ctx is the associated context of the stream.
- ctx context.Context
- // cancel is always nil for client side Stream.
- cancel context.CancelFunc
- // done is closed when the final status arrives.
- done chan struct{}
- // goAway is closed when the server sent GoAways signal before this stream was initiated.
- goAway chan struct{}
- // method records the associated RPC method of the stream.
- method string
+ id uint32
+ st ServerTransport // nil for client side Stream
+ ctx context.Context // the associated context of the stream
+ cancel context.CancelFunc // always nil for client side Stream
+ done chan struct{} // closed when the final status arrives
+ goAway chan struct{} // closed when a GOAWAY control message is received
+ method string // the associated RPC method of the stream
recvCompress string
sendCompress string
buf *recvBuffer
- dec io.Reader
+ trReader io.Reader
fc *inFlow
recvQuota uint32
- // The accumulated inbound quota pending for window update.
- updateQuota uint32
- // The handler to control the window update procedure for both this
- // particular stream and the associated transport.
- windowHandler func(int)
+ waiters waiters
+
+ // Callback to state application's intentions to read data. This
+ // is used to adjust flow control, if needed.
+ requestRead func(int)
sendQuotaPool *quotaPool
- // Close headerChan to indicate the end of reception of header metadata.
- headerChan chan struct{}
- // header caches the received header metadata.
- header metadata.MD
- // The key-value map of trailer metadata.
- trailer metadata.MD
-
- mu sync.RWMutex // guard the following
- // headerOK becomes true from the first header is about to send.
- headerOk bool
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ headerDone bool // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ header metadata.MD // the received header metadata.
+ trailer metadata.MD // the key-value map of trailer metadata.
+
+ mu sync.RWMutex // guard the following
+ headerOk     bool         // becomes true when the first header is about to be sent
state streamState
- // true iff headerChan is closed. Used to avoid closing headerChan
- // multiple times.
- headerDone bool
- // the status received from the server.
- statusCode codes.Code
- statusDesc string
+
+ status *status.Status // the status error received from the server
+
+ rstStream bool // indicates whether a RST_STREAM frame needs to be sent
+ rstError http2.ErrCode // the error that needs to be sent along with the RST_STREAM frame
+
+ bytesReceived bool // indicates whether any bytes have been received on this stream
+ unprocessed bool // set if the server sends a refused stream or GOAWAY including this stream
+}
+
+func (s *Stream) waitOnHeader() error {
+ if s.headerChan == nil {
+ // On the server headerChan is always nil since a stream originates
+ // only after having received headers.
+ return nil
+ }
+ wc := s.waiters
+ select {
+ case <-wc.ctx.Done():
+ return ContextErr(wc.ctx.Err())
+ case <-wc.goAway:
+ return errStreamDrain
+ case <-s.headerChan:
+ return nil
+ }
}
// RecvCompress returns the compression algorithm applied to the inbound
// message. It is empty string if there is no compression applied.
func (s *Stream) RecvCompress() string {
+ if err := s.waitOnHeader(); err != nil {
+ return ""
+ }
return s.recvCompress
}
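
waitOnHeader centralizes the wait for header metadata into a single select over the context, the GOAWAY signal, and the header channel. A small standalone sketch of the same idea, with illustrative names and error values:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// awaitHeader mimics the new waitOnHeader helper: block until the header
// channel closes, the context ends, or a drain (GOAWAY) signal fires.
func awaitHeader(ctx context.Context, goAway, headerChan <-chan struct{}) error {
	if headerChan == nil {
		// Server side: a stream only exists once headers were received.
		return nil
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-goAway:
		return errors.New("stream drained by GOAWAY")
	case <-headerChan:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	headerChan := make(chan struct{})
	goAway := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(headerChan) // headers arrived
	}()
	fmt.Println(awaitHeader(ctx, goAway, headerChan)) // <nil>
}
```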
@@ -240,16 +293,16 @@ func (s *Stream) GoAway() <-chan struct{} {
// Header acquires the key-value pairs of header metadata once it
// is available. It blocks until i) the metadata is ready or ii) there is no
-// header metadata or iii) the stream is cancelled/expired.
+// header metadata or iii) the stream is canceled/expired.
func (s *Stream) Header() (metadata.MD, error) {
+ err := s.waitOnHeader()
+ // Even if the stream is closed, header is returned if available.
select {
- case <-s.ctx.Done():
- return nil, ContextErr(s.ctx.Err())
- case <-s.goAway:
- return nil, ErrStreamDrain
case <-s.headerChan:
return s.header.Copy(), nil
+ default:
}
+ return nil, err
}
// Trailer returns the cached trailer metadata. Note that if it is not called
@@ -257,8 +310,9 @@ func (s *Stream) Header() (metadata.MD, error) {
// side only.
func (s *Stream) Trailer() metadata.MD {
s.mu.RLock()
- defer s.mu.RUnlock()
- return s.trailer.Copy()
+ c := s.trailer.Copy()
+ s.mu.RUnlock()
+ return c
}
// ServerTransport returns the underlying ServerTransport for the stream.
@@ -277,28 +331,25 @@ func (s *Stream) Method() string {
return s.method
}
-// StatusCode returns statusCode received from the server.
-func (s *Stream) StatusCode() codes.Code {
- return s.statusCode
-}
-
-// StatusDesc returns statusDesc received from the server.
-func (s *Stream) StatusDesc() string {
- return s.statusDesc
+// Status returns the status received from the server.
+func (s *Stream) Status() *status.Status {
+ return s.status
}
// SetHeader sets the header metadata. This can be called multiple times.
// Server side only.
func (s *Stream) SetHeader(md metadata.MD) error {
s.mu.Lock()
- defer s.mu.Unlock()
if s.headerOk || s.state == streamDone {
+ s.mu.Unlock()
return ErrIllegalHeaderWrite
}
if md.Len() == 0 {
+ s.mu.Unlock()
return nil
}
s.header = metadata.Join(s.header, md)
+ s.mu.Unlock()
return nil
}
@@ -309,28 +360,78 @@ func (s *Stream) SetTrailer(md metadata.MD) error {
return nil
}
s.mu.Lock()
- defer s.mu.Unlock()
s.trailer = metadata.Join(s.trailer, md)
+ s.mu.Unlock()
return nil
}
func (s *Stream) write(m recvMsg) {
- s.buf.put(&m)
+ s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ // Don't request a read if there was an error earlier
+ if er := s.trReader.(*transportReader).er; er != nil {
+ return 0, er
+ }
+ s.requestRead(len(p))
+ return io.ReadFull(s.trReader, p)
}
-// Read reads all the data available for this Stream from the transport and
+// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
-func (s *Stream) Read(p []byte) (n int, err error) {
- n, err = s.dec.Read(p)
+type transportReader struct {
+ reader io.Reader
+ // The handler to control the window update procedure for both this
+ // particular stream and the associated transport.
+ windowHandler func(int)
+ er error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+ n, err = t.reader.Read(p)
if err != nil {
+ t.er = err
return
}
- s.windowHandler(n)
+ t.windowHandler(n)
return
}
+// finish sets the stream's state and status, and closes the done channel.
+// s.mu must be held by the caller. st must always be non-nil.
+func (s *Stream) finish(st *status.Status) {
+ s.status = st
+ s.state = streamDone
+ close(s.done)
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+ s.mu.Lock()
+ br := s.bytesReceived
+ s.mu.Unlock()
+ return br
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+ s.mu.Lock()
+ br := s.unprocessed
+ s.mu.Unlock()
+ return br
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+ return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
// The key to save transport.Stream in the context.
type streamKey struct{}
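
Stream.Read now delegates to a wrapping transportReader that remembers the first error and reports every successful read to the flow-control window handler. A minimal standalone sketch of that wrapper pattern (countingReader and onRead are illustrative names):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader mirrors the transportReader idea: wrap an io.Reader,
// remember the first error, and report each successful read to a callback
// (in the patch, the flow-control window handler).
type countingReader struct {
	r       io.Reader
	onRead  func(n int)
	lastErr error
}

func (c *countingReader) Read(p []byte) (int, error) {
	if c.lastErr != nil {
		return 0, c.lastErr
	}
	n, err := c.r.Read(p)
	if err != nil {
		c.lastErr = err
		return n, err
	}
	c.onRead(n)
	return n, nil
}

func main() {
	total := 0
	cr := &countingReader{
		r:      strings.NewReader("flow controlled payload"),
		onRead: func(n int) { total += n },
	}
	if _, err := io.Copy(io.Discard, cr); err != nil {
		fmt.Println("read error:", err)
	}
	fmt.Println("bytes observed by handler:", total)
}
```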
@@ -351,17 +452,22 @@ type transportState int
const (
reachable transportState = iota
- unreachable
closing
draining
)
// ServerConfig consists of all the configurations to establish a server transport.
type ServerConfig struct {
- MaxStreams uint32
- AuthInfo credentials.AuthInfo
- InTapHandle tap.ServerInHandle
- StatsHandler stats.Handler
+ MaxStreams uint32
+ AuthInfo credentials.AuthInfo
+ InTapHandle tap.ServerInHandle
+ StatsHandler stats.Handler
+ KeepaliveParams keepalive.ServerParameters
+ KeepalivePolicy keepalive.EnforcementPolicy
+ InitialWindowSize int32
+ InitialConnWindowSize int32
+ WriteBufferSize int
+ ReadBufferSize int
}
// NewServerTransport creates a ServerTransport with conn or non-nil error
@@ -374,6 +480,9 @@ func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (S
type ConnectOptions struct {
// UserAgent is the application user agent.
UserAgent string
+ // Authority is the :authority pseudo-header to use. This field has no effect if
+ // TransportCredentials is set.
+ Authority string
// Dialer specifies how to dial a network address.
Dialer func(context.Context, string) (net.Conn, error)
// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
@@ -382,20 +491,31 @@ type ConnectOptions struct {
PerRPCCredentials []credentials.PerRPCCredentials
// TransportCredentials stores the Authenticator required to setup a client connection.
TransportCredentials credentials.TransportCredentials
+ // KeepaliveParams stores the keepalive parameters.
+ KeepaliveParams keepalive.ClientParameters
// StatsHandler stores the handler for stats.
StatsHandler stats.Handler
+ // InitialWindowSize sets the initial window size for a stream.
+ InitialWindowSize int32
+ // InitialConnWindowSize sets the initial window size for a connection.
+ InitialConnWindowSize int32
+ // WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it's written on the wire.
+ WriteBufferSize int
+ // ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most in one read syscall.
+ ReadBufferSize int
}
// TargetInfo contains the information of the target such as network address and metadata.
type TargetInfo struct {
- Addr string
- Metadata interface{}
+ Addr string
+ Metadata interface{}
+ Authority string
}
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
-func NewClientTransport(ctx context.Context, target TargetInfo, opts ConnectOptions) (ClientTransport, error) {
- return newHTTP2Client(ctx, target, opts)
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
+ return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
}
// Options provides additional hints and information for message
@@ -407,7 +527,7 @@ type Options struct {
// Delay is a hint to the transport implementation for whether
// the data could be buffered for a batching write. The
- // Transport implementation may ignore the hint.
+ // transport implementation may ignore the hint.
Delay bool
}
@@ -419,18 +539,19 @@ type CallHdr struct {
// Method specifies the operation to perform.
Method string
- // RecvCompress specifies the compression algorithm applied on
- // inbound messages.
- RecvCompress string
-
// SendCompress specifies the compression algorithm applied on
// outbound message.
SendCompress string
+ // Creds specifies credentials.PerRPCCredentials for a call.
+ Creds credentials.PerRPCCredentials
+
// Flush indicates whether a new stream command should be sent
// to the peer without waiting for the first data. This is
- // only a hint. The transport may modify the flush decision
+ // only a hint.
+ // If it's true, the transport may modify the flush decision
// for performance purposes.
+ // If it's false, the new stream will never be flushed.
Flush bool
}
@@ -448,7 +569,7 @@ type ClientTransport interface {
// Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole.
- Write(s *Stream, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data []byte, opts *Options) error
// NewStream creates a Stream for an RPC.
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
@@ -466,10 +587,13 @@ type ClientTransport interface {
// once the transport is initiated.
Error() <-chan struct{}
- // GoAway returns a channel that is closed when ClientTranspor
+ // GoAway returns a channel that is closed when ClientTransport
// receives the draining signal from the server (e.g., GOAWAY frame in
// HTTP/2).
GoAway() <-chan struct{}
+
+ // GetGoAwayReason returns the reason why GoAway frame was received.
+ GetGoAwayReason() GoAwayReason
}
// ServerTransport is the common interface for all gRPC server-side transport
@@ -487,12 +611,11 @@ type ServerTransport interface {
// Write sends the data for the given stream.
// Write may not be called on all streams.
- Write(s *Stream, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data []byte, opts *Options) error
- // WriteStatus sends the status of a stream to the client.
- // WriteStatus is the final call made on a stream and always
- // occurs.
- WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
+ // WriteStatus sends the status of a stream to the client. WriteStatus is
+ // the final call made on a stream and always occurs.
+ WriteStatus(s *Stream, st *status.Status) error
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
@@ -553,11 +676,17 @@ func (e ConnectionError) Origin() error {
var (
// ErrConnClosing indicates that the transport is closing.
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
- // ErrStreamDrain indicates that the stream is rejected by the server because
+ // errStreamDrain indicates that the stream is rejected by the server because
// the server stops accepting new RPCs.
- ErrStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
+ // TODO: delete this error; it is no longer necessary.
+ errStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
+ // StatusGoAway indicates that the server sent a GOAWAY that included this
+ // stream's ID in unprocessed RPCs.
+ statusGoAway = status.New(codes.Unavailable, "the server stopped accepting new RPCs")
)
+// TODO: See if we can replace StreamError with status package errors.
+
// StreamError is an error that only affects one stream within a connection.
type StreamError struct {
Code codes.Code
@@ -565,44 +694,64 @@ type StreamError struct {
}
func (e StreamError) Error() string {
- return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
+ return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
}
-// ContextErr converts the error from context package into a StreamError.
-func ContextErr(err error) StreamError {
- switch err {
- case context.DeadlineExceeded:
- return streamErrorf(codes.DeadlineExceeded, "%v", err)
- case context.Canceled:
- return streamErrorf(codes.Canceled, "%v", err)
- }
- panic(fmt.Sprintf("Unexpected error from context packet: %v", err))
+// waiters are passed to quotaPool get methods to
+// wait on in addition to waiting on quota.
+type waiters struct {
+ ctx context.Context
+ tctx context.Context
+ done chan struct{}
+ goAway chan struct{}
}
-// wait blocks until it can receive from ctx.Done, closing, or proceed.
-// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
-// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise
-// it return the StreamError for ctx.Err.
-// If it receives from goAway, it returns 0, ErrStreamDrain.
-// If it receives from closing, it returns 0, ErrConnClosing.
-// If it receives from proceed, it returns the received integer, nil.
-func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) {
- select {
- case <-ctx.Done():
- return 0, ContextErr(ctx.Err())
- case <-done:
- // User cancellation has precedence.
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+ // GoAwayInvalid indicates that no GoAway frame is received.
+ GoAwayInvalid GoAwayReason = 0
+ // GoAwayNoReason is the default value when GoAway frame is received.
+ GoAwayNoReason GoAwayReason = 1
+ // GoAwayTooManyPings indicates that a GoAway frame with
+ // ErrCodeEnhanceYourCalm was received and that the debug data said
+ // "too_many_pings".
+ GoAwayTooManyPings GoAwayReason = 2
+)
+
+// loopyWriter is run in a separate goroutine. It is the single code path that will
+// write data on the wire.
+func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) {
+ for {
select {
+ case i := <-cbuf.get():
+ cbuf.load()
+ if err := handler(i); err != nil {
+ errorf("transport: Error while handling item. Err: %v", err)
+ return
+ }
case <-ctx.Done():
- return 0, ContextErr(ctx.Err())
- default:
+ return
+ }
+ hasData:
+ for {
+ select {
+ case i := <-cbuf.get():
+ cbuf.load()
+ if err := handler(i); err != nil {
+ errorf("transport: Error while handling item. Err: %v", err)
+ return
+ }
+ case <-ctx.Done():
+ return
+ default:
+ if err := handler(&flushIO{}); err != nil {
+ errorf("transport: Error while flushing. Err: %v", err)
+ return
+ }
+ break hasData
+ }
}
- return 0, io.EOF
- case <-goAway:
- return 0, ErrStreamDrain
- case <-closing:
- return 0, ErrConnClosing
- case i := <-proceed:
- return i, nil
}
}
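
loopyWriter introduces a drain-then-flush loop: handle one item, keep draining without blocking, and flush only once the queue is momentarily empty so writes can be batched. A stripped-down sketch of that loop shape (drainLoop and its parameters are illustrative, not part of the patch):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// drainLoop mirrors the loopyWriter pattern: block for one item, then drain
// any further items non-blockingly, and flush when the queue runs dry.
func drainLoop(ctx context.Context, items <-chan string, handle func(string), flush func()) {
	for {
		select {
		case it := <-items:
			handle(it)
		case <-ctx.Done():
			return
		}
	hasData:
		for {
			select {
			case it := <-items:
				handle(it)
			case <-ctx.Done():
				return
			default:
				flush() // queue drained: write the batch to the wire
				break hasData
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	items := make(chan string, 8)
	for i := 0; i < 5; i++ {
		items <- fmt.Sprintf("frame-%d", i)
	}
	drainLoop(ctx, items,
		func(s string) { fmt.Println("handled", s) },
		func() { fmt.Println("flush") },
	)
}
```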
diff --git a/go/vendor/google.golang.org/grpc/vet.sh b/go/vendor/google.golang.org/grpc/vet.sh
new file mode 100755
index 0000000..2ad94fe
--- /dev/null
+++ b/go/vendor/google.golang.org/grpc/vet.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+set -ex # Exit on error; debugging enabled.
+set -o pipefail # Fail a pipe if any sub-command fails.
+
+die() {
+ echo "$@" >&2
+ exit 1
+}
+
+PATH="$GOPATH/bin:$GOROOT/bin:$PATH"
+
+# Check proto in manual runs or cron runs.
+if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then
+ check_proto="true"
+fi
+
+if [ "$1" = "-install" ]; then
+ go get -d \
+ google.golang.org/grpc/...
+ go get -u \
+ github.com/golang/lint/golint \
+ golang.org/x/tools/cmd/goimports \
+ honnef.co/go/tools/cmd/staticcheck \
+ github.com/client9/misspell/cmd/misspell \
+ github.com/golang/protobuf/protoc-gen-go
+ if [[ "$check_proto" = "true" ]]; then
+ if [[ "$TRAVIS" = "true" ]]; then
+ PROTOBUF_VERSION=3.3.0
+ PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
+ pushd /home/travis
+ wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
+ unzip ${PROTOC_FILENAME}
+ bin/protoc --version
+ popd
+ elif ! which protoc > /dev/null; then
+ die "Please install protoc into your path"
+ fi
+ fi
+ exit 0
+elif [[ "$#" -ne 0 ]]; then
+ die "Unknown argument(s): $*"
+fi
+
+# TODO: Remove this check and the mangling below once "context" is imported
+# directly.
+if git status --porcelain | read; then
+ die "Uncommitted or untracked files found; commit changes first"
+fi
+
+git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read)
+gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)
+goimports -l . 2>&1 | tee /dev/stderr | (! read)
+golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read)
+
+# Undo any edits made by this script.
+cleanup() {
+ git reset --hard HEAD
+}
+trap cleanup EXIT
+
+# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484).
+# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711).
+git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":'
+set +o pipefail
+# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed.
+go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
+set -o pipefail
+git reset --hard HEAD
+
+if [[ "$check_proto" = "true" ]]; then
+ PATH="/home/travis/bin:$PATH" make proto && \
+ git status --porcelain 2>&1 | (! read) || \
+ (git status; git --no-pager diff; exit 1)
+fi
+
+# TODO(menghanl): fix errors in transport_test.
+staticcheck -ignore '
+google.golang.org/grpc/transport/transport_test.go:SA2002
+google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
+google.golang.org/grpc/stats/stats_test.go:SA1019
+google.golang.org/grpc/test/end2end_test.go:SA1019
+' ./...
+misspell -error .
diff --git a/go/vendor/vendor.json b/go/vendor/vendor.json
index 379dd57..5c1f1e3 100644
--- a/go/vendor/vendor.json
+++ b/go/vendor/vendor.json
@@ -9,12 +9,24 @@
"revisionTime": "2017-03-31T03:19:02Z"
},
{
+ "checksumSHA1": "VfkiItDBFFkZluaAMAzJipDXNBY=",
+ "path": "github.com/golang/protobuf/ptypes",
+ "revision": "1e59b77b52bf8e4b449a57e6f79f21226d571845",
+ "revisionTime": "2017-11-13T18:07:20Z"
+ },
+ {
"checksumSHA1": "lZFWy27Qo6+m/keDjNFYTxSmvZw=",
"path": "github.com/golang/protobuf/ptypes/any",
"revision": "2bba0603135d7d7f5cb73b2125beeda19c09f4ef",
"revisionTime": "2017-03-31T03:19:02Z"
},
{
+ "checksumSHA1": "hUjAj0dheFVDl84BAnSWj9qy2iY=",
+ "path": "github.com/golang/protobuf/ptypes/duration",
+ "revision": "1e59b77b52bf8e4b449a57e6f79f21226d571845",
+ "revisionTime": "2017-11-13T18:07:20Z"
+ },
+ {
"checksumSHA1": "sfoot+dHmmOgWZS6GJ5X79ClZM0=",
"path": "github.com/golang/protobuf/ptypes/timestamp",
"revision": "2bba0603135d7d7f5cb73b2125beeda19c09f4ef",
@@ -156,82 +168,180 @@
"revisionTime": "2017-04-04T13:20:09Z"
},
{
- "checksumSHA1": "epHwh7hDQSYzDowPIbw8vnLzPS0=",
+ "checksumSHA1": "LXTQppZOmpZb8/zNBzfXmq3GDEg=",
"path": "google.golang.org/grpc",
- "revision": "50955793b0183f9de69bd78e2ec251cf20aab121",
- "revisionTime": "2017-01-11T19:10:52Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "xBhmO0Vn4kzbmySioX+2gBImrkk=",
+ "path": "google.golang.org/grpc/balancer",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "08icuA15HRkdYCt6H+Cs90RPQsY=",
+ "checksumSHA1": "CPWX/IgaQSR3+78j4sPrvHNkW+U=",
+ "path": "google.golang.org/grpc/balancer/base",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "DJ1AtOk4Pu7bqtUMob95Hw8HPNw=",
+ "path": "google.golang.org/grpc/balancer/roundrobin",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "bfmh2m3qW8bb6qpfS/D4Wcl4hZE=",
"path": "google.golang.org/grpc/codes",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "XH2WYcDNwVO47zYShREJjcYXm0Y=",
+ "path": "google.golang.org/grpc/connectivity",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "K99T+YYvCBu0O1I3zuRcGhM5ADY=",
+ "checksumSHA1": "4DnDX81AOSyVP3UJ5tQmlNcG1MI=",
"path": "google.golang.org/grpc/credentials",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "9DImIDqmAMPO24loHJ77UVJTDxQ=",
+ "path": "google.golang.org/grpc/encoding",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "3Lt5hNAG8qJAYSsNghR5uA1zQns=",
+ "checksumSHA1": "H7SuPUqbPcdbNqgl+k3ohuwMAwE=",
+ "path": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "ntHev01vgZgeIh5VFRmbLx/BSTo=",
"path": "google.golang.org/grpc/grpclog",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "T3Q0p8kzvXFnRkMaK/G8mCv6mc0=",
+ "checksumSHA1": "Qvf3zdmRCSsiM/VoBv0qB/naHtU=",
"path": "google.golang.org/grpc/internal",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "OJnTFsZMDUCKpblFN9NlcBq5r2w=",
+ "checksumSHA1": "hcuHgKp8W0wIzoCnNfKI8NUss5o=",
"path": "google.golang.org/grpc/keepalive",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "4vbTFdNR1O8fvE6I77sESA8gEHw=",
+ "checksumSHA1": "KeUmTZV+2X46C49cKyjp+xM7fvw=",
"path": "google.golang.org/grpc/metadata",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "4GSUFhOQ0kdFlBH4D5OTeKy78z0=",
+ "checksumSHA1": "5dwF592DPvhF2Wcex3m7iV6aGRQ=",
"path": "google.golang.org/grpc/naming",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "3RRoLeH6X2//7tVClOVzxW2bY+E=",
+ "checksumSHA1": "n5EgDdBqFMa2KQFhtl+FF/4gIFo=",
"path": "google.golang.org/grpc/peer",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "y8Ta+ctMP9CUTiPyPyxiD154d8w=",
+ "path": "google.golang.org/grpc/resolver",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "WpWF+bDzObsHf+bjoGpb/abeFxo=",
+ "path": "google.golang.org/grpc/resolver/dns",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "zs9M4xE8Lyg4wvuYvR00XoBxmuw=",
+ "path": "google.golang.org/grpc/resolver/passthrough",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "xEHHTEIORdW+3USbRp52rt2I7wE=",
+ "checksumSHA1": "G9lgXNi7qClo5sM2s6TbTHLFR3g=",
"path": "google.golang.org/grpc/stats",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "7SPSKZYcLg5QSvaUc2l8ml6eQbM=",
+ "checksumSHA1": "tUo+M0Cb0W9ZEIt5BH30wJz/Kjc=",
"path": "google.golang.org/grpc/status",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "N0TftT6/CyWqp6VRi2DqDx60+Fo=",
+ "checksumSHA1": "qvArRhlrww5WvRmbyMF2mUfbJew=",
"path": "google.golang.org/grpc/tap",
- "revision": "6d158dbf32084eac5fc0b9ea6f1feed214290ec6",
- "revisionTime": "2017-04-12T06:39:30Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "yHpUeGwKoqqwd3cbEp3lkcnvft0=",
+ "checksumSHA1": "4PldZ/0JjX6SpJYaMByY1ozywnY=",
"path": "google.golang.org/grpc/transport",
- "revision": "50955793b0183f9de69bd78e2ec251cf20aab121",
- "revisionTime": "2017-01-11T19:10:52Z"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
"checksumSHA1": "fALlQNY1fM99NesfLJ50KguWsio=",